/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
2015-09-18 20:35:56 +00:00
import (
"fmt"
2016-07-05 07:29:09 +00:00
"sort"
2016-01-20 23:48:52 +00:00
"strconv"
2016-10-06 15:02:51 +00:00
"strings"
2015-11-11 23:22:57 +00:00
"time"
2015-09-18 20:35:56 +00:00
2016-02-25 00:41:26 +00:00
"github.com/golang/glog"
2017-01-25 13:39:54 +00:00
apiequality "k8s.io/apimachinery/pkg/api/equality"
2017-01-11 14:09:48 +00:00
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
2017-02-26 23:26:33 +00:00
"k8s.io/apimachinery/pkg/types"
2017-01-11 14:09:48 +00:00
"k8s.io/apimachinery/pkg/util/errors"
2017-01-27 20:42:17 +00:00
intstrutil "k8s.io/apimachinery/pkg/util/intstr"
2017-01-11 14:09:48 +00:00
"k8s.io/apimachinery/pkg/util/wait"
2017-01-23 18:37:22 +00:00
"k8s.io/client-go/util/integer"
2015-09-18 20:35:56 +00:00
"k8s.io/kubernetes/pkg/api"
2016-11-18 20:50:17 +00:00
"k8s.io/kubernetes/pkg/api/v1"
internalextensions "k8s.io/kubernetes/pkg/apis/extensions"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
2017-01-06 06:34:29 +00:00
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
2017-02-06 18:35:50 +00:00
corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
2017-02-10 16:43:30 +00:00
extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1"
2016-03-05 00:32:32 +00:00
"k8s.io/kubernetes/pkg/controller"
2016-01-12 23:37:51 +00:00
labelsutil "k8s.io/kubernetes/pkg/util/labels"
2015-09-18 20:35:56 +00:00
)
2016-01-13 01:52:18 +00:00
const (
2016-07-05 07:29:09 +00:00
// RevisionAnnotation is the revision annotation of a deployment's replica sets which records its rollout sequence
2016-01-13 01:52:18 +00:00
RevisionAnnotation = "deployment.kubernetes.io/revision"
2016-10-06 15:02:51 +00:00
// RevisionHistoryAnnotation maintains the history of all old revisions that a replica set has served for a deployment.
RevisionHistoryAnnotation = "deployment.kubernetes.io/revision-history"
2016-01-28 16:35:14 +00:00
// DesiredReplicasAnnotation is the desired replicas for a deployment recorded as an annotation
// in its replica sets. Helps in separating scaling events from the rollout process and for
// determining if the new replica set for a deployment is really saturated.
DesiredReplicasAnnotation = "deployment.kubernetes.io/desired-replicas"
// MaxReplicasAnnotation is the maximum replicas a deployment can have at a given point, which
// is deployment.spec.replicas + maxSurge. Used by the underlying replica sets to estimate their
// proportions in case the deployment has surge replicas.
MaxReplicasAnnotation = "deployment.kubernetes.io/max-replicas"
2016-01-19 22:50:03 +00:00
2016-07-05 07:29:09 +00:00
// RollbackRevisionNotFound is not found rollback event reason
RollbackRevisionNotFound = "DeploymentRollbackRevisionNotFound"
// RollbackTemplateUnchanged is the template unchanged rollback event reason
2016-01-19 22:50:03 +00:00
RollbackTemplateUnchanged = "DeploymentRollbackTemplateUnchanged"
2016-07-05 07:29:09 +00:00
// RollbackDone is the done rollback event reason
RollbackDone = "DeploymentRollback"
2017-03-16 17:08:22 +00:00
// OverlapAnnotation marks deployments with overlapping selector with other deployments
// TODO: Delete this annotation when we no longer need to support a client
// talking to a server older than v1.6.
OverlapAnnotation = "deployment.kubernetes.io/error-selector-overlapping-with"
2016-09-15 15:57:53 +00:00
// Reasons for deployment conditions
//
// Progressing:
//
// ReplicaSetUpdatedReason is added in a deployment when one of its replica sets is updated as part
// of the rollout process.
ReplicaSetUpdatedReason = "ReplicaSetUpdated"
// FailedRSCreateReason is added in a deployment when it cannot create a new replica set.
FailedRSCreateReason = "ReplicaSetCreateError"
// NewReplicaSetReason is added in a deployment when it creates a new replica set.
NewReplicaSetReason = "NewReplicaSetCreated"
// FoundNewRSReason is added in a deployment when it adopts an existing replica set.
FoundNewRSReason = "FoundNewReplicaSet"
// NewRSAvailableReason is added in a deployment when its newest replica set is made available
// ie. the number of new pods that have passed readiness checks and run for at least minReadySeconds
// is at least the minimum available pods that need to run for the deployment.
NewRSAvailableReason = "NewReplicaSetAvailable"
// TimedOutReason is added in a deployment when its newest replica set fails to show any progress
// within the given deadline (progressDeadlineSeconds).
TimedOutReason = "ProgressDeadlineExceeded"
// PausedDeployReason is added in a deployment when it is paused. Lack of progress shouldn't be
// estimated once a deployment is paused.
PausedDeployReason = "DeploymentPaused"
// ResumedDeployReason is added in a deployment when it is resumed. Useful for not failing accidentally
// deployments that paused amidst a rollout and are bounded by a deadline.
ResumedDeployReason = "DeploymentResumed"
//
// Available:
//
// MinimumReplicasAvailable is added in a deployment when it has its minimum replicas required available.
MinimumReplicasAvailable = "MinimumReplicasAvailable"
// MinimumReplicasUnavailable is added in a deployment when it doesn't have the minimum required replicas
// available.
MinimumReplicasUnavailable = "MinimumReplicasUnavailable"
2016-01-13 01:52:18 +00:00
)
2016-09-15 15:57:53 +00:00
// NewDeploymentCondition creates a new deployment condition.
2016-11-18 20:50:17 +00:00
func NewDeploymentCondition ( condType extensions . DeploymentConditionType , status v1 . ConditionStatus , reason , message string ) * extensions . DeploymentCondition {
2016-09-15 15:57:53 +00:00
return & extensions . DeploymentCondition {
Type : condType ,
Status : status ,
2016-12-03 18:57:26 +00:00
LastUpdateTime : metav1 . Now ( ) ,
LastTransitionTime : metav1 . Now ( ) ,
2016-09-15 15:57:53 +00:00
Reason : reason ,
Message : message ,
}
}
// GetDeploymentCondition returns the condition with the provided type, or nil
// if no such condition exists in the status.
func GetDeploymentCondition(status extensions.DeploymentStatus, condType extensions.DeploymentConditionType) *extensions.DeploymentCondition {
	for i := range status.Conditions {
		// Take a copy so the returned pointer does not alias the status slice.
		c := status.Conditions[i]
		if c.Type == condType {
			return &c
		}
	}
	return nil
}
2016-11-18 20:58:22 +00:00
// TODO: remove the duplicate
// GetDeploymentConditionInternal returns the condition with the provided type.
func GetDeploymentConditionInternal ( status internalextensions . DeploymentStatus , condType internalextensions . DeploymentConditionType ) * internalextensions . DeploymentCondition {
for i := range status . Conditions {
c := status . Conditions [ i ]
if c . Type == condType {
return & c
}
}
return nil
}
2016-09-15 15:57:53 +00:00
// SetDeploymentCondition updates the deployment to include the provided condition. If the condition that
// we are about to add already exists and has the same status and reason then we are not going to update.
func SetDeploymentCondition ( status * extensions . DeploymentStatus , condition extensions . DeploymentCondition ) {
currentCond := GetDeploymentCondition ( * status , condition . Type )
if currentCond != nil && currentCond . Status == condition . Status && currentCond . Reason == condition . Reason {
return
}
// Do not update lastTransitionTime if the status of the condition doesn't change.
if currentCond != nil && currentCond . Status == condition . Status {
condition . LastTransitionTime = currentCond . LastTransitionTime
}
newConditions := filterOutCondition ( status . Conditions , condition . Type )
status . Conditions = append ( newConditions , condition )
}
// RemoveDeploymentCondition removes the deployment condition with the provided type.
func RemoveDeploymentCondition(status *extensions.DeploymentStatus, condType extensions.DeploymentConditionType) {
	status.Conditions = filterOutCondition(status.Conditions, condType)
}
// filterOutCondition returns a new slice of deployment conditions without
// conditions of the provided type.
func filterOutCondition(conditions []extensions.DeploymentCondition, condType extensions.DeploymentConditionType) []extensions.DeploymentCondition {
	var kept []extensions.DeploymentCondition
	for _, c := range conditions {
		if c.Type != condType {
			kept = append(kept, c)
		}
	}
	return kept
}
// ReplicaSetToDeploymentCondition converts a replica set condition into a deployment condition.
// Useful for promoting replica set failure conditions into deployments.
// Note: LastUpdateTime is deliberately taken from the replica set's LastTransitionTime.
func ReplicaSetToDeploymentCondition(cond extensions.ReplicaSetCondition) extensions.DeploymentCondition {
	return extensions.DeploymentCondition{
		Type:               extensions.DeploymentConditionType(cond.Type),
		Status:             cond.Status,
		LastTransitionTime: cond.LastTransitionTime,
		LastUpdateTime:     cond.LastTransitionTime,
		Reason:             cond.Reason,
		Message:            cond.Message,
	}
}
2016-10-06 15:02:51 +00:00
// SetDeploymentRevision updates the revision for a deployment.
func SetDeploymentRevision ( deployment * extensions . Deployment , revision string ) bool {
updated := false
if deployment . Annotations == nil {
deployment . Annotations = make ( map [ string ] string )
}
if deployment . Annotations [ RevisionAnnotation ] != revision {
deployment . Annotations [ RevisionAnnotation ] = revision
updated = true
}
return updated
}
2016-07-05 07:29:09 +00:00
// MaxRevision finds the highest revision in the replica sets
func MaxRevision ( allRSs [ ] * extensions . ReplicaSet ) int64 {
max := int64 ( 0 )
for _ , rs := range allRSs {
if v , err := Revision ( rs ) ; err != nil {
// Skip the replica sets when it failed to parse their revision information
glog . V ( 4 ) . Infof ( "Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions." , err , rs )
} else if v > max {
max = v
}
}
return max
}
// LastRevision finds the second max revision number in all replica sets (the last revision).
// Replica sets whose revision annotation cannot be parsed are skipped.
func LastRevision(allRSs []*extensions.ReplicaSet) int64 {
	var first, second int64
	for _, rs := range allRSs {
		v, err := Revision(rs)
		if err != nil {
			// Skip the replica sets when it failed to parse their revision information
			glog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs)
			continue
		}
		switch {
		case v >= first:
			second = first
			first = v
		case v > second:
			second = v
		}
	}
	return second
}
2016-10-10 11:07:38 +00:00
// Revision returns the revision number of the input object.
func Revision ( obj runtime . Object ) ( int64 , error ) {
acc , err := meta . Accessor ( obj )
if err != nil {
return 0 , err
}
v , ok := acc . GetAnnotations ( ) [ RevisionAnnotation ]
2016-10-06 15:02:51 +00:00
if ! ok {
return 0 , nil
}
return strconv . ParseInt ( v , 10 , 64 )
}
2016-07-05 07:29:09 +00:00
// SetNewReplicaSetAnnotations sets new replica set's annotations appropriately by updating its revision and
// copying required deployment annotations to it; it returns true if replica set's annotation is changed.
func SetNewReplicaSetAnnotations ( deployment * extensions . Deployment , newRS * extensions . ReplicaSet , newRevision string , exists bool ) bool {
// First, copy deployment's annotations (except for apply and revision annotations)
annotationChanged := copyDeploymentAnnotationsToReplicaSet ( deployment , newRS )
// Then, update replica set's revision annotation
if newRS . Annotations == nil {
newRS . Annotations = make ( map [ string ] string )
}
2016-10-06 15:02:51 +00:00
oldRevision , ok := newRS . Annotations [ RevisionAnnotation ]
2016-07-05 07:29:09 +00:00
// The newRS's revision should be the greatest among all RSes. Usually, its revision number is newRevision (the max revision number
// of all old RSes + 1). However, it's possible that some of the old RSes are deleted after the newRS revision being updated, and
// newRevision becomes smaller than newRS's revision. We should only update newRS revision when it's smaller than newRevision.
2016-10-06 15:02:51 +00:00
if oldRevision < newRevision {
2016-07-05 07:29:09 +00:00
newRS . Annotations [ RevisionAnnotation ] = newRevision
annotationChanged = true
glog . V ( 4 ) . Infof ( "Updating replica set %q revision to %s" , newRS . Name , newRevision )
}
2016-10-06 15:02:51 +00:00
// If a revision annotation already existed and this replica set was updated with a new revision
// then that means we are rolling back to this replica set. We need to preserve the old revisions
// for historical information.
if ok && annotationChanged {
revisionHistoryAnnotation := newRS . Annotations [ RevisionHistoryAnnotation ]
oldRevisions := strings . Split ( revisionHistoryAnnotation , "," )
if len ( oldRevisions [ 0 ] ) == 0 {
newRS . Annotations [ RevisionHistoryAnnotation ] = oldRevision
} else {
oldRevisions = append ( oldRevisions , oldRevision )
newRS . Annotations [ RevisionHistoryAnnotation ] = strings . Join ( oldRevisions , "," )
}
}
// If the new replica set is about to be created, we need to add replica annotations to it.
2016-11-18 20:50:17 +00:00
if ! exists && SetReplicasAnnotations ( newRS , * ( deployment . Spec . Replicas ) , * ( deployment . Spec . Replicas ) + MaxSurge ( * deployment ) ) {
2016-07-05 07:29:09 +00:00
annotationChanged = true
}
return annotationChanged
}
var annotationsToSkip = map [ string ] bool {
2017-05-16 22:30:29 +00:00
v1 . LastAppliedConfigAnnotation : true ,
RevisionAnnotation : true ,
RevisionHistoryAnnotation : true ,
DesiredReplicasAnnotation : true ,
MaxReplicasAnnotation : true ,
OverlapAnnotation : true ,
2016-07-05 07:29:09 +00:00
}
// skipCopyAnnotation returns true if we should skip copying the annotation with the given annotation key.
// TODO: How to decide which annotations should / should not be copied?
// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615
func skipCopyAnnotation(key string) bool {
	return annotationsToSkip[key]
}
// copyDeploymentAnnotationsToReplicaSet copies deployment's annotations to replica set's annotations,
// and returns true if replica set's annotation is changed.
// Note that apply and revision annotations are not copied.
func copyDeploymentAnnotationsToReplicaSet(deployment *extensions.Deployment, rs *extensions.ReplicaSet) bool {
	changed := false
	if rs.Annotations == nil {
		rs.Annotations = make(map[string]string)
	}
	for k, v := range deployment.Annotations {
		// newRS revision is updated automatically in getNewReplicaSet, and the deployment's revision number is then updated
		// by copying its newRS revision number. We should not copy deployment's revision to its newRS, since the update of
		// deployment revision number may fail (revision becomes stale) and the revision number in newRS is more reliable.
		if skipCopyAnnotation(k) || rs.Annotations[k] == v {
			continue
		}
		rs.Annotations[k] = v
		changed = true
	}
	return changed
}
// SetDeploymentAnnotationsTo sets deployment's annotations as given RS's annotations.
// This action should be done if and only if the deployment is rolling back to this rs.
// Note that apply and revision annotations are not changed.
func SetDeploymentAnnotationsTo(deployment *extensions.Deployment, rollbackToRS *extensions.ReplicaSet) {
	// Keep only the annotations the deployment owns, then layer on the RS's copyable ones.
	deployment.Annotations = getSkippedAnnotations(deployment.Annotations)
	for k, v := range rollbackToRS.Annotations {
		if skipCopyAnnotation(k) {
			continue
		}
		deployment.Annotations[k] = v
	}
}
// getSkippedAnnotations returns only those annotations that are in the
// skip-copy set, i.e. the ones a deployment keeps during a rollback.
func getSkippedAnnotations(annotations map[string]string) map[string]string {
	skipped := make(map[string]string)
	for k, v := range annotations {
		if skipCopyAnnotation(k) {
			skipped[k] = v
		}
	}
	return skipped
}
// FindActiveOrLatest returns the only active or the latest replica set in case there is at most one active
// replica set. If there are more active replica sets, then we should proportionally scale them.
func FindActiveOrLatest(newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet) *extensions.ReplicaSet {
	if newRS == nil && len(oldRSs) == 0 {
		return nil
	}
	// Newest first, so oldRSs[0] is the most recent old replica set.
	sort.Sort(sort.Reverse(controller.ReplicaSetsByCreationTimestamp(oldRSs)))
	active := controller.FilterActiveReplicaSets(append(oldRSs, newRS))
	switch len(active) {
	case 0:
		// If there is no active replica set then we should return the newest.
		if newRS != nil {
			return newRS
		}
		return oldRSs[0]
	case 1:
		return active[0]
	default:
		// More than one active replica set: caller must scale proportionally.
		return nil
	}
}
// GetDesiredReplicasAnnotation returns the number of desired replicas recorded
// on the replica set, and whether the annotation was present and parsable.
func GetDesiredReplicasAnnotation(rs *extensions.ReplicaSet) (int32, bool) {
	return getIntFromAnnotation(rs, DesiredReplicasAnnotation)
}
// getMaxReplicasAnnotation returns the max replicas recorded on the replica
// set, and whether the annotation was present and parsable.
func getMaxReplicasAnnotation(rs *extensions.ReplicaSet) (int32, bool) {
	return getIntFromAnnotation(rs, MaxReplicasAnnotation)
}
func getIntFromAnnotation ( rs * extensions . ReplicaSet , annotationKey string ) ( int32 , bool ) {
annotationValue , ok := rs . Annotations [ annotationKey ]
if ! ok {
return int32 ( 0 ) , false
}
intValue , err := strconv . Atoi ( annotationValue )
if err != nil {
2017-02-24 17:37:32 +00:00
glog . V ( 2 ) . Infof ( "Cannot convert the value %q with annotation key %q for the replica set %q" , annotationValue , annotationKey , rs . Name )
2016-07-05 07:29:09 +00:00
return int32 ( 0 ) , false
}
return int32 ( intValue ) , true
}
// SetReplicasAnnotations sets the desiredReplicas and maxReplicas into the annotations.
// It returns true if either annotation was changed.
func SetReplicasAnnotations(rs *extensions.ReplicaSet, desiredReplicas, maxReplicas int32) bool {
	if rs.Annotations == nil {
		rs.Annotations = make(map[string]string)
	}
	updated := false
	if want := strconv.Itoa(int(desiredReplicas)); rs.Annotations[DesiredReplicasAnnotation] != want {
		rs.Annotations[DesiredReplicasAnnotation] = want
		updated = true
	}
	if want := strconv.Itoa(int(maxReplicas)); rs.Annotations[MaxReplicasAnnotation] != want {
		rs.Annotations[MaxReplicasAnnotation] = want
		updated = true
	}
	return updated
}
// MaxUnavailable returns the maximum unavailable pods a rolling deployment can take.
func MaxUnavailable ( deployment extensions . Deployment ) int32 {
2017-04-19 12:40:31 +00:00
if ! IsRollingUpdate ( & deployment ) || * ( deployment . Spec . Replicas ) == 0 {
2016-07-05 07:29:09 +00:00
return int32 ( 0 )
}
// Error caught by validation
2016-11-18 20:50:17 +00:00
_ , maxUnavailable , _ := ResolveFenceposts ( deployment . Spec . Strategy . RollingUpdate . MaxSurge , deployment . Spec . Strategy . RollingUpdate . MaxUnavailable , * ( deployment . Spec . Replicas ) )
2017-03-30 09:11:52 +00:00
if maxUnavailable > * deployment . Spec . Replicas {
return * deployment . Spec . Replicas
}
2016-07-05 07:29:09 +00:00
return maxUnavailable
}
2016-11-29 10:20:09 +00:00
// MinAvailable returns the minimum available pods of a given deployment
2016-08-01 22:26:17 +00:00
func MinAvailable ( deployment * extensions . Deployment ) int32 {
if ! IsRollingUpdate ( deployment ) {
return int32 ( 0 )
}
2016-11-18 20:50:17 +00:00
return * ( deployment . Spec . Replicas ) - MaxUnavailable ( * deployment )
2016-08-01 22:26:17 +00:00
}
2016-07-05 07:29:09 +00:00
// MaxSurge returns the maximum surge pods a rolling deployment can take.
func MaxSurge ( deployment extensions . Deployment ) int32 {
if ! IsRollingUpdate ( & deployment ) {
return int32 ( 0 )
}
// Error caught by validation
2016-11-18 20:50:17 +00:00
maxSurge , _ , _ := ResolveFenceposts ( deployment . Spec . Strategy . RollingUpdate . MaxSurge , deployment . Spec . Strategy . RollingUpdate . MaxUnavailable , * ( deployment . Spec . Replicas ) )
2016-07-05 07:29:09 +00:00
return maxSurge
}
// GetProportion will estimate the proportion for the provided replica set using 1. the current size
// of the parent deployment, 2. the replica count that needs be added on the replica sets of the
// deployment, and 3. the total replicas added in the replica sets of the deployment so far.
func GetProportion ( rs * extensions . ReplicaSet , d extensions . Deployment , deploymentReplicasToAdd , deploymentReplicasAdded int32 ) int32 {
2016-11-18 20:50:17 +00:00
if rs == nil || * ( rs . Spec . Replicas ) == 0 || deploymentReplicasToAdd == 0 || deploymentReplicasToAdd == deploymentReplicasAdded {
2016-07-05 07:29:09 +00:00
return int32 ( 0 )
}
rsFraction := getReplicaSetFraction ( * rs , d )
allowed := deploymentReplicasToAdd - deploymentReplicasAdded
if deploymentReplicasToAdd > 0 {
// Use the minimum between the replica set fraction and the maximum allowed replicas
// when scaling up. This way we ensure we will not scale up more than the allowed
// replicas we can add.
return integer . Int32Min ( rsFraction , allowed )
}
// Use the maximum between the replica set fraction and the maximum allowed replicas
// when scaling down. This way we ensure we will not scale down more than the allowed
// replicas we can remove.
return integer . Int32Max ( rsFraction , allowed )
}
// getReplicaSetFraction estimates the fraction of replicas a replica set can have in
// 1. a scaling event during a rollout or 2. when scaling a paused deployment.
func getReplicaSetFraction ( rs extensions . ReplicaSet , d extensions . Deployment ) int32 {
// If we are scaling down to zero then the fraction of this replica set is its whole size (negative)
2016-11-18 20:50:17 +00:00
if * ( d . Spec . Replicas ) == int32 ( 0 ) {
return - * ( rs . Spec . Replicas )
2016-07-05 07:29:09 +00:00
}
2016-11-18 20:50:17 +00:00
deploymentReplicas := * ( d . Spec . Replicas ) + MaxSurge ( d )
2016-07-05 07:29:09 +00:00
annotatedReplicas , ok := getMaxReplicasAnnotation ( & rs )
if ! ok {
// If we cannot find the annotation then fallback to the current deployment size. Note that this
// will not be an accurate proportion estimation in case other replica sets have different values
// which means that the deployment was scaled at some point but we at least will stay in limits
// due to the min-max comparisons in getProportion.
annotatedReplicas = d . Status . Replicas
}
// We should never proportionally scale up from zero which means rs.spec.replicas and annotatedReplicas
// will never be zero here.
2016-11-18 20:50:17 +00:00
newRSsize := ( float64 ( * ( rs . Spec . Replicas ) * deploymentReplicas ) ) / float64 ( annotatedReplicas )
return integer . RoundToInt32 ( newRSsize ) - * ( rs . Spec . Replicas )
2016-07-05 07:29:09 +00:00
}
2016-06-01 21:14:40 +00:00
// GetAllReplicaSets returns the old and new replica sets targeted by the given Deployment. It gets PodList and ReplicaSetList from client interface.
// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets.
// The third returned value is the new replica set, and it may be nil if it doesn't exist yet.
func GetAllReplicaSets ( deployment * extensions . Deployment , c clientset . Interface ) ( [ ] * extensions . ReplicaSet , [ ] * extensions . ReplicaSet , * extensions . ReplicaSet , error ) {
2017-03-15 00:39:29 +00:00
rsList , err := ListReplicaSets ( deployment , rsListFromClient ( c ) )
2016-06-01 22:00:29 +00:00
if err != nil {
return nil , nil , nil , err
}
2017-03-22 09:26:13 +00:00
oldRSes , allOldRSes , err := FindOldReplicaSets ( deployment , rsList )
2017-03-15 00:39:29 +00:00
if err != nil {
return nil , nil , nil , err
}
newRS , err := FindNewReplicaSet ( deployment , rsList )
if err != nil {
return nil , nil , nil , err
}
return oldRSes , allOldRSes , newRS , nil
}
2016-01-20 00:40:18 +00:00
// GetOldReplicaSets returns the old replica sets targeted by the given Deployment; get PodList and ReplicaSetList from client interface.
// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets.
2016-02-28 02:13:32 +00:00
func GetOldReplicaSets ( deployment * extensions . Deployment , c clientset . Interface ) ( [ ] * extensions . ReplicaSet , [ ] * extensions . ReplicaSet , error ) {
2017-03-15 00:39:29 +00:00
rsList , err := ListReplicaSets ( deployment , rsListFromClient ( c ) )
2016-06-01 22:00:29 +00:00
if err != nil {
return nil , nil , err
}
2017-03-22 09:26:13 +00:00
return FindOldReplicaSets ( deployment , rsList )
2016-03-14 19:07:56 +00:00
}
// GetNewReplicaSet returns a replica set that matches the intent of the given deployment; get ReplicaSetList from client interface.
// Returns nil if the new replica set doesn't exist yet.
func GetNewReplicaSet ( deployment * extensions . Deployment , c clientset . Interface ) ( * extensions . ReplicaSet , error ) {
2017-03-15 00:39:29 +00:00
rsList , err := ListReplicaSets ( deployment , rsListFromClient ( c ) )
2016-06-01 22:00:29 +00:00
if err != nil {
return nil , err
}
return FindNewReplicaSet ( deployment , rsList )
2016-06-01 21:14:40 +00:00
}
2017-03-15 00:39:29 +00:00
// rsListFromClient returns an rsListFunc that wraps the given client.
func rsListFromClient ( c clientset . Interface ) rsListFunc {
return func ( namespace string , options metav1 . ListOptions ) ( [ ] * extensions . ReplicaSet , error ) {
rsList , err := c . Extensions ( ) . ReplicaSets ( namespace ) . List ( options )
if err != nil {
return nil , err
}
2017-04-02 15:59:30 +00:00
var ret [ ] * extensions . ReplicaSet
2017-03-15 00:39:29 +00:00
for i := range rsList . Items {
ret = append ( ret , & rsList . Items [ i ] )
}
return ret , err
}
}
// podListFromClient returns a podListFunc that wraps the given client.
func podListFromClient ( c clientset . Interface ) podListFunc {
return func ( namespace string , options metav1 . ListOptions ) ( * v1 . PodList , error ) {
return c . Core ( ) . Pods ( namespace ) . List ( options )
}
2015-11-18 23:12:11 +00:00
}
2016-02-19 18:25:34 +00:00
// TODO: switch this to full namespacers
2017-01-22 03:36:02 +00:00
type rsListFunc func ( string , metav1 . ListOptions ) ( [ ] * extensions . ReplicaSet , error )
type podListFunc func ( string , metav1 . ListOptions ) ( * v1 . PodList , error )
2016-02-11 01:49:11 +00:00
2016-03-14 19:07:56 +00:00
// ListReplicaSets returns a slice of RSes the given deployment targets.
2017-02-26 23:26:33 +00:00
// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan),
// because only the controller itself should do that.
// However, it does filter out anything whose ControllerRef doesn't match.
2016-10-04 17:23:27 +00:00
func ListReplicaSets ( deployment * extensions . Deployment , getRSList rsListFunc ) ( [ ] * extensions . ReplicaSet , error ) {
2016-03-14 19:07:56 +00:00
// TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. the replica set's selector
2017-02-26 23:26:33 +00:00
// should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830.
2016-03-14 19:07:56 +00:00
namespace := deployment . Namespace
2016-12-03 18:57:26 +00:00
selector , err := metav1 . LabelSelectorAsSelector ( deployment . Spec . Selector )
2016-03-14 19:07:56 +00:00
if err != nil {
return nil , err
}
2017-01-22 03:36:02 +00:00
options := metav1 . ListOptions { LabelSelector : selector . String ( ) }
2017-02-26 23:26:33 +00:00
all , err := getRSList ( namespace , options )
if err != nil {
return all , err
}
// Only include those whose ControllerRef matches the Deployment.
owned := make ( [ ] * extensions . ReplicaSet , 0 , len ( all ) )
for _ , rs := range all {
controllerRef := controller . GetControllerOf ( rs )
if controllerRef != nil && controllerRef . UID == deployment . UID {
owned = append ( owned , rs )
}
}
return owned , nil
2016-03-14 19:07:56 +00:00
}
2017-04-01 09:16:07 +00:00
// ListReplicaSetsInternal is ListReplicaSets for internalextensions.
// TODO: Remove the duplicate when call sites are updated to ListReplicaSets.
func ListReplicaSetsInternal ( deployment * internalextensions . Deployment , getRSList func ( string , metav1 . ListOptions ) ( [ ] * internalextensions . ReplicaSet , error ) ) ( [ ] * internalextensions . ReplicaSet , error ) {
2017-03-06 22:51:18 +00:00
namespace := deployment . Namespace
selector , err := metav1 . LabelSelectorAsSelector ( deployment . Spec . Selector )
if err != nil {
return nil , err
}
options := metav1 . ListOptions { LabelSelector : selector . String ( ) }
all , err := getRSList ( namespace , options )
if err != nil {
2017-03-16 18:43:09 +00:00
return nil , err
2017-03-06 22:51:18 +00:00
}
2017-04-01 09:16:07 +00:00
// Only include those whose ControllerRef matches the Deployment.
2017-03-16 18:43:09 +00:00
filtered := make ( [ ] * internalextensions . ReplicaSet , 0 , len ( all ) )
2017-03-06 22:51:18 +00:00
for _ , rs := range all {
controllerRef := controller . GetControllerOf ( rs )
2017-04-01 09:16:07 +00:00
if controllerRef != nil && controllerRef . UID == deployment . UID {
filtered = append ( filtered , rs )
2017-03-06 22:51:18 +00:00
}
}
2017-03-16 18:43:09 +00:00
return filtered , nil
2017-03-06 22:51:18 +00:00
}
2016-03-14 19:07:56 +00:00
// ListPods returns a list of pods the given deployment targets.
2017-02-26 23:26:33 +00:00
// This needs a list of ReplicaSets for the Deployment,
// which can be found with ListReplicaSets().
// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan),
// because only the controller itself should do that.
// However, it does filter out anything whose ControllerRef doesn't match.
func ListPods ( deployment * extensions . Deployment , rsList [ ] * extensions . ReplicaSet , getPodList podListFunc ) ( * v1 . PodList , error ) {
2016-03-14 19:07:56 +00:00
namespace := deployment . Namespace
2016-12-03 18:57:26 +00:00
selector , err := metav1 . LabelSelectorAsSelector ( deployment . Spec . Selector )
2016-03-14 19:07:56 +00:00
if err != nil {
return nil , err
}
2017-01-22 03:36:02 +00:00
options := metav1 . ListOptions { LabelSelector : selector . String ( ) }
2017-02-26 23:26:33 +00:00
all , err := getPodList ( namespace , options )
if err != nil {
return all , err
}
// Only include those whose ControllerRef points to a ReplicaSet that is in
// turn owned by this Deployment.
rsMap := make ( map [ types . UID ] bool , len ( rsList ) )
for _ , rs := range rsList {
rsMap [ rs . UID ] = true
}
owned := & v1 . PodList { Items : make ( [ ] v1 . Pod , 0 , len ( all . Items ) ) }
for i := range all . Items {
pod := & all . Items [ i ]
controllerRef := controller . GetControllerOf ( pod )
if controllerRef != nil && rsMap [ controllerRef . UID ] {
owned . Items = append ( owned . Items , * pod )
}
}
return owned , nil
2016-03-14 19:07:56 +00:00
}
2016-12-01 09:10:30 +00:00
// EqualIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash]
2016-06-03 17:53:14 +00:00
// We ignore pod-template-hash because the hash result would be different upon podTemplateSpec API changes
// (e.g. the addition of a new field will cause the hash code to change)
// Note that we assume input podTemplateSpecs contain non-empty labels
2016-12-01 09:10:30 +00:00
func EqualIgnoreHash ( template1 , template2 v1 . PodTemplateSpec ) bool {
2016-08-03 18:19:12 +00:00
// First, compare template.Labels (ignoring hash)
labels1 , labels2 := template1 . Labels , template2 . Labels
if len ( labels1 ) > len ( labels2 ) {
labels1 , labels2 = labels2 , labels1
}
// We make sure len(labels2) >= len(labels1)
for k , v := range labels2 {
if labels1 [ k ] != v && k != extensions . DefaultDeploymentUniqueLabelKey {
2016-12-01 09:10:30 +00:00
return false
2016-08-03 18:19:12 +00:00
}
}
// Then, compare the templates without comparing their labels
template1 . Labels , template2 . Labels = nil , nil
2017-01-25 13:39:54 +00:00
return apiequality . Semantic . DeepEqual ( template1 , template2 )
2016-06-03 17:53:14 +00:00
}
2016-03-14 19:07:56 +00:00
// FindNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template).
2016-10-04 17:23:27 +00:00
func FindNewReplicaSet ( deployment * extensions . Deployment , rsList [ ] * extensions . ReplicaSet ) ( * extensions . ReplicaSet , error ) {
2016-03-14 19:07:56 +00:00
newRSTemplate := GetNewReplicaSetTemplate ( deployment )
2017-02-22 00:00:24 +00:00
sort . Sort ( controller . ReplicaSetsByCreationTimestamp ( rsList ) )
2016-03-14 19:07:56 +00:00
for i := range rsList {
2016-12-01 09:10:30 +00:00
if EqualIgnoreHash ( rsList [ i ] . Spec . Template , newRSTemplate ) {
2017-02-22 00:00:24 +00:00
// In rare cases, such as after cluster upgrades, Deployment may end up with
// having more than one new ReplicaSets that have the same template as its template,
// see https://github.com/kubernetes/kubernetes/issues/40415
// We deterministically choose the oldest new ReplicaSet.
2016-10-04 17:23:27 +00:00
return rsList [ i ] , nil
2016-03-14 19:07:56 +00:00
}
}
// new ReplicaSet does not exist.
return nil , nil
}
2017-03-22 09:26:13 +00:00
// FindOldReplicaSets returns the old replica sets targeted by the given Deployment, with the given slice of RSes.
2016-01-20 00:40:18 +00:00
// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets.
2017-03-22 09:26:13 +00:00
func FindOldReplicaSets ( deployment * extensions . Deployment , rsList [ ] * extensions . ReplicaSet ) ( [ ] * extensions . ReplicaSet , [ ] * extensions . ReplicaSet , error ) {
var requiredRSs [ ] * extensions . ReplicaSet
var allRSs [ ] * extensions . ReplicaSet
2017-02-22 00:00:24 +00:00
newRS , err := FindNewReplicaSet ( deployment , rsList )
if err != nil {
2017-03-22 09:26:13 +00:00
return nil , nil , err
2015-09-18 20:35:56 +00:00
}
2017-03-22 09:26:13 +00:00
for _ , rs := range rsList {
// Filter out new replica set
if newRS != nil && rs . UID == newRS . UID {
continue
}
allRSs = append ( allRSs , rs )
if * ( rs . Spec . Replicas ) != 0 {
requiredRSs = append ( requiredRSs , rs )
}
2016-01-13 01:52:18 +00:00
}
2016-01-20 00:40:18 +00:00
return requiredRSs , allRSs , nil
2015-09-18 20:35:56 +00:00
}
2016-07-05 07:29:09 +00:00
// WaitForReplicaSetUpdated polls the replica set until it is updated.
2017-02-10 16:43:30 +00:00
func WaitForReplicaSetUpdated ( c extensionslisters . ReplicaSetLister , desiredGeneration int64 , namespace , name string ) error {
return wait . PollImmediate ( 1 * time . Second , 1 * time . Minute , func ( ) ( bool , error ) {
rs , err := c . ReplicaSets ( namespace ) . Get ( name )
2016-02-19 18:25:34 +00:00
if err != nil {
return false , err
}
return rs . Status . ObservedGeneration >= desiredGeneration , nil
} )
}
2016-07-05 07:29:09 +00:00
// WaitForPodsHashPopulated polls the replica set until updated and fully labeled.
2017-02-10 16:43:30 +00:00
func WaitForPodsHashPopulated ( c extensionslisters . ReplicaSetLister , desiredGeneration int64 , namespace , name string ) error {
return wait . PollImmediate ( 1 * time . Second , 1 * time . Minute , func ( ) ( bool , error ) {
rs , err := c . ReplicaSets ( namespace ) . Get ( name )
2016-03-11 18:34:13 +00:00
if err != nil {
return false , err
}
return rs . Status . ObservedGeneration >= desiredGeneration &&
2016-11-18 20:50:17 +00:00
rs . Status . FullyLabeledReplicas == * ( rs . Spec . Replicas ) , nil
2016-03-11 18:34:13 +00:00
} )
}
2016-03-14 19:07:56 +00:00
// LabelPodsWithHash labels all pods in the given podList with the new hash label.
2017-02-06 18:35:50 +00:00
func LabelPodsWithHash ( podList * v1 . PodList , c clientset . Interface , podLister corelisters . PodLister , namespace , name , hash string ) error {
2016-02-11 01:49:11 +00:00
for _ , pod := range podList . Items {
2017-04-02 15:59:30 +00:00
// Ignore inactive Pods.
if ! controller . IsPodActive ( & pod ) {
continue
}
2016-02-18 19:45:24 +00:00
// Only label the pod that doesn't already have the new hash
if pod . Labels [ extensions . DefaultDeploymentUniqueLabelKey ] != hash {
2016-11-07 13:23:15 +00:00
_ , err := UpdatePodWithRetries ( c . Core ( ) . Pods ( namespace ) , podLister , pod . Namespace , pod . Name ,
2016-11-18 20:50:17 +00:00
func ( podToUpdate * v1 . Pod ) error {
2016-03-03 23:48:56 +00:00
// Precondition: the pod doesn't contain the new hash in its label.
if podToUpdate . Labels [ extensions . DefaultDeploymentUniqueLabelKey ] == hash {
return errors . ErrPreconditionViolated
}
2016-03-02 23:06:00 +00:00
podToUpdate . Labels = labelsutil . AddLabel ( podToUpdate . Labels , extensions . DefaultDeploymentUniqueLabelKey , hash )
2016-03-03 23:48:56 +00:00
return nil
2016-11-07 13:23:15 +00:00
} )
if err != nil {
return fmt . Errorf ( "error in adding template hash label %s to pod %q: %v" , hash , pod . Name , err )
2016-02-11 01:49:11 +00:00
}
2016-11-07 13:23:15 +00:00
glog . V ( 4 ) . Infof ( "Labeled pod %s/%s of ReplicaSet %s/%s with hash %s." , pod . Namespace , pod . Name , namespace , name , hash )
2016-02-11 01:49:11 +00:00
}
2016-03-01 02:28:32 +00:00
}
2016-11-07 13:23:15 +00:00
return nil
2016-02-18 19:45:24 +00:00
}
2016-07-05 07:29:09 +00:00
// GetNewReplicaSetTemplate returns the desired PodTemplateSpec for the new ReplicaSet corresponding to the given ReplicaSet.
2016-12-01 09:10:30 +00:00
// Callers of this helper need to set the DefaultDeploymentUniqueLabelKey k/v pair.
2016-11-18 20:50:17 +00:00
func GetNewReplicaSetTemplate ( deployment * extensions . Deployment ) v1 . PodTemplateSpec {
2016-12-01 09:10:30 +00:00
// newRS will have the same template as in deployment spec.
return v1 . PodTemplateSpec {
2015-09-18 20:35:56 +00:00
ObjectMeta : deployment . Spec . Template . ObjectMeta ,
Spec : deployment . Spec . Template . Spec ,
}
}
2016-11-18 20:50:17 +00:00
// TODO: remove the duplicate
2016-11-18 20:58:22 +00:00
// GetNewReplicaSetTemplateInternal returns the desired PodTemplateSpec for the new ReplicaSet corresponding to the given ReplicaSet.
func GetNewReplicaSetTemplateInternal ( deployment * internalextensions . Deployment ) api . PodTemplateSpec {
2016-11-18 20:50:17 +00:00
// newRS will have the same template as in deployment spec, plus a unique label in some cases.
newRSTemplate := api . PodTemplateSpec {
ObjectMeta : deployment . Spec . Template . ObjectMeta ,
Spec : deployment . Spec . Template . Spec ,
}
newRSTemplate . ObjectMeta . Labels = labelsutil . CloneAndAddLabel (
deployment . Spec . Template . ObjectMeta . Labels ,
internalextensions . DefaultDeploymentUniqueLabelKey ,
2016-12-01 09:10:30 +00:00
fmt . Sprintf ( "%d" , GetInternalPodTemplateSpecHash ( newRSTemplate ) ) )
2016-11-18 20:50:17 +00:00
return newRSTemplate
}
2016-01-20 00:40:18 +00:00
// SetFromReplicaSetTemplate sets the desired PodTemplateSpec from a replica set template to the given deployment.
2016-11-18 20:50:17 +00:00
func SetFromReplicaSetTemplate ( deployment * extensions . Deployment , template v1 . PodTemplateSpec ) * extensions . Deployment {
2016-01-15 02:04:05 +00:00
deployment . Spec . Template . ObjectMeta = template . ObjectMeta
deployment . Spec . Template . Spec = template . Spec
deployment . Spec . Template . ObjectMeta . Labels = labelsutil . CloneAndRemoveLabel (
deployment . Spec . Template . ObjectMeta . Labels ,
2016-02-05 22:45:05 +00:00
extensions . DefaultDeploymentUniqueLabelKey )
2016-01-15 02:04:05 +00:00
return deployment
}
2016-07-05 07:29:09 +00:00
// GetReplicaCountForReplicaSets returns the sum of Replicas of the given replica sets.
2016-04-27 04:35:14 +00:00
func GetReplicaCountForReplicaSets ( replicaSets [ ] * extensions . ReplicaSet ) int32 {
2016-10-11 14:37:39 +00:00
totalReplicas := int32 ( 0 )
2016-01-20 00:40:18 +00:00
for _ , rs := range replicaSets {
2016-02-25 00:09:20 +00:00
if rs != nil {
2016-11-18 20:50:17 +00:00
totalReplicas += * ( rs . Spec . Replicas )
2016-02-25 00:09:20 +00:00
}
2015-09-29 23:55:06 +00:00
}
2016-10-11 14:37:39 +00:00
return totalReplicas
2015-09-29 23:55:06 +00:00
}
2016-02-22 22:28:28 +00:00
// GetActualReplicaCountForReplicaSets returns the sum of actual replicas of the given replica sets.
2016-04-27 04:35:14 +00:00
func GetActualReplicaCountForReplicaSets ( replicaSets [ ] * extensions . ReplicaSet ) int32 {
2016-10-11 14:37:39 +00:00
totalActualReplicas := int32 ( 0 )
2016-02-22 22:28:28 +00:00
for _ , rs := range replicaSets {
2016-02-25 00:09:20 +00:00
if rs != nil {
2016-10-11 14:37:39 +00:00
totalActualReplicas += rs . Status . Replicas
2016-02-25 00:09:20 +00:00
}
2016-02-22 22:28:28 +00:00
}
2016-10-11 14:37:39 +00:00
return totalActualReplicas
2016-02-22 22:28:28 +00:00
}
2016-12-02 16:32:34 +00:00
// GetReadyReplicaCountForReplicaSets returns the number of ready pods corresponding to the given replica sets.
func GetReadyReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 {
	var total int32
	for _, rs := range replicaSets {
		// Skip nil entries; callers may pass sparse slices.
		if rs == nil {
			continue
		}
		total += rs.Status.ReadyReplicas
	}
	return total
}
2016-10-11 14:37:39 +00:00
// GetAvailableReplicaCountForReplicaSets returns the number of available pods corresponding to the given replica sets.
func GetAvailableReplicaCountForReplicaSets ( replicaSets [ ] * extensions . ReplicaSet ) int32 {
totalAvailableReplicas := int32 ( 0 )
for _ , rs := range replicaSets {
if rs != nil {
totalAvailableReplicas += rs . Status . AvailableReplicas
2015-09-29 23:55:06 +00:00
}
}
2016-10-11 14:37:39 +00:00
return totalAvailableReplicas
2015-09-29 23:55:06 +00:00
}
2016-07-05 07:29:09 +00:00
// IsRollingUpdate returns true if the strategy type is a rolling update.
2016-02-05 02:05:38 +00:00
func IsRollingUpdate ( deployment * extensions . Deployment ) bool {
return deployment . Spec . Strategy . Type == extensions . RollingUpdateDeploymentStrategyType
}
2017-04-19 12:40:31 +00:00
// DeploymentComplete considers a deployment to be complete once all of its desired replicas
// are updated and available, and no old pods are running.
2016-09-15 15:57:53 +00:00
func DeploymentComplete ( deployment * extensions . Deployment , newStatus * extensions . DeploymentStatus ) bool {
2016-11-18 20:50:17 +00:00
return newStatus . UpdatedReplicas == * ( deployment . Spec . Replicas ) &&
2017-01-21 21:54:21 +00:00
newStatus . Replicas == * ( deployment . Spec . Replicas ) &&
2017-04-19 12:40:31 +00:00
newStatus . AvailableReplicas == * ( deployment . Spec . Replicas ) &&
2016-11-10 16:59:30 +00:00
newStatus . ObservedGeneration >= deployment . Generation
2016-09-15 15:57:53 +00:00
}
// DeploymentProgressing reports progress for a deployment. Progress is estimated by comparing the
2016-11-10 16:59:30 +00:00
// current with the new status of the deployment that the controller is observing. More specifically,
// when new pods are scaled up or become available, or old pods are scaled down, then we consider the
// deployment is progressing.
2016-09-15 15:57:53 +00:00
func DeploymentProgressing ( deployment * extensions . Deployment , newStatus * extensions . DeploymentStatus ) bool {
oldStatus := deployment . Status
// Old replicas that need to be scaled down
oldStatusOldReplicas := oldStatus . Replicas - oldStatus . UpdatedReplicas
newStatusOldReplicas := newStatus . Replicas - newStatus . UpdatedReplicas
2016-11-10 16:59:30 +00:00
return ( newStatus . UpdatedReplicas > oldStatus . UpdatedReplicas ) ||
( newStatusOldReplicas < oldStatusOldReplicas ) ||
newStatus . AvailableReplicas > deployment . Status . AvailableReplicas
2016-09-15 15:57:53 +00:00
}
// used for unit testing
var nowFn = func ( ) time . Time { return time . Now ( ) }
// DeploymentTimedOut considers a deployment to have timed out once its condition that reports progress
// is older than progressDeadlineSeconds or a Progressing condition with a TimedOutReason reason already
// exists.
func DeploymentTimedOut ( deployment * extensions . Deployment , newStatus * extensions . DeploymentStatus ) bool {
if deployment . Spec . ProgressDeadlineSeconds == nil {
return false
}
// Look for the Progressing condition. If it doesn't exist, we have no base to estimate progress.
// If it's already set with a TimedOutReason reason, we have already timed out, no need to check
// again.
condition := GetDeploymentCondition ( * newStatus , extensions . DeploymentProgressing )
if condition == nil {
return false
}
if condition . Reason == TimedOutReason {
return true
}
// Look at the difference in seconds between now and the last time we reported any
// progress or tried to create a replica set, or resumed a paused deployment and
// compare against progressDeadlineSeconds.
2016-11-08 10:41:53 +00:00
from := condition . LastUpdateTime
2017-02-10 13:14:32 +00:00
now := nowFn ( )
2016-09-15 15:57:53 +00:00
delta := time . Duration ( * deployment . Spec . ProgressDeadlineSeconds ) * time . Second
2017-02-10 13:14:32 +00:00
timedOut := from . Add ( delta ) . Before ( now )
2017-02-24 17:37:32 +00:00
glog . V ( 4 ) . Infof ( "Deployment %q timed out (%t) [last progress check: %v - now: %v]" , deployment . Name , timedOut , from , now )
2017-02-10 13:14:32 +00:00
return timedOut
2016-09-15 15:57:53 +00:00
}
2016-02-05 02:05:38 +00:00
// NewRSNewReplicas calculates the number of replicas a deployment's new RS should have.
// When one of the followings is true, we're rolling out the deployment; otherwise, we're scaling it.
// 1) The new RS is saturated: newRS's replicas == deployment's replicas
// 2) Max number of pods allowed is reached: deployment's replicas + maxSurge == all RSs' replicas
2016-04-27 04:35:14 +00:00
func NewRSNewReplicas ( deployment * extensions . Deployment , allRSs [ ] * extensions . ReplicaSet , newRS * extensions . ReplicaSet ) ( int32 , error ) {
2016-02-05 02:05:38 +00:00
switch deployment . Spec . Strategy . Type {
case extensions . RollingUpdateDeploymentStrategyType :
// Check if we can scale up.
2016-11-18 20:50:17 +00:00
maxSurge , err := intstrutil . GetValueFromIntOrPercent ( deployment . Spec . Strategy . RollingUpdate . MaxSurge , int ( * ( deployment . Spec . Replicas ) ) , true )
2016-02-05 02:05:38 +00:00
if err != nil {
return 0 , err
}
// Find the total number of pods
currentPodCount := GetReplicaCountForReplicaSets ( allRSs )
2016-11-18 20:50:17 +00:00
maxTotalPods := * ( deployment . Spec . Replicas ) + int32 ( maxSurge )
2016-02-05 02:05:38 +00:00
if currentPodCount >= maxTotalPods {
// Cannot scale up.
2016-11-18 20:50:17 +00:00
return * ( newRS . Spec . Replicas ) , nil
2016-02-05 02:05:38 +00:00
}
// Scale up.
scaleUpCount := maxTotalPods - currentPodCount
// Do not exceed the number of desired replicas.
2016-11-18 20:50:17 +00:00
scaleUpCount = int32 ( integer . IntMin ( int ( scaleUpCount ) , int ( * ( deployment . Spec . Replicas ) - * ( newRS . Spec . Replicas ) ) ) )
return * ( newRS . Spec . Replicas ) + scaleUpCount , nil
2016-02-05 02:05:38 +00:00
case extensions . RecreateDeploymentStrategyType :
2016-11-18 20:50:17 +00:00
return * ( deployment . Spec . Replicas ) , nil
2016-02-05 02:05:38 +00:00
default :
return 0 , fmt . Errorf ( "deployment type %v isn't supported" , deployment . Spec . Strategy . Type )
}
}
2016-02-24 04:27:24 +00:00
2016-01-28 16:35:14 +00:00
// IsSaturated checks if the new replica set is saturated by comparing its size with its deployment size.
// Both the deployment and the replica set have to believe this replica set can own all of the desired
2017-04-18 15:32:06 +00:00
// replicas in the deployment and the annotation helps in achieving that. All pods of the ReplicaSet
// need to be available.
2016-01-28 16:35:14 +00:00
func IsSaturated ( deployment * extensions . Deployment , rs * extensions . ReplicaSet ) bool {
if rs == nil {
return false
}
desiredString := rs . Annotations [ DesiredReplicasAnnotation ]
desired , err := strconv . Atoi ( desiredString )
if err != nil {
return false
}
2017-04-18 15:32:06 +00:00
return * ( rs . Spec . Replicas ) == * ( deployment . Spec . Replicas ) &&
int32 ( desired ) == * ( deployment . Spec . Replicas ) &&
rs . Status . AvailableReplicas == * ( deployment . Spec . Replicas )
2016-01-28 16:35:14 +00:00
}
2016-07-05 07:29:09 +00:00
// WaitForObservedDeployment polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration.
2016-02-24 04:27:24 +00:00
// Returns error if polling timesout.
func WaitForObservedDeployment ( getDeploymentFunc func ( ) ( * extensions . Deployment , error ) , desiredGeneration int64 , interval , timeout time . Duration ) error {
// TODO: This should take clientset.Interface when all code is updated to use clientset. Keeping it this way allows the function to be used by callers who have client.Interface.
return wait . Poll ( interval , timeout , func ( ) ( bool , error ) {
deployment , err := getDeploymentFunc ( )
if err != nil {
return false , err
}
return deployment . Status . ObservedGeneration >= desiredGeneration , nil
} )
}
2016-03-04 10:29:55 +00:00
2016-11-18 20:58:22 +00:00
// TODO: remove the duplicate
// WaitForObservedDeploymentInternal polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration.
// Returns error if polling timesout.
func WaitForObservedDeploymentInternal(getDeploymentFunc func() (*internalextensions.Deployment, error), desiredGeneration int64, interval, timeout time.Duration) error {
	return wait.Poll(interval, timeout, func() (bool, error) {
		d, err := getDeploymentFunc()
		if err != nil {
			return false, err
		}
		return d.Status.ObservedGeneration >= desiredGeneration, nil
	})
}
2016-03-04 10:29:55 +00:00
// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one
// step. For example:
//
// 2 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1), then old(-1), then new(+1)
// 1 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1)
// 2 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
// 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1)
// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1)
2016-04-27 04:35:14 +00:00
func ResolveFenceposts ( maxSurge , maxUnavailable * intstrutil . IntOrString , desired int32 ) ( int32 , int32 , error ) {
surge , err := intstrutil . GetValueFromIntOrPercent ( maxSurge , int ( desired ) , true )
2016-03-04 10:29:55 +00:00
if err != nil {
return 0 , 0 , err
}
2016-04-27 04:35:14 +00:00
unavailable , err := intstrutil . GetValueFromIntOrPercent ( maxUnavailable , int ( desired ) , false )
2016-03-04 10:29:55 +00:00
if err != nil {
return 0 , 0 , err
}
if surge == 0 && unavailable == 0 {
// Validation should never allow the user to explicitly use zero values for both maxSurge
// maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero.
// If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the
// theory that surge might not work due to quota.
unavailable = 1
}
2016-04-27 04:35:14 +00:00
return int32 ( surge ) , int32 ( unavailable ) , nil
2016-03-04 10:29:55 +00:00
}
2016-07-08 12:48:38 +00:00
// DeploymentDeepCopy returns a deep copy of the given deployment, produced via
// the API scheme's generic copier.
func DeploymentDeepCopy(deployment *extensions.Deployment) (*extensions.Deployment, error) {
	obj, err := api.Scheme.DeepCopy(deployment)
	if err != nil {
		return nil, err
	}
	if copied, ok := obj.(*extensions.Deployment); ok {
		return copied, nil
	}
	return nil, fmt.Errorf("expected Deployment, got %#v", obj)
}