/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment

import (
	"fmt"
	"reflect"
	"sort"
	"strconv"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog"

	"k8s.io/kubernetes/pkg/controller"
	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
	labelsutil "k8s.io/kubernetes/pkg/util/labels"
)
// syncStatusOnly only updates the Deployment's Status and doesn't take any mutating actions.
func (dc *DeploymentController) syncStatusOnly(d *apps.Deployment, rsList []*apps.ReplicaSet) error {
	newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, false)
	if err != nil {
		return err
	}

	allRSs := append(oldRSs, newRS)
	return dc.syncDeploymentStatus(allRSs, newRS, d)
}
// sync is responsible for reconciling deployments on scaling events or when they
// are paused.
func (dc *DeploymentController) sync(d *apps.Deployment, rsList []*apps.ReplicaSet) error {
	newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, false)
	if err != nil {
		return err
	}

	if err := dc.scale(d, newRS, oldRSs); err != nil {
		// If we get an error while trying to scale, the deployment will be requeued
		// so we can abort this resync.
		return err
	}

	// Clean up the deployment when it's paused and no rollback is in flight.
	if d.Spec.Paused && getRollbackTo(d) == nil {
		if err := dc.cleanupDeployment(oldRSs, d); err != nil {
			return err
		}
	}

	allRSs := append(oldRSs, newRS)
	return dc.syncDeploymentStatus(allRSs, newRS, d)
}
// checkPausedConditions checks if the given deployment is paused and adds an appropriate condition.
// These conditions are needed so that we won't accidentally report lack of progress for resumed deployments
// that were paused for longer than progressDeadlineSeconds.
func (dc *DeploymentController) checkPausedConditions(d *apps.Deployment) error {
	if !deploymentutil.HasProgressDeadline(d) {
		return nil
	}
	cond := deploymentutil.GetDeploymentCondition(d.Status, apps.DeploymentProgressing)
	if cond != nil && cond.Reason == deploymentutil.TimedOutReason {
		// If we have reported lack of progress, do not overwrite it with a paused condition.
		return nil
	}
	pausedCondExists := cond != nil && cond.Reason == deploymentutil.PausedDeployReason

	needsUpdate := false
	if d.Spec.Paused && !pausedCondExists {
		condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.PausedDeployReason, "Deployment is paused")
		deploymentutil.SetDeploymentCondition(&d.Status, *condition)
		needsUpdate = true
	} else if !d.Spec.Paused && pausedCondExists {
		condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.ResumedDeployReason, "Deployment is resumed")
		deploymentutil.SetDeploymentCondition(&d.Status, *condition)
		needsUpdate = true
	}

	if !needsUpdate {
		return nil
	}

	var err error
	d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
	return err
}
// getAllReplicaSetsAndSyncRevision returns all the replica sets for the provided deployment (new and all old), with the new RS's and the deployment's revision updated.
//
// rsList should come from getReplicaSetsForDeployment(d).
//
// 1. Get all old RSes this deployment targets, and calculate the max revision number among them (maxOldV).
// 2. Get the new RS this deployment targets (whose pod template matches the deployment's), and update the new RS's revision number to (maxOldV + 1),
//    but only if its current revision number is smaller than (maxOldV + 1). If this step fails, we'll update it in the next deployment sync loop.
// 3. Copy the new RS's revision number to the deployment (update the deployment's revision). If this step fails, we'll update it in the next deployment sync loop.
//
// Note that currently the deployment controller uses caches to avoid querying the server for reads.
// This may lead to stale reads of replica sets, and thus an incorrect deployment status.
func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *apps.Deployment, rsList []*apps.ReplicaSet, createIfNotExisted bool) (*apps.ReplicaSet, []*apps.ReplicaSet, error) {
	_, allOldRSs := deploymentutil.FindOldReplicaSets(d, rsList)

	// Get the new replica set with the updated revision number.
	newRS, err := dc.getNewReplicaSet(d, rsList, allOldRSs, createIfNotExisted)
	if err != nil {
		return nil, nil, err
	}

	return newRS, allOldRSs, nil
}
// getNewReplicaSet returns a replica set that matches the intent of the given deployment. It returns nil if the new replica set doesn't exist yet.
// 1. Get the existing new RS (the RS that the given deployment targets, whose pod template is the same as the deployment's).
// 2. If there's an existing new RS, update its revision number if it's smaller than (maxOldRevision + 1), where maxOldRevision is the max revision number among all old RSes.
// 3. If there's no existing new RS and createIfNotExisted is true, create one with the appropriate revision number (maxOldRevision + 1) and replicas.
// Note that the pod-template-hash will be added to adopted RSes and pods.
func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, oldRSs []*apps.ReplicaSet, createIfNotExisted bool) (*apps.ReplicaSet, error) {
	existingNewRS := deploymentutil.FindNewReplicaSet(d, rsList)

	// Calculate the max revision number among all old RSes.
	maxOldRevision := deploymentutil.MaxRevision(oldRSs)
	// Calculate the revision number for this new replica set.
	newRevision := strconv.FormatInt(maxOldRevision+1, 10)
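	// For example (illustrative numbers): if the old replica sets carry revision
	// annotations "1" and "3", maxOldRevision is 3 and the new replica set is
	// stamped with revision "4".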
	// The latest replica set exists. We need to sync its annotations (this includes copying all but
	// annotationsToSkip from the parent deployment, and updating revision, desiredReplicas,
	// and maxReplicas), and also update the revision annotation in the deployment with the
	// latest revision.
	if existingNewRS != nil {
		rsCopy := existingNewRS.DeepCopy()

		// Set the existing new replica set's annotations.
		annotationsUpdated := deploymentutil.SetNewReplicaSetAnnotations(d, rsCopy, newRevision, true)
		minReadySecondsNeedsUpdate := rsCopy.Spec.MinReadySeconds != d.Spec.MinReadySeconds
		if annotationsUpdated || minReadySecondsNeedsUpdate {
			rsCopy.Spec.MinReadySeconds = d.Spec.MinReadySeconds
			return dc.client.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(rsCopy)
		}

		// We should use the revision from existingNewRS's annotation, since it was set earlier.
		needsUpdate := deploymentutil.SetDeploymentRevision(d, rsCopy.Annotations[deploymentutil.RevisionAnnotation])
		// If no other Progressing condition has been recorded and we need to estimate the progress
		// of this deployment, then it is likely that the user started caring about progress after
		// the deployment was created. In that case we need to take into account the first time we
		// noticed the new replica set.
		cond := deploymentutil.GetDeploymentCondition(d.Status, apps.DeploymentProgressing)
		if deploymentutil.HasProgressDeadline(d) && cond == nil {
			msg := fmt.Sprintf("Found new replica set %q", rsCopy.Name)
			condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, deploymentutil.FoundNewRSReason, msg)
			deploymentutil.SetDeploymentCondition(&d.Status, *condition)
			needsUpdate = true
		}

		if needsUpdate {
			var err error
			if d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d); err != nil {
				return nil, err
			}
		}
		return rsCopy, nil
	}

	if !createIfNotExisted {
		return nil, nil
	}
	// A new ReplicaSet does not exist; create one.
	newRSTemplate := *d.Spec.Template.DeepCopy()
	podTemplateSpecHash := controller.ComputeHash(&newRSTemplate, d.Status.CollisionCount)
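	// The hash is computed over the pod template together with the deployment's
	// collisionCount, so bumping collisionCount after a hash collision (see below)
	// yields a different hash, and therefore a different ReplicaSet name, on the
	// next attempt.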
	newRSTemplate.Labels = labelsutil.CloneAndAddLabel(d.Spec.Template.Labels, apps.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash)
	// Add the podTemplateHash label to the selector.
	newRSSelector := labelsutil.CloneSelectorAndAddLabel(d.Spec.Selector, apps.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash)

	// Create the new ReplicaSet.
	newRS := apps.ReplicaSet{
		ObjectMeta: metav1.ObjectMeta{
			// Make the name deterministic to ensure idempotence.
			Name:            d.Name + "-" + podTemplateSpecHash,
			Namespace:       d.Namespace,
			OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(d, controllerKind)},
			Labels:          newRSTemplate.Labels,
		},
		Spec: apps.ReplicaSetSpec{
			Replicas:        new(int32),
			MinReadySeconds: d.Spec.MinReadySeconds,
			Selector:        newRSSelector,
			Template:        newRSTemplate,
		},
	}
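	// NewRSNewReplicas decides how many replicas the new replica set should start
	// with; roughly, for a rolling update it is capped by maxSurge (desired count
	// plus surge, minus the replicas that already exist), while for a recreate
	// deployment it is simply the desired count.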
	allRSs := append(oldRSs, &newRS)
	newReplicasCount, err := deploymentutil.NewRSNewReplicas(d, allRSs, &newRS)
	if err != nil {
		return nil, err
	}

	*(newRS.Spec.Replicas) = newReplicasCount
	// Set the new replica set's annotations.
	deploymentutil.SetNewReplicaSetAnnotations(d, &newRS, newRevision, false)
	// Create the new ReplicaSet. If it already exists, then we need to check for possible
	// hash collisions. If there is any other error, we need to report it in the status of
	// the Deployment.
	alreadyExists := false
	createdRS, err := dc.client.AppsV1().ReplicaSets(d.Namespace).Create(&newRS)
	switch {
	// We may end up hitting this due to a slow cache or a fast resync of the Deployment.
	case errors.IsAlreadyExists(err):
		alreadyExists = true

		// Fetch a copy of the ReplicaSet.
		rs, rsErr := dc.rsLister.ReplicaSets(newRS.Namespace).Get(newRS.Name)
		if rsErr != nil {
			return nil, rsErr
		}

		// If the Deployment owns the ReplicaSet and the ReplicaSet's PodTemplateSpec is semantically
		// deep equal to the PodTemplateSpec of the Deployment, it's the Deployment's new ReplicaSet.
		// Otherwise, this is a hash collision and we need to increment the collisionCount field in
		// the status of the Deployment and requeue to try the creation in the next sync.
		controllerRef := metav1.GetControllerOf(rs)
		if controllerRef != nil && controllerRef.UID == d.UID && deploymentutil.EqualIgnoreHash(&d.Spec.Template, &rs.Spec.Template) {
			createdRS = rs
			err = nil
			break
		}

		// The matching ReplicaSet is not equal - increment the collisionCount in the DeploymentStatus
		// and requeue the Deployment.
		if d.Status.CollisionCount == nil {
			d.Status.CollisionCount = new(int32)
		}
		preCollisionCount := *d.Status.CollisionCount
		*d.Status.CollisionCount++
		// Update the collisionCount for the Deployment and let it requeue by returning the original
		// error.
		_, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
		if dErr == nil {
			klog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
		}
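		// Returning the original AlreadyExists error requeues the deployment; with
		// the bumped collisionCount, the next sync computes a different hash and
		// therefore tries a different ReplicaSet name.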
		return nil, err
	case err != nil:
		msg := fmt.Sprintf("Failed to create new replica set %q: %v", newRS.Name, err)
		if deploymentutil.HasProgressDeadline(d) {
			cond := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionFalse, deploymentutil.FailedRSCreateReason, msg)
			deploymentutil.SetDeploymentCondition(&d.Status, *cond)
			// We don't really care about this error at this point, since we have a bigger issue to report.
			// TODO: Identify which errors are permanent and switch DeploymentIsFailed to take into account
			// these reasons as well. Related issue: https://github.com/kubernetes/kubernetes/issues/18568
			_, _ = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
		}
		dc.eventRecorder.Eventf(d, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg)
		return nil, err
	}
	if !alreadyExists && newReplicasCount > 0 {
		dc.eventRecorder.Eventf(d, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled up replica set %s to %d", createdRS.Name, newReplicasCount)
	}

	needsUpdate := deploymentutil.SetDeploymentRevision(d, newRevision)
	if !alreadyExists && deploymentutil.HasProgressDeadline(d) {
		msg := fmt.Sprintf("Created new replica set %q", createdRS.Name)
		condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, deploymentutil.NewReplicaSetReason, msg)
		deploymentutil.SetDeploymentCondition(&d.Status, *condition)
		needsUpdate = true
	}
	if needsUpdate {
		_, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
	}
	return createdRS, err
}
// scale scales proportionally in order to mitigate risk. Otherwise, scaling up can increase the size
// of the new replica set and scaling down can decrease the sizes of the old ones, both of which would
// have the effect of hastening the rollout progress, which could produce a higher proportion of unavailable
// replicas in the event of a problem with the rolled-out template. It should run only on scaling events or
// when a deployment is paused, and not during the normal rollout process.
func (dc *DeploymentController) scale(deployment *apps.Deployment, newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet) error {
	// If there is only one active replica set then we should scale that up to the full count of the
	// deployment. If there is no active replica set, then we should scale up the newest replica set.
	if activeOrLatest := deploymentutil.FindActiveOrLatest(newRS, oldRSs); activeOrLatest != nil {
		if *(activeOrLatest.Spec.Replicas) == *(deployment.Spec.Replicas) {
			return nil
		}
		_, _, err := dc.scaleReplicaSetAndRecordEvent(activeOrLatest, *(deployment.Spec.Replicas), deployment)
		return err
	}

	// If the new replica set is saturated, old replica sets should be fully scaled down.
	// This case handles replica set adoption during a saturated new replica set.
	if deploymentutil.IsSaturated(deployment, newRS) {
		for _, old := range controller.FilterActiveReplicaSets(oldRSs) {
			if _, _, err := dc.scaleReplicaSetAndRecordEvent(old, 0, deployment); err != nil {
				return err
			}
		}
		return nil
	}

	// There are old replica sets with pods and the new replica set is not saturated.
	// We need to proportionally scale all replica sets (new and old) in case of a
	// rolling deployment.
	if deploymentutil.IsRollingUpdate(deployment) {
		allRSs := controller.FilterActiveReplicaSets(append(oldRSs, newRS))
		allRSsReplicas := deploymentutil.GetReplicaCountForReplicaSets(allRSs)

		allowedSize := int32(0)
		if *(deployment.Spec.Replicas) > 0 {
			allowedSize = *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)
		}

		// Number of additional replicas that can be either added or removed from the total
		// replicas count. These replicas should be distributed proportionally to the active
		// replica sets.
		deploymentReplicasToAdd := allowedSize - allRSsReplicas
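		// For example (illustrative numbers): scaling a rolling-update deployment
		// from 10 to 15 replicas with maxSurge=3 gives allowedSize = 15 + 3 = 18;
		// if the active replica sets currently total 13 replicas,
		// deploymentReplicasToAdd is 5, to be distributed proportionally below.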
		// The additional replicas should be distributed proportionally amongst the active
		// replica sets, from the largest to the smallest replica set. The scaling direction
		// drives what happens in case we are trying to scale replica sets of the same size.
		// In such a case, when scaling up we should scale up newer replica sets first, and
		// when scaling down we should scale down older replica sets first.
		var scalingOperation string
		switch {
		case deploymentReplicasToAdd > 0:
			sort.Sort(controller.ReplicaSetsBySizeNewer(allRSs))
			scalingOperation = "up"

		case deploymentReplicasToAdd < 0:
			sort.Sort(controller.ReplicaSetsBySizeOlder(allRSs))
			scalingOperation = "down"
		}

		// Iterate over all active replica sets and estimate proportions for each of them.
		// The absolute value of deploymentReplicasAdded should never exceed the absolute
		// value of deploymentReplicasToAdd.
		deploymentReplicasAdded := int32(0)
		nameToSize := make(map[string]int32)
		for i := range allRSs {
			rs := allRSs[i]

			// Estimate proportions if we have replicas to add, otherwise simply populate
			// nameToSize with the current sizes for each replica set.
			if deploymentReplicasToAdd != 0 {
				proportion := deploymentutil.GetProportion(rs, *deployment, deploymentReplicasToAdd, deploymentReplicasAdded)

				nameToSize[rs.Name] = *(rs.Spec.Replicas) + proportion
				deploymentReplicasAdded += proportion
			} else {
				nameToSize[rs.Name] = *(rs.Spec.Replicas)
			}
		}

		// Update all replica sets.
		for i := range allRSs {
			rs := allRSs[i]

			// Add/remove any leftovers to the largest replica set.
			if i == 0 && deploymentReplicasToAdd != 0 {
				leftover := deploymentReplicasToAdd - deploymentReplicasAdded
				nameToSize[rs.Name] = nameToSize[rs.Name] + leftover
				if nameToSize[rs.Name] < 0 {
					nameToSize[rs.Name] = 0
				}
			}

			// TODO: Use transactions when we have them.
			if _, _, err := dc.scaleReplicaSet(rs, nameToSize[rs.Name], deployment, scalingOperation); err != nil {
				// Return as soon as we fail; the deployment will be requeued.
				return err
			}
		}
	}
	return nil
}
func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *apps.ReplicaSet, newScale int32, deployment *apps.Deployment) (bool, *apps.ReplicaSet, error) {
	// No need to scale.
	if *(rs.Spec.Replicas) == newScale {
		return false, rs, nil
	}
	var scalingOperation string
	if *(rs.Spec.Replicas) < newScale {
		scalingOperation = "up"
	} else {
		scalingOperation = "down"
	}
	scaled, newRS, err := dc.scaleReplicaSet(rs, newScale, deployment, scalingOperation)
	return scaled, newRS, err
}
func (dc *DeploymentController) scaleReplicaSet(rs *apps.ReplicaSet, newScale int32, deployment *apps.Deployment, scalingOperation string) (bool, *apps.ReplicaSet, error) {
	sizeNeedsUpdate := *(rs.Spec.Replicas) != newScale

	annotationsNeedUpdate := deploymentutil.ReplicasAnnotationsNeedUpdate(rs, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment))
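	// The desired-replicas and max-replicas annotations record the deployment's
	// size (and surge ceiling) as of the last scale, which is what the
	// proportional logic in scale() uses to compute each replica set's share.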
	scaled := false
	var err error
	if sizeNeedsUpdate || annotationsNeedUpdate {
		rsCopy := rs.DeepCopy()
		*(rsCopy.Spec.Replicas) = newScale
		deploymentutil.SetReplicasAnnotations(rsCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment))
		rs, err = dc.client.AppsV1().ReplicaSets(rsCopy.Namespace).Update(rsCopy)
		if err == nil && sizeNeedsUpdate {
			scaled = true
			dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale)
		}
	}
	return scaled, rs, err
}
// cleanupDeployment is responsible for cleaning up a deployment, i.e. it deletes all but the latest N old replica sets,
// where N=d.Spec.RevisionHistoryLimit. Old replica sets are older versions of the pod template of a deployment kept
// around by default 1) for historical reasons and 2) for the ability to roll back a deployment.
func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) error {
	if !deploymentutil.HasRevisionHistoryLimit(deployment) {
		return nil
	}

	// Avoid deleting replica sets with a deletion timestamp set.
	aliveFilter := func(rs *apps.ReplicaSet) bool {
		return rs != nil && rs.ObjectMeta.DeletionTimestamp == nil
	}
	cleanableRSes := controller.FilterReplicaSets(oldRSs, aliveFilter)

	diff := int32(len(cleanableRSes)) - *deployment.Spec.RevisionHistoryLimit
	if diff <= 0 {
		return nil
	}
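	// For example (illustrative numbers): with revisionHistoryLimit=2 and 5
	// cleanable old replica sets, diff is 3, so the 3 oldest eligible replica
	// sets are deleted below.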
	sort.Sort(controller.ReplicaSetsByCreationTimestamp(cleanableRSes))
	klog.V(4).Infof("Looking to cleanup old replica sets for deployment %q", deployment.Name)

	for i := int32(0); i < diff; i++ {
		rs := cleanableRSes[i]
		// Avoid deleting replica sets with non-zero replica counts.
		if rs.Status.Replicas != 0 || *(rs.Spec.Replicas) != 0 || rs.Generation > rs.Status.ObservedGeneration || rs.DeletionTimestamp != nil {
			continue
		}
		klog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name)
		if err := dc.client.AppsV1().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) {
			// Return the error instead of aggregating it and continuing DELETEs, on the theory
			// that we may otherwise be overloading the api server.
			return err
		}
	}

	return nil
}
// syncDeploymentStatus checks if the status is up-to-date and syncs it if necessary.
func (dc *DeploymentController) syncDeploymentStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, d *apps.Deployment) error {
	newStatus := calculateStatus(allRSs, newRS, d)

	if reflect.DeepEqual(d.Status, newStatus) {
		return nil
	}

	newDeployment := d
	newDeployment.Status = newStatus
	_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
	return err
}
// calculateStatus calculates the latest status for the provided deployment by looking into the provided replica sets.
func calculateStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, deployment *apps.Deployment) apps.DeploymentStatus {
	availableReplicas := deploymentutil.GetAvailableReplicaCountForReplicaSets(allRSs)
	totalReplicas := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
	unavailableReplicas := totalReplicas - availableReplicas
	// If unavailableReplicas is negative, then that means the Deployment has more available replicas running than
	// desired, e.g. whenever it scales down. In such a case we should simply default unavailableReplicas to zero.
	if unavailableReplicas < 0 {
		unavailableReplicas = 0
	}

	status := apps.DeploymentStatus{
		// TODO: Ensure that if we start retrying status updates, we won't pick up a new Generation value.
		ObservedGeneration:  deployment.Generation,
		Replicas:            deploymentutil.GetActualReplicaCountForReplicaSets(allRSs),
		UpdatedReplicas:     deploymentutil.GetActualReplicaCountForReplicaSets([]*apps.ReplicaSet{newRS}),
		ReadyReplicas:       deploymentutil.GetReadyReplicaCountForReplicaSets(allRSs),
		AvailableReplicas:   availableReplicas,
		UnavailableReplicas: unavailableReplicas,
		CollisionCount:      deployment.Status.CollisionCount,
	}

	// Copy conditions one by one so we won't mutate the original object.
	conditions := deployment.Status.Conditions
	for i := range conditions {
		status.Conditions = append(status.Conditions, conditions[i])
	}
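	// For example (illustrative numbers): with 10 desired replicas and
	// maxUnavailable=25%, MaxUnavailable rounds down to 2, so the Available
	// condition is True once at least 8 replicas are available.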
	if availableReplicas >= *(deployment.Spec.Replicas)-deploymentutil.MaxUnavailable(*deployment) {
		minAvailability := deploymentutil.NewDeploymentCondition(apps.DeploymentAvailable, v1.ConditionTrue, deploymentutil.MinimumReplicasAvailable, "Deployment has minimum availability.")
		deploymentutil.SetDeploymentCondition(&status, *minAvailability)
	} else {
		noMinAvailability := deploymentutil.NewDeploymentCondition(apps.DeploymentAvailable, v1.ConditionFalse, deploymentutil.MinimumReplicasUnavailable, "Deployment does not have minimum availability.")
		deploymentutil.SetDeploymentCondition(&status, *noMinAvailability)
	}

	return status
}
// isScalingEvent checks whether the provided deployment has been updated with a scaling event
// by looking at the desired-replicas annotation in the active replica sets of the deployment.
//
// rsList should come from getReplicaSetsForDeployment(d).
func (dc *DeploymentController) isScalingEvent(d *apps.Deployment, rsList []*apps.ReplicaSet) (bool, error) {
	newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, false)
	if err != nil {
		return false, err
	}
	allRSs := append(oldRSs, newRS)
	for _, rs := range controller.FilterActiveReplicaSets(allRSs) {
		desired, ok := deploymentutil.GetDesiredReplicasAnnotation(rs)
		if !ok {
			continue
		}
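		// The annotation records the deployment's desired replica count as of the
		// last sync; if an active replica set remembers a different count than the
		// current spec, the deployment was scaled.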
		if desired != *(d.Spec.Replicas) {
			return true, nil
		}
	}
	return false, nil
}