mirror of https://github.com/k3s-io/k3s
Merge pull request #29889 from janetkuo/deployment-e2e-test-fix
Automatic merge from submit-queue

Fix deployment e2e test: waitDeploymentStatus should error when entering an invalid state

Follow-up to #28162:
1. We should check that max unavailable and max surge aren't violated at all times in the e2e tests. (This isn't checked in the deployment scaled rollout test yet; there we should wait for the deployment to become valid first, then keep checking until the rollout finishes.)
2. Fix some minor bugs in the e2e tests.

@kubernetes/deployment
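For context, these are the two rolling-update bounds the reworked test enforces on every poll: available pods must never drop below replicas - maxUnavailable, and total pods created must never exceed replicas + maxSurge. A minimal, self-contained sketch (the type and field names are illustrative; the real helpers resolve maxUnavailable and maxSurge from IntOrString values against the replica count):

```go
package main

import "fmt"

// rollingUpdate holds maxUnavailable and maxSurge already resolved to
// absolute pod counts (a hypothetical simplification of the real fields).
type rollingUpdate struct {
	replicas, maxUnavailable, maxSurge int32
}

// bounds returns the window the e2e test now checks at all times:
// available pods may never drop below minAvailable, and the total
// number of pods created may never exceed maxCreated.
func (r rollingUpdate) bounds() (minAvailable, maxCreated int32) {
	return r.replicas - r.maxUnavailable, r.replicas + r.maxSurge
}

func main() {
	r := rollingUpdate{replicas: 10, maxUnavailable: 2, maxSurge: 3}
	lo, hi := r.bounds()
	fmt.Printf("available >= %d, created <= %d\n", lo, hi) // available >= 8, created <= 13
}
```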
commit 74477a83e4

@@ -247,6 +247,14 @@ func MaxUnavailable(deployment extensions.Deployment) int32 {
 	return maxUnavailable
 }
 
+// MinAvailable returns the minimum available pods of a given deployment
+func MinAvailable(deployment *extensions.Deployment) int32 {
+	if !IsRollingUpdate(deployment) {
+		return int32(0)
+	}
+	return deployment.Spec.Replicas - MaxUnavailable(*deployment)
+}
+
 // MaxSurge returns the maximum surge pods a rolling deployment can take.
 func MaxSurge(deployment extensions.Deployment) int32 {
 	if !IsRollingUpdate(&deployment) {
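Note the design choice in the new MinAvailable helper: for a deployment that is not a rolling update (i.e. the Recreate strategy), it returns 0, since every pod may legitimately be unavailable while the old replica set is torn down before the new one comes up.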
@@ -593,12 +601,12 @@ func CountAvailablePodsForReplicaSets(podList *api.PodList, rss []*extensions.Re
 }
 
 // GetAvailablePodsForDeployment returns the number of available pods (listed from clientset) corresponding to the given deployment.
-func GetAvailablePodsForDeployment(c clientset.Interface, deployment *extensions.Deployment, minReadySeconds int32) (int32, error) {
+func GetAvailablePodsForDeployment(c clientset.Interface, deployment *extensions.Deployment) (int32, error) {
 	podList, err := listPods(deployment, c)
 	if err != nil {
 		return 0, err
 	}
-	return countAvailablePods(podList.Items, minReadySeconds), nil
+	return countAvailablePods(podList.Items, deployment.Spec.MinReadySeconds), nil
 }
 
 func countAvailablePods(pods []api.Pod, minReadySeconds int32) int32 {
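The hunk above drops the minReadySeconds parameter in favor of reading deployment.Spec.MinReadySeconds directly, so callers can no longer pass a stale or mismatched value. As a rough sketch of what minReadySeconds means for availability (a hypothetical helper, not the actual countAvailablePods implementation):

```go
package main

import (
	"fmt"
	"time"
)

// isAvailable sketches the availability rule implied by minReadySeconds:
// a pod counts as available only once it has been Ready for at least
// minReadySeconds. readySince is when the pod's Ready condition last
// became true; the zero time means the pod is not Ready at all.
func isAvailable(readySince time.Time, minReadySeconds int32, now time.Time) bool {
	if readySince.IsZero() {
		return false
	}
	return now.Sub(readySince) >= time.Duration(minReadySeconds)*time.Second
}

func main() {
	ready := time.Now().Add(-5 * time.Second)
	fmt.Println(isAvailable(ready, 3, time.Now()))  // true: Ready for 5s >= 3s
	fmt.Println(isAvailable(ready, 30, time.Now())) // false: not Ready long enough
}
```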
@@ -83,7 +83,7 @@ var _ = framework.KubeDescribe("Deployment", func() {
 	It("paused deployment should be able to scale", func() {
 		testScalePausedDeployment(f)
 	})
-	It("scaled rollout should not block on annotation check", func() {
+	It("scaled rollout deployment should not block on annotation check", func() {
 		testScaledRolloutDeployment(f)
 	})
 	// TODO: add tests that cover deployment.Spec.MinReadySeconds once we solved clock-skew issues
@@ -236,7 +236,7 @@ func testNewDeployment(f *framework.Framework) {
 	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", nginxImage)
 	Expect(err).NotTo(HaveOccurred())
 
-	err = framework.WaitForDeploymentStatus(c, deploy, true)
+	err = framework.WaitForDeploymentStatus(c, deploy)
 	Expect(err).NotTo(HaveOccurred())
 
 	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
@@ -285,7 +285,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
 	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", redisImage)
 	Expect(err).NotTo(HaveOccurred())
 
-	err = framework.WaitForDeploymentStatus(c, deploy, true)
+	err = framework.WaitForDeploymentStatus(c, deploy)
 	Expect(err).NotTo(HaveOccurred())
 
 	// There should be 1 old RS (nginx-controller, which is adopted)
@@ -341,7 +341,7 @@ func testRollingUpdateDeploymentEvents(f *framework.Framework) {
 	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", redisImage)
 	Expect(err).NotTo(HaveOccurred())
 
-	err = framework.WaitForDeploymentStatus(c, deploy, true)
+	err = framework.WaitForDeploymentStatus(c, deploy)
 	Expect(err).NotTo(HaveOccurred())
 	// Verify that the pods were scaled up and down as expected. We use events to verify that.
 	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
@@ -397,7 +397,7 @@ func testRecreateDeployment(f *framework.Framework) {
 	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", redisImage)
 	Expect(err).NotTo(HaveOccurred())
 
-	err = framework.WaitForDeploymentStatus(c, deploy, true)
+	err = framework.WaitForDeploymentStatus(c, deploy)
 	Expect(err).NotTo(HaveOccurred())
 
 	// Verify that the pods were scaled up and down as expected. We use events to verify that.
@@ -557,7 +557,7 @@ func testRolloverDeployment(f *framework.Framework) {
 	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
 	Expect(err).NotTo(HaveOccurred())
 
-	err = framework.WaitForDeploymentStatus(c, deployment, true)
+	err = framework.WaitForDeploymentStatus(c, deployment)
 	Expect(err).NotTo(HaveOccurred())
 }
 
@@ -685,7 +685,7 @@ func testRollbackDeployment(f *framework.Framework) {
 	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
 	Expect(err).NotTo(HaveOccurred())
 
-	err = framework.WaitForDeploymentStatus(c, deploy, true)
+	err = framework.WaitForDeploymentStatus(c, deploy)
 	Expect(err).NotTo(HaveOccurred())
 
 	// Current newRS annotation should be "create"
@@ -711,7 +711,7 @@ func testRollbackDeployment(f *framework.Framework) {
 	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
 	Expect(err).NotTo(HaveOccurred())
 
-	err = framework.WaitForDeploymentStatus(c, deployment, true)
+	err = framework.WaitForDeploymentStatus(c, deployment)
 	Expect(err).NotTo(HaveOccurred())
 
 	// Current newRS annotation should be "update"
@@ -734,7 +734,7 @@ func testRollbackDeployment(f *framework.Framework) {
 	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage)
 	Expect(err).NotTo(HaveOccurred())
 
-	err = framework.WaitForDeploymentStatus(c, deployment, true)
+	err = framework.WaitForDeploymentStatus(c, deployment)
 	Expect(err).NotTo(HaveOccurred())
 
 	// Current newRS annotation should be "create", after the rollback
@@ -755,7 +755,7 @@ func testRollbackDeployment(f *framework.Framework) {
 	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
 	Expect(err).NotTo(HaveOccurred())
 
-	err = framework.WaitForDeploymentStatus(c, deployment, true)
+	err = framework.WaitForDeploymentStatus(c, deployment)
 	Expect(err).NotTo(HaveOccurred())
 
 	// Current newRS annotation should be "update", after the rollback
@@ -804,7 +804,7 @@ func testRollbackDeploymentRSNoRevision(f *framework.Framework) {
 	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
 	Expect(err).NotTo(HaveOccurred())
 
-	err = framework.WaitForDeploymentStatus(c, deploy, true)
+	err = framework.WaitForDeploymentStatus(c, deploy)
 	Expect(err).NotTo(HaveOccurred())
 
 	// Check that the replica set we created still doesn't contain revision information
@@ -846,7 +846,7 @@ func testRollbackDeploymentRSNoRevision(f *framework.Framework) {
 	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
 	Expect(err).NotTo(HaveOccurred())
 
-	err = framework.WaitForDeploymentStatus(c, deployment, true)
+	err = framework.WaitForDeploymentStatus(c, deployment)
 	Expect(err).NotTo(HaveOccurred())
 
 	// 4. Update the deploymentRollback to rollback to revision 1
@@ -866,7 +866,7 @@ func testRollbackDeploymentRSNoRevision(f *framework.Framework) {
 	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage)
 	Expect(err).NotTo(HaveOccurred())
 
-	err = framework.WaitForDeploymentStatus(c, deployment, true)
+	err = framework.WaitForDeploymentStatus(c, deployment)
 	Expect(err).NotTo(HaveOccurred())
 
 	// 5. Update the deploymentRollback to rollback to revision 10
@@ -938,7 +938,7 @@ func testDeploymentLabelAdopted(f *framework.Framework) {
 	Expect(err).NotTo(HaveOccurred())
 
 	// The RS and pods should be relabeled before the status is updated by syncRollingUpdateDeployment
-	err = framework.WaitForDeploymentStatus(c, deploy, true)
+	err = framework.WaitForDeploymentStatus(c, deploy)
 	Expect(err).NotTo(HaveOccurred())
 
 	// There should be no old RSs (overlapping RS)
@@ -1042,8 +1042,8 @@ func testScaledRolloutDeployment(f *framework.Framework) {
 	deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
 	Expect(err).NotTo(HaveOccurred())
 
-	By(fmt.Sprintf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, d.Spec.Replicas-2))
-	err = framework.WaitForDeploymentStatus(c, deployment, true)
+	By(fmt.Sprintf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment)))
+	err = framework.WaitForDeploymentStatusValid(c, deployment, true)
 	Expect(err).NotTo(HaveOccurred())
 
 	first, err := deploymentutil.GetNewReplicaSet(deployment, c)
@@ -1063,8 +1063,8 @@ func testScaledRolloutDeployment(f *framework.Framework) {
 	deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
 	Expect(err).NotTo(HaveOccurred())
 
-	By(fmt.Sprintf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, d.Spec.Replicas-2))
-	err = framework.WaitForDeploymentStatus(c, deployment, false)
+	By(fmt.Sprintf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment)))
+	err = framework.WaitForDeploymentStatusValid(c, deployment, false)
 	Expect(err).NotTo(HaveOccurred())
 
 	By(fmt.Sprintf("Checking that the replica sets for %q are synced", deploymentName))
@@ -1104,8 +1104,8 @@ func testScaledRolloutDeployment(f *framework.Framework) {
 		Expect(err).NotTo(HaveOccurred())
 	}
 
-	By(fmt.Sprintf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, d.Spec.Replicas-2))
-	err = framework.WaitForDeploymentStatus(c, deployment, true)
+	By(fmt.Sprintf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment)))
+	err = framework.WaitForDeploymentStatusValid(c, deployment, true)
 	Expect(err).NotTo(HaveOccurred())
 
 	// Update the deployment with a non-existent image so that the new replica set will be blocked.
@@ -1122,8 +1122,8 @@ func testScaledRolloutDeployment(f *framework.Framework) {
 	deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
 	Expect(err).NotTo(HaveOccurred())
 
-	By(fmt.Sprintf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, d.Spec.Replicas-2))
-	err = framework.WaitForDeploymentStatus(c, deployment, false)
+	By(fmt.Sprintf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment)))
+	err = framework.WaitForDeploymentStatusValid(c, deployment, false)
 	Expect(err).NotTo(HaveOccurred())
 
 	By(fmt.Sprintf("Checking that the replica sets for %q are synced", deploymentName))
@@ -1163,7 +1163,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
 		Expect(err).NotTo(HaveOccurred())
 	}
 
-	By(fmt.Sprintf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, d.Spec.Replicas-2))
-	err = framework.WaitForDeploymentStatus(c, deployment, true)
+	By(fmt.Sprintf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment)))
+	err = framework.WaitForDeploymentStatusValid(c, deployment, true)
 	Expect(err).NotTo(HaveOccurred())
 }
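The five hunks above implement point 1 of the PR description for the scaled rollout test: because a scale operation briefly invalidates the old bounds, the test first waits for the status to become valid again via WaitForDeploymentStatusValid instead of enforcing the invariants throughout, and the By messages now derive the minimum from deploymentutil.MinAvailable(deployment) rather than the hard-coded d.Spec.Replicas-2.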
@@ -3183,9 +3183,9 @@ func waitForReplicaSetPodsGone(c *client.Client, rs *extensions.ReplicaSet) erro
 	})
 }
 
-// Waits for the deployment to reach desired state.
-// Returns an error if minAvailable or maxCreated is broken at any times.
-func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment, expectComplete bool) error {
+// Waits for the deployment status to sync (i.e. max unavailable and max surge aren't violated anymore).
+// If expectComplete, wait until all its replicas become up-to-date.
+func WaitForDeploymentStatusValid(c clientset.Interface, d *extensions.Deployment, expectComplete bool) error {
 	var (
 		oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet
 		newRS                     *extensions.ReplicaSet
@@ -3206,6 +3206,7 @@ func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment, ex
 		if newRS == nil {
 			// New RC hasn't been created yet.
+			reason = "new replica set hasn't been created yet"
 			Logf(reason)
 			return false, nil
 		}
 		allRSs = append(oldRSs, newRS)
@@ -3213,11 +3214,12 @@ func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment, ex
 		for i := range allRSs {
 			if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
+				reason = "all replica sets need to contain the pod-template-hash label"
 				Logf(reason)
 				return false, nil
 			}
 		}
 		totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
-		totalAvailable, err := deploymentutil.GetAvailablePodsForDeployment(c, deployment, deployment.Spec.MinReadySeconds)
+		totalAvailable, err := deploymentutil.GetAvailablePodsForDeployment(c, deployment)
 		if err != nil {
 			return false, err
 		}
@@ -3227,7 +3229,7 @@ func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment, ex
 			Logf(reason)
 			return false, nil
 		}
-		minAvailable := deployment.Spec.Replicas - deploymentutil.MaxUnavailable(*deployment)
+		minAvailable := deploymentutil.MinAvailable(deployment)
 		if totalAvailable < minAvailable {
 			reason = fmt.Sprintf("total pods available: %d, less than the min required: %d", totalAvailable, minAvailable)
 			Logf(reason)
@@ -3251,7 +3253,7 @@ func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment, ex
 
 	if err == wait.ErrWaitTimeout {
 		logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
-		logPodsOfDeployment(c, deployment, deployment.Spec.MinReadySeconds)
+		logPodsOfDeployment(c, deployment)
 		err = fmt.Errorf("%s", reason)
 	}
 	if err != nil {
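The split between the renamed WaitForDeploymentStatusValid and the new WaitForDeploymentStatus below rests on wait.Poll's contract: a condition returning (false, nil) means keep polling, while a non-nil error aborts immediately, which is how an invariant violation now fails the test at the moment it happens rather than as a timeout. A stand-alone sketch of that contract (pollUntil is a hypothetical stand-in for wait.Poll):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil retries cond every interval until it reports done, returns an
// error (fail fast), or the timeout elapses.
func pollUntil(interval, timeout time.Duration, cond func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		done, err := cond()
		if err != nil {
			return err // e.g. "total pods created: 14, more than the max allowed: 13"
		}
		if done {
			return nil
		}
		time.Sleep(interval)
	}
	return errors.New("timed out waiting for the condition")
}

func main() {
	calls := 0
	err := pollUntil(time.Millisecond, time.Second, func() (bool, error) {
		calls++
		return calls == 3, nil // condition satisfied on the third poll
	})
	fmt.Println(calls, err) // 3 <nil>
}
```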
@@ -3260,6 +3262,74 @@ func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment, ex
 	return nil
 }
 
+// Waits for the deployment to reach desired state.
+// Returns an error if the deployment's rolling update strategy (max unavailable or max surge) is broken at any times.
+func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment) error {
+	var (
+		oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet
+		newRS                     *extensions.ReplicaSet
+		deployment                *extensions.Deployment
+	)
+
+	err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
+		var err error
+		deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name)
+		if err != nil {
+			return false, err
+		}
+		oldRSs, allOldRSs, newRS, err = deploymentutil.GetAllReplicaSets(deployment, c)
+		if err != nil {
+			return false, err
+		}
+		if newRS == nil {
+			// New RS hasn't been created yet.
+			return false, nil
+		}
+		allRSs = append(oldRSs, newRS)
+		// The old/new ReplicaSets need to contain the pod-template-hash label
+		for i := range allRSs {
+			if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
+				return false, nil
+			}
+		}
+		totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
+		totalAvailable, err := deploymentutil.GetAvailablePodsForDeployment(c, deployment)
+		if err != nil {
+			return false, err
+		}
+		maxCreated := deployment.Spec.Replicas + deploymentutil.MaxSurge(*deployment)
+		if totalCreated > maxCreated {
+			logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
+			logPodsOfDeployment(c, deployment)
+			return false, fmt.Errorf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
+		}
+		minAvailable := deploymentutil.MinAvailable(deployment)
+		if totalAvailable < minAvailable {
+			logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
+			logPodsOfDeployment(c, deployment)
+			return false, fmt.Errorf("total pods available: %d, less than the min required: %d", totalAvailable, minAvailable)
+		}
+
+		// When the deployment status and its underlying resources reach the desired state, we're done
+		if deployment.Status.Replicas == deployment.Spec.Replicas &&
+			deployment.Status.UpdatedReplicas == deployment.Spec.Replicas &&
+			deploymentutil.GetReplicaCountForReplicaSets(oldRSs) == 0 &&
+			deploymentutil.GetReplicaCountForReplicaSets([]*extensions.ReplicaSet{newRS}) == deployment.Spec.Replicas {
+			return true, nil
+		}
+		return false, nil
+	})
+
+	if err == wait.ErrWaitTimeout {
+		logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
+		logPodsOfDeployment(c, deployment)
+	}
+	if err != nil {
+		return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.Name, err)
+	}
+	return nil
+}
+
 // WaitForDeploymentUpdatedReplicasLTE waits for given deployment to be observed by the controller and has at least a number of updatedReplicas
 func WaitForDeploymentUpdatedReplicasLTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int, desiredGeneration int64) error {
 	err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
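The terminal condition at the bottom of the new poll loop reads as a single predicate. Restated as a free-standing sketch (a hypothetical helper; the real code compares status fields and replica-set counts inline):

```go
package main

import "fmt"

// rolloutComplete restates the terminal check of the poll loop: status matches
// spec, every replica is up-to-date, all old replica sets are scaled to zero,
// and the new replica set owns every desired replica.
func rolloutComplete(spec, status, updated, oldRS, newRS int32) bool {
	return status == spec && updated == spec && oldRS == 0 && newRS == spec
}

func main() {
	fmt.Println(rolloutComplete(3, 3, 3, 0, 3)) // true: rollout finished
	fmt.Println(rolloutComplete(3, 4, 3, 1, 3)) // false: an old replica set still has a pod
}
```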
@@ -3401,7 +3471,8 @@ func WaitForObservedDeployment(c *clientset.Clientset, ns, deploymentName string
 	return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) { return c.Extensions().Deployments(ns).Get(deploymentName) }, desiredGeneration, Poll, 1*time.Minute)
 }
 
-func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment, minReadySeconds int32) {
+func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment) {
+	minReadySeconds := deployment.Spec.MinReadySeconds
 	podList, err := deploymentutil.ListPods(deployment,
 		func(namespace string, options api.ListOptions) (*api.PodList, error) {
 			return c.Core().Pods(namespace).List(options)