mirror of https://github.com/k3s-io/k3s
Move deployment e2e test for rollback with no revision to integration
parent 4de496d4f8
commit 1ca6120af3
@@ -360,7 +360,7 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
 	// kubectl get deployments -n <aggregated-api-namespace> && status == Running
 	// NOTE: aggregated apis should generally be set up in there own namespace (<aggregated-api-namespace>). As the test framework
 	// is setting up a new namespace, we are just using that.
-	err = framework.WaitForDeploymentStatusValid(client, deployment)
+	err = framework.WaitForDeploymentComplete(client, deployment)
 
 	// We seem to need to do additional waiting until the extension api service is actually up.
 	err = wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
@@ -87,9 +87,6 @@ var _ = SIGDescribe("Deployment", func() {
 	It("deployment should support rollback", func() {
 		testRollbackDeployment(f)
 	})
-	It("deployment should support rollback when there's replica set with no revision", func() {
-		testRollbackDeploymentRSNoRevision(f)
-	})
 	It("deployment should label adopted RSs and pods", func() {
 		testDeploymentLabelAdopted(f)
 	})
@@ -171,30 +168,6 @@ func newDeploymentRollback(name string, annotations map[string]string, revision
 	}
 }
 
-// checkDeploymentRevision checks if the input deployment's and its new replica set's revision and images are as expected.
-func checkDeploymentRevision(c clientset.Interface, ns, deploymentName, revision, imageName, image string) (*extensions.Deployment, *extensions.ReplicaSet) {
-	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
-	Expect(err).NotTo(HaveOccurred())
-	// Check revision of the new replica set of this deployment
-	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
-	Expect(err).NotTo(HaveOccurred())
-	Expect(newRS).NotTo(Equal(nilRs))
-	Expect(newRS.Annotations).NotTo(Equal(nil))
-	Expect(newRS.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(revision))
-	// Check revision of This deployment
-	Expect(deployment.Annotations).NotTo(Equal(nil))
-	Expect(deployment.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(revision))
-	if len(imageName) > 0 {
-		// Check the image the new replica set creates
-		Expect(newRS.Spec.Template.Spec.Containers[0].Name).Should(Equal(imageName))
-		Expect(newRS.Spec.Template.Spec.Containers[0].Image).Should(Equal(image))
-		// Check the image the deployment creates
-		Expect(deployment.Spec.Template.Spec.Containers[0].Name).Should(Equal(imageName))
-		Expect(deployment.Spec.Template.Spec.Containers[0].Image).Should(Equal(image))
-	}
-	return deployment, newRS
-}
-
 func stopDeployment(c clientset.Interface, internalClient internalclientset.Interface, ns, deploymentName string) {
 	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 	Expect(err).NotTo(HaveOccurred())
@@ -253,7 +226,7 @@ func testDeleteDeployment(f *framework.Framework) {
 	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", NginxImage)
 	Expect(err).NotTo(HaveOccurred())
 
-	err = framework.WaitForDeploymentStatusValid(c, deploy)
+	err = framework.WaitForDeploymentComplete(c, deploy)
 	Expect(err).NotTo(HaveOccurred())
 
 	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
@@ -301,7 +274,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
 	Expect(err).NotTo(HaveOccurred())
 
 	framework.Logf("Ensuring status for deployment %q is the expected", deploy.Name)
-	err = framework.WaitForDeploymentStatusValid(c, deploy)
+	err = framework.WaitForDeploymentComplete(c, deploy)
 	Expect(err).NotTo(HaveOccurred())
 
 	// There should be 1 old RS (nginx-controller, which is adopted)
@@ -334,7 +307,7 @@ func testRecreateDeployment(f *framework.Framework) {
 	Expect(err).NotTo(HaveOccurred())
 
 	framework.Logf("Waiting deployment %q to complete", deploymentName)
-	Expect(framework.WaitForDeploymentStatusValid(c, deployment)).NotTo(HaveOccurred())
+	Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())
 
 	// Update deployment to delete redis pods and bring up nginx pods.
 	framework.Logf("Triggering a new rollout for deployment %q", deploymentName)
@@ -466,13 +439,14 @@ func testRolloverDeployment(f *framework.Framework) {
 	err = framework.WaitForDeploymentUpdatedReplicasLTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation)
 	// Check if it's updated to revision 1 correctly
 	framework.Logf("Check revision of new replica set for deployment %q", deploymentName)
-	_, newRS := checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)
+	err = framework.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
+	Expect(err).NotTo(HaveOccurred())
 
 	framework.Logf("Ensure that both replica sets have 1 created replica")
 	oldRS, err := c.Extensions().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
 	Expect(err).NotTo(HaveOccurred())
 	ensureReplicas(oldRS, int32(1))
-	newRS, err = c.Extensions().ReplicaSets(ns).Get(newRS.Name, metav1.GetOptions{})
+	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
 	Expect(err).NotTo(HaveOccurred())
 	ensureReplicas(newRS, int32(1))
 
@@ -496,7 +470,7 @@ func testRolloverDeployment(f *framework.Framework) {
 	Expect(err).NotTo(HaveOccurred())
 
 	framework.Logf("Make sure deployment %q is complete", deploymentName)
-	err = framework.WaitForDeploymentStatus(c, deployment)
+	err = framework.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
 	Expect(err).NotTo(HaveOccurred())
 
 	framework.Logf("Ensure that both old replica sets have no replicas")
@@ -541,7 +515,7 @@ func testRollbackDeployment(f *framework.Framework) {
 	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
 	Expect(err).NotTo(HaveOccurred())
 
-	err = framework.WaitForDeploymentStatusValid(c, deploy)
+	err = framework.WaitForDeploymentComplete(c, deploy)
 	Expect(err).NotTo(HaveOccurred())
 
 	// Current newRS annotation should be "create"
@@ -567,7 +541,7 @@ func testRollbackDeployment(f *framework.Framework) {
 	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
 	Expect(err).NotTo(HaveOccurred())
 
-	err = framework.WaitForDeploymentStatus(c, deployment)
+	err = framework.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
 	Expect(err).NotTo(HaveOccurred())
 
 	// Current newRS annotation should be "update"
@@ -590,7 +564,7 @@ func testRollbackDeployment(f *framework.Framework) {
 	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage)
 	Expect(err).NotTo(HaveOccurred())
 
-	err = framework.WaitForDeploymentStatus(c, deployment)
+	err = framework.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
 	Expect(err).NotTo(HaveOccurred())
 
 	// Current newRS annotation should be "create", after the rollback
@@ -611,7 +585,7 @@ func testRollbackDeployment(f *framework.Framework) {
 	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
 	Expect(err).NotTo(HaveOccurred())
 
-	err = framework.WaitForDeploymentStatus(c, deployment)
+	err = framework.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
 	Expect(err).NotTo(HaveOccurred())
 
 	// Current newRS annotation should be "update", after the rollback
@@ -633,7 +607,8 @@ func testRollbackDeployment(f *framework.Framework) {
 
 	// The pod template shouldn't change since there's no revision 10
 	// Check if it's still revision 4 and still has the old pod template
-	checkDeploymentRevision(c, ns, deploymentName, "4", updatedDeploymentImageName, updatedDeploymentImage)
+	err = framework.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
+	Expect(err).NotTo(HaveOccurred())
 
 	// 6. Update the deploymentRollback to rollback to revision 4
 	// Since it's already revision 4, it should be no-op
@@ -650,114 +625,8 @@ func testRollbackDeployment(f *framework.Framework) {
 
 	// The pod template shouldn't change since it's already revision 4
 	// Check if it's still revision 4 and still has the old pod template
-	checkDeploymentRevision(c, ns, deploymentName, "4", updatedDeploymentImageName, updatedDeploymentImage)
+	err = framework.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
+	Expect(err).NotTo(HaveOccurred())
 }
 
-// testRollbackDeploymentRSNoRevision tests that deployment supports rollback even when there's old replica set without revision.
-// An old replica set without revision is created, and then a deployment is created (v1). The deployment shouldn't add revision
-// annotation to the old replica set. Then rollback the deployment to last revision, and it should fail.
-// Then update the deployment to v2 and rollback it to v1 should succeed, now the deployment
-// becomes v3. Then rollback the deployment to v10 (doesn't exist in history) should fail.
-// Finally, rollback the deployment (v3) to v3 should be no-op.
-// TODO: When we finished reporting rollback status in deployment status, check the rollback status here in each case.
-func testRollbackDeploymentRSNoRevision(f *framework.Framework) {
-	ns := f.Namespace.Name
-	c := f.ClientSet
-	podName := "nginx"
-	deploymentPodLabels := map[string]string{"name": podName}
-	rsPodLabels := map[string]string{
-		"name": podName,
-		"pod":  NginxImageName,
-	}
-
-	// Create an old RS without revision
-	rsName := "test-rollback-no-revision-controller"
-	rsReplicas := int32(0)
-	rs := newRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage)
-	rs.Annotations = make(map[string]string)
-	rs.Annotations["make"] = "difference"
-	_, err := c.Extensions().ReplicaSets(ns).Create(rs)
-	Expect(err).NotTo(HaveOccurred())
-
-	// 1. Create a deployment to create nginx pods, which have different template than the replica set created above.
-	deploymentName, deploymentImageName := "test-rollback-no-revision-deployment", NginxImageName
-	deploymentReplicas := int32(1)
-	deploymentImage := NginxImage
-	deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
-	framework.Logf("Creating deployment %s", deploymentName)
-	d := framework.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
-	deploy, err := c.Extensions().Deployments(ns).Create(d)
-	Expect(err).NotTo(HaveOccurred())
-
-	// Wait for it to be updated to revision 1
-	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
-	Expect(err).NotTo(HaveOccurred())
-
-	err = framework.WaitForDeploymentStatusValid(c, deploy)
-	Expect(err).NotTo(HaveOccurred())
-
-	// Check that the replica set we created still doesn't contain revision information
-	rs, err = c.Extensions().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
-	Expect(err).NotTo(HaveOccurred())
-	Expect(rs.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(""))
-
-	// 2. Update the deploymentRollback to rollback to last revision
-	// Since there's only 1 revision in history, it should stay as revision 1
-	revision := int64(0)
-	framework.Logf("rolling back deployment %s to last revision", deploymentName)
-	rollback := newDeploymentRollback(deploymentName, nil, revision)
-	err = c.Extensions().Deployments(ns).Rollback(rollback)
-	Expect(err).NotTo(HaveOccurred())
-
-	// Wait for the deployment to start rolling back
-	err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
-	Expect(err).NotTo(HaveOccurred())
-	// TODO: report RollbackRevisionNotFound in deployment status and check it here
-
-	// The pod template shouldn't change since there's no last revision
-	// Check if the deployment is still revision 1 and still has the old pod template
-	checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)
-
-	// 3. Update the deployment to create redis pods.
-	updatedDeploymentImage := RedisImage
-	updatedDeploymentImageName := RedisImageName
-	deployment, err := framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
-		update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
-		update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
-	})
-	Expect(err).NotTo(HaveOccurred())
-
-	// Use observedGeneration to determine if the controller noticed the pod template update.
-	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
-	Expect(err).NotTo(HaveOccurred())
-
-	// Wait for it to be updated to revision 2
-	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
-	Expect(err).NotTo(HaveOccurred())
-
-	err = framework.WaitForDeploymentStatus(c, deployment)
-	Expect(err).NotTo(HaveOccurred())
-
-	// 4. Update the deploymentRollback to rollback to revision 1
-	revision = 1
-	framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
-	rollback = newDeploymentRollback(deploymentName, nil, revision)
-	err = c.Extensions().Deployments(ns).Rollback(rollback)
-	Expect(err).NotTo(HaveOccurred())
-
-	// Wait for the deployment to start rolling back
-	err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
-	Expect(err).NotTo(HaveOccurred())
-	// TODO: report RollbackDone in deployment status and check it here
-
-	// The pod template should be updated to the one in revision 1
-	// Wait for it to be updated to revision 3
-	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage)
-	Expect(err).NotTo(HaveOccurred())
-
-	err = framework.WaitForDeploymentStatus(c, deployment)
-	Expect(err).NotTo(HaveOccurred())
-}
-
 func testDeploymentLabelAdopted(f *framework.Framework) {
@@ -787,7 +656,7 @@ func testDeploymentLabelAdopted(f *framework.Framework) {
 	Expect(err).NotTo(HaveOccurred())
 
 	// The RS and pods should be relabeled before the status is updated by syncRollingUpdateDeployment
-	err = framework.WaitForDeploymentStatusValid(c, deploy)
+	err = framework.WaitForDeploymentComplete(c, deploy)
 	Expect(err).NotTo(HaveOccurred())
 
 	// There should be no old RSs (overlapping RS)
@@ -838,7 +707,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
 	Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err)
 
 	framework.Logf("Waiting for deployment %q to complete", deployment.Name)
-	Expect(framework.WaitForDeploymentStatusValid(c, deployment)).NotTo(HaveOccurred())
+	Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())
 
 	first, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
 	Expect(err).NotTo(HaveOccurred())
@@ -888,7 +757,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
 	Expect(err).NotTo(HaveOccurred())
 
 	framework.Logf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment))
-	Expect(framework.WaitForDeploymentStatusValid(c, deployment)).NotTo(HaveOccurred())
+	Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())
 
 	oldRSs, _, rs, err := deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
 	Expect(err).NotTo(HaveOccurred())
@@ -948,7 +817,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
 	Expect(err).NotTo(HaveOccurred())
 
 	framework.Logf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment))
-	Expect(framework.WaitForDeploymentStatusValid(c, deployment)).NotTo(HaveOccurred())
+	Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())
 
 	oldRSs, _, rs, err = deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
 	Expect(err).NotTo(HaveOccurred())
@@ -1035,7 +904,7 @@ func testFailedDeployment(f *framework.Framework) {
 	Expect(framework.WaitForDeploymentUpdatedReplicasLTE(c, ns, deploymentName, replicas, deployment.Generation))
 
 	framework.Logf("Waiting for deployment %q status", deploymentName)
-	Expect(framework.WaitForDeploymentStatusValid(c, deployment)).NotTo(HaveOccurred())
+	Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())
 
 	framework.Logf("Checking deployment %q for a complete condition", deploymentName)
 	Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, extensions.DeploymentProgressing)).NotTo(HaveOccurred())
@@ -1165,7 +1034,7 @@ func testIterativeDeployments(f *framework.Framework) {
 	Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(HaveOccurred())
 
 	framework.Logf("Waiting for deployment %q status", deploymentName)
-	Expect(framework.WaitForDeploymentStatusValid(c, deployment)).NotTo(HaveOccurred())
+	Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())
 
 	framework.Logf("Checking deployment %q for a complete condition", deploymentName)
 	Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, extensions.DeploymentProgressing)).NotTo(HaveOccurred())
@@ -1193,7 +1062,7 @@ func testDeploymentsControllerRef(f *framework.Framework) {
 	d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
 	deploy, err := c.Extensions().Deployments(ns).Create(d)
 	Expect(err).NotTo(HaveOccurred())
-	err = framework.WaitForDeploymentStatusValid(c, deploy)
+	err = framework.WaitForDeploymentComplete(c, deploy)
 	Expect(err).NotTo(HaveOccurred())
 
 	framework.Logf("Checking its ReplicaSet has the right controllerRef")
@@ -1213,7 +1082,7 @@ func testDeploymentsControllerRef(f *framework.Framework) {
 	d = framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
 	deploy, err = c.Extensions().Deployments(ns).Create(d)
 	Expect(err).NotTo(HaveOccurred())
-	err = framework.WaitForDeploymentStatusValid(c, deploy)
+	err = framework.WaitForDeploymentComplete(c, deploy)
 	Expect(err).NotTo(HaveOccurred())
 
 	framework.Logf("Waiting for the ReplicaSet to have the right controllerRef")
@@ -70,7 +70,6 @@ go_library(
         "//pkg/master/ports:go_default_library",
         "//pkg/ssh:go_default_library",
        "//pkg/util/file:go_default_library",
-        "//pkg/util/labels:go_default_library",
         "//pkg/util/system:go_default_library",
         "//pkg/util/taints:go_default_library",
         "//pkg/util/version:go_default_library",
@@ -31,7 +31,6 @@ import (
 	extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
-	labelsutil "k8s.io/kubernetes/pkg/util/labels"
 	testutils "k8s.io/kubernetes/test/utils"
 )
 
@@ -131,69 +130,17 @@ func NewDeployment(deploymentName string, replicas int32, podLabels map[string]s
 	}
 }
 
-// Waits for the deployment status to become valid (i.e. max unavailable and max surge aren't violated anymore).
-// Note that the status should stay valid at all times unless shortly after a scaling event or the deployment is just created.
-// To verify that the deployment status is valid and wait for the rollout to finish, use WaitForDeploymentStatus instead.
-func WaitForDeploymentStatusValid(c clientset.Interface, d *extensions.Deployment) error {
-	return testutils.WaitForDeploymentStatusValid(c, d, Logf, Poll, pollLongTimeout)
+// Waits for the deployment to complete, and don't check if rolling update strategy is broken.
+// Rolling update strategy is used only during a rolling update, and can be violated in other situations,
+// such as shortly after a scaling event or the deployment is just created.
+func WaitForDeploymentComplete(c clientset.Interface, d *extensions.Deployment) error {
+	return testutils.WaitForDeploymentComplete(c, d, Logf, Poll, pollLongTimeout)
 }
 
-// Waits for the deployment to reach desired state.
-// Returns an error if the deployment's rolling update strategy (max unavailable or max surge) is broken at any times.
-func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment) error {
-	var (
-		oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet
-		newRS                     *extensions.ReplicaSet
-		deployment                *extensions.Deployment
-	)
-
-	err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
-		var err error
-		deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
-		if err != nil {
-			return false, err
-		}
-		oldRSs, allOldRSs, newRS, err = deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
-		if err != nil {
-			return false, err
-		}
-		if newRS == nil {
-			// New RS hasn't been created yet.
-			return false, nil
-		}
-		allRSs = append(oldRSs, newRS)
-		// The old/new ReplicaSets need to contain the pod-template-hash label
-		for i := range allRSs {
-			if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
-				return false, nil
-			}
-		}
-		totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
-		maxCreated := *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)
-		if totalCreated > maxCreated {
-			logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
-			logPodsOfDeployment(c, deployment, allRSs)
-			return false, fmt.Errorf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
-		}
-		minAvailable := deploymentutil.MinAvailable(deployment)
-		if deployment.Status.AvailableReplicas < minAvailable {
-			logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
-			logPodsOfDeployment(c, deployment, allRSs)
-			return false, fmt.Errorf("total pods available: %d, less than the min required: %d", deployment.Status.AvailableReplicas, minAvailable)
-		}
-
-		// When the deployment status and its underlying resources reach the desired state, we're done
-		return deploymentutil.DeploymentComplete(deployment, &deployment.Status), nil
-	})
-
-	if err == wait.ErrWaitTimeout {
-		logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
-		logPodsOfDeployment(c, deployment, allRSs)
-	}
-	if err != nil {
-		return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.Name, err)
-	}
-	return nil
+// Waits for the deployment to complete, and check rolling update strategy isn't broken at any times.
+// Rolling update strategy should not be broken during a rolling update.
+func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *extensions.Deployment) error {
+	return testutils.WaitForDeploymentCompleteAndCheckRolling(c, d, Logf, Poll, pollLongTimeout)
 }
 
 // WaitForDeploymentUpdatedReplicasLTE waits for given deployment to be observed by the controller and has at least a number of updatedReplicas
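For illustration (a sketch, not part of the commit): the renamed wrappers make the soft/hard distinction explicit at the call site. Here is how the e2e tests above pick between them, where c, ns, and d are the usual test variables:

    // Sketch: choosing between the soft and hard completeness checks.
    deploy, err := c.Extensions().Deployments(ns).Create(d)
    Expect(err).NotTo(HaveOccurred())

    // Just created (or just scaled): max surge / max unavailable may be
    // legitimately violated, so the soft check is appropriate.
    err = framework.WaitForDeploymentComplete(c, deploy)
    Expect(err).NotTo(HaveOccurred())

    // Mid rolling update: the strategy bounds must hold at all times,
    // so the hard check is used instead.
    err = framework.WaitForDeploymentCompleteAndCheckRolling(c, deploy)
    Expect(err).NotTo(HaveOccurred())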
@@ -217,21 +164,7 @@ func WaitForDeploymentUpdatedReplicasLTE(c clientset.Interface, ns, deploymentNa
 // WaitForDeploymentRollbackCleared waits for given deployment either started rolling back or doesn't need to rollback.
-// Note that rollback should be cleared shortly, so we only wait for 1 minute here to fail early.
 func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string) error {
-	err := wait.Poll(Poll, 1*time.Minute, func() (bool, error) {
-		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
-		if err != nil {
-			return false, err
-		}
-		// Rollback not set or is kicked off
-		if deployment.Spec.RollbackTo == nil {
-			return true, nil
-		}
-		return false, nil
-	})
-	if err != nil {
-		return fmt.Errorf("error waiting for deployment %s rollbackTo to be cleared: %v", deploymentName, err)
-	}
-	return nil
+	return testutils.WaitForDeploymentRollbackCleared(c, ns, deploymentName, Poll, pollShortTimeout)
 }
 
 // WatchRecreateDeployment watches Recreate deployments and ensures no new pods will run at the same time with
@@ -290,10 +223,6 @@ func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deploymen
 	testutils.LogPodsOfDeployment(c, deployment, rsList, Logf)
 }
 
-func WaitForDeploymentCompletes(c clientset.Interface, deployment *extensions.Deployment) error {
-	return testutils.WaitForDeploymentCompletes(c, deployment, Logf, Poll, pollLongTimeout)
-}
-
 func WaitForDeploymentRevision(c clientset.Interface, d *extensions.Deployment, targetRevision string) error {
 	err := wait.PollImmediate(Poll, pollLongTimeout, func() (bool, error) {
 		deployment, err := c.ExtensionsV1beta1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
@@ -308,3 +237,8 @@ func WaitForDeploymentRevision(c clientset.Interface, d *extensions.Deployment,
 	}
 	return nil
 }
+
+// CheckDeploymentRevisionAndImage checks if the input deployment's and its new replica set's revision and image are as expected.
+func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName, revision, image string) error {
+	return testutils.CheckDeploymentRevisionAndImage(c, ns, deploymentName, revision, image)
+}
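The same migration shape recurs throughout the e2e file above; shown once in isolation as a sketch, where ns, deploymentName, and deploymentImage come from the surrounding test:

    // Before: Ginkgo-asserting helper private to the e2e suite.
    //   _, newRS := checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)
    // After: error-returning helper shared through the framework package.
    err = framework.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
    Expect(err).NotTo(HaveOccurred())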
@@ -63,7 +63,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
 	framework.ExpectNoError(err)
 
 	By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
-	framework.ExpectNoError(framework.WaitForDeploymentCompletes(c, deployment))
+	framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
 
 	By(fmt.Sprintf("Getting replicaset revision 1 of deployment %q", deploymentName))
 	rsSelector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
@@ -87,7 +87,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
 	framework.ExpectNoError(err)
 
 	By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
-	framework.ExpectNoError(framework.WaitForDeploymentCompletes(c, deployment))
+	framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
 
 	By(fmt.Sprintf("Getting replicasets revision 1 and 2 of deployment %q", deploymentName))
 	rsList, err = rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()})
@@ -153,7 +153,7 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
 	Expect(deployment.Annotations[deploymentutil.RevisionAnnotation]).To(Equal("2"))
 
 	By(fmt.Sprintf("Waiting for deployment %q to complete adoption", deploymentName))
-	framework.ExpectNoError(framework.WaitForDeploymentCompletes(c, deployment))
+	framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
 
 	// Verify the upgraded deployment is active by scaling up the deployment by 1
 	By(fmt.Sprintf("Scaling up replicaset of deployment %q by 1", deploymentName))
@@ -163,7 +163,7 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
 	framework.ExpectNoError(err)
 
 	By(fmt.Sprintf("Waiting for deployment %q to complete after scaling", deploymentName))
-	framework.ExpectNoError(framework.WaitForDeploymentCompletes(c, deployment))
+	framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
 }
 
 // Teardown cleans up any remaining resources.
@@ -59,8 +59,9 @@ func TestNewDeployment(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	// Make sure the Deployment status becomes valid while manually marking Deployment pods as ready at the same time
-	if err := tester.waitForDeploymentStatusValidAndMarkPodsReady(); err != nil {
+	// Make sure the Deployment completes while manually marking Deployment pods as ready at the same time.
+	// Use soft check because this deployment was just created and rolling update strategy might be violated.
+	if err := tester.waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
 		t.Fatal(err)
 	}
 
@@ -187,8 +188,9 @@ func TestPausedDeployment(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	// Make sure the Deployment status becomes valid while manually marking Deployment pods as ready at the same time
-	if err := tester.waitForDeploymentStatusValidAndMarkPodsReady(); err != nil {
+	// Make sure the Deployment completes while manually marking Deployment pods as ready at the same time.
+	// Use soft check because this deployment was just created and rolling update strategy might be violated.
+	if err := tester.waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
 		t.Fatal(err)
 	}
 
@@ -271,8 +273,9 @@ func TestScalePausedDeployment(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	// Make sure the Deployment status becomes valid while manually marking Deployment pods as ready at the same time
-	if err := tester.waitForDeploymentStatusValidAndMarkPodsReady(); err != nil {
+	// Make sure the Deployment completes while manually marking Deployment pods as ready at the same time.
+	// Use soft check because this deployment was just created and rolling update strategy might be violated.
+	if err := tester.waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
 		t.Fatal(err)
 	}
 
@@ -315,8 +318,9 @@ func TestScalePausedDeployment(t *testing.T) {
 		t.Errorf("expected new replicaset replicas = %d, got %d", newReplicas, *rs.Spec.Replicas)
 	}
 
-	// Make sure the Deployment status becomes valid while manually marking Deployment pods as ready at the same time
-	if err := tester.waitForDeploymentStatusValidAndMarkPodsReady(); err != nil {
+	// Make sure the Deployment completes while manually marking Deployment pods as ready at the same time.
+	// Use soft check because this deployment was just scaled and rolling update strategy might be violated.
+	if err := tester.waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
 		t.Fatal(err)
 	}
 }
@@ -381,3 +385,116 @@ func TestDeploymentHashCollision(t *testing.T) {
 		t.Fatal(err)
 	}
 }
+
+// Deployment supports rollback even when there's old replica set without revision.
+func TestRollbackDeploymentRSNoRevision(t *testing.T) {
+	s, closeFn, rm, dc, informers, c := dcSetup(t)
+	defer closeFn()
+	name := "test-rollback-no-revision-deployment"
+	ns := framework.CreateTestingNamespace(name, s, t)
+	defer framework.DeleteTestingNamespace(ns, s, t)
+
+	// Create an old RS without revision
+	rsName := "test-rollback-no-revision-controller"
+	rsReplicas := int32(1)
+	rs := newReplicaSet(rsName, ns.Name, rsReplicas)
+	rs.Annotations = make(map[string]string)
+	rs.Annotations["make"] = "difference"
+	rs.Spec.Template.Spec.Containers[0].Image = "different-image"
+	_, err := c.ExtensionsV1beta1().ReplicaSets(ns.Name).Create(rs)
+	if err != nil {
+		t.Fatalf("failed to create replicaset %s: %v", rsName, err)
+	}
+
+	replicas := int32(1)
+	tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)}
+	oriImage := tester.deployment.Spec.Template.Spec.Containers[0].Image
+
+	// Create a deployment which have different template than the replica set created above.
+	if tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment); err != nil {
+		t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
+	}
+
+	// Start informer and controllers
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	informers.Start(stopCh)
+	go rm.Run(5, stopCh)
+	go dc.Run(5, stopCh)
+
+	// Wait for the Deployment to be updated to revision 1
+	if err = tester.waitForDeploymentRevisionAndImage("1", fakeImage); err != nil {
+		t.Fatal(err)
+	}
+
+	// 1. Rollback to the last revision
+	//    Since there's only 1 revision in history, it should still be revision 1
+	revision := int64(0)
+	rollback := newDeploymentRollback(tester.deployment.Name, nil, revision)
+	if err = c.ExtensionsV1beta1().Deployments(ns.Name).Rollback(rollback); err != nil {
+		t.Fatalf("failed to roll back deployment %s to last revision: %v", tester.deployment.Name, err)
+	}
+
+	// Wait for the deployment to start rolling back
+	if err = tester.waitForDeploymentRollbackCleared(); err != nil {
+		t.Fatalf("failed to roll back deployment %s to last revision: %v", tester.deployment.Name, err)
+	}
+	// TODO: report RollbackRevisionNotFound in deployment status and check it here
+
+	// The pod template shouldn't change since there's no last revision
+	// Check if the deployment is still revision 1 and still has the old pod template
+	err = tester.checkDeploymentRevisionAndImage("1", oriImage)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// 2. Update the deployment to revision 2.
+	updatedImage := "update"
+	tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
+		update.Spec.Template.Spec.Containers[0].Name = updatedImage
+		update.Spec.Template.Spec.Containers[0].Image = updatedImage
+	})
+	if err != nil {
+		t.Fatalf("failed updating deployment %s: %v", tester.deployment.Name, err)
+	}
+
+	// Use observedGeneration to determine if the controller noticed the pod template update.
+	// Wait for the controller to notice the resume.
+	if err = tester.waitForObservedDeployment(tester.deployment.Generation); err != nil {
+		t.Fatal(err)
+	}
+
+	// Wait for it to be updated to revision 2
+	if err = tester.waitForDeploymentRevisionAndImage("2", updatedImage); err != nil {
+		t.Fatal(err)
+	}
+
+	// Wait for the Deployment to complete while manually marking Deployment pods as ready at the same time
+	if err = tester.waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady(); err != nil {
+		t.Fatal(err)
+	}
+
+	// 3. Update the deploymentRollback to rollback to revision 1
+	revision = int64(1)
+	rollback = newDeploymentRollback(tester.deployment.Name, nil, revision)
+	if err = c.ExtensionsV1beta1().Deployments(ns.Name).Rollback(rollback); err != nil {
+		t.Fatalf("failed to roll back deployment %s to revision %d: %v", tester.deployment.Name, revision, err)
+	}
+
+	// Wait for the deployment to start rolling back
+	if err = tester.waitForDeploymentRollbackCleared(); err != nil {
+		t.Fatalf("failed to roll back deployment %s to revision %d: %v", tester.deployment.Name, revision, err)
+	}
+	// TODO: report RollbackDone in deployment status and check it here
+
+	// The pod template should be updated to the one in revision 1
+	// Wait for it to be updated to revision 3
+	if err = tester.waitForDeploymentRevisionAndImage("3", oriImage); err != nil {
+		t.Fatal(err)
+	}
+
+	// Wait for the Deployment to complete while manually marking Deployment pods as ready at the same time
+	if err = tester.waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady(); err != nil {
+		t.Fatal(err)
+	}
+}
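One subtlety the test above relies on: a rollback never reuses an old revision number. The history runs 1 (template A) → 2 (template B) → rollback to 1 → 3 (template A again), and revision 0 in a DeploymentRollback means "the last revision". A minimal sketch of issuing both kinds of rollback with the helper from this commit (variable names reuse the test's):

    // Sketch: revision 0 targets the last revision; a positive value targets
    // that revision explicitly. Both shapes appear in the test above.
    rollback := newDeploymentRollback(tester.deployment.Name, nil, 0) // last revision
    if err := c.ExtensionsV1beta1().Deployments(ns.Name).Rollback(rollback); err != nil {
        t.Fatalf("rollback failed: %v", err)
    }
    rollback = newDeploymentRollback(tester.deployment.Name, nil, 1) // explicit revision 1
    if err := c.ExtensionsV1beta1().Deployments(ns.Name).Rollback(rollback); err != nil {
        t.Fatalf("rollback failed: %v", err)
    }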
@@ -41,8 +41,8 @@ const (
 	pollInterval = 100 * time.Millisecond
 	pollTimeout  = 60 * time.Second
 
-	fakeImageName = "fake-name"
-	fakeImage     = "fakeimage"
+	fakeContainerName = "fake-name"
+	fakeImage         = "fakeimage"
 )
 
 var pauseFn = func(update *v1beta1.Deployment) {
@@ -87,7 +87,7 @@ func newDeployment(name, ns string, replicas int32) *v1beta1.Deployment {
 				Spec: v1.PodSpec{
 					Containers: []v1.Container{
 						{
-							Name:  fakeImageName,
+							Name:  fakeContainerName,
 							Image: fakeImage,
 						},
 					},
@@ -97,6 +97,46 @@ func newDeployment(name, ns string, replicas int32) *v1beta1.Deployment {
 	}
 }
 
+func newReplicaSet(name, ns string, replicas int32) *v1beta1.ReplicaSet {
+	return &v1beta1.ReplicaSet{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ReplicaSet",
+			APIVersion: "extensions/v1beta1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: ns,
+			Name:      name,
+		},
+		Spec: v1beta1.ReplicaSetSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: testLabels(),
+			},
+			Replicas: &replicas,
+			Template: v1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: testLabels(),
+				},
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{
+							Name:  fakeContainerName,
+							Image: fakeImage,
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func newDeploymentRollback(name string, annotations map[string]string, revision int64) *v1beta1.DeploymentRollback {
+	return &v1beta1.DeploymentRollback{
+		Name:               name,
+		UpdatedAnnotations: annotations,
+		RollbackTo:         v1beta1.RollbackConfig{Revision: revision},
+	}
+}
+
 // dcSetup sets up necessities for Deployment integration test, including master, apiserver, informers, and clientset
 func dcSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, *replicaset.ReplicaSetController, *deployment.DeploymentController, informers.SharedInformerFactory, clientset.Interface) {
 	masterConfig := framework.NewIntegrationTestMasterConfig()
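As a usage note (illustrative, not from the commit), the two constructors above are plain object builders that a test wires to the API like so; "my-ns" and "my-deployment" are placeholders:

    // Sketch: building an orphan ReplicaSet and a rollback request with the
    // helpers above. "my-ns" and "my-deployment" are placeholder names.
    rs := newReplicaSet("old-rs", "my-ns", 1)
    if _, err := c.ExtensionsV1beta1().ReplicaSets("my-ns").Create(rs); err != nil {
        t.Fatalf("failed to create replicaset: %v", err)
    }
    rollback := newDeploymentRollback("my-deployment", nil, 1)
    if err := c.ExtensionsV1beta1().Deployments("my-ns").Rollback(rollback); err != nil {
        t.Fatalf("failed to roll back: %v", err)
    }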
@@ -198,18 +238,42 @@ func (d *deploymentTester) markAllPodsReady() {
 	}
 }
 
-func (d *deploymentTester) waitForDeploymentStatusValid() error {
-	return testutil.WaitForDeploymentStatusValid(d.c, d.deployment, d.t.Logf, pollInterval, pollTimeout)
+// Waits for the deployment to complete, and check rolling update strategy isn't broken at any times.
+// Rolling update strategy should not be broken during a rolling update.
+func (d *deploymentTester) waitForDeploymentCompleteAndCheckRolling() error {
+	return testutil.WaitForDeploymentCompleteAndCheckRolling(d.c, d.deployment, d.t.Logf, pollInterval, pollTimeout)
 }
 
-// waitForDeploymentStatusValidAndMarkPodsReady waits for the Deployment status to become valid
+// Waits for the deployment to complete, and don't check if rolling update strategy is broken.
+// Rolling update strategy is used only during a rolling update, and can be violated in other situations,
+// such as shortly after a scaling event or the deployment is just created.
+func (d *deploymentTester) waitForDeploymentComplete() error {
+	return testutil.WaitForDeploymentComplete(d.c, d.deployment, d.t.Logf, pollInterval, pollTimeout)
+}
+
+// waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady waits for the Deployment to complete
 // while marking all Deployment pods as ready at the same time.
-func (d *deploymentTester) waitForDeploymentStatusValidAndMarkPodsReady() error {
+// Uses hard check to make sure rolling update strategy is not violated at any times.
+func (d *deploymentTester) waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady() error {
 	// Manually mark all Deployment pods as ready in a separate goroutine
 	go d.markAllPodsReady()
 
-	// Make sure the Deployment status is valid while Deployment pods are becoming ready
-	err := d.waitForDeploymentStatusValid()
+	// Wait for the Deployment status to complete while Deployment pods are becoming ready
+	err := d.waitForDeploymentCompleteAndCheckRolling()
 	if err != nil {
 		return fmt.Errorf("failed to wait for Deployment %s to complete: %v", d.deployment.Name, err)
 	}
 	return nil
 }
 
+// waitForDeploymentCompleteAndMarkPodsReady waits for the Deployment to complete
+// while marking all Deployment pods as ready at the same time.
+func (d *deploymentTester) waitForDeploymentCompleteAndMarkPodsReady() error {
+	// Manually mark all Deployment pods as ready in a separate goroutine
+	go d.markAllPodsReady()
+
+	// Wait for the Deployment status to complete using soft check, while Deployment pods are becoming ready
+	err := d.waitForDeploymentComplete()
+	if err != nil {
+		return fmt.Errorf("failed to wait for Deployment status %s: %v", d.deployment.Name, err)
+	}
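Both *AndMarkPodsReady helpers share one pattern: an integration test runs the apiserver and controllers without a kubelet, so nothing would ever report pods Ready on its own; a goroutine fakes readiness while the main goroutine polls for completion. A minimal sketch of that pattern in isolation (markReady and waitComplete stand in for d.markAllPodsReady and the chosen wait method; fmt is assumed imported):

    // Sketch of the wait-while-faking-readiness pattern used above.
    func waitAndMarkReady(markReady func(), waitComplete func() error) error {
        go markReady() // no kubelet in integration tests, so fake pod readiness
        if err := waitComplete(); err != nil {
            return fmt.Errorf("deployment did not complete: %v", err)
        }
        return nil
    }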
@@ -260,3 +324,13 @@ func (d *deploymentTester) expectNewReplicaSet() (*v1beta1.ReplicaSet, error) {
 func (d *deploymentTester) updateReplicaSet(name string, applyUpdate testutil.UpdateReplicaSetFunc) (*v1beta1.ReplicaSet, error) {
 	return testutil.UpdateReplicaSetWithRetries(d.c, d.deployment.Namespace, name, applyUpdate, d.t.Logf, pollInterval, pollTimeout)
 }
+
+// waitForDeploymentRollbackCleared waits for deployment either started rolling back or doesn't need to rollback.
+func (d *deploymentTester) waitForDeploymentRollbackCleared() error {
+	return testutil.WaitForDeploymentRollbackCleared(d.c, d.deployment.Namespace, d.deployment.Name, pollInterval, pollTimeout)
+}
+
+// checkDeploymentRevisionAndImage checks if the input deployment's and its new replica set's revision and image are as expected.
+func (d *deploymentTester) checkDeploymentRevisionAndImage(revision, image string) error {
+	return testutil.CheckDeploymentRevisionAndImage(d.c, d.deployment.Namespace, d.deployment.Name, revision, image)
+}
@@ -68,58 +68,34 @@ func LogPodsOfDeployment(c clientset.Interface, deployment *extensions.Deploymen
 	}
 }
 
-// Waits for the deployment status to become valid (i.e. max unavailable and max surge aren't violated anymore).
-// Note that the status should stay valid at all times unless shortly after a scaling event or the deployment is just created.
-// To verify that the deployment status is valid and wait for the rollout to finish, use WaitForDeploymentStatus instead.
-func WaitForDeploymentStatusValid(c clientset.Interface, d *extensions.Deployment, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
+// Waits for the deployment to complete.
+// If during a rolling update (rolling == true), returns an error if the deployment's
+// rolling update strategy (max unavailable or max surge) is broken at any times.
+// It's not seen as a rolling update if shortly after a scaling event or the deployment is just created.
+func waitForDeploymentCompleteMaybeCheckRolling(c clientset.Interface, d *extensions.Deployment, rolling bool, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
 	var (
-		oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet
-		newRS                     *extensions.ReplicaSet
-		deployment                *extensions.Deployment
-		reason                    string
+		deployment *extensions.Deployment
+		reason     string
 	)
 
-	err := wait.Poll(pollInterval, pollTimeout, func() (bool, error) {
+	err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
 		var err error
-		deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
+		deployment, err = c.ExtensionsV1beta1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
-		oldRSs, allOldRSs, newRS, err = deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
-		if err != nil {
-			return false, err
-		}
-		if newRS == nil {
-			// New RC hasn't been created yet.
-			reason = "new replica set hasn't been created yet"
-			logf(reason)
-			return false, nil
-		}
-		allRSs = append(oldRSs, newRS)
-		// The old/new ReplicaSets need to contain the pod-template-hash label
-		for i := range allRSs {
-			if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
-				reason = "all replica sets need to contain the pod-template-hash label"
-				logf(reason)
-				return false, nil
-			}
-		}
-		totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
-		maxCreated := *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)
-		if totalCreated > maxCreated {
-			reason = fmt.Sprintf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
-			logf(reason)
-			return false, nil
-		}
-		minAvailable := deploymentutil.MinAvailable(deployment)
-		if deployment.Status.AvailableReplicas < minAvailable {
-			reason = fmt.Sprintf("total pods available: %d, less than the min required: %d", deployment.Status.AvailableReplicas, minAvailable)
-			logf(reason)
-			return false, nil
-		}
+
+		// If during a rolling update, make sure rolling update strategy isn't broken at any times.
+		if rolling {
+			reason, err = checkRollingUpdateStatus(c, deployment, logf)
+			if err != nil {
+				return false, err
+			}
+		}
 
 		// When the deployment status and its underlying resources reach the desired state, we're done
-		if deploymentutil.DeploymentComplete(deployment, &deployment.Status) {
+		if deploymentutil.DeploymentComplete(d, &deployment.Status) {
 			return true, nil
 		}
 
@@ -130,8 +106,6 @@ func WaitForDeploymentStatusValid(c clientset.Interface, d *extensions.Deploymen
 	})
 
 	if err == wait.ErrWaitTimeout {
-		LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, logf)
-		LogPodsOfDeployment(c, deployment, allRSs, logf)
 		err = fmt.Errorf("%s", reason)
 	}
 	if err != nil {
@@ -140,6 +114,58 @@ func WaitForDeploymentStatusValid(c clientset.Interface, d *extensions.Deploymen
 	return nil
 }
 
+func checkRollingUpdateStatus(c clientset.Interface, deployment *extensions.Deployment, logf LogfFn) (string, error) {
+	var reason string
+	oldRSs, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1())
+	if err != nil {
+		return "", err
+	}
+	if newRS == nil {
+		// New RC hasn't been created yet.
+		reason = "new replica set hasn't been created yet"
+		return reason, nil
+	}
+	allRSs := append(oldRSs, newRS)
+	// The old/new ReplicaSets need to contain the pod-template-hash label
+	for i := range allRSs {
+		if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
+			reason = "all replica sets need to contain the pod-template-hash label"
+			return reason, nil
+		}
+	}
+
+	// Check max surge and min available
+	totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
+	maxCreated := *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)
+	if totalCreated > maxCreated {
+		LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, logf)
+		LogPodsOfDeployment(c, deployment, allRSs, logf)
+		return "", fmt.Errorf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
+	}
+	minAvailable := deploymentutil.MinAvailable(deployment)
+	if deployment.Status.AvailableReplicas < minAvailable {
+		LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, logf)
+		LogPodsOfDeployment(c, deployment, allRSs, logf)
+		return "", fmt.Errorf("total pods available: %d, less than the min required: %d", deployment.Status.AvailableReplicas, minAvailable)
+	}
+	return "", nil
+}
+
+// Waits for the deployment to complete, and check rolling update strategy isn't broken at any times.
+// Rolling update strategy should not be broken during a rolling update.
+func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *extensions.Deployment, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
+	rolling := true
+	return waitForDeploymentCompleteMaybeCheckRolling(c, d, rolling, logf, pollInterval, pollTimeout)
+}
+
+// Waits for the deployment to complete, and don't check if rolling update strategy is broken.
+// Rolling update strategy is used only during a rolling update, and can be violated in other situations,
+// such as shortly after a scaling event or the deployment is just created.
+func WaitForDeploymentComplete(c clientset.Interface, d *extensions.Deployment, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
+	rolling := false
+	return waitForDeploymentCompleteMaybeCheckRolling(c, d, rolling, logf, pollInterval, pollTimeout)
+}
+
 // WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image.
 // Note that deployment revision and its new RS revision should be updated shortly, so we only wait for 1 minute here to fail early.
 func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
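To make the two bounds in checkRollingUpdateStatus concrete: for a deployment with spec.Replicas = 3, maxSurge = 1, and maxUnavailable = 1, at most 3 + 1 = 4 pods may exist across all ReplicaSets and at least 3 - 1 = 2 must be available at every poll. A sketch of the same arithmetic with illustrative values (not from the commit):

    // Illustrative arithmetic behind checkRollingUpdateStatus above.
    replicas := int32(3)       // deployment.Spec.Replicas
    maxSurge := int32(1)       // deploymentutil.MaxSurge(deployment)
    maxUnavailable := int32(1) // resolved from the rolling update strategy
    maxCreated := replicas + maxSurge         // 4: upper bound on total pods created
    minAvailable := replicas - maxUnavailable // 2: lower bound on available pods
    _, _ = maxCreated, minAvailable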
@@ -148,45 +174,17 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName
 	var reason string
 	err := wait.Poll(pollInterval, pollTimeout, func() (bool, error) {
 		var err error
-		deployment, err = c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
+		deployment, err = c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
-		// The new ReplicaSet needs to be non-nil and contain the pod-template-hash label
 		newRS, err = deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
 		if err != nil {
 			return false, err
 		}
-		if newRS == nil {
-			reason = fmt.Sprintf("New replica set for deployment %q is yet to be created", deployment.Name)
-			logf(reason)
-			return false, nil
-		}
-		if !labelsutil.SelectorHasLabel(newRS.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
-			reason = fmt.Sprintf("New replica set %q doesn't have DefaultDeploymentUniqueLabelKey", newRS.Name)
-			logf(reason)
-			return false, nil
-		}
-		// Check revision of this deployment, and of the new replica set of this deployment
-		if deployment.Annotations == nil || deployment.Annotations[deploymentutil.RevisionAnnotation] != revision {
-			reason = fmt.Sprintf("Deployment %q doesn't have the required revision set", deployment.Name)
-			logf(reason)
-			return false, nil
-		}
-		if !containsImage(deployment.Spec.Template.Spec.Containers, image) {
-			reason = fmt.Sprintf("Deployment %q doesn't have the required image %s set", deployment.Name, image)
-			logf(reason)
-			return false, nil
-		}
-		if newRS.Annotations == nil || newRS.Annotations[deploymentutil.RevisionAnnotation] != revision {
-			reason = fmt.Sprintf("New replica set %q doesn't have the required revision set", newRS.Name)
-			logf(reason)
-			return false, nil
-		}
-		if !containsImage(newRS.Spec.Template.Spec.Containers, image) {
-			reason = fmt.Sprintf("New replica set %q doesn't have the required image %s.", newRS.Name, image)
+		if err := checkRevisionAndImage(deployment, newRS, revision, image); err != nil {
+			reason = err.Error()
 			logf(reason)
 			return false, nil
 		}
@@ -205,6 +203,46 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName
 	return nil
 }
 
+// CheckDeploymentRevisionAndImage checks if the input deployment's and its new replica set's revision and image are as expected.
+func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName, revision, image string) error {
+	deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
+	if err != nil {
+		return fmt.Errorf("unable to get deployment %s during revision check: %v", deploymentName, err)
+	}
+
+	// Check revision of the new replica set of this deployment
+	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
+	if err != nil {
+		return fmt.Errorf("unable to get new replicaset of deployment %s during revision check: %v", deploymentName, err)
+	}
+	return checkRevisionAndImage(deployment, newRS, revision, image)
+}
+
+func checkRevisionAndImage(deployment *extensions.Deployment, newRS *extensions.ReplicaSet, revision, image string) error {
+	// The new ReplicaSet needs to be non-nil and contain the pod-template-hash label
+	if newRS == nil {
+		return fmt.Errorf("new replicaset for deployment %q is yet to be created", deployment.Name)
+	}
+	if !labelsutil.SelectorHasLabel(newRS.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
+		return fmt.Errorf("new replica set %q doesn't have %q label selector", newRS.Name, extensions.DefaultDeploymentUniqueLabelKey)
+	}
+	// Check revision of this deployment, and of the new replica set of this deployment
+	if deployment.Annotations == nil || deployment.Annotations[deploymentutil.RevisionAnnotation] != revision {
+		return fmt.Errorf("deployment %q doesn't have the required revision set", deployment.Name)
+	}
+	if newRS.Annotations == nil || newRS.Annotations[deploymentutil.RevisionAnnotation] != revision {
+		return fmt.Errorf("new replicaset %q doesn't have the required revision set", newRS.Name)
+	}
+	// Check the image of this deployment, and of the new replica set of this deployment
+	if !containsImage(deployment.Spec.Template.Spec.Containers, image) {
+		return fmt.Errorf("deployment %q doesn't have the required image %s set", deployment.Name, image)
+	}
+	if !containsImage(newRS.Spec.Template.Spec.Containers, image) {
+		return fmt.Errorf("new replica set %q doesn't have the required image %s.", newRS.Name, image)
+	}
+	return nil
+}
+
 func containsImage(containers []v1.Container, imageName string) bool {
 	for _, container := range containers {
 		if container.Image == imageName {
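The refactor yields two entry points over one validation core: WaitForDeploymentRevisionAndImage polls and treats a checkRevisionAndImage failure as "not yet", while CheckDeploymentRevisionAndImage asserts the current state once. An illustrative call of the one-shot form ("default", "my-deployment", and the image tag are placeholders):

    // Sketch: one-shot check after a rollout has settled.
    if err := testutils.CheckDeploymentRevisionAndImage(c, "default", "my-deployment", "2", "nginx:1.14"); err != nil {
        t.Fatalf("revision/image check failed: %v", err)
    }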
@@ -245,25 +283,21 @@ func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string,
 	}, desiredGeneration, 2*time.Second, 1*time.Minute)
 }
 
-// Pool until deployment status and its underlying resources reach the desired state.
-func WaitForDeploymentCompletes(c clientset.Interface, d *extensions.Deployment, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
-	var reason string
+// WaitForDeploymentRollbackCleared waits for given deployment either started rolling back or doesn't need to rollback.
+func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string, pollInterval, pollTimeout time.Duration) error {
 	err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
-		deployment, err := c.ExtensionsV1beta1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
+		deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
-		// When the deployment status and its underlying resources reach the desired state, we're done
-		if deploymentutil.DeploymentComplete(d, &deployment.Status) {
+		// Rollback not set or is kicked off
+		if deployment.Spec.RollbackTo == nil {
 			return true, nil
 		}
-		reason = fmt.Sprintf("deployment status: %#v", deployment.Status)
-		logf(reason)
 		return false, nil
 	})
-
-	if err == wait.ErrWaitTimeout {
-		err = fmt.Errorf("timeout waiting for deployment to complete: %v, most recent deployment status: %s", err, reason)
+	if err != nil {
+		return fmt.Errorf("error waiting for deployment %s rollbackTo to be cleared: %v", deploymentName, err)
 	}
-	return err
+	return nil
 }