mirror of https://github.com/k3s-io/k3s

use framework.ExpectNoError() for daemon_set.go and deployment.go in e2e/apps

parent d881c0d77b
commit 4841e5b98c
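
The patch is mechanical: every bare gomega.Expect(err).NotTo(gomega.HaveOccurred()) in these two e2e suites becomes framework.ExpectNoError(err), which takes an optional explanation (a format string plus arguments) that is reported with the failure. A minimal sketch of the before/after pattern in Go; the createDaemonSet helper is hypothetical and stands in for the real client calls shown in the hunks below, and the imports are assumed to be the ones the suites already use (gomega and the e2e framework package):

// Before: raw gomega assertion, no extra context when it fails.
err := createDaemonSet(c, ns, dsName) // hypothetical helper returning an error
gomega.Expect(err).NotTo(gomega.HaveOccurred())

// After: the framework helper; the optional message is attached to the failure.
err = createDaemonSet(c, ns, dsName)
framework.ExpectNoError(err, "failed to create DaemonSet %q", dsName)
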
e2e/apps/daemon_set.go
@@ -89,7 +89,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
e2elog.Logf("unable to dump pods: %v", err)
|
||||
}
|
||||
err = clearDaemonSetNodeLabels(f.ClientSet)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
f = framework.NewDefaultFramework("daemonsets")
|
||||
|
@@ -106,12 +106,12 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
c = f.ClientSet
|
||||
|
||||
updatedNS, err := updateNamespaceAnnotations(c, ns)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ns = updatedNS.Name
|
||||
|
||||
err = clearDaemonSetNodeLabels(c)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
/*
|
||||
|
@@ -124,19 +124,19 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
|
||||
ginkgo.By(fmt.Sprintf("Creating simple DaemonSet %q", dsName))
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pod to start")
|
||||
err = checkDaemonStatus(f, dsName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Stop a daemon pod, check that the daemon pod is revived.")
|
||||
podList := listDaemonPods(c, ns, label)
|
||||
pod := podList.Items[0]
|
||||
err = c.CoreV1().Pods(ns).Delete(pod.Name, nil)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pod to revive")
|
||||
})
|
||||
|
@@ -153,7 +153,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
ds := newDaemonSet(dsName, image, complexLabel)
|
||||
ds.Spec.Template.Spec.NodeSelector = nodeSelector
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Initially, daemon pods should not be running on any nodes.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
|
||||
|
@@ -169,14 +169,14 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name}))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
|
||||
err = checkDaemonStatus(f, dsName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Update the node label to green, and wait for daemons to be unscheduled")
|
||||
nodeSelector[daemonsetColorLabel] = "green"
|
||||
greenNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
|
||||
framework.ExpectNoError(err, "error removing labels on node")
|
||||
gomega.Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))).
|
||||
NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes")
|
||||
|
||||
ginkgo.By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate")
|
||||
patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`,
|
||||
|
@@ -188,7 +188,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{greenNode.Name}))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
|
||||
err = checkDaemonStatus(f, dsName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
// We defer adding this test to conformance pending the disposition of moving DaemonSet scheduling logic to the
|
||||
|
@@ -216,7 +216,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
},
|
||||
}
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Initially, daemon pods should not be running on any nodes.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
|
||||
|
@@ -232,13 +232,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name}))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
|
||||
err = checkDaemonStatus(f, dsName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Remove the node label and wait for daemons to be unscheduled")
|
||||
_, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
|
||||
framework.ExpectNoError(err, "error removing labels on node")
|
||||
gomega.Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))).
|
||||
NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes")
|
||||
})
|
||||
|
||||
/*
|
||||
|
@@ -250,13 +250,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
|
||||
ginkgo.By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName))
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pod to start")
|
||||
err = checkDaemonStatus(f, dsName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Set a daemon pod's phase to 'Failed', check that the daemon pod is revived.")
|
||||
podList := listDaemonPods(c, ns, label)
|
||||
|
@@ -282,7 +282,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
ds := newDaemonSet(dsName, image, label)
|
||||
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.OnDeleteDaemonSetStrategyType}
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
|
@@ -290,7 +290,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
|
||||
// Check history and labels
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
waitForHistoryCreated(c, ns, label, 1)
|
||||
first := curHistory(listDaemonHistories(c, ns, label), ds)
|
||||
firstHash := first.Labels[apps.DefaultDaemonSetUniqueLabelKey]
|
||||
|
@@ -300,11 +300,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
ginkgo.By("Update daemon pods image.")
|
||||
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Check that daemon pods images aren't updated.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, image, 0))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Check that daemon pods are still running on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
|
@@ -312,7 +312,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
|
||||
// Check history and labels
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
waitForHistoryCreated(c, ns, label, 2)
|
||||
cur := curHistory(listDaemonHistories(c, ns, label), ds)
|
||||
gomega.Expect(cur.Revision).To(gomega.Equal(int64(2)))
|
||||
|
@@ -331,7 +331,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
ds := newDaemonSet(dsName, image, label)
|
||||
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
|
@@ -339,7 +339,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
|
||||
// Check history and labels
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
waitForHistoryCreated(c, ns, label, 1)
|
||||
cur := curHistory(listDaemonHistories(c, ns, label), ds)
|
||||
hash := cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
|
||||
|
@@ -349,18 +349,18 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
ginkgo.By("Update daemon pods image.")
|
||||
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Time to complete the rolling upgrade is proportional to the number of nodes in the cluster.
|
||||
// Get the number of nodes, and set the timeout appropriately.
|
||||
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
nodeCount := len(nodes.Items)
|
||||
retryTimeout := dsRetryTimeout + time.Duration(nodeCount*30)*time.Second
|
||||
|
||||
ginkgo.By("Check that daemon pods images are updated.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, retryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Check that daemon pods are still running on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
|
@@ -368,7 +368,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
|
||||
// Check history and labels
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
waitForHistoryCreated(c, ns, label, 2)
|
||||
cur = curHistory(listDaemonHistories(c, ns, label), ds)
|
||||
hash = cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
|
||||
|
@@ -389,7 +389,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
ds := newDaemonSet(dsName, image, label)
|
||||
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
e2elog.Logf("Check that daemon pods launch on every node of the cluster")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
|
@@ -401,11 +401,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) {
|
||||
update.Spec.Template.Spec.Containers[0].Image = newImage
|
||||
})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Make sure we're in the middle of a rollout
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkAtLeastOneNewPod(c, ns, label, newImage))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
pods := listDaemonPods(c, ns, label)
|
||||
var existingPods, newPods []*v1.Pod
|
||||
|
@@ -433,11 +433,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) {
|
||||
update.Spec.Template.Spec.Containers[0].Image = image
|
||||
})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
e2elog.Logf("Make sure DaemonSet rollback is complete")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, rollbackDS, image, 1))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// After rollback is done, compare current pods with previous old pods during rollout, to make sure they're not restarted
|
||||
pods = listDaemonPods(c, ns, label)
|
||||
|
@@ -487,7 +487,7 @@ func listDaemonPods(c clientset.Interface, ns string, label map[string]string) *
|
|||
selector := labels.Set(label).AsSelector()
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
podList, err := c.CoreV1().Pods(ns).List(options)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(len(podList.Items)).To(gomega.BeNumerically(">", 0))
|
||||
return podList
|
||||
}
|
||||
|
@@ -748,7 +748,7 @@ func listDaemonHistories(c clientset.Interface, ns string, label map[string]stri
|
|||
selector := labels.Set(label).AsSelector()
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
historyList, err := c.AppsV1().ControllerRevisions(ns).List(options)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(len(historyList.Items)).To(gomega.BeNumerically(">", 0))
|
||||
return historyList
|
||||
}
|
||||
|
@@ -761,7 +761,7 @@ func curHistory(historyList *apps.ControllerRevisionList, ds *apps.DaemonSet) *a
|
|||
// Every history should have the hash label
|
||||
gomega.Expect(len(history.Labels[apps.DefaultDaemonSetUniqueLabelKey])).To(gomega.BeNumerically(">", 0))
|
||||
match, err := daemon.Match(ds, history)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
if match {
|
||||
curHistory = history
|
||||
foundCurHistories++
e2e/apps/deployment.go
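
The deployment.go hunks below make the same substitution and additionally unroll one-line assertions that wrapped a call directly inside gomega.Expect(...), so the error lands in an explicit variable before being checked. A sketch of that second pattern, using the e2edeploy.WaitForDeploymentComplete call that appears in the hunks that follow:

// Before: the call is asserted inline, which hides the error value.
gomega.Expect(e2edeploy.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred())

// After: keep the error in a variable, then check it with the framework helper.
err = e2edeploy.WaitForDeploymentComplete(c, deployment)
framework.ExpectNoError(err)
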
@@ -191,10 +191,11 @@ func newDeploymentRollback(name string, annotations map[string]string, revision
|
|||
|
||||
func stopDeployment(c clientset.Interface, ns, deploymentName string) {
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
e2elog.Logf("Deleting deployment %s", deploymentName)
|
||||
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(c, appsinternal.Kind("Deployment"), ns, deployment.Name))
|
||||
err = framework.DeleteResourceAndWaitForGC(c, appsinternal.Kind("Deployment"), ns, deployment.Name)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
e2elog.Logf("Ensuring deployment %s was deleted", deploymentName)
|
||||
_, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
|
||||
|
@@ -202,10 +203,10 @@ func stopDeployment(c clientset.Interface, ns, deploymentName string) {
|
|||
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
|
||||
e2elog.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
|
||||
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
rss, err := c.AppsV1().ReplicaSets(ns).List(options)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(rss.Items).Should(gomega.HaveLen(0))
|
||||
e2elog.Logf("Ensuring deployment %s's Pods were deleted", deploymentName)
|
||||
var pods *v1.PodList
|
||||
|
@@ -235,19 +236,19 @@ func testDeleteDeployment(f *framework.Framework) {
|
|||
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for it to be updated to revision 1
|
||||
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", NginxImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(newRS).NotTo(gomega.Equal(nilRs))
|
||||
stopDeployment(c, ns, deploymentName)
|
||||
}
|
||||
|
@@ -271,7 +272,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
|
|||
rs.Annotations = annotations
|
||||
e2elog.Logf("Creating replica set %q (going to be adopted)", rs.Name)
|
||||
_, err := c.AppsV1().ReplicaSets(ns).Create(rs)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
// Verify that the required pods have come up.
|
||||
err = framework.VerifyPodsRunning(c, ns, "sample-pod", false, replicas)
|
||||
framework.ExpectNoError(err, "error in waiting for pods to come up: %s", err)
|
||||
|
@@ -281,23 +282,23 @@ func testRollingUpdateDeployment(f *framework.Framework) {
|
|||
e2elog.Logf("Creating deployment %q", deploymentName)
|
||||
d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for it to be updated to revision 3546343826724305833.
|
||||
e2elog.Logf("Ensuring deployment %q gets the next revision from the one the adopted replica set %q has", deploy.Name, rs.Name)
|
||||
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", RedisImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
e2elog.Logf("Ensuring status for deployment %q is the expected", deploy.Name)
|
||||
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// There should be 1 old RS (nginx-controller, which is adopted)
|
||||
e2elog.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name)
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.AppsV1())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(len(allOldRSs)).Should(gomega.Equal(1))
|
||||
}
|
||||
|
||||
|
@@ -310,15 +311,16 @@ func testRecreateDeployment(f *framework.Framework) {
|
|||
e2elog.Logf("Creating deployment %q", deploymentName)
|
||||
d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, apps.RecreateDeploymentStrategyType)
|
||||
deployment, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for it to be updated to revision 1
|
||||
e2elog.Logf("Waiting deployment %q to be updated to revision 1", deploymentName)
|
||||
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", RedisImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
e2elog.Logf("Waiting deployment %q to complete", deploymentName)
|
||||
gomega.Expect(e2edeploy.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred())
|
||||
err = e2edeploy.WaitForDeploymentComplete(c, deployment)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Update deployment to delete redis pods and bring up nginx pods.
|
||||
e2elog.Logf("Triggering a new rollout for deployment %q", deploymentName)
|
||||
|
@@ -326,10 +328,11 @@ func testRecreateDeployment(f *framework.Framework) {
|
|||
update.Spec.Template.Spec.Containers[0].Name = NginxImageName
|
||||
update.Spec.Template.Spec.Containers[0].Image = NginxImage
|
||||
})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
e2elog.Logf("Watching deployment %q to verify that new pods will not run with olds pods", deploymentName)
|
||||
gomega.Expect(e2edeploy.WatchRecreateDeployment(c, deployment)).NotTo(gomega.HaveOccurred())
|
||||
err = e2edeploy.WatchRecreateDeployment(c, deployment)
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
// testDeploymentCleanUpPolicy tests that deployment supports cleanup policy
|
||||
|
@@ -346,7 +349,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
|
|||
replicas := int32(1)
|
||||
revisionHistoryLimit := utilpointer.Int32Ptr(0)
|
||||
_, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Verify that the required pods have come up.
|
||||
err = framework.VerifyPodsRunning(c, ns, "cleanup-pod", false, replicas)
|
||||
|
@@ -365,7 +368,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
|
|||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
w, err := c.CoreV1().Pods(ns).Watch(options)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
go func() {
|
||||
// There should be only one pod being created, which is the pod with the redis image.
|
||||
// The old RS shouldn't create new pod when deployment controller adding pod template hash label to its selector.
|
||||
|
@@ -395,11 +398,11 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
|
|||
d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
d.Spec.RevisionHistoryLimit = revisionHistoryLimit
|
||||
_, err = c.AppsV1().Deployments(ns).Create(d)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName))
|
||||
err = e2edeploy.WaitForDeploymentOldRSsNum(c, ns, deploymentName, int(*revisionHistoryLimit))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
// testRolloverDeployment tests that deployment supports rollover.
|
||||
|
@@ -417,14 +420,15 @@ func testRolloverDeployment(f *framework.Framework) {
|
|||
rsName := "test-rollover-controller"
|
||||
rsReplicas := int32(1)
|
||||
_, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
// Verify that the required pods have come up.
|
||||
err = framework.VerifyPodsRunning(c, ns, podName, false, rsReplicas)
|
||||
framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)
|
||||
|
||||
// Wait for replica set to become ready before adopting it.
|
||||
e2elog.Logf("Waiting for pods owned by replica set %q to become ready", rsName)
|
||||
gomega.Expect(replicaset.WaitForReadyReplicaSet(c, ns, rsName)).NotTo(gomega.HaveOccurred())
|
||||
err = replicaset.WaitForReadyReplicaSet(c, ns, rsName)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Create a deployment to delete nginx pods and instead bring up redis-slave pods.
|
||||
// We use a nonexistent image here, so that we make sure it won't finish
|
||||
|
@@ -440,25 +444,25 @@ func testRolloverDeployment(f *framework.Framework) {
|
|||
}
|
||||
newDeployment.Spec.MinReadySeconds = int32(10)
|
||||
_, err = c.AppsV1().Deployments(ns).Create(newDeployment)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Verify that the pods were scaled up and down as expected.
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
e2elog.Logf("Make sure deployment %q performs scaling operations", deploymentName)
|
||||
// Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
|
||||
err = e2edeploy.WaitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation)
|
||||
// Check if it's updated to revision 1 correctly
|
||||
e2elog.Logf("Check revision of new replica set for deployment %q", deploymentName)
|
||||
err = e2edeploy.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
e2elog.Logf("Ensure that both replica sets have 1 created replica")
|
||||
oldRS, err := c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
ensureReplicas(oldRS, int32(1))
|
||||
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
ensureReplicas(newRS, int32(1))
|
||||
|
||||
// The deployment is stuck, update it to rollover the above 2 ReplicaSets and bring up redis pods.
|
||||
|
@@ -468,29 +472,29 @@ func testRolloverDeployment(f *framework.Framework) {
|
|||
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
|
||||
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
|
||||
})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Use observedGeneration to determine if the controller noticed the pod template update.
|
||||
e2elog.Logf("Wait deployment %q to be observed by the deployment controller", deploymentName)
|
||||
err = e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for it to be updated to revision 2
|
||||
e2elog.Logf("Wait for revision update of deployment %q to 2", deploymentName)
|
||||
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
e2elog.Logf("Make sure deployment %q is complete", deploymentName)
|
||||
err = e2edeploy.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
e2elog.Logf("Ensure that both old replica sets have no replicas")
|
||||
oldRS, err = c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
ensureReplicas(oldRS, int32(0))
|
||||
// Not really the new replica set anymore but we GET by name so that's fine.
|
||||
newRS, err = c.AppsV1().ReplicaSets(ns).Get(newRS.Name, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
ensureReplicas(newRS, int32(0))
|
||||
}
|
||||
|
||||
|
@@ -520,18 +524,18 @@ func testRollbackDeployment(f *framework.Framework) {
|
|||
createAnnotation := map[string]string{"action": "create", "author": "node"}
|
||||
d.Annotations = createAnnotation
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for it to be updated to revision 1
|
||||
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Current newRS annotation should be "create"
|
||||
err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// 2. Update the deployment to create redis pods.
|
||||
updatedDeploymentImage := RedisImage
|
||||
|
@@ -542,66 +546,66 @@ func testRollbackDeployment(f *framework.Framework) {
|
|||
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
|
||||
update.Annotations = updateAnnotation
|
||||
})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Use observedGeneration to determine if the controller noticed the pod template update.
|
||||
err = e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for it to be updated to revision 2
|
||||
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = e2edeploy.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Current newRS annotation should be "update"
|
||||
err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// 3. Update the deploymentRollback to rollback to revision 1
|
||||
revision := int64(1)
|
||||
e2elog.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
|
||||
rollback := newDeploymentRollback(deploymentName, nil, revision)
|
||||
err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for the deployment to start rolling back
|
||||
err = e2edeploy.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
// TODO: report RollbackDone in deployment status and check it here
|
||||
|
||||
// Wait for it to be updated to revision 3
|
||||
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = e2edeploy.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Current newRS annotation should be "create", after the rollback
|
||||
err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// 4. Update the deploymentRollback to rollback to last revision
|
||||
revision = 0
|
||||
e2elog.Logf("rolling back deployment %s to last revision", deploymentName)
|
||||
rollback = newDeploymentRollback(deploymentName, nil, revision)
|
||||
err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = e2edeploy.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for it to be updated to revision 4
|
||||
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = e2edeploy.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Current newRS annotation should be "update", after the rollback
|
||||
err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// 5. Update the deploymentRollback to rollback to revision 10
|
||||
// Since there's no revision 10 in history, it should stay as revision 4
|
||||
|
@@ -609,17 +613,17 @@ func testRollbackDeployment(f *framework.Framework) {
|
|||
e2elog.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
|
||||
rollback = newDeploymentRollback(deploymentName, nil, revision)
|
||||
err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for the deployment to start rolling back
|
||||
err = e2edeploy.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
// TODO: report RollbackRevisionNotFound in deployment status and check it here
|
||||
|
||||
// The pod template shouldn't change since there's no revision 10
|
||||
// Check if it's still revision 4 and still has the old pod template
|
||||
err = e2edeploy.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// 6. Update the deploymentRollback to rollback to revision 4
|
||||
// Since it's already revision 4, it should be no-op
|
||||
|
@@ -627,17 +631,17 @@ func testRollbackDeployment(f *framework.Framework) {
|
|||
e2elog.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
|
||||
rollback = newDeploymentRollback(deploymentName, nil, revision)
|
||||
err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for the deployment to start rolling back
|
||||
err = e2edeploy.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
// TODO: report RollbackTemplateUnchanged in deployment status and check it here
|
||||
|
||||
// The pod template shouldn't change since it's already revision 4
|
||||
// Check if it's still revision 4 and still has the old pod template
|
||||
err = e2edeploy.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
func randomScale(d *apps.Deployment, i int) {
|
||||
|
@@ -671,7 +675,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
|||
d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
|
||||
e2elog.Logf("Creating deployment %q", deploymentName)
|
||||
deployment, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
iterations := 20
|
||||
for i := 0; i < iterations; i++ {
|
||||
|
@@ -688,7 +692,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
|||
update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv)
|
||||
randomScale(update, i)
|
||||
})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
case n < 0.4:
|
||||
// rollback to the previous version
|
||||
|
@@ -699,7 +703,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
|||
}
|
||||
update.Annotations[apps.DeprecatedRollbackTo] = "0"
|
||||
})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
case n < 0.6:
|
||||
// just scaling
|
||||
|
@@ -707,7 +711,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
|||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
randomScale(update, i)
|
||||
})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
case n < 0.8:
|
||||
// toggling the deployment
|
||||
|
@@ -717,24 +721,24 @@ func testIterativeDeployments(f *framework.Framework) {
|
|||
update.Spec.Paused = true
|
||||
randomScale(update, i)
|
||||
})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
} else {
|
||||
e2elog.Logf("%02d: resuming deployment %q", i, deployment.Name)
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
update.Spec.Paused = false
|
||||
randomScale(update, i)
|
||||
})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
default:
|
||||
// arbitrarily delete deployment pods
|
||||
e2elog.Logf("%02d: arbitrarily deleting one or more deployment pods for deployment %q", i, deployment.Name)
|
||||
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
opts := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
podList, err := c.CoreV1().Pods(ns).List(opts)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
if len(podList.Items) == 0 {
|
||||
e2elog.Logf("%02d: no deployment pods to delete", i)
|
||||
continue
|
||||
|
@@ -747,7 +751,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
|||
e2elog.Logf("%02d: deleting deployment pod %q", i, name)
|
||||
err := c.CoreV1().Pods(ns).Delete(name, nil)
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -755,7 +759,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
|||
|
||||
// unpause the deployment if we end up pausing it
|
||||
deployment, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
if deployment.Spec.Paused {
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
update.Spec.Paused = false
|
||||
|
@@ -763,13 +767,16 @@ func testIterativeDeployments(f *framework.Framework) {
|
|||
}
|
||||
|
||||
e2elog.Logf("Waiting for deployment %q to be observed by the controller", deploymentName)
|
||||
gomega.Expect(e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred())
|
||||
err = e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
e2elog.Logf("Waiting for deployment %q status", deploymentName)
|
||||
gomega.Expect(e2edeploy.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred())
|
||||
err = e2edeploy.WaitForDeploymentComplete(c, deployment)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
e2elog.Logf("Checking deployment %q for a complete condition", deploymentName)
|
||||
gomega.Expect(e2edeploy.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, apps.DeploymentProgressing)).NotTo(gomega.HaveOccurred())
|
||||
err = e2edeploy.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, apps.DeploymentProgressing)
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
func testDeploymentsControllerRef(f *framework.Framework) {
|
||||
|
@ -782,9 +789,9 @@ func testDeploymentsControllerRef(f *framework.Framework) {
|
|||
replicas := int32(1)
|
||||
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
e2elog.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName)
|
||||
rsList := listDeploymentReplicaSets(c, ns, podLabels)
|
||||
|
@@ -795,11 +802,11 @@ func testDeploymentsControllerRef(f *framework.Framework) {
|
|||
|
||||
e2elog.Logf("Checking the ReplicaSet has the right controllerRef")
|
||||
err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
e2elog.Logf("Deleting Deployment %q and orphaning its ReplicaSet", deploymentName)
|
||||
err = orphanDeploymentReplicaSets(c, deploy)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Wait for the ReplicaSet to be orphaned")
|
||||
err = wait.Poll(dRetryPeriod, dRetryTimeout, waitDeploymentReplicaSetsOrphaned(c, ns, podLabels))
|
||||
|
@@ -809,13 +816,13 @@ func testDeploymentsControllerRef(f *framework.Framework) {
|
|||
e2elog.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
|
||||
d = e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err = c.AppsV1().Deployments(ns).Create(d)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
e2elog.Logf("Waiting for the ReplicaSet to have the right controllerRef")
|
||||
err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
e2elog.Logf("Verifying no extra ReplicaSet is created (Deployment %q still has only one ReplicaSet after adoption)", deploymentName)
|
||||
rsList = listDeploymentReplicaSets(c, ns, podLabels)
|
||||
|
@@ -844,10 +851,11 @@ func testProportionalScalingDeployment(f *framework.Framework) {
|
|||
|
||||
e2elog.Logf("Creating deployment %q", deploymentName)
|
||||
deployment, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
e2elog.Logf("Waiting for observed generation %d", deployment.Generation)
|
||||
gomega.Expect(e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred())
|
||||
err = e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Verify that the required pods have come up.
|
||||
e2elog.Logf("Waiting for all required pods to come up")
|
||||
|
@@ -855,10 +863,11 @@ func testProportionalScalingDeployment(f *framework.Framework) {
|
|||
framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)
|
||||
|
||||
e2elog.Logf("Waiting for deployment %q to complete", deployment.Name)
|
||||
gomega.Expect(e2edeploy.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred())
|
||||
err = e2edeploy.WaitForDeploymentComplete(c, deployment)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
firstRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Update the deployment with a non-existent image so that the new replica set
|
||||
// will be blocked to simulate a partial rollout.
|
||||
|
@@ -866,37 +875,40 @@ func testProportionalScalingDeployment(f *framework.Framework) {
|
|||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) {
|
||||
update.Spec.Template.Spec.Containers[0].Image = "nginx:404"
|
||||
})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
e2elog.Logf("Waiting for observed generation %d", deployment.Generation)
|
||||
gomega.Expect(e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred())
|
||||
err = e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Checking state of first rollout's replicaset.
|
||||
maxUnavailable, err := intstr.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, int(*(deployment.Spec.Replicas)), false)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// First rollout's replicaset should have Deployment's (replicas - maxUnavailable) = 10 - 2 = 8 available replicas.
|
||||
minAvailableReplicas := replicas - int32(maxUnavailable)
|
||||
e2elog.Logf("Waiting for the first rollout's replicaset to have .status.availableReplicas = %d", minAvailableReplicas)
|
||||
gomega.Expect(replicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas)).NotTo(gomega.HaveOccurred())
|
||||
err = replicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// First rollout's replicaset should have .spec.replicas = 8 too.
|
||||
e2elog.Logf("Waiting for the first rollout's replicaset to have .spec.replicas = %d", minAvailableReplicas)
|
||||
gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, minAvailableReplicas)).NotTo(gomega.HaveOccurred())
|
||||
err = replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, minAvailableReplicas)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// The desired replicas wait makes sure that the RS controller has created expected number of pods.
|
||||
e2elog.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
|
||||
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
err = replicaset.WaitForReplicaSetDesiredReplicas(c.AppsV1(), firstRS)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Checking state of second rollout's replicaset.
|
||||
secondRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
maxSurge, err := intstr.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), false)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Second rollout's replicaset should have 0 available replicas.
|
||||
e2elog.Logf("Verifying that the second rollout's replicaset has .status.availableReplicas = 0")
|
||||
|
@@ -905,19 +917,21 @@ func testProportionalScalingDeployment(f *framework.Framework) {
|
|||
// Second rollout's replicaset should have Deployment's (replicas + maxSurge - first RS's replicas) = 10 + 3 - 8 = 5 for .spec.replicas.
|
||||
newReplicas := replicas + int32(maxSurge) - minAvailableReplicas
|
||||
e2elog.Logf("Waiting for the second rollout's replicaset to have .spec.replicas = %d", newReplicas)
|
||||
gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, newReplicas)).NotTo(gomega.HaveOccurred())
|
||||
err = replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, newReplicas)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// The desired replicas wait makes sure that the RS controller has created expected number of pods.
|
||||
e2elog.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
|
||||
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
err = replicaset.WaitForReplicaSetDesiredReplicas(c.AppsV1(), secondRS)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Check the deployment's minimum availability.
|
||||
e2elog.Logf("Verifying that deployment %q has minimum required number of available replicas", deploymentName)
|
||||
if deployment.Status.AvailableReplicas < minAvailableReplicas {
|
||||
gomega.Expect(fmt.Errorf("observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, minAvailableReplicas)).NotTo(gomega.HaveOccurred())
|
||||
err = fmt.Errorf("observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, minAvailableReplicas)
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
// Scale the deployment to 30 replicas.
|
||||
|
@@ -926,23 +940,25 @@ func testProportionalScalingDeployment(f *framework.Framework) {
|
|||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
update.Spec.Replicas = &newReplicas
|
||||
})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
e2elog.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName)
|
||||
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// First rollout's replicaset should have .spec.replicas = 8 + (30-10)*(8/13) = 8 + 12 = 20 replicas.
|
||||
// Note that 12 comes from rounding (30-10)*(8/13) to nearest integer.
|
||||
e2elog.Logf("Verifying that first rollout's replicaset has .spec.replicas = 20")
|
||||
gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, 20)).NotTo(gomega.HaveOccurred())
|
||||
err = replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, 20)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Second rollout's replicaset should have .spec.replicas = 5 + (30-10)*(5/13) = 5 + 8 = 13 replicas.
|
||||
// Note that 8 comes from rounding (30-10)*(5/13) to nearest integer.
|
||||
e2elog.Logf("Verifying that second rollout's replicaset has .spec.replicas = 13")
|
||||
gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, 13)).NotTo(gomega.HaveOccurred())
|
||||
err = replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, 13)
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
func checkDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, uid types.UID, label map[string]string) error {
|
||||
|
@@ -973,7 +989,7 @@ func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[strin
|
|||
selector := labels.Set(label).AsSelector()
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
rsList, err := c.AppsV1().ReplicaSets(ns).List(options)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(len(rsList.Items)).To(gomega.BeNumerically(">", 0))
|
||||
return rsList
|
||||
}