mirror of https://github.com/k3s-io/k3s
test: remove deployment events from e2e tests
parent cbbe421db8
commit 31a5b4218d
@@ -66,9 +66,6 @@ var _ = framework.KubeDescribe("Deployment", func() {
    It("RollingUpdateDeployment should delete old pods and create new ones", func() {
        testRollingUpdateDeployment(f)
    })
    It("RollingUpdateDeployment should scale up and down in the right order", func() {
        testRollingUpdateDeploymentEvents(f)
    })
    It("RecreateDeployment should delete old pods and create new ones", func() {
        testRecreateDeployment(f)
    })
@@ -348,114 +345,34 @@ func testRollingUpdateDeployment(f *framework.Framework) {
    Expect(len(allOldRSs[0].Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
}

func testRollingUpdateDeploymentEvents(f *framework.Framework) {
    ns := f.Namespace.Name
    c := f.ClientSet
    // Create nginx pods.
    deploymentPodLabels := map[string]string{"name": "sample-pod-2"}
    rsPodLabels := map[string]string{
        "name": "sample-pod-2",
        "pod":  nginxImageName,
    }
    rsName := "test-rolling-scale-controller"
    replicas := int32(1)

    rsRevision := "3546343826724305832"
    annotations := make(map[string]string)
    annotations[deploymentutil.RevisionAnnotation] = rsRevision
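    // The deployment controller assigns a new ReplicaSet the next revision (max existing revision + 1),
    // hence the wait for revision 3546343826724305833 further down.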
    rs := newRS(rsName, replicas, rsPodLabels, nginxImageName, nginxImage)
    rs.Annotations = annotations

    _, err := c.Extensions().ReplicaSets(ns).Create(rs)
    Expect(err).NotTo(HaveOccurred())
    // Verify that the required pods have come up.
    err = framework.VerifyPods(c, ns, "sample-pod-2", false, 1)
    if err != nil {
        framework.Logf("error in waiting for pods to come up: %s", err)
        Expect(err).NotTo(HaveOccurred())
    }

    // Create a deployment to delete nginx pods and instead bring up redis pods.
    deploymentName := "test-rolling-scale-deployment"
    framework.Logf("Creating deployment %s", deploymentName)
    deploy, err := c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, redisImageName, redisImage, extensions.RollingUpdateDeploymentStrategyType, nil))
    Expect(err).NotTo(HaveOccurred())

    // Wait for it to be updated to revision 3546343826724305833
    err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", redisImage)
    Expect(err).NotTo(HaveOccurred())

    err = framework.WaitForDeploymentStatus(c, deploy)
    Expect(err).NotTo(HaveOccurred())
    // Verify that the pods were scaled up and down as expected. We use events to verify that.
    deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
    Expect(err).NotTo(HaveOccurred())
    framework.WaitForEvents(c, ns, deployment, 2)
    events, err := c.Core().Events(ns).Search(deployment)
    if err != nil {
        framework.Logf("error in listing events: %s", err)
        Expect(err).NotTo(HaveOccurred())
    }
    // There should be 2 events, one to scale up the new ReplicaSet and then to scale down
    // the old ReplicaSet.
    Expect(len(events.Items)).Should(Equal(2))
    newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
    Expect(err).NotTo(HaveOccurred())
    Expect(newRS).NotTo(Equal(nil))
    Expect(events.Items[0].Message).Should(Equal(fmt.Sprintf("Scaled up replica set %s to 1", newRS.Name)))
    Expect(events.Items[1].Message).Should(Equal(fmt.Sprintf("Scaled down replica set %s to 0", rsName)))
}

func testRecreateDeployment(f *framework.Framework) {
    ns := f.Namespace.Name
    c := f.ClientSet
    // Create nginx pods.
    deploymentPodLabels := map[string]string{"name": "sample-pod-3"}
    rsPodLabels := map[string]string{
        "name": "sample-pod-3",
        "pod":  nginxImageName,
    }

    rsName := "test-recreate-controller"
    replicas := int32(3)
    _, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, nginxImageName, nginxImage))
    Expect(err).NotTo(HaveOccurred())
    // Verify that the required pods have come up.
    err = framework.VerifyPods(c, ns, "sample-pod-3", false, 3)
    if err != nil {
        framework.Logf("error in waiting for pods to come up: %s", err)
        Expect(err).NotTo(HaveOccurred())
    }

    // Create a deployment to delete nginx pods and instead bring up redis pods.
    // Create a deployment that brings up redis pods.
    deploymentName := "test-recreate-deployment"
    framework.Logf("Creating deployment %s", deploymentName)
    deploy, err := c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, redisImageName, redisImage, extensions.RecreateDeploymentStrategyType, nil))
    framework.Logf("Creating deployment %q", deploymentName)
    deployment, err := c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, int32(3), map[string]string{"name": "sample-pod-3"}, redisImageName, redisImage, extensions.RecreateDeploymentStrategyType, nil))
    Expect(err).NotTo(HaveOccurred())

    // Wait for it to be updated to revision 1
    framework.Logf("Waiting deployment %q to be updated to revision 1", deploymentName)
    err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", redisImage)
    Expect(err).NotTo(HaveOccurred())

    err = framework.WaitForDeploymentStatus(c, deploy)
    framework.Logf("Waiting deployment %q to complete", deploymentName)
    Expect(framework.WaitForDeploymentStatusValid(c, deployment)).NotTo(HaveOccurred())

    // Update deployment to delete redis pods and bring up nginx pods.
    framework.Logf("Triggering a new rollout for deployment %q", deploymentName)
    deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *extensions.Deployment) {
        update.Spec.Template.Spec.Containers[0].Name = nginxImageName
        update.Spec.Template.Spec.Containers[0].Image = nginxImage
    })
    Expect(err).NotTo(HaveOccurred())

    // Verify that the pods were scaled up and down as expected. We use events to verify that.
    deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
    Expect(err).NotTo(HaveOccurred())
    framework.WaitForEvents(c, ns, deployment, 2)
    events, err := c.Core().Events(ns).Search(deployment)
    if err != nil {
        framework.Logf("error in listing events: %s", err)
        Expect(err).NotTo(HaveOccurred())
    }
    // There should be 2 events, one to scale up the new ReplicaSet and then to scale down the old ReplicaSet.
    Expect(len(events.Items)).Should(Equal(2))
    newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
    Expect(err).NotTo(HaveOccurred())
    Expect(newRS).NotTo(Equal(nil))
    Expect(events.Items[0].Message).Should(Equal(fmt.Sprintf("Scaled down replica set %s to 0", rsName)))
    Expect(events.Items[1].Message).Should(Equal(fmt.Sprintf("Scaled up replica set %s to 3", newRS.Name)))
    framework.Logf("Watching deployment %q to verify that new pods will not run with old pods", deploymentName)
    Expect(framework.WatchRecreateDeployment(c, deployment)).NotTo(HaveOccurred())
}
// testDeploymentCleanUpPolicy tests that deployment supports cleanup policy

@@ -494,6 +411,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
    }
    stopCh := make(chan struct{})
    w, err := c.Core().Pods(ns).Watch(options)
    Expect(err).NotTo(HaveOccurred())
    go func() {
        // There should be only one pod being created, which is the pod with the redis image.
        // The old RS shouldn't create new pods while the deployment controller is adding the pod-template-hash label to its selector.
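The hunk is cut off here, before the goroutine body. Purely as an illustrative sketch (not the code added by this commit), the body of such a watch consumer could look roughly like the following, reusing the hunk's w and stopCh and the ginkgo, watch, and framework imports already present in the test file:

        defer GinkgoRecover()
        // Sketch only: count Added events from the pod watch and fail on a second creation.
        creations := 0
        for {
            select {
            case event := <-w.ResultChan():
                if event.Type != watch.Added {
                    continue
                }
                creations++
                if creations > 1 {
                    framework.Failf("expected only one pod creation, got another: %#v", event)
                }
            case <-stopCh:
                return
            }
        }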
@@ -3301,6 +3301,40 @@ func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName
    return nil
}

// WatchRecreateDeployment watches Recreate deployments and ensures that no new pods run at the same time as
// old pods.
func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) error {
    if d.Spec.Strategy.Type != extensions.RecreateDeploymentStrategyType {
        return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
    }

    w, err := c.Extensions().Deployments(d.Namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
    if err != nil {
        return err
    }

    status := d.Status

    condition := func(event watch.Event) (bool, error) {
        d := event.Object.(*extensions.Deployment)
        status = d.Status
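        // Recreate must never run old and new pods side by side: if any updated replicas
        // exist while the total replica count still differs from the updated count,
        // old pods are running alongside new ones.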
        if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas {
            return false, fmt.Errorf("deployment %q is running new pods alongside old pods: %#v", d.Name, status)
        }
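        // Otherwise the rollout is complete once every replica has been updated and the
        // controller has observed the latest spec generation.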
        return *(d.Spec.Replicas) == d.Status.Replicas &&
            *(d.Spec.Replicas) == d.Status.UpdatedReplicas &&
            d.Generation <= d.Status.ObservedGeneration, nil
    }

    _, err = watch.Until(2*time.Minute, w, condition)
    if err == wait.ErrWaitTimeout {
        err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status)
    }
    return err
}

// WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image.
// Note that deployment revision and its new RS revision should be updated shortly, so we only wait for 1 minute here to fail early.
func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string) error {
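The hunk ends at the signature above. Purely as an illustrative sketch, and not the body from this commit, a poll-based implementation of that contract might look roughly like this, assuming the wait, metav1, and deploymentutil packages this file already uses:

    // Sketch only: poll until the deployment and its new ReplicaSet carry the expected
    // revision annotation and the new ReplicaSet runs the expected image.
    return wait.Poll(time.Second, 1*time.Minute, func() (bool, error) {
        deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
        newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
        if err != nil || newRS == nil {
            return false, err
        }
        return deployment.Annotations[deploymentutil.RevisionAnnotation] == revision &&
            newRS.Annotations[deploymentutil.RevisionAnnotation] == revision &&
            newRS.Spec.Template.Spec.Containers[0].Image == image, nil
    })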

@@ -66,7 +66,6 @@ Density should allow running maximum capacity pods on nodes,smarterclayton,1
Density should allow starting * pods per node using *,derekwaynecarr,0
Deployment RecreateDeployment should delete old pods and create new ones,pwittrock,0
Deployment RollingUpdateDeployment should delete old pods and create new ones,pwittrock,0
Deployment RollingUpdateDeployment should scale up and down in the right order,pwittrock,0
Deployment deployment reaping should cascade to its replica sets and pods,wojtek-t,1
Deployment deployment should create new pods,pwittrock,0
Deployment deployment should delete old replica sets,pwittrock,0