mirror of https://github.com/k3s-io/k3s
fix golint failures of test/e2e/apps
parent 0148014048
commit 03180ec2b2
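Most of the diff below removes the ginkgo/gomega dot imports and package-qualifies every call; the remaining hunks add doc comments to exported identifiers, export a type returned by an exported constructor, replace `x += 1` with `x++`, drop trailing punctuation from error strings, and remove an else-after-return — all golint complaints. As a rough, hypothetical sketch of the qualified style (package, suite, and test names invented for illustration; assumes Ginkgo v1 and Gomega):

package apps_test

import (
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// TestSuite wires Gomega failures into Ginkgo and runs the specs,
// using only package-qualified identifiers (no dot imports).
func TestSuite(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "Example Suite")
}

var _ = ginkgo.Describe("example", func() {
	ginkgo.It("uses qualified matchers", func() {
		gomega.Expect(1 + 1).To(gomega.Equal(2))
	})
})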
@@ -631,7 +631,6 @@ staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/flunder
staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller
staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1
test/e2e
-test/e2e/apps
test/e2e/auth
test/e2e/autoscaling
test/e2e/chaosmonkey
@@ -20,8 +20,8 @@ import (
"fmt"
"time"

-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"

batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
@ -51,221 +51,221 @@ var _ = SIGDescribe("CronJob", func() {
|
|||
// Pod will complete instantly
|
||||
successCommand := []string{"/bin/true"}
|
||||
|
||||
BeforeEach(func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
framework.SkipIfMissingResource(f.DynamicClient, CronJobGroupVersionResourceBeta, f.Namespace.Name)
|
||||
})
|
||||
|
||||
// multiple jobs running at once
|
||||
It("should schedule multiple jobs concurrently", func() {
|
||||
By("Creating a cronjob")
|
||||
ginkgo.It("should schedule multiple jobs concurrently", func() {
|
||||
ginkgo.By("Creating a cronjob")
|
||||
cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1beta1.AllowConcurrent,
|
||||
sleepCommand, nil)
|
||||
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name)
|
||||
|
||||
By("Ensuring more than one job is running at a time")
|
||||
ginkgo.By("Ensuring more than one job is running at a time")
|
||||
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 2)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to wait for active jobs in CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to wait for active jobs in CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
|
||||
By("Ensuring at least two running jobs exists by listing jobs explicitly")
|
||||
ginkgo.By("Ensuring at least two running jobs exists by listing jobs explicitly")
|
||||
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
|
||||
activeJobs, _ := filterActiveJobs(jobs)
|
||||
Expect(len(activeJobs) >= 2).To(BeTrue())
|
||||
gomega.Expect(len(activeJobs) >= 2).To(gomega.BeTrue())
|
||||
|
||||
By("Removing cronjob")
|
||||
ginkgo.By("Removing cronjob")
|
||||
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
})
|
||||
|
||||
// suspended should not schedule jobs
|
||||
It("should not schedule jobs when suspended [Slow]", func() {
|
||||
By("Creating a suspended cronjob")
|
||||
ginkgo.It("should not schedule jobs when suspended [Slow]", func() {
|
||||
ginkgo.By("Creating a suspended cronjob")
|
||||
cronJob := newTestCronJob("suspended", "*/1 * * * ?", batchv1beta1.AllowConcurrent,
|
||||
sleepCommand, nil)
|
||||
t := true
|
||||
cronJob.Spec.Suspend = &t
|
||||
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name)
|
||||
|
||||
By("Ensuring no jobs are scheduled")
|
||||
ginkgo.By("Ensuring no jobs are scheduled")
|
||||
err = waitForNoJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, false)
|
||||
Expect(err).To(HaveOccurred())
|
||||
gomega.Expect(err).To(gomega.HaveOccurred())
|
||||
|
||||
By("Ensuring no job exists by listing jobs explicitly")
|
||||
ginkgo.By("Ensuring no job exists by listing jobs explicitly")
|
||||
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
|
||||
Expect(jobs.Items).To(HaveLen(0))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
|
||||
gomega.Expect(jobs.Items).To(gomega.HaveLen(0))
|
||||
|
||||
By("Removing cronjob")
|
||||
ginkgo.By("Removing cronjob")
|
||||
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
})
|
||||
|
||||
// only single active job is allowed for ForbidConcurrent
|
||||
It("should not schedule new jobs when ForbidConcurrent [Slow]", func() {
|
||||
By("Creating a ForbidConcurrent cronjob")
|
||||
ginkgo.It("should not schedule new jobs when ForbidConcurrent [Slow]", func() {
|
||||
ginkgo.By("Creating a ForbidConcurrent cronjob")
|
||||
cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1beta1.ForbidConcurrent,
|
||||
sleepCommand, nil)
|
||||
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name)
|
||||
|
||||
By("Ensuring a job is scheduled")
|
||||
ginkgo.By("Ensuring a job is scheduled")
|
||||
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to schedule CronJob %s", cronJob.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to schedule CronJob %s", cronJob.Name)
|
||||
|
||||
By("Ensuring exactly one is scheduled")
|
||||
ginkgo.By("Ensuring exactly one is scheduled")
|
||||
cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to get CronJob %s", cronJob.Name)
|
||||
Expect(cronJob.Status.Active).Should(HaveLen(1))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to get CronJob %s", cronJob.Name)
|
||||
gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1))
|
||||
|
||||
By("Ensuring exactly one running job exists by listing jobs explicitly")
|
||||
ginkgo.By("Ensuring exactly one running job exists by listing jobs explicitly")
|
||||
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
|
||||
activeJobs, _ := filterActiveJobs(jobs)
|
||||
Expect(activeJobs).To(HaveLen(1))
|
||||
gomega.Expect(activeJobs).To(gomega.HaveLen(1))
|
||||
|
||||
By("Ensuring no more jobs are scheduled")
|
||||
ginkgo.By("Ensuring no more jobs are scheduled")
|
||||
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 2)
|
||||
Expect(err).To(HaveOccurred())
|
||||
gomega.Expect(err).To(gomega.HaveOccurred())
|
||||
|
||||
By("Removing cronjob")
|
||||
ginkgo.By("Removing cronjob")
|
||||
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
})
|
||||
|
||||
// only single active job is allowed for ReplaceConcurrent
|
||||
It("should replace jobs when ReplaceConcurrent", func() {
|
||||
By("Creating a ReplaceConcurrent cronjob")
|
||||
ginkgo.It("should replace jobs when ReplaceConcurrent", func() {
|
||||
ginkgo.By("Creating a ReplaceConcurrent cronjob")
|
||||
cronJob := newTestCronJob("replace", "*/1 * * * ?", batchv1beta1.ReplaceConcurrent,
|
||||
sleepCommand, nil)
|
||||
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name)
|
||||
|
||||
By("Ensuring a job is scheduled")
|
||||
ginkgo.By("Ensuring a job is scheduled")
|
||||
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to schedule CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to schedule CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
|
||||
By("Ensuring exactly one is scheduled")
|
||||
ginkgo.By("Ensuring exactly one is scheduled")
|
||||
cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to get CronJob %s", cronJob.Name)
|
||||
Expect(cronJob.Status.Active).Should(HaveLen(1))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to get CronJob %s", cronJob.Name)
|
||||
gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1))
|
||||
|
||||
By("Ensuring exactly one running job exists by listing jobs explicitly")
|
||||
ginkgo.By("Ensuring exactly one running job exists by listing jobs explicitly")
|
||||
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to list the jobs in namespace %s", f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to list the jobs in namespace %s", f.Namespace.Name)
|
||||
activeJobs, _ := filterActiveJobs(jobs)
|
||||
Expect(activeJobs).To(HaveLen(1))
|
||||
gomega.Expect(activeJobs).To(gomega.HaveLen(1))
|
||||
|
||||
By("Ensuring the job is replaced with a new one")
|
||||
ginkgo.By("Ensuring the job is replaced with a new one")
|
||||
err = waitForJobReplaced(f.ClientSet, f.Namespace.Name, jobs.Items[0].Name)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to replace CronJob %s in namespace %s", jobs.Items[0].Name, f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to replace CronJob %s in namespace %s", jobs.Items[0].Name, f.Namespace.Name)
|
||||
|
||||
By("Removing cronjob")
|
||||
ginkgo.By("Removing cronjob")
|
||||
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
})
|
||||
|
||||
// shouldn't give us unexpected warnings
|
||||
It("should not emit unexpected warnings", func() {
|
||||
By("Creating a cronjob")
|
||||
ginkgo.It("should not emit unexpected warnings", func() {
|
||||
ginkgo.By("Creating a cronjob")
|
||||
cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1beta1.AllowConcurrent,
|
||||
nil, nil)
|
||||
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name)
|
||||
|
||||
By("Ensuring at least two jobs and at least one finished job exists by listing jobs explicitly")
|
||||
ginkgo.By("Ensuring at least two jobs and at least one finished job exists by listing jobs explicitly")
|
||||
err = waitForJobsAtLeast(f.ClientSet, f.Namespace.Name, 2)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to ensure at least two job exists in namespace %s", f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure at least two job exists in namespace %s", f.Namespace.Name)
|
||||
err = waitForAnyFinishedJob(f.ClientSet, f.Namespace.Name)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to ensure at least on finished job exists in namespace %s", f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure at least on finished job exists in namespace %s", f.Namespace.Name)
|
||||
|
||||
By("Ensuring no unexpected event has happened")
|
||||
ginkgo.By("Ensuring no unexpected event has happened")
|
||||
err = waitForEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob", "UnexpectedJob"})
|
||||
Expect(err).To(HaveOccurred())
|
||||
gomega.Expect(err).To(gomega.HaveOccurred())
|
||||
|
||||
By("Removing cronjob")
|
||||
ginkgo.By("Removing cronjob")
|
||||
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
})
|
||||
|
||||
// deleted jobs should be removed from the active list
|
||||
It("should remove from active list jobs that have been deleted", func() {
|
||||
By("Creating a ForbidConcurrent cronjob")
|
||||
ginkgo.It("should remove from active list jobs that have been deleted", func() {
|
||||
ginkgo.By("Creating a ForbidConcurrent cronjob")
|
||||
cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1beta1.ForbidConcurrent,
|
||||
sleepCommand, nil)
|
||||
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name)
|
||||
|
||||
By("Ensuring a job is scheduled")
|
||||
ginkgo.By("Ensuring a job is scheduled")
|
||||
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to ensure a %s cronjob is scheduled in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure a %s cronjob is scheduled in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
|
||||
By("Ensuring exactly one is scheduled")
|
||||
ginkgo.By("Ensuring exactly one is scheduled")
|
||||
cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to ensure exactly one %s cronjob is scheduled in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
Expect(cronJob.Status.Active).Should(HaveLen(1))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure exactly one %s cronjob is scheduled in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1))
|
||||
|
||||
By("Deleting the job")
|
||||
ginkgo.By("Deleting the job")
|
||||
job := cronJob.Status.Active[0]
|
||||
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name))
|
||||
|
||||
By("Ensuring job was deleted")
|
||||
ginkgo.By("Ensuring job was deleted")
|
||||
_, err = jobutil.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(errors.IsNotFound(err)).To(BeTrue())
|
||||
gomega.Expect(err).To(gomega.HaveOccurred())
|
||||
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
|
||||
|
||||
By("Ensuring the job is not in the cronjob active list")
|
||||
ginkgo.By("Ensuring the job is not in the cronjob active list")
|
||||
err = waitForJobNotActive(f.ClientSet, f.Namespace.Name, cronJob.Name, job.Name)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to ensure the %s cronjob is not in active list in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure the %s cronjob is not in active list in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
|
||||
By("Ensuring MissingJob event has occurred")
|
||||
ginkgo.By("Ensuring MissingJob event has occurred")
|
||||
err = waitForEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob"})
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to ensure missing job event has occurred for %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure missing job event has occurred for %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
|
||||
By("Removing cronjob")
|
||||
ginkgo.By("Removing cronjob")
|
||||
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to remove %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to remove %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
})
|
||||
|
||||
// cleanup of successful finished jobs, with limit of one successful job
|
||||
It("should delete successful finished jobs with limit of one successful job", func() {
|
||||
By("Creating a AllowConcurrent cronjob with custom history limits")
|
||||
ginkgo.It("should delete successful finished jobs with limit of one successful job", func() {
|
||||
ginkgo.By("Creating a AllowConcurrent cronjob with custom history limits")
|
||||
successLimit := int32(1)
|
||||
cronJob := newTestCronJob("concurrent-limit", "*/1 * * * ?", batchv1beta1.AllowConcurrent,
|
||||
successCommand, &successLimit)
|
||||
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to create allowconcurrent cronjob with custom history limits in namespace %s", f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create allowconcurrent cronjob with custom history limits in namespace %s", f.Namespace.Name)
|
||||
|
||||
// Job is going to complete instantly: do not check for an active job
|
||||
// as we are most likely to miss it
|
||||
|
||||
By("Ensuring a finished job exists")
|
||||
ginkgo.By("Ensuring a finished job exists")
|
||||
err = waitForAnyFinishedJob(f.ClientSet, f.Namespace.Name)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to ensure a finished cronjob exists in namespace %s", f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure a finished cronjob exists in namespace %s", f.Namespace.Name)
|
||||
|
||||
By("Ensuring a finished job exists by listing jobs explicitly")
|
||||
ginkgo.By("Ensuring a finished job exists by listing jobs explicitly")
|
||||
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to ensure a finished cronjob exists by listing jobs explicitly in namespace %s", f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure a finished cronjob exists by listing jobs explicitly in namespace %s", f.Namespace.Name)
|
||||
_, finishedJobs := filterActiveJobs(jobs)
|
||||
Expect(len(finishedJobs) == 1).To(BeTrue())
|
||||
gomega.Expect(len(finishedJobs) == 1).To(gomega.BeTrue())
|
||||
|
||||
// Job should get deleted when the next job finishes the next minute
|
||||
By("Ensuring this job and its pods does not exist anymore")
|
||||
ginkgo.By("Ensuring this job and its pods does not exist anymore")
|
||||
err = waitForJobToDisappear(f.ClientSet, f.Namespace.Name, finishedJobs[0])
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to ensure that job does not exists anymore in namespace %s", f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure that job does not exists anymore in namespace %s", f.Namespace.Name)
|
||||
err = waitForJobsPodToDisappear(f.ClientSet, f.Namespace.Name, finishedJobs[0])
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to ensure that pods for job does not exists anymore in namespace %s", f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure that pods for job does not exists anymore in namespace %s", f.Namespace.Name)
|
||||
|
||||
By("Ensuring there is 1 finished job by listing jobs explicitly")
|
||||
ginkgo.By("Ensuring there is 1 finished job by listing jobs explicitly")
|
||||
jobs, err = f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to ensure there is one finished job by listing job explicitly in namespace %s", f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure there is one finished job by listing job explicitly in namespace %s", f.Namespace.Name)
|
||||
_, finishedJobs = filterActiveJobs(jobs)
|
||||
Expect(len(finishedJobs) == 1).To(BeTrue())
|
||||
gomega.Expect(len(finishedJobs) == 1).To(gomega.BeTrue())
|
||||
|
||||
By("Removing cronjob")
|
||||
ginkgo.By("Removing cronjob")
|
||||
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to remove the %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to remove the %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name)
|
||||
})
|
||||
})
|
||||
|
||||
|
@@ -360,9 +360,8 @@ func waitForNoJobs(c clientset.Interface, ns, jobName string, failIfNonEmpty boo

if failIfNonEmpty {
return len(curr.Status.Active) == 0, nil
-} else {
-return len(curr.Status.Active) != 0, nil
}
+return len(curr.Status.Active) != 0, nil
})
}

@@ -36,8 +36,8 @@ import (
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"

-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
)

// This test primarily checks 2 things:
@@ -52,14 +52,17 @@ const (
restartTimeout = 10 * time.Minute
numPods = 10
sshPort = 22
-ADD = "ADD"
-DEL = "DEL"
-UPDATE = "UPDATE"
+// ADD represents the ADD event
+ADD = "ADD"
+// DEL represents the DEL event
+DEL = "DEL"
+// UPDATE represents the UPDATE event
+UPDATE = "UPDATE"
)

-// restartDaemonConfig is a config to restart a running daemon on a node, and wait till
+// RestartDaemonConfig is a config to restart a running daemon on a node, and wait till
// it comes back up. It uses ssh to send a SIGTERM to the daemon.
-type restartDaemonConfig struct {
+type RestartDaemonConfig struct {
nodeName string
daemonName string
healthzPort int
@@ -67,12 +70,12 @@ type restartDaemonConfig struct {
pollTimeout time.Duration
}

-// NewRestartConfig creates a restartDaemonConfig for the given node and daemon.
-func NewRestartConfig(nodeName, daemonName string, healthzPort int, pollInterval, pollTimeout time.Duration) *restartDaemonConfig {
+// NewRestartConfig creates a RestartDaemonConfig for the given node and daemon.
+func NewRestartConfig(nodeName, daemonName string, healthzPort int, pollInterval, pollTimeout time.Duration) *RestartDaemonConfig {
if !framework.ProviderIs("gce") {
framework.Logf("WARNING: SSH through the restart config might not work on %s", framework.TestContext.Provider)
}
-return &restartDaemonConfig{
+return &RestartDaemonConfig{
nodeName: nodeName,
daemonName: daemonName,
healthzPort: healthzPort,
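The rename in the hunks above follows two golint rules: an exported constructor (NewRestartConfig) should not return an unexported type, and a comment on an exported identifier should start with the identifier's name. A stripped-down, hypothetical sketch of the accepted shape (fields reduced for brevity, not the test's real struct):

package apps

// RestartDaemonConfig describes a daemon to restart on a single node.
type RestartDaemonConfig struct {
	nodeName   string
	daemonName string
}

// NewRestartConfig creates a RestartDaemonConfig for the given node and daemon.
func NewRestartConfig(nodeName, daemonName string) *RestartDaemonConfig {
	// Returning the exported type quiets golint's
	// "exported func returns unexported type" warning.
	return &RestartDaemonConfig{nodeName: nodeName, daemonName: daemonName}
}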
@@ -81,12 +84,12 @@ func NewRestartConfig(nodeName, daemonName string, healthzPort int, pollInterval
}
}

-func (r *restartDaemonConfig) String() string {
+func (r *RestartDaemonConfig) String() string {
return fmt.Sprintf("Daemon %v on node %v", r.daemonName, r.nodeName)
}

// waitUp polls healthz of the daemon till it returns "ok" or the polling hits the pollTimeout
-func (r *restartDaemonConfig) waitUp() {
+func (r *RestartDaemonConfig) waitUp() {
framework.Logf("Checking if %v is up by polling for a 200 on its /healthz endpoint", r)
healthzCheck := fmt.Sprintf(
"curl -s -o /dev/null -I -w \"%%{http_code}\" http://localhost:%v/healthz", r.healthzPort)
@@ -110,14 +113,14 @@ func (r *restartDaemonConfig) waitUp() {
}

// kill sends a SIGTERM to the daemon
-func (r *restartDaemonConfig) kill() {
+func (r *RestartDaemonConfig) kill() {
framework.Logf("Killing %v", r)
_, err := framework.NodeExec(r.nodeName, fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName))
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
}

// Restart checks if the daemon is up, kills it, and waits till it comes back up
-func (r *restartDaemonConfig) restart() {
+func (r *RestartDaemonConfig) restart() {
r.waitUp()
r.kill()
r.waitUp()
@ -191,7 +194,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
|
|||
var stopCh chan struct{}
|
||||
var tracker *podTracker
|
||||
|
||||
BeforeEach(func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
// These tests require SSH
|
||||
framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
|
||||
ns = f.Namespace.Name
|
||||
|
@ -206,7 +209,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
|
|||
Replicas: numPods,
|
||||
CreatedPods: &[]*v1.Pod{},
|
||||
}
|
||||
Expect(framework.RunRC(config)).NotTo(HaveOccurred())
|
||||
gomega.Expect(framework.RunRC(config)).NotTo(gomega.HaveOccurred())
|
||||
replacePods(*config.CreatedPods, existingPods)
|
||||
|
||||
stopCh = make(chan struct{})
|
||||
|
@ -240,11 +243,11 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
|
|||
go controller.Run(stopCh)
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
ginkgo.AfterEach(func() {
|
||||
close(stopCh)
|
||||
})
|
||||
|
||||
It("Controller Manager should not create/delete replicas across restart", func() {
|
||||
ginkgo.It("Controller Manager should not create/delete replicas across restart", func() {
|
||||
|
||||
// Requires master ssh access.
|
||||
framework.SkipUnlessProviderIs("gce", "aws")
|
||||
|
@ -275,7 +278,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
|
|||
}
|
||||
})
|
||||
|
||||
It("Scheduler should continue assigning pods to nodes across restart", func() {
|
||||
ginkgo.It("Scheduler should continue assigning pods to nodes across restart", func() {
|
||||
|
||||
// Requires master ssh access.
|
||||
framework.SkipUnlessProviderIs("gce", "aws")
|
||||
|
@ -293,7 +296,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
|
|||
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, true))
|
||||
})
|
||||
|
||||
It("Kubelet should not restart containers across restart", func() {
|
||||
ginkgo.It("Kubelet should not restart containers across restart", func() {
|
||||
|
||||
nodeIPs, err := framework.GetNodePublicIps(f.ClientSet)
|
||||
framework.ExpectNoError(err)
|
||||
@@ -38,8 +38,8 @@ import (
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/test/e2e/framework"

-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
)

const (
@@ -53,7 +53,7 @@ const (
daemonsetColorLabel = daemonsetLabelPrefix + "color"
)

-// The annotation key scheduler.alpha.kubernetes.io/node-selector is for assigning
+// NamespaceNodeSelectors the annotation key scheduler.alpha.kubernetes.io/node-selector is for assigning
// node selectors labels to namespaces
var NamespaceNodeSelectors = []string{"scheduler.alpha.kubernetes.io/node-selector"}

@@ -65,16 +65,16 @@ var NamespaceNodeSelectors = []string{"scheduler.alpha.kubernetes.io/node-select
var _ = SIGDescribe("Daemon set [Serial]", func() {
var f *framework.Framework

-AfterEach(func() {
+ginkgo.AfterEach(func() {
// Clean up
daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{})
-Expect(err).NotTo(HaveOccurred(), "unable to dump DaemonSets")
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unable to dump DaemonSets")
if daemonsets != nil && len(daemonsets.Items) > 0 {
for _, ds := range daemonsets.Items {
-By(fmt.Sprintf("Deleting DaemonSet %q", ds.Name))
+ginkgo.By(fmt.Sprintf("Deleting DaemonSet %q", ds.Name))
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(f.ClientSet, extensionsinternal.Kind("DaemonSet"), f.Namespace.Name, ds.Name))
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds))
-Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped")
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to be reaped")
}
}
if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
@ -88,7 +88,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
framework.Logf("unable to dump pods: %v", err)
|
||||
}
|
||||
err = clearDaemonSetNodeLabels(f.ClientSet)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
})
|
||||
|
||||
f = framework.NewDefaultFramework("daemonsets")
|
||||
|
@ -99,18 +99,18 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
var ns string
|
||||
var c clientset.Interface
|
||||
|
||||
BeforeEach(func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
ns = f.Namespace.Name
|
||||
|
||||
c = f.ClientSet
|
||||
|
||||
updatedNS, err := updateNamespaceAnnotations(c, ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
ns = updatedNS.Name
|
||||
|
||||
err = clearDaemonSetNodeLabels(c)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
})
|
||||
|
||||
/*
|
||||
|
@ -121,23 +121,23 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
framework.ConformanceIt("should run and stop simple daemon", func() {
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
|
||||
By(fmt.Sprintf("Creating simple DaemonSet %q", dsName))
|
||||
ginkgo.By(fmt.Sprintf("Creating simple DaemonSet %q", dsName))
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By("Check that daemon pods launch on every node of the cluster.")
|
||||
ginkgo.By("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start")
|
||||
err = checkDaemonStatus(f, dsName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By("Stop a daemon pod, check that the daemon pod is revived.")
|
||||
ginkgo.By("Stop a daemon pod, check that the daemon pod is revived.")
|
||||
podList := listDaemonPods(c, ns, label)
|
||||
pod := podList.Items[0]
|
||||
err = c.CoreV1().Pods(ns).Delete(pod.Name, nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to revive")
|
||||
})
|
||||
|
||||
/*
|
||||
|
@ -152,47 +152,47 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
ds := newDaemonSet(dsName, image, complexLabel)
|
||||
ds.Spec.Template.Spec.NodeSelector = nodeSelector
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By("Initially, daemon pods should not be running on any nodes.")
|
||||
ginkgo.By("Initially, daemon pods should not be running on any nodes.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
|
||||
|
||||
By("Change node label to blue, check that daemon pod is launched.")
|
||||
ginkgo.By("Change node label to blue, check that daemon pod is launched.")
|
||||
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
|
||||
gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">", 0))
|
||||
newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
|
||||
Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error setting labels on node")
|
||||
daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
|
||||
Expect(len(daemonSetLabels)).To(Equal(1))
|
||||
gomega.Expect(len(daemonSetLabels)).To(gomega.Equal(1))
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name}))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
|
||||
err = checkDaemonStatus(f, dsName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By("Update the node label to green, and wait for daemons to be unscheduled")
|
||||
ginkgo.By("Update the node label to green, and wait for daemons to be unscheduled")
|
||||
nodeSelector[daemonsetColorLabel] = "green"
|
||||
greenNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
|
||||
Expect(err).NotTo(HaveOccurred(), "error removing labels on node")
|
||||
Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))).
|
||||
NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error removing labels on node")
|
||||
gomega.Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))).
|
||||
NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
|
||||
|
||||
By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate")
|
||||
ginkgo.By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate")
|
||||
patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`,
|
||||
daemonsetColorLabel, greenNode.Labels[daemonsetColorLabel])
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
|
||||
Expect(err).NotTo(HaveOccurred(), "error patching daemon set")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error patching daemon set")
|
||||
daemonSetLabels, _ = separateDaemonSetNodeLabels(greenNode.Labels)
|
||||
Expect(len(daemonSetLabels)).To(Equal(1))
|
||||
gomega.Expect(len(daemonSetLabels)).To(gomega.Equal(1))
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{greenNode.Name}))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
|
||||
err = checkDaemonStatus(f, dsName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
})
|
||||
|
||||
// We defer adding this test to conformance pending the disposition of moving DaemonSet scheduling logic to the
|
||||
// default scheduler.
|
||||
It("should run and stop complex daemon with node affinity", func() {
|
||||
ginkgo.It("should run and stop complex daemon with node affinity", func() {
|
||||
complexLabel := map[string]string{daemonsetNameLabel: dsName}
|
||||
nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
|
||||
framework.Logf("Creating daemon %q with a node affinity", dsName)
|
||||
|
@ -215,29 +215,29 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
},
|
||||
}
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By("Initially, daemon pods should not be running on any nodes.")
|
||||
ginkgo.By("Initially, daemon pods should not be running on any nodes.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
|
||||
|
||||
By("Change node label to blue, check that daemon pod is launched.")
|
||||
ginkgo.By("Change node label to blue, check that daemon pod is launched.")
|
||||
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
|
||||
gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">", 0))
|
||||
newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
|
||||
Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error setting labels on node")
|
||||
daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
|
||||
Expect(len(daemonSetLabels)).To(Equal(1))
|
||||
gomega.Expect(len(daemonSetLabels)).To(gomega.Equal(1))
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name}))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
|
||||
err = checkDaemonStatus(f, dsName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By("Remove the node label and wait for daemons to be unscheduled")
|
||||
ginkgo.By("Remove the node label and wait for daemons to be unscheduled")
|
||||
_, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
|
||||
Expect(err).NotTo(HaveOccurred(), "error removing labels on node")
|
||||
Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))).
|
||||
NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error removing labels on node")
|
||||
gomega.Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))).
|
||||
NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
|
||||
})
|
||||
|
||||
/*
|
||||
|
@ -247,75 +247,75 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
framework.ConformanceIt("should retry creating failed daemon pods", func() {
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
|
||||
By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName))
|
||||
ginkgo.By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName))
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By("Check that daemon pods launch on every node of the cluster.")
|
||||
ginkgo.By("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start")
|
||||
err = checkDaemonStatus(f, dsName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By("Set a daemon pod's phase to 'Failed', check that the daemon pod is revived.")
|
||||
ginkgo.By("Set a daemon pod's phase to 'Failed', check that the daemon pod is revived.")
|
||||
podList := listDaemonPods(c, ns, label)
|
||||
pod := podList.Items[0]
|
||||
pod.ResourceVersion = ""
|
||||
pod.Status.Phase = v1.PodFailed
|
||||
_, err = c.CoreV1().Pods(ns).UpdateStatus(&pod)
|
||||
Expect(err).NotTo(HaveOccurred(), "error failing a daemon pod")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error failing a daemon pod")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to revive")
|
||||
|
||||
By("Wait for the failed daemon pod to be completely deleted.")
|
||||
ginkgo.By("Wait for the failed daemon pod to be completely deleted.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, waitFailedDaemonPodDeleted(c, &pod))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for the failed daemon pod to be completely deleted")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for the failed daemon pod to be completely deleted")
|
||||
})
|
||||
|
||||
// This test should not be added to conformance. We will consider deprecating OnDelete when the
|
||||
// extensions/v1beta1 and apps/v1beta1 are removed.
|
||||
It("should not update pod when spec was updated and update strategy is OnDelete", func() {
|
||||
ginkgo.It("should not update pod when spec was updated and update strategy is OnDelete", func() {
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
|
||||
framework.Logf("Creating simple daemon set %s", dsName)
|
||||
ds := newDaemonSet(dsName, image, label)
|
||||
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.OnDeleteDaemonSetStrategyType}
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By("Check that daemon pods launch on every node of the cluster.")
|
||||
ginkgo.By("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start")
|
||||
|
||||
// Check history and labels
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
waitForHistoryCreated(c, ns, label, 1)
|
||||
first := curHistory(listDaemonHistories(c, ns, label), ds)
|
||||
firstHash := first.Labels[apps.DefaultDaemonSetUniqueLabelKey]
|
||||
Expect(first.Revision).To(Equal(int64(1)))
|
||||
gomega.Expect(first.Revision).To(gomega.Equal(int64(1)))
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash)
|
||||
|
||||
By("Update daemon pods image.")
|
||||
ginkgo.By("Update daemon pods image.")
|
||||
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By("Check that daemon pods images aren't updated.")
|
||||
ginkgo.By("Check that daemon pods images aren't updated.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, image, 0))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By("Check that daemon pods are still running on every node of the cluster.")
|
||||
ginkgo.By("Check that daemon pods are still running on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start")
|
||||
|
||||
// Check history and labels
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
waitForHistoryCreated(c, ns, label, 2)
|
||||
cur := curHistory(listDaemonHistories(c, ns, label), ds)
|
||||
Expect(cur.Revision).To(Equal(int64(2)))
|
||||
Expect(cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]).NotTo(Equal(firstHash))
|
||||
gomega.Expect(cur.Revision).To(gomega.Equal(int64(2)))
|
||||
gomega.Expect(cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]).NotTo(gomega.Equal(firstHash))
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash)
|
||||
})
|
||||
|
||||
|
@ -330,48 +330,48 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
ds := newDaemonSet(dsName, image, label)
|
||||
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By("Check that daemon pods launch on every node of the cluster.")
|
||||
ginkgo.By("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start")
|
||||
|
||||
// Check history and labels
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
waitForHistoryCreated(c, ns, label, 1)
|
||||
cur := curHistory(listDaemonHistories(c, ns, label), ds)
|
||||
hash := cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
|
||||
Expect(cur.Revision).To(Equal(int64(1)))
|
||||
gomega.Expect(cur.Revision).To(gomega.Equal(int64(1)))
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash)
|
||||
|
||||
By("Update daemon pods image.")
|
||||
ginkgo.By("Update daemon pods image.")
|
||||
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Time to complete the rolling upgrade is proportional to the number of nodes in the cluster.
|
||||
// Get the number of nodes, and set the timeout appropriately.
|
||||
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
nodeCount := len(nodes.Items)
|
||||
retryTimeout := dsRetryTimeout + time.Duration(nodeCount*30)*time.Second
|
||||
|
||||
By("Check that daemon pods images are updated.")
|
||||
ginkgo.By("Check that daemon pods images are updated.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, retryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By("Check that daemon pods are still running on every node of the cluster.")
|
||||
ginkgo.By("Check that daemon pods are still running on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start")
|
||||
|
||||
// Check history and labels
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
waitForHistoryCreated(c, ns, label, 2)
|
||||
cur = curHistory(listDaemonHistories(c, ns, label), ds)
|
||||
hash = cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
|
||||
Expect(cur.Revision).To(Equal(int64(2)))
|
||||
gomega.Expect(cur.Revision).To(gomega.Equal(int64(2)))
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash)
|
||||
})
|
||||
|
||||
|
@ -382,17 +382,17 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
*/
|
||||
framework.ConformanceIt("should rollback without unnecessary restarts", func() {
|
||||
schedulableNodes := framework.GetReadySchedulableNodesOrDie(c)
|
||||
Expect(len(schedulableNodes.Items)).To(BeNumerically(">", 1), "Conformance test suite needs a cluster with at least 2 nodes.")
|
||||
gomega.Expect(len(schedulableNodes.Items)).To(gomega.BeNumerically(">", 1), "Conformance test suite needs a cluster with at least 2 nodes.")
|
||||
framework.Logf("Create a RollingUpdate DaemonSet")
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
ds := newDaemonSet(dsName, image, label)
|
||||
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Check that daemon pods launch on every node of the cluster")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start")
|
||||
|
||||
framework.Logf("Update the DaemonSet to trigger a rollout")
|
||||
// We use a nonexistent image here, so that we make sure it won't finish
|
||||
|
@ -400,11 +400,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) {
|
||||
update.Spec.Template.Spec.Containers[0].Image = newImage
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Make sure we're in the middle of a rollout
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkAtLeastOneNewPod(c, ns, label, newImage))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
pods := listDaemonPods(c, ns, label)
|
||||
var existingPods, newPods []*v1.Pod
|
||||
|
@ -422,21 +422,21 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
}
|
||||
schedulableNodes = framework.GetReadySchedulableNodesOrDie(c)
|
||||
if len(schedulableNodes.Items) < 2 {
|
||||
Expect(len(existingPods)).To(Equal(0))
|
||||
gomega.Expect(len(existingPods)).To(gomega.Equal(0))
|
||||
} else {
|
||||
Expect(len(existingPods)).NotTo(Equal(0))
|
||||
gomega.Expect(len(existingPods)).NotTo(gomega.Equal(0))
|
||||
}
|
||||
Expect(len(newPods)).NotTo(Equal(0))
|
||||
gomega.Expect(len(newPods)).NotTo(gomega.Equal(0))
|
||||
|
||||
framework.Logf("Roll back the DaemonSet before rollout is complete")
|
||||
rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) {
|
||||
update.Spec.Template.Spec.Containers[0].Image = image
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Make sure DaemonSet rollback is complete")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, rollbackDS, image, 1))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// After rollback is done, compare current pods with previous old pods during rollout, to make sure they're not restarted
|
||||
pods = listDaemonPods(c, ns, label)
|
||||
|
@ -445,7 +445,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
|||
rollbackPods[pod.Name] = true
|
||||
}
|
||||
for _, pod := range existingPods {
|
||||
Expect(rollbackPods[pod.Name]).To(BeTrue(), fmt.Sprintf("unexpected pod %s be restarted", pod.Name))
|
||||
gomega.Expect(rollbackPods[pod.Name]).To(gomega.BeTrue(), fmt.Sprintf("unexpected pod %s be restarted", pod.Name))
|
||||
}
|
||||
})
|
||||
})
|
||||
|
@@ -486,8 +486,8 @@ func listDaemonPods(c clientset.Interface, ns string, label map[string]string) *
selector := labels.Set(label).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := c.CoreV1().Pods(ns).List(options)
-Expect(err).NotTo(HaveOccurred())
-Expect(len(podList.Items)).To(BeNumerically(">", 0))
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
+gomega.Expect(len(podList.Items)).To(gomega.BeNumerically(">", 0))
return podList
}

@@ -569,7 +569,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
if err != nil {
return nil, err
} else if len(newLabels) != len(labels) {
-return nil, fmt.Errorf("Could not set daemon set test labels as expected.")
+return nil, fmt.Errorf("Could not set daemon set test labels as expected")
}

return newNode, nil
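The error-string hunks here and below drop the trailing periods because golint warns that error strings should not end with punctuation (the full rule also prefers a lower-case first word, which this commit leaves untouched). A small hypothetical helper, not part of the diff, showing the fully lint-clean form:

package apps

import "fmt"

// ensureLabelCount is illustrative only; golint wants error strings
// without trailing punctuation so they read cleanly when wrapped.
func ensureLabelCount(got, want int) error {
	if got != want {
		return fmt.Errorf("could not set daemon set test labels as expected (got %d, want %d)", got, want)
	}
	return nil
}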
@@ -593,7 +593,7 @@ func checkDaemonPodOnNodes(f *framework.Framework, ds *apps.DaemonSet, nodeNames
continue
}
if podutil.IsPodAvailable(&pod, ds.Spec.MinReadySeconds, metav1.Now()) {
-nodesToPodCount[pod.Spec.NodeName] += 1
+nodesToPodCount[pod.Spec.NodeName]++
}
}
framework.Logf("Number of nodes with available pods: %d", len(nodesToPodCount))
@@ -667,7 +667,7 @@ func checkRunningOnNoNodes(f *framework.Framework, ds *apps.DaemonSet) func() (b
func checkDaemonStatus(f *framework.Framework, dsName string) error {
ds, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Get(dsName, metav1.GetOptions{})
if err != nil {
-return fmt.Errorf("Could not get daemon set from v1.")
+return fmt.Errorf("Could not get daemon set from v1")
}
desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady
if desired != scheduled && desired != ready {
@@ -694,7 +694,7 @@ func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *apps.DaemonS
if podImage != image {
framework.Logf("Wrong image for pod: %s. Expected: %s, got: %s.", pod.Name, image, podImage)
} else {
-nodesToUpdatedPodCount[pod.Spec.NodeName] += 1
+nodesToUpdatedPodCount[pod.Spec.NodeName]++
}
if !podutil.IsPodAvailable(&pod, ds.Spec.MinReadySeconds, metav1.Now()) {
framework.Logf("Pod %s is not available", pod.Name)
@@ -718,9 +718,9 @@ func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *apps.DaemonS
func checkDaemonSetPodsLabels(podList *v1.PodList, hash string) {
for _, pod := range podList.Items {
podHash := pod.Labels[apps.DefaultDaemonSetUniqueLabelKey]
-Expect(len(podHash)).To(BeNumerically(">", 0))
+gomega.Expect(len(podHash)).To(gomega.BeNumerically(">", 0))
if len(hash) > 0 {
-Expect(podHash).To(Equal(hash))
+gomega.Expect(podHash).To(gomega.Equal(hash))
}
}
}
@ -740,15 +740,15 @@ func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]st
return false, nil
}
err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, listHistoryFn)
Expect(err).NotTo(HaveOccurred(), "error waiting for controllerrevisions to be created")
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for controllerrevisions to be created")
}

func listDaemonHistories(c clientset.Interface, ns string, label map[string]string) *apps.ControllerRevisionList {
selector := labels.Set(label).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
historyList, err := c.AppsV1().ControllerRevisions(ns).List(options)
Expect(err).NotTo(HaveOccurred())
Expect(len(historyList.Items)).To(BeNumerically(">", 0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(len(historyList.Items)).To(gomega.BeNumerically(">", 0))
return historyList
}
@ -758,16 +758,16 @@ func curHistory(historyList *apps.ControllerRevisionList, ds *apps.DaemonSet) *a
for i := range historyList.Items {
history := &historyList.Items[i]
// Every history should have the hash label
Expect(len(history.Labels[apps.DefaultDaemonSetUniqueLabelKey])).To(BeNumerically(">", 0))
gomega.Expect(len(history.Labels[apps.DefaultDaemonSetUniqueLabelKey])).To(gomega.BeNumerically(">", 0))
match, err := daemon.Match(ds, history)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
if match {
curHistory = history
foundCurHistories++
}
}
Expect(foundCurHistories).To(Equal(1))
Expect(curHistory).NotTo(BeNil())
gomega.Expect(foundCurHistories).To(gomega.Equal(1))
gomega.Expect(curHistory).NotTo(gomega.BeNil())
return curHistory
}

@ -22,8 +22,8 @@ import (
"time"

"github.com/davecgh/go-spew/spew"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
@ -57,18 +57,18 @@ var _ = SIGDescribe("Deployment", func() {
|
|||
var ns string
|
||||
var c clientset.Interface
|
||||
|
||||
AfterEach(func() {
|
||||
ginkgo.AfterEach(func() {
|
||||
failureTrap(c, ns)
|
||||
})
|
||||
|
||||
f := framework.NewDefaultFramework("deployment")
|
||||
|
||||
BeforeEach(func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
c = f.ClientSet
|
||||
ns = f.Namespace.Name
|
||||
})
|
||||
|
||||
It("deployment reaping should cascade to its replica sets and pods", func() {
|
||||
ginkgo.It("deployment reaping should cascade to its replica sets and pods", func() {
|
||||
testDeleteDeployment(f)
|
||||
})
|
||||
/*
|
||||
|
@ -102,13 +102,13 @@ var _ = SIGDescribe("Deployment", func() {
|
|||
framework.ConformanceIt("deployment should support rollover", func() {
|
||||
testRolloverDeployment(f)
|
||||
})
|
||||
It("deployment should support rollback", func() {
|
||||
ginkgo.It("deployment should support rollback", func() {
|
||||
testRollbackDeployment(f)
|
||||
})
|
||||
It("iterative rollouts should eventually progress", func() {
|
||||
ginkgo.It("iterative rollouts should eventually progress", func() {
|
||||
testIterativeDeployments(f)
|
||||
})
|
||||
It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func() {
|
||||
ginkgo.It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func() {
|
||||
testDeploymentsControllerRef(f)
|
||||
})
|
||||
/*
|
||||
|
@ -189,22 +189,22 @@ func newDeploymentRollback(name string, annotations map[string]string, revision

func stopDeployment(c clientset.Interface, ns, deploymentName string) {
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())

framework.Logf("Deleting deployment %s", deploymentName)
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(c, appsinternal.Kind("Deployment"), ns, deployment.Name))

framework.Logf("Ensuring deployment %s was deleted", deploymentName)
_, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
Expect(err).To(HaveOccurred())
Expect(errors.IsNotFound(err)).To(BeTrue())
gomega.Expect(err).To(gomega.HaveOccurred())
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
options := metav1.ListOptions{LabelSelector: selector.String()}
rss, err := c.AppsV1().ReplicaSets(ns).List(options)
Expect(err).NotTo(HaveOccurred())
Expect(rss.Items).Should(HaveLen(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(rss.Items).Should(gomega.HaveLen(0))
framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName)
var pods *v1.PodList
if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
@ -233,20 +233,20 @@ func testDeleteDeployment(f *framework.Framework) {
|
|||
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Wait for it to be updated to revision 1
|
||||
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", NginxImage)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
err = framework.WaitForDeploymentComplete(c, deploy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(newRS).NotTo(Equal(nilRs))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
gomega.Expect(newRS).NotTo(gomega.Equal(nilRs))
|
||||
stopDeployment(c, ns, deploymentName)
|
||||
}
|
||||
|
||||
|
@ -269,34 +269,34 @@ func testRollingUpdateDeployment(f *framework.Framework) {
|
|||
rs.Annotations = annotations
|
||||
framework.Logf("Creating replica set %q (going to be adopted)", rs.Name)
|
||||
_, err := c.AppsV1().ReplicaSets(ns).Create(rs)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
// Verify that the required pods have come up.
|
||||
err = framework.VerifyPodsRunning(c, ns, "sample-pod", false, replicas)
|
||||
Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %s", err)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error in waiting for pods to come up: %s", err)
|
||||
|
||||
// Create a deployment to delete nginx pods and instead bring up redis pods.
|
||||
deploymentName := "test-rolling-update-deployment"
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Wait for it to be updated to revision 3546343826724305833.
|
||||
framework.Logf("Ensuring deployment %q gets the next revision from the one the adopted replica set %q has", deploy.Name, rs.Name)
|
||||
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", RedisImage)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Ensuring status for deployment %q is the expected", deploy.Name)
|
||||
err = framework.WaitForDeploymentComplete(c, deploy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// There should be 1 old RS (nginx-controller, which is adopted)
|
||||
framework.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name)
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.AppsV1())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(len(allOldRSs)).Should(Equal(1))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
gomega.Expect(len(allOldRSs)).Should(gomega.Equal(1))
|
||||
}
|
||||
|
||||
func testRecreateDeployment(f *framework.Framework) {
|
||||
|
@ -308,15 +308,15 @@ func testRecreateDeployment(f *framework.Framework) {
|
|||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, apps.RecreateDeploymentStrategyType)
|
||||
deployment, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Wait for it to be updated to revision 1
|
||||
framework.Logf("Waiting deployment %q to be updated to revision 1", deploymentName)
|
||||
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", RedisImage)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Waiting deployment %q to complete", deploymentName)
|
||||
Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())
|
||||
gomega.Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Update deployment to delete redis pods and bring up nginx pods.
|
||||
framework.Logf("Triggering a new rollout for deployment %q", deploymentName)
|
||||
|
@ -324,10 +324,10 @@ func testRecreateDeployment(f *framework.Framework) {
|
|||
update.Spec.Template.Spec.Containers[0].Name = NginxImageName
|
||||
update.Spec.Template.Spec.Containers[0].Image = NginxImage
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Watching deployment %q to verify that new pods will not run with olds pods", deploymentName)
|
||||
Expect(framework.WatchRecreateDeployment(c, deployment)).NotTo(HaveOccurred())
|
||||
gomega.Expect(framework.WatchRecreateDeployment(c, deployment)).NotTo(gomega.HaveOccurred())
|
||||
}
|
||||
|
||||
// testDeploymentCleanUpPolicy tests that deployment supports cleanup policy
|
||||
|
@ -344,18 +344,18 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
|
|||
replicas := int32(1)
|
||||
revisionHistoryLimit := utilpointer.Int32Ptr(0)
|
||||
_, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Verify that the required pods have come up.
|
||||
err = framework.VerifyPodsRunning(c, ns, "cleanup-pod", false, replicas)
|
||||
Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error in waiting for pods to come up: %v", err)
|
||||
|
||||
// Create a deployment to delete nginx pods and instead bring up redis pods.
|
||||
deploymentName := "test-cleanup-deployment"
|
||||
framework.Logf("Creating deployment %s", deploymentName)
|
||||
|
||||
pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to query for pods: %v", err)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to query for pods: %v", err)
|
||||
|
||||
options := metav1.ListOptions{
|
||||
ResourceVersion: pods.ListMeta.ResourceVersion,
|
||||
|
@ -363,7 +363,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
|
|||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
w, err := c.CoreV1().Pods(ns).Watch(options)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
go func() {
|
||||
// There should be only one pod being created, which is the pod with the redis image.
|
||||
// The old RS shouldn't create new pod when deployment controller adding pod template hash label to its selector.
|
||||
|
@ -393,11 +393,11 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
|
|||
d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
d.Spec.RevisionHistoryLimit = revisionHistoryLimit
|
||||
_, err = c.AppsV1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName))
|
||||
ginkgo.By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName))
|
||||
err = framework.WaitForDeploymentOldRSsNum(c, ns, deploymentName, int(*revisionHistoryLimit))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
}
|
||||
|
||||
// testRolloverDeployment tests that deployment supports rollover.
|
||||
|
@ -415,14 +415,14 @@ func testRolloverDeployment(f *framework.Framework) {
|
|||
rsName := "test-rollover-controller"
|
||||
rsReplicas := int32(1)
|
||||
_, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
// Verify that the required pods have come up.
|
||||
err = framework.VerifyPodsRunning(c, ns, podName, false, rsReplicas)
|
||||
Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error in waiting for pods to come up: %v", err)
|
||||
|
||||
// Wait for replica set to become ready before adopting it.
|
||||
framework.Logf("Waiting for pods owned by replica set %q to become ready", rsName)
|
||||
Expect(replicaset.WaitForReadyReplicaSet(c, ns, rsName)).NotTo(HaveOccurred())
|
||||
gomega.Expect(replicaset.WaitForReadyReplicaSet(c, ns, rsName)).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Create a deployment to delete nginx pods and instead bring up redis-slave pods.
|
||||
// We use a nonexistent image here, so that we make sure it won't finish
|
||||
|
@ -438,25 +438,25 @@ func testRolloverDeployment(f *framework.Framework) {
|
|||
}
|
||||
newDeployment.Spec.MinReadySeconds = int32(10)
|
||||
_, err = c.AppsV1().Deployments(ns).Create(newDeployment)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Verify that the pods were scaled up and down as expected.
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
|
||||
// Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
|
||||
err = framework.WaitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation)
|
||||
// Check if it's updated to revision 1 correctly
|
||||
framework.Logf("Check revision of new replica set for deployment %q", deploymentName)
|
||||
err = framework.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Ensure that both replica sets have 1 created replica")
|
||||
oldRS, err := c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
ensureReplicas(oldRS, int32(1))
|
||||
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
ensureReplicas(newRS, int32(1))
|
||||
|
||||
// The deployment is stuck, update it to rollover the above 2 ReplicaSets and bring up redis pods.
|
||||
|
@ -466,35 +466,35 @@ func testRolloverDeployment(f *framework.Framework) {
|
|||
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
|
||||
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Use observedGeneration to determine if the controller noticed the pod template update.
|
||||
framework.Logf("Wait deployment %q to be observed by the deployment controller", deploymentName)
|
||||
err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Wait for it to be updated to revision 2
|
||||
framework.Logf("Wait for revision update of deployment %q to 2", deploymentName)
|
||||
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Make sure deployment %q is complete", deploymentName)
|
||||
err = framework.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Ensure that both old replica sets have no replicas")
|
||||
oldRS, err = c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
ensureReplicas(oldRS, int32(0))
|
||||
// Not really the new replica set anymore but we GET by name so that's fine.
|
||||
newRS, err = c.AppsV1().ReplicaSets(ns).Get(newRS.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
ensureReplicas(newRS, int32(0))
|
||||
}
|
||||
|
||||
func ensureReplicas(rs *apps.ReplicaSet, replicas int32) {
|
||||
Expect(*rs.Spec.Replicas).Should(Equal(replicas))
|
||||
Expect(rs.Status.Replicas).Should(Equal(replicas))
|
||||
gomega.Expect(*rs.Spec.Replicas).Should(gomega.Equal(replicas))
|
||||
gomega.Expect(rs.Status.Replicas).Should(gomega.Equal(replicas))
|
||||
}
|
||||
|
||||
// testRollbackDeployment tests that a deployment is created (revision 1) and updated (revision 2), and
|
||||
|
@ -518,18 +518,18 @@ func testRollbackDeployment(f *framework.Framework) {
|
|||
createAnnotation := map[string]string{"action": "create", "author": "node"}
|
||||
d.Annotations = createAnnotation
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Wait for it to be updated to revision 1
|
||||
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
err = framework.WaitForDeploymentComplete(c, deploy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Current newRS annotation should be "create"
|
||||
err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// 2. Update the deployment to create redis pods.
|
||||
updatedDeploymentImage := RedisImage
|
||||
|
@ -540,66 +540,66 @@ func testRollbackDeployment(f *framework.Framework) {
|
|||
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
|
||||
update.Annotations = updateAnnotation
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Use observedGeneration to determine if the controller noticed the pod template update.
|
||||
err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Wait for it to be updated to revision 2
|
||||
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
err = framework.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Current newRS annotation should be "update"
|
||||
err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// 3. Update the deploymentRollback to rollback to revision 1
|
||||
revision := int64(1)
|
||||
framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
|
||||
rollback := newDeploymentRollback(deploymentName, nil, revision)
|
||||
err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Wait for the deployment to start rolling back
|
||||
err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
// TODO: report RollbackDone in deployment status and check it here
|
||||
|
||||
// Wait for it to be updated to revision 3
|
||||
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
err = framework.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Current newRS annotation should be "create", after the rollback
|
||||
err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// 4. Update the deploymentRollback to rollback to last revision
|
||||
revision = 0
|
||||
framework.Logf("rolling back deployment %s to last revision", deploymentName)
|
||||
rollback = newDeploymentRollback(deploymentName, nil, revision)
|
||||
err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Wait for it to be updated to revision 4
|
||||
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
err = framework.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Current newRS annotation should be "update", after the rollback
|
||||
err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// 5. Update the deploymentRollback to rollback to revision 10
|
||||
// Since there's no revision 10 in history, it should stay as revision 4
|
||||
|
@ -607,17 +607,17 @@ func testRollbackDeployment(f *framework.Framework) {
|
|||
framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
|
||||
rollback = newDeploymentRollback(deploymentName, nil, revision)
|
||||
err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Wait for the deployment to start rolling back
|
||||
err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
// TODO: report RollbackRevisionNotFound in deployment status and check it here
|
||||
|
||||
// The pod template shouldn't change since there's no revision 10
|
||||
// Check if it's still revision 4 and still has the old pod template
|
||||
err = framework.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// 6. Update the deploymentRollback to rollback to revision 4
|
||||
// Since it's already revision 4, it should be no-op
|
||||
|
@ -625,17 +625,17 @@ func testRollbackDeployment(f *framework.Framework) {
|
|||
framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
|
||||
rollback = newDeploymentRollback(deploymentName, nil, revision)
|
||||
err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Wait for the deployment to start rolling back
|
||||
err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
// TODO: report RollbackTemplateUnchanged in deployment status and check it here
|
||||
|
||||
// The pod template shouldn't change since it's already revision 4
|
||||
// Check if it's still revision 4 and still has the old pod template
|
||||
err = framework.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
}
|
||||
|
||||
func randomScale(d *apps.Deployment, i int) {
|
||||
|
@ -669,7 +669,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
|||
d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
deployment, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
iterations := 20
|
||||
for i := 0; i < iterations; i++ {
|
||||
|
@ -686,7 +686,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
|||
update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv)
|
||||
randomScale(update, i)
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
case n < 0.4:
|
||||
// rollback to the previous version
|
||||
|
@ -697,7 +697,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
|||
}
|
||||
update.Annotations[apps.DeprecatedRollbackTo] = "0"
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
case n < 0.6:
|
||||
// just scaling
|
||||
|
@ -705,7 +705,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
|||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
randomScale(update, i)
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
case n < 0.8:
|
||||
// toggling the deployment
|
||||
|
@ -715,24 +715,24 @@ func testIterativeDeployments(f *framework.Framework) {
|
|||
update.Spec.Paused = true
|
||||
randomScale(update, i)
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
} else {
|
||||
framework.Logf("%02d: resuming deployment %q", i, deployment.Name)
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
update.Spec.Paused = false
|
||||
randomScale(update, i)
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
}
|
||||
|
||||
default:
|
||||
// arbitrarily delete deployment pods
|
||||
framework.Logf("%02d: arbitrarily deleting one or more deployment pods for deployment %q", i, deployment.Name)
|
||||
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
opts := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
podList, err := c.CoreV1().Pods(ns).List(opts)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
if len(podList.Items) == 0 {
|
||||
framework.Logf("%02d: no deployment pods to delete", i)
|
||||
continue
|
||||
|
@ -745,7 +745,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
|||
framework.Logf("%02d: deleting deployment pod %q", i, name)
|
||||
err := c.CoreV1().Pods(ns).Delete(name, nil)
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -753,7 +753,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
|||
|
||||
// unpause the deployment if we end up pausing it
|
||||
deployment, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
if deployment.Spec.Paused {
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
update.Spec.Paused = false
|
||||
|
@ -761,13 +761,13 @@ func testIterativeDeployments(f *framework.Framework) {
|
|||
}
|
||||
|
||||
framework.Logf("Waiting for deployment %q to be observed by the controller", deploymentName)
|
||||
Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(HaveOccurred())
|
||||
gomega.Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Waiting for deployment %q status", deploymentName)
|
||||
Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())
|
||||
gomega.Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Checking deployment %q for a complete condition", deploymentName)
|
||||
Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, apps.DeploymentProgressing)).NotTo(HaveOccurred())
|
||||
gomega.Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, apps.DeploymentProgressing)).NotTo(gomega.HaveOccurred())
|
||||
}
|
||||
|
||||
func testDeploymentsControllerRef(f *framework.Framework) {
|
||||
|
@ -780,47 +780,47 @@ func testDeploymentsControllerRef(f *framework.Framework) {
|
|||
replicas := int32(1)
|
||||
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
err = framework.WaitForDeploymentComplete(c, deploy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName)
|
||||
rsList := listDeploymentReplicaSets(c, ns, podLabels)
|
||||
Expect(len(rsList.Items)).Should(Equal(1))
|
||||
gomega.Expect(len(rsList.Items)).Should(gomega.Equal(1))
|
||||
|
||||
framework.Logf("Obtaining the ReplicaSet's UID")
|
||||
orphanedRSUID := rsList.Items[0].UID
|
||||
|
||||
framework.Logf("Checking the ReplicaSet has the right controllerRef")
|
||||
err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Deleting Deployment %q and orphaning its ReplicaSet", deploymentName)
|
||||
err = orphanDeploymentReplicaSets(c, deploy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By("Wait for the ReplicaSet to be orphaned")
|
||||
ginkgo.By("Wait for the ReplicaSet to be orphaned")
|
||||
err = wait.Poll(dRetryPeriod, dRetryTimeout, waitDeploymentReplicaSetsOrphaned(c, ns, podLabels))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for Deployment ReplicaSet to be orphaned")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for Deployment ReplicaSet to be orphaned")
|
||||
|
||||
deploymentName = "test-adopt-deployment"
|
||||
framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
|
||||
d = framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err = c.AppsV1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
err = framework.WaitForDeploymentComplete(c, deploy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Waiting for the ReplicaSet to have the right controllerRef")
|
||||
err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Verifying no extra ReplicaSet is created (Deployment %q still has only one ReplicaSet after adoption)", deploymentName)
|
||||
rsList = listDeploymentReplicaSets(c, ns, podLabels)
|
||||
Expect(len(rsList.Items)).Should(Equal(1))
|
||||
gomega.Expect(len(rsList.Items)).Should(gomega.Equal(1))
|
||||
|
||||
framework.Logf("Verifying the ReplicaSet has the same UID as the orphaned ReplicaSet")
|
||||
Expect(rsList.Items[0].UID).Should(Equal(orphanedRSUID))
|
||||
gomega.Expect(rsList.Items[0].UID).Should(gomega.Equal(orphanedRSUID))
|
||||
}
|
||||
|
||||
// testProportionalScalingDeployment tests that when a RollingUpdate Deployment is scaled in the middle
|
||||
|
@ -842,21 +842,21 @@ func testProportionalScalingDeployment(f *framework.Framework) {
|
|||
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
deployment, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Waiting for observed generation %d", deployment.Generation)
|
||||
Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(HaveOccurred())
|
||||
gomega.Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Verify that the required pods have come up.
|
||||
framework.Logf("Waiting for all required pods to come up")
|
||||
err = framework.VerifyPodsRunning(c, ns, NginxImageName, false, *(deployment.Spec.Replicas))
|
||||
Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error in waiting for pods to come up: %v", err)
|
||||
|
||||
framework.Logf("Waiting for deployment %q to complete", deployment.Name)
|
||||
Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())
|
||||
gomega.Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred())
|
||||
|
||||
firstRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Update the deployment with a non-existent image so that the new replica set
|
||||
// will be blocked to simulate a partial rollout.
|
||||
|
@ -864,58 +864,58 @@ func testProportionalScalingDeployment(f *framework.Framework) {
|
|||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) {
|
||||
update.Spec.Template.Spec.Containers[0].Image = "nginx:404"
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Waiting for observed generation %d", deployment.Generation)
|
||||
Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(HaveOccurred())
|
||||
gomega.Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Checking state of first rollout's replicaset.
|
||||
maxUnavailable, err := intstr.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, int(*(deployment.Spec.Replicas)), false)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// First rollout's replicaset should have Deployment's (replicas - maxUnavailable) = 10 - 2 = 8 available replicas.
|
||||
minAvailableReplicas := replicas - int32(maxUnavailable)
|
||||
framework.Logf("Waiting for the first rollout's replicaset to have .status.availableReplicas = %d", minAvailableReplicas)
|
||||
Expect(replicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas)).NotTo(HaveOccurred())
|
||||
gomega.Expect(replicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas)).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// First rollout's replicaset should have .spec.replicas = 8 too.
|
||||
framework.Logf("Waiting for the first rollout's replicaset to have .spec.replicas = %d", minAvailableReplicas)
|
||||
Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, minAvailableReplicas)).NotTo(HaveOccurred())
|
||||
gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, minAvailableReplicas)).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// The desired replicas wait makes sure that the RS controller has created expected number of pods.
|
||||
framework.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
|
||||
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
err = replicaset.WaitForReplicaSetDesiredReplicas(c.AppsV1(), firstRS)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Checking state of second rollout's replicaset.
|
||||
secondRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
maxSurge, err := intstr.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), false)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Second rollout's replicaset should have 0 available replicas.
|
||||
framework.Logf("Verifying that the second rollout's replicaset has .status.availableReplicas = 0")
|
||||
Expect(secondRS.Status.AvailableReplicas).Should(Equal(int32(0)))
|
||||
gomega.Expect(secondRS.Status.AvailableReplicas).Should(gomega.Equal(int32(0)))
|
||||
|
||||
// Second rollout's replicaset should have Deployment's (replicas + maxSurge - first RS's replicas) = 10 + 3 - 8 = 5 for .spec.replicas.
|
||||
newReplicas := replicas + int32(maxSurge) - minAvailableReplicas
|
||||
framework.Logf("Waiting for the second rollout's replicaset to have .spec.replicas = %d", newReplicas)
|
||||
Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, newReplicas)).NotTo(HaveOccurred())
|
||||
gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, newReplicas)).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// The desired replicas wait makes sure that the RS controller has created expected number of pods.
|
||||
framework.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
|
||||
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
err = replicaset.WaitForReplicaSetDesiredReplicas(c.AppsV1(), secondRS)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Check the deployment's minimum availability.
|
||||
framework.Logf("Verifying that deployment %q has minimum required number of available replicas", deploymentName)
|
||||
if deployment.Status.AvailableReplicas < minAvailableReplicas {
|
||||
Expect(fmt.Errorf("observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, minAvailableReplicas)).NotTo(HaveOccurred())
|
||||
gomega.Expect(fmt.Errorf("observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, minAvailableReplicas)).NotTo(gomega.HaveOccurred())
|
||||
}
|
||||
|
||||
// Scale the deployment to 30 replicas.
|
||||
|
@ -924,23 +924,23 @@ func testProportionalScalingDeployment(f *framework.Framework) {
|
|||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
update.Spec.Replicas = &newReplicas
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName)
|
||||
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// First rollout's replicaset should have .spec.replicas = 8 + (30-10)*(8/13) = 8 + 12 = 20 replicas.
|
||||
// Note that 12 comes from rounding (30-10)*(8/13) to nearest integer.
|
||||
framework.Logf("Verifying that first rollout's replicaset has .spec.replicas = 20")
|
||||
Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, 20)).NotTo(HaveOccurred())
|
||||
gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, 20)).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Second rollout's replicaset should have .spec.replicas = 5 + (30-10)*(5/13) = 5 + 8 = 13 replicas.
|
||||
// Note that 8 comes from rounding (30-10)*(5/13) to nearest integer.
|
||||
framework.Logf("Verifying that second rollout's replicaset has .spec.replicas = 13")
|
||||
Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, 13)).NotTo(HaveOccurred())
|
||||
gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, 13)).NotTo(gomega.HaveOccurred())
|
||||
}
|
||||
|
||||
func checkDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, uid types.UID, label map[string]string) error {
|
||||
|
@ -971,8 +971,8 @@ func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[strin
|
|||
selector := labels.Set(label).AsSelector()
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
rsList, err := c.AppsV1().ReplicaSets(ns).List(options)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(len(rsList.Items)).To(BeNumerically(">", 0))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
gomega.Expect(len(rsList.Items)).To(gomega.BeNumerically(">", 0))
|
||||
return rsList
|
||||
}
|
||||
|
||||
|
|
|
@ -20,8 +20,8 @@ import (
"fmt"
"time"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
@ -48,16 +48,16 @@ var _ = SIGDescribe("DisruptionController", func() {
|
|||
var ns string
|
||||
var cs kubernetes.Interface
|
||||
|
||||
BeforeEach(func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
cs = f.ClientSet
|
||||
ns = f.Namespace.Name
|
||||
})
|
||||
|
||||
It("should create a PodDisruptionBudget", func() {
|
||||
ginkgo.It("should create a PodDisruptionBudget", func() {
|
||||
createPDBMinAvailableOrDie(cs, ns, intstr.FromString("1%"))
|
||||
})
|
||||
|
||||
It("should update PodDisruptionBudget status", func() {
|
||||
ginkgo.It("should update PodDisruptionBudget status", func() {
|
||||
createPDBMinAvailableOrDie(cs, ns, intstr.FromInt(2))
|
||||
|
||||
createPodsOrDie(cs, ns, 3)
|
||||
|
@ -72,7 +72,7 @@ var _ = SIGDescribe("DisruptionController", func() {
|
|||
}
|
||||
return pdb.Status.PodDisruptionsAllowed > 0, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
})
|
||||
|
||||
evictionCases := []struct {
|
||||
|
@ -145,7 +145,7 @@ var _ = SIGDescribe("DisruptionController", func() {
|
|||
if c.shouldDeny {
|
||||
expectation = "should not allow an eviction"
|
||||
}
|
||||
It(fmt.Sprintf("evictions: %s => %s", c.description, expectation), func() {
|
||||
ginkgo.It(fmt.Sprintf("evictions: %s => %s", c.description, expectation), func() {
|
||||
if c.skipForBigClusters {
|
||||
framework.SkipUnlessNodeCountIsAtMost(bigClusterSize - 1)
|
||||
}
|
||||
|
@ -179,7 +179,7 @@ var _ = SIGDescribe("DisruptionController", func() {
|
|||
|
||||
return false, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
e := &policy.Eviction{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
|
@ -194,7 +194,7 @@ var _ = SIGDescribe("DisruptionController", func() {
|
|||
time.Sleep(timeout)
|
||||
|
||||
err = cs.CoreV1().Pods(ns).Evict(e)
|
||||
Expect(err).Should(MatchError("Cannot evict pod as it would violate the pod's disruption budget."))
|
||||
gomega.Expect(err).Should(gomega.MatchError("Cannot evict pod as it would violate the pod's disruption budget."))
|
||||
} else {
|
||||
// Only wait for running pods in the "allow" case
|
||||
// because one of shouldDeny cases relies on the
|
||||
|
@ -207,11 +207,10 @@ var _ = SIGDescribe("DisruptionController", func() {
|
|||
err = cs.CoreV1().Pods(ns).Evict(e)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
} else {
|
||||
return true, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
}
|
||||
})
|
||||
}
|
||||
|
@ -229,7 +228,7 @@ func createPDBMinAvailableOrDie(cs kubernetes.Interface, ns string, minAvailable
},
}
_, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(&pdb)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}

func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, maxUnavailable intstr.IntOrString) {
@ -244,7 +243,7 @@ func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, maxUnavail
},
}
_, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(&pdb)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}

func createPodsOrDie(cs kubernetes.Interface, ns string, n int) {
@ -272,7 +271,7 @@ func createPodsOrDie(cs kubernetes.Interface, ns string, n int) {
}

func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) {
By("Waiting for all pods to be running")
ginkgo.By("Waiting for all pods to be running")
err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
pods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: "foo=bar"})
if err != nil {

@ -18,6 +18,7 @@ package apps

import "github.com/onsi/ginkgo"

// SIGDescribe annotates the test with the SIG label.
func SIGDescribe(text string, body func()) bool {
return ginkgo.Describe("[sig-apps] "+text, body)
}

@ -27,8 +27,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
jobutil "k8s.io/kubernetes/test/e2e/framework/job"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

var _ = SIGDescribe("Job", func() {
@ -38,20 +38,20 @@ var _ = SIGDescribe("Job", func() {
|
|||
backoffLimit := int32(6) // default value
|
||||
|
||||
// Simplest case: all pods succeed promptly
|
||||
It("should run a job to completion when tasks succeed", func() {
|
||||
By("Creating a job")
|
||||
ginkgo.It("should run a job to completion when tasks succeed", func() {
|
||||
ginkgo.By("Creating a job")
|
||||
job := jobutil.NewTestJob("succeed", "all-succeed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
|
||||
job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
|
||||
|
||||
By("Ensuring job reaches completions")
|
||||
ginkgo.By("Ensuring job reaches completions")
|
||||
err = jobutil.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to ensure job completion in namespace: %s", f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure job completion in namespace: %s", f.Namespace.Name)
|
||||
})
|
||||
|
||||
// Pods sometimes fail, but eventually succeed.
|
||||
It("should run a job to completion when tasks sometimes fail and are locally restarted", func() {
|
||||
By("Creating a job")
|
||||
ginkgo.It("should run a job to completion when tasks sometimes fail and are locally restarted", func() {
|
||||
ginkgo.By("Creating a job")
|
||||
// One failure, then a success, local restarts.
|
||||
// We can't use the random failure approach used by the
|
||||
// non-local test below, because kubelet will throttle
|
||||
|
@ -61,16 +61,16 @@ var _ = SIGDescribe("Job", func() {
|
|||
// test timeout.
|
||||
job := jobutil.NewTestJob("failOnce", "fail-once-local", v1.RestartPolicyOnFailure, parallelism, completions, nil, backoffLimit)
|
||||
job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
|
||||
|
||||
By("Ensuring job reaches completions")
|
||||
ginkgo.By("Ensuring job reaches completions")
|
||||
err = jobutil.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to ensure job completion in namespace: %s", f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure job completion in namespace: %s", f.Namespace.Name)
|
||||
})
|
||||
|
||||
// Pods sometimes fail, but eventually succeed, after pod restarts
|
||||
It("should run a job to completion when tasks sometimes fail and are not locally restarted", func() {
|
||||
By("Creating a job")
|
||||
ginkgo.It("should run a job to completion when tasks sometimes fail and are not locally restarted", func() {
|
||||
ginkgo.By("Creating a job")
|
||||
// 50% chance of container success, local restarts.
|
||||
// Can't use the failOnce approach because that relies
|
||||
// on an emptyDir, which is not preserved across new pods.
|
||||
|
@ -82,22 +82,22 @@ var _ = SIGDescribe("Job", func() {
|
|||
// test less flaky, for now.
|
||||
job := jobutil.NewTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, 3, nil, 999)
|
||||
job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
|
||||
|
||||
By("Ensuring job reaches completions")
|
||||
ginkgo.By("Ensuring job reaches completions")
|
||||
err = jobutil.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to ensure job completion in namespace: %s", f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure job completion in namespace: %s", f.Namespace.Name)
|
||||
})
|
||||
|
||||
It("should exceed active deadline", func() {
|
||||
By("Creating a job")
|
||||
ginkgo.It("should exceed active deadline", func() {
|
||||
ginkgo.By("Creating a job")
|
||||
var activeDeadlineSeconds int64 = 1
|
||||
job := jobutil.NewTestJob("notTerminate", "exceed-active-deadline", v1.RestartPolicyNever, parallelism, completions, &activeDeadlineSeconds, backoffLimit)
|
||||
job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
|
||||
By("Ensuring job past active deadline")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
|
||||
ginkgo.By("Ensuring job past active deadline")
|
||||
err = jobutil.WaitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, time.Duration(activeDeadlineSeconds+10)*time.Second, "DeadlineExceeded")
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to ensure job past active deadline in namespace: %s", f.Namespace.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure job past active deadline in namespace: %s", f.Namespace.Name)
|
||||
})
|
||||
|
||||
/*
|
||||
|
@ -106,49 +106,49 @@ var _ = SIGDescribe("Job", func() {
Description: Create a job. Ensure the active pods reflect paralellism in the namespace and delete the job. Job MUST be deleted successfully.
*/
framework.ConformanceIt("should delete a job", func() {
By("Creating a job")
ginkgo.By("Creating a job")
job := jobutil.NewTestJob("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)

By("Ensuring active pods == parallelism")
ginkgo.By("Ensuring active pods == parallelism")
err = jobutil.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
Expect(err).NotTo(HaveOccurred(), "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name)

By("delete a job")
ginkgo.By("delete a job")
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name))

By("Ensuring job was deleted")
ginkgo.By("Ensuring job was deleted")
_, err = jobutil.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
Expect(err).To(HaveOccurred(), "failed to ensure job %s was deleted in namespace: %s", job.Name, f.Namespace.Name)
Expect(errors.IsNotFound(err)).To(BeTrue())
gomega.Expect(err).To(gomega.HaveOccurred(), "failed to ensure job %s was deleted in namespace: %s", job.Name, f.Namespace.Name)
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
})

It("should adopt matching orphans and release non-matching pods", func() {
By("Creating a job")
ginkgo.It("should adopt matching orphans and release non-matching pods", func() {
ginkgo.By("Creating a job")
job := jobutil.NewTestJob("notTerminate", "adopt-release", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
// Replace job with the one returned from Create() so it has the UID.
// Save Kind since it won't be populated in the returned job.
kind := job.Kind
job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
job.Kind = kind

By("Ensuring active pods == parallelism")
ginkgo.By("Ensuring active pods == parallelism")
err = jobutil.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
Expect(err).NotTo(HaveOccurred(), "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name)

By("Orphaning one of the Job's Pods")
ginkgo.By("Orphaning one of the Job's Pods")
pods, err := jobutil.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
Expect(err).NotTo(HaveOccurred(), "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
Expect(pods.Items).To(HaveLen(int(parallelism)))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
gomega.Expect(pods.Items).To(gomega.HaveLen(int(parallelism)))
pod := pods.Items[0]
f.PodClient().Update(pod.Name, func(pod *v1.Pod) {
pod.OwnerReferences = nil
})

By("Checking that the Job readopts the Pod")
Expect(framework.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "adopted", jobutil.JobTimeout,
ginkgo.By("Checking that the Job readopts the Pod")
gomega.Expect(framework.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "adopted", jobutil.JobTimeout,
func(pod *v1.Pod) (bool, error) {
controllerRef := metav1.GetControllerOf(pod)
if controllerRef == nil {

@ -159,15 +159,15 @@ var _ = SIGDescribe("Job", func() {
}
return true, nil
},
)).To(Succeed(), "wait for pod %q to be readopted", pod.Name)
)).To(gomega.Succeed(), "wait for pod %q to be readopted", pod.Name)

By("Removing the labels from the Job's Pod")
ginkgo.By("Removing the labels from the Job's Pod")
f.PodClient().Update(pod.Name, func(pod *v1.Pod) {
pod.Labels = nil
})

By("Checking that the Job releases the Pod")
Expect(framework.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "released", jobutil.JobTimeout,
ginkgo.By("Checking that the Job releases the Pod")
gomega.Expect(framework.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "released", jobutil.JobTimeout,
func(pod *v1.Pod) (bool, error) {
controllerRef := metav1.GetControllerOf(pod)
if controllerRef != nil {

@ -175,24 +175,24 @@ var _ = SIGDescribe("Job", func() {
}
return true, nil
},
)).To(Succeed(), "wait for pod %q to be released", pod.Name)
)).To(gomega.Succeed(), "wait for pod %q to be released", pod.Name)
})

It("should exceed backoffLimit", func() {
By("Creating a job")
ginkgo.It("should exceed backoffLimit", func() {
ginkgo.By("Creating a job")
backoff := 1
job := jobutil.NewTestJob("fail", "backofflimit", v1.RestartPolicyNever, 1, 1, nil, int32(backoff))
job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
By("Ensuring job exceed backofflimit")
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensuring job exceed backofflimit")

err = jobutil.WaitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, jobutil.JobTimeout, "BackoffLimitExceeded")
Expect(err).NotTo(HaveOccurred(), "failed to ensure job exceed backofflimit in namespace: %s", f.Namespace.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure job exceed backofflimit in namespace: %s", f.Namespace.Name)

By(fmt.Sprintf("Checking that %d pod created and status is failed", backoff+1))
ginkgo.By(fmt.Sprintf("Checking that %d pod created and status is failed", backoff+1))
pods, err := jobutil.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
Expect(err).NotTo(HaveOccurred(), "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
// Expect(pods.Items).To(HaveLen(backoff + 1))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
// gomega.Expect(pods.Items).To(gomega.HaveLen(backoff + 1))
// due to NumRequeus not being stable enough, especially with failed status
// updates we need to allow more than backoff+1
// TODO revert this back to above when https://github.com/kubernetes/kubernetes/issues/64787 gets fixed

@ -200,7 +200,7 @@ var _ = SIGDescribe("Job", func() {
framework.Failf("Not enough pod created expected at least %d, got %#v", backoff+1, pods.Items)
}
for _, pod := range pods.Items {
Expect(pod.Status.Phase).To(Equal(v1.PodFailed))
gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodFailed))
}
})
})

@ -39,8 +39,8 @@ import (
jobutil "k8s.io/kubernetes/test/e2e/framework/job"
testutils "k8s.io/kubernetes/test/utils"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

const (
@ -108,11 +108,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
var c clientset.Interface
var ns string

BeforeEach(func() {
ginkgo.BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
_, err := framework.GetPodsInNamespace(c, ns, map[string]string{})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())

// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
framework.SkipUnlessProviderIs("gke", "aws")
@ -122,8 +122,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
})
|
||||
|
||||
framework.KubeDescribe("Pods", func() {
|
||||
Context("should return to running and ready state after network partition is healed", func() {
|
||||
BeforeEach(func() {
|
||||
ginkgo.Context("should return to running and ready state after network partition is healed", func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
framework.SkipUnlessNodeCountIsAtLeast(2)
|
||||
})
|
||||
|
||||
|
@ -133,13 +133,13 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
// 1. Node is marked NotReady after timeout by nodecontroller (40seconds)
|
||||
// 2. All pods on node are marked NotReady shortly after #1
|
||||
// 3. Node and pods return to Ready after connectivity recovers
|
||||
It("All pods on the unreachable node should be marked as NotReady upon the node turn NotReady "+
|
||||
ginkgo.It("All pods on the unreachable node should be marked as NotReady upon the node turn NotReady "+
|
||||
"AND all pods should be mark back to Ready when the node get back to Ready before pod eviction timeout", func() {
|
||||
By("choose a node - we will block all network traffic on this node")
|
||||
ginkgo.By("choose a node - we will block all network traffic on this node")
|
||||
var podOpts metav1.ListOptions
|
||||
nodeOpts := metav1.ListOptions{}
|
||||
nodes, err := c.CoreV1().Nodes().List(nodeOpts)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.FilterNodes(nodes, func(node v1.Node) bool {
|
||||
if !framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) {
|
||||
return false
|
||||
|
@ -160,7 +160,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err)
|
||||
}
|
||||
|
||||
By("Set up watch on node status")
|
||||
ginkgo.By("Set up watch on node status")
|
||||
nodeSelector := fields.OneTermEqualSelector("metadata.name", node.Name)
|
||||
stopCh := make(chan struct{})
|
||||
newNode := make(chan *v1.Node)
|
||||
|
@ -182,7 +182,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
cache.ResourceEventHandlerFuncs{
|
||||
UpdateFunc: func(oldObj, newObj interface{}) {
|
||||
n, ok := newObj.(*v1.Node)
|
||||
Expect(ok).To(Equal(true))
|
||||
gomega.Expect(ok).To(gomega.Equal(true))
|
||||
newNode <- n
|
||||
|
||||
},
|
||||
|
@ -196,21 +196,21 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
}()
|
||||
go controller.Run(stopCh)
|
||||
|
||||
By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
|
||||
ginkgo.By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
|
||||
host, err := framework.GetNodeExternalIP(&node)
|
||||
framework.ExpectNoError(err)
|
||||
masterAddresses := framework.GetAllMasterAddresses(c)
|
||||
defer func() {
|
||||
By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
|
||||
ginkgo.By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
|
||||
for _, masterAddress := range masterAddresses {
|
||||
framework.UnblockNetwork(host, masterAddress)
|
||||
}
|
||||
|
||||
if CurrentGinkgoTestDescription().Failed {
|
||||
if ginkgo.CurrentGinkgoTestDescription().Failed {
|
||||
return
|
||||
}
|
||||
|
||||
By("Expect to observe node and pod status change from NotReady to Ready after network connectivity recovers")
|
||||
ginkgo.By("Expect to observe node and pod status change from NotReady to Ready after network connectivity recovers")
|
||||
expectNodeReadiness(true, newNode)
|
||||
if err = framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil {
|
||||
framework.Failf("Pods on node %s did not become ready and running within %v: %v", node.Name, podReadyTimeout, err)
|
||||
|
@ -221,7 +221,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
framework.BlockNetwork(host, masterAddress)
|
||||
}
|
||||
|
||||
By("Expect to observe node and pod status change from Ready to NotReady after network partition")
|
||||
ginkgo.By("Expect to observe node and pod status change from Ready to NotReady after network partition")
|
||||
expectNodeReadiness(false, newNode)
|
||||
if err = framework.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, testutils.PodNotReady); err != nil {
|
||||
framework.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err)
|
||||
|
@ -231,7 +231,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
})
|
||||
|
||||
framework.KubeDescribe("[ReplicationController]", func() {
|
||||
It("should recreate pods scheduled on the unreachable node "+
|
||||
ginkgo.It("should recreate pods scheduled on the unreachable node "+
|
||||
"AND allow scheduling of pods on a node after it rejoins the cluster", func() {
|
||||
|
||||
// Create a replication controller for a service that serves its hostname.
|
||||
|
@ -243,32 +243,32 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
replicas := int32(numNodes)
|
||||
common.NewRCByName(c, ns, name, replicas, nil)
|
||||
err = framework.VerifyPods(c, ns, name, true, replicas)
|
||||
Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Each pod should start running and responding")
|
||||
|
||||
By("choose a node with at least one pod - we will block some network traffic on this node")
|
||||
ginkgo.By("choose a node with at least one pod - we will block some network traffic on this node")
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
nodeName := pods.Items[0].Spec.NodeName
|
||||
|
||||
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// This creates a temporary network partition, verifies that 'podNameToDisappear',
|
||||
// that belongs to replication controller 'rcName', really disappeared (because its
|
||||
// grace period is set to 0).
|
||||
// Finally, it checks that the replication controller recreates the
|
||||
// pods on another node and that now the number of replicas is equal 'replicas'.
|
||||
By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
|
||||
ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
|
||||
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
|
||||
framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
|
||||
err := framework.WaitForRCPodToDisappear(c, ns, name, pods.Items[0].Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By("verifying whether the pod from the unreachable node is recreated")
|
||||
ginkgo.By("verifying whether the pod from the unreachable node is recreated")
|
||||
err = framework.VerifyPods(c, ns, name, true, replicas)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
})
|
||||
|
||||
framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
|
||||
|
@ -279,26 +279,26 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
// sleep a bit, to allow Watch in NodeController to catch up.
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
By("verify whether new pods can be created on the re-attached node")
|
||||
ginkgo.By("verify whether new pods can be created on the re-attached node")
|
||||
// increasing the RC size is not a valid way to test this
|
||||
// since we have no guarantees the pod will be scheduled on our node.
|
||||
additionalPod := "additionalpod"
|
||||
err = newPodOnNode(c, ns, additionalPod, node.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
err = framework.VerifyPods(c, ns, additionalPod, true, 1)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// verify that it is really on the requested node
|
||||
{
|
||||
pod, err := c.CoreV1().Pods(ns).Get(additionalPod, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
if pod.Spec.NodeName != node.Name {
|
||||
framework.Logf("Pod %s found on invalid node: %s instead of %s", pod.Name, pod.Spec.NodeName, node.Name)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
It("should eagerly create replacement pod during network partition when termination grace is non-zero", func() {
|
||||
ginkgo.It("should eagerly create replacement pod during network partition when termination grace is non-zero", func() {
|
||||
// Create a replication controller for a service that serves its hostname.
|
||||
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
|
||||
name := "my-hostname-net"
|
||||
|
@ -310,32 +310,32 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
replicas := int32(numNodes)
|
||||
common.NewRCByName(c, ns, name, replicas, &gracePeriod)
|
||||
err = framework.VerifyPods(c, ns, name, true, replicas)
|
||||
Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Each pod should start running and responding")
|
||||
|
||||
By("choose a node with at least one pod - we will block some network traffic on this node")
|
||||
ginkgo.By("choose a node with at least one pod - we will block some network traffic on this node")
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
nodeName := pods.Items[0].Spec.NodeName
|
||||
|
||||
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// This creates a temporary network partition, verifies that 'podNameToDisappear',
|
||||
// that belongs to replication controller 'rcName', did not disappear (because its
|
||||
// grace period is set to 30).
|
||||
// Finally, it checks that the replication controller recreates the
|
||||
// pods on another node and that now the number of replicas is equal 'replicas + 1'.
|
||||
By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
|
||||
ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
|
||||
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
|
||||
framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
|
||||
err := framework.WaitForRCPodToDisappear(c, ns, name, pods.Items[0].Name)
|
||||
Expect(err).To(Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.")
|
||||
gomega.Expect(err).To(gomega.Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.")
|
||||
|
||||
By(fmt.Sprintf("verifying that there are %v running pods during partition", replicas))
|
||||
ginkgo.By(fmt.Sprintf("verifying that there are %v running pods during partition", replicas))
|
||||
_, err = framework.PodsCreated(c, ns, name, replicas)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
})
|
||||
|
||||
framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
|
||||
|
@ -352,10 +352,10 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
}
|
||||
headlessSvcName := "test"
|
||||
|
||||
BeforeEach(func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
|
||||
framework.SkipUnlessProviderIs("gke")
|
||||
By("creating service " + headlessSvcName + " in namespace " + f.Namespace.Name)
|
||||
ginkgo.By("creating service " + headlessSvcName + " in namespace " + f.Namespace.Name)
|
||||
headlessService := framework.CreateServiceSpec(headlessSvcName, "", true, labels)
|
||||
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
|
||||
framework.ExpectNoError(err)
|
||||
|
@ -363,20 +363,20 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
ns = f.Namespace.Name
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
if CurrentGinkgoTestDescription().Failed {
|
||||
ginkgo.AfterEach(func() {
|
||||
if ginkgo.CurrentGinkgoTestDescription().Failed {
|
||||
framework.DumpDebugInfo(c, ns)
|
||||
}
|
||||
framework.Logf("Deleting all stateful set in ns %v", ns)
|
||||
framework.DeleteAllStatefulSets(c, ns)
|
||||
})
|
||||
|
||||
It("should come back up if node goes down [Slow] [Disruptive]", func() {
|
||||
ginkgo.It("should come back up if node goes down [Slow] [Disruptive]", func() {
|
||||
petMounts := []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
|
||||
podMounts := []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
|
||||
ps := framework.NewStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
|
||||
_, err := c.AppsV1().StatefulSets(ns).Create(ps)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
pst := framework.NewStatefulSetTester(c)
|
||||
|
||||
|
@ -386,14 +386,14 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
framework.ExpectNoError(err)
|
||||
common.RestartNodes(f.ClientSet, nodes)
|
||||
|
||||
By("waiting for pods to be running again")
|
||||
ginkgo.By("waiting for pods to be running again")
|
||||
pst.WaitForRunningAndReady(*ps.Spec.Replicas, ps)
|
||||
})
|
||||
|
||||
It("should not reschedule stateful pods if there is a network partition [Slow] [Disruptive]", func() {
|
||||
ginkgo.It("should not reschedule stateful pods if there is a network partition [Slow] [Disruptive]", func() {
|
||||
ps := framework.NewStatefulSet(psName, ns, headlessSvcName, 3, []v1.VolumeMount{}, []v1.VolumeMount{}, labels)
|
||||
_, err := c.AppsV1().StatefulSets(ns).Create(ps)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
pst := framework.NewStatefulSetTester(c)
|
||||
pst.WaitForRunningAndReady(*ps.Spec.Replicas, ps)
|
||||
|
@ -408,7 +408,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
|
||||
framework.Logf("Checking that the NodeController does not force delete stateful pods %v", pod.Name)
|
||||
err := framework.WaitTimeoutForPodNoLongerRunningInNamespace(c, pod.Name, ns, 10*time.Minute)
|
||||
Expect(err).To(Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.")
|
||||
gomega.Expect(err).To(gomega.Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.")
|
||||
})
|
||||
|
||||
framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
|
||||
|
@ -416,13 +416,13 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
|
||||
}
|
||||
|
||||
By("waiting for pods to be running again")
|
||||
ginkgo.By("waiting for pods to be running again")
|
||||
pst.WaitForRunningAndReady(*ps.Spec.Replicas, ps)
|
||||
})
|
||||
})
|
||||
|
||||
framework.KubeDescribe("[Job]", func() {
|
||||
It("should create new pods when node is partitioned", func() {
|
||||
ginkgo.It("should create new pods when node is partitioned", func() {
|
||||
parallelism := int32(2)
|
||||
completions := int32(4)
|
||||
backoffLimit := int32(6) // default value
|
||||
|
@ -430,33 +430,33 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
job := jobutil.NewTestJob("notTerminate", "network-partition", v1.RestartPolicyNever,
|
||||
parallelism, completions, nil, backoffLimit)
|
||||
job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{jobutil.JobSelectorKey: job.Name}))
|
||||
|
||||
By(fmt.Sprintf("verifying that there are now %v running pods", parallelism))
|
||||
ginkgo.By(fmt.Sprintf("verifying that there are now %v running pods", parallelism))
|
||||
_, err = framework.PodsCreatedByLabel(c, ns, job.Name, parallelism, label)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By("choose a node with at least one pod - we will block some network traffic on this node")
|
||||
ginkgo.By("choose a node with at least one pod - we will block some network traffic on this node")
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
nodeName := pods.Items[0].Spec.NodeName
|
||||
|
||||
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// This creates a temporary network partition, verifies that the job has 'parallelism' number of
|
||||
// running pods after the node-controller detects node unreachable.
|
||||
By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
|
||||
ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
|
||||
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
|
||||
framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
|
||||
err := framework.WaitForPodToDisappear(c, ns, pods.Items[0].Name, label, 20*time.Second, 10*time.Minute)
|
||||
Expect(err).To(Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.")
|
||||
gomega.Expect(err).To(gomega.Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.")
|
||||
|
||||
By(fmt.Sprintf("verifying that there are now %v running pods", parallelism))
|
||||
ginkgo.By(fmt.Sprintf("verifying that there are now %v running pods", parallelism))
|
||||
_, err = framework.PodsCreatedByLabel(c, ns, job.Name, parallelism, label)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
})
|
||||
|
||||
framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
|
||||
|
@ -467,8 +467,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
})
|
||||
|
||||
framework.KubeDescribe("Pods", func() {
|
||||
Context("should be evicted from unready Node", func() {
|
||||
BeforeEach(func() {
|
||||
ginkgo.Context("should be evicted from unready Node", func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
framework.SkipUnlessNodeCountIsAtLeast(2)
|
||||
})
|
||||
|
||||
|
@ -478,9 +478,9 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
// 1. Node is marked NotReady after timeout by nodecontroller (40seconds)
|
||||
// 2. All pods on node are marked NotReady shortly after #1
|
||||
// 3. After enough time passess all Pods are evicted from the given Node
|
||||
It("[Feature:TaintEviction] All pods on the unreachable node should be marked as NotReady upon the node turn NotReady "+
|
||||
ginkgo.It("[Feature:TaintEviction] All pods on the unreachable node should be marked as NotReady upon the node turn NotReady "+
|
||||
"AND all pods should be evicted after eviction timeout passes", func() {
|
||||
By("choose a node - we will block all network traffic on this node")
|
||||
ginkgo.By("choose a node - we will block all network traffic on this node")
|
||||
var podOpts metav1.ListOptions
|
||||
nodes := framework.GetReadySchedulableNodesOrDie(c)
|
||||
framework.FilterNodes(nodes, func(node v1.Node) bool {
|
||||
|
@ -542,7 +542,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
maxTolerationTime,
|
||||
)
|
||||
|
||||
By("Set up watch on node status")
|
||||
ginkgo.By("Set up watch on node status")
|
||||
nodeSelector := fields.OneTermEqualSelector("metadata.name", node.Name)
|
||||
stopCh := make(chan struct{})
|
||||
newNode := make(chan *v1.Node)
|
||||
|
@ -564,7 +564,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
cache.ResourceEventHandlerFuncs{
|
||||
UpdateFunc: func(oldObj, newObj interface{}) {
|
||||
n, ok := newObj.(*v1.Node)
|
||||
Expect(ok).To(Equal(true))
|
||||
gomega.Expect(ok).To(gomega.Equal(true))
|
||||
newNode <- n
|
||||
|
||||
},
|
||||
|
@ -578,21 +578,21 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
}()
|
||||
go controller.Run(stopCh)
|
||||
|
||||
By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
|
||||
ginkgo.By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
|
||||
host, err := framework.GetNodeExternalIP(&node)
|
||||
framework.ExpectNoError(err)
|
||||
masterAddresses := framework.GetAllMasterAddresses(c)
|
||||
defer func() {
|
||||
By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
|
||||
ginkgo.By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
|
||||
for _, masterAddress := range masterAddresses {
|
||||
framework.UnblockNetwork(host, masterAddress)
|
||||
}
|
||||
|
||||
if CurrentGinkgoTestDescription().Failed {
|
||||
if ginkgo.CurrentGinkgoTestDescription().Failed {
|
||||
return
|
||||
}
|
||||
|
||||
By("Expect to observe node status change from NotReady to Ready after network connectivity recovers")
|
||||
ginkgo.By("Expect to observe node status change from NotReady to Ready after network connectivity recovers")
|
||||
expectNodeReadiness(true, newNode)
|
||||
}()
|
||||
|
||||
|
@ -600,7 +600,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
framework.BlockNetwork(host, masterAddress)
|
||||
}
|
||||
|
||||
By("Expect to observe node and pod status change from Ready to NotReady after network partition")
|
||||
ginkgo.By("Expect to observe node and pod status change from Ready to NotReady after network partition")
|
||||
expectNodeReadiness(false, newNode)
|
||||
framework.ExpectNoError(wait.Poll(1*time.Second, timeout, func() (bool, error) {
|
||||
return framework.NodeHasTaint(c, node.Name, nodepkg.UnreachableTaintTemplate)
|
||||
|
@ -610,7 +610,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
|||
}
|
||||
|
||||
sleepTime := maxTolerationTime + 20*time.Second
|
||||
By(fmt.Sprintf("Sleeping for %v and checking if all Pods were evicted", sleepTime))
|
||||
ginkgo.By(fmt.Sprintf("Sleeping for %v and checking if all Pods were evicted", sleepTime))
|
||||
time.Sleep(sleepTime)
|
||||
pods, err = c.CoreV1().Pods(v1.NamespaceAll).List(podOpts)
|
||||
framework.ExpectNoError(err)

@ -31,8 +31,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

var _ = SIGDescribe("ReplicationController", func() {
@ -48,7 +48,7 @@ var _ = SIGDescribe("ReplicationController", func() {
TestReplicationControllerServeImageOrFail(f, "basic", framework.ServeHostnameImage)
})

It("should serve a basic image on each replica with a private image", func() {
ginkgo.It("should serve a basic image on each replica with a private image", func() {
// requires private images
framework.SkipUnlessProviderIs("gce", "gke")
privateimage := imageutils.GetConfig(imageutils.ServeHostname)
@ -110,9 +110,9 @@ func newRC(rsName string, replicas int32, rcPodLabels map[string]string, imageNa
|
|||
}
|
||||
}
|
||||
|
||||
// A basic test to check the deployment of an image using
|
||||
// a replication controller. The image serves its hostname
|
||||
// which is checked for each replica.
|
||||
// TestReplicationControllerServeImageOrFail is a basic test to check
|
||||
// the deployment of an image using a replication controller.
|
||||
// The image serves its hostname which is checked for each replica.
|
||||
func TestReplicationControllerServeImageOrFail(f *framework.Framework, test string, image string) {
|
||||
name := "my-hostname-" + test + "-" + string(uuid.NewUUID())
|
||||
replicas := int32(1)
|
||||
|
@ -121,16 +121,16 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
|
|||
// that serves its hostname.
|
||||
// The source for the Docker container kubernetes/serve_hostname is
|
||||
// in contrib/for-demos/serve_hostname
|
||||
By(fmt.Sprintf("Creating replication controller %s", name))
|
||||
ginkgo.By(fmt.Sprintf("Creating replication controller %s", name))
|
||||
newRC := newRC(name, replicas, map[string]string{"name": name}, name, image)
|
||||
newRC.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
|
||||
_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(newRC)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Check that pods for the new RC were created.
|
||||
// TODO: Maybe switch PodsCreated to just check owner references.
|
||||
pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Wait for the pods to enter the running state. Waiting loops until the pods
|
||||
// are running so non-running pods cause a timeout for this test.
|
||||
|
@ -149,14 +149,14 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
|
|||
err = fmt.Errorf("Pod %q never run: %v", pod.Name, err)
|
||||
}
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
|
||||
running++
|
||||
}
|
||||
|
||||
// Sanity check
|
||||
if running != replicas {
|
||||
Expect(fmt.Errorf("unexpected number of running pods: %+v", pods.Items)).NotTo(HaveOccurred())
|
||||
gomega.Expect(fmt.Errorf("unexpected number of running pods: %+v", pods.Items)).NotTo(gomega.HaveOccurred())
|
||||
}
|
||||
|
||||
// Verify that something is listening.
|
||||
|
@ -182,7 +182,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
|
|||
framework.Logf("Creating quota %q that allows only two pods to run in the current namespace", name)
|
||||
quota := newPodQuota(name, "2")
|
||||
_, err := c.CoreV1().ResourceQuotas(namespace).Create(quota)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
quota, err = c.CoreV1().ResourceQuotas(namespace).Get(name, metav1.GetOptions{})
|
||||
|
@ -196,14 +196,14 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
|
|||
if err == wait.ErrWaitTimeout {
|
||||
err = fmt.Errorf("resource quota %q never synced", name)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Creating rc %q that asks for more than the allowed pod quota", name))
|
||||
ginkgo.By(fmt.Sprintf("Creating rc %q that asks for more than the allowed pod quota", name))
|
||||
rc := newRC(name, 3, map[string]string{"name": name}, NginxImageName, NginxImage)
|
||||
rc, err = c.CoreV1().ReplicationControllers(namespace).Create(rc)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Checking rc %q has the desired failure condition set", name))
|
||||
ginkgo.By(fmt.Sprintf("Checking rc %q has the desired failure condition set", name))
|
||||
generation := rc.Generation
|
||||
conditions := rc.Status.Conditions
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
|
@ -223,16 +223,16 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
|
|||
if err == wait.ErrWaitTimeout {
|
||||
err = fmt.Errorf("rc manager never added the failure condition for rc %q: %#v", name, conditions)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Scaling down rc %q to satisfy pod quota", name))
|
||||
ginkgo.By(fmt.Sprintf("Scaling down rc %q to satisfy pod quota", name))
|
||||
rc, err = framework.UpdateReplicationControllerWithRetries(c, namespace, name, func(update *v1.ReplicationController) {
|
||||
x := int32(2)
|
||||
update.Spec.Replicas = &x
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Checking rc %q has no failure condition set", name))
|
||||
ginkgo.By(fmt.Sprintf("Checking rc %q has no failure condition set", name))
|
||||
generation = rc.Generation
|
||||
conditions = rc.Status.Conditions
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
|
@ -252,12 +252,12 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
|
|||
if err == wait.ErrWaitTimeout {
|
||||
err = fmt.Errorf("rc manager never removed the failure condition for rc %q: %#v", name, conditions)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
}
|
||||
|
||||
func testRCAdoptMatchingOrphans(f *framework.Framework) {
|
||||
name := "pod-adoption"
|
||||
By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name))
|
||||
ginkgo.By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name))
|
||||
p := f.PodClient().CreateSync(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
|
@ -275,21 +275,21 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) {
|
|||
},
|
||||
})
|
||||
|
||||
By("When a replication controller with a matching selector is created")
|
||||
ginkgo.By("When a replication controller with a matching selector is created")
|
||||
replicas := int32(1)
|
||||
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, NginxImage)
|
||||
rcSt.Spec.Selector = map[string]string{"name": name}
|
||||
rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By("Then the orphan pod is adopted")
|
||||
ginkgo.By("Then the orphan pod is adopted")
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
|
||||
// The Pod p should either be adopted or deleted by the RC
|
||||
if errors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
for _, owner := range p2.OwnerReferences {
|
||||
if *owner.Controller && owner.UID == rc.UID {
|
||||
// pod adopted
|
||||
|
@ -299,26 +299,26 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) {
|
|||
// pod still not adopted
|
||||
return false, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
}
|
||||
|
||||
func testRCReleaseControlledNotMatching(f *framework.Framework) {
|
||||
name := "pod-release"
|
||||
By("Given a ReplicationController is created")
|
||||
ginkgo.By("Given a ReplicationController is created")
|
||||
replicas := int32(1)
|
||||
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, NginxImage)
|
||||
rcSt.Spec.Selector = map[string]string{"name": name}
|
||||
rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By("When the matched label of one of its pods change")
|
||||
ginkgo.By("When the matched label of one of its pods change")
|
||||
pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rc.Name, replicas)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
p := pods.Items[0]
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
pod.Labels = map[string]string{"name": "not-matching-name"}
|
||||
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod)
|
||||
|
@ -330,12 +330,12 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) {
|
|||
}
|
||||
return true, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By("Then the pod is released")
|
||||
ginkgo.By("Then the pod is released")
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
for _, owner := range p2.OwnerReferences {
|
||||
if *owner.Controller && owner.UID == rc.UID {
|
||||
// pod still belonging to the replication controller
|
||||
|
@ -345,5 +345,5 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) {
|
|||
// pod already released
|
||||
return true, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
}

@ -32,8 +32,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
replicasetutil "k8s.io/kubernetes/test/e2e/framework/replicaset"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image"
)

@ -92,7 +92,7 @@ var _ = SIGDescribe("ReplicaSet", func() {
testReplicaSetServeImageOrFail(f, "basic", framework.ServeHostnameImage)
})

It("should serve a basic image on each replica with a private image", func() {
ginkgo.It("should serve a basic image on each replica with a private image", func() {
// requires private images
framework.SkipUnlessProviderIs("gce", "gke")
privateimage := imageutils.GetConfig(imageutils.ServeHostname)
@ -100,7 +100,7 @@ var _ = SIGDescribe("ReplicaSet", func() {
testReplicaSetServeImageOrFail(f, "private", privateimage.GetE2EImage())
})

It("should surface a failure condition on a common issue like exceeded quota", func() {
ginkgo.It("should surface a failure condition on a common issue like exceeded quota", func() {
testReplicaSetConditionCheck(f)
})

@ -127,12 +127,12 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
|
|||
newRS := newRS(name, replicas, map[string]string{"name": name}, name, image)
|
||||
newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
|
||||
_, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(newRS)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Check that pods for the new RS were created.
|
||||
// TODO: Maybe switch PodsCreated to just check owner references.
|
||||
pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Wait for the pods to enter the running state. Waiting loops until the pods
|
||||
// are running so non-running pods cause a timeout for this test.
|
||||
|
@ -151,14 +151,14 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
|
|||
err = fmt.Errorf("Pod %q never run: %v", pod.Name, err)
|
||||
}
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
|
||||
running++
|
||||
}
|
||||
|
||||
// Sanity check
|
||||
if running != replicas {
|
||||
Expect(fmt.Errorf("unexpected number of running pods: %+v", pods.Items)).NotTo(HaveOccurred())
|
||||
gomega.Expect(fmt.Errorf("unexpected number of running pods: %+v", pods.Items)).NotTo(gomega.HaveOccurred())
|
||||
}
|
||||
|
||||
// Verify that something is listening.
|
||||
|
@ -181,10 +181,10 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
|
|||
namespace := f.Namespace.Name
|
||||
name := "condition-test"
|
||||
|
||||
By(fmt.Sprintf("Creating quota %q that allows only two pods to run in the current namespace", name))
|
||||
ginkgo.By(fmt.Sprintf("Creating quota %q that allows only two pods to run in the current namespace", name))
|
||||
quota := newPodQuota(name, "2")
|
||||
_, err := c.CoreV1().ResourceQuotas(namespace).Create(quota)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
quota, err = c.CoreV1().ResourceQuotas(namespace).Get(name, metav1.GetOptions{})
|
||||
|
@ -198,14 +198,14 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
|
|||
if err == wait.ErrWaitTimeout {
|
||||
err = fmt.Errorf("resource quota %q never synced", name)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", name))
|
||||
ginkgo.By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", name))
|
||||
rs := newRS(name, 3, map[string]string{"name": name}, NginxImageName, NginxImage)
|
||||
rs, err = c.AppsV1().ReplicaSets(namespace).Create(rs)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Checking replica set %q has the desired failure condition set", name))
|
||||
ginkgo.By(fmt.Sprintf("Checking replica set %q has the desired failure condition set", name))
|
||||
generation := rs.Generation
|
||||
conditions := rs.Status.Conditions
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
|
@ -226,16 +226,16 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
|
|||
if err == wait.ErrWaitTimeout {
|
||||
err = fmt.Errorf("rs controller never added the failure condition for replica set %q: %#v", name, conditions)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Scaling down replica set %q to satisfy pod quota", name))
|
||||
ginkgo.By(fmt.Sprintf("Scaling down replica set %q to satisfy pod quota", name))
|
||||
rs, err = replicasetutil.UpdateReplicaSetWithRetries(c, namespace, name, func(update *apps.ReplicaSet) {
|
||||
x := int32(2)
|
||||
update.Spec.Replicas = &x
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Checking replica set %q has no failure condition set", name))
|
||||
ginkgo.By(fmt.Sprintf("Checking replica set %q has no failure condition set", name))
|
||||
generation = rs.Generation
|
||||
conditions = rs.Status.Conditions
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
|
@ -255,12 +255,12 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
|
|||
if err == wait.ErrWaitTimeout {
|
||||
err = fmt.Errorf("rs controller never removed the failure condition for rs %q: %#v", name, conditions)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
}
|
||||
|
||||
func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
|
||||
name := "pod-adoption-release"
|
||||
By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name))
|
||||
ginkgo.By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name))
|
||||
p := f.PodClient().CreateSync(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
|
@ -278,21 +278,21 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
|
|||
},
|
||||
})
|
||||
|
||||
By("When a replicaset with a matching selector is created")
|
||||
ginkgo.By("When a replicaset with a matching selector is created")
|
||||
replicas := int32(1)
|
||||
rsSt := newRS(name, replicas, map[string]string{"name": name}, name, NginxImage)
|
||||
rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}}
|
||||
rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(rsSt)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By("Then the orphan pod is adopted")
|
||||
ginkgo.By("Then the orphan pod is adopted")
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
|
||||
// The Pod p should either be adopted or deleted by the ReplicaSet
|
||||
if errors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
for _, owner := range p2.OwnerReferences {
|
||||
if *owner.Controller && owner.UID == rs.UID {
|
||||
// pod adopted
|
||||
|
@ -302,16 +302,16 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
|
|||
// pod still not adopted
|
||||
return false, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By("When the matched label of one of its pods change")
|
||||
ginkgo.By("When the matched label of one of its pods change")
|
||||
pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rs.Name, replicas)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
p = &pods.Items[0]
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
pod.Labels = map[string]string{"name": "not-matching-name"}
|
||||
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod)
|
||||
|
@ -323,12 +323,12 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
|
|||
}
|
||||
return true, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By("Then the pod is released")
|
||||
ginkgo.By("Then the pod is released")
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
for _, owner := range p2.OwnerReferences {
|
||||
if *owner.Controller && owner.UID == rs.UID {
|
||||
// pod still belonging to the replicaset
|
||||
|
@ -338,5 +338,5 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
|
|||
// pod already released
|
||||
return true, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
}

@ -22,8 +22,8 @@ import (
"strings"
"time"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -56,7 +56,7 @@ var _ = SIGDescribe("StatefulSet", func() {
var ns string
var c clientset.Interface

BeforeEach(func() {
ginkgo.BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
})
@ -71,19 +71,19 @@ var _ = SIGDescribe("StatefulSet", func() {
var statefulPodMounts, podMounts []v1.VolumeMount
var ss *apps.StatefulSet

BeforeEach(func() {
ginkgo.BeforeEach(func() {
statefulPodMounts = []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
podMounts = []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
ss = framework.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels)

By("Creating service " + headlessSvcName + " in namespace " + ns)
ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns)
headlessService := framework.CreateServiceSpec(headlessSvcName, "", true, labels)
_, err := c.CoreV1().Services(ns).Create(headlessService)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
})

AfterEach(func() {
if CurrentGinkgoTestDescription().Failed {
ginkgo.AfterEach(func() {
if ginkgo.CurrentGinkgoTestDescription().Failed {
framework.DumpDebugInfo(c, ns)
}
framework.Logf("Deleting all statefulset in ns %v", ns)
@ -92,47 +92,47 @@ var _ = SIGDescribe("StatefulSet", func() {

// This can't be Conformance yet because it depends on a default
// StorageClass and a dynamic provisioner.
It("should provide basic identity", func() {
By("Creating statefulset " + ssName + " in namespace " + ns)
ginkgo.It("should provide basic identity", func() {
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
*(ss.Spec.Replicas) = 3
sst := framework.NewStatefulSetTester(c)
sst.PauseNewPods(ss)

_, err := c.AppsV1().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())

By("Saturating stateful set " + ss.Name)
ginkgo.By("Saturating stateful set " + ss.Name)
sst.Saturate(ss)

By("Verifying statefulset mounted data directory is usable")
ginkgo.By("Verifying statefulset mounted data directory is usable")
framework.ExpectNoError(sst.CheckMount(ss, "/data"))

By("Verifying statefulset provides a stable hostname for each pod")
ginkgo.By("Verifying statefulset provides a stable hostname for each pod")
framework.ExpectNoError(sst.CheckHostname(ss))

By("Verifying statefulset set proper service name")
ginkgo.By("Verifying statefulset set proper service name")
framework.ExpectNoError(sst.CheckServiceName(ss, headlessSvcName))

cmd := "echo $(hostname) | dd of=/data/hostname conv=fsync"
By("Running " + cmd + " in all stateful pods")
ginkgo.By("Running " + cmd + " in all stateful pods")
framework.ExpectNoError(sst.ExecInStatefulPods(ss, cmd))

By("Restarting statefulset " + ss.Name)
ginkgo.By("Restarting statefulset " + ss.Name)
sst.Restart(ss)
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)

By("Verifying statefulset mounted data directory is usable")
ginkgo.By("Verifying statefulset mounted data directory is usable")
framework.ExpectNoError(sst.CheckMount(ss, "/data"))

cmd = "if [ \"$(cat /data/hostname)\" = \"$(hostname)\" ]; then exit 0; else exit 1; fi"
By("Running " + cmd + " in all stateful pods")
ginkgo.By("Running " + cmd + " in all stateful pods")
framework.ExpectNoError(sst.ExecInStatefulPods(ss, cmd))
})

// This can't be Conformance yet because it depends on a default
// StorageClass and a dynamic provisioner.
It("should adopt matching orphans and release non-matching pods", func() {
By("Creating statefulset " + ssName + " in namespace " + ns)
ginkgo.It("should adopt matching orphans and release non-matching pods", func() {
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
*(ss.Spec.Replicas) = 1
sst := framework.NewStatefulSetTester(c)
sst.PauseNewPods(ss)
@ -141,29 +141,29 @@ var _ = SIGDescribe("StatefulSet", func() {
|
|||
// Save Kind since it won't be populated in the returned ss.
|
||||
kind := ss.Kind
|
||||
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
ss.Kind = kind
|
||||
|
||||
By("Saturating stateful set " + ss.Name)
|
||||
ginkgo.By("Saturating stateful set " + ss.Name)
|
||||
sst.Saturate(ss)
|
||||
pods := sst.GetPodList(ss)
|
||||
Expect(pods.Items).To(HaveLen(int(*ss.Spec.Replicas)))
|
||||
gomega.Expect(pods.Items).To(gomega.HaveLen(int(*ss.Spec.Replicas)))
|
||||
|
||||
By("Checking that stateful set pods are created with ControllerRef")
|
||||
ginkgo.By("Checking that stateful set pods are created with ControllerRef")
|
||||
pod := pods.Items[0]
|
||||
controllerRef := metav1.GetControllerOf(&pod)
|
||||
Expect(controllerRef).ToNot(BeNil())
|
||||
Expect(controllerRef.Kind).To(Equal(ss.Kind))
|
||||
Expect(controllerRef.Name).To(Equal(ss.Name))
|
||||
Expect(controllerRef.UID).To(Equal(ss.UID))
|
||||
gomega.Expect(controllerRef).ToNot(gomega.BeNil())
|
||||
gomega.Expect(controllerRef.Kind).To(gomega.Equal(ss.Kind))
|
||||
gomega.Expect(controllerRef.Name).To(gomega.Equal(ss.Name))
|
||||
gomega.Expect(controllerRef.UID).To(gomega.Equal(ss.UID))
|
||||
|
||||
By("Orphaning one of the stateful set's pods")
|
||||
ginkgo.By("Orphaning one of the stateful set's pods")
|
||||
f.PodClient().Update(pod.Name, func(pod *v1.Pod) {
|
||||
pod.OwnerReferences = nil
|
||||
})
|
||||
|
||||
By("Checking that the stateful set readopts the pod")
|
||||
Expect(framework.WaitForPodCondition(c, pod.Namespace, pod.Name, "adopted", framework.StatefulSetTimeout,
|
||||
ginkgo.By("Checking that the stateful set readopts the pod")
|
||||
gomega.Expect(framework.WaitForPodCondition(c, pod.Namespace, pod.Name, "adopted", framework.StatefulSetTimeout,
|
||||
func(pod *v1.Pod) (bool, error) {
|
||||
controllerRef := metav1.GetControllerOf(pod)
|
||||
if controllerRef == nil {
|
||||
|
@ -174,16 +174,16 @@ var _ = SIGDescribe("StatefulSet", func() {
|
|||
}
|
||||
return true, nil
|
||||
},
|
||||
)).To(Succeed(), "wait for pod %q to be readopted", pod.Name)
|
||||
)).To(gomega.Succeed(), "wait for pod %q to be readopted", pod.Name)
|
||||
|
||||
By("Removing the labels from one of the stateful set's pods")
|
||||
ginkgo.By("Removing the labels from one of the stateful set's pods")
|
||||
prevLabels := pod.Labels
|
||||
f.PodClient().Update(pod.Name, func(pod *v1.Pod) {
|
||||
pod.Labels = nil
|
||||
})
|
||||
|
||||
By("Checking that the stateful set releases the pod")
|
||||
Expect(framework.WaitForPodCondition(c, pod.Namespace, pod.Name, "released", framework.StatefulSetTimeout,
|
||||
ginkgo.By("Checking that the stateful set releases the pod")
|
||||
gomega.Expect(framework.WaitForPodCondition(c, pod.Namespace, pod.Name, "released", framework.StatefulSetTimeout,
|
||||
func(pod *v1.Pod) (bool, error) {
|
||||
controllerRef := metav1.GetControllerOf(pod)
|
||||
if controllerRef != nil {
|
||||
|
@ -191,16 +191,16 @@ var _ = SIGDescribe("StatefulSet", func() {
|
|||
}
|
||||
return true, nil
|
||||
},
|
||||
)).To(Succeed(), "wait for pod %q to be released", pod.Name)
|
||||
)).To(gomega.Succeed(), "wait for pod %q to be released", pod.Name)
|
||||
|
||||
// If we don't do this, the test leaks the Pod and PVC.
|
||||
By("Readding labels to the stateful set's pod")
|
||||
ginkgo.By("Readding labels to the stateful set's pod")
|
||||
f.PodClient().Update(pod.Name, func(pod *v1.Pod) {
|
||||
pod.Labels = prevLabels
|
||||
})
|
||||
|
||||
By("Checking that the stateful set readopts the pod")
|
||||
Expect(framework.WaitForPodCondition(c, pod.Namespace, pod.Name, "adopted", framework.StatefulSetTimeout,
|
||||
ginkgo.By("Checking that the stateful set readopts the pod")
|
||||
gomega.Expect(framework.WaitForPodCondition(c, pod.Namespace, pod.Name, "adopted", framework.StatefulSetTimeout,
|
||||
func(pod *v1.Pod) (bool, error) {
|
||||
controllerRef := metav1.GetControllerOf(pod)
|
||||
if controllerRef == nil {
|
||||
|
@ -211,49 +211,49 @@ var _ = SIGDescribe("StatefulSet", func() {
|
|||
}
|
||||
return true, nil
|
||||
},
|
||||
)).To(Succeed(), "wait for pod %q to be readopted", pod.Name)
|
||||
)).To(gomega.Succeed(), "wait for pod %q to be readopted", pod.Name)
|
||||
})
|
||||
|
||||
// This can't be Conformance yet because it depends on a default
|
||||
// StorageClass and a dynamic provisioner.
|
||||
It("should not deadlock when a pod's predecessor fails", func() {
|
||||
By("Creating statefulset " + ssName + " in namespace " + ns)
|
||||
ginkgo.It("should not deadlock when a pod's predecessor fails", func() {
|
||||
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
|
||||
*(ss.Spec.Replicas) = 2
|
||||
sst := framework.NewStatefulSetTester(c)
|
||||
sst.PauseNewPods(ss)
|
||||
|
||||
_, err := c.AppsV1().StatefulSets(ns).Create(ss)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
sst.WaitForRunning(1, 0, ss)
|
||||
|
||||
By("Resuming stateful pod at index 0.")
|
||||
ginkgo.By("Resuming stateful pod at index 0.")
|
||||
sst.ResumeNextPod(ss)
|
||||
|
||||
By("Waiting for stateful pod at index 1 to enter running.")
|
||||
ginkgo.By("Waiting for stateful pod at index 1 to enter running.")
|
||||
sst.WaitForRunning(2, 1, ss)
|
||||
|
||||
// Now we have 1 healthy and 1 unhealthy stateful pod. Deleting the healthy stateful pod should *not*
|
||||
// create a new stateful pod till the remaining stateful pod becomes healthy, which won't happen till
|
||||
// we set the healthy bit.
|
||||
|
||||
By("Deleting healthy stateful pod at index 0.")
|
||||
ginkgo.By("Deleting healthy stateful pod at index 0.")
|
||||
sst.DeleteStatefulPodAtIndex(0, ss)
|
||||
|
||||
By("Confirming stateful pod at index 0 is recreated.")
|
||||
ginkgo.By("Confirming stateful pod at index 0 is recreated.")
|
||||
sst.WaitForRunning(2, 1, ss)
|
||||
|
||||
By("Resuming stateful pod at index 1.")
|
||||
ginkgo.By("Resuming stateful pod at index 1.")
|
||||
sst.ResumeNextPod(ss)
|
||||
|
||||
By("Confirming all stateful pods in statefulset are created.")
|
||||
ginkgo.By("Confirming all stateful pods in statefulset are created.")
|
||||
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
|
||||
})
|
||||
|
||||
// This can't be Conformance yet because it depends on a default
|
||||
// StorageClass and a dynamic provisioner.
|
||||
It("should perform rolling updates and roll backs of template modifications with PVCs", func() {
|
||||
By("Creating a new StatefulSet with PVCs")
|
||||
ginkgo.It("should perform rolling updates and roll backs of template modifications with PVCs", func() {
|
||||
ginkgo.By("Creating a new StatefulSet with PVCs")
|
||||
*(ss.Spec.Replicas) = 3
|
||||
rollbackTest(c, ns, ss)
|
||||
})
|
||||
|
@ -264,7 +264,7 @@ var _ = SIGDescribe("StatefulSet", func() {
Description: StatefulSet MUST support the RollingUpdate strategy to automatically replace Pods one at a time when the Pod template changes. The StatefulSet's status MUST indicate the CurrentRevision and UpdateRevision. If the template is changed to match a prior revision, StatefulSet MUST detect this as a rollback instead of creating a new revision. This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
*/
framework.ConformanceIt("should perform rolling updates and roll backs of template modifications", func() {
By("Creating a new StatefulSet")
ginkgo.By("Creating a new StatefulSet")
ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
rollbackTest(c, ns, ss)
})
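The Description above leans on the CurrentRevision and UpdateRevision fields of the StatefulSet status. A small sketch of how those fields can be compared to decide that a rollout has converged; this is an illustration, not code from the patch, with field names taken from k8s.io/api/apps/v1:

package example

import (
	apps "k8s.io/api/apps/v1"
)

// rolloutComplete reports whether ss has converged on its update revision:
// the controller has observed the latest spec, every replica runs the update
// revision, and that revision has been promoted to current.
func rolloutComplete(ss *apps.StatefulSet) bool {
	if ss.Spec.Replicas == nil {
		return false
	}
	return ss.Status.ObservedGeneration >= ss.Generation &&
		ss.Status.UpdatedReplicas == *ss.Spec.Replicas &&
		ss.Status.CurrentRevision == ss.Status.UpdateRevision
}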
@ -275,7 +275,7 @@ var _ = SIGDescribe("StatefulSet", func() {
Description: StatefulSet's RollingUpdate strategy MUST support the Partition parameter for canaries and phased rollouts. If a Pod is deleted while a rolling update is in progress, StatefulSet MUST restore the Pod without violating the Partition. This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
*/
framework.ConformanceIt("should perform canary updates and phased rolling updates of template modifications", func() {
By("Creating a new StatefulSet")
ginkgo.By("Creating a new StatefulSet")
ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
sst := framework.NewStatefulSetTester(c)
sst.SetHTTPProbe(ss)
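The canary test below builds its partitioned RollingUpdate strategy inline; a compact sketch of the same shape, shown here only as an illustration of the Partition field the Description above refers to:

package example

import (
	apps "k8s.io/api/apps/v1"
)

// partitionedStrategy returns a RollingUpdate strategy that only updates pods
// whose ordinal is at least `partition`; lower ordinals keep the old revision
// until the partition is lowered.
func partitionedStrategy(partition int32) apps.StatefulSetUpdateStrategy {
	return apps.StatefulSetUpdateStrategy{
		Type: apps.RollingUpdateStatefulSetStrategyType,
		RollingUpdate: &apps.RollingUpdateStatefulSetStrategy{
			Partition: &partition,
		},
	}
}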
@ -290,16 +290,16 @@ var _ = SIGDescribe("StatefulSet", func() {
}(),
}
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).To(Equal(updateRevision),
gomega.Expect(currentRevision).To(gomega.Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
ss.Namespace, ss.Name, updateRevision, currentRevision))
pods := sst.GetPodList(ss)
for i := range pods.Items {
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision),
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to currentRevision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,

@ -309,28 +309,28 @@ var _ = SIGDescribe("StatefulSet", func() {
newImage := NewNginxImage
oldImage := ss.Spec.Template.Spec.Containers[0].Image

By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage))
Expect(oldImage).NotTo(Equal(newImage), "Incorrect test setup: should update to a different image")
ginkgo.By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage))
gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image")
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())

By("Creating a new revision")
ginkgo.By("Creating a new revision")
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).NotTo(Equal(updateRevision),
gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision),
"Current revision should not equal update revision during rolling update")

By("Not applying an update when the partition is greater than the number of replicas")
ginkgo.By("Not applying an update when the partition is greater than the number of replicas")
for i := range pods.Items {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(oldImage),
gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(oldImage),
fmt.Sprintf("Pod %s/%s has image %s not equal to current image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
oldImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision),
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,

@ -338,7 +338,7 @@ var _ = SIGDescribe("StatefulSet", func() {
currentRevision))
}

By("Performing a canary update")
ginkgo.By("Performing a canary update")
ss.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
Type: apps.RollingUpdateStatefulSetStrategyType,
RollingUpdate: func() *apps.RollingUpdateStatefulSetStrategy {

@ -361,30 +361,30 @@ var _ = SIGDescribe("StatefulSet", func() {
}(),
}
})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ss, pods = sst.WaitForPartitionedRollingUpdate(ss)
for i := range pods.Items {
if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(oldImage),
gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(oldImage),
fmt.Sprintf("Pod %s/%s has image %s not equal to current image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
oldImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision),
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
currentRevision))
} else {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(newImage),
gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage),
fmt.Sprintf("Pod %s/%s has image %s not equal to new image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
newImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(updateRevision),
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
fmt.Sprintf("Pod %s/%s has revision %s not equal to new revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,

@ -393,7 +393,7 @@ var _ = SIGDescribe("StatefulSet", func() {
}
}

By("Restoring Pods to the correct revision when they are deleted")
ginkgo.By("Restoring Pods to the correct revision when they are deleted")
sst.DeleteStatefulPodAtIndex(0, ss)
sst.DeleteStatefulPodAtIndex(2, ss)
sst.WaitForRunningAndReady(3, ss)

@ -401,26 +401,26 @@ var _ = SIGDescribe("StatefulSet", func() {
pods = sst.GetPodList(ss)
for i := range pods.Items {
if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(oldImage),
gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(oldImage),
fmt.Sprintf("Pod %s/%s has image %s not equal to current image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
oldImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision),
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
currentRevision))
} else {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(newImage),
gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage),
fmt.Sprintf("Pod %s/%s has image %s not equal to new image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
newImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(updateRevision),
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
fmt.Sprintf("Pod %s/%s has revision %s not equal to new revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,

@ -429,7 +429,7 @@ var _ = SIGDescribe("StatefulSet", func() {
}
}

By("Performing a phased rolling update")
ginkgo.By("Performing a phased rolling update")
for i := int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) - 1; i >= 0; i-- {
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{

@ -442,30 +442,30 @@ var _ = SIGDescribe("StatefulSet", func() {
}(),
}
})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ss, pods = sst.WaitForPartitionedRollingUpdate(ss)
for i := range pods.Items {
if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(oldImage),
gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(oldImage),
fmt.Sprintf("Pod %s/%s has image %s not equal to current image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
oldImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision),
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
currentRevision))
} else {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(newImage),
gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage),
fmt.Sprintf("Pod %s/%s has image %s not equal to new image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
newImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(updateRevision),
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
fmt.Sprintf("Pod %s/%s has revision %s not equal to new revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,

@ -474,7 +474,7 @@ var _ = SIGDescribe("StatefulSet", func() {
}
}
}
Expect(ss.Status.CurrentRevision).To(Equal(updateRevision),
gomega.Expect(ss.Status.CurrentRevision).To(gomega.Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
ss.Namespace,
ss.Name,

@ -485,8 +485,8 @@ var _ = SIGDescribe("StatefulSet", func() {

// Do not mark this as Conformance.
// The legacy OnDelete strategy only exists for backward compatibility with pre-v1 APIs.
It("should implement legacy replacement when the update strategy is OnDelete", func() {
By("Creating a new StatefulSet")
ginkgo.It("should implement legacy replacement when the update strategy is OnDelete", func() {
ginkgo.By("Creating a new StatefulSet")
ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
sst := framework.NewStatefulSetTester(c)
sst.SetHTTPProbe(ss)

@ -494,16 +494,16 @@ var _ = SIGDescribe("StatefulSet", func() {
Type: apps.OnDeleteStatefulSetStrategyType,
}
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).To(Equal(updateRevision),
gomega.Expect(currentRevision).To(gomega.Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
ss.Namespace, ss.Name, updateRevision, currentRevision))
pods := sst.GetPodList(ss)
for i := range pods.Items {
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision),
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,

@ -511,7 +511,7 @@ var _ = SIGDescribe("StatefulSet", func() {
currentRevision))
}

By("Restoring Pods to the current revision")
ginkgo.By("Restoring Pods to the current revision")
sst.DeleteStatefulPodAtIndex(0, ss)
sst.DeleteStatefulPodAtIndex(1, ss)
sst.DeleteStatefulPodAtIndex(2, ss)

@ -519,7 +519,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ss = sst.GetStatefulSet(ss.Namespace, ss.Name)
pods = sst.GetPodList(ss)
for i := range pods.Items {
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision),
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,

@ -529,20 +529,20 @@ var _ = SIGDescribe("StatefulSet", func() {
newImage := NewNginxImage
oldImage := ss.Spec.Template.Spec.Containers[0].Image

By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage))
Expect(oldImage).NotTo(Equal(newImage), "Incorrect test setup: should update to a different image")
ginkgo.By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage))
gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image")
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())

By("Creating a new revision")
ginkgo.By("Creating a new revision")
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).NotTo(Equal(updateRevision),
gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision),
"Current revision should not equal update revision during rolling update")

By("Recreating Pods at the new revision")
ginkgo.By("Recreating Pods at the new revision")
sst.DeleteStatefulPodAtIndex(0, ss)
sst.DeleteStatefulPodAtIndex(1, ss)
sst.DeleteStatefulPodAtIndex(2, ss)

@ -550,13 +550,13 @@ var _ = SIGDescribe("StatefulSet", func() {
ss = sst.GetStatefulSet(ss.Namespace, ss.Name)
pods = sst.GetPodList(ss)
for i := range pods.Items {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(newImage),
gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage),
fmt.Sprintf("Pod %s/%s has image %s not equal to new image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
newImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(updateRevision),
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
@ -572,34 +572,34 @@ var _ = SIGDescribe("StatefulSet", func() {
*/
framework.ConformanceIt("Scaling should happen in predictable order and halt if any stateful pod is unhealthy", func() {
psLabels := klabels.Set(labels)
By("Initializing watcher for selector " + psLabels.String())
ginkgo.By("Initializing watcher for selector " + psLabels.String())
watcher, err := f.ClientSet.CoreV1().Pods(ns).Watch(metav1.ListOptions{
LabelSelector: psLabels.AsSelector().String(),
})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())

By("Creating stateful set " + ssName + " in namespace " + ns)
ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns)
ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels)
sst := framework.NewStatefulSetTester(c)
sst.SetHTTPProbe(ss)
ss, err = c.AppsV1().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())

By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)

By("Confirming that stateful set scale up will halt with unhealthy stateful pod")
ginkgo.By("Confirming that stateful set scale up will halt with unhealthy stateful pod")
sst.BreakHTTPProbe(ss)
sst.WaitForRunningAndNotReady(*ss.Spec.Replicas, ss)
sst.WaitForStatusReadyReplicas(ss, 0)
sst.UpdateReplicas(ss, 3)
sst.ConfirmStatefulPodCount(1, ss, 10*time.Second, true)

By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns)
ginkgo.By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns)
sst.RestoreHTTPProbe(ss)
sst.WaitForRunningAndReady(3, ss)

By("Verifying that stateful set " + ssName + " was scaled up in order")
ginkgo.By("Verifying that stateful set " + ssName + " was scaled up in order")
expectedOrder := []string{ssName + "-0", ssName + "-1", ssName + "-2"}
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulSetTimeout)
defer cancel()

@ -614,13 +614,13 @@ var _ = SIGDescribe("StatefulSet", func() {
return len(expectedOrder) == 0, nil

})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())

By("Scale down will halt with unhealthy stateful pod")
ginkgo.By("Scale down will halt with unhealthy stateful pod")
watcher, err = f.ClientSet.CoreV1().Pods(ns).Watch(metav1.ListOptions{
LabelSelector: psLabels.AsSelector().String(),
})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())

sst.BreakHTTPProbe(ss)
sst.WaitForStatusReadyReplicas(ss, 0)

@ -628,11 +628,11 @@ var _ = SIGDescribe("StatefulSet", func() {
sst.UpdateReplicas(ss, 0)
sst.ConfirmStatefulPodCount(3, ss, 10*time.Second, true)

By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns)
ginkgo.By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns)
sst.RestoreHTTPProbe(ss)
sst.Scale(ss, 0)

By("Verifying that stateful set " + ssName + " was scaled down in reverse order")
ginkgo.By("Verifying that stateful set " + ssName + " was scaled down in reverse order")
expectedOrder = []string{ssName + "-2", ssName + "-1", ssName + "-0"}
ctx, cancel = watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulSetTimeout)
defer cancel()

@ -647,7 +647,7 @@ var _ = SIGDescribe("StatefulSet", func() {
return len(expectedOrder) == 0, nil

})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
})

/*

@ -658,36 +658,36 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ConformanceIt("Burst scaling should run to completion even with unhealthy pods", func() {
psLabels := klabels.Set(labels)

By("Creating stateful set " + ssName + " in namespace " + ns)
ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns)
ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels)
ss.Spec.PodManagementPolicy = apps.ParallelPodManagement
sst := framework.NewStatefulSetTester(c)
sst.SetHTTPProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())

By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)

By("Confirming that stateful set scale up will not halt with unhealthy stateful pod")
ginkgo.By("Confirming that stateful set scale up will not halt with unhealthy stateful pod")
sst.BreakHTTPProbe(ss)
sst.WaitForRunningAndNotReady(*ss.Spec.Replicas, ss)
sst.WaitForStatusReadyReplicas(ss, 0)
sst.UpdateReplicas(ss, 3)
sst.ConfirmStatefulPodCount(3, ss, 10*time.Second, false)

By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns)
ginkgo.By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns)
sst.RestoreHTTPProbe(ss)
sst.WaitForRunningAndReady(3, ss)

By("Scale down will not halt with unhealthy stateful pod")
ginkgo.By("Scale down will not halt with unhealthy stateful pod")
sst.BreakHTTPProbe(ss)
sst.WaitForStatusReadyReplicas(ss, 0)
sst.WaitForRunningAndNotReady(3, ss)
sst.UpdateReplicas(ss, 0)
sst.ConfirmStatefulPodCount(0, ss, 10*time.Second, false)

By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns)
ginkgo.By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns)
sst.RestoreHTTPProbe(ss)
sst.Scale(ss, 0)
sst.WaitForStatusReplicas(ss, 0)

@ -701,11 +701,11 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ConformanceIt("Should recreate evicted statefulset", func() {
podName := "test-pod"
statefulPodName := ssName + "-0"
By("Looking for a node to schedule stateful set and pod")
ginkgo.By("Looking for a node to schedule stateful set and pod")
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
node := nodes.Items[0]

By("Creating pod with conflicting port in namespace " + f.Namespace.Name)
ginkgo.By("Creating pod with conflicting port in namespace " + f.Namespace.Name)
conflictingPort := v1.ContainerPort{HostPort: 21017, ContainerPort: 21017, Name: "conflict"}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{

@ -725,7 +725,7 @@ var _ = SIGDescribe("StatefulSet", func() {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
framework.ExpectNoError(err)

By("Creating statefulset with conflicting port in namespace " + f.Namespace.Name)
ginkgo.By("Creating statefulset with conflicting port in namespace " + f.Namespace.Name)
ss := framework.NewStatefulSet(ssName, f.Namespace.Name, headlessSvcName, 1, nil, nil, labels)
statefulPodContainer := &ss.Spec.Template.Spec.Containers[0]
statefulPodContainer.Ports = append(statefulPodContainer.Ports, conflictingPort)

@ -733,13 +733,13 @@ var _ = SIGDescribe("StatefulSet", func() {
_, err = f.ClientSet.AppsV1().StatefulSets(f.Namespace.Name).Create(ss)
framework.ExpectNoError(err)

By("Waiting until pod " + podName + " will start running in namespace " + f.Namespace.Name)
ginkgo.By("Waiting until pod " + podName + " will start running in namespace " + f.Namespace.Name)
if err := f.WaitForPodRunning(podName); err != nil {
framework.Failf("Pod %v did not start running: %v", podName, err)
}

var initialStatefulPodUID types.UID
By("Waiting until stateful pod " + statefulPodName + " will be recreated and deleted at least once in namespace " + f.Namespace.Name)
ginkgo.By("Waiting until stateful pod " + statefulPodName + " will be recreated and deleted at least once in namespace " + f.Namespace.Name)
w, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: statefulPodName}))
framework.ExpectNoError(err)
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulPodTimeout)

@ -764,13 +764,13 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.Failf("Pod %v expected to be re-created at least once", statefulPodName)
}

By("Removing pod with conflicting port in namespace " + f.Namespace.Name)
ginkgo.By("Removing pod with conflicting port in namespace " + f.Namespace.Name)
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)

By("Waiting when stateful pod " + statefulPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state")
ginkgo.By("Waiting when stateful pod " + statefulPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state")
// we may catch delete event, that's why we are waiting for running phase like this, and not with watchtools.UntilWithoutRetry
Eventually(func() error {
gomega.Eventually(func() error {
statefulPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(statefulPodName, metav1.GetOptions{})
if err != nil {
return err

@ -781,41 +781,41 @@ var _ = SIGDescribe("StatefulSet", func() {
return fmt.Errorf("Pod %v wasn't recreated: %v == %v", statefulPod.Name, statefulPod.UID, initialStatefulPodUID)
}
return nil
}, framework.StatefulPodTimeout, 2*time.Second).Should(BeNil())
}, framework.StatefulPodTimeout, 2*time.Second).Should(gomega.BeNil())
})

It("should have a working scale subresource", func() {
By("Creating statefulset " + ssName + " in namespace " + ns)
ginkgo.It("should have a working scale subresource", func() {
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, labels)
sst := framework.NewStatefulSetTester(c)
sst.SetHTTPProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)

By("getting scale subresource")
ginkgo.By("getting scale subresource")
scale, err := c.AppsV1().StatefulSets(ns).GetScale(ssName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get scale subresource: %v", err)
}
Expect(scale.Spec.Replicas).To(Equal(int32(1)))
Expect(scale.Status.Replicas).To(Equal(int32(1)))
gomega.Expect(scale.Spec.Replicas).To(gomega.Equal(int32(1)))
gomega.Expect(scale.Status.Replicas).To(gomega.Equal(int32(1)))

By("updating a scale subresource")
ginkgo.By("updating a scale subresource")
scale.Spec.Replicas = 2
scaleResult, err := c.AppsV1().StatefulSets(ns).UpdateScale(ssName, scale)
if err != nil {
framework.Failf("Failed to put scale subresource: %v", err)
}
Expect(scaleResult.Spec.Replicas).To(Equal(int32(2)))
gomega.Expect(scaleResult.Spec.Replicas).To(gomega.Equal(int32(2)))

By("verifying the statefulset Spec.Replicas was modified")
ginkgo.By("verifying the statefulset Spec.Replicas was modified")
ss, err = c.AppsV1().StatefulSets(ns).Get(ssName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get statefulset resource: %v", err)
}
Expect(*(ss.Spec.Replicas)).To(Equal(int32(2)))
gomega.Expect(*(ss.Spec.Replicas)).To(gomega.Equal(int32(2)))
})
})

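The bodies of the watch callbacks used for the in-order scale-up and scale-down checks above are outside the visible context of this diff. A sketch of what such a condition can look like — an assumption, not the test's actual helper:

package example

import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/watch"
)

// nextInOrder returns a watch condition that succeeds once every name in
// expected has been observed entering the Running phase, in the given order.
func nextInOrder(expected []string) func(event watch.Event) (bool, error) {
	return func(event watch.Event) (bool, error) {
		pod, ok := event.Object.(*v1.Pod)
		if !ok || pod.Status.Phase != v1.PodRunning {
			return false, nil
		}
		if len(expected) > 0 && pod.Name == expected[0] {
			expected = expected[1:]
		}
		return len(expected) == 0, nil
	}
}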
@ -823,13 +823,13 @@ var _ = SIGDescribe("StatefulSet", func() {
var sst *framework.StatefulSetTester
var appTester *clusterAppTester

BeforeEach(func() {
ginkgo.BeforeEach(func() {
sst = framework.NewStatefulSetTester(c)
appTester = &clusterAppTester{tester: sst, ns: ns}
})

AfterEach(func() {
if CurrentGinkgoTestDescription().Failed {
ginkgo.AfterEach(func() {
if ginkgo.CurrentGinkgoTestDescription().Failed {
framework.DumpDebugInfo(c, ns)
}
framework.Logf("Deleting all statefulset in ns %v", ns)

@ -838,28 +838,28 @@ var _ = SIGDescribe("StatefulSet", func() {

// Do not mark this as Conformance.
// StatefulSet Conformance should not be dependent on specific applications.
It("should creating a working zookeeper cluster", func() {
ginkgo.It("should creating a working zookeeper cluster", func() {
appTester.statefulPod = &zookeeperTester{tester: sst}
appTester.run()
})

// Do not mark this as Conformance.
// StatefulSet Conformance should not be dependent on specific applications.
It("should creating a working redis cluster", func() {
ginkgo.It("should creating a working redis cluster", func() {
appTester.statefulPod = &redisTester{tester: sst}
appTester.run()
})

// Do not mark this as Conformance.
// StatefulSet Conformance should not be dependent on specific applications.
It("should creating a working mysql cluster", func() {
ginkgo.It("should creating a working mysql cluster", func() {
appTester.statefulPod = &mysqlGaleraTester{tester: sst}
appTester.run()
})

// Do not mark this as Conformance.
// StatefulSet Conformance should not be dependent on specific applications.
It("should creating a working CockroachDB cluster", func() {
ginkgo.It("should creating a working CockroachDB cluster", func() {
appTester.statefulPod = &cockroachDBTester{tester: sst}
appTester.run()
})

@ -892,10 +892,10 @@ type clusterAppTester struct {
}

func (c *clusterAppTester) run() {
By("Deploying " + c.statefulPod.name())
ginkgo.By("Deploying " + c.statefulPod.name())
ss := c.statefulPod.deploy(c.ns)

By("Creating foo:bar in member with index 0")
ginkgo.By("Creating foo:bar in member with index 0")
c.statefulPod.write(0, map[string]string{"foo": "bar"})

switch c.statefulPod.(type) {

@ -903,13 +903,13 @@ func (c *clusterAppTester) run() {
// Don't restart MySQL cluster since it doesn't handle restarts well
default:
if restartCluster {
By("Restarting stateful set " + ss.Name)
ginkgo.By("Restarting stateful set " + ss.Name)
c.tester.Restart(ss)
c.tester.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
}
}

By("Reading value under foo from member with index 2")
ginkgo.By("Reading value under foo from member with index 2")
if err := pollReadWithTimeout(c.statefulPod, 2, "foo", "bar"); err != nil {
framework.Failf("%v", err)
}

@ -1085,16 +1085,16 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
sst := framework.NewStatefulSetTester(c)
sst.SetHTTPProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).To(Equal(updateRevision),
gomega.Expect(currentRevision).To(gomega.Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
ss.Namespace, ss.Name, updateRevision, currentRevision))
pods := sst.GetPodList(ss)
for i := range pods.Items {
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision),
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,

@ -1103,45 +1103,45 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
}
sst.SortStatefulPods(pods)
err = sst.BreakPodHTTPProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
newImage := NewNginxImage
oldImage := ss.Spec.Template.Spec.Containers[0].Image

By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage))
Expect(oldImage).NotTo(Equal(newImage), "Incorrect test setup: should update to a different image")
ginkgo.By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage))
gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image")
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())

By("Creating a new revision")
ginkgo.By("Creating a new revision")
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).NotTo(Equal(updateRevision),
gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision),
"Current revision should not equal update revision during rolling update")

By("Updating Pods in reverse ordinal order")
ginkgo.By("Updating Pods in reverse ordinal order")
pods = sst.GetPodList(ss)
sst.SortStatefulPods(pods)
err = sst.RestorePodHTTPProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
ss, pods = sst.WaitForRollingUpdate(ss)
Expect(ss.Status.CurrentRevision).To(Equal(updateRevision),
gomega.Expect(ss.Status.CurrentRevision).To(gomega.Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
ss.Namespace,
ss.Name,
ss.Status.CurrentRevision,
updateRevision))
for i := range pods.Items {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(newImage),
gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage),
fmt.Sprintf(" Pod %s/%s has image %s not have new image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
newImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(updateRevision),
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to update revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,

@ -1149,30 +1149,30 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
updateRevision))
}

By("Rolling back to a previous revision")
ginkgo.By("Rolling back to a previous revision")
err = sst.BreakPodHTTPProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
priorRevision := currentRevision
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = oldImage
})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).NotTo(Equal(updateRevision),
gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision),
"Current revision should not equal update revision during roll back")
Expect(priorRevision).To(Equal(updateRevision),
gomega.Expect(priorRevision).To(gomega.Equal(updateRevision),
"Prior revision should equal update revision during roll back")

By("Rolling back update in reverse ordinal order")
ginkgo.By("Rolling back update in reverse ordinal order")
pods = sst.GetPodList(ss)
sst.SortStatefulPods(pods)
sst.RestorePodHTTPProbe(ss, &pods.Items[1])
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
ss, pods = sst.WaitForRollingUpdate(ss)
Expect(ss.Status.CurrentRevision).To(Equal(priorRevision),
gomega.Expect(ss.Status.CurrentRevision).To(gomega.Equal(priorRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion",
ss.Namespace,
ss.Name,

@ -1180,13 +1180,13 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
updateRevision))

for i := range pods.Items {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(oldImage),
gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(oldImage),
fmt.Sprintf("Pod %s/%s has image %s not equal to previous image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
oldImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(priorRevision),
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(priorRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to prior revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,

@ -28,11 +28,24 @@ const (
)

var (
// CronJobGroupVersionResourceAlpha unambiguously identifies a resource of cronjob with alpha status
CronJobGroupVersionResourceAlpha = schema.GroupVersionResource{Group: "batch", Version: "v2alpha1", Resource: "cronjobs"}
CronJobGroupVersionResourceBeta = schema.GroupVersionResource{Group: "batch", Version: "v1beta1", Resource: "cronjobs"}
NautilusImage = imageutils.GetE2EImage(imageutils.Nautilus)
KittenImage = imageutils.GetE2EImage(imageutils.Kitten)
NginxImage = imageutils.GetE2EImage(imageutils.Nginx)
NewNginxImage = imageutils.GetE2EImage(imageutils.NginxNew)
RedisImage = imageutils.GetE2EImage(imageutils.Redis)

// CronJobGroupVersionResourceBeta unambiguously identifies a resource of cronjob with beta status
CronJobGroupVersionResourceBeta = schema.GroupVersionResource{Group: "batch", Version: "v1beta1", Resource: "cronjobs"}

// NautilusImage is the fully qualified URI to the Nautilus image
NautilusImage = imageutils.GetE2EImage(imageutils.Nautilus)

// KittenImage is the fully qualified URI to the Kitten image
KittenImage = imageutils.GetE2EImage(imageutils.Kitten)

// NginxImage is the fully qualified URI to the Nginx image
NginxImage = imageutils.GetE2EImage(imageutils.Nginx)

// NewNginxImage is the fully qualified URI to the NginxNew image
NewNginxImage = imageutils.GetE2EImage(imageutils.NginxNew)

// RedisImage is the fully qualified URI to the Redis image
RedisImage = imageutils.GetE2EImage(imageutils.Redis)
)
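Most of the hunk above exists to satisfy golint's rule that exported identifiers carry a doc comment beginning with their own name, which is why each exported variable gains a comment starting with that variable. A tiny illustration of the convention (not part of the patch):

package example

// MaxRetries is the number of attempts made before giving up; golint accepts
// this declaration because the comment starts with the identifier it documents.
const MaxRetries = 3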