diff --git a/test/e2e/common/pods.go b/test/e2e/common/pods.go
index f9f4b7facc..1a9e43909d 100644
--- a/test/e2e/common/pods.go
+++ b/test/e2e/common/pods.go
@@ -42,8 +42,8 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	imageutils "k8s.io/kubernetes/test/utils/image"

-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 )

 var (
@@ -58,7 +58,7 @@ var (

 // testHostIP tests that a pod gets a host IP
 func testHostIP(podClient *framework.PodClient, pod *v1.Pod) {
-	By("creating pod")
+	ginkgo.By("creating pod")
 	podClient.CreateSync(pod)

 	// Try to make sure we get a hostIP for each pod.
@@ -66,7 +66,7 @@ func testHostIP(podClient *framework.PodClient, pod *v1.Pod) {
 	t := time.Now()
 	for {
 		p, err := podClient.Get(pod.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred(), "Failed to get pod %q", pod.Name)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to get pod %q", pod.Name)
 		if p.Status.HostIP != "" {
 			framework.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
 			break
@@ -83,23 +83,23 @@ func testHostIP(podClient *framework.PodClient, pod *v1.Pod) {
 func startPodAndGetBackOffs(podClient *framework.PodClient, pod *v1.Pod, sleepAmount time.Duration) (time.Duration, time.Duration) {
 	podClient.CreateSync(pod)
 	time.Sleep(sleepAmount)
-	Expect(pod.Spec.Containers).NotTo(BeEmpty())
+	gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
 	podName := pod.Name
 	containerName := pod.Spec.Containers[0].Name

-	By("getting restart delay-0")
+	ginkgo.By("getting restart delay-0")
 	_, err := getRestartDelay(podClient, podName, containerName)
 	if err != nil {
 		framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
 	}

-	By("getting restart delay-1")
+	ginkgo.By("getting restart delay-1")
 	delay1, err := getRestartDelay(podClient, podName, containerName)
 	if err != nil {
 		framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
 	}

-	By("getting restart delay-2")
+	ginkgo.By("getting restart delay-2")
 	delay2, err := getRestartDelay(podClient, podName, containerName)
 	if err != nil {
 		framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
@@ -132,7 +132,7 @@ func getRestartDelay(podClient *framework.PodClient, podName string, containerNa
 var _ = framework.KubeDescribe("Pods", func() {
 	f := framework.NewDefaultFramework("pods")
 	var podClient *framework.PodClient
-	BeforeEach(func() {
+	ginkgo.BeforeEach(func() {
 		podClient = f.PodClient()
 	})

@@ -164,7 +164,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 		Description: A Pod is created with a unique label. Pod MUST be accessible when queried using the label selector upon creation. Add a watch, check if the Pod is running. Pod then deleted, The pod deletion timestamp is observed. The watch MUST return the pod deleted event. Query with the original selector for the Pod MUST return empty list.
 	*/
 	framework.ConformanceIt("should be submitted and removed [NodeConformance]", func() {
-		By("creating the pod")
+		ginkgo.By("creating the pod")
 		name := "pod-submit-remove-" + string(uuid.NewUUID())
 		value := strconv.Itoa(time.Now().Nanosecond())
 		pod := &v1.Pod{
@@ -185,12 +185,12 @@ var _ = framework.KubeDescribe("Pods", func() {
 			},
 		}

-		By("setting up watch")
+		ginkgo.By("setting up watch")
 		selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
 		options := metav1.ListOptions{LabelSelector: selector.String()}
 		pods, err := podClient.List(options)
-		Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
-		Expect(len(pods.Items)).To(Equal(0))
+		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
+		gomega.Expect(len(pods.Items)).To(gomega.Equal(0))
 		options = metav1.ListOptions{
 			LabelSelector:   selector.String(),
 			ResourceVersion: pods.ListMeta.ResourceVersion,
@@ -220,17 +220,17 @@ var _ = framework.KubeDescribe("Pods", func() {
 		_, _, w, _ := watchtools.NewIndexerInformerWatcher(lw, &v1.Pod{})
 		defer w.Stop()

-		By("submitting the pod to kubernetes")
+		ginkgo.By("submitting the pod to kubernetes")
 		podClient.Create(pod)

-		By("verifying the pod is in kubernetes")
+		ginkgo.By("verifying the pod is in kubernetes")
 		selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
 		options = metav1.ListOptions{LabelSelector: selector.String()}
 		pods, err = podClient.List(options)
-		Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
-		Expect(len(pods.Items)).To(Equal(1))
+		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
+		gomega.Expect(len(pods.Items)).To(gomega.Equal(1))

-		By("verifying pod creation was observed")
+		ginkgo.By("verifying pod creation was observed")
 		select {
 		case <-listCompleted:
 			select {
@@ -250,14 +250,14 @@ var _ = framework.KubeDescribe("Pods", func() {
 		framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
 		// save the running pod
 		pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred(), "failed to GET scheduled pod")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to GET scheduled pod")

-		By("deleting the pod gracefully")
+		ginkgo.By("deleting the pod gracefully")
 		err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(30))
-		Expect(err).NotTo(HaveOccurred(), "failed to delete pod")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod")

-		By("verifying the kubelet observed the termination notice")
-		Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
+		ginkgo.By("verifying the kubelet observed the termination notice")
+		gomega.Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
 			podList, err := framework.GetKubeletPods(f.ClientSet, pod.Spec.NodeName)
 			if err != nil {
 				framework.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err)
@@ -275,9 +275,9 @@ var _ = framework.KubeDescribe("Pods", func() {
 			}
 			framework.Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed")
 			return true, nil
-		})).NotTo(HaveOccurred(), "kubelet never observed the termination notice")
+		})).NotTo(gomega.HaveOccurred(), "kubelet never observed the termination notice")

-		By("verifying pod deletion was observed")
+		ginkgo.By("verifying pod deletion was observed")
 		deleted := false
 		var lastPod *v1.Pod
 		timer := time.After(framework.DefaultPodDeletionTimeout)
@@ -300,14 +300,14 @@ var _ = framework.KubeDescribe("Pods", func() {
 			framework.Failf("Failed to observe pod deletion")
 		}

-		Expect(lastPod.DeletionTimestamp).ToNot(BeNil())
-		Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero())
+		gomega.Expect(lastPod.DeletionTimestamp).ToNot(gomega.BeNil())
+		gomega.Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(gomega.BeZero())

 		selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
 		options = metav1.ListOptions{LabelSelector: selector.String()}
 		pods, err = podClient.List(options)
-		Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
-		Expect(len(pods.Items)).To(Equal(0))
+		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
+		gomega.Expect(len(pods.Items)).To(gomega.Equal(0))
 	})

 	/*
@@ -316,7 +316,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 		Description: Create a Pod with a unique label. Query for the Pod with the label as selector MUST be successful. Update the pod to change the value of the Label. Query for the Pod with the new value for the label MUST be successful.
 	*/
 	framework.ConformanceIt("should be updated [NodeConformance]", func() {
-		By("creating the pod")
+		ginkgo.By("creating the pod")
 		name := "pod-update-" + string(uuid.NewUUID())
 		value := strconv.Itoa(time.Now().Nanosecond())
 		pod := &v1.Pod{
@@ -337,17 +337,17 @@ var _ = framework.KubeDescribe("Pods", func() {
 			},
 		}

-		By("submitting the pod to kubernetes")
+		ginkgo.By("submitting the pod to kubernetes")
 		pod = podClient.CreateSync(pod)

-		By("verifying the pod is in kubernetes")
+		ginkgo.By("verifying the pod is in kubernetes")
 		selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
 		options := metav1.ListOptions{LabelSelector: selector.String()}
 		pods, err := podClient.List(options)
-		Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
-		Expect(len(pods.Items)).To(Equal(1))
+		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
+		gomega.Expect(len(pods.Items)).To(gomega.Equal(1))

-		By("updating the pod")
+		ginkgo.By("updating the pod")
 		podClient.Update(name, func(pod *v1.Pod) {
 			value = strconv.Itoa(time.Now().Nanosecond())
 			pod.Labels["time"] = value
@@ -355,12 +355,12 @@ var _ = framework.KubeDescribe("Pods", func() {

 		framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

-		By("verifying the updated pod is in kubernetes")
+		ginkgo.By("verifying the updated pod is in kubernetes")
 		selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
 		options = metav1.ListOptions{LabelSelector: selector.String()}
 		pods, err = podClient.List(options)
-		Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
-		Expect(len(pods.Items)).To(Equal(1))
+		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
+		gomega.Expect(len(pods.Items)).To(gomega.Equal(1))
 		framework.Logf("Pod update OK")
 	})

@@ -370,7 +370,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 		Description: Create a Pod with a unique label. Query for the Pod with the label as selector MUST be successful. The Pod is updated with ActiveDeadlineSeconds set on the Pod spec. Pod MUST terminate of the specified time elapses.
 	*/
 	framework.ConformanceIt("should allow activeDeadlineSeconds to be updated [NodeConformance]", func() {
-		By("creating the pod")
+		ginkgo.By("creating the pod")
 		name := "pod-update-activedeadlineseconds-" + string(uuid.NewUUID())
 		value := strconv.Itoa(time.Now().Nanosecond())
 		pod := &v1.Pod{
@@ -391,17 +391,17 @@ var _ = framework.KubeDescribe("Pods", func() {
 			},
 		}

-		By("submitting the pod to kubernetes")
+		ginkgo.By("submitting the pod to kubernetes")
 		podClient.CreateSync(pod)

-		By("verifying the pod is in kubernetes")
+		ginkgo.By("verifying the pod is in kubernetes")
 		selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
 		options := metav1.ListOptions{LabelSelector: selector.String()}
 		pods, err := podClient.List(options)
-		Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
-		Expect(len(pods.Items)).To(Equal(1))
+		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
+		gomega.Expect(len(pods.Items)).To(gomega.Equal(1))

-		By("updating the pod")
+		ginkgo.By("updating the pod")
 		podClient.Update(name, func(pod *v1.Pod) {
 			newDeadline := int64(5)
 			pod.Spec.ActiveDeadlineSeconds = &newDeadline
@@ -462,7 +462,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 			},
 		}
 		_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(svc)
-		Expect(err).NotTo(HaveOccurred(), "failed to create service")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service")

 		// Make a client pod that verifies that it has the service environment variables.
 		podName := "client-envvars-" + string(uuid.NewUUID())
@@ -497,7 +497,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 			"FOOSERVICE_PORT_8765_TCP_ADDR=",
 		}
 		framework.ExpectNoErrorWithRetries(func() error {
-			return f.MatchContainerOutput(pod, containerName, expectedVars, ContainSubstring)
+			return f.MatchContainerOutput(pod, containerName, expectedVars, gomega.ContainSubstring)
 		}, maxRetries, "Container should have service environment variables set")
 	})

@@ -509,9 +509,9 @@ var _ = framework.KubeDescribe("Pods", func() {
 	*/
 	framework.ConformanceIt("should support remote command execution over websockets [NodeConformance]", func() {
 		config, err := framework.LoadConfig()
-		Expect(err).NotTo(HaveOccurred(), "unable to get base config")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unable to get base config")

-		By("creating the pod")
+		ginkgo.By("creating the pod")
 		name := "pod-exec-websocket-" + string(uuid.NewUUID())
 		pod := &v1.Pod{
 			ObjectMeta: metav1.ObjectMeta{
@@ -528,7 +528,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 			},
 		}

-		By("submitting the pod to kubernetes")
+		ginkgo.By("submitting the pod to kubernetes")
 		pod = podClient.CreateSync(pod)

 		req := f.ClientSet.CoreV1().RESTClient().Get().
@@ -550,7 +550,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 		defer ws.Close()

 		buf := &bytes.Buffer{}
-		Eventually(func() error {
+		gomega.Eventually(func() error {
 			for {
 				var msg []byte
 				if err := websocket.Message.Receive(ws, &msg); err != nil {
@@ -580,7 +580,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 				return fmt.Errorf("Expected to find 'remote execution test' in %q", buf.String())
 			}
 			return nil
-		}, time.Minute, 10*time.Second).Should(BeNil())
+		}, time.Minute, 10*time.Second).Should(gomega.BeNil())
 	})

 	/*
@@ -591,9 +591,9 @@ var _ = framework.KubeDescribe("Pods", func() {
 	*/
 	framework.ConformanceIt("should support retrieving logs from the container over websockets [NodeConformance]", func() {
 		config, err := framework.LoadConfig()
-		Expect(err).NotTo(HaveOccurred(), "unable to get base config")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unable to get base config")

-		By("creating the pod")
+		ginkgo.By("creating the pod")
 		name := "pod-logs-websocket-" + string(uuid.NewUUID())
 		pod := &v1.Pod{
 			ObjectMeta: metav1.ObjectMeta{
@@ -610,7 +610,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 			},
 		}

-		By("submitting the pod to kubernetes")
+		ginkgo.By("submitting the pod to kubernetes")
 		podClient.CreateSync(pod)

 		req := f.ClientSet.CoreV1().RESTClient().Get().
@@ -647,7 +647,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 	})

 	// Slow (~7 mins)
-	It("should have their auto-restart back-off timer reset on image update [Slow][NodeConformance]", func() {
+	ginkgo.It("should have their auto-restart back-off timer reset on image update [Slow][NodeConformance]", func() {
 		podName := "pod-back-off-image"
 		containerName := "back-off"
 		pod := &v1.Pod{
@@ -668,7 +668,7 @@ var _ = framework.KubeDescribe("Pods", func() {

 		delay1, delay2 := startPodAndGetBackOffs(podClient, pod, buildBackOffDuration)

-		By("updating the image")
+		ginkgo.By("updating the image")
 		podClient.Update(podName, func(pod *v1.Pod) {
 			pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.Nginx)
 		})
@@ -676,7 +676,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 		time.Sleep(syncLoopFrequency)
 		framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

-		By("get restart delay after image update")
+		ginkgo.By("get restart delay after image update")
 		delayAfterUpdate, err := getRestartDelay(podClient, podName, containerName)
 		if err != nil {
 			framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
@@ -688,7 +688,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 	})

 	// Slow by design (~27 mins) issue #19027
-	It("should cap back-off at MaxContainerBackOff [Slow][NodeConformance]", func() {
+	ginkgo.It("should cap back-off at MaxContainerBackOff [Slow][NodeConformance]", func() {
 		podName := "back-off-cap"
 		containerName := "back-off-cap"
 		pod := &v1.Pod{
@@ -711,7 +711,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 		time.Sleep(2 * kubelet.MaxContainerBackOff) // it takes slightly more than 2*x to get to a back-off of x
 		// wait for a delay == capped delay of MaxContainerBackOff
-		By("getting restart delay when capped")
+		ginkgo.By("getting restart delay when capped")
 		var (
 			delay1 time.Duration
 			err    error
 		)
@@ -731,7 +731,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 			framework.Failf("expected %s back-off got=%s in delay1", kubelet.MaxContainerBackOff, delay1)
 		}
-		By("getting restart delay after a capped delay")
+		ginkgo.By("getting restart delay after a capped delay")
 		delay2, err := getRestartDelay(podClient, podName, containerName)
 		if err != nil {
 			framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
 		}
@@ -743,7 +743,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 	})

 	// TODO(freehan): label the test to be [NodeConformance] after tests are proven to be stable.
-	It("should support pod readiness gates [NodeFeature:PodReadinessGate]", func() {
+	ginkgo.It("should support pod readiness gates [NodeFeature:PodReadinessGate]", func() {
 		podName := "pod-ready"
 		readinessGate1 := "k8s.io/test-condition1"
 		readinessGate2 := "k8s.io/test-condition2"
@@ -769,35 +769,35 @@ var _ = framework.KubeDescribe("Pods", func() {
 		}

 		validatePodReadiness := func(expectReady bool) {
-			Expect(wait.Poll(time.Second, maxReadyStatusUpdateTolerance, func() (bool, error) {
+			gomega.Expect(wait.Poll(time.Second, maxReadyStatusUpdateTolerance, func() (bool, error) {
 				podReady := podClient.PodIsReady(podName)
 				res := expectReady == podReady
 				if !res {
 					framework.Logf("Expect the Ready condition of pod %q to be %v, but got %v", podName, expectReady, podReady)
 				}
 				return res, nil
-			})).NotTo(HaveOccurred())
+			})).NotTo(gomega.HaveOccurred())
 		}

-		By("submitting the pod to kubernetes")
+		ginkgo.By("submitting the pod to kubernetes")
 		podClient.CreateSync(pod)
-		Expect(podClient.PodIsReady(podName)).To(BeFalse(), "Expect pod's Ready condition to be false initially.")
+		gomega.Expect(podClient.PodIsReady(podName)).To(gomega.BeFalse(), "Expect pod's Ready condition to be false initially.")

-		By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate1))
+		ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate1))
 		_, err := podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True")), "status")
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		// Sleep for 10 seconds.
 		time.Sleep(maxReadyStatusUpdateTolerance)
-		Expect(podClient.PodIsReady(podName)).To(BeFalse(), "Expect pod's Ready condition to be false with only one condition in readinessGates equal to True")
+		gomega.Expect(podClient.PodIsReady(podName)).To(gomega.BeFalse(), "Expect pod's Ready condition to be false with only one condition in readinessGates equal to True")

-		By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate2))
+		ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate2))
 		_, err = podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate2, "True")), "status")
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		validatePodReadiness(true)

-		By(fmt.Sprintf("patching pod status with condition %q to false", readinessGate1))
+		ginkgo.By(fmt.Sprintf("patching pod status with condition %q to false", readinessGate1))
 		_, err = podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "False")), "status")
-		Expect(err).NotTo(HaveOccurred())
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		validatePodReadiness(false)
 	})
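Note on the pattern above: every hunk applies the same mechanical change, replacing the Ginkgo/Gomega dot imports with package-qualified imports and prefixing each helper (By, It, BeforeEach, Expect, Eventually, matchers) with its package name; test logic is unchanged. As a minimal, self-contained sketch of the resulting style (illustrative only, not part of this patch; the test name, suite name, and assertion are made up), a spec written against the qualified imports looks like this:

package example

import (
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// TestQualifiedImports registers Gomega's fail handler with Ginkgo and runs the
// suite, mirroring how the e2e framework wires the two libraries together.
func TestQualifiedImports(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "Qualified Imports Suite")
}

var _ = ginkgo.Describe("qualified imports", func() {
	ginkgo.It("uses package-qualified helpers instead of dot imports", func() {
		// By and Expect are spelled with their package names, as in the diff above.
		ginkgo.By("asserting through gomega.Expect rather than a dot-imported Expect")
		gomega.Expect(1 + 1).To(gomega.Equal(2))
	})
})

The only visible difference from the dot-import style is the package prefix on each helper, which is what makes the origin of By, It, and the matchers explicit at every call site.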