diff --git a/hack/.golint_failures b/hack/.golint_failures
index b88bf5a2d1..17d6422cbe 100644
--- a/hack/.golint_failures
+++ b/hack/.golint_failures
@@ -650,7 +650,6 @@ test/e2e/apps
 test/e2e/auth
 test/e2e/autoscaling
 test/e2e/chaosmonkey
-test/e2e/cloud
 test/e2e/common
 test/e2e/framework
 test/e2e/framework/providers/gce
@@ -658,14 +657,12 @@ test/e2e/framework/providers/kubemark
 test/e2e/instrumentation
 test/e2e/instrumentation/logging
 test/e2e/instrumentation/monitoring
-test/e2e/kubectl
 test/e2e/lifecycle
 test/e2e/lifecycle/bootstrap
 test/e2e/network
 test/e2e/node
 test/e2e/scalability
 test/e2e/scheduling
-test/e2e/servicecatalog
 test/e2e/storage
 test/e2e/storage/drivers
 test/e2e/storage/testsuites
diff --git a/test/e2e/cloud/framework.go b/test/e2e/cloud/framework.go
index 6f6ae11669..16c8e7ef1f 100644
--- a/test/e2e/cloud/framework.go
+++ b/test/e2e/cloud/framework.go
@@ -18,6 +18,7 @@ package cloud
 
 import "k8s.io/kubernetes/test/e2e/framework"
 
+// SIGDescribe annotates the test with the SIG label.
 func SIGDescribe(text string, body func()) bool {
 	return framework.KubeDescribe("[sig-cloud-provider] "+text, body)
 }
diff --git a/test/e2e/cloud/nodes.go b/test/e2e/cloud/nodes.go
index a3e8e0315a..8d097082ea 100644
--- a/test/e2e/cloud/nodes.go
+++ b/test/e2e/cloud/nodes.go
@@ -24,23 +24,23 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 )
 
 var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {
 	f := framework.NewDefaultFramework("cloudprovider")
 	var c clientset.Interface
 
-	BeforeEach(func() {
+	ginkgo.BeforeEach(func() {
 		// Only supported in AWS/GCE because those are the only cloud providers
 		// where E2E test are currently running.
 		framework.SkipUnlessProviderIs("aws", "gce", "gke")
 		c = f.ClientSet
 	})
 
-	It("should be deleted on API server if it doesn't exist in the cloud provider", func() {
-		By("deleting a node on the cloud provider")
+	ginkgo.It("should be deleted on API server if it doesn't exist in the cloud provider", func() {
+		ginkgo.By("deleting a node on the cloud provider")
 		nodeDeleteCandidates := framework.GetReadySchedulableNodesOrDie(c)
 		nodeToDelete := nodeDeleteCandidates.Items[0]
@@ -54,8 +54,8 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {
 		}
 
 		newNodes, err := framework.CheckNodesReady(c, len(origNodes.Items)-1, 5*time.Minute)
-		Expect(err).To(BeNil())
-		Expect(len(newNodes)).To(Equal(len(origNodes.Items) - 1))
+		gomega.Expect(err).To(gomega.BeNil())
+		gomega.Expect(len(newNodes)).To(gomega.Equal(len(origNodes.Items) - 1))
 
 		_, err = c.CoreV1().Nodes().Get(nodeToDelete.Name, metav1.GetOptions{})
 		if err == nil {
diff --git a/test/e2e/kubectl/framework.go b/test/e2e/kubectl/framework.go
index 79e69f9e89..a95ea5a719 100644
--- a/test/e2e/kubectl/framework.go
+++ b/test/e2e/kubectl/framework.go
@@ -18,6 +18,7 @@ package kubectl
 
 import "github.com/onsi/ginkgo"
 
+// SIGDescribe annotates the test with the SIG label.
 func SIGDescribe(text string, body func()) bool {
 	return ginkgo.Describe("[sig-cli] "+text, body)
 }
diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go
index ca09707401..59c617c628 100644
--- a/test/e2e/kubectl/kubectl.go
+++ b/test/e2e/kubectl/kubectl.go
@@ -63,8 +63,8 @@ import (
 	testutils "k8s.io/kubernetes/test/utils"
 	uexec "k8s.io/utils/exec"
 
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 	"k8s.io/kubernetes/pkg/kubectl/polymorphichelpers"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -101,14 +101,14 @@ var (
 var (
 	proxyRegexp = regexp.MustCompile("Starting to serve on 127.0.0.1:([0-9]+)")
 
-	CronJobGroupVersionResourceAlpha = schema.GroupVersionResource{Group: "batch", Version: "v2alpha1", Resource: "cronjobs"}
-	CronJobGroupVersionResourceBeta  = schema.GroupVersionResource{Group: "batch", Version: "v1beta1", Resource: "cronjobs"}
+	cronJobGroupVersionResourceAlpha = schema.GroupVersionResource{Group: "batch", Version: "v2alpha1", Resource: "cronjobs"}
+	cronJobGroupVersionResourceBeta  = schema.GroupVersionResource{Group: "batch", Version: "v1beta1", Resource: "cronjobs"}
 )
 
 // Stops everything from filePath from namespace ns and checks if everything matching selectors from the given namespace is correctly stopped.
 // Aware of the kubectl example files map.
 func cleanupKubectlInputs(fileContents string, ns string, selectors ...string) {
-	By("using delete to clean up resources")
+	ginkgo.By("using delete to clean up resources")
 	var nsArg string
 	if ns != "" {
 		nsArg = fmt.Sprintf("--namespace=%s", ns)
@@ -120,7 +120,7 @@ func cleanupKubectlInputs(fileContents string, ns string, selectors ...string) {
 }
 
 func readTestFileOrDie(file string) []byte {
-	return testfiles.ReadOrDie(path.Join(kubeCtlManifestPath, file), Fail)
+	return testfiles.ReadOrDie(path.Join(kubeCtlManifestPath, file), ginkgo.Fail)
 }
 
 func runKubectlRetryOrDie(args ...string) string {
@@ -136,18 +136,18 @@ func runKubectlRetryOrDie(args ...string) string {
 	// Expect no errors to be present after retries are finished
 	// Copied from framework #ExecOrDie
 	framework.Logf("stdout: %q", output)
-	Expect(err).NotTo(HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	return output
 }
 
 // duplicated setup to avoid polluting "normal" clients with alpha features which confuses the generated clients
 var _ = SIGDescribe("Kubectl alpha client", func() {
-	defer GinkgoRecover()
+	defer ginkgo.GinkgoRecover()
 	f := framework.NewDefaultFramework("kubectl")
 
 	var c clientset.Interface
 	var ns string
-	BeforeEach(func() {
+	ginkgo.BeforeEach(func() {
 		c = f.ClientSet
 		ns = f.Namespace.Name
 	})
@@ -156,22 +156,22 @@ var _ = SIGDescribe("Kubectl alpha client", func() {
 		var nsFlag string
 		var cjName string
 
-		BeforeEach(func() {
+		ginkgo.BeforeEach(func() {
 			nsFlag = fmt.Sprintf("--namespace=%v", ns)
 			cjName = "e2e-test-echo-cronjob-alpha"
 		})
 
-		AfterEach(func() {
+		ginkgo.AfterEach(func() {
 			framework.RunKubectlOrDie("delete", "cronjobs", cjName, nsFlag)
 		})
 
-		It("should create a CronJob", func() {
-			framework.SkipIfMissingResource(f.DynamicClient, CronJobGroupVersionResourceAlpha, f.Namespace.Name)
+		ginkgo.It("should create a CronJob", func() {
+			framework.SkipIfMissingResource(f.DynamicClient, cronJobGroupVersionResourceAlpha, f.Namespace.Name)
 
 			schedule := "*/5 * * * ?"
framework.RunKubectlOrDie("run", cjName, "--restart=OnFailure", "--generator=cronjob/v2alpha1", "--schedule="+schedule, "--image="+busyboxImage, nsFlag) - By("verifying the CronJob " + cjName + " was created") + ginkgo.By("verifying the CronJob " + cjName + " was created") sj, err := c.BatchV1beta1().CronJobs(ns).Get(cjName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting CronJob %s: %v", cjName, err) @@ -191,7 +191,7 @@ var _ = SIGDescribe("Kubectl alpha client", func() { }) var _ = SIGDescribe("Kubectl client", func() { - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() f := framework.NewDefaultFramework("kubectl") // Reusable cluster state function. This won't be adversely affected by lazy initialization of framework. @@ -208,7 +208,7 @@ var _ = SIGDescribe("Kubectl client", func() { } var c clientset.Interface var ns string - BeforeEach(func() { + ginkgo.BeforeEach(func() { c = f.ClientSet ns = f.Namespace.Name }) @@ -260,10 +260,10 @@ var _ = SIGDescribe("Kubectl client", func() { framework.KubeDescribe("Update Demo", func() { var nautilus, kitten string - BeforeEach(func() { + ginkgo.BeforeEach(func() { updateDemoRoot := "test/fixtures/doc-yaml/user-guide/update-demo" - nautilus = commonutils.SubstituteImageName(string(testfiles.ReadOrDie(filepath.Join(updateDemoRoot, "nautilus-rc.yaml.in"), Fail))) - kitten = commonutils.SubstituteImageName(string(testfiles.ReadOrDie(filepath.Join(updateDemoRoot, "kitten-rc.yaml.in"), Fail))) + nautilus = commonutils.SubstituteImageName(string(testfiles.ReadOrDie(filepath.Join(updateDemoRoot, "nautilus-rc.yaml.in"), ginkgo.Fail))) + kitten = commonutils.SubstituteImageName(string(testfiles.ReadOrDie(filepath.Join(updateDemoRoot, "kitten-rc.yaml.in"), ginkgo.Fail))) }) /* Release : v1.9 @@ -273,7 +273,7 @@ var _ = SIGDescribe("Kubectl client", func() { framework.ConformanceIt("should create and stop a replication controller ", func() { defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector) - By("creating a replication controller") + ginkgo.By("creating a replication controller") framework.RunKubectlOrDieInput(nautilus, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) }) @@ -286,14 +286,14 @@ var _ = SIGDescribe("Kubectl client", func() { framework.ConformanceIt("should scale a replication controller ", func() { defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector) - By("creating a replication controller") + ginkgo.By("creating a replication controller") framework.RunKubectlOrDieInput(nautilus, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) - By("scaling down the replication controller") + ginkgo.By("scaling down the replication controller") debugDiscovery() framework.RunKubectlOrDie("scale", "rc", "update-demo-nautilus", "--replicas=1", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns)) framework.ValidateController(c, nautilusImage, 1, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) - By("scaling up the replication controller") + ginkgo.By("scaling up the replication controller") debugDiscovery() framework.RunKubectlOrDie("scale", "rc", "update-demo-nautilus", "--replicas=2", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns)) framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), 
ns) @@ -305,10 +305,10 @@ var _ = SIGDescribe("Kubectl client", func() { Description: Create a Pod and a container with a given image. Configure replication controller to run 2 replicas. The number of running instances of the Pod MUST equal the number of replicas set on the replication controller which is 2. Run a rolling update to run a different version of the container. All running instances SHOULD now be running the newer version of the container as part of the rolling update. */ framework.ConformanceIt("should do a rolling update of a replication controller ", func() { - By("creating the initial replication controller") + ginkgo.By("creating the initial replication controller") framework.RunKubectlOrDieInput(string(nautilus[:]), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) - By("rolling-update to new replication controller") + ginkgo.By("rolling-update to new replication controller") debugDiscovery() framework.RunKubectlOrDieInput(string(kitten[:]), "rolling-update", "update-demo-nautilus", "--update-period=1s", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) framework.ValidateController(c, kittenImage, 2, "update-demo", updateDemoSelector, getUDData("kitten.jpg", ns), ns) @@ -327,7 +327,7 @@ var _ = SIGDescribe("Kubectl client", func() { "redis-master-deployment.yaml.in", "redis-slave-deployment.yaml.in", } { - contents := commonutils.SubstituteImageName(string(testfiles.ReadOrDie(filepath.Join(guestbookRoot, gbAppFile), Fail))) + contents := commonutils.SubstituteImageName(string(testfiles.ReadOrDie(filepath.Join(guestbookRoot, gbAppFile), ginkgo.Fail))) run(contents) } } @@ -341,45 +341,45 @@ var _ = SIGDescribe("Kubectl client", func() { defer forEachGBFile(func(contents string) { cleanupKubectlInputs(contents, ns) }) - By("creating all guestbook components") + ginkgo.By("creating all guestbook components") forEachGBFile(func(contents string) { framework.Logf(contents) framework.RunKubectlOrDieInput(contents, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) }) - By("validating guestbook app") + ginkgo.By("validating guestbook app") validateGuestbookApp(c, ns) }) }) framework.KubeDescribe("Simple pod", func() { var podYaml string - BeforeEach(func() { - By(fmt.Sprintf("creating the pod from %v", podYaml)) + ginkgo.BeforeEach(func() { + ginkgo.By(fmt.Sprintf("creating the pod from %v", podYaml)) podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pod-with-readiness-probe.yaml.in"))) framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) - Expect(framework.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout)).To(BeTrue()) + gomega.Expect(framework.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout)).To(gomega.BeTrue()) }) - AfterEach(func() { + ginkgo.AfterEach(func() { cleanupKubectlInputs(podYaml, ns, simplePodSelector) }) - It("should support exec", func() { - By("executing a command in the container") + ginkgo.It("should support exec", func() { + ginkgo.By("executing a command in the container") execOutput := framework.RunKubectlOrDie("exec", fmt.Sprintf("--namespace=%v", ns), simplePodName, "echo", "running", "in", "container") if e, a := "running in container", strings.TrimSpace(execOutput); e != a { framework.Failf("Unexpected kubectl exec output. 
Wanted %q, got %q", e, a) } - By("executing a very long command in the container") + ginkgo.By("executing a very long command in the container") veryLongData := make([]rune, 20000) for i := 0; i < len(veryLongData); i++ { veryLongData[i] = 'a' } execOutput = framework.RunKubectlOrDie("exec", fmt.Sprintf("--namespace=%v", ns), simplePodName, "echo", string(veryLongData)) - Expect(string(veryLongData)).To(Equal(strings.TrimSpace(execOutput)), "Unexpected kubectl exec output") + gomega.Expect(string(veryLongData)).To(gomega.Equal(strings.TrimSpace(execOutput)), "Unexpected kubectl exec output") - By("executing a command in the container with noninteractive stdin") + ginkgo.By("executing a command in the container with noninteractive stdin") execOutput = framework.NewKubectlCommand("exec", fmt.Sprintf("--namespace=%v", ns), "-i", simplePodName, "cat"). WithStdinData("abcd1234"). ExecOrDie() @@ -395,7 +395,7 @@ var _ = SIGDescribe("Kubectl client", func() { // NOTE this is solely for test cleanup! defer closer.Close() - By("executing a command in the container with pseudo-interactive stdin") + ginkgo.By("executing a command in the container with pseudo-interactive stdin") execOutput = framework.NewKubectlCommand("exec", fmt.Sprintf("--namespace=%v", ns), "-i", simplePodName, "sh"). WithStdinReader(r). ExecOrDie() @@ -404,20 +404,20 @@ var _ = SIGDescribe("Kubectl client", func() { } }) - It("should support exec through an HTTP proxy", func() { + ginkgo.It("should support exec through an HTTP proxy", func() { // Fail if the variable isn't set if framework.TestContext.Host == "" { framework.Failf("--host variable must be set to the full URI to the api server on e2e run.") } - By("Starting goproxy") + ginkgo.By("Starting goproxy") testSrv, proxyLogs := startLocalProxy() defer testSrv.Close() proxyAddr := testSrv.URL for _, proxyVar := range []string{"https_proxy", "HTTPS_PROXY"} { proxyLogs.Reset() - By("Running kubectl via an HTTP proxy using " + proxyVar) + ginkgo.By("Running kubectl via an HTTP proxy using " + proxyVar) output := framework.NewKubectlCommand(fmt.Sprintf("--namespace=%s", ns), "exec", "nginx", "echo", "running", "in", "container"). WithEnv(append(os.Environ(), fmt.Sprintf("%s=%s", proxyVar, proxyAddr))). 
ExecOrDie() @@ -438,20 +438,20 @@ var _ = SIGDescribe("Kubectl client", func() { } }) - It("should support exec through kubectl proxy", func() { + ginkgo.It("should support exec through kubectl proxy", func() { // Fail if the variable isn't set if framework.TestContext.Host == "" { framework.Failf("--host variable must be set to the full URI to the api server on e2e run.") } - By("Starting kubectl proxy") + ginkgo.By("Starting kubectl proxy") port, proxyCmd, err := startProxyServer() framework.ExpectNoError(err) defer framework.TryKill(proxyCmd) //proxyLogs.Reset() host := fmt.Sprintf("--server=http://127.0.0.1:%d", port) - By("Running kubectl via kubectl proxy using " + host) + ginkgo.By("Running kubectl via kubectl proxy using " + host) output := framework.NewKubectlCommand( host, fmt.Sprintf("--namespace=%s", ns), "exec", "nginx", "echo", "running", "in", "container", @@ -464,60 +464,60 @@ var _ = SIGDescribe("Kubectl client", func() { } }) - It("should return command exit codes", func() { + ginkgo.It("should return command exit codes", func() { nsFlag := fmt.Sprintf("--namespace=%v", ns) - By("execing into a container with a successful command") + ginkgo.By("execing into a container with a successful command") _, err := framework.NewKubectlCommand(nsFlag, "exec", "nginx", "--", "/bin/sh", "-c", "exit 0").Exec() framework.ExpectNoError(err) - By("execing into a container with a failing command") + ginkgo.By("execing into a container with a failing command") _, err = framework.NewKubectlCommand(nsFlag, "exec", "nginx", "--", "/bin/sh", "-c", "exit 42").Exec() ee, ok := err.(uexec.ExitError) - Expect(ok).To(Equal(true)) - Expect(ee.ExitStatus()).To(Equal(42)) + gomega.Expect(ok).To(gomega.Equal(true)) + gomega.Expect(ee.ExitStatus()).To(gomega.Equal(42)) - By("running a successful command") + ginkgo.By("running a successful command") _, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "success", "--", "/bin/sh", "-c", "exit 0").Exec() framework.ExpectNoError(err) - By("running a failing command") + ginkgo.By("running a failing command") _, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-1", "--", "/bin/sh", "-c", "exit 42").Exec() ee, ok = err.(uexec.ExitError) - Expect(ok).To(Equal(true)) - Expect(ee.ExitStatus()).To(Equal(42)) + gomega.Expect(ok).To(gomega.Equal(true)) + gomega.Expect(ee.ExitStatus()).To(gomega.Equal(42)) - By("running a failing command without --restart=Never") + ginkgo.By("running a failing command without --restart=Never") _, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "failure-2", "--", "/bin/sh", "-c", "cat && exit 42"). WithStdinData("abcd1234"). Exec() framework.ExpectNoError(err) - By("running a failing command without --restart=Never, but with --rm") + ginkgo.By("running a failing command without --restart=Never, but with --rm") _, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "--rm", "failure-3", "--", "/bin/sh", "-c", "cat && exit 42"). WithStdinData("abcd1234"). 
Exec() framework.ExpectNoError(err) framework.WaitForPodToDisappear(f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, wait.ForeverTestTimeout) - By("running a failing command with --leave-stdin-open") + ginkgo.By("running a failing command with --leave-stdin-open") _, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42"). WithStdinData("abcd1234"). Exec() framework.ExpectNoError(err) }) - It("should support inline execution and attach", func() { + ginkgo.It("should support inline execution and attach", func() { nsFlag := fmt.Sprintf("--namespace=%v", ns) - By("executing a command with run and attach with stdin") + ginkgo.By("executing a command with run and attach with stdin") runOutput := framework.NewKubectlCommand(nsFlag, "run", "run-test", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'"). WithStdinData("abcd1234"). ExecOrDie() g := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } runTestPod, _, err := polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test", 1*time.Minute, g) - Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) // NOTE: we cannot guarantee our output showed up in the container logs before stdin was closed, so we have // to loop test. err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { @@ -525,30 +525,30 @@ var _ = SIGDescribe("Kubectl client", func() { framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test") } logOutput := framework.RunKubectlOrDie(nsFlag, "logs", runTestPod.Name) - Expect(runOutput).To(ContainSubstring("abcd1234")) - Expect(runOutput).To(ContainSubstring("stdin closed")) + gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234")) + gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed")) return strings.Contains(logOutput, "abcd1234"), nil }) - Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) - Expect(c.BatchV1().Jobs(ns).Delete("run-test", nil)).To(BeNil()) + gomega.Expect(c.BatchV1().Jobs(ns).Delete("run-test", nil)).To(gomega.BeNil()) - By("executing a command with run and attach without stdin") + ginkgo.By("executing a command with run and attach without stdin") runOutput = framework.NewKubectlCommand(fmt.Sprintf("--namespace=%v", ns), "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'"). WithStdinData("abcd1234"). ExecOrDie() - Expect(runOutput).ToNot(ContainSubstring("abcd1234")) - Expect(runOutput).To(ContainSubstring("stdin closed")) - Expect(c.BatchV1().Jobs(ns).Delete("run-test-2", nil)).To(BeNil()) + gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("abcd1234")) + gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed")) + gomega.Expect(c.BatchV1().Jobs(ns).Delete("run-test-2", nil)).To(gomega.BeNil()) - By("executing a command with run and attach with stdin with open stdin should remain running") + ginkgo.By("executing a command with run and attach with stdin with open stdin should remain running") runOutput = framework.NewKubectlCommand(nsFlag, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'"). WithStdinData("abcd1234\n"). 
ExecOrDie() - Expect(runOutput).ToNot(ContainSubstring("stdin closed")) + gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("stdin closed")) g = func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } runTestPod, _, err = polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g) - Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) if !framework.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, time.Minute) { framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3") } @@ -560,20 +560,20 @@ var _ = SIGDescribe("Kubectl client", func() { framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3") } logOutput := framework.RunKubectlOrDie(nsFlag, "logs", runTestPod.Name) - Expect(logOutput).ToNot(ContainSubstring("stdin closed")) + gomega.Expect(logOutput).ToNot(gomega.ContainSubstring("stdin closed")) return strings.Contains(logOutput, "abcd1234"), nil }) - Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) - Expect(c.BatchV1().Jobs(ns).Delete("run-test-3", nil)).To(BeNil()) + gomega.Expect(c.BatchV1().Jobs(ns).Delete("run-test-3", nil)).To(gomega.BeNil()) }) - It("should support port-forward", func() { - By("forwarding the container port to a local port") + ginkgo.It("should support port-forward", func() { + ginkgo.By("forwarding the container port to a local port") cmd := runPortForward(ns, simplePodName, simplePodPort) defer cmd.Stop() - By("curling local port output") + ginkgo.By("curling local port output") localAddr := fmt.Sprintf("http://localhost:%d", cmd.port) body, err := curl(localAddr) framework.Logf("got: %s", body) @@ -585,8 +585,8 @@ var _ = SIGDescribe("Kubectl client", func() { } }) - It("should handle in-cluster config", func() { - By("adding rbac permissions") + ginkgo.It("should handle in-cluster config", func() { + ginkgo.By("adding rbac permissions") // grant the view permission widely to allow inspection of the `invalid` namespace and the default namespace framework.BindClusterRole(f.ClientSet.RbacV1beta1(), "view", f.Namespace.Name, rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"}) @@ -596,7 +596,7 @@ var _ = SIGDescribe("Kubectl client", func() { f.Namespace.Name, "list", schema.GroupResource{Resource: "pods"}, true) framework.ExpectNoError(err) - By("overriding icc with values provided by flags") + ginkgo.By("overriding icc with values provided by flags") kubectlPath := framework.TestContext.KubectlPath // we need the actual kubectl binary, not the script wrapper kubectlPathNormalizer := exec.Command("which", kubectlPath) @@ -660,52 +660,52 @@ metadata: framework.RunKubectlOrDie("cp", filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), ns+"/"+simplePodName+":/tmp/") framework.RunKubectlOrDie("cp", filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), ns+"/"+simplePodName+":/tmp/") - By("getting pods with in-cluster configs") + ginkgo.By("getting pods with in-cluster configs") execOutput := framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --v=6 2>&1") - Expect(execOutput).To(MatchRegexp("nginx +1/1 +Running")) - Expect(execOutput).To(ContainSubstring("Using in-cluster namespace")) - Expect(execOutput).To(ContainSubstring("Using in-cluster configuration")) + gomega.Expect(execOutput).To(gomega.MatchRegexp("nginx +1/1 +Running")) + gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster 
namespace")) + gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster configuration")) - By("creating an object containing a namespace with in-cluster config") + ginkgo.By("creating an object containing a namespace with in-cluster config") _, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-with-namespace.yaml --v=6 2>&1") - Expect(err).To(ContainSubstring("Using in-cluster namespace")) - Expect(err).To(ContainSubstring("Using in-cluster configuration")) - Expect(err).To(ContainSubstring(fmt.Sprintf("POST https://%s:%s/api/v1/namespaces/configmap-namespace/configmaps", inClusterHost, inClusterPort))) + gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace")) + gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster configuration")) + gomega.Expect(err).To(gomega.ContainSubstring(fmt.Sprintf("POST https://%s:%s/api/v1/namespaces/configmap-namespace/configmaps", inClusterHost, inClusterPort))) - By("creating an object not containing a namespace with in-cluster config") + ginkgo.By("creating an object not containing a namespace with in-cluster config") _, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-without-namespace.yaml --v=6 2>&1") - Expect(err).To(ContainSubstring("Using in-cluster namespace")) - Expect(err).To(ContainSubstring("Using in-cluster configuration")) - Expect(err).To(ContainSubstring(fmt.Sprintf("POST https://%s:%s/api/v1/namespaces/%s/configmaps", inClusterHost, inClusterPort, f.Namespace.Name))) + gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace")) + gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster configuration")) + gomega.Expect(err).To(gomega.ContainSubstring(fmt.Sprintf("POST https://%s:%s/api/v1/namespaces/%s/configmaps", inClusterHost, inClusterPort, f.Namespace.Name))) - By("trying to use kubectl with invalid token") + ginkgo.By("trying to use kubectl with invalid token") _, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --token=invalid --v=7 2>&1") framework.Logf("got err %v", err) - Expect(err).To(HaveOccurred()) - Expect(err).To(ContainSubstring("Using in-cluster namespace")) - Expect(err).To(ContainSubstring("Using in-cluster configuration")) - Expect(err).To(ContainSubstring("Authorization: Bearer invalid")) - Expect(err).To(ContainSubstring("Response Status: 401 Unauthorized")) + gomega.Expect(err).To(gomega.HaveOccurred()) + gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace")) + gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster configuration")) + gomega.Expect(err).To(gomega.ContainSubstring("Authorization: Bearer invalid")) + gomega.Expect(err).To(gomega.ContainSubstring("Response Status: 401 Unauthorized")) - By("trying to use kubectl with invalid server") + ginkgo.By("trying to use kubectl with invalid server") _, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --server=invalid --v=6 2>&1") framework.Logf("got err %v", err) - Expect(err).To(HaveOccurred()) - Expect(err).To(ContainSubstring("Unable to connect to the server")) - Expect(err).To(ContainSubstring("GET http://invalid/api")) + gomega.Expect(err).To(gomega.HaveOccurred()) + gomega.Expect(err).To(gomega.ContainSubstring("Unable to connect to the server")) + gomega.Expect(err).To(gomega.ContainSubstring("GET http://invalid/api")) - By("trying to use kubectl with invalid namespace") + ginkgo.By("trying to use kubectl with invalid namespace") 
 			execOutput = framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --namespace=invalid --v=6 2>&1")
-			Expect(execOutput).To(ContainSubstring("No resources found"))
-			Expect(execOutput).ToNot(ContainSubstring("Using in-cluster namespace"))
-			Expect(execOutput).To(ContainSubstring("Using in-cluster configuration"))
-			Expect(execOutput).To(MatchRegexp(fmt.Sprintf("GET http[s]?://%s:%s/api/v1/namespaces/invalid/pods", inClusterHost, inClusterPort)))
+			gomega.Expect(execOutput).To(gomega.ContainSubstring("No resources found"))
+			gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster namespace"))
+			gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster configuration"))
+			gomega.Expect(execOutput).To(gomega.MatchRegexp(fmt.Sprintf("GET http[s]?://%s:%s/api/v1/namespaces/invalid/pods", inClusterHost, inClusterPort)))
 
-			By("trying to use kubectl with kubeconfig")
+			ginkgo.By("trying to use kubectl with kubeconfig")
 			execOutput = framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --kubeconfig=/tmp/"+overrideKubeconfigName+" --v=6 2>&1")
-			Expect(execOutput).ToNot(ContainSubstring("Using in-cluster namespace"))
-			Expect(execOutput).ToNot(ContainSubstring("Using in-cluster configuration"))
-			Expect(execOutput).To(ContainSubstring("GET https://kubernetes.default.svc:443/api/v1/namespaces/default/pods"))
+			gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster namespace"))
+			gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster configuration"))
+			gomega.Expect(execOutput).To(gomega.ContainSubstring("GET https://kubernetes.default.svc:443/api/v1/namespaces/default/pods"))
 		})
 	})
 
@@ -716,7 +716,7 @@ metadata:
 			Description: Run kubectl to get api versions, output MUST contain returned versions with ‘v1’ listed.
*/ framework.ConformanceIt("should check if v1 is in available api versions ", func() { - By("validating api versions") + ginkgo.By("validating api versions") output := framework.RunKubectlOrDie("api-versions") if !strings.Contains(output, "v1") { framework.Failf("No v1 in kubectl api-versions") @@ -725,90 +725,90 @@ metadata: }) framework.KubeDescribe("Kubectl get componentstatuses", func() { - It("should get componentstatuses", func() { - By("getting list of componentstatuses") + ginkgo.It("should get componentstatuses", func() { + ginkgo.By("getting list of componentstatuses") output := framework.RunKubectlOrDie("get", "componentstatuses", "-o", "jsonpath={.items[*].metadata.name}") components := strings.Split(output, " ") - By("getting details of componentstatuses") + ginkgo.By("getting details of componentstatuses") for _, component := range components { - By("getting status of " + component) + ginkgo.By("getting status of " + component) framework.RunKubectlOrDie("get", "componentstatuses", component) } }) }) framework.KubeDescribe("Kubectl apply", func() { - It("should apply a new configuration to an existing RC", func() { - controllerJson := commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename))) + ginkgo.It("should apply a new configuration to an existing RC", func() { + controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename))) nsFlag := fmt.Sprintf("--namespace=%v", ns) - By("creating Redis RC") - framework.RunKubectlOrDieInput(controllerJson, "create", "-f", "-", nsFlag) - By("applying a modified configuration") - stdin := modifyReplicationControllerConfiguration(controllerJson) + ginkgo.By("creating Redis RC") + framework.RunKubectlOrDieInput(controllerJSON, "create", "-f", "-", nsFlag) + ginkgo.By("applying a modified configuration") + stdin := modifyReplicationControllerConfiguration(controllerJSON) framework.NewKubectlCommand("apply", "-f", "-", nsFlag). WithStdinReader(stdin). 
ExecOrDie() - By("checking the result") + ginkgo.By("checking the result") forEachReplicationController(c, ns, "app", "redis", validateReplicationControllerConfiguration) }) - It("should reuse port when apply to an existing SVC", func() { - serviceJson := readTestFileOrDie(redisServiceFilename) + ginkgo.It("should reuse port when apply to an existing SVC", func() { + serviceJSON := readTestFileOrDie(redisServiceFilename) nsFlag := fmt.Sprintf("--namespace=%v", ns) - By("creating Redis SVC") - framework.RunKubectlOrDieInput(string(serviceJson[:]), "create", "-f", "-", nsFlag) + ginkgo.By("creating Redis SVC") + framework.RunKubectlOrDieInput(string(serviceJSON[:]), "create", "-f", "-", nsFlag) - By("getting the original port") + ginkgo.By("getting the original port") originalNodePort := framework.RunKubectlOrDie("get", "service", "redis-master", nsFlag, "-o", "jsonpath={.spec.ports[0].port}") - By("applying the same configuration") - framework.RunKubectlOrDieInput(string(serviceJson[:]), "apply", "-f", "-", nsFlag) + ginkgo.By("applying the same configuration") + framework.RunKubectlOrDieInput(string(serviceJSON[:]), "apply", "-f", "-", nsFlag) - By("getting the port after applying configuration") + ginkgo.By("getting the port after applying configuration") currentNodePort := framework.RunKubectlOrDie("get", "service", "redis-master", nsFlag, "-o", "jsonpath={.spec.ports[0].port}") - By("checking the result") + ginkgo.By("checking the result") if originalNodePort != currentNodePort { framework.Failf("port should keep the same") } }) - It("apply set/view last-applied", func() { + ginkgo.It("apply set/view last-applied", func() { deployment1Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(nginxDeployment1Filename))) deployment2Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(nginxDeployment2Filename))) deployment3Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(nginxDeployment3Filename))) nsFlag := fmt.Sprintf("--namespace=%v", ns) - By("deployment replicas number is 2") + ginkgo.By("deployment replicas number is 2") framework.RunKubectlOrDieInput(deployment1Yaml, "apply", "-f", "-", nsFlag) - By("check the last-applied matches expectations annotations") + ginkgo.By("check the last-applied matches expectations annotations") output := framework.RunKubectlOrDieInput(deployment1Yaml, "apply", "view-last-applied", "-f", "-", nsFlag, "-o", "json") requiredString := "\"replicas\": 2" if !strings.Contains(output, requiredString) { framework.Failf("Missing %s in kubectl view-last-applied", requiredString) } - By("apply file doesn't have replicas") + ginkgo.By("apply file doesn't have replicas") framework.RunKubectlOrDieInput(deployment2Yaml, "apply", "set-last-applied", "-f", "-", nsFlag) - By("check last-applied has been updated, annotations doesn't have replicas") + ginkgo.By("check last-applied has been updated, annotations doesn't have replicas") output = framework.RunKubectlOrDieInput(deployment1Yaml, "apply", "view-last-applied", "-f", "-", nsFlag, "-o", "json") requiredString = "\"replicas\": 2" if strings.Contains(output, requiredString) { framework.Failf("Presenting %s in kubectl view-last-applied", requiredString) } - By("scale set replicas to 3") + ginkgo.By("scale set replicas to 3") nginxDeploy := "nginx-deployment" debugDiscovery() framework.RunKubectlOrDie("scale", "deployment", nginxDeploy, "--replicas=3", nsFlag) - By("apply file doesn't have replicas but image changed") + ginkgo.By("apply file doesn't have replicas but image changed") 
framework.RunKubectlOrDieInput(deployment3Yaml, "apply", "-f", "-", nsFlag) - By("verify replicas still is 3 and image has been updated") + ginkgo.By("verify replicas still is 3 and image has been updated") output = framework.RunKubectlOrDieInput(deployment3Yaml, "get", "-f", "-", nsFlag, "-o", "json") requiredItems := []string{"\"replicas\": 3", imageutils.GetE2EImage(imageutils.Nginx)} for _, item := range requiredItems { @@ -826,7 +826,7 @@ metadata: Description: Call kubectl to get cluster-info, output MUST contain cluster-info returned and Kubernetes Master SHOULD be running. */ framework.ConformanceIt("should check if Kubernetes master services is included in cluster-info ", func() { - By("validating cluster-info") + ginkgo.By("validating cluster-info") output := framework.RunKubectlOrDie("cluster-info") // Can't check exact strings due to terminal control commands (colors) requiredItems := []string{"Kubernetes master", "is running at"} @@ -839,8 +839,8 @@ metadata: }) framework.KubeDescribe("Kubectl cluster-info dump", func() { - It("should check if cluster-info dump succeeds", func() { - By("running cluster-info dump") + ginkgo.It("should check if cluster-info dump succeeds", func() { + ginkgo.By("running cluster-info dump") framework.RunKubectlOrDie("cluster-info", "dump") }) }) @@ -853,16 +853,16 @@ metadata: */ framework.ConformanceIt("should check if kubectl describe prints relevant information for rc and pods ", func() { kv, err := framework.KubectlVersion() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.SkipUnlessServerVersionGTE(kv, c.Discovery()) - controllerJson := commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename))) - serviceJson := readTestFileOrDie(redisServiceFilename) + controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename))) + serviceJSON := readTestFileOrDie(redisServiceFilename) nsFlag := fmt.Sprintf("--namespace=%v", ns) - framework.RunKubectlOrDieInput(controllerJson, "create", "-f", "-", nsFlag) - framework.RunKubectlOrDieInput(string(serviceJson[:]), "create", "-f", "-", nsFlag) + framework.RunKubectlOrDieInput(controllerJSON, "create", "-f", "-", nsFlag) + framework.RunKubectlOrDieInput(string(serviceJSON[:]), "create", "-f", "-", nsFlag) - By("Waiting for Redis master to start.") + ginkgo.By("Waiting for Redis master to start.") waitForOrFailWithDebug(1) // Pod @@ -919,7 +919,7 @@ metadata: // Node // It should be OK to list unschedulable Nodes here. nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) node := nodes.Items[0] output = framework.RunKubectlOrDie("describe", "node", node.Name) requiredStrings = [][]string{ @@ -960,18 +960,18 @@ metadata: Description: Create a Pod running redis master listening to port 6379. Using kubectl expose the redis master replication controllers at port 1234. Validate that the replication controller is listening on port 1234 and the target port is set to 6379, port that redis master is listening. Using kubectl expose the redis master as a service at port 2345. The service MUST be listening on port 2345 and the target port is set to 6379, port that redis master is listening. 
*/ framework.ConformanceIt("should create services for rc ", func() { - controllerJson := commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename))) + controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename))) nsFlag := fmt.Sprintf("--namespace=%v", ns) redisPort := 6379 - By("creating Redis RC") + ginkgo.By("creating Redis RC") framework.Logf("namespace %v", ns) - framework.RunKubectlOrDieInput(controllerJson, "create", "-f", "-", nsFlag) + framework.RunKubectlOrDieInput(controllerJSON, "create", "-f", "-", nsFlag) // It may take a while for the pods to get registered in some cases, wait to be sure. - By("Waiting for Redis master to start.") + ginkgo.By("Waiting for Redis master to start.") waitForOrFailWithDebug(1) forEachPod(func(pod v1.Pod) { framework.Logf("wait on redis-master startup in %v ", ns) @@ -1008,10 +1008,10 @@ metadata: } return true, nil }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) service, err := c.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if len(service.Spec.Ports) != 1 { framework.Failf("1 port is expected") @@ -1025,12 +1025,12 @@ metadata: } } - By("exposing RC") + ginkgo.By("exposing RC") framework.RunKubectlOrDie("expose", "rc", "redis-master", "--name=rm2", "--port=1234", fmt.Sprintf("--target-port=%d", redisPort), nsFlag) framework.WaitForService(c, ns, "rm2", true, framework.Poll, framework.ServiceStartTimeout) validateService("rm2", 1234, framework.ServiceStartTimeout) - By("exposing service") + ginkgo.By("exposing service") framework.RunKubectlOrDie("expose", "service", "rm2", "--name=rm3", "--port=2345", fmt.Sprintf("--target-port=%d", redisPort), nsFlag) framework.WaitForService(c, ns, "rm3", true, framework.Poll, framework.ServiceStartTimeout) validateService("rm3", 2345, framework.ServiceStartTimeout) @@ -1040,14 +1040,14 @@ metadata: framework.KubeDescribe("Kubectl label", func() { var podYaml string var nsFlag string - BeforeEach(func() { - By("creating the pod") + ginkgo.BeforeEach(func() { + ginkgo.By("creating the pod") podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pause-pod.yaml.in"))) nsFlag = fmt.Sprintf("--namespace=%v", ns) framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag) - Expect(framework.CheckPodsRunningReady(c, ns, []string{pausePodName}, framework.PodStartTimeout)).To(BeTrue()) + gomega.Expect(framework.CheckPodsRunningReady(c, ns, []string{pausePodName}, framework.PodStartTimeout)).To(gomega.BeTrue()) }) - AfterEach(func() { + ginkgo.AfterEach(func() { cleanupKubectlInputs(podYaml, ns, pausePodSelector) }) @@ -1060,17 +1060,17 @@ metadata: labelName := "testing-label" labelValue := "testing-label-value" - By("adding the label " + labelName + " with value " + labelValue + " to a pod") + ginkgo.By("adding the label " + labelName + " with value " + labelValue + " to a pod") framework.RunKubectlOrDie("label", "pods", pausePodName, labelName+"="+labelValue, nsFlag) - By("verifying the pod has the label " + labelName + " with the value " + labelValue) + ginkgo.By("verifying the pod has the label " + labelName + " with the value " + labelValue) output := framework.RunKubectlOrDie("get", "pod", pausePodName, "-L", labelName, nsFlag) if !strings.Contains(output, labelValue) { framework.Failf("Failed updating label " + labelName + " to the pod " + pausePodName) } - By("removing the label " + labelName + 
" of a pod") + ginkgo.By("removing the label " + labelName + " of a pod") framework.RunKubectlOrDie("label", "pods", pausePodName, labelName+"-", nsFlag) - By("verifying the pod doesn't have the label " + labelName) + ginkgo.By("verifying the pod doesn't have the label " + labelName) output = framework.RunKubectlOrDie("get", "pod", pausePodName, "-L", labelName, nsFlag) if strings.Contains(output, labelValue) { framework.Failf("Failed removing label " + labelName + " of the pod " + pausePodName) @@ -1081,14 +1081,14 @@ metadata: framework.KubeDescribe("Kubectl copy", func() { var podYaml string var nsFlag string - BeforeEach(func() { - By("creating the pod") + ginkgo.BeforeEach(func() { + ginkgo.By("creating the pod") nsFlag = fmt.Sprintf("--namespace=%v", ns) podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-pod.yaml"))) framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag) - Expect(framework.CheckPodsRunningReady(c, ns, []string{busyboxPodName}, framework.PodStartTimeout)).To(BeTrue()) + gomega.Expect(framework.CheckPodsRunningReady(c, ns, []string{busyboxPodName}, framework.PodStartTimeout)).To(gomega.BeTrue()) }) - AfterEach(func() { + ginkgo.AfterEach(func() { cleanupKubectlInputs(podYaml, ns, busyboxPodSelector) }) @@ -1097,7 +1097,7 @@ metadata: Testname: Kubectl, copy Description: When a Pod is running, copy a known file from it to a temporary local destination. */ - It("should copy a file from a running Pod", func() { + ginkgo.It("should copy a file from a running Pod", func() { remoteContents := "foobar\n" podSource := fmt.Sprintf("%s:/root/foo/bar/foo.bar", busyboxPodName) tempDestination, err := ioutil.TempFile(os.TempDir(), "copy-foobar") @@ -1105,9 +1105,9 @@ metadata: framework.Failf("Failed creating temporary destination file: %v", err) } - By("specifying a remote filepath " + podSource + " on the pod") + ginkgo.By("specifying a remote filepath " + podSource + " on the pod") framework.RunKubectlOrDie("cp", podSource, tempDestination.Name(), nsFlag) - By("verifying that the contents of the remote file " + podSource + " have been copied to a local file " + tempDestination.Name()) + ginkgo.By("verifying that the contents of the remote file " + podSource + " have been copied to a local file " + tempDestination.Name()) localData, err := ioutil.ReadAll(tempDestination) if err != nil { framework.Failf("Failed reading temporary local file: %v", err) @@ -1122,13 +1122,13 @@ metadata: var nsFlag string var rc string containerName := "redis-master" - BeforeEach(func() { - By("creating an rc") + ginkgo.BeforeEach(func() { + ginkgo.By("creating an rc") rc = commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename))) nsFlag = fmt.Sprintf("--namespace=%v", ns) framework.RunKubectlOrDieInput(rc, "create", "-f", "-", nsFlag) }) - AfterEach(func() { + ginkgo.AfterEach(func() { cleanupKubectlInputs(rc, ns, simplePodSelector) }) @@ -1150,45 +1150,45 @@ metadata: return strings.Split(strings.TrimRight(out, "\n"), "\n") } - By("Waiting for Redis master to start.") + ginkgo.By("Waiting for Redis master to start.") waitForOrFailWithDebug(1) forEachPod(func(pod v1.Pod) { - By("checking for a matching strings") + ginkgo.By("checking for a matching strings") _, err := framework.LookForStringInLog(ns, pod.Name, containerName, "The server is now ready to accept connections", framework.PodStartTimeout) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("limiting log lines") + ginkgo.By("limiting log 
lines") out := framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--tail=1") - Expect(len(out)).NotTo(BeZero()) - Expect(len(lines(out))).To(Equal(1)) + gomega.Expect(len(out)).NotTo(gomega.BeZero()) + gomega.Expect(len(lines(out))).To(gomega.Equal(1)) - By("limiting log bytes") + ginkgo.By("limiting log bytes") out = framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--limit-bytes=1") - Expect(len(lines(out))).To(Equal(1)) - Expect(len(out)).To(Equal(1)) + gomega.Expect(len(lines(out))).To(gomega.Equal(1)) + gomega.Expect(len(out)).To(gomega.Equal(1)) - By("exposing timestamps") + ginkgo.By("exposing timestamps") out = framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--tail=1", "--timestamps") l := lines(out) - Expect(len(l)).To(Equal(1)) + gomega.Expect(len(l)).To(gomega.Equal(1)) words := strings.Split(l[0], " ") - Expect(len(words)).To(BeNumerically(">", 1)) + gomega.Expect(len(words)).To(gomega.BeNumerically(">", 1)) if _, err := time.Parse(time.RFC3339Nano, words[0]); err != nil { if _, err := time.Parse(time.RFC3339, words[0]); err != nil { framework.Failf("expected %q to be RFC3339 or RFC3339Nano", words[0]) } } - By("restricting to a time range") + ginkgo.By("restricting to a time range") // Note: we must wait at least two seconds, // because the granularity is only 1 second and // it could end up rounding the wrong way. time.Sleep(2500 * time.Millisecond) // ensure that startup logs on the node are seen as older than 1s - recent_out := framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--since=1s") - recent := len(strings.Split(recent_out, "\n")) - older_out := framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--since=24h") - older := len(strings.Split(older_out, "\n")) - Expect(recent).To(BeNumerically("<", older), "expected recent(%v) to be less than older(%v)\nrecent lines:\n%v\nolder lines:\n%v\n", recent, older, recent_out, older_out) + recentOut := framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--since=1s") + recent := len(strings.Split(recentOut, "\n")) + olderOut := framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--since=24h") + older := len(strings.Split(olderOut, "\n")) + gomega.Expect(recent).To(gomega.BeNumerically("<", older), "expected recent(%v) to be less than older(%v)\nrecent lines:\n%v\nolder lines:\n%v\n", recent, older, recentOut, olderOut) }) }) }) @@ -1200,18 +1200,18 @@ metadata: Description: Start running a redis master and a replication controller. When the pod is running, using ‘kubectl patch’ command add annotations. The annotation MUST be added to running pods and SHOULD be able to read added annotations from each of the Pods running under the replication controller. 
*/ framework.ConformanceIt("should add annotations for pods in rc ", func() { - controllerJson := commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename))) + controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename))) nsFlag := fmt.Sprintf("--namespace=%v", ns) - By("creating Redis RC") - framework.RunKubectlOrDieInput(controllerJson, "create", "-f", "-", nsFlag) - By("Waiting for Redis master to start.") + ginkgo.By("creating Redis RC") + framework.RunKubectlOrDieInput(controllerJSON, "create", "-f", "-", nsFlag) + ginkgo.By("Waiting for Redis master to start.") waitForOrFailWithDebug(1) - By("patching all pods") + ginkgo.By("patching all pods") forEachPod(func(pod v1.Pod) { framework.RunKubectlOrDie("patch", "pod", pod.Name, nsFlag, "-p", "{\"metadata\":{\"annotations\":{\"x\":\"y\"}}}") }) - By("checking annotations") + ginkgo.By("checking annotations") forEachPod(func(pod v1.Pod) { found := false for key, val := range pod.Annotations { @@ -1250,13 +1250,13 @@ metadata: var cleanUp func() - BeforeEach(func() { + ginkgo.BeforeEach(func() { nsFlag = fmt.Sprintf("--namespace=%v", ns) name = "e2e-test-nginx-deployment" cleanUp = func() { framework.RunKubectlOrDie("delete", "deployment", name, nsFlag) } }) - AfterEach(func() { + ginkgo.AfterEach(func() { cleanUp() }) @@ -1266,9 +1266,9 @@ metadata: Description: Command ‘kubectl run’ MUST create a running pod with possible replicas given a image using the option --image=’nginx’. The running Pod SHOULD have one container and the container SHOULD be running the image specified in the ‘run’ command. */ framework.ConformanceIt("should create an rc or deployment from an image ", func() { - By("running the image " + nginxImage) + ginkgo.By("running the image " + nginxImage) framework.RunKubectlOrDie("run", name, "--image="+nginxImage, nsFlag) - By("verifying the pod controlled by " + name + " gets created") + ginkgo.By("verifying the pod controlled by " + name + " gets created") label := labels.SelectorFromSet(labels.Set(map[string]string{"run": name})) podlist, err := framework.WaitForPodsWithLabel(c, ns, label) if err != nil { @@ -1286,12 +1286,12 @@ metadata: var nsFlag string var rcName string - BeforeEach(func() { + ginkgo.BeforeEach(func() { nsFlag = fmt.Sprintf("--namespace=%v", ns) rcName = "e2e-test-nginx-rc" }) - AfterEach(func() { + ginkgo.AfterEach(func() { framework.RunKubectlOrDie("delete", "rc", rcName, nsFlag) }) @@ -1301,9 +1301,9 @@ metadata: Description: Command ‘kubectl run’ MUST create a running rc with default one replicas given a image using the option --image=’nginx’. The running replication controller SHOULD have one container and the container SHOULD be running the image specified in the ‘run’ command. Also there MUST be 1 pod controlled by this replica set running 1 container with the image specified. A ‘kubetctl logs’ command MUST return the logs from the container in the replication controller. 
*/ framework.ConformanceIt("should create an rc from an image ", func() { - By("running the image " + nginxImage) + ginkgo.By("running the image " + nginxImage) framework.RunKubectlOrDie("run", rcName, "--image="+nginxImage, "--generator=run/v1", nsFlag) - By("verifying the rc " + rcName + " was created") + ginkgo.By("verifying the rc " + rcName + " was created") rc, err := c.CoreV1().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting rc %s: %v", rcName, err) @@ -1313,7 +1313,7 @@ metadata: framework.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, nginxImage) } - By("verifying the pod controlled by rc " + rcName + " was created") + ginkgo.By("verifying the pod controlled by rc " + rcName + " was created") label := labels.SelectorFromSet(labels.Set(map[string]string{"run": rcName})) podlist, err := framework.WaitForPodsWithLabel(c, ns, label) if err != nil { @@ -1325,7 +1325,7 @@ metadata: framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods)) } - By("confirm that you can get logs from an rc") + ginkgo.By("confirm that you can get logs from an rc") podNames := []string{} for _, pod := range pods { podNames = append(podNames, pod.Name) @@ -1346,13 +1346,13 @@ metadata: var rcName string var c clientset.Interface - BeforeEach(func() { + ginkgo.BeforeEach(func() { c = f.ClientSet nsFlag = fmt.Sprintf("--namespace=%v", ns) rcName = "e2e-test-nginx-rc" }) - AfterEach(func() { + ginkgo.AfterEach(func() { framework.RunKubectlOrDie("delete", "rc", rcName, nsFlag) }) @@ -1362,9 +1362,9 @@ metadata: Description: Command ‘kubectl rolling-update’ MUST replace the specified replication controller with a new replication controller by updating one pod at a time to use the new Pod spec. */ framework.ConformanceIt("should support rolling-update to same image ", func() { - By("running the image " + nginxImage) + ginkgo.By("running the image " + nginxImage) framework.RunKubectlOrDie("run", rcName, "--image="+nginxImage, "--generator=run/v1", nsFlag) - By("verifying the rc " + rcName + " was created") + ginkgo.By("verifying the rc " + rcName + " was created") rc, err := c.CoreV1().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting rc %s: %v", rcName, err) @@ -1375,7 +1375,7 @@ metadata: } framework.WaitForRCToStabilize(c, ns, rcName, framework.PodStartTimeout) - By("rolling-update to same image controller") + ginkgo.By("rolling-update to same image controller") debugDiscovery() runKubectlRetryOrDie("rolling-update", rcName, "--update-period=1s", "--image="+nginxImage, "--image-pull-policy="+string(v1.PullIfNotPresent), nsFlag) @@ -1387,12 +1387,12 @@ metadata: var nsFlag string var dName string - BeforeEach(func() { + ginkgo.BeforeEach(func() { nsFlag = fmt.Sprintf("--namespace=%v", ns) dName = "e2e-test-nginx-deployment" }) - AfterEach(func() { + ginkgo.AfterEach(func() { err := wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) { out, err := framework.RunKubectl("delete", "deployment", dName, nsFlag) if err != nil { @@ -1403,7 +1403,7 @@ metadata: } return true, nil }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) /* @@ -1412,9 +1412,9 @@ metadata: Description: Command ‘kubectl run’ MUST create a deployment, with --generator=deployment, when a image name is specified in the run command. 
After the run command there SHOULD be a deployment that should exist with one container running the specified image. Also there SHOULD be a Pod that is controlled by this deployment, with a container running the specified image. */ framework.ConformanceIt("should create a deployment from an image ", func() { - By("running the image " + nginxImage) + ginkgo.By("running the image " + nginxImage) framework.RunKubectlOrDie("run", dName, "--image="+nginxImage, "--generator=deployment/v1beta1", nsFlag) - By("verifying the deployment " + dName + " was created") + ginkgo.By("verifying the deployment " + dName + " was created") d, err := c.AppsV1().Deployments(ns).Get(dName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting deployment %s: %v", dName, err) @@ -1424,7 +1424,7 @@ metadata: framework.Failf("Failed creating deployment %s for 1 pod with expected image %s", dName, nginxImage) } - By("verifying the pod controlled by deployment " + dName + " was created") + ginkgo.By("verifying the pod controlled by deployment " + dName + " was created") label := labels.SelectorFromSet(labels.Set(map[string]string{"run": dName})) podlist, err := framework.WaitForPodsWithLabel(c, ns, label) if err != nil { @@ -1442,12 +1442,12 @@ metadata: var nsFlag string var jobName string - BeforeEach(func() { + ginkgo.BeforeEach(func() { nsFlag = fmt.Sprintf("--namespace=%v", ns) jobName = "e2e-test-nginx-job" }) - AfterEach(func() { + ginkgo.AfterEach(func() { framework.RunKubectlOrDie("delete", "jobs", jobName, nsFlag) }) @@ -1457,9 +1457,9 @@ metadata: Description: Command ‘kubectl run’ MUST create a job, with --generator=job, when a image name is specified in the run command. After the run command there SHOULD be a job that should exist with one container running the specified image. Also there SHOULD be a restart policy on the job spec that SHOULD match the command line. */ framework.ConformanceIt("should create a job from an image when restart is OnFailure ", func() { - By("running the image " + nginxImage) + ginkgo.By("running the image " + nginxImage) framework.RunKubectlOrDie("run", jobName, "--restart=OnFailure", "--generator=job/v1", "--image="+nginxImage, nsFlag) - By("verifying the job " + jobName + " was created") + ginkgo.By("verifying the job " + jobName + " was created") job, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting job %s: %v", jobName, err) @@ -1478,22 +1478,22 @@ metadata: var nsFlag string var cjName string - BeforeEach(func() { + ginkgo.BeforeEach(func() { nsFlag = fmt.Sprintf("--namespace=%v", ns) cjName = "e2e-test-echo-cronjob-beta" }) - AfterEach(func() { + ginkgo.AfterEach(func() { framework.RunKubectlOrDie("delete", "cronjobs", cjName, nsFlag) }) - It("should create a CronJob", func() { - framework.SkipIfMissingResource(f.DynamicClient, CronJobGroupVersionResourceBeta, f.Namespace.Name) + ginkgo.It("should create a CronJob", func() { + framework.SkipIfMissingResource(f.DynamicClient, cronJobGroupVersionResourceBeta, f.Namespace.Name) schedule := "*/5 * * * ?" 
framework.RunKubectlOrDie("run", cjName, "--restart=OnFailure", "--generator=cronjob/v1beta1", "--schedule="+schedule, "--image="+busyboxImage, nsFlag) - By("verifying the CronJob " + cjName + " was created") + ginkgo.By("verifying the CronJob " + cjName + " was created") cj, err := c.BatchV1beta1().CronJobs(ns).Get(cjName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting CronJob %s: %v", cjName, err) @@ -1515,12 +1515,12 @@ metadata: var nsFlag string var podName string - BeforeEach(func() { + ginkgo.BeforeEach(func() { nsFlag = fmt.Sprintf("--namespace=%v", ns) podName = "e2e-test-nginx-pod" }) - AfterEach(func() { + ginkgo.AfterEach(func() { framework.RunKubectlOrDie("delete", "pods", podName, nsFlag) }) @@ -1530,9 +1530,9 @@ metadata: Description: Command ‘kubectl run’ MUST create a pod, with --generator=run-pod, when a image name is specified in the run command. After the run command there SHOULD be a pod that should exist with one container running the specified image. */ framework.ConformanceIt("should create a pod from an image when restart is Never ", func() { - By("running the image " + nginxImage) + ginkgo.By("running the image " + nginxImage) framework.RunKubectlOrDie("run", podName, "--restart=Never", "--generator=run-pod/v1", "--image="+nginxImage, nsFlag) - By("verifying the pod " + podName + " was created") + ginkgo.By("verifying the pod " + podName + " was created") pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting pod %s: %v", podName, err) @@ -1551,12 +1551,12 @@ metadata: var nsFlag string var podName string - BeforeEach(func() { + ginkgo.BeforeEach(func() { nsFlag = fmt.Sprintf("--namespace=%v", ns) podName = "e2e-test-nginx-pod" }) - AfterEach(func() { + ginkgo.AfterEach(func() { framework.RunKubectlOrDie("delete", "pods", podName, nsFlag) }) @@ -1566,27 +1566,27 @@ metadata: Description: Command ‘kubectl replace’ on a existing Pod with a new spec MUST update the image of the container running in the Pod. A -f option to ‘kubectl replace’ SHOULD force to re-create the resource. The new Pod SHOULD have the container with new change to the image. 
*/ framework.ConformanceIt("should update a single-container pod's image ", func() { - By("running the image " + nginxImage) + ginkgo.By("running the image " + nginxImage) framework.RunKubectlOrDie("run", podName, "--generator=run-pod/v1", "--image="+nginxImage, "--labels=run="+podName, nsFlag) - By("verifying the pod " + podName + " is running") + ginkgo.By("verifying the pod " + podName + " is running") label := labels.SelectorFromSet(labels.Set(map[string]string{"run": podName})) err := testutils.WaitForPodsWithLabelRunning(c, ns, label) if err != nil { framework.Failf("Failed getting pod %s: %v", podName, err) } - By("verifying the pod " + podName + " was created") - podJson := framework.RunKubectlOrDie("get", "pod", podName, nsFlag, "-o", "json") - if !strings.Contains(podJson, podName) { - framework.Failf("Failed to find pod %s in [%s]", podName, podJson) + ginkgo.By("verifying the pod " + podName + " was created") + podJSON := framework.RunKubectlOrDie("get", "pod", podName, nsFlag, "-o", "json") + if !strings.Contains(podJSON, podName) { + framework.Failf("Failed to find pod %s in [%s]", podName, podJSON) } - By("replace the image in the pod") - podJson = strings.Replace(podJson, nginxImage, busyboxImage, 1) - framework.RunKubectlOrDieInput(podJson, "replace", "-f", "-", nsFlag) + ginkgo.By("replace the image in the pod") + podJSON = strings.Replace(podJSON, nginxImage, busyboxImage, 1) + framework.RunKubectlOrDieInput(podJSON, "replace", "-f", "-", nsFlag) - By("verifying the pod " + podName + " has the right image " + busyboxImage) + ginkgo.By("verifying the pod " + podName + " has the right image " + busyboxImage) pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting deployment %s: %v", podName, err) @@ -1609,23 +1609,23 @@ metadata: framework.ConformanceIt("should create a job from an image, then delete the job ", func() { nsFlag := fmt.Sprintf("--namespace=%v", ns) - By("executing a command with run --rm and attach with stdin") + ginkgo.By("executing a command with run --rm and attach with stdin") t := time.NewTimer(runJobTimeout) defer t.Stop() runOutput := framework.NewKubectlCommand(nsFlag, "run", jobName, "--image="+busyboxImage, "--rm=true", "--generator=job/v1", "--restart=OnFailure", "--attach=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'"). WithStdinData("abcd1234"). WithTimeout(t.C). ExecOrDie() - Expect(runOutput).To(ContainSubstring("abcd1234")) - Expect(runOutput).To(ContainSubstring("stdin closed")) + gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234")) + gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed")) err := framework.WaitForJobGone(c, ns, jobName, wait.ForeverTestTimeout) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("verifying the job " + jobName + " was deleted") + ginkgo.By("verifying the job " + jobName + " was deleted") _, err = c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) - Expect(err).To(HaveOccurred()) - Expect(apierrs.IsNotFound(err)).To(BeTrue()) + gomega.Expect(err).To(gomega.HaveOccurred()) + gomega.Expect(apierrs.IsNotFound(err)).To(gomega.BeTrue()) }) }) @@ -1637,7 +1637,7 @@ metadata: Description: Start a proxy server on port zero by running ‘kubectl proxy’ with --port=0. Call the proxy server by requesting api versions from unix socket. The proxy server MUST provide at least one version string. 
*/ framework.ConformanceIt("should support proxy with --port 0 ", func() { - By("starting the proxy server") + ginkgo.By("starting the proxy server") port, cmd, err := startProxyServer() if cmd != nil { defer framework.TryKill(cmd) @@ -1645,7 +1645,7 @@ metadata: if err != nil { framework.Failf("Failed to start proxy server: %v", err) } - By("curling proxy /api/ output") + ginkgo.By("curling proxy /api/ output") localAddr := fmt.Sprintf("http://localhost:%d/api/", port) apiVersions, err := getAPIVersions(localAddr) if err != nil { @@ -1662,7 +1662,7 @@ metadata: Description: Start a proxy server on by running ‘kubectl proxy’ with --unix-socket=. Call the proxy server by requesting api versions from http://locahost:0/api. The proxy server MUST provide at least one version string */ framework.ConformanceIt("should support --unix-socket=/path ", func() { - By("Starting the proxy") + ginkgo.By("Starting the proxy") tmpdir, err := ioutil.TempDir("", "kubectl-proxy-unix") if err != nil { framework.Failf("Failed to create temporary directory: %v", err) @@ -1682,7 +1682,7 @@ metadata: if _, err = stdout.Read(buf); err != nil { framework.Failf("Expected output from kubectl proxy: %v", err) } - By("retrieving proxy /api/ output") + ginkgo.By("retrieving proxy /api/ output") _, err = curlUnix("http://unused/api", path) if err != nil { framework.Failf("Failed get of /api at %s: %v", path, err) @@ -1693,7 +1693,7 @@ metadata: // This test must run [Serial] because it modifies the node so it doesn't allow pods to execute on // it, which will affect anything else running in parallel. framework.KubeDescribe("Kubectl taint [Serial]", func() { - It("should update the taint on a node", func() { + ginkgo.It("should update the taint on a node", func() { testTaint := v1.Taint{ Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-001-%s", string(uuid.NewUUID())), Value: "testing-taint-value", @@ -1702,11 +1702,11 @@ metadata: nodeName := scheduling.GetNodeThatCanRunPod(f) - By("adding the taint " + testTaint.ToString() + " to a node") + ginkgo.By("adding the taint " + testTaint.ToString() + " to a node") runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.ToString()) defer framework.RemoveTaintOffNode(f.ClientSet, nodeName, testTaint) - By("verifying the node has the taint " + testTaint.ToString()) + ginkgo.By("verifying the node has the taint " + testTaint.ToString()) output := runKubectlRetryOrDie("describe", "node", nodeName) requiredStrings := [][]string{ {"Name:", nodeName}, @@ -1715,16 +1715,16 @@ metadata: } checkOutput(output, requiredStrings) - By("removing the taint " + testTaint.ToString() + " of a node") + ginkgo.By("removing the taint " + testTaint.ToString() + " of a node") runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.Key+":"+string(testTaint.Effect)+"-") - By("verifying the node doesn't have the taint " + testTaint.Key) + ginkgo.By("verifying the node doesn't have the taint " + testTaint.Key) output = runKubectlRetryOrDie("describe", "node", nodeName) if strings.Contains(output, testTaint.Key) { framework.Failf("Failed removing taint " + testTaint.Key + " of the node " + nodeName) } }) - It("should remove all the taints with the same key off a node", func() { + ginkgo.It("should remove all the taints with the same key off a node", func() { testTaint := v1.Taint{ Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-002-%s", string(uuid.NewUUID())), Value: "testing-taint-value", @@ -1733,11 +1733,11 @@ metadata: nodeName := scheduling.GetNodeThatCanRunPod(f) - By("adding the taint " + 
testTaint.ToString() + " to a node") + ginkgo.By("adding the taint " + testTaint.ToString() + " to a node") runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.ToString()) defer framework.RemoveTaintOffNode(f.ClientSet, nodeName, testTaint) - By("verifying the node has the taint " + testTaint.ToString()) + ginkgo.By("verifying the node has the taint " + testTaint.ToString()) output := runKubectlRetryOrDie("describe", "node", nodeName) requiredStrings := [][]string{ {"Name:", nodeName}, @@ -1751,11 +1751,11 @@ metadata: Value: "another-testing-taint-value", Effect: v1.TaintEffectPreferNoSchedule, } - By("adding another taint " + newTestTaint.ToString() + " to the node") + ginkgo.By("adding another taint " + newTestTaint.ToString() + " to the node") runKubectlRetryOrDie("taint", "nodes", nodeName, newTestTaint.ToString()) defer framework.RemoveTaintOffNode(f.ClientSet, nodeName, newTestTaint) - By("verifying the node has the taint " + newTestTaint.ToString()) + ginkgo.By("verifying the node has the taint " + newTestTaint.ToString()) output = runKubectlRetryOrDie("describe", "node", nodeName) requiredStrings = [][]string{ {"Name:", nodeName}, @@ -1769,11 +1769,11 @@ metadata: Value: "testing-taint-value-no-execute", Effect: v1.TaintEffectNoExecute, } - By("adding NoExecute taint " + noExecuteTaint.ToString() + " to the node") + ginkgo.By("adding NoExecute taint " + noExecuteTaint.ToString() + " to the node") runKubectlRetryOrDie("taint", "nodes", nodeName, noExecuteTaint.ToString()) defer framework.RemoveTaintOffNode(f.ClientSet, nodeName, noExecuteTaint) - By("verifying the node has the taint " + noExecuteTaint.ToString()) + ginkgo.By("verifying the node has the taint " + noExecuteTaint.ToString()) output = runKubectlRetryOrDie("describe", "node", nodeName) requiredStrings = [][]string{ {"Name:", nodeName}, @@ -1782,9 +1782,9 @@ metadata: } checkOutput(output, requiredStrings) - By("removing all taints that have the same key " + testTaint.Key + " of the node") + ginkgo.By("removing all taints that have the same key " + testTaint.Key + " of the node") runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.Key+"-") - By("verifying the node doesn't have the taints that have the same key " + testTaint.Key) + ginkgo.By("verifying the node doesn't have the taints that have the same key " + testTaint.Key) output = runKubectlRetryOrDie("describe", "node", nodeName) if strings.Contains(output, testTaint.Key) { framework.Failf("Failed removing taints " + testTaint.Key + " of the node " + nodeName) @@ -1793,14 +1793,14 @@ metadata: }) framework.KubeDescribe("Kubectl create quota", func() { - It("should create a quota without scopes", func() { + ginkgo.It("should create a quota without scopes", func() { nsFlag := fmt.Sprintf("--namespace=%v", ns) quotaName := "million" - By("calling kubectl quota") + ginkgo.By("calling kubectl quota") framework.RunKubectlOrDie("create", "quota", quotaName, "--hard=pods=1000000,services=1000000", nsFlag) - By("verifying that the quota was created") + ginkgo.By("verifying that the quota was created") quota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting quota %s: %v", quotaName, err) @@ -1822,14 +1822,14 @@ metadata: } }) - It("should create a quota with scopes", func() { + ginkgo.It("should create a quota with scopes", func() { nsFlag := fmt.Sprintf("--namespace=%v", ns) quotaName := "scopes" - By("calling kubectl quota") + ginkgo.By("calling kubectl quota") 
framework.RunKubectlOrDie("create", "quota", quotaName, "--hard=pods=1000000", "--scopes=BestEffort,NotTerminating", nsFlag) - By("verifying that the quota was created") + ginkgo.By("verifying that the quota was created") quota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) if err != nil { framework.Failf("Failed getting quota %s: %v", quotaName, err) @@ -1850,11 +1850,11 @@ metadata: } }) - It("should reject quota with invalid scopes", func() { + ginkgo.It("should reject quota with invalid scopes", func() { nsFlag := fmt.Sprintf("--namespace=%v", ns) quotaName := "scopes" - By("calling kubectl quota") + ginkgo.By("calling kubectl quota") out, err := framework.RunKubectl("create", "quota", quotaName, "--hard=hard=pods=1000000", "--scopes=Foo", nsFlag) if err == nil { framework.Failf("Expected kubectl to fail, but it succeeded: %s", out) @@ -1981,7 +1981,7 @@ func validateGuestbookApp(c clientset.Interface, ns string) { framework.Logf("Waiting for all frontend pods to be Running.") label := labels.SelectorFromSet(labels.Set(map[string]string{"tier": "frontend", "app": "guestbook"})) err := testutils.WaitForPodsWithLabelRunning(c, ns, label) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("Waiting for frontend to serve content.") if !waitForGuestbookResponse(c, "get", "", `{"data": ""}`, guestbookStartupTimeout, ns) { framework.Failf("Frontend service did not start serving content in %v seconds.", guestbookStartupTimeout.Seconds()) @@ -2066,7 +2066,7 @@ func forEachReplicationController(c clientset.Interface, ns, selectorKey, select label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue})) options := metav1.ListOptions{LabelSelector: label.String()} rcs, err = c.CoreV1().ReplicationControllers(ns).List(options) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if len(rcs.Items) > 0 { break } @@ -2128,9 +2128,8 @@ func getUDData(jpgExpected string, ns string) func(clientset.Interface, string) framework.Logf("Unmarshalled json jpg/img => %s , expecting %s .", data, jpgExpected) if strings.Contains(data.Image, jpgExpected) { return nil - } else { - return fmt.Errorf("data served up in container is inaccurate, %s didn't contain %s", data, jpgExpected) } + return fmt.Errorf("data served up in container is inaccurate, %s didn't contain %s", data, jpgExpected) } } diff --git a/test/e2e/kubectl/portforward.go b/test/e2e/kubectl/portforward.go index d29847839f..53c764bc7b 100644 --- a/test/e2e/kubectl/portforward.go +++ b/test/e2e/kubectl/portforward.go @@ -39,8 +39,8 @@ import ( testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) const ( @@ -116,6 +116,7 @@ func pfPod(expectedClientData, chunks, chunkSize, chunkIntervalMillis string, bi } } +// WaitForTerminatedContainer waits until the given container in the given pod has terminated.
func WaitForTerminatedContainer(f *framework.Framework, pod *v1.Pod, containerName string) error { return framework.WaitForPodCondition(f.ClientSet, f.Namespace.Name, pod.Name, "container terminated", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) { if len(testutils.TerminatedContainers(pod)[containerName]) > 0 { @@ -199,7 +200,7 @@ func runPortForward(ns, podName string, port int) *portForwardCommand { } func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) { - By("Creating the target pod") + ginkgo.By("Creating the target pod") pod := pfPod("", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { framework.Failf("Couldn't create pod: %v", err) @@ -208,21 +209,21 @@ func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) { framework.Failf("Pod did not start running: %v", err) } - By("Running 'kubectl port-forward'") + ginkgo.By("Running 'kubectl port-forward'") cmd := runPortForward(f.Namespace.Name, pod.Name, 80) defer cmd.Stop() - By("Dialing the local port") + ginkgo.By("Dialing the local port") conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port)) if err != nil { framework.Failf("Couldn't connect to port %d: %v", cmd.port, err) } defer func() { - By("Closing the connection to the local port") + ginkgo.By("Closing the connection to the local port") conn.Close() }() - By("Reading data from the local port") + ginkgo.By("Reading data from the local port") fromServer, err := ioutil.ReadAll(conn) if err != nil { framework.Failf("Unexpected error reading data from the server: %v", err) @@ -232,22 +233,22 @@ func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) { framework.Failf("Expected %q from server, got %q", e, a) } - By("Waiting for the target pod to stop running") + ginkgo.By("Waiting for the target pod to stop running") if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil { framework.Failf("Container did not terminate: %v", err) } - By("Verifying logs") - Eventually(func() (string, error) { + ginkgo.By("Verifying logs") + gomega.Eventually(func() (string, error) { return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") - }, postStartWaitTimeout, podCheckInterval).Should(SatisfyAll( - ContainSubstring("Accepted client connection"), - ContainSubstring("Done"), + }, postStartWaitTimeout, podCheckInterval).Should(gomega.SatisfyAll( + gomega.ContainSubstring("Accepted client connection"), + gomega.ContainSubstring("Done"), )) } func doTestMustConnectSendNothing(bindAddress string, f *framework.Framework) { - By("Creating the target pod") + ginkgo.By("Creating the target pod") pod := pfPod("abc", "1", "1", "1", fmt.Sprintf("%s", bindAddress)) if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { framework.Failf("Couldn't create pod: %v", err) @@ -256,35 +257,35 @@ func doTestMustConnectSendNothing(bindAddress string, f *framework.Framework) { framework.Failf("Pod did not start running: %v", err) } - By("Running 'kubectl port-forward'") + ginkgo.By("Running 'kubectl port-forward'") cmd := runPortForward(f.Namespace.Name, pod.Name, 80) defer cmd.Stop() - By("Dialing the local port") + ginkgo.By("Dialing the local port") conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port)) if err != nil { framework.Failf("Couldn't connect to port %d: %v", cmd.port, err) } - By("Closing the connection to the local port") + 
ginkgo.By("Closing the connection to the local port") conn.Close() - By("Waiting for the target pod to stop running") + ginkgo.By("Waiting for the target pod to stop running") if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil { framework.Failf("Container did not terminate: %v", err) } - By("Verifying logs") - Eventually(func() (string, error) { + ginkgo.By("Verifying logs") + gomega.Eventually(func() (string, error) { return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") - }, postStartWaitTimeout, podCheckInterval).Should(SatisfyAll( - ContainSubstring("Accepted client connection"), - ContainSubstring("Expected to read 3 bytes from client, but got 0 instead"), + }, postStartWaitTimeout, podCheckInterval).Should(gomega.SatisfyAll( + gomega.ContainSubstring("Accepted client connection"), + gomega.ContainSubstring("Expected to read 3 bytes from client, but got 0 instead"), )) } func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework) { - By("Creating the target pod") + ginkgo.By("Creating the target pod") pod := pfPod("abc", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { framework.Failf("Couldn't create pod: %v", err) @@ -293,11 +294,11 @@ func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework) framework.Failf("Pod did not start running: %v", err) } - By("Running 'kubectl port-forward'") + ginkgo.By("Running 'kubectl port-forward'") cmd := runPortForward(f.Namespace.Name, pod.Name, 80) defer cmd.Stop() - By("Dialing the local port") + ginkgo.By("Dialing the local port") addr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port)) if err != nil { framework.Failf("Error resolving tcp addr: %v", err) @@ -307,17 +308,17 @@ func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework) framework.Failf("Couldn't connect to port %d: %v", cmd.port, err) } defer func() { - By("Closing the connection to the local port") + ginkgo.By("Closing the connection to the local port") conn.Close() }() - By("Sending the expected data to the local port") + ginkgo.By("Sending the expected data to the local port") fmt.Fprint(conn, "abc") - By("Closing the write half of the client's connection") + ginkgo.By("Closing the write half of the client's connection") conn.CloseWrite() - By("Reading data from the local port") + ginkgo.By("Reading data from the local port") fromServer, err := ioutil.ReadAll(conn) if err != nil { framework.Failf("Unexpected error reading data from the server: %v", err) @@ -327,26 +328,26 @@ func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework) framework.Failf("Expected %q from server, got %q", e, a) } - By("Waiting for the target pod to stop running") + ginkgo.By("Waiting for the target pod to stop running") if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil { framework.Failf("Container did not terminate: %v", err) } - By("Verifying logs") - Eventually(func() (string, error) { + ginkgo.By("Verifying logs") + gomega.Eventually(func() (string, error) { return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") - }, postStartWaitTimeout, podCheckInterval).Should(SatisfyAll( - ContainSubstring("Accepted client connection"), - ContainSubstring("Received expected client data"), - ContainSubstring("Done"), + }, postStartWaitTimeout, podCheckInterval).Should(gomega.SatisfyAll( + 
gomega.ContainSubstring("Accepted client connection"), + gomega.ContainSubstring("Received expected client data"), + gomega.ContainSubstring("Done"), )) } func doTestOverWebSockets(bindAddress string, f *framework.Framework) { config, err := framework.LoadConfig() - Expect(err).NotTo(HaveOccurred(), "unable to get base config") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unable to get base config") - By("Creating the pod") + ginkgo.By("Creating the pod") pod := pfPod("def", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { framework.Failf("Couldn't create pod: %v", err) @@ -369,7 +370,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) { } defer ws.Close() - Eventually(func() error { + gomega.Eventually(func() error { channel, msg, err := wsRead(ws) if err != nil { return fmt.Errorf("Failed to read completely from websocket %s: %v", url.String(), err) @@ -381,9 +382,9 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) { return fmt.Errorf("Received the wrong port: %d", p) } return nil - }, time.Minute, 10*time.Second).Should(BeNil()) + }, time.Minute, 10*time.Second).Should(gomega.BeNil()) - Eventually(func() error { + gomega.Eventually(func() error { channel, msg, err := wsRead(ws) if err != nil { return fmt.Errorf("Failed to read completely from websocket %s: %v", url.String(), err) @@ -395,18 +396,18 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) { return fmt.Errorf("Received the wrong port: %d", p) } return nil - }, time.Minute, 10*time.Second).Should(BeNil()) + }, time.Minute, 10*time.Second).Should(gomega.BeNil()) - By("Sending the expected data to the local port") + ginkgo.By("Sending the expected data to the local port") err = wsWrite(ws, 0, []byte("def")) if err != nil { framework.Failf("Failed to write to websocket %s: %v", url.String(), err) } - By("Reading data from the local port") + ginkgo.By("Reading data from the local port") buf := bytes.Buffer{} expectedData := bytes.Repeat([]byte("x"), 100) - Eventually(func() error { + gomega.Eventually(func() error { channel, msg, err := wsRead(ws) if err != nil { return fmt.Errorf("Failed to read completely from websocket %s: %v", url.String(), err) @@ -419,14 +420,14 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) { return fmt.Errorf("Expected %q from server, got %q", expectedData, buf.Bytes()) } return nil - }, time.Minute, 10*time.Second).Should(BeNil()) + }, time.Minute, 10*time.Second).Should(gomega.BeNil()) - By("Verifying logs") - Eventually(func() (string, error) { + ginkgo.By("Verifying logs") + gomega.Eventually(func() (string, error) { return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") - }, postStartWaitTimeout, podCheckInterval).Should(SatisfyAll( - ContainSubstring("Accepted client connection"), - ContainSubstring("Received expected client data"), + }, postStartWaitTimeout, podCheckInterval).Should(gomega.SatisfyAll( + gomega.ContainSubstring("Accepted client connection"), + gomega.ContainSubstring("Received expected client data"), )) } @@ -435,21 +436,21 @@ var _ = SIGDescribe("Kubectl Port forwarding", func() { framework.KubeDescribe("With a server listening on 0.0.0.0", func() { framework.KubeDescribe("that expects a client request", func() { - It("should support a client that connects, sends NO DATA, and disconnects", func() { + ginkgo.It("should support a client that connects, sends NO DATA, and 
disconnects", func() { doTestMustConnectSendNothing("0.0.0.0", f) }) - It("should support a client that connects, sends DATA, and disconnects", func() { + ginkgo.It("should support a client that connects, sends DATA, and disconnects", func() { doTestMustConnectSendDisconnect("0.0.0.0", f) }) }) framework.KubeDescribe("that expects NO client request", func() { - It("should support a client that connects, sends DATA, and disconnects", func() { + ginkgo.It("should support a client that connects, sends DATA, and disconnects", func() { doTestConnectSendDisconnect("0.0.0.0", f) }) }) - It("should support forwarding over websockets", func() { + ginkgo.It("should support forwarding over websockets", func() { doTestOverWebSockets("0.0.0.0", f) }) }) @@ -457,21 +458,21 @@ var _ = SIGDescribe("Kubectl Port forwarding", func() { // kubectl port-forward may need elevated privileges to do its job. framework.KubeDescribe("With a server listening on localhost", func() { framework.KubeDescribe("that expects a client request", func() { - It("should support a client that connects, sends NO DATA, and disconnects", func() { + ginkgo.It("should support a client that connects, sends NO DATA, and disconnects", func() { doTestMustConnectSendNothing("localhost", f) }) - It("should support a client that connects, sends DATA, and disconnects", func() { + ginkgo.It("should support a client that connects, sends DATA, and disconnects", func() { doTestMustConnectSendDisconnect("localhost", f) }) }) framework.KubeDescribe("that expects NO client request", func() { - It("should support a client that connects, sends DATA, and disconnects", func() { + ginkgo.It("should support a client that connects, sends DATA, and disconnects", func() { doTestConnectSendDisconnect("localhost", f) }) }) - It("should support forwarding over websockets", func() { + ginkgo.It("should support forwarding over websockets", func() { doTestOverWebSockets("localhost", f) }) }) diff --git a/test/e2e/servicecatalog/framework.go b/test/e2e/servicecatalog/framework.go index 9b6dc4b1ab..39af84e65b 100644 --- a/test/e2e/servicecatalog/framework.go +++ b/test/e2e/servicecatalog/framework.go @@ -18,6 +18,7 @@ package servicecatalog import "github.com/onsi/ginkgo" +// SIGDescribe annotates the test with the SIG label. func SIGDescribe(text string, body func()) bool { return ginkgo.Describe("[sig-service-catalog] "+text, body) } diff --git a/test/e2e/servicecatalog/podpreset.go b/test/e2e/servicecatalog/podpreset.go index 260c1790a2..9a649f7257 100644 --- a/test/e2e/servicecatalog/podpreset.go +++ b/test/e2e/servicecatalog/podpreset.go @@ -30,8 +30,8 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -39,7 +39,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { f := framework.NewDefaultFramework("podpreset") var podClient *framework.PodClient - BeforeEach(func() { + ginkgo.BeforeEach(func() { // only run on gce for the time being til we find an easier way to update // the admission controllers used on the others framework.SkipUnlessProviderIs("gce") @@ -47,8 +47,8 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { }) // Simplest case: all pods succeed promptly - It("should create a pod preset", func() { - By("Creating a pod preset") + ginkgo.It("should create a pod preset", func() { + ginkgo.By("Creating a pod preset") pip := &settings.PodPreset{ ObjectMeta: metav1.ObjectMeta{ @@ -77,9 +77,9 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { if errors.IsNotFound(err) { framework.Skipf("podpresets requires k8s.io/api/settings/v1alpha1 to be enabled") } - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("creating the pod") + ginkgo.By("creating the pod") name := "pod-preset-pod" value := strconv.Itoa(time.Now().Nanosecond()) pod := &v1.Pod{ @@ -102,30 +102,30 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { }, } - By("setting up watch") + ginkgo.By("setting up watch") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := metav1.ListOptions{LabelSelector: selector.String()} pods, err := podClient.List(options) - Expect(err).NotTo(HaveOccurred(), "failed to query for pod") - Expect(len(pods.Items)).To(Equal(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod") + gomega.Expect(len(pods.Items)).To(gomega.Equal(0)) options = metav1.ListOptions{ LabelSelector: selector.String(), ResourceVersion: pods.ListMeta.ResourceVersion, } w, err := podClient.Watch(options) - Expect(err).NotTo(HaveOccurred(), "failed to set up watch") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to set up watch") - By("submitting the pod to kubernetes") + ginkgo.By("submitting the pod to kubernetes") podClient.Create(pod) - By("verifying the pod is in kubernetes") + ginkgo.By("verifying the pod is in kubernetes") selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options = metav1.ListOptions{LabelSelector: selector.String()} pods, err = podClient.List(options) - Expect(err).NotTo(HaveOccurred(), "failed to query for pod") - Expect(len(pods.Items)).To(Equal(1)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod") + gomega.Expect(len(pods.Items)).To(gomega.Equal(1)) - By("verifying pod creation was observed") + ginkgo.By("verifying pod creation was observed") select { case event, _ := <-w.ResultChan(): if event.Type != watch.Added { @@ -139,10 +139,10 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { // may be carried out immediately rather than gracefully. 
framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) - By("ensuring pod is modified") + ginkgo.By("ensuring pod is modified") // save the running pod pod, err = podClient.Get(pod.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to GET scheduled pod") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to GET scheduled pod") // check the annotation is there if _, ok := pod.Annotations["podpreset.admission.kubernetes.io/podpreset-hello"]; !ok { @@ -155,8 +155,8 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { } }) - It("should not modify the pod on conflict", func() { - By("Creating a pod preset") + ginkgo.It("should not modify the pod on conflict", func() { + ginkgo.By("Creating a pod preset") pip := &settings.PodPreset{ ObjectMeta: metav1.ObjectMeta{ @@ -185,9 +185,9 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { if errors.IsNotFound(err) { framework.Skipf("podpresets requires k8s.io/api/settings/v1alpha1 to be enabled") } - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("creating the pod") + ginkgo.By("creating the pod") name := "pod-preset-pod" value := strconv.Itoa(time.Now().Nanosecond()) originalPod := &v1.Pod{ @@ -211,30 +211,30 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { }, } - By("setting up watch") + ginkgo.By("setting up watch") selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options := metav1.ListOptions{LabelSelector: selector.String()} pods, err := podClient.List(options) - Expect(err).NotTo(HaveOccurred(), "failed to query for pod") - Expect(len(pods.Items)).To(Equal(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod") + gomega.Expect(len(pods.Items)).To(gomega.Equal(0)) options = metav1.ListOptions{ LabelSelector: selector.String(), ResourceVersion: pods.ListMeta.ResourceVersion, } w, err := podClient.Watch(options) - Expect(err).NotTo(HaveOccurred(), "failed to set up watch") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to set up watch") - By("submitting the pod to kubernetes") + ginkgo.By("submitting the pod to kubernetes") podClient.Create(originalPod) - By("verifying the pod is in kubernetes") + ginkgo.By("verifying the pod is in kubernetes") selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) options = metav1.ListOptions{LabelSelector: selector.String()} pods, err = podClient.List(options) - Expect(err).NotTo(HaveOccurred(), "failed to query for pod") - Expect(len(pods.Items)).To(Equal(1)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod") + gomega.Expect(len(pods.Items)).To(gomega.Equal(1)) - By("verifying pod creation was observed") + ginkgo.By("verifying pod creation was observed") select { case event, _ := <-w.ResultChan(): if event.Type != watch.Added { @@ -248,10 +248,10 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { // may be carried out immediately rather than gracefully. framework.ExpectNoError(f.WaitForPodRunning(originalPod.Name)) - By("ensuring pod is modified") + ginkgo.By("ensuring pod is modified") // save the running pod pod, err := podClient.Get(originalPod.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to GET scheduled pod") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to GET scheduled pod") // check the annotation is not there if _, ok := pod.Annotations["podpreset.admission.kubernetes.io/podpreset-hello"]; ok {
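
The hunks above all apply the same mechanical conversion: the dot imports of ginkgo and gomega are removed, every By, It, BeforeEach, AfterEach, Expect and Eventually call gains an explicit ginkgo. or gomega. qualifier, and identifiers flagged by golint are renamed (podJson to podJSON), documented, or simplified (dropping an else after a return). The sketch below is illustrative only and is not part of the patch; it shows roughly what a spec reads like after conversion, using a hypothetical example_test package and suite name.

package example_test

import (
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// TestExample wires gomega failures into ginkgo and runs the suite.
func TestExample(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "Example Suite")
}

var _ = ginkgo.Describe("[sig-example] Qualified imports", func() {
	var sum int

	ginkgo.BeforeEach(func() {
		// Shared setup runs before every spec.
		sum = 1 + 1
	})

	ginkgo.It("should assert through explicit package qualifiers", func() {
		ginkgo.By("checking the value computed in BeforeEach")
		gomega.Expect(sum).To(gomega.Equal(2))
	})
})

Qualifying every call keeps the test file's namespace explicit, which is what golint's dot-import warning is meant to enforce, and it makes clear at a glance whether a helper such as By or Eventually comes from ginkgo or from gomega.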