diff --git a/test/e2e/configmap.go b/test/e2e/configmap.go
index 1d25e961f7..ac216b9679 100644
--- a/test/e2e/configmap.go
+++ b/test/e2e/configmap.go
@@ -131,10 +131,10 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
 			f.Client.Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0))
 		}()
 		By("Creating the pod")
-		_, err = f.Client.Pods(f.Namespace.Name).Create(pod)
+		pod, err = f.Client.Pods(f.Namespace.Name).Create(pod)
 		Expect(err).NotTo(HaveOccurred())
 
-		framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod.Name, f.Namespace.Name))
+		framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod))
 
 		pollLogs := func() (string, error) {
 			return framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName)
diff --git a/test/e2e/dns.go b/test/e2e/dns.go
index bc57f85bb9..2e96eb06a6 100644
--- a/test/e2e/dns.go
+++ b/test/e2e/dns.go
@@ -245,7 +245,8 @@ func verifyDNSPodIsRunning(f *framework.Framework) {
 	if len(dnsPods.Items) < 1 {
 		framework.Failf("No pods match the label selector %v", dnsServiceLabelSelector.String())
 	}
-	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, dnsPods.Items[0].Name, api.NamespaceSystem))
+	pod := dnsPods.Items[0]
+	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, &pod))
 }
 
 func createServiceSpec(serviceName string, isHeadless bool, selector map[string]string) *api.Service {
diff --git a/test/e2e/downwardapi_volume.go b/test/e2e/downwardapi_volume.go
index 76d898e7db..5e7cba2113 100644
--- a/test/e2e/downwardapi_volume.go
+++ b/test/e2e/downwardapi_volume.go
@@ -70,10 +70,10 @@ var _ = framework.KubeDescribe("Downward API volume", func() {
 			f.Client.Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0))
 		}()
 		By("Creating the pod")
-		_, err := f.Client.Pods(f.Namespace.Name).Create(pod)
+		pod, err := f.Client.Pods(f.Namespace.Name).Create(pod)
 		Expect(err).NotTo(HaveOccurred())
 
-		framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod.Name, f.Namespace.Name))
+		framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod))
 
 		Eventually(func() (string, error) {
 			return framework.GetPodLogs(f.Client, f.Namespace.Name, podName, containerName)
@@ -103,9 +103,9 @@ var _ = framework.KubeDescribe("Downward API volume", func() {
 			f.Client.Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0))
 		}()
 		By("Creating the pod")
-		_, err := f.Client.Pods(f.Namespace.Name).Create(pod)
+		pod, err := f.Client.Pods(f.Namespace.Name).Create(pod)
 		Expect(err).NotTo(HaveOccurred())
-		framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod.Name, f.Namespace.Name))
+		framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod))
 
 		pod, err = f.Client.Pods(f.Namespace.Name).Get(pod.Name)
 		Expect(err).NotTo(HaveOccurred())
diff --git a/test/e2e/empty_dir_wrapper.go b/test/e2e/empty_dir_wrapper.go
index 9b2a39f645..237c5769f9 100644
--- a/test/e2e/empty_dir_wrapper.go
+++ b/test/e2e/empty_dir_wrapper.go
@@ -152,7 +152,8 @@ var _ = framework.KubeDescribe("EmptyDir wrapper volumes", func() {
 			},
 		}
 
-		if pod, err = f.Client.Pods(f.Namespace.Name).Create(pod); err != nil {
+		pod, err = f.Client.Pods(f.Namespace.Name).Create(pod)
+		if err != nil {
 			framework.Failf("unable to create pod %v: %v", pod.Name, err)
 		}
@@ -175,6 +176,6 @@ var _ = framework.KubeDescribe("EmptyDir wrapper volumes", func() {
 			}
 		}()
 
-		framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod.Name, f.Namespace.Name))
+		framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod))
 	})
 })
diff --git a/test/e2e/es_cluster_logging.go b/test/e2e/es_cluster_logging.go
index f226d26d49..0589e80310 100644
--- a/test/e2e/es_cluster_logging.go
+++ b/test/e2e/es_cluster_logging.go
@@ -99,7 +99,7 @@ func ClusterLevelLoggingWithElasticsearch(f *framework.Framework) {
 	pods, err := f.Client.Pods(api.NamespaceSystem).List(options)
 	Expect(err).NotTo(HaveOccurred())
 	for _, pod := range pods.Items {
-		err = framework.WaitForPodRunningInNamespace(f.Client, pod.Name, api.NamespaceSystem)
+		err = framework.WaitForPodRunningInNamespace(f.Client, &pod)
 		Expect(err).NotTo(HaveOccurred())
 	}
@@ -227,7 +227,7 @@ func ClusterLevelLoggingWithElasticsearch(f *framework.Framework) {
 	Expect(err).NotTo(HaveOccurred())
 	for _, pod := range fluentdPods.Items {
 		if nodeInNodeList(pod.Spec.NodeName, nodes) {
-			err = framework.WaitForPodRunningInNamespace(f.Client, pod.Name, api.NamespaceSystem)
+			err = framework.WaitForPodRunningInNamespace(f.Client, &pod)
 			Expect(err).NotTo(HaveOccurred())
 		}
 	}
diff --git a/test/e2e/example_cluster_dns.go b/test/e2e/example_cluster_dns.go
index 9697c19c9c..14c1348d64 100644
--- a/test/e2e/example_cluster_dns.go
+++ b/test/e2e/example_cluster_dns.go
@@ -139,7 +139,7 @@ var _ = framework.KubeDescribe("ClusterDns [Feature:Example]", func() {
 		// wait until the pods have been scheduler, i.e. are not Pending anymore. Remember
 		// that we cannot wait for the pods to be running because our pods terminate by themselves.
 		for _, ns := range namespaces {
-			err := framework.WaitForPodNotPending(c, ns.Name, frontendPodName)
+			err := framework.WaitForPodNotPending(c, ns.Name, frontendPodName, "")
 			framework.ExpectNoError(err)
 		}
diff --git a/test/e2e/examples.go b/test/e2e/examples.go
index 200f59d44a..f86356e475 100644
--- a/test/e2e/examples.go
+++ b/test/e2e/examples.go
@@ -79,7 +79,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 			By("starting redis bootstrap")
 			framework.RunKubectlOrDie("create", "-f", bootstrapYaml, nsFlag)
-			err := framework.WaitForPodRunningInNamespace(c, bootstrapPodName, ns)
+			err := framework.WaitForPodNameRunningInNamespace(c, bootstrapPodName, ns)
 			Expect(err).NotTo(HaveOccurred())
 
 			_, err = framework.LookForStringInLog(ns, bootstrapPodName, "master", expectedOnServer, serverStartTimeout)
@@ -308,7 +308,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 			By("starting Zookeeper")
 			framework.RunKubectlOrDie("create", "-f", zookeeperPodJson, nsFlag)
 			framework.RunKubectlOrDie("create", "-f", zookeeperServiceJson, nsFlag)
-			err := framework.WaitForPodRunningInNamespace(c, zookeeperPod, ns)
+			err := framework.WaitForPodNameRunningInNamespace(c, zookeeperPod, ns)
 			Expect(err).NotTo(HaveOccurred())
 
 			By("checking if zookeeper is up and running")
@@ -320,7 +320,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 			By("starting Nimbus")
 			framework.RunKubectlOrDie("create", "-f", nimbusPodJson, nsFlag)
 			framework.RunKubectlOrDie("create", "-f", nimbusServiceJson, nsFlag)
-			err = framework.WaitForPodRunningInNamespace(c, "nimbus", ns)
+			err = framework.WaitForPodNameRunningInNamespace(c, "nimbus", ns)
 			Expect(err).NotTo(HaveOccurred())
 
 			err = framework.WaitForEndpoint(c, ns, "nimbus")
@@ -365,7 +365,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 			var wg sync.WaitGroup
 			passed := true
 			checkRestart := func(podName string, timeout time.Duration) {
-				err := framework.WaitForPodRunningInNamespace(c, podName, ns)
+				err := framework.WaitForPodNameRunningInNamespace(c, podName, ns)
 				Expect(err).NotTo(HaveOccurred())
 				for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) {
 					pod, err := c.Pods(ns).Get(podName)
@@ -412,7 +412,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 			By("creating secret and pod")
 			framework.RunKubectlOrDie("create", "-f", secretYaml, nsFlag)
 			framework.RunKubectlOrDie("create", "-f", podYaml, nsFlag)
-			err := framework.WaitForPodNoLongerRunningInNamespace(c, podName, ns)
+			err := framework.WaitForPodNoLongerRunningInNamespace(c, podName, ns, "")
 			Expect(err).NotTo(HaveOccurred())
 
 			By("checking if secret was read correctly")
@@ -432,7 +432,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 			By("creating the pod")
 			framework.RunKubectlOrDie("create", "-f", podYaml, nsFlag)
-			err := framework.WaitForPodNoLongerRunningInNamespace(c, podName, ns)
+			err := framework.WaitForPodNoLongerRunningInNamespace(c, podName, ns, "")
 			Expect(err).NotTo(HaveOccurred())
 
 			By("checking if name and namespace were passed correctly")
@@ -477,7 +477,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 			By("starting admin")
 			framework.RunKubectlOrDie("create", "-f", adminServiceYaml, nsFlag)
 			framework.RunKubectlOrDie("create", "-f", adminPodYaml, nsFlag)
-			err = framework.WaitForPodRunningInNamespace(c, "rethinkdb-admin", ns)
+			err = framework.WaitForPodNameRunningInNamespace(c, "rethinkdb-admin", ns)
 			Expect(err).NotTo(HaveOccurred())
 			checkDbInstances()
 			content, err := makeHttpRequestToService(c, ns, "rethinkdb-admin", "/", framework.EndpointRegisterTimeout)
diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go
index a7649e7c77..abffb2df90 100644
--- a/test/e2e/framework/framework.go
+++ b/test/e2e/framework/framework.go
@@ -324,7 +324,7 @@ func (f *Framework) AfterEach() {
 		case "json":
 			for i := range summaries {
 				typeName := reflect.TypeOf(summaries[i]).String()
-				Logf("%v JSON\n%v", typeName[strings.LastIndex(typeName, ".")+1:len(typeName)], summaries[i].PrintJSON())
+				Logf("%v JSON\n%v", typeName[strings.LastIndex(typeName, ".")+1:], summaries[i].PrintJSON())
 				Logf("Finished")
 			}
 		default:
@@ -359,24 +359,24 @@ func (f *Framework) WaitForPodTerminated(podName, reason string) error {
 
 // WaitForPodRunning waits for the pod to run in the namespace.
 func (f *Framework) WaitForPodRunning(podName string) error {
-	return WaitForPodRunningInNamespace(f.Client, podName, f.Namespace.Name)
+	return WaitForPodNameRunningInNamespace(f.Client, podName, f.Namespace.Name)
 }
 
 // WaitForPodReady waits for the pod to flip to ready in the namespace.
 func (f *Framework) WaitForPodReady(podName string) error {
-	return waitTimeoutForPodReadyInNamespace(f.Client, podName, f.Namespace.Name, PodStartTimeout)
+	return waitTimeoutForPodReadyInNamespace(f.Client, podName, f.Namespace.Name, "", PodStartTimeout)
}
 
 // WaitForPodRunningSlow waits for the pod to run in the namespace.
 // It has a longer timeout then WaitForPodRunning (util.slowPodStartTimeout).
 func (f *Framework) WaitForPodRunningSlow(podName string) error {
-	return waitForPodRunningInNamespaceSlow(f.Client, podName, f.Namespace.Name)
+	return waitForPodRunningInNamespaceSlow(f.Client, podName, f.Namespace.Name, "")
 }
 
 // WaitForPodNoLongerRunning waits for the pod to no longer be running in the namespace, for either
 // success or failure.
 func (f *Framework) WaitForPodNoLongerRunning(podName string) error {
-	return WaitForPodNoLongerRunningInNamespace(f.Client, podName, f.Namespace.Name)
+	return WaitForPodNoLongerRunningInNamespace(f.Client, podName, f.Namespace.Name, "")
 }
 
 // Runs the given pod and verifies that the output of exact container matches the desired output.
diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
index a48c72c338..e7ca7ccc96 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -1177,18 +1177,31 @@ func CheckInvariants(events []watch.Event, fns ...InvariantFunc) error {
 
 // Waits default amount of time (PodStartTimeout) for the specified pod to become running.
 // Returns an error if timeout occurs first, or pod goes in to failed state.
-func WaitForPodRunningInNamespace(c *client.Client, podName string, namespace string) error {
-	return waitTimeoutForPodRunningInNamespace(c, podName, namespace, PodStartTimeout)
+func WaitForPodRunningInNamespace(c *client.Client, pod *api.Pod) error {
+	// This short-circuit is needed for cases when we pass a list of pods instead
+	// of a newly created pod (e.g. VerifyPods), which means we get an already
+	// running pod for which waiting does not make sense and would always fail.
+	if pod.Status.Phase == api.PodRunning {
+		return nil
+	}
+	return waitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, pod.ResourceVersion, PodStartTimeout)
+}
+
+// Waits default amount of time (PodStartTimeout) for the specified pod to become running.
+// Returns an error if timeout occurs first, or pod goes in to failed state.
+func WaitForPodNameRunningInNamespace(c *client.Client, podName, namespace string) error {
+	return waitTimeoutForPodRunningInNamespace(c, podName, namespace, "", PodStartTimeout)
 }
 
 // Waits an extended amount of time (slowPodStartTimeout) for the specified pod to become running.
-// Returns an error if timeout occurs first, or pod goes in to failed state.
-func waitForPodRunningInNamespaceSlow(c *client.Client, podName string, namespace string) error {
-	return waitTimeoutForPodRunningInNamespace(c, podName, namespace, slowPodStartTimeout)
+// The resourceVersion is used when Watching object changes, it tells since when we care
+// about changes to the pod. Returns an error if timeout occurs first, or pod goes in to failed state.
+func waitForPodRunningInNamespaceSlow(c *client.Client, podName, namespace, resourceVersion string) error {
+	return waitTimeoutForPodRunningInNamespace(c, podName, namespace, resourceVersion, slowPodStartTimeout)
 }
 
-func waitTimeoutForPodRunningInNamespace(c *client.Client, podName string, namespace string, timeout time.Duration) error {
-	w, err := c.Pods(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: podName}))
+func waitTimeoutForPodRunningInNamespace(c *client.Client, podName, namespace, resourceVersion string, timeout time.Duration) error {
+	w, err := c.Pods(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
 	if err != nil {
 		return err
 	}
@@ -1198,12 +1211,12 @@ func waitTimeoutForPodRunningInNamespace(c *client.Client, podName string, names
 
 // Waits default amount of time (podNoLongerRunningTimeout) for the specified pod to stop running.
 // Returns an error if timeout occurs first.
-func WaitForPodNoLongerRunningInNamespace(c *client.Client, podName string, namespace string) error {
-	return waitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, podNoLongerRunningTimeout)
+func WaitForPodNoLongerRunningInNamespace(c *client.Client, podName, namespace, resourceVersion string) error {
+	return waitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, resourceVersion, podNoLongerRunningTimeout)
 }
 
-func waitTimeoutForPodNoLongerRunningInNamespace(c *client.Client, podName string, namespace string, timeout time.Duration) error {
-	w, err := c.Pods(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: podName}))
+func waitTimeoutForPodNoLongerRunningInNamespace(c *client.Client, podName, namespace, resourceVersion string, timeout time.Duration) error {
+	w, err := c.Pods(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
 	if err != nil {
 		return err
 	}
@@ -1211,8 +1224,8 @@ func waitTimeoutForPodNoLongerRunningInNamespace(c *client.Client, podName strin
 	return err
 }
 
-func waitTimeoutForPodReadyInNamespace(c *client.Client, podName string, namespace string, timeout time.Duration) error {
-	w, err := c.Pods(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: podName}))
+func waitTimeoutForPodReadyInNamespace(c *client.Client, podName, namespace, resourceVersion string, timeout time.Duration) error {
+	w, err := c.Pods(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
 	if err != nil {
 		return err
 	}
@@ -1221,8 +1234,10 @@ func waitTimeoutForPodReadyInNamespace(c *client.Client, podName string, namespa
 }
 
 // WaitForPodNotPending returns an error if it took too long for the pod to go out of pending state.
-func WaitForPodNotPending(c *client.Client, ns, podName string) error {
-	w, err := c.Pods(ns).Watch(api.SingleObject(api.ObjectMeta{Name: podName}))
+// The resourceVersion is used when Watching object changes, it tells since when we care
+// about changes to the pod.
+func WaitForPodNotPending(c *client.Client, ns, podName, resourceVersion string) error {
+	w, err := c.Pods(ns).Watch(api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
 	if err != nil {
 		return err
 	}
@@ -1617,7 +1632,7 @@ func podsRunning(c *client.Client, pods *api.PodList) []error {
 	for _, pod := range pods.Items {
 		go func(p api.Pod) {
-			error_chan <- WaitForPodRunningInNamespace(c, p.Name, p.Namespace)
+			error_chan <- WaitForPodRunningInNamespace(c, &p)
 		}(pod)
 	}
@@ -3506,7 +3521,7 @@ func LaunchHostExecPod(client *client.Client, ns, name string) *api.Pod {
 	hostExecPod := NewHostExecPodSpec(ns, name)
 	pod, err := client.Pods(ns).Create(hostExecPod)
 	ExpectNoError(err)
-	err = WaitForPodRunningInNamespace(client, pod.Name, pod.Namespace)
+	err = WaitForPodRunningInNamespace(client, pod)
 	ExpectNoError(err)
 	return pod
 }
diff --git a/test/e2e/initial_resources.go b/test/e2e/initial_resources.go
index 1c548fb6f2..7fa63e90a1 100644
--- a/test/e2e/initial_resources.go
+++ b/test/e2e/initial_resources.go
@@ -67,6 +67,6 @@ func runPod(f *framework.Framework, name, image string) *api.Pod {
 	}
 	createdPod, err := f.Client.Pods(f.Namespace.Name).Create(pod)
 	framework.ExpectNoError(err)
-	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, name, f.Namespace.Name))
+	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, createdPod))
 	return createdPod
 }
diff --git a/test/e2e/kibana_logging.go b/test/e2e/kibana_logging.go
index 594248437a..3bb660d081 100644
--- a/test/e2e/kibana_logging.go
+++ b/test/e2e/kibana_logging.go
@@ -73,7 +73,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
 	pods, err := f.Client.Pods(api.NamespaceSystem).List(options)
 	Expect(err).NotTo(HaveOccurred())
 	for _, pod := range pods.Items {
-		err = framework.WaitForPodRunningInNamespace(f.Client, pod.Name, api.NamespaceSystem)
+		err = framework.WaitForPodRunningInNamespace(f.Client, &pod)
 		Expect(err).NotTo(HaveOccurred())
 	}
diff --git a/test/e2e/mesos.go b/test/e2e/mesos.go
index 8f9d1df93e..a5f97e9fde 100644
--- a/test/e2e/mesos.go
+++ b/test/e2e/mesos.go
@@ -101,7 +101,7 @@ var _ = framework.KubeDescribe("Mesos", func() {
 		})
 		framework.ExpectNoError(err)
 
-		framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns))
+		framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, podName, ns))
 		pod, err := c.Pods(ns).Get(podName)
 		framework.ExpectNoError(err)
diff --git a/test/e2e/namespace.go b/test/e2e/namespace.go
index 27ed243a52..ef290afa53 100644
--- a/test/e2e/namespace.go
+++ b/test/e2e/namespace.go
@@ -106,7 +106,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
 	Expect(err).NotTo(HaveOccurred())
 
 	By("Waiting for the pod to have running status")
-	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod.Name, pod.Namespace))
+	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod))
 
 	By("Deleting the namespace")
 	err = f.Client.Namespaces().Delete(namespace.Name)
diff --git a/test/e2e/pods.go b/test/e2e/pods.go
index 40be2f4832..28dacf5070 100644
--- a/test/e2e/pods.go
+++ b/test/e2e/pods.go
@@ -53,7 +53,7 @@ var (
 func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectNumRestarts int, timeout time.Duration) {
 	By(fmt.Sprintf("Creating pod %s in namespace %s", podDescr.Name, ns))
-	_, err := c.Pods(ns).Create(podDescr)
+	podDescr, err := c.Pods(ns).Create(podDescr)
 	framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name))
 
 	// At the end of the test, clean up by removing the pod.
@@ -65,7 +65,7 @@ func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectNumRe
 	// Wait until the pod is not pending. (Here we need to check for something other than
 	// 'Pending' other than checking for 'Running', since when failures occur, we go to
 	// 'Terminated' which can cause indefinite blocking.)
-	framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, podDescr.Name),
+	framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, podDescr.Name, podDescr.ResourceVersion),
 		fmt.Sprintf("starting pod %s in namespace %s", podDescr.Name, ns))
 	framework.Logf("Started pod %s in namespace %s", podDescr.Name, ns)
@@ -114,13 +114,14 @@ func testHostIP(c *client.Client, ns string, pod *api.Pod) {
 	podClient := c.Pods(ns)
 	By("creating pod")
 	defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
-	if _, err := podClient.Create(pod); err != nil {
+	pod, err := podClient.Create(pod)
+	if err != nil {
 		framework.Failf("Failed to create pod: %v", err)
 	}
 	By("ensuring that pod is running and has a hostIP")
 	// Wait for the pods to enter the running state. Waiting loops until the pods
 	// are running so non-running pods cause a timeout for this test.
-	err := framework.WaitForPodRunningInNamespace(c, pod.Name, ns)
+	err = framework.WaitForPodRunningInNamespace(c, pod)
 	Expect(err).NotTo(HaveOccurred())
 	// Try to make sure we get a hostIP for each pod.
 	hostIPTimeout := 2 * time.Minute
diff --git a/test/e2e/pre_stop.go b/test/e2e/pre_stop.go
index 2ab2f9686a..a9a35bee73 100644
--- a/test/e2e/pre_stop.go
+++ b/test/e2e/pre_stop.go
@@ -51,7 +51,7 @@ func testPreStop(c *client.Client, ns string) {
 		},
 	}
 	By(fmt.Sprintf("Creating server pod %s in namespace %s", podDescr.Name, ns))
-	_, err := c.Pods(ns).Create(podDescr)
+	podDescr, err := c.Pods(ns).Create(podDescr)
 	framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name))
 
 	// At the end of the test, clean up by removing the pod.
@@ -61,7 +61,7 @@ func testPreStop(c *client.Client, ns string) {
 	}()
 
 	By("Waiting for pods to come up.")
-	err = framework.WaitForPodRunningInNamespace(c, podDescr.Name, ns)
+	err = framework.WaitForPodRunningInNamespace(c, podDescr)
 	framework.ExpectNoError(err, "waiting for server pod to start")
 
 	val := "{\"Source\": \"prestop\"}"
@@ -94,7 +94,7 @@ func testPreStop(c *client.Client, ns string) {
 	}
 
 	By(fmt.Sprintf("Creating tester pod %s in namespace %s", preStopDescr.Name, ns))
-	_, err = c.Pods(ns).Create(preStopDescr)
+	preStopDescr, err = c.Pods(ns).Create(preStopDescr)
 	framework.ExpectNoError(err, fmt.Sprintf("creating pod %s", preStopDescr.Name))
 
 	deletePreStop := true
@@ -106,7 +106,7 @@ func testPreStop(c *client.Client, ns string) {
 		}
 	}()
 
-	err = framework.WaitForPodRunningInNamespace(c, preStopDescr.Name, ns)
+	err = framework.WaitForPodRunningInNamespace(c, preStopDescr)
 	framework.ExpectNoError(err, "waiting for tester pod to start")
 
 	// Delete the pod with the preStop handler.
diff --git a/test/e2e/scheduler_predicates.go b/test/e2e/scheduler_predicates.go
index c3647b407b..d9ec53add8 100644
--- a/test/e2e/scheduler_predicates.go
+++ b/test/e2e/scheduler_predicates.go
@@ -485,7 +485,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 		// scheduled onto it.
By("Trying to launch a pod without a label to get a node which can launch it.") podName := "without-label" - _, err := c.Pods(ns).Create(&api.Pod{ + pod, err := c.Pods(ns).Create(&api.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", }, @@ -502,8 +502,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { }, }) framework.ExpectNoError(err) - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns)) - pod, err := c.Pods(ns).Get(podName) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod)) + pod, err = c.Pods(ns).Get(podName) framework.ExpectNoError(err) nodeName := pod.Spec.NodeName @@ -524,7 +524,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { By("Trying to relaunch the pod, now with labels.") labelPodName := "with-labels" - _, err = c.Pods(ns).Create(&api.Pod{ + pod, err = c.Pods(ns).Create(&api.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", }, @@ -552,7 +552,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // kubelet and the scheduler: the scheduler might have scheduled a pod // already when the kubelet does not know about its new label yet. The // kubelet will then refuse to launch the pod. - framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName)) + framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName, pod.ResourceVersion)) labelPod, err := c.Pods(ns).Get(labelPodName) framework.ExpectNoError(err) Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) @@ -623,7 +623,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // scheduled onto it. By("Trying to launch a pod without a label to get a node which can launch it.") podName := "without-label" - _, err := c.Pods(ns).Create(&api.Pod{ + pod, err := c.Pods(ns).Create(&api.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", }, @@ -640,8 +640,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { }, }) framework.ExpectNoError(err) - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns)) - pod, err := c.Pods(ns).Get(podName) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod)) + pod, err = c.Pods(ns).Get(podName) framework.ExpectNoError(err) nodeName := pod.Spec.NodeName @@ -662,7 +662,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { By("Trying to relaunch the pod, now with labels.") labelPodName := "with-labels" - _, err = c.Pods(ns).Create(&api.Pod{ + pod, err = c.Pods(ns).Create(&api.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", }, @@ -704,7 +704,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // kubelet and the scheduler: the scheduler might have scheduled a pod // already when the kubelet does not know about its new label yet. The // kubelet will then refuse to launch the pod. - framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName)) + framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName, pod.ResourceVersion)) labelPod, err := c.Pods(ns).Get(labelPodName) framework.ExpectNoError(err) Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) @@ -719,7 +719,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // scheduled onto it. 
By("Trying to launch a pod without a label to get a node which can launch it.") podName := "without-label" - _, err := c.Pods(ns).Create(&api.Pod{ + pod, err := c.Pods(ns).Create(&api.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", }, @@ -736,8 +736,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { }, }) framework.ExpectNoError(err) - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns)) - pod, err := c.Pods(ns).Get(podName) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod)) + pod, err = c.Pods(ns).Get(podName) framework.ExpectNoError(err) nodeName := pod.Spec.NodeName @@ -768,7 +768,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // kubelet and the scheduler: the scheduler might have scheduled a pod // already when the kubelet does not know about its new label yet. The // kubelet will then refuse to launch the pod. - framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName)) + framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName, "")) labelPod, err := c.Pods(ns).Get(labelPodName) framework.ExpectNoError(err) Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) @@ -884,7 +884,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // scheduled onto it. By("Trying to launch a pod with a label to get a node which can launch it.") podName := "with-label-" + string(util.NewUUID()) - _, err := c.Pods(ns).Create(&api.Pod{ + pod, err := c.Pods(ns).Create(&api.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", }, @@ -902,8 +902,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { }, }) framework.ExpectNoError(err) - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns)) - pod, err := c.Pods(ns).Get(podName) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod)) + pod, err = c.Pods(ns).Get(podName) framework.ExpectNoError(err) nodeName := pod.Spec.NodeName @@ -923,7 +923,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { By("Trying to launch the pod, now with podAffinity.") labelPodName := "with-podaffinity-" + string(util.NewUUID()) - _, err = c.Pods(ns).Create(&api.Pod{ + pod, err = c.Pods(ns).Create(&api.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", }, @@ -963,7 +963,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // kubelet and the scheduler: the scheduler might have scheduled a pod // already when the kubelet does not know about its new label yet. The // kubelet will then refuse to launch the pod. - framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName)) + framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName, pod.ResourceVersion)) labelPod, err := c.Pods(ns).Get(labelPodName) framework.ExpectNoError(err) Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) @@ -977,7 +977,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // scheduled onto it. 
By("Trying to launch a pod with a label to get a node which can launch it.") podName := "with-label-" + string(util.NewUUID()) - _, err := c.Pods(ns).Create(&api.Pod{ + pod, err := c.Pods(ns).Create(&api.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", }, @@ -995,8 +995,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { }, }) framework.ExpectNoError(err) - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns)) - pod, err := c.Pods(ns).Get(podName) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod)) + pod, err = c.Pods(ns).Get(podName) framework.ExpectNoError(err) nodeName := pod.Spec.NodeName @@ -1066,7 +1066,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // scheduled onto it. By("Trying to launch a pod with a label to get a node which can launch it.") podName := "with-label-" + string(util.NewUUID()) - _, err := c.Pods(ns).Create(&api.Pod{ + pod, err := c.Pods(ns).Create(&api.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", }, @@ -1084,8 +1084,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { }, }) framework.ExpectNoError(err) - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns)) - pod, err := c.Pods(ns).Get(podName) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod)) + pod, err = c.Pods(ns).Get(podName) framework.ExpectNoError(err) nodeName := pod.Spec.NodeName @@ -1105,7 +1105,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { By("Trying to launch the pod, now with multiple pod affinities with diff LabelOperators.") labelPodName := "with-podaffinity-" + string(util.NewUUID()) - _, err = c.Pods(ns).Create(&api.Pod{ + pod, err = c.Pods(ns).Create(&api.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", }, @@ -1153,7 +1153,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // kubelet and the scheduler: the scheduler might have scheduled a pod // already when the kubelet does not know about its new label yet. The // kubelet will then refuse to launch the pod. - framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName)) + framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName, pod.ResourceVersion)) labelPod, err := c.Pods(ns).Get(labelPodName) framework.ExpectNoError(err) Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) @@ -1167,7 +1167,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // scheduled onto it. 
By("Trying to launch a pod with a label to get a node which can launch it.") podName := "with-label-" + string(util.NewUUID()) - _, err := c.Pods(ns).Create(&api.Pod{ + pod, err := c.Pods(ns).Create(&api.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", }, @@ -1185,8 +1185,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { }, }) framework.ExpectNoError(err) - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns)) - pod, err := c.Pods(ns).Get(podName) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod)) + pod, err = c.Pods(ns).Get(podName) framework.ExpectNoError(err) nodeName := pod.Spec.NodeName @@ -1206,7 +1206,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { By("Trying to launch the pod, now with Pod affinity and anti affinity.") labelPodName := "with-podantiaffinity-" + string(util.NewUUID()) - _, err = c.Pods(ns).Create(&api.Pod{ + pod, err = c.Pods(ns).Create(&api.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", }, @@ -1257,7 +1257,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // kubelet and the scheduler: the scheduler might have scheduled a pod // already when the kubelet does not know about its new label yet. The // kubelet will then refuse to launch the pod. - framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName)) + framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName, pod.ResourceVersion)) labelPod, err := c.Pods(ns).Get(labelPodName) framework.ExpectNoError(err) Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) @@ -1271,7 +1271,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // scheduled onto it. By("Trying to launch a pod with label to get a node which can launch it.") podName := "with-label-" + string(util.NewUUID()) - _, err := c.Pods(ns).Create(&api.Pod{ + pod, err := c.Pods(ns).Create(&api.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", }, @@ -1289,8 +1289,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { }, }) framework.ExpectNoError(err) - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns)) - pod, err := c.Pods(ns).Get(podName) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod)) + pod, err = c.Pods(ns).Get(podName) framework.ExpectNoError(err) nodeName := pod.Spec.NodeName @@ -1320,7 +1320,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // kubelet and the scheduler: the scheduler might have scheduled a pod // already when the kubelet does not know about its new label yet. The // kubelet will then refuse to launch the pod. - framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName)) + framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName, pod.ResourceVersion)) labelPod, err := c.Pods(ns).Get(labelPodName) framework.ExpectNoError(err) Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) @@ -1337,7 +1337,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // scheduled onto it. 
By("Trying to launch a pod without a toleration to get a node which can launch it.") podName := "without-toleration" - _, err := c.Pods(ns).Create(&api.Pod{ + pod, err := c.Pods(ns).Create(&api.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", }, @@ -1354,8 +1354,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { }, }) framework.ExpectNoError(err) - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns)) - pod, err := c.Pods(ns).Get(podName) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod)) + pod, err = c.Pods(ns).Get(podName) framework.ExpectNoError(err) nodeName := pod.Spec.NodeName @@ -1401,7 +1401,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { By("Trying to relaunch the pod, now with tolerations.") tolerationPodName := "with-tolerations" - _, err = c.Pods(ns).Create(&api.Pod{ + pod, err = c.Pods(ns).Create(&api.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", }, @@ -1436,7 +1436,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // kubelet and the scheduler: the scheduler might have scheduled a pod // already when the kubelet does not know about its new taint yet. The // kubelet will then refuse to launch the pod. - framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, tolerationPodName)) + framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, tolerationPodName, pod.ResourceVersion)) deployedPod, err := c.Pods(ns).Get(tolerationPodName) framework.ExpectNoError(err) Expect(deployedPod.Spec.NodeName).To(Equal(nodeName)) @@ -1453,7 +1453,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // scheduled onto it. By("Trying to launch a pod without a toleration to get a node which can launch it.") podName := "without-toleration" - _, err := c.Pods(ns).Create(&api.Pod{ + pod, err := c.Pods(ns).Create(&api.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", }, @@ -1470,8 +1470,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { }, }) framework.ExpectNoError(err) - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns)) - pod, err := c.Pods(ns).Get(podName) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod)) + pod, err = c.Pods(ns).Get(podName) framework.ExpectNoError(err) nodeName := pod.Spec.NodeName @@ -1517,7 +1517,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { By("Trying to relaunch the pod, still no tolerations.") podNameNoTolerations := "still-no-tolerations" - podNoTolerations := api.Pod{ + podNoTolerations := &api.Pod{ TypeMeta: unversioned.TypeMeta{ Kind: "Pod", }, @@ -1534,7 +1534,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { }, }, } - _, err = c.Pods(ns).Create(&podNoTolerations) + _, err = c.Pods(ns).Create(podNoTolerations) framework.ExpectNoError(err) // Wait a bit to allow scheduler to do its thing // TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds. 
@@ -1546,7 +1546,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 
 		// TODO(@kevin-wangzefeng) Figure out how to do it correctly
 		// By("Trying to relaunch the same.")
-		// _, err = c.Pods(ns).Create(&podNoTolerations)
+		// podNoTolerations, err = c.Pods(ns).Create(&podNoTolerations)
 		// framework.ExpectNoError(err)
 		// defer c.Pods(ns).Delete(podNameNoTolerations, api.NewDeleteOptions(0))
@@ -1555,7 +1555,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 		// // kubelet and the scheduler: the scheduler might have scheduled a pod
 		// // already when the kubelet does not know about its new taint yet. The
 		// // kubelet will then refuse to launch the pod.
-		// framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, podNameNoTolerations))
+		// framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, podNameNoTolerations, podNoTolerations.ResourceVersion))
 		// deployedPod, err := c.Pods(ns).Get(podNameNoTolerations)
 		// framework.ExpectNoError(err)
 		// Expect(deployedPod.Spec.NodeName).To(Equal(nodeName))
diff --git a/test/e2e/security_context.go b/test/e2e/security_context.go
index a18dbad14a..b507770c0c 100644
--- a/test/e2e/security_context.go
+++ b/test/e2e/security_context.go
@@ -168,11 +168,11 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool)
 	pod.Spec.Containers[0].Command = []string{"sleep", "6000"}
 
 	client := f.Client.Pods(f.Namespace.Name)
-	_, err := client.Create(pod)
+	pod, err := client.Create(pod)
 	framework.ExpectNoError(err, "Error creating pod %v", pod)
 	defer client.Delete(pod.Name, nil)
-	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod.Name, f.Namespace.Name))
+	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod))
 
 	testContent := "hello"
 	testFilePath := mountPath + "/TEST"
diff --git a/test/e2e/volumes.go b/test/e2e/volumes.go
index d6c0dcb664..f7e608ab50 100644
--- a/test/e2e/volumes.go
+++ b/test/e2e/volumes.go
@@ -141,10 +141,10 @@ func startVolumeServer(client *client.Client, config VolumeTestConfig) *api.Pod
 			Volumes: volumes,
 		},
 	}
-	_, err := podClient.Create(serverPod)
+	serverPod, err := podClient.Create(serverPod)
 	framework.ExpectNoError(err, "Failed to create %s pod: %v", serverPod.Name, err)
 
-	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, serverPod.Name, config.namespace))
+	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, serverPod))
 
 	By("locating the server pod")
 	pod, err := podClient.Get(serverPod.Name)
@@ -244,13 +244,14 @@ func testVolumeClient(client *client.Client, config VolumeTestConfig, volume api
 	if fsGroup != nil {
 		clientPod.Spec.SecurityContext.FSGroup = fsGroup
 	}
-	if _, err := podsNamespacer.Create(clientPod); err != nil {
+	clientPod, err := podsNamespacer.Create(clientPod)
+	if err != nil {
 		framework.Failf("Failed to create %s pod: %v", clientPod.Name, err)
 	}
-	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, clientPod.Name, config.namespace))
+	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, clientPod))
 
 	By("Checking that text file contents are perfect.")
-	_, err := framework.LookForStringInPodExec(config.namespace, clientPod.Name, []string{"cat", "/opt/index.html"}, expectedContent, time.Minute)
+	_, err = framework.LookForStringInPodExec(config.namespace, clientPod.Name, []string{"cat", "/opt/index.html"}, expectedContent, time.Minute)
 	Expect(err).NotTo(HaveOccurred(), "failed: finding the contents of the mounted file.")
 	if fsGroup != nil {
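
Illustrative usage only (not part of the patch above): a minimal sketch of how callers use the reworked wait helpers, assuming a *client.Client named c, a namespace ns, a pod spec podSpec, and a pod name podName are already in scope; those names are placeholders, not identifiers from the diff.

	// Keep the object returned by Create: WaitForPodRunningInNamespace reads its
	// Name, Namespace and ResourceVersion to scope the watch, and returns
	// immediately if the pod is already Running.
	pod, err := c.Pods(ns).Create(podSpec)
	framework.ExpectNoError(err)
	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod))

	// When only a name is known (e.g. pods created via kubectl), use the
	// name-based variant, which passes an empty resourceVersion to the watch.
	framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, podName, ns))

	// WaitForPodNotPending now also takes a resourceVersion; pass pod.ResourceVersion
	// when the pod object is available, or "" otherwise.
	framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, pod.Name, pod.ResourceVersion))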