Merge pull request #77159 from draveness/feature/refactor-expect-no-errors

refactor: use framework.ExpectNoError instead in e2e tests
k3s-v1.15.3
Kubernetes Prow Robot 2019-05-01 08:22:57 -07:00 committed by GitHub
commit 8f5a62af48
13 changed files with 221 additions and 225 deletions
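
The change is mechanical: every assertion that only checks an error is moved from raw gomega onto the e2e framework helper. A minimal sketch of the before/after pattern, assuming the usual e2e test imports (illustrative only, not a line taken from this diff):

package e2e

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"

	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.It("illustrates the refactor", func() {
	var err error // stand-in for an error returned by a client or helper call

	// Before: error-only assertion written directly against gomega.
	gomega.Expect(err).NotTo(gomega.HaveOccurred())

	// After: the framework helper performs the same nil-error check.
	framework.ExpectNoError(err)
})

Where a file no longer uses gomega at all, its Go import and the "//vendor/github.com/onsi/gomega:go_default_library" Bazel dependency are dropped as well, which is what the BUILD hunks below show.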

File diff suppressed because it is too large.

@@ -23,7 +23,6 @@ go_library(
 "//test/e2e/instrumentation/common:go_default_library",
 "//test/e2e/instrumentation/logging/utils:go_default_library",
 "//vendor/github.com/onsi/ginkgo:go_default_library",
-"//vendor/github.com/onsi/gomega:go_default_library",
 ],
 )

@@ -26,7 +26,6 @@ import (
 instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 "k8s.io/apimachinery/pkg/util/wait"
 )
@@ -67,17 +66,17 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
 }
 return true, nil
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 // Wait for the Kibana pod(s) to enter the running state.
 ginkgo.By("Checking to make sure the Kibana pods are running")
 label := labels.SelectorFromSet(labels.Set(map[string]string{kibanaKey: kibanaValue}))
 options := metav1.ListOptions{LabelSelector: label.String()}
 pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).List(options)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 for _, pod := range pods.Items {
 err = framework.WaitForPodRunningInNamespace(f.ClientSet, &pod)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 }
 ginkgo.By("Checking to make sure we get a response from the Kibana UI.")
@@ -101,5 +100,5 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
 }
 return true, nil
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 }
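
Reviewer note: framework.ExpectNoError also accepts optional explanation arguments (explain ...interface{}), so call sites that want extra context in the failure message do not have to fall back to gomega. None of the hunks in this PR use that form; the snippet below is a hypothetical variation on the Kibana pod check above, not part of the change:

// Hypothetical variant of the call sites above, passing a description that
// is printed alongside the error if the assertion fails.
pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).List(options)
framework.ExpectNoError(err, "failed to list Kibana pods")
for _, pod := range pods.Items {
	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, &pod), "pod %s never reached Running", pod.Name)
}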

@@ -141,7 +141,7 @@ func runKubectlRetryOrDie(args ...string) string {
 // Expect no errors to be present after retries are finished
 // Copied from framework #ExecOrDie
 framework.Logf("stdout: %q", output)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 return output
 }
@@ -945,7 +945,7 @@ metadata:
 */
 framework.ConformanceIt("should check if kubectl describe prints relevant information for rc and pods ", func() {
 kv, err := framework.KubectlVersion()
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 framework.SkipUnlessServerVersionGTE(kv, c.Discovery())
 controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename)))
 serviceJSON := readTestFileOrDie(redisServiceFilename)
@@ -1011,7 +1011,7 @@ metadata:
 // Node
 // It should be OK to list unschedulable Nodes here.
 nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 node := nodes.Items[0]
 output = framework.RunKubectlOrDie("describe", "node", node.Name)
 requiredStrings = [][]string{
@@ -1100,10 +1100,10 @@ metadata:
 }
 return true, nil
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 service, err := c.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 if len(service.Spec.Ports) != 1 {
 framework.Failf("1 port is expected")
@@ -1247,7 +1247,7 @@ metadata:
 forEachPod(func(pod v1.Pod) {
 ginkgo.By("checking for a matching strings")
 _, err := framework.LookForStringInLog(ns, pod.Name, containerName, "The server is now ready to accept connections", framework.PodStartTimeout)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 ginkgo.By("limiting log lines")
 out := framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--tail=1")
@@ -1495,7 +1495,7 @@ metadata:
 }
 return true, nil
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 })
 /*
@@ -1712,7 +1712,7 @@ metadata:
 gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))
 err := jobutil.WaitForJobGone(c, ns, jobName, wait.ForeverTestTimeout)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 ginkgo.By("verifying the job " + jobName + " was deleted")
 _, err = c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
@@ -2073,7 +2073,7 @@ func validateGuestbookApp(c clientset.Interface, ns string) {
 framework.Logf("Waiting for all frontend pods to be Running.")
 label := labels.SelectorFromSet(labels.Set(map[string]string{"tier": "frontend", "app": "guestbook"}))
 err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 framework.Logf("Waiting for frontend to serve content.")
 if !waitForGuestbookResponse(c, "get", "", `{"data": ""}`, guestbookStartupTimeout, ns) {
 framework.Failf("Frontend service did not start serving content in %v seconds.", guestbookStartupTimeout.Seconds())
@@ -2158,7 +2158,7 @@ func forEachReplicationController(c clientset.Interface, ns, selectorKey, select
 label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
 options := metav1.ListOptions{LabelSelector: label.String()}
 rcs, err = c.CoreV1().ReplicationControllers(ns).List(options)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 if len(rcs.Items) > 0 {
 break
 }

@@ -77,7 +77,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {
 if errors.IsNotFound(err) {
 framework.Skipf("podpresets requires k8s.io/api/settings/v1alpha1 to be enabled")
 }
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 ginkgo.By("creating the pod")
 name := "pod-preset-pod"
@@ -195,7 +195,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {
 if errors.IsNotFound(err) {
 framework.Skipf("podpresets requires k8s.io/api/settings/v1alpha1 to be enabled")
 }
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 ginkgo.By("creating the pod")
 name := "pod-preset-pod"

@@ -16,7 +16,6 @@ go_library(
 "//test/e2e/framework:go_default_library",
 "//test/utils:go_default_library",
 "//vendor/github.com/onsi/ginkgo:go_default_library",
-"//vendor/github.com/onsi/gomega:go_default_library",
 ],
 )

@@ -29,7 +29,6 @@ import (
 testutils "k8s.io/kubernetes/test/utils"
 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 )
 var _ = SIGDescribe("Kubernetes Dashboard", func() {
@@ -52,12 +51,12 @@ var _ = SIGDescribe("Kubernetes Dashboard", func() {
 ginkgo.It("should check that the kubernetes-dashboard instance is alive", func() {
 ginkgo.By("Checking whether the kubernetes-dashboard service exists.")
 err := framework.WaitForService(f.ClientSet, uiNamespace, uiServiceName, true, framework.Poll, framework.ServiceStartTimeout)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 ginkgo.By("Checking to make sure the kubernetes-dashboard pods are running")
 selector := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": uiAppName}))
 err = testutils.WaitForPodsWithLabelRunning(f.ClientSet, uiNamespace, selector)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 ginkgo.By("Checking to make sure we get a response from the kubernetes-dashboard.")
 err = wait.Poll(framework.Poll, serverStartTimeout, func() (bool, error) {
@@ -90,6 +89,6 @@ var _ = SIGDescribe("Kubernetes Dashboard", func() {
 // Don't return err here as it aborts polling.
 return status == http.StatusOK, nil
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 })
 })

@@ -95,7 +95,7 @@ func (t *CassandraUpgradeTest) Setup(f *framework.Framework) {
 }
 return true, nil
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 framework.Logf("Service endpoint is up")
 ginkgo.By("Adding 2 dummy users")
@@ -105,7 +105,7 @@ func (t *CassandraUpgradeTest) Setup(f *framework.Framework) {
 ginkgo.By("Verifying that the users exist")
 users, err := t.listUsers()
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 gomega.Expect(len(users)).To(gomega.Equal(2))
 }
@@ -151,7 +151,7 @@ func (t *CassandraUpgradeTest) addUser(name string) error {
 // getServiceIP is a helper method to extract the Ingress IP from the service.
 func (t *CassandraUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
 svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 ingress := svc.Status.LoadBalancer.Ingress
 if len(ingress) == 0 {
 return ""
@@ -212,6 +212,6 @@ func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
 // Teardown does one final check of the data's availability.
 func (t *CassandraUpgradeTest) Teardown(f *framework.Framework) {
 users, err := t.listUsers()
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 gomega.Expect(len(users) >= t.successfulWrites).To(gomega.BeTrue())
 }
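
Note that the rewrite is limited to error checks: assertions on values, such as the user-count and write-count checks in the Cassandra, etcd, and MySQL upgrade tests, still go through gomega matchers. Taken from the Setup hunk above, the resulting mix looks like this:

users, err := t.listUsers()
framework.ExpectNoError(err)                  // error check: framework helper
gomega.Expect(len(users)).To(gomega.Equal(2)) // value assertion: still gomega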

@@ -90,7 +90,7 @@ func (t *EtcdUpgradeTest) Setup(f *framework.Framework) {
 }
 return true, nil
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 framework.Logf("Service endpoint is up")
 ginkgo.By("Adding 2 dummy users")
@@ -100,7 +100,7 @@ func (t *EtcdUpgradeTest) Setup(f *framework.Framework) {
 ginkgo.By("Verifying that the users exist")
 users, err := t.listUsers()
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 gomega.Expect(len(users)).To(gomega.Equal(2))
 }
@@ -143,7 +143,7 @@ func (t *EtcdUpgradeTest) addUser(name string) error {
 func (t *EtcdUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
 svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 ingress := svc.Status.LoadBalancer.Ingress
 if len(ingress) == 0 {
 return ""
@@ -200,6 +200,6 @@ func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upg
 // Teardown does one final check of the data's availability.
 func (t *EtcdUpgradeTest) Teardown(f *framework.Framework) {
 users, err := t.listUsers()
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 gomega.Expect(len(users) >= t.successfulWrites).To(gomega.BeTrue())
 }

@@ -67,7 +67,7 @@ func mysqlKubectlCreate(ns, file string) {
 func (t *MySQLUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
 svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 ingress := svc.Status.LoadBalancer.Ingress
 if len(ingress) == 0 {
 return ""
@@ -105,7 +105,7 @@ func (t *MySQLUpgradeTest) Setup(f *framework.Framework) {
 }
 return true, nil
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 framework.Logf("Service endpoint is up")
 ginkgo.By("Adding 2 names to the database")
@@ -114,7 +114,7 @@ func (t *MySQLUpgradeTest) Setup(f *framework.Framework) {
 ginkgo.By("Verifying that the 2 names have been inserted")
 count, err := t.countNames()
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 gomega.Expect(count).To(gomega.Equal(2))
 }
@@ -166,7 +166,7 @@ func (t *MySQLUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, up
 // Teardown performs one final check of the data's availability.
 func (t *MySQLUpgradeTest) Teardown(f *framework.Framework) {
 count, err := t.countNames()
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 gomega.Expect(count >= t.successfulWrites).To(gomega.BeTrue())
 }

@@ -56,7 +56,7 @@ func (t *NvidiaGPUUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
 if upgrade == MasterUpgrade || upgrade == ClusterUpgrade {
 // MasterUpgrade should be totally hitless.
 job, err := jobutil.GetJob(f.ClientSet, f.Namespace.Name, "cuda-add")
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 gomega.Expect(job.Status.Failed).To(gomega.BeZero(), "Job pods failed during master upgrade: %v", job.Status.Failed)
 }
 }
@@ -88,11 +88,11 @@ func (t *NvidiaGPUUpgradeTest) startJob(f *framework.Framework) {
 }
 ns := f.Namespace.Name
 _, err := jobutil.CreateJob(f.ClientSet, ns, testJob)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 framework.Logf("Created job %v", testJob)
 ginkgo.By("Waiting for gpu job pod start")
 err = jobutil.WaitForAllJobPodsRunning(f.ClientSet, ns, testJob.Name, 1)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 ginkgo.By("Done with gpu job pod start")
 }
@@ -101,9 +101,9 @@ func (t *NvidiaGPUUpgradeTest) verifyJobPodSuccess(f *framework.Framework) {
 // Wait for client pod to complete.
 ns := f.Namespace.Name
 err := jobutil.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, "cuda-add", 1)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 pods, err := jobutil.GetJobPods(f.ClientSet, f.Namespace.Name, "cuda-add")
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 createdPod := pods.Items[0].Name
 framework.Logf("Created pod %v", createdPod)
 f.PodClient().WaitForSuccess(createdPod, 5*time.Minute)

@@ -55,14 +55,14 @@ func (t *SysctlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, u
 case MasterUpgrade, ClusterUpgrade:
 ginkgo.By("Checking the safe sysctl pod keeps running on master upgrade")
 pod, err := f.ClientSet.CoreV1().Pods(t.validPod.Namespace).Get(t.validPod.Name, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodRunning))
 }
 ginkgo.By("Checking the old unsafe sysctl pod was not suddenly started during an upgrade")
 pod, err := f.ClientSet.CoreV1().Pods(t.invalidPod.Namespace).Get(t.invalidPod.Name, metav1.GetOptions{})
 if err != nil && !errors.IsNotFound(err) {
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 }
 if err == nil {
 gomega.Expect(pod.Status.Phase).NotTo(gomega.Equal(v1.PodRunning))
@@ -86,7 +86,7 @@ func (t *SysctlUpgradeTest) verifySafeSysctlWork(f *framework.Framework) *v1.Pod
 ginkgo.By("Making sure the valid pod launches")
 ev, err := f.PodClient().WaitForErrorEventOrSuccess(t.validPod)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 if ev != nil && ev.Reason == sysctl.UnsupportedReason {
 framework.Skipf("No sysctl support in Docker <1.12")
 }
@@ -104,7 +104,7 @@ func (t *SysctlUpgradeTest) verifyUnsafeSysctlsAreRejected(f *framework.Framewor
 ginkgo.By("Making sure the invalid pod failed")
 ev, err := f.PodClient().WaitForErrorEventOrSuccess(invalidPod)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 if ev != nil && ev.Reason == sysctl.UnsupportedReason {
 framework.Skipf("No sysctl support in Docker <1.12")
 }

@@ -267,7 +267,7 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
 defer wg.Done()
 err := f.PodClient().Delete(pod.ObjectMeta.Name, metav1.NewDeleteOptions(30))
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 gomega.Expect(framework.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
 30*time.Second, 10*time.Minute)).NotTo(gomega.HaveOccurred())
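
For reviewers unfamiliar with the helper: framework.ExpectNoError is itself a thin, offset-aware wrapper around gomega, so the assertion semantics do not change; the gains are a shorter call site, the unexpected error being logged, and the failure being reported at the caller rather than inside a shared helper. The sketch below is only an approximation of the helper's shape in the 1.15-era test/e2e/framework package, not the exact upstream source:

// Approximate shape of the helper (an assumption, not copied from upstream).
func ExpectNoError(err error, explain ...interface{}) {
	ExpectNoErrorWithOffset(1, err, explain...)
}

// ExpectNoErrorWithOffset checks that err is nil; offset tells gomega how many
// extra stack frames to skip so the failure points at the original call site.
func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
	if err != nil {
		Logf("Unexpected error occurred: %v", err)
	}
	gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
}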