Merge pull request #78478 from draveness/feature/use-framework-expect-no-error

fix: use framework.ExpectNoError instead of gomega
commit 13ec0e2bb9 (k3s-v1.15.3)
Kubernetes Prow Robot authored 2019-05-30 04:55:01 -07:00; committed by GitHub
5 changed files with 34 additions and 35 deletions
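
The replacement helper wraps the same gomega assertion that the deleted call sites used, so each hunk below is behavior-preserving: it shortens the call and keeps the reported failure location in the test body rather than inside an assertion helper. As a rough sketch of what the framework helper looks like around this release (ExpectNoError and gomega.ExpectWithOffset are real; the exact body may differ from what ships in test/e2e/framework):

```go
package framework

import "github.com/onsi/gomega"

// ExpectNoError fails the current ginkgo spec if err is non-nil,
// formatting any optional explanation into the failure message.
func ExpectNoError(err error, explain ...interface{}) {
	ExpectNoErrorWithOffset(1, err, explain...)
}

// ExpectNoErrorWithOffset reports the failure "offset" extra stack frames
// above its caller, so wrappers like ExpectNoError still point the failure
// at the test body instead of at the helper itself.
func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
	gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
}
```

Only plain error checks are converted; assertions that need other matchers (HaveLen, BeNumerically, BeNil, Equal) have no framework shorthand, which is why those gomega calls survive in the diffs below.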


@@ -81,7 +81,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 for {
 opts.Limit = int64(rand.Int31n(numberOfTotalResources/10) + 1)
 list, err := client.List(opts)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
+framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
 e2elog.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
 gomega.Expect(len(list.Items)).To(gomega.BeNumerically("<=", opts.Limit))
@@ -110,7 +110,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 ginkgo.By("retrieving those results all at once")
 opts := metav1.ListOptions{Limit: numberOfTotalResources + 1}
 list, err := client.List(opts)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
+framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
 gomega.Expect(list.Items).To(gomega.HaveLen(numberOfTotalResources))
 })
@@ -124,7 +124,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 opts := metav1.ListOptions{}
 opts.Limit = oneTenth
 list, err := client.List(opts)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
+framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
 firstToken := list.Continue
 firstRV := list.ResourceVersion
 if list.GetContinue() == "" {
@@ -163,7 +163,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 ginkgo.By("retrieving the second page again with the token received with the error message")
 opts.Continue = inconsistentToken
 list, err = client.List(opts)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to list pod templates in namespace: %s, given inconsistent continue token %s and limit: %d", ns, opts.Continue, opts.Limit)
+framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given inconsistent continue token %s and limit: %d", ns, opts.Continue, opts.Limit)
 gomega.Expect(list.ResourceVersion).ToNot(gomega.Equal(firstRV))
 gomega.Expect(len(list.Items)).To(gomega.BeNumerically("==", opts.Limit))
 found := int(oneTenth)
@@ -183,7 +183,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 lastRV := list.ResourceVersion
 for {
 list, err := client.List(opts)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
+framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
 if list.GetContinue() == "" {
 gomega.Expect(list.GetRemainingItemCount()).To(gomega.BeNil())
 } else {


@@ -859,13 +859,13 @@ func (h *hostPathSymlinkDriver) CreateVolume(config *testsuites.PerTestConfig, v
 }
 // h.prepPod will be reused in cleanupDriver.
 pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(prepPod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating hostPath init pod")
+framework.ExpectNoError(err, "while creating hostPath init pod")
 err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for hostPath init pod to succeed")
+framework.ExpectNoError(err, "while waiting for hostPath init pod to succeed")
 err = framework.DeletePodWithWait(f, f.ClientSet, pod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while deleting hostPath init pod")
+framework.ExpectNoError(err, "while deleting hostPath init pod")
 return &hostPathSymlinkVolume{
 sourcePath: sourcePath,
 targetPath: targetPath,
@@ -881,13 +881,13 @@ func (v *hostPathSymlinkVolume) DeleteVolume() {
 v.prepPod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", cmd}
 pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(v.prepPod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating hostPath teardown pod")
+framework.ExpectNoError(err, "while creating hostPath teardown pod")
 err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for hostPath teardown pod to succeed")
+framework.ExpectNoError(err, "while waiting for hostPath teardown pod to succeed")
 err = framework.DeletePodWithWait(f, f.ClientSet, pod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while deleting hostPath teardown pod")
+framework.ExpectNoError(err, "while deleting hostPath teardown pod")
 }
 // emptydir


@@ -221,13 +221,13 @@ func testZonalFailover(c clientset.Interface, ns string) {
 pod := getPod(c, ns, regionalPDLabels)
 nodeName := pod.Spec.NodeName
 node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
-gomega.Expect(err).ToNot(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 podZone := node.Labels[v1.LabelZoneFailureDomain]
 ginkgo.By("tainting nodes in the zone the pod is scheduled in")
 selector := labels.SelectorFromSet(labels.Set(map[string]string{v1.LabelZoneFailureDomain: podZone}))
 nodesInZone, err := c.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: selector.String()})
-gomega.Expect(err).ToNot(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 removeTaintFunc := addTaint(c, ns, nodesInZone.Items, podZone)
 defer func() {
@@ -305,13 +305,13 @@ func addTaint(c clientset.Interface, ns string, nodes []v1.Node, podZone string)
 reversePatches[node.Name] = reversePatchBytes
 _, err = c.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes)
-gomega.Expect(err).ToNot(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 }
 return func() {
 for nodeName, reversePatch := range reversePatches {
 _, err := c.CoreV1().Nodes().Patch(nodeName, types.StrategicMergePatchType, reversePatch)
-gomega.Expect(err).ToNot(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 }
 }
 }
@@ -534,7 +534,7 @@ func newPodTemplate(labels map[string]string) *v1.PodTemplateSpec {
 func getTwoRandomZones(c clientset.Interface) []string {
 zones, err := framework.GetClusterZones(c)
-gomega.Expect(err).ToNot(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 gomega.Expect(zones.Len()).To(gomega.BeNumerically(">=", 2),
 "The test should only be run in multizone clusters.")


@@ -36,7 +36,6 @@ import (
 "time"
 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 )
 var (
@@ -151,7 +150,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 if l.pod != nil {
 ginkgo.By("Deleting pod")
 err := framework.DeletePodWithWait(f, f.ClientSet, l.pod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while deleting pod")
+framework.ExpectNoError(err, "while deleting pod")
 l.pod = nil
 }
@@ -427,7 +426,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 ginkgo.By(fmt.Sprintf("Creating pod %s", l.pod.Name))
 removeUnusedContainers(l.pod)
 pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(l.pod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating pod")
+framework.ExpectNoError(err, "while creating pod")
 defer func() {
 ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
 framework.DeletePodWithWait(f, f.ClientSet, pod)
@@ -435,12 +434,12 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 // Wait for pod to be running
 err = framework.WaitForPodRunningInNamespace(f.ClientSet, l.pod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for pod to be running")
+framework.ExpectNoError(err, "while waiting for pod to be running")
 // Exec into container that mounted the volume, delete subpath directory
 rmCmd := fmt.Sprintf("rm -r %s", l.subPathDir)
 _, err = podContainerExec(l.pod, 1, rmCmd)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while removing subpath directory")
+framework.ExpectNoError(err, "while removing subpath directory")
 // Delete pod (from defer) and wait for it to be successfully deleted
 })
@@ -713,7 +712,7 @@ func testPodFailSubpathError(f *framework.Framework, pod *v1.Pod, errorMsg strin
 ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
 removeUnusedContainers(pod)
 pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating pod")
+framework.ExpectNoError(err, "while creating pod")
 defer func() {
 framework.DeletePodWithWait(f, f.ClientSet, pod)
 }()
@@ -792,17 +791,17 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
 ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
 removeUnusedContainers(pod)
 pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating pod")
+framework.ExpectNoError(err, "while creating pod")
 defer func() {
 framework.DeletePodWithWait(f, f.ClientSet, pod)
 }()
 err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for pod to be running")
+framework.ExpectNoError(err, "while waiting for pod to be running")
 ginkgo.By("Failing liveness probe")
 out, err := podContainerExec(pod, 1, fmt.Sprintf("rm %v", probeFilePath))
 e2elog.Logf("Pod exec output: %v", out)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while failing liveness probe")
+framework.ExpectNoError(err, "while failing liveness probe")
 // Check that container has restarted
 ginkgo.By("Waiting for container to restart")
@@ -824,7 +823,7 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
 }
 return false, nil
 })
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for container to restart")
+framework.ExpectNoError(err, "while waiting for container to restart")
 // Fix liveness probe
 ginkgo.By("Rewriting the file")
@@ -836,7 +835,7 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
 }
 out, err = podContainerExec(pod, 1, writeCmd)
 e2elog.Logf("Pod exec output: %v", out)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while rewriting the probe file")
+framework.ExpectNoError(err, "while rewriting the probe file")
 // Wait for container restarts to stabilize
 ginkgo.By("Waiting for container to stop restarting")
@@ -865,7 +864,7 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
 }
 return false, nil
 })
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for container to stabilize")
+framework.ExpectNoError(err, "while waiting for container to stabilize")
 }
 func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete bool) {
@@ -885,13 +884,13 @@ func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete
 ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
 removeUnusedContainers(pod)
 pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating pod")
+framework.ExpectNoError(err, "while creating pod")
 err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for pod to be running")
+framework.ExpectNoError(err, "while waiting for pod to be running")
 pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while getting pod")
+framework.ExpectNoError(err, "while getting pod")
 utils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true)
 }
@@ -899,13 +898,13 @@ func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete
 func formatVolume(f *framework.Framework, pod *v1.Pod) {
 ginkgo.By(fmt.Sprintf("Creating pod to format volume %s", pod.Name))
 pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while creating volume init pod")
+framework.ExpectNoError(err, "while creating volume init pod")
 err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while waiting for volume init pod to succeed")
+framework.ExpectNoError(err, "while waiting for volume init pod to succeed")
 err = framework.DeletePodWithWait(f, f.ClientSet, pod)
-gomega.Expect(err).ToNot(gomega.HaveOccurred(), "while deleting volume init pod")
+framework.ExpectNoError(err, "while deleting volume init pod")
 }
 func podContainerExec(pod *v1.Pod, containerIndex int, command string) (string, error) {


@@ -1230,7 +1230,7 @@ func deleteProvisionedVolumesAndDisks(c clientset.Interface, pvs []*v1.Persisten
 func getRandomClusterZone(c clientset.Interface) string {
 zones, err := framework.GetClusterZones(c)
-gomega.Expect(err).ToNot(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 gomega.Expect(len(zones)).ToNot(gomega.Equal(0))
 zonesList := zones.UnsortedList()