refactor: use framework.ExpectNoError instead

k3s-v1.15.3
draveness 2019-05-09 09:48:02 +08:00
parent 5d9d5bca79
commit 950f6e868c
6 changed files with 14 additions and 19 deletions
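The pattern being removed, gomega.Expect(err).NotTo(gomega.HaveOccurred()), and the pattern being introduced, framework.ExpectNoError(err), assert the same thing; the helper just centralizes the check and attributes failures to the calling test line. A rough sketch of what the helper amounts to in the e2e framework of this release (the exact body may differ):

// Sketch only: the real helper lives in k8s.io/kubernetes/test/e2e/framework.
package framework

import "github.com/onsi/gomega"

// ExpectNoError fails the current spec if err is non-nil. The variadic
// explain arguments are appended to the failure message.
func ExpectNoError(err error, explain ...interface{}) {
	ExpectNoErrorWithOffset(1, err, explain...)
}

// ExpectNoErrorWithOffset reports the failure `offset` stack frames above
// this helper, so the ginkgo output points at the test, not the wrapper.
func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
	gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
}

Once every error check in a file goes through the helper, the direct gomega import (and the matching //vendor dependency in BUILD files) can be dropped, which is what most of the hunks below do.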

View File

@@ -814,19 +814,19 @@ var _ = framework.KubeDescribe("Pods", func() {
ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate1))
_, err := podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True")), "status")
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
// Sleep for 10 seconds.
time.Sleep(maxReadyStatusUpdateTolerance)
gomega.Expect(podClient.PodIsReady(podName)).To(gomega.BeFalse(), "Expect pod's Ready condition to be false with only one condition in readinessGates equal to True")
ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate2))
_, err = podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate2, "True")), "status")
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
validatePodReadiness(true)
ginkgo.By(fmt.Sprintf("patching pod status with condition %q to false", readinessGate1))
_, err = podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "False")), "status")
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
validatePodReadiness(false)
})
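Note that this file keeps its gomega import: the BeFalse assertion on pod readiness above is not an error check, so only the err assertions are swapped. The helper also accepts optional explanation arguments; a hypothetical call (the message below is not part of this commit):

// Hypothetical: attach context to the failure, similar to the ginkgo.By text.
framework.ExpectNoError(err, "patching status of pod %q with condition %q", podName, readinessGate1)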

View File

@@ -24,7 +24,6 @@ import (
"k8s.io/kubernetes/test/e2e/upgrades"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
// JobUpgradeTest is a test harness for batch Jobs.
@@ -44,11 +43,11 @@ func (t *JobUpgradeTest) Setup(f *framework.Framework) {
t.job = jobutil.NewTestJob("notTerminate", "foo", v1.RestartPolicyOnFailure, 2, 2, nil, 6)
job, err := jobutil.CreateJob(f.ClientSet, t.namespace, t.job)
t.job = job
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
ginkgo.By("Ensuring active pods == parallelism")
err = jobutil.WaitForAllJobPodsRunning(f.ClientSet, t.namespace, job.Name, 2)
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
}
// Test verifies that the Jobs Pods are running after an upgrade
@@ -56,7 +55,7 @@ func (t *JobUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgr
<-done
ginkgo.By("Ensuring active pods == parallelism")
err := jobutil.EnsureAllJobPodsRunning(f.ClientSet, t.namespace, t.job.Name, 2)
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
}
// Teardown cleans up any remaining resources.

View File

@@ -18,7 +18,6 @@ package upgrades
import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
@@ -69,12 +68,12 @@ func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) {
ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns)
_, err := f.ClientSet.CoreV1().Services(ns).Create(t.service)
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
*(t.set.Spec.Replicas) = 3
_, err = f.ClientSet.AppsV1().StatefulSets(ns).Create(t.set)
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
ginkgo.By("Saturating stateful set " + t.set.Name)
t.tester.Saturate(t.set)

View File

@@ -22,7 +22,6 @@ go_library(
"//test/e2e/storage/utils:go_default_library",
"//test/e2e/upgrades:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
],
)

View File

@@ -23,7 +23,6 @@ import (
"k8s.io/kubernetes/test/e2e/framework/volume"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/kubernetes/test/e2e/upgrades"
)
@@ -69,7 +68,7 @@ func (t *PersistentVolumeUpgradeTest) Setup(f *framework.Framework) {
ginkgo.By("Creating the PV and PVC")
t.pv, t.pvc, err = framework.CreatePVPVC(f.ClientSet, pvConfig, pvcConfig, ns, true)
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitOnPVandPVC(f.ClientSet, ns, t.pv, t.pvc))
ginkgo.By("Consuming the PV before upgrade")

View File

@@ -28,7 +28,6 @@ import (
"k8s.io/kubernetes/test/e2e/upgrades"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const devicePath = "/mnt/volume1"
@@ -82,20 +81,20 @@ func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) {
}
t.pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns)
t.pvc, err = framework.CreatePVC(cs, ns, t.pvc)
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, ns, t.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
t.pvc, err = cs.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Get(t.pvc.Name, metav1.GetOptions{})
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
t.pv, err = cs.CoreV1().PersistentVolumes().Get(t.pvc.Spec.VolumeName, metav1.GetOptions{})
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
ginkgo.By("Consuming the PVC before downgrade")
t.pod, err = framework.CreateSecPod(cs, ns, []*v1.PersistentVolumeClaim{t.pvc}, false, "", false, false, framework.SELinuxLabel, nil, framework.PodStartTimeout)
- gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ framework.ExpectNoError(err)
ginkgo.By("Checking if PV exists as expected volume mode")
utils.CheckVolumeModeOfPath(t.pod, block, devicePath)