Merge pull request #78021 from s-ito-ts/77103_e2e_storage

Use framework.ExpectNoError() for e2e/storage/vsphere
Kubernetes Prow Robot authored 2019-05-17 13:04:25 -07:00, committed by GitHub
commit 63e6cf3a0a
2 changed files with 10 additions and 6 deletions
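
The change is mechanical: assertions of the form gomega.Expect(err).NotTo(gomega.HaveOccurred()) are rewritten to use the framework.ExpectNoError(err) helper. As an illustrative sketch (not part of the diff), assuming the usual e2e test imports "k8s.io/kubernetes/test/e2e/framework" and "github.com/onsi/gomega":

// Before: assert on the returned error with gomega.
err := framework.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second)
gomega.Expect(err).NotTo(gomega.HaveOccurred())

// After: the framework helper fails the spec in the same way,
// but keeps call sites shorter and uniform across the suite.
err = framework.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second)
framework.ExpectNoError(err)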

Changed file 1 of 2

@@ -121,11 +121,12 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
 // Verify PV is Present, after PVC is deleted and PV status should be Failed.
 pv, err := c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
 framework.ExpectNoError(err)
-gomega.Expect(framework.WaitForPersistentVolumePhase(v1.VolumeFailed, c, pv.Name, 1*time.Second, 60*time.Second)).NotTo(gomega.HaveOccurred())
+err = framework.WaitForPersistentVolumePhase(v1.VolumeFailed, c, pv.Name, 1*time.Second, 60*time.Second)
+framework.ExpectNoError(err)
 ginkgo.By("Verify the volume is attached to the node")
 isVolumeAttached, verifyDiskAttachedError := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
-gomega.Expect(verifyDiskAttachedError).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(verifyDiskAttachedError)
 gomega.Expect(isVolumeAttached).To(gomega.BeTrue())
 ginkgo.By("Verify the volume is accessible and available in the pod")
@@ -136,7 +137,8 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
 framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name)
 ginkgo.By("Verify PV is detached from the node after Pod is deleted")
-gomega.Expect(waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)).NotTo(gomega.HaveOccurred())
+err = waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
+framework.ExpectNoError(err)
 ginkgo.By("Verify PV should be deleted automatically")
 framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second))

Changed file 2 of 2

@@ -144,7 +144,8 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
 framework.ExpectNoError(err)
 ginkgo.By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name))
-gomega.Expect(f.WaitForPodRunningSlow(pod.Name)).NotTo(gomega.HaveOccurred())
+err = f.WaitForPodRunningSlow(pod.Name)
+framework.ExpectNoError(err)
 // Get the copy of the Pod to know the assigned node name.
 pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
@@ -153,7 +154,7 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
 ginkgo.By(fmt.Sprintf("%v Verifing the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
 isVolumeAttached, verifyDiskAttachedError := diskIsAttached(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
 gomega.Expect(isVolumeAttached).To(gomega.BeTrue())
-gomega.Expect(verifyDiskAttachedError).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(verifyDiskAttachedError)
 ginkgo.By(fmt.Sprintf("%v Verifing the volume: %v is accessible in the pod: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Name))
 verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
@@ -167,6 +168,7 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
 framework.ExpectNoError(err)
 ginkgo.By(fmt.Sprintf("%v Deleting the Claim: %v", logPrefix, pvclaim.Name))
-gomega.Expect(framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)).NotTo(gomega.HaveOccurred())
+err = framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
+framework.ExpectNoError(err)
 }
 }
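
For reference, framework.ExpectNoError is itself a thin wrapper over the same gomega assertion. A rough sketch of its behavior (paraphrased here as an assumption, not copied from the repository):

// Paraphrased sketch: the real helper also passes a caller offset so a
// failure is reported at the test line rather than inside the helper.
func ExpectNoError(err error, explain ...interface{}) {
	gomega.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred(), explain...)
}

The practical win is uniformity: every error check reads the same way and can carry an optional explanation argument, as the DeletePodWithWait call in the first file already does.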