mirror of https://github.com/k3s-io/k3s
Merge pull request #78021 from s-ito-ts/77103_e2e_storage
Use framework.ExpectNoError() for e2e/storage/vsphere

k3s-v1.15.3
commit 63e6cf3a0a
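The change applied throughout the diff below is mechanical: instead of wrapping an error-returning call in a gomega assertion, the error is captured (or passed directly) and reported through framework.ExpectNoError(). A minimal sketch of the call-site pattern, assuming a hypothetical error-returning test step waitForSomething used only for illustration:

// Sketch of the call-site pattern; waitForSomething stands in for helpers such as
// framework.WaitForPersistentVolumePhase or waitForVSphereDiskToDetach.
package example

import (
	"errors"

	"github.com/onsi/gomega"
	"k8s.io/kubernetes/test/e2e/framework"
)

func waitForSomething() error { return errors.New("not implemented") }

func oldStyle() {
	// Before: build a gomega assertion around the returned error.
	gomega.Expect(waitForSomething()).NotTo(gomega.HaveOccurred())
}

func newStyle() {
	// After: capture the error and report it through the framework helper.
	err := waitForSomething()
	framework.ExpectNoError(err)
}

The same substitution covers both shapes seen below: a call whose only result is an error, and a helper such as diskIsAttached that also returns a value, where only the error check moves to framework.ExpectNoError.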
@@ -121,11 +121,12 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
 			// Verify PV is Present, after PVC is deleted and PV status should be Failed.
 			pv, err := c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
 			framework.ExpectNoError(err)
-			gomega.Expect(framework.WaitForPersistentVolumePhase(v1.VolumeFailed, c, pv.Name, 1*time.Second, 60*time.Second)).NotTo(gomega.HaveOccurred())
+			err = framework.WaitForPersistentVolumePhase(v1.VolumeFailed, c, pv.Name, 1*time.Second, 60*time.Second)
+			framework.ExpectNoError(err)
 
 			ginkgo.By("Verify the volume is attached to the node")
 			isVolumeAttached, verifyDiskAttachedError := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
-			gomega.Expect(verifyDiskAttachedError).NotTo(gomega.HaveOccurred())
+			framework.ExpectNoError(verifyDiskAttachedError)
 			gomega.Expect(isVolumeAttached).To(gomega.BeTrue())
 
 			ginkgo.By("Verify the volume is accessible and available in the pod")
@@ -136,7 +137,8 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
 			framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name)
 
 			ginkgo.By("Verify PV is detached from the node after Pod is deleted")
-			gomega.Expect(waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)).NotTo(gomega.HaveOccurred())
+			err = waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
+			framework.ExpectNoError(err)
 
 			ginkgo.By("Verify PV should be deleted automatically")
 			framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second))

@@ -144,7 +144,8 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
 		framework.ExpectNoError(err)
 
 		ginkgo.By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name))
-		gomega.Expect(f.WaitForPodRunningSlow(pod.Name)).NotTo(gomega.HaveOccurred())
+		err = f.WaitForPodRunningSlow(pod.Name)
+		framework.ExpectNoError(err)
 
 		// Get the copy of the Pod to know the assigned node name.
 		pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
@@ -153,7 +154,7 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
 		ginkgo.By(fmt.Sprintf("%v Verifing the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
 		isVolumeAttached, verifyDiskAttachedError := diskIsAttached(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
 		gomega.Expect(isVolumeAttached).To(gomega.BeTrue())
-		gomega.Expect(verifyDiskAttachedError).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(verifyDiskAttachedError)
 
 		ginkgo.By(fmt.Sprintf("%v Verifing the volume: %v is accessible in the pod: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Name))
 		verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
@@ -167,6 +168,7 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
 		framework.ExpectNoError(err)
 
 		ginkgo.By(fmt.Sprintf("%v Deleting the Claim: %v", logPrefix, pvclaim.Name))
-		gomega.Expect(framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)).NotTo(gomega.HaveOccurred())
+		err = framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
+		framework.ExpectNoError(err)
 	}
 }
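For reference, a helper with the shape of framework.ExpectNoError can be built as a thin wrapper over gomega; the sketch below is an assumption for illustration, not the framework's actual source. Reporting with an offset of 1 makes a failure point at the test line that called the helper rather than at the wrapper itself.

// Minimal sketch (assumed, not the real framework code) of an ExpectNoError-style helper.
package sketch

import "github.com/onsi/gomega"

// ExpectNoError fails the running spec when err is non-nil; the extra arguments
// are appended to the failure message, mirroring the call sites above.
func ExpectNoError(err error, explain ...interface{}) {
	// Offset 1: attribute the failure to the caller's file and line, not this wrapper's.
	gomega.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred(), explain...)
}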