Merge pull request #76034 from mkimuram/issue/75775

Restore volume tests using statefulset
k3s-v1.15.3
Kubernetes Prow Robot 2019-04-04 17:48:07 -07:00 committed by GitHub
commit 8660288acd
37 changed files with 386 additions and 343 deletions
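Two kinds of changes make up this diff. Most hunks mechanically swap the gomega assertion Expect(err).NotTo(HaveOccurred(), ...) for the e2e framework helper framework.ExpectNoError(err, ...), which in turn lets several files drop their dot-import of github.com/onsi/gomega. One hunk in persistent_volumes.go restores the "pods that use multiple volumes ... should be reschedulable" test, rewriting it around a StatefulSet instead of the removed testsuites.TestAccessMultipleVolumesAcrossPodRecreation call. As a rough sketch of why the assertion swap is behavior-preserving (this is not the framework's actual source, only the observable contract around this release):

// Sketch of the contract, not the real test/e2e/framework file.
package framework

import "github.com/onsi/gomega"

// ExpectNoError fails the running Ginkgo spec when err is non-nil; the
// optional explain arguments become part of the failure message, exactly
// like the description arguments of the gomega form it replaces.
func ExpectNoError(err error, explain ...interface{}) {
	gomega.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred(), explain...)
}

Call sites then shrink to framework.ExpectNoError(err, "while deleting pod") with the same failure semantics, which is the pattern repeated in nearly every hunk below.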


@@ -226,7 +226,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 }
 err := utilerrors.NewAggregate(errs)
-Expect(err).NotTo(HaveOccurred(), "while cleaning up after test")
+framework.ExpectNoError(err, "while cleaning up after test")
 }
 // The CSIDriverRegistry feature gate is needed for this test in Kubernetes 1.12.
@@ -360,7 +360,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 attachKey := v1.ResourceName(volumeutil.GetCSIAttachLimitKey(m.provisioner))
 nodeAttachLimit, err := checkNodeForLimits(nodeName, attachKey, m.cs)
-Expect(err).NotTo(HaveOccurred(), "while fetching node %v", err)
+framework.ExpectNoError(err, "while fetching node %v", err)
 Expect(nodeAttachLimit).To(Equal(2))
@@ -379,7 +379,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 _, _, pod3 := createPod()
 Expect(pod3).NotTo(BeNil(), "while creating third pod")
 err = waitForMaxVolumeCondition(pod3, m.cs)
-Expect(err).NotTo(HaveOccurred(), "while waiting for max volume condition on pod : %+v", pod3)
+framework.ExpectNoError(err, "while waiting for max volume condition on pod : %+v", pod3)
 })
 })
@@ -440,7 +440,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 By("Expanding current pvc")
 newSize := resource.MustParse("6Gi")
 pvc, err = expandPVCSize(pvc, newSize, m.cs)
-Expect(err).NotTo(HaveOccurred(), "While updating pvc for more size")
+framework.ExpectNoError(err, "While updating pvc for more size")
 Expect(pvc).NotTo(BeNil())
 pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
@@ -455,12 +455,12 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 By("Waiting for persistent volume resize to finish")
 err = waitForControllerVolumeResize(pvc, m.cs, csiResizeWaitPeriod)
-Expect(err).NotTo(HaveOccurred(), "While waiting for CSI PV resize to finish")
+framework.ExpectNoError(err, "While waiting for CSI PV resize to finish")
 checkPVCSize := func() {
 By("Waiting for PVC resize to finish")
 pvc, err = waitForFSResize(pvc, m.cs)
-Expect(err).NotTo(HaveOccurred(), "while waiting for PVC resize to finish")
+framework.ExpectNoError(err, "while waiting for PVC resize to finish")
 pvcConditions := pvc.Status.Conditions
 Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions")
@@ -472,7 +472,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 } else {
 By("Checking for conditions on pvc")
 pvc, err = m.cs.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred(), "While fetching pvc after controller resize")
+framework.ExpectNoError(err, "While fetching pvc after controller resize")
 inProgressConditions := pvc.Status.Conditions
 if len(inProgressConditions) > 0 {
@@ -481,12 +481,12 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 By("Deleting the previously created pod")
 err = framework.DeletePodWithWait(f, m.cs, pod)
-Expect(err).NotTo(HaveOccurred(), "while deleting pod for resizing")
+framework.ExpectNoError(err, "while deleting pod for resizing")
 By("Creating a new pod with same volume")
 pod2, err := createPodWithPVC(pvc)
 Expect(pod2).NotTo(BeNil(), "while creating pod for csi resizing")
-Expect(err).NotTo(HaveOccurred(), "while recreating pod for resizing")
+framework.ExpectNoError(err, "while recreating pod for resizing")
 checkPVCSize()
 }
@@ -531,7 +531,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 By("Expanding current pvc")
 newSize := resource.MustParse("6Gi")
 pvc, err = expandPVCSize(pvc, newSize, m.cs)
-Expect(err).NotTo(HaveOccurred(), "While updating pvc for more size")
+framework.ExpectNoError(err, "While updating pvc for more size")
 Expect(pvc).NotTo(BeNil())
 pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
@@ -541,11 +541,11 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 By("Waiting for persistent volume resize to finish")
 err = waitForControllerVolumeResize(pvc, m.cs, csiResizeWaitPeriod)
-Expect(err).NotTo(HaveOccurred(), "While waiting for PV resize to finish")
+framework.ExpectNoError(err, "While waiting for PV resize to finish")
 By("Waiting for PVC resize to finish")
 pvc, err = waitForFSResize(pvc, m.cs)
-Expect(err).NotTo(HaveOccurred(), "while waiting for PVC to finish")
+framework.ExpectNoError(err, "while waiting for PVC to finish")
 pvcConditions := pvc.Status.Conditions
 Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions")
@@ -613,7 +613,7 @@ func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node f
 pvcClaims := []*v1.PersistentVolumeClaim{claim}
 _, err = framework.WaitForPVClaimBoundPhase(cs, pvcClaims, framework.ClaimProvisionTimeout)
-Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", err)
+framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
 pod := &v1.Pod{
 ObjectMeta: metav1.ObjectMeta{


@@ -1044,7 +1044,7 @@ func (c *cinderDriver) CreateVolume(config *testsuites.PerTestConfig, volType te
 output, err := exec.Command("cinder", "create", "--display-name="+volumeName, "1").CombinedOutput()
 outputString := string(output[:])
 framework.Logf("cinder output:\n%s", outputString)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 // Parse 'id'' from stdout. Expected format:
 // | attachments | [] |
@@ -1220,7 +1220,7 @@ func (g *gcePdDriver) CreateVolume(config *testsuites.PerTestConfig, volType tes
 }
 By("creating a test gce pd volume")
 vname, err := framework.CreatePDWithRetry()
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 return &gcePdVolume{
 volumeName: vname,
 }
@@ -1341,7 +1341,7 @@ func (v *vSphereDriver) CreateVolume(config *testsuites.PerTestConfig, volType t
 vspheretest.Bootstrap(f)
 nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo()
 volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 return &vSphereVolume{
 volumePath: volumePath,
 nodeInfo: nodeInfo,
@@ -1460,7 +1460,7 @@ func (a *azureDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestCo
 func (a *azureDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
 By("creating a test azure disk volume")
 volumeName, err := framework.CreatePDWithRetry()
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 return &azureVolume{
 volumeName: volumeName,
 }
@@ -1573,7 +1573,7 @@ func (a *awsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testp
 By("creating a test aws volume")
 var err error
 a.volumeName, err = framework.CreatePDWithRetry()
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 }
 DeleteVolume() {
@@ -1687,9 +1687,9 @@ func (l *localDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestCo
 filesystemType := "fs"
 ssdCmd := fmt.Sprintf("ls -1 /mnt/disks/by-uuid/google-local-ssds-%s-%s/ | wc -l", ssdInterface, filesystemType)
 res, err := l.hostExec.IssueCommandWithResult(ssdCmd, l.node)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 num, err := strconv.Atoi(strings.TrimSpace(res))
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 if num < 1 {
 framework.Skipf("Requires at least 1 %s %s localSSD ", ssdInterface, filesystemType)
 }


@@ -233,11 +233,11 @@ func (d *driverDefinition) GetDynamicProvisionStorageClass(config *testsuites.Pe
 }
 items, err := f.LoadFromManifests(d.StorageClass.FromFile)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "load storage class from %s", d.StorageClass.FromFile)
+framework.ExpectNoError(err, "load storage class from %s", d.StorageClass.FromFile)
 gomega.Expect(len(items)).To(gomega.Equal(1), "exactly one item from %s", d.StorageClass.FromFile)
 err = f.PatchItems(items...)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "patch items")
+framework.ExpectNoError(err, "patch items")
 sc, ok := items[0].(*storagev1.StorageClass)
 gomega.Expect(ok).To(gomega.BeTrue(), "storage class from %s", d.StorageClass.FromFile)


@@ -29,7 +29,6 @@ import (
 utilerrors "k8s.io/apimachinery/pkg/util/errors"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
-"k8s.io/kubernetes/test/e2e/storage/testsuites"
 "k8s.io/kubernetes/test/e2e/storage/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -308,22 +307,69 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 Describe("Default StorageClass", func() {
 Context("pods that use multiple volumes", func() {
+AfterEach(func() {
+framework.DeleteAllStatefulSets(c, ns)
+})
 It("should be reschedulable [Slow]", func() {
 // Only run on providers with default storageclass
 framework.SkipUnlessProviderIs("openstack", "gce", "gke", "vsphere", "azure")
 numVols := 4
+ssTester := framework.NewStatefulSetTester(c)
-By("Creating pvcs")
-claims := []*v1.PersistentVolumeClaim{}
+By("Creating a StatefulSet pod to initialize data")
+writeCmd := "true"
 for i := 0; i < numVols; i++ {
-pvc := framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{}, ns)
-claims = append(claims, pvc)
+writeCmd += fmt.Sprintf("&& touch %v", getVolumeFile(i))
 }
+writeCmd += "&& sleep 10000"
+probe := &v1.Probe{
+Handler: v1.Handler{
+Exec: &v1.ExecAction{
+// Check that the last file got created
+Command: []string{"test", "-f", getVolumeFile(numVols - 1)},
+},
+},
+InitialDelaySeconds: 1,
+PeriodSeconds: 1,
+}
-By("Testing access to pvcs before and after pod recreation on differetn node")
-testsuites.TestAccessMultipleVolumesAcrossPodRecreation(f, c, ns,
-framework.NodeSelection{}, claims, false /* sameNode */)
+mounts := []v1.VolumeMount{}
+claims := []v1.PersistentVolumeClaim{}
+for i := 0; i < numVols; i++ {
+pvc := framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{}, ns)
+pvc.Name = getVolName(i)
+mounts = append(mounts, v1.VolumeMount{Name: pvc.Name, MountPath: getMountPath(i)})
+claims = append(claims, *pvc)
+}
+spec := makeStatefulSetWithPVCs(ns, writeCmd, mounts, claims, probe)
+ss, err := c.AppsV1().StatefulSets(ns).Create(spec)
+framework.ExpectNoError(err)
+ssTester.WaitForRunningAndReady(1, ss)
+By("Deleting the StatefulSet but not the volumes")
+// Scale down to 0 first so that the Delete is quick
+ss, err = ssTester.Scale(ss, 0)
+framework.ExpectNoError(err)
+ssTester.WaitForStatusReplicas(ss, 0)
+err = c.AppsV1().StatefulSets(ns).Delete(ss.Name, &metav1.DeleteOptions{})
+framework.ExpectNoError(err)
+By("Creating a new Statefulset and validating the data")
+validateCmd := "true"
+for i := 0; i < numVols; i++ {
+validateCmd += fmt.Sprintf("&& test -f %v", getVolumeFile(i))
+}
+validateCmd += "&& sleep 10000"
+spec = makeStatefulSetWithPVCs(ns, validateCmd, mounts, claims, probe)
+ss, err = c.AppsV1().StatefulSets(ns).Create(spec)
+framework.ExpectNoError(err)
+ssTester.WaitForRunningAndReady(1, ss)
 })
 })
 })
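The restored test references helpers that are defined elsewhere in this file and do not appear in the hunks shown here: getVolName, getMountPath, getVolumeFile, and makeStatefulSetWithPVCs. A hedged sketch of what such a builder could look like follows; the replica count, labels, image, and service name are illustrative assumptions, not the PR's actual definitions.

// Illustrative sketch only: the PR defines these helpers elsewhere in
// persistent_volumes.go, and the names, labels, image, and service name
// used here are assumptions, not the PR's actual values.
package storage

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

func getVolName(i int) string    { return fmt.Sprintf("vol%d", i) }
func getMountPath(i int) string  { return fmt.Sprintf("/mnt/volume%d", i) }
func getVolumeFile(i int) string { return fmt.Sprintf("%s/file%d", getMountPath(i), i) }

// makeStatefulSetWithPVCs builds a one-replica StatefulSet that runs cmd in a
// shell, mounts every claim via volumeClaimTemplates, and attaches readyProbe
// so WaitForRunningAndReady doubles as a check that cmd finished its work.
func makeStatefulSetWithPVCs(ns, cmd string, mounts []v1.VolumeMount,
	claims []v1.PersistentVolumeClaim, readyProbe *v1.Probe) *appsv1.StatefulSet {
	replicas := int32(1)
	labels := map[string]string{"app": "many-volumes-test"}
	return &appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{Name: "many-volumes-test", Namespace: ns},
		Spec: appsv1.StatefulSetSpec{
			ServiceName: "many-volumes-test",
			Selector:    &metav1.LabelSelector{MatchLabels: labels},
			Replicas:    &replicas,
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec: v1.PodSpec{
					Containers: []v1.Container{{
						Name:           "many-volumes-test",
						Image:          imageutils.GetE2EImage(imageutils.Nginx),
						Command:        []string{"/bin/sh"},
						Args:           []string{"-c", cmd},
						VolumeMounts:   mounts,
						ReadinessProbe: readyProbe,
					}},
				},
			},
			VolumeClaimTemplates: claims,
		},
	}
}

The readiness probe passed in by the test (test -f on the last volume file) is what lets ssTester.WaitForRunningAndReady double as a check that the write or validation command actually completed.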


@@ -23,7 +23,6 @@ import (
 "time"
 . "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
 v1 "k8s.io/api/core/v1"
 storagev1 "k8s.io/api/storage/v1"
@@ -201,7 +200,7 @@ func createGenericVolumeTestResource(driver TestDriver, config *PerTestConfig, p
 By("creating a StorageClass " + r.sc.Name)
 var err error
 r.sc, err = cs.StorageV1().StorageClasses().Create(r.sc)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 if r.sc != nil {
 r.volSource, r.pv, r.pvc = createVolumeSourceWithPVCPVFromDynamicProvisionSC(
@@ -289,10 +288,10 @@ func createVolumeSourceWithPVCPV(
 framework.Logf("Creating PVC and PV")
 pv, pvc, err := framework.CreatePVCPV(f.ClientSet, pvConfig, pvcConfig, f.Namespace.Name, false)
-Expect(err).NotTo(HaveOccurred(), "PVC, PV creation failed")
+framework.ExpectNoError(err, "PVC, PV creation failed")
 err = framework.WaitOnPVandPVC(f.ClientSet, f.Namespace.Name, pv, pvc)
-Expect(err).NotTo(HaveOccurred(), "PVC, PV failed to bind")
+framework.ExpectNoError(err, "PVC, PV failed to bind")
 volSource := &v1.VolumeSource{
 PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
@@ -323,20 +322,20 @@ func createVolumeSourceWithPVCPVFromDynamicProvisionSC(
 var err error
 pvc, err = cs.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 if !isDelayedBinding(sc) {
 err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 }
 pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 var pv *v1.PersistentVolume
 if !isDelayedBinding(sc) {
 pv, err = cs.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 }
 volSource := &v1.VolumeSource{
@@ -380,7 +379,7 @@ func getClaim(claimSize string, ns string) *v1.PersistentVolumeClaim {
 func deleteStorageClass(cs clientset.Interface, className string) {
 err := cs.StorageV1().StorageClasses().Delete(className, nil)
 if err != nil && !apierrs.IsNotFound(err) {
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 }
 }


@@ -21,7 +21,6 @@ import (
 "time"
 . "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -326,7 +325,7 @@ func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, n
 defer func() {
 framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
 }()
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 byteLen := 64
 for i, pvc := range pvcs {
@@ -349,7 +348,7 @@ func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, n
 }
 pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred(), "get pod")
+framework.ExpectNoError(err, "get pod")
 return pod.Spec.NodeName
 }
@@ -400,10 +399,10 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 defer func() {
 framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
 }()
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
 pods = append(pods, pod)
-Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("get pod%d", index))
+framework.ExpectNoError(err, fmt.Sprintf("get pod%d", index))
 actualNodeName := pod.Spec.NodeName
 // Set affinity depending on requiresSameNode


@@ -236,7 +236,7 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
 // TODO: make class creation optional and remove the IsAlreadyExists exception
 Expect(err == nil || apierrs.IsAlreadyExists(err)).To(Equal(true))
 class, err = client.StorageV1().StorageClasses().Get(class.Name, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 defer func() {
 framework.Logf("deleting storage class %s", class.Name)
 framework.ExpectNoError(client.StorageV1().StorageClasses().Delete(class.Name, nil))
@@ -245,7 +245,7 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
 By("creating a claim")
 claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 defer func() {
 framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
 // typically this claim has already been deleted
@@ -283,11 +283,11 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
 // checkProvisioning verifies that the claim is bound and has the correct properities
 func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume {
 err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 By("checking the claim")
 pv, err := framework.GetBoundPV(client, claim)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 // Check sizes
 expectedCapacity := resource.MustParse(t.ExpectedSize)
@@ -353,14 +353,14 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
 }()
 framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
 runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred(), "get pod")
+framework.ExpectNoError(err, "get pod")
 actualNodeName := runningPod.Spec.NodeName
 StopPod(client, pod)
 pod = nil // Don't stop twice.
 // Get a new copy of the PV
 volume, err := framework.GetBoundPV(client, claim)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 By(fmt.Sprintf("checking the created volume has the correct mount options, is readable and retains data on the same node %q", actualNodeName))
 command = "grep 'hello world' /mnt/test/data"
@@ -406,7 +406,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
 pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node)
 framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
 runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred(), "get pod")
+framework.ExpectNoError(err, "get pod")
 actualNodeName := runningPod.Spec.NodeName
 StopPod(client, pod)
 pod = nil // Don't stop twice.
@@ -422,7 +422,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
 pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-reader-node2", command, secondNode)
 framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
 runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred(), "get pod")
+framework.ExpectNoError(err, "get pod")
 Expect(runningPod.Spec.NodeName).NotTo(Equal(actualNodeName), "second pod should have run on a different node")
 StopPod(client, pod)
 pod = nil
@@ -443,7 +443,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
 By("creating a storage class " + t.Class.Name)
 class, err := t.Client.StorageV1().StorageClasses().Create(t.Class)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 defer deleteStorageClass(t.Client, class.Name)
 By("creating claims")
@@ -453,7 +453,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
 c, err := t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
 claimNames = append(claimNames, c.Name)
 createdClaims = append(createdClaims, c)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 }
 defer func() {
 var errors map[string]error
@@ -484,7 +484,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
 } else {
 pod, err = framework.CreatePod(t.Client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */)
 }
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 defer func() {
 framework.DeletePodOrFail(t.Client, pod.Namespace, pod.Name)
 framework.WaitForPodToDisappear(t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
@@ -497,20 +497,20 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
 // collect node details
 node, err := t.Client.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 By("re-checking the claims to see they binded")
 var pvs []*v1.PersistentVolume
 for _, claim := range createdClaims {
 // Get new copy of the claim
 claim, err = t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 // make sure claim did bind
 err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 pv, err := t.Client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 pvs = append(pvs, pv)
 }
 Expect(len(pvs)).To(Equal(len(createdClaims)))
@@ -594,7 +594,7 @@ func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeCl
 for _, claim := range pvcs {
 // Get new copy of the claim
 claim, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 Expect(claim.Status.Phase).To(Equal(v1.ClaimPending))
 }
 }
@@ -611,19 +611,19 @@ func prepareDataSourceForProvisioning(
 if class != nil {
 By("[Initialize dataSource]creating a StorageClass " + class.Name)
 _, err = client.StorageV1().StorageClasses().Create(class)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 }
 By("[Initialize dataSource]creating a initClaim")
 updatedClaim, err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Create(initClaim)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, updatedClaim.Namespace, updatedClaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 By("[Initialize dataSource]checking the initClaim")
 // Get new copy of the initClaim
 _, err = client.CoreV1().PersistentVolumeClaims(updatedClaim.Namespace).Get(updatedClaim.Name, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 // write namespace to the /mnt/test (= the volume).
 By("[Initialize dataSource]write data to volume")
@@ -636,15 +636,15 @@ func prepareDataSourceForProvisioning(
 By("[Initialize dataSource]creating a snapshot")
 snapshot := getSnapshot(updatedClaim.Name, updatedClaim.Namespace, snapshotClass.GetName())
 snapshot, err = dynamicClient.Resource(snapshotGVR).Namespace(updatedClaim.Namespace).Create(snapshot, metav1.CreateOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 WaitForSnapshotReady(dynamicClient, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 By("[Initialize dataSource]checking the snapshot")
 // Get new copy of the snapshot
 snapshot, err = dynamicClient.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Get(snapshot.GetName(), metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 group := "snapshot.storage.k8s.io"
 dataSourceRef := &v1.TypedLocalObjectReference{
 APIGroup: &group,


@@ -123,7 +123,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
 By("creating a StorageClass " + class.Name)
 class, err := cs.StorageV1().StorageClasses().Create(class)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 defer func() {
 framework.Logf("deleting storage class %s", class.Name)
 framework.ExpectNoError(cs.StorageV1().StorageClasses().Delete(class.Name, nil))
@@ -131,7 +131,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
 By("creating a claim")
 pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 defer func() {
 framework.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name)
 // typically this claim has already been deleted
@@ -141,20 +141,20 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
 }
 }()
 err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 By("checking the claim")
 // Get new copy of the claim
 pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 // Get the bound PV
 pv, err := cs.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 By("creating a SnapshotClass")
 vsc, err = dc.Resource(snapshotClassGVR).Create(vsc, metav1.CreateOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 defer func() {
 framework.Logf("deleting SnapshotClass %s", vsc.GetName())
 framework.ExpectNoError(dc.Resource(snapshotClassGVR).Delete(vsc.GetName(), nil))
@@ -164,7 +164,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
 snapshot := getSnapshot(pvc.Name, pvc.Namespace, vsc.GetName())
 snapshot, err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Create(snapshot, metav1.CreateOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 defer func() {
 framework.Logf("deleting snapshot %q/%q", snapshot.GetNamespace(), snapshot.GetName())
 // typically this snapshot has already been deleted
@@ -174,18 +174,18 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
 }
 }()
 err = WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 By("checking the snapshot")
 // Get new copy of the snapshot
 snapshot, err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Get(snapshot.GetName(), metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 // Get the bound snapshotContent
 snapshotSpec := snapshot.Object["spec"].(map[string]interface{})
 snapshotContentName := snapshotSpec["snapshotContentName"].(string)
 snapshotContent, err := dc.Resource(snapshotContentGVR).Get(snapshotContentName, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 snapshotContentSpec := snapshotContent.Object["spec"].(map[string]interface{})
 volumeSnapshotRef := snapshotContentSpec["volumeSnapshotRef"].(map[string]interface{})


@@ -439,7 +439,7 @@ func TestBasicSubpathFile(f *framework.Framework, contents string, pod *v1.Pod,
 By(fmt.Sprintf("Deleting pod %s", pod.Name))
 err := framework.DeletePodWithWait(f, f.ClientSet, pod)
-Expect(err).NotTo(HaveOccurred(), "while deleting pod")
+framework.ExpectNoError(err, "while deleting pod")
 }
 func generateSuffixForPodName(s string) string {
@@ -699,7 +699,7 @@ func testReadFile(f *framework.Framework, file string, pod *v1.Pod, containerInd
 By(fmt.Sprintf("Deleting pod %s", pod.Name))
 err := framework.DeletePodWithWait(f, f.ClientSet, pod)
-Expect(err).NotTo(HaveOccurred(), "while deleting pod")
+framework.ExpectNoError(err, "while deleting pod")
 }
 func testPodFailSubpath(f *framework.Framework, pod *v1.Pod, allowContainerTerminationError bool) {
@@ -716,7 +716,7 @@ func testPodFailSubpathError(f *framework.Framework, pod *v1.Pod, errorMsg strin
 }()
 By("Checking for subpath error in container status")
 err = waitForPodSubpathError(f, pod, allowContainerTerminationError)
-Expect(err).NotTo(HaveOccurred(), "while waiting for subpath failure")
+framework.ExpectNoError(err, "while waiting for subpath failure")
 }
 func findSubpathContainerName(pod *v1.Pod) string {


@@ -30,7 +30,6 @@ import (
 "time"
 . "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 clientset "k8s.io/client-go/kubernetes"
@@ -133,7 +132,7 @@ func (t *volumeIOTestSuite) defineTests(driver TestDriver, pattern testpatterns.
 FSGroup: fsGroup,
 }
 err := testVolumeIO(f, cs, convertTestConfig(l.config), *l.resource.volSource, &podSec, testFile, fileSizes)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 })
 }


@@ -168,16 +168,16 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 By("Creating sc")
 l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 By("Creating pv and pvc")
 l.pv, err = l.cs.CoreV1().PersistentVolumes().Create(l.pv)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 // Prebind pv
 l.pvc.Spec.VolumeName = l.pv.Name
 l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 framework.ExpectNoError(framework.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc))
@@ -199,16 +199,16 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 By("Creating sc")
 l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 By("Creating pv and pvc")
 l.pv, err = l.cs.CoreV1().PersistentVolumes().Create(l.pv)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 // Prebind pv
 l.pvc.Spec.VolumeName = l.pv.Name
 l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 framework.ExpectNoError(framework.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc))
@@ -219,7 +219,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 defer func() {
 framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
 }()
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 By("Checking if persistent volume exists as expected volume mode")
 utils.CheckVolumeModeOfPath(pod, pattern.VolMode, "/mnt/volume1")
@@ -239,11 +239,11 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 By("Creating sc")
 l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 By("Creating pv and pvc")
 l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, l.cs, l.pvc.Namespace, l.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
 Expect(err).To(HaveOccurred())
@@ -257,20 +257,20 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 By("Creating sc")
 l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 By("Creating pv and pvc")
 l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, l.cs, l.pvc.Namespace, l.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.pvc.Namespace).Get(l.pvc.Name, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 l.pv, err = l.cs.CoreV1().PersistentVolumes().Get(l.pvc.Spec.VolumeName, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 By("Creating pod")
 pod, err := framework.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
@@ -279,7 +279,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 defer func() {
 framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
 }()
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 By("Checking if persistent volume exists as expected volume mode")
 utils.CheckVolumeModeOfPath(pod, pattern.VolMode, "/mnt/volume1")


@@ -25,7 +25,6 @@ import (
 "fmt"
 . "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -226,5 +225,5 @@ func testScriptInPod(
 By(fmt.Sprintf("Deleting pod %s", pod.Name))
 err := framework.DeletePodWithWait(f, f.ClientSet, pod)
-Expect(err).NotTo(HaveOccurred(), "while deleting pod")
+framework.ExpectNoError(err, "while deleting pod")
 }


@@ -26,7 +26,6 @@ import (
 "strings"
 . "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -102,7 +101,7 @@ func (l *ltrMgr) setupLocalVolumeTmpfs(node *v1.Node, parameters map[string]stri
 hostDir := l.getTestDir()
 By(fmt.Sprintf("Creating tmpfs mount point on node %q at path %q", node.Name, hostDir))
 err := l.hostExec.IssueCommand(fmt.Sprintf("mkdir -p %q && sudo mount -t tmpfs -o size=10m tmpfs-%q %q", hostDir, hostDir, hostDir), node)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 return &LocalTestResource{
 Node: node,
 Path: hostDir,
@@ -112,11 +111,11 @@ func (l *ltrMgr) setupLocalVolumeTmpfs(node *v1.Node, parameters map[string]stri
 func (l *ltrMgr) cleanupLocalVolumeTmpfs(ltr *LocalTestResource) {
 By(fmt.Sprintf("Unmount tmpfs mount point on node %q at path %q", ltr.Node.Name, ltr.Path))
 err := l.hostExec.IssueCommand(fmt.Sprintf("sudo umount %q", ltr.Path), ltr.Node)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 By("Removing the test directory")
 err = l.hostExec.IssueCommand(fmt.Sprintf("rm -r %s", ltr.Path), ltr.Node)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 }
 // createAndSetupLoopDevice creates an empty file and associates a loop devie with it.
@@ -131,14 +130,14 @@ func (l *ltrMgr) createAndSetupLoopDevice(dir string, node *v1.Node, size int) {
 ddCmd := fmt.Sprintf("dd if=/dev/zero of=%s/file bs=4096 count=%d", dir, count)
 losetupCmd := fmt.Sprintf("sudo losetup -f %s/file", dir)
 err := l.hostExec.IssueCommand(fmt.Sprintf("%s && %s && %s", mkdirCmd, ddCmd, losetupCmd), node)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 }
 // findLoopDevice finds loop device path by its associated storage directory.
 func (l *ltrMgr) findLoopDevice(dir string, node *v1.Node) string {
 cmd := fmt.Sprintf("E2E_LOOP_DEV=$(sudo losetup | grep %s/file | awk '{ print $1 }') 2>&1 > /dev/null && echo ${E2E_LOOP_DEV}", dir)
 loopDevResult, err := l.hostExec.IssueCommandWithResult(cmd, node)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 return strings.TrimSpace(loopDevResult)
 }
@@ -159,7 +158,7 @@ func (l *ltrMgr) teardownLoopDevice(dir string, node *v1.Node) {
 By(fmt.Sprintf("Tear down block device %q on node %q at path %s/file", loopDev, node.Name, dir))
 losetupDeleteCmd := fmt.Sprintf("sudo losetup -d %s", loopDev)
 err := l.hostExec.IssueCommand(losetupDeleteCmd, node)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 return
 }
@@ -168,7 +167,7 @@ func (l *ltrMgr) cleanupLocalVolumeBlock(ltr *LocalTestResource) {
 By(fmt.Sprintf("Removing the test directory %s", ltr.loopDir))
 removeCmd := fmt.Sprintf("rm -r %s", ltr.loopDir)
 err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 }
 func (l *ltrMgr) setupLocalVolumeBlockFS(node *v1.Node, parameters map[string]string) *LocalTestResource {
@@ -178,7 +177,7 @@ func (l *ltrMgr) setupLocalVolumeBlockFS(node *v1.Node, parameters map[string]st
 // Format and mount at loopDir and give others rwx for read/write testing
 cmd := fmt.Sprintf("sudo mkfs -t ext4 %s && sudo mount -t ext4 %s %s && sudo chmod o+rwx %s", loopDev, loopDev, loopDir, loopDir)
 err := l.hostExec.IssueCommand(cmd, node)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 return &LocalTestResource{
 Node: node,
 Path: loopDir,
@@ -189,7 +188,7 @@ func (l *ltrMgr) setupLocalVolumeBlockFS(node *v1.Node, parameters map[string]st
 func (l *ltrMgr) cleanupLocalVolumeBlockFS(ltr *LocalTestResource) {
 umountCmd := fmt.Sprintf("sudo umount %s", ltr.Path)
 err := l.hostExec.IssueCommand(umountCmd, ltr.Node)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 l.cleanupLocalVolumeBlock(ltr)
 }
@@ -197,7 +196,7 @@ func (l *ltrMgr) setupLocalVolumeDirectory(node *v1.Node, parameters map[string]
 hostDir := l.getTestDir()
 mkdirCmd := fmt.Sprintf("mkdir -p %s", hostDir)
 err := l.hostExec.IssueCommand(mkdirCmd, node)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 return &LocalTestResource{
 Node: node,
 Path: hostDir,
@@ -208,7 +207,7 @@ func (l *ltrMgr) cleanupLocalVolumeDirectory(ltr *LocalTestResource) {
 By("Removing the test directory")
 removeCmd := fmt.Sprintf("rm -r %s", ltr.Path)
 err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
 }
 func (l *ltrMgr) setupLocalVolumeDirectoryLink(node *v1.Node, parameters map[string]string) *LocalTestResource {
@ -216,7 +215,7 @@ func (l *ltrMgr) setupLocalVolumeDirectoryLink(node *v1.Node, parameters map[str
hostDirBackend := hostDir + "-backend" hostDirBackend := hostDir + "-backend"
cmd := fmt.Sprintf("mkdir %s && sudo ln -s %s %s", hostDirBackend, hostDirBackend, hostDir) cmd := fmt.Sprintf("mkdir %s && sudo ln -s %s %s", hostDirBackend, hostDirBackend, hostDir)
err := l.hostExec.IssueCommand(cmd, node) err := l.hostExec.IssueCommand(cmd, node)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
return &LocalTestResource{ return &LocalTestResource{
Node: node, Node: node,
Path: hostDir, Path: hostDir,
@ -229,14 +228,14 @@ func (l *ltrMgr) cleanupLocalVolumeDirectoryLink(ltr *LocalTestResource) {
hostDirBackend := hostDir + "-backend" hostDirBackend := hostDir + "-backend"
removeCmd := fmt.Sprintf("sudo rm -r %s && rm -r %s", hostDir, hostDirBackend) removeCmd := fmt.Sprintf("sudo rm -r %s && rm -r %s", hostDir, hostDirBackend)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node) err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
} }
func (l *ltrMgr) setupLocalVolumeDirectoryBindMounted(node *v1.Node, parameters map[string]string) *LocalTestResource { func (l *ltrMgr) setupLocalVolumeDirectoryBindMounted(node *v1.Node, parameters map[string]string) *LocalTestResource {
hostDir := l.getTestDir() hostDir := l.getTestDir()
cmd := fmt.Sprintf("mkdir %s && sudo mount --bind %s %s", hostDir, hostDir, hostDir) cmd := fmt.Sprintf("mkdir %s && sudo mount --bind %s %s", hostDir, hostDir, hostDir)
err := l.hostExec.IssueCommand(cmd, node) err := l.hostExec.IssueCommand(cmd, node)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
return &LocalTestResource{ return &LocalTestResource{
Node: node, Node: node,
Path: hostDir, Path: hostDir,
@ -248,7 +247,7 @@ func (l *ltrMgr) cleanupLocalVolumeDirectoryBindMounted(ltr *LocalTestResource)
hostDir := ltr.Path hostDir := ltr.Path
removeCmd := fmt.Sprintf("sudo umount %s && rm -r %s", hostDir, hostDir) removeCmd := fmt.Sprintf("sudo umount %s && rm -r %s", hostDir, hostDir)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node) err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
} }
func (l *ltrMgr) setupLocalVolumeDirectoryLinkBindMounted(node *v1.Node, parameters map[string]string) *LocalTestResource { func (l *ltrMgr) setupLocalVolumeDirectoryLinkBindMounted(node *v1.Node, parameters map[string]string) *LocalTestResource {
@ -256,7 +255,7 @@ func (l *ltrMgr) setupLocalVolumeDirectoryLinkBindMounted(node *v1.Node, paramet
hostDirBackend := hostDir + "-backend" hostDirBackend := hostDir + "-backend"
cmd := fmt.Sprintf("mkdir %s && sudo mount --bind %s %s && sudo ln -s %s %s", hostDirBackend, hostDirBackend, hostDirBackend, hostDirBackend, hostDir) cmd := fmt.Sprintf("mkdir %s && sudo mount --bind %s %s && sudo ln -s %s %s", hostDirBackend, hostDirBackend, hostDirBackend, hostDirBackend, hostDir)
err := l.hostExec.IssueCommand(cmd, node) err := l.hostExec.IssueCommand(cmd, node)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
return &LocalTestResource{ return &LocalTestResource{
Node: node, Node: node,
Path: hostDir, Path: hostDir,
@ -269,12 +268,12 @@ func (l *ltrMgr) cleanupLocalVolumeDirectoryLinkBindMounted(ltr *LocalTestResour
hostDirBackend := hostDir + "-backend" hostDirBackend := hostDir + "-backend"
removeCmd := fmt.Sprintf("sudo rm %s && sudo umount %s && rm -r %s", hostDir, hostDirBackend, hostDirBackend) removeCmd := fmt.Sprintf("sudo rm %s && sudo umount %s && rm -r %s", hostDir, hostDirBackend, hostDirBackend)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node) err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
} }
func (l *ltrMgr) setupLocalVolumeGCELocalSSD(node *v1.Node, parameters map[string]string) *LocalTestResource { func (l *ltrMgr) setupLocalVolumeGCELocalSSD(node *v1.Node, parameters map[string]string) *LocalTestResource {
res, err := l.hostExec.IssueCommandWithResult("ls /mnt/disks/by-uuid/google-local-ssds-scsi-fs/", node) res, err := l.hostExec.IssueCommandWithResult("ls /mnt/disks/by-uuid/google-local-ssds-scsi-fs/", node)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
dirName := strings.Fields(res)[0] dirName := strings.Fields(res)[0]
hostDir := "/mnt/disks/by-uuid/google-local-ssds-scsi-fs/" + dirName hostDir := "/mnt/disks/by-uuid/google-local-ssds-scsi-fs/" + dirName
return &LocalTestResource{ return &LocalTestResource{
@ -287,7 +286,7 @@ func (l *ltrMgr) cleanupLocalVolumeGCELocalSSD(ltr *LocalTestResource) {
// This filesystem is attached in cluster initialization, we clean all files to make it reusable. // This filesystem is attached in cluster initialization, we clean all files to make it reusable.
removeCmd := fmt.Sprintf("find '%s' -mindepth 1 -maxdepth 1 -print0 | xargs -r -0 rm -rf", ltr.Path) removeCmd := fmt.Sprintf("find '%s' -mindepth 1 -maxdepth 1 -print0 | xargs -r -0 rm -rf", ltr.Path)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node) err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
} }
func (l *ltrMgr) Create(node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource { func (l *ltrMgr) Create(node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource {
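Every hunk in this file makes the same mechanical substitution: the Gomega matcher chain is replaced by the e2e framework helper, which takes the same optional printf-style description. A minimal, self-contained sketch of the two styles side by side; the package name, spec text, and the failing step (doSomething) are illustrative only and do not come from this file:

package storage

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"k8s.io/kubernetes/test/e2e/framework"
)

// doSomething is a hypothetical step that can fail; it exists only for this sketch.
func doSomething() error { return nil }

var _ = Describe("assertion styles", func() {
	It("fails the spec on error in either style", func() {
		err := doSomething()

		// Old style: Gomega matcher with an optional printf-style description.
		Expect(err).NotTo(HaveOccurred(), "while doing something on node %q", "node-1")

		// New style: framework helper with the same optional description;
		// this is the form the hunks above switch to.
		framework.ExpectNoError(err, "while doing something on node %q", "node-1")
	})
})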


@ -63,11 +63,11 @@ func VerifyExecInPodSucceed(pod *v1.Pod, bashExec string) {
if err != nil { if err != nil {
if err, ok := err.(uexec.CodeExitError); ok { if err, ok := err.(uexec.CodeExitError); ok {
exitCode := err.ExitStatus() exitCode := err.ExitStatus()
Expect(err).NotTo(HaveOccurred(), framework.ExpectNoError(err,
"%q should succeed, but failed with exit code %d and error message %q", "%q should succeed, but failed with exit code %d and error message %q",
bashExec, exitCode, err) bashExec, exitCode, err)
} else { } else {
Expect(err).NotTo(HaveOccurred(), framework.ExpectNoError(err,
"%q should succeed, but failed with error message %q", "%q should succeed, but failed with error message %q",
bashExec, err) bashExec, err)
} }
@ -84,7 +84,7 @@ func VerifyExecInPodFail(pod *v1.Pod, bashExec string, exitCode int) {
"%q should fail with exit code %d, but failed with exit code %d and error message %q", "%q should fail with exit code %d, but failed with exit code %d and error message %q",
bashExec, exitCode, actualExitCode, err) bashExec, exitCode, actualExitCode, err)
} else { } else {
Expect(err).NotTo(HaveOccurred(), framework.ExpectNoError(err,
"%q should fail with exit code %d, but failed with error message %q", "%q should fail with exit code %d, but failed with error message %q",
bashExec, exitCode, err) bashExec, exitCode, err)
} }
@ -105,19 +105,19 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
kubeletPid := "" kubeletPid := ""
nodeIP, err := framework.GetHostExternalAddress(c, pod) nodeIP, err := framework.GetHostExternalAddress(c, pod)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
nodeIP = nodeIP + ":22" nodeIP = nodeIP + ":22"
framework.Logf("Checking if sudo command is present") framework.Logf("Checking if sudo command is present")
sshResult, err := framework.SSH("sudo --version", nodeIP, framework.TestContext.Provider) sshResult, err := framework.SSH("sudo --version", nodeIP, framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName)) framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
if !strings.Contains(sshResult.Stderr, "command not found") { if !strings.Contains(sshResult.Stderr, "command not found") {
sudoPresent = true sudoPresent = true
} }
framework.Logf("Checking if systemctl command is present") framework.Logf("Checking if systemctl command is present")
sshResult, err = framework.SSH("systemctl --version", nodeIP, framework.TestContext.Provider) sshResult, err = framework.SSH("systemctl --version", nodeIP, framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName)) framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
if !strings.Contains(sshResult.Stderr, "command not found") { if !strings.Contains(sshResult.Stderr, "command not found") {
command = fmt.Sprintf("systemctl %s kubelet", string(kOp)) command = fmt.Sprintf("systemctl %s kubelet", string(kOp))
systemctlPresent = true systemctlPresent = true
@ -134,7 +134,7 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
framework.Logf("Attempting `%s`", command) framework.Logf("Attempting `%s`", command)
sshResult, err = framework.SSH(command, nodeIP, framework.TestContext.Provider) sshResult, err = framework.SSH(command, nodeIP, framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName)) framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
framework.LogSSHResult(sshResult) framework.LogSSHResult(sshResult)
Expect(sshResult.Code).To(BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult) Expect(sshResult.Code).To(BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult)
@ -178,7 +178,7 @@ func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) s
} }
framework.Logf("Attempting `%s`", command) framework.Logf("Attempting `%s`", command)
sshResult, err := framework.SSH(command, nodeIP, framework.TestContext.Provider) sshResult, err := framework.SSH(command, nodeIP, framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("SSH to Node %q errored.", nodeIP)) framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", nodeIP))
framework.LogSSHResult(sshResult) framework.LogSSHResult(sshResult)
Expect(sshResult.Code).To(BeZero(), "Failed to get kubelet PID") Expect(sshResult.Code).To(BeZero(), "Failed to get kubelet PID")
Expect(sshResult.Stdout).NotTo(BeEmpty(), "Kubelet Main PID should not be Empty") Expect(sshResult.Stdout).NotTo(BeEmpty(), "Kubelet Main PID should not be Empty")
@ -191,7 +191,7 @@ func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Fra
file := "/mnt/_SUCCESS" file := "/mnt/_SUCCESS"
out, err := PodExec(clientPod, fmt.Sprintf("touch %s", file)) out, err := PodExec(clientPod, fmt.Sprintf("touch %s", file))
framework.Logf(out) framework.Logf(out)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By("Restarting kubelet") By("Restarting kubelet")
KubeletCommand(KRestart, c, clientPod) KubeletCommand(KRestart, c, clientPod)
@ -199,7 +199,7 @@ func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Fra
By("Testing that written file is accessible.") By("Testing that written file is accessible.")
out, err = PodExec(clientPod, fmt.Sprintf("cat %s", file)) out, err = PodExec(clientPod, fmt.Sprintf("cat %s", file))
framework.Logf(out) framework.Logf(out)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, file) framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, file)
} }
@ -207,20 +207,20 @@ func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Fra
// forceDelete is true indicating whether the pod is forcefully deleted. // forceDelete is true indicating whether the pod is forcefully deleted.
func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool) { func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool) {
nodeIP, err := framework.GetHostExternalAddress(c, clientPod) nodeIP, err := framework.GetHostExternalAddress(c, clientPod)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
nodeIP = nodeIP + ":22" nodeIP = nodeIP + ":22"
By("Expecting the volume mount to be found.") By("Expecting the volume mount to be found.")
result, err := framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider) result, err := framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
framework.LogSSHResult(result) framework.LogSSHResult(result)
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.") framework.ExpectNoError(err, "Encountered SSH error.")
Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code)) Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
if checkSubpath { if checkSubpath {
By("Expecting the volume subpath mount to be found.") By("Expecting the volume subpath mount to be found.")
result, err := framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider) result, err := framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
framework.LogSSHResult(result) framework.LogSSHResult(result)
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.") framework.ExpectNoError(err, "Encountered SSH error.")
Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code)) Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
} }
@ -237,13 +237,13 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
} else { } else {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, &metav1.DeleteOptions{}) err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, &metav1.DeleteOptions{})
} }
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By("Starting the kubelet and waiting for pod to delete.") By("Starting the kubelet and waiting for pod to delete.")
KubeletCommand(KStart, c, clientPod) KubeletCommand(KStart, c, clientPod)
err = f.WaitForPodNotFound(clientPod.Name, framework.PodDeleteTimeout) err = f.WaitForPodNotFound(clientPod.Name, framework.PodDeleteTimeout)
if err != nil { if err != nil {
Expect(err).NotTo(HaveOccurred(), "Expected pod to be not found.") framework.ExpectNoError(err, "Expected pod to be not found.")
} }
if forceDelete { if forceDelete {
@ -255,7 +255,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
By("Expecting the volume mount not to be found.") By("Expecting the volume mount not to be found.")
result, err = framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider) result, err = framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
framework.LogSSHResult(result) framework.LogSSHResult(result)
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.") framework.ExpectNoError(err, "Encountered SSH error.")
Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).") Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).")
framework.Logf("Volume unmounted on node %s", clientPod.Spec.NodeName) framework.Logf("Volume unmounted on node %s", clientPod.Spec.NodeName)
@ -263,7 +263,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
By("Expecting the volume subpath mount not to be found.") By("Expecting the volume subpath mount not to be found.")
result, err = framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider) result, err = framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
framework.LogSSHResult(result) framework.LogSSHResult(result)
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.") framework.ExpectNoError(err, "Encountered SSH error.")
Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).") Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
framework.Logf("Subpath volume unmounted on node %s", clientPod.Spec.NodeName) framework.Logf("Subpath volume unmounted on node %s", clientPod.Spec.NodeName)
} }
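The SSH checks in this file all follow the same shape: run a command on the node, log the result, assert that the SSH transport itself did not fail, then assert on the output. A hedged sketch of that shape, assuming the same framework helpers used in the hunks above; podUID and nodeIP are placeholder inputs, not values from this file:

package storage

import (
	"fmt"

	. "github.com/onsi/gomega"

	"k8s.io/kubernetes/test/e2e/framework"
)

// expectNoVolumeMountOnNode mirrors the mount check converted above: run the
// grep over SSH, log the result, fail on SSH transport errors, then assert on
// stdout being empty (no mount found).
func expectNoVolumeMountOnNode(podUID, nodeIP string) {
	cmd := fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", podUID)
	result, err := framework.SSH(cmd, nodeIP+":22", framework.TestContext.Provider)
	framework.LogSSHResult(result)
	framework.ExpectNoError(err, "Encountered SSH error.")
	Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).")
}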


@ -278,7 +278,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Expect(volume).NotTo(BeNil(), "get bound PV") Expect(volume).NotTo(BeNil(), "get bound PV")
err := checkGCEPD(volume, "pd-ssd") err := checkGCEPD(volume, "pd-ssd")
Expect(err).NotTo(HaveOccurred(), "checkGCEPD pd-ssd") framework.ExpectNoError(err, "checkGCEPD pd-ssd")
}, },
}, },
{ {
@ -295,7 +295,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Expect(volume).NotTo(BeNil(), "get bound PV") Expect(volume).NotTo(BeNil(), "get bound PV")
err := checkGCEPD(volume, "pd-standard") err := checkGCEPD(volume, "pd-standard")
Expect(err).NotTo(HaveOccurred(), "checkGCEPD pd-standard") framework.ExpectNoError(err, "checkGCEPD pd-standard")
}, },
}, },
// AWS // AWS
@ -314,7 +314,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Expect(volume).NotTo(BeNil(), "get bound PV") Expect(volume).NotTo(BeNil(), "get bound PV")
err := checkAWSEBS(volume, "gp2", false) err := checkAWSEBS(volume, "gp2", false)
Expect(err).NotTo(HaveOccurred(), "checkAWSEBS gp2") framework.ExpectNoError(err, "checkAWSEBS gp2")
}, },
}, },
{ {
@ -332,7 +332,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Expect(volume).NotTo(BeNil(), "get bound PV") Expect(volume).NotTo(BeNil(), "get bound PV")
err := checkAWSEBS(volume, "io1", false) err := checkAWSEBS(volume, "io1", false)
Expect(err).NotTo(HaveOccurred(), "checkAWSEBS io1") framework.ExpectNoError(err, "checkAWSEBS io1")
}, },
}, },
{ {
@ -349,7 +349,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Expect(volume).NotTo(BeNil(), "get bound PV") Expect(volume).NotTo(BeNil(), "get bound PV")
err := checkAWSEBS(volume, "sc1", false) err := checkAWSEBS(volume, "sc1", false)
Expect(err).NotTo(HaveOccurred(), "checkAWSEBS sc1") framework.ExpectNoError(err, "checkAWSEBS sc1")
}, },
}, },
{ {
@ -366,7 +366,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Expect(volume).NotTo(BeNil(), "get bound PV") Expect(volume).NotTo(BeNil(), "get bound PV")
err := checkAWSEBS(volume, "st1", false) err := checkAWSEBS(volume, "st1", false)
Expect(err).NotTo(HaveOccurred(), "checkAWSEBS st1") framework.ExpectNoError(err, "checkAWSEBS st1")
}, },
}, },
{ {
@ -383,7 +383,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Expect(volume).NotTo(BeNil(), "get bound PV") Expect(volume).NotTo(BeNil(), "get bound PV")
err := checkAWSEBS(volume, "gp2", true) err := checkAWSEBS(volume, "gp2", true)
Expect(err).NotTo(HaveOccurred(), "checkAWSEBS gp2 encrypted") framework.ExpectNoError(err, "checkAWSEBS gp2 encrypted")
}, },
}, },
// OpenStack generic tests (works on all OpenStack deployments) // OpenStack generic tests (works on all OpenStack deployments)
@ -467,7 +467,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
class := newBetaStorageClass(*betaTest, "beta") class := newBetaStorageClass(*betaTest, "beta")
// we need to create the class manually, testDynamicProvisioning does not accept beta class // we need to create the class manually, testDynamicProvisioning does not accept beta class
class, err := c.StorageV1beta1().StorageClasses().Create(class) class, err := c.StorageV1beta1().StorageClasses().Create(class)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
defer deleteStorageClass(c, class.Name) defer deleteStorageClass(c, class.Name)
betaTest.Client = c betaTest.Client = c
@ -496,7 +496,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Expect(volume).NotTo(BeNil(), "get bound PV") Expect(volume).NotTo(BeNil(), "get bound PV")
err := checkGCEPD(volume, "pd-standard") err := checkGCEPD(volume, "pd-standard")
Expect(err).NotTo(HaveOccurred(), "checkGCEPD") framework.ExpectNoError(err, "checkGCEPD")
}, },
} }
test.Class = newStorageClass(test, ns, "reclaimpolicy") test.Class = newStorageClass(test, ns, "reclaimpolicy")
@ -526,15 +526,15 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
managedZones := sets.NewString() // subset of allZones managedZones := sets.NewString() // subset of allZones
gceCloud, err := gce.GetGCECloud() gceCloud, err := gce.GetGCECloud()
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
// Get all k8s managed zones (same as zones with nodes in them for test) // Get all k8s managed zones (same as zones with nodes in them for test)
managedZones, err = gceCloud.GetAllZonesFromCloudProvider() managedZones, err = gceCloud.GetAllZonesFromCloudProvider()
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
// Get a list of all zones in the project // Get a list of all zones in the project
zones, err := gceCloud.ComputeServices().GA.Zones.List(framework.TestContext.CloudConfig.ProjectID).Do() zones, err := gceCloud.ComputeServices().GA.Zones.List(framework.TestContext.CloudConfig.ProjectID).Do()
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
for _, z := range zones.Items { for _, z := range zones.Items {
allZones.Insert(z.Name) allZones.Insert(z.Name)
} }
@ -557,14 +557,14 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
} }
sc := newStorageClass(test, ns, suffix) sc := newStorageClass(test, ns, suffix)
sc, err = c.StorageV1().StorageClasses().Create(sc) sc, err = c.StorageV1().StorageClasses().Create(sc)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
defer deleteStorageClass(c, sc.Name) defer deleteStorageClass(c, sc.Name)
By("Creating a claim and expecting it to timeout") By("Creating a claim and expecting it to timeout")
pvc := newClaim(test, ns, suffix) pvc := newClaim(test, ns, suffix)
pvc.Spec.StorageClassName = &sc.Name pvc.Spec.StorageClassName = &sc.Name
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc) pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
defer func() { defer func() {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
}() }()
@ -594,7 +594,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
class := newStorageClass(test, ns, "race") class := newStorageClass(test, ns, "race")
class, err := c.StorageV1().StorageClasses().Create(class) class, err := c.StorageV1().StorageClasses().Create(class)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
defer deleteStorageClass(c, class.Name) defer deleteStorageClass(c, class.Name)
// To increase chance of detection, attempt multiple iterations // To increase chance of detection, attempt multiple iterations
@ -603,13 +603,13 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
claim := newClaim(test, ns, suffix) claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name claim.Spec.StorageClassName = &class.Name
tmpClaim, err := framework.CreatePVC(c, ns, claim) tmpClaim, err := framework.CreatePVC(c, ns, claim)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, tmpClaim.Name, ns)) framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, tmpClaim.Name, ns))
} }
By(fmt.Sprintf("Checking for residual PersistentVolumes associated with StorageClass %s", class.Name)) By(fmt.Sprintf("Checking for residual PersistentVolumes associated with StorageClass %s", class.Name))
residualPVs, err = waitForProvisionedVolumesDeleted(c, class.Name) residualPVs, err = waitForProvisionedVolumesDeleted(c, class.Name)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
// Cleanup the test resources before breaking // Cleanup the test resources before breaking
defer deleteProvisionedVolumesAndDisks(c, residualPVs) defer deleteProvisionedVolumesAndDisks(c, residualPVs)
@ -695,7 +695,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
By("waiting for the PV to get deleted") By("waiting for the PV to get deleted")
err = framework.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, framework.PVDeletingTimeout) err = framework.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, framework.PVDeletingTimeout)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
}) })
}) })
@ -786,7 +786,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
By("creating a claim with default storageclass and expecting it to timeout") By("creating a claim with default storageclass and expecting it to timeout")
claim := newClaim(test, ns, "default") claim := newClaim(test, ns, "default")
claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim) claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
defer func() { defer func() {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, claim.Name, ns)) framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, claim.Name, ns))
}() }()
@ -796,7 +796,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
framework.Logf(err.Error()) framework.Logf(err.Error())
claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{}) claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
Expect(claim.Status.Phase).To(Equal(v1.ClaimPending)) Expect(claim.Status.Phase).To(Equal(v1.ClaimPending))
}) })
@ -817,7 +817,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
By("creating a claim with default storageclass and expecting it to timeout") By("creating a claim with default storageclass and expecting it to timeout")
claim := newClaim(test, ns, "default") claim := newClaim(test, ns, "default")
claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim) claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
defer func() { defer func() {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, claim.Name, ns)) framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, claim.Name, ns))
}() }()
@ -827,7 +827,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
framework.Logf(err.Error()) framework.Logf(err.Error())
claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{}) claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
Expect(claim.Status.Phase).To(Equal(v1.ClaimPending)) Expect(claim.Status.Phase).To(Equal(v1.ClaimPending))
}) })
}) })
@ -872,7 +872,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
suffix := fmt.Sprintf("invalid-aws") suffix := fmt.Sprintf("invalid-aws")
class := newStorageClass(test, ns, suffix) class := newStorageClass(test, ns, suffix)
class, err := c.StorageV1().StorageClasses().Create(class) class, err := c.StorageV1().StorageClasses().Create(class)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
defer func() { defer func() {
framework.Logf("deleting storage class %s", class.Name) framework.Logf("deleting storage class %s", class.Name)
framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(class.Name, nil)) framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(class.Name, nil))
@ -882,7 +882,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
claim := newClaim(test, ns, suffix) claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name claim.Spec.StorageClassName = &class.Name
claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim) claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
defer func() { defer func() {
framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name) framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil) err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)
@ -897,7 +897,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
// ClaimProvisionTimeout in the very same loop. // ClaimProvisionTimeout in the very same loop.
err = wait.Poll(time.Second, framework.ClaimProvisionTimeout, func() (bool, error) { err = wait.Poll(time.Second, framework.ClaimProvisionTimeout, func() (bool, error) {
events, err := c.CoreV1().Events(claim.Namespace).List(metav1.ListOptions{}) events, err := c.CoreV1().Events(claim.Namespace).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
for _, event := range events.Items { for _, event := range events.Items {
if strings.Contains(event.Message, "failed to create encrypted volume: the volume disappeared after creation, most likely due to inaccessible KMS encryption key") { if strings.Contains(event.Message, "failed to create encrypted volume: the volume disappeared after creation, most likely due to inaccessible KMS encryption key") {
return true, nil return true, nil
@ -919,7 +919,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
framework.Logf("The test missed event about failed provisioning, but checked that no volume was provisioned for %v", framework.ClaimProvisionTimeout) framework.Logf("The test missed event about failed provisioning, but checked that no volume was provisioned for %v", framework.ClaimProvisionTimeout)
err = nil err = nil
} }
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
}) })
}) })
Describe("DynamicProvisioner delayed binding [Slow]", func() { Describe("DynamicProvisioner delayed binding [Slow]", func() {
@ -995,13 +995,13 @@ func getDefaultStorageClassName(c clientset.Interface) string {
func verifyDefaultStorageClass(c clientset.Interface, scName string, expectedDefault bool) { func verifyDefaultStorageClass(c clientset.Interface, scName string, expectedDefault bool) {
sc, err := c.StorageV1().StorageClasses().Get(scName, metav1.GetOptions{}) sc, err := c.StorageV1().StorageClasses().Get(scName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
Expect(storageutil.IsDefaultAnnotation(sc.ObjectMeta)).To(Equal(expectedDefault)) Expect(storageutil.IsDefaultAnnotation(sc.ObjectMeta)).To(Equal(expectedDefault))
} }
func updateDefaultStorageClass(c clientset.Interface, scName string, defaultStr string) { func updateDefaultStorageClass(c clientset.Interface, scName string, defaultStr string) {
sc, err := c.StorageV1().StorageClasses().Get(scName, metav1.GetOptions{}) sc, err := c.StorageV1().StorageClasses().Get(scName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
if defaultStr == "" { if defaultStr == "" {
delete(sc.Annotations, storageutil.BetaIsDefaultStorageClassAnnotation) delete(sc.Annotations, storageutil.BetaIsDefaultStorageClassAnnotation)
@ -1015,7 +1015,7 @@ func updateDefaultStorageClass(c clientset.Interface, scName string, defaultStr
} }
sc, err = c.StorageV1().StorageClasses().Update(sc) sc, err = c.StorageV1().StorageClasses().Update(sc)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
expectedDefault := false expectedDefault := false
if defaultStr == "true" { if defaultStr == "true" {
@ -1223,7 +1223,7 @@ func waitForProvisionedVolumesDeleted(c clientset.Interface, scName string) ([]*
func deleteStorageClass(c clientset.Interface, className string) { func deleteStorageClass(c clientset.Interface, className string) {
err := c.StorageV1().StorageClasses().Delete(className, nil) err := c.StorageV1().StorageClasses().Delete(className, nil)
if err != nil && !apierrs.IsNotFound(err) { if err != nil && !apierrs.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
} }
} }
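deleteStorageClass keeps the one place in this file where an error is deliberately tolerated: a NotFound during cleanup means the class is already gone and must not fail the spec. A minimal sketch of that idiom under the same assumptions; the function name, client, and class name are placeholders:

package storage

import (
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework"
)

// deleteClassIfPresent sketches the cleanup idiom above: ignore NotFound,
// and route any other error through framework.ExpectNoError.
func deleteClassIfPresent(c clientset.Interface, className string) {
	err := c.StorageV1().StorageClasses().Delete(className, nil)
	if err != nil && !apierrs.IsNotFound(err) {
		framework.ExpectNoError(err, "deleting storage class %s", className)
	}
}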


@ -76,7 +76,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
if volumePath == "" { if volumePath == "" {
volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
pvConfig = framework.PersistentVolumeConfig{ pvConfig = framework.PersistentVolumeConfig{
NamePrefix: "vspherepv-", NamePrefix: "vspherepv-",
Labels: volLabel, Labels: volLabel,
@ -96,17 +96,17 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
} }
By("Creating the PV and PVC") By("Creating the PV and PVC")
pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false) pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc)) framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
By("Creating the Client Pod") By("Creating the Client Pod")
clientPod, err = framework.CreateClientPod(c, ns, pvc) clientPod, err = framework.CreateClientPod(c, ns, pvc)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
node = clientPod.Spec.NodeName node = clientPod.Spec.NodeName
By("Verify disk should be attached to the node") By("Verify disk should be attached to the node")
isAttached, err := diskIsAttached(volumePath, node) isAttached, err := diskIsAttached(volumePath, node)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
Expect(isAttached).To(BeTrue(), "disk is not attached with the node") Expect(isAttached).To(BeTrue(), "disk is not attached with the node")
}) })
@ -207,10 +207,10 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of vsphere volume", func() { It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of vsphere volume", func() {
By("Deleting the Namespace") By("Deleting the Namespace")
err := c.CoreV1().Namespaces().Delete(ns, nil) err := c.CoreV1().Namespaces().Delete(ns, nil)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
err = framework.WaitForNamespacesDeleted(c, []string{ns}, 3*time.Minute) err = framework.WaitForNamespacesDeleted(c, []string{ns}, 3*time.Minute)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By("Verifying Persistent Disk detaches") By("Verifying Persistent Disk detaches")
waitForVSphereDiskToDetach(volumePath, node) waitForVSphereDiskToDetach(volumePath, node)


@ -76,14 +76,14 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
It("should delete persistent volume when reclaimPolicy set to delete and associated claim is deleted", func() { It("should delete persistent volume when reclaimPolicy set to delete and associated claim is deleted", func() {
var err error var err error
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete) volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
deletePVCAfterBind(c, ns, pvc, pv) deletePVCAfterBind(c, ns, pvc, pv)
pvc = nil pvc = nil
By("verify pv is deleted") By("verify pv is deleted")
err = framework.WaitForPersistentVolumeDeleted(c, pv.Name, 3*time.Second, 300*time.Second) err = framework.WaitForPersistentVolumeDeleted(c, pv.Name, 3*time.Second, 300*time.Second)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
pv = nil pv = nil
volumePath = "" volumePath = ""
@ -105,13 +105,13 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
var err error var err error
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete) volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
// Wait for PV and PVC to Bind // Wait for PV and PVC to Bind
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc)) framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
By("Creating the Pod") By("Creating the Pod")
pod, err := framework.CreateClientPod(c, ns, pvc) pod, err := framework.CreateClientPod(c, ns, pvc)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By("Deleting the Claim") By("Deleting the Claim")
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
@ -119,7 +119,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
// Verify PV is Present, after PVC is deleted and PV status should be Failed. // Verify PV is Present, after PVC is deleted and PV status should be Failed.
pv, err := c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{}) pv, err := c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
Expect(framework.WaitForPersistentVolumePhase(v1.VolumeFailed, c, pv.Name, 1*time.Second, 60*time.Second)).NotTo(HaveOccurred()) Expect(framework.WaitForPersistentVolumePhase(v1.VolumeFailed, c, pv.Name, 1*time.Second, 60*time.Second)).NotTo(HaveOccurred())
By("Verify the volume is attached to the node") By("Verify the volume is attached to the node")
@ -166,7 +166,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
var volumeFileContent = "hello from vsphere cloud provider, Random Content is :" + strconv.FormatInt(time.Now().UnixNano(), 10) var volumeFileContent = "hello from vsphere cloud provider, Random Content is :" + strconv.FormatInt(time.Now().UnixNano(), 10)
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimRetain) volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimRetain)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
writeContentToVSpherePV(c, pvc, volumeFileContent) writeContentToVSpherePV(c, pvc, volumeFileContent)
@ -177,18 +177,18 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
By("Verify PV is retained") By("Verify PV is retained")
framework.Logf("Waiting for PV %v to become Released", pv.Name) framework.Logf("Waiting for PV %v to become Released", pv.Name)
err = framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 3*time.Second, 300*time.Second) err = framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 3*time.Second, 300*time.Second)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name) framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
By("Creating the PV for same volume path") By("Creating the PV for same volume path")
pv = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimRetain, nil) pv = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimRetain, nil)
pv, err = c.CoreV1().PersistentVolumes().Create(pv) pv, err = c.CoreV1().PersistentVolumes().Create(pv)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By("creating the pvc") By("creating the pvc")
pvc = getVSpherePersistentVolumeClaimSpec(ns, nil) pvc = getVSpherePersistentVolumeClaimSpec(ns, nil)
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc) pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By("wait for the pv and pvc to bind") By("wait for the pv and pvc to bind")
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc)) framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
@ -223,7 +223,7 @@ func testCleanupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *
By("running testCleanupVSpherePersistentVolumeReclaim") By("running testCleanupVSpherePersistentVolumeReclaim")
if len(volumePath) > 0 { if len(volumePath) > 0 {
err := nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef) err := nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
} }
if pv != nil { if pv != nil {
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name) framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
@ -244,6 +244,6 @@ func deletePVCAfterBind(c clientset.Interface, ns string, pvc *v1.PersistentVolu
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{}) pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
if !apierrs.IsNotFound(err) { if !apierrs.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
} }
} }


@ -20,7 +20,6 @@ import (
"time" "time"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -81,21 +80,21 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
}) })
It("should bind volume with claim for given label", func() { It("should bind volume with claim for given label", func() {
volumePath, pv_ssd, pvc_ssd, pvc_vvol, err = testSetupVSpherePVClabelselector(c, nodeInfo, ns, ssdlabels, vvollabels) volumePath, pv_ssd, pvc_ssd, pvc_vvol, err = testSetupVSpherePVClabelselector(c, nodeInfo, ns, ssdlabels, vvollabels)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By("wait for the pvc_ssd to bind with pv_ssd") By("wait for the pvc_ssd to bind with pv_ssd")
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv_ssd, pvc_ssd)) framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv_ssd, pvc_ssd))
By("Verify status of pvc_vvol is pending") By("Verify status of pvc_vvol is pending")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimPending, c, ns, pvc_vvol.Name, 3*time.Second, 300*time.Second) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimPending, c, ns, pvc_vvol.Name, 3*time.Second, 300*time.Second)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By("delete pvc_ssd") By("delete pvc_ssd")
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns), "Failed to delete PVC ", pvc_ssd.Name) framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns), "Failed to delete PVC ", pvc_ssd.Name)
By("verify pv_ssd is deleted") By("verify pv_ssd is deleted")
err = framework.WaitForPersistentVolumeDeleted(c, pv_ssd.Name, 3*time.Second, 300*time.Second) err = framework.WaitForPersistentVolumeDeleted(c, pv_ssd.Name, 3*time.Second, 300*time.Second)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
volumePath = "" volumePath = ""
By("delete pvc_vvol") By("delete pvc_vvol")


@ -17,9 +17,11 @@ limitations under the License.
package vsphere package vsphere
import ( import (
. "github.com/onsi/gomega"
"os" "os"
"strconv" "strconv"
. "github.com/onsi/gomega"
"k8s.io/kubernetes/test/e2e/framework"
) )
const ( const (
@ -72,6 +74,6 @@ func GetAndExpectStringEnvVar(varName string) string {
func GetAndExpectIntEnvVar(varName string) int { func GetAndExpectIntEnvVar(varName string) int {
varValue := GetAndExpectStringEnvVar(varName) varValue := GetAndExpectStringEnvVar(varName)
varIntValue, err := strconv.Atoi(varValue) varIntValue, err := strconv.Atoi(varValue)
Expect(err).NotTo(HaveOccurred(), "Error Parsing "+varName) framework.ExpectNoError(err, "Error Parsing "+varName)
return varIntValue return varIntValue
} }


@ -131,7 +131,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
} }
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(scname, scParams, nil)) sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(scname, scParams, nil))
Expect(sc).NotTo(BeNil(), "Storage class is empty") Expect(sc).NotTo(BeNil(), "Storage class is empty")
Expect(err).NotTo(HaveOccurred(), "Failed to create storage class") framework.ExpectNoError(err, "Failed to create storage class")
defer client.StorageV1().StorageClasses().Delete(scname, nil) defer client.StorageV1().StorageClasses().Delete(scname, nil)
scArrays[index] = sc scArrays[index] = sc
} }
@ -156,15 +156,15 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
pvcClaimList = append(pvcClaimList, getClaimsForPod(&pod, volumesPerPod)...) pvcClaimList = append(pvcClaimList, getClaimsForPod(&pod, volumesPerPod)...)
By("Deleting pod") By("Deleting pod")
err = framework.DeletePodWithWait(f, client, &pod) err = framework.DeletePodWithWait(f, client, &pod)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
} }
By("Waiting for volumes to be detached from the node") By("Waiting for volumes to be detached from the node")
err = waitForVSphereDisksToDetach(nodeVolumeMap) err = waitForVSphereDisksToDetach(nodeVolumeMap)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
for _, pvcClaim := range pvcClaimList { for _, pvcClaim := range pvcClaimList {
err = framework.DeletePersistentVolumeClaim(client, pvcClaim, namespace) err = framework.DeletePersistentVolumeClaim(client, pvcClaim, namespace)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
} }
}) })
}) })
@ -193,19 +193,19 @@ func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*s
for i := 0; i < volumesPerPod; i++ { for i := 0; i < volumesPerPod; i++ {
By("Creating PVC using the Storage Class") By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", sc[index%len(sc)])) pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", sc[index%len(sc)]))
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
pvclaims[i] = pvclaim pvclaims[i] = pvclaim
} }
By("Waiting for claim to be in bound phase") By("Waiting for claim to be in bound phase")
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By("Creating pod to attach PV to the node") By("Creating pod to attach PV to the node")
nodeSelector := nodeSelectorList[nodeSelectorIndex%len(nodeSelectorList)] nodeSelector := nodeSelectorList[nodeSelectorIndex%len(nodeSelectorList)]
// Create pod to attach Volume to Node // Create pod to attach Volume to Node
pod, err := framework.CreatePod(client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, false, "") pod, err := framework.CreatePod(client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, false, "")
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
for _, pv := range persistentvolumes { for _, pv := range persistentvolumes {
nodeVolumeMap[pod.Spec.NodeName] = append(nodeVolumeMap[pod.Spec.NodeName], pv.Spec.VsphereVolume.VolumePath) nodeVolumeMap[pod.Spec.NodeName] = append(nodeVolumeMap[pod.Spec.NodeName], pv.Spec.VsphereVolume.VolumePath)


@ -18,6 +18,7 @@ package vsphere
import ( import (
"fmt" "fmt"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
apierrs "k8s.io/apimachinery/pkg/api/errors" apierrs "k8s.io/apimachinery/pkg/api/errors"
@ -72,7 +73,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
scParameters["diskformat"] = "thin" scParameters["diskformat"] = "thin"
scSpec := getVSphereStorageClassSpec(storageclassname, scParameters, nil) scSpec := getVSphereStorageClassSpec(storageclassname, scParameters, nil)
sc, err := client.StorageV1().StorageClasses().Create(scSpec) sc, err := client.StorageV1().StorageClasses().Create(scSpec)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
defer client.StorageV1().StorageClasses().Delete(sc.Name, nil) defer client.StorageV1().StorageClasses().Delete(sc.Name, nil)
By("Creating statefulset") By("Creating statefulset")
@ -81,7 +82,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
replicas := *(statefulset.Spec.Replicas) replicas := *(statefulset.Spec.Replicas)
// Waiting for pods status to be Ready // Waiting for pods status to be Ready
statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas) statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas)
Expect(statefulsetTester.CheckMount(statefulset, mountPath)).NotTo(HaveOccurred()) framework.ExpectNoError(statefulsetTester.CheckMount(statefulset, mountPath))
ssPodsBeforeScaleDown := statefulsetTester.GetPodList(statefulset) ssPodsBeforeScaleDown := statefulsetTester.GetPodList(statefulset)
Expect(ssPodsBeforeScaleDown.Items).NotTo(BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) Expect(ssPodsBeforeScaleDown.Items).NotTo(BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
Expect(len(ssPodsBeforeScaleDown.Items) == int(replicas)).To(BeTrue(), "Number of Pods in the statefulset should match with number of replicas") Expect(len(ssPodsBeforeScaleDown.Items) == int(replicas)).To(BeTrue(), "Number of Pods in the statefulset should match with number of replicas")
@ -90,7 +91,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
volumesBeforeScaleDown := make(map[string]string) volumesBeforeScaleDown := make(map[string]string)
for _, sspod := range ssPodsBeforeScaleDown.Items { for _, sspod := range ssPodsBeforeScaleDown.Items {
_, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{}) _, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
for _, volumespec := range sspod.Spec.Volumes { for _, volumespec := range sspod.Spec.Volumes {
if volumespec.PersistentVolumeClaim != nil { if volumespec.PersistentVolumeClaim != nil {
volumePath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) volumePath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
@ -101,7 +102,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas-1)) By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas-1))
_, scaledownErr := statefulsetTester.Scale(statefulset, replicas-1) _, scaledownErr := statefulsetTester.Scale(statefulset, replicas-1)
Expect(scaledownErr).NotTo(HaveOccurred()) framework.ExpectNoError(scaledownErr)
statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas-1) statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas-1)
// After scale down, verify vsphere volumes are detached from deleted pods // After scale down, verify vsphere volumes are detached from deleted pods
@ -114,7 +115,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
if volumespec.PersistentVolumeClaim != nil { if volumespec.PersistentVolumeClaim != nil {
vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
framework.Logf("Waiting for Volume: %q to detach from Node: %q", vSpherediskPath, sspod.Spec.NodeName) framework.Logf("Waiting for Volume: %q to detach from Node: %q", vSpherediskPath, sspod.Spec.NodeName)
Expect(waitForVSphereDiskToDetach(vSpherediskPath, sspod.Spec.NodeName)).NotTo(HaveOccurred()) framework.ExpectNoError(waitForVSphereDiskToDetach(vSpherediskPath, sspod.Spec.NodeName))
} }
} }
} }
@ -122,7 +123,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
By(fmt.Sprintf("Scaling up statefulsets to number of Replica: %v", replicas)) By(fmt.Sprintf("Scaling up statefulsets to number of Replica: %v", replicas))
_, scaleupErr := statefulsetTester.Scale(statefulset, replicas) _, scaleupErr := statefulsetTester.Scale(statefulset, replicas)
Expect(scaleupErr).NotTo(HaveOccurred()) framework.ExpectNoError(scaleupErr)
statefulsetTester.WaitForStatusReplicas(statefulset, replicas) statefulsetTester.WaitForStatusReplicas(statefulset, replicas)
statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas) statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas)
@ -134,9 +135,9 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
By("Verify all volumes are attached to Nodes after Statefulsets is scaled up") By("Verify all volumes are attached to Nodes after Statefulsets is scaled up")
for _, sspod := range ssPodsAfterScaleUp.Items { for _, sspod := range ssPodsAfterScaleUp.Items {
err := framework.WaitForPodsReady(client, statefulset.Namespace, sspod.Name, 0) err := framework.WaitForPodsReady(client, statefulset.Namespace, sspod.Name, 0)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
pod, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{}) pod, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
for _, volumespec := range pod.Spec.Volumes { for _, volumespec := range pod.Spec.Volumes {
if volumespec.PersistentVolumeClaim != nil { if volumespec.PersistentVolumeClaim != nil {
vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
@ -145,7 +146,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
Expect(volumesBeforeScaleDown[vSpherediskPath] == "").To(BeFalse()) Expect(volumesBeforeScaleDown[vSpherediskPath] == "").To(BeFalse())
isVolumeAttached, verifyDiskAttachedError := diskIsAttached(vSpherediskPath, sspod.Spec.NodeName) isVolumeAttached, verifyDiskAttachedError := diskIsAttached(vSpherediskPath, sspod.Spec.NodeName)
Expect(isVolumeAttached).To(BeTrue()) Expect(isVolumeAttached).To(BeTrue())
Expect(verifyDiskAttachedError).NotTo(HaveOccurred()) framework.ExpectNoError(verifyDiskAttachedError)
} }
} }
} }


@ -69,7 +69,7 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
Expect(instances > len(scNames)).To(BeTrue(), "VCP_STRESS_INSTANCES should be greater than 3 to utilize all 4 types of storage classes") Expect(instances > len(scNames)).To(BeTrue(), "VCP_STRESS_INSTANCES should be greater than 3 to utilize all 4 types of storage classes")
iterations = GetAndExpectIntEnvVar(VCPStressIterations) iterations = GetAndExpectIntEnvVar(VCPStressIterations)
Expect(err).NotTo(HaveOccurred(), "Error Parsing VCP_STRESS_ITERATIONS") framework.ExpectNoError(err, "Error Parsing VCP_STRESS_ITERATIONS")
Expect(iterations > 0).To(BeTrue(), "VCP_STRESS_ITERATIONS should be greater than 0") Expect(iterations > 0).To(BeTrue(), "VCP_STRESS_ITERATIONS should be greater than 0")
policyName = GetAndExpectStringEnvVar(SPBMPolicyName) policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
@ -104,7 +104,7 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
sc, err = client.StorageV1().StorageClasses().Create(scWithDatastoreSpec) sc, err = client.StorageV1().StorageClasses().Create(scWithDatastoreSpec)
} }
Expect(sc).NotTo(BeNil()) Expect(sc).NotTo(BeNil())
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
defer client.StorageV1().StorageClasses().Delete(scname, nil) defer client.StorageV1().StorageClasses().Delete(scname, nil)
scArrays[index] = sc scArrays[index] = sc
} }
@ -129,26 +129,26 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
logPrefix := fmt.Sprintf("Instance: [%v], Iteration: [%v] :", instanceId, iterationCount+1) logPrefix := fmt.Sprintf("Instance: [%v], Iteration: [%v] :", instanceId, iterationCount+1)
By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name)) By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name))
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "1Gi", sc)) pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "1Gi", sc))
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)
By(fmt.Sprintf("%v Waiting for claim: %v to be in bound phase", logPrefix, pvclaim.Name)) By(fmt.Sprintf("%v Waiting for claim: %v to be in bound phase", logPrefix, pvclaim.Name))
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By(fmt.Sprintf("%v Creating Pod using the claim: %v", logPrefix, pvclaim.Name)) By(fmt.Sprintf("%v Creating Pod using the claim: %v", logPrefix, pvclaim.Name))
// Create pod to attach Volume to Node // Create pod to attach Volume to Node
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "") pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name)) By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name))
Expect(f.WaitForPodRunningSlow(pod.Name)).NotTo(HaveOccurred()) Expect(f.WaitForPodRunningSlow(pod.Name)).NotTo(HaveOccurred())
// Get the copy of the Pod to know the assigned node name. // Get the copy of the Pod to know the assigned node name.
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By(fmt.Sprintf("%v Verifing the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)) By(fmt.Sprintf("%v Verifing the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
isVolumeAttached, verifyDiskAttachedError := diskIsAttached(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) isVolumeAttached, verifyDiskAttachedError := diskIsAttached(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
@ -160,11 +160,11 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
By(fmt.Sprintf("%v Deleting pod: %v", logPrefix, pod.Name)) By(fmt.Sprintf("%v Deleting pod: %v", logPrefix, pod.Name))
err = framework.DeletePodWithWait(f, client, pod) err = framework.DeletePodWithWait(f, client, pod)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By(fmt.Sprintf("%v Waiting for volume: %v to be detached from the node: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)) By(fmt.Sprintf("%v Waiting for volume: %v to be detached from the node: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
err = waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) err = waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By(fmt.Sprintf("%v Deleting the Claim: %v", logPrefix, pvclaim.Name)) By(fmt.Sprintf("%v Deleting the Claim: %v", logPrefix, pvclaim.Name))
Expect(framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)).NotTo(HaveOccurred()) Expect(framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)).NotTo(HaveOccurred())
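PerformVolumeLifeCycleInParallel is invoked once per stress instance; the fan-out around it is plain Go concurrency. A minimal sketch of launching and joining such instances, assuming a lifecycle function with this (illustrative) shape that reports failures through the test framework rather than a return value:

package sketch

import (
	"fmt"
	"sync"
)

// runInstances fans out `instances` copies of a volume lifecycle loop and
// waits for all of them to finish. lifecycle is assumed to run `iterations`
// create/attach/detach/delete cycles, mirroring the test above.
func runInstances(instances, iterations int, lifecycle func(id string, iterations int)) {
	var wg sync.WaitGroup
	for i := 0; i < instances; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			lifecycle(fmt.Sprintf("instance-%d", n), iterations)
		}(i)
	}
	wg.Wait()
}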

View File

@ -383,14 +383,14 @@ func getVSpherePodSpecWithVolumePaths(volumePaths []string, keyValuelabel map[st
func verifyFilesExistOnVSphereVolume(namespace string, podName string, filePaths ...string) { func verifyFilesExistOnVSphereVolume(namespace string, podName string, filePaths ...string) {
for _, filePath := range filePaths { for _, filePath := range filePaths {
_, err := framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/ls", filePath) _, err := framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/ls", filePath)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("failed to verify file: %q on the pod: %q", filePath, podName)) framework.ExpectNoError(err, fmt.Sprintf("failed to verify file: %q on the pod: %q", filePath, podName))
} }
} }
func createEmptyFilesOnVSphereVolume(namespace string, podName string, filePaths []string) { func createEmptyFilesOnVSphereVolume(namespace string, podName string, filePaths []string) {
for _, filePath := range filePaths { for _, filePath := range filePaths {
err := framework.CreateEmptyFileOnPod(namespace, podName, filePath) err := framework.CreateEmptyFileOnPod(namespace, podName, filePath)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
} }
} }
@ -401,12 +401,12 @@ func verifyVSphereVolumesAccessible(c clientset.Interface, pod *v1.Pod, persiste
for index, pv := range persistentvolumes { for index, pv := range persistentvolumes {
// Verify disks are attached to the node // Verify disks are attached to the node
isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName) isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk %v is not attached with the node", pv.Spec.VsphereVolume.VolumePath)) Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk %v is not attached with the node", pv.Spec.VsphereVolume.VolumePath))
// Verify Volumes are accessible // Verify Volumes are accessible
filepath := filepath.Join("/mnt/", fmt.Sprintf("volume%v", index+1), "/emptyFile.txt") filepath := filepath.Join("/mnt/", fmt.Sprintf("volume%v", index+1), "/emptyFile.txt")
_, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute) _, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
} }
} }
@ -424,7 +424,7 @@ func verifyVolumeCreationOnRightZone(persistentvolumes []*v1.PersistentVolume, n
// Get the datastore object reference from the datastore name // Get the datastore object reference from the datastore name
datastoreRef, err := nodeInfo.VSphere.GetDatastoreRefFromName(ctx, nodeInfo.DataCenterRef, datastoreName) datastoreRef, err := nodeInfo.VSphere.GetDatastoreRefFromName(ctx, nodeInfo.DataCenterRef, datastoreName)
if err != nil { if err != nil {
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
} }
// Find common datastores among the specified zones // Find common datastores among the specified zones
var datastoreCountMap = make(map[string]int) var datastoreCountMap = make(map[string]int)
@ -446,9 +446,9 @@ func verifyVolumeCreationOnRightZone(persistentvolumes []*v1.PersistentVolume, n
// Get vSphere Volume Path from PVC // Get vSphere Volume Path from PVC
func getvSphereVolumePathFromClaim(client clientset.Interface, namespace string, claimName string) string { func getvSphereVolumePathFromClaim(client clientset.Interface, namespace string, claimName string) string {
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{}) pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
pv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{}) pv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
return pv.Spec.VsphereVolume.VolumePath return pv.Spec.VsphereVolume.VolumePath
} }
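getvSphereVolumePathFromClaim is a two-step lookup: the claim, then the PV it is bound to, then the vSphere volume path in the PV spec. A self-contained sketch of the same lookup that returns errors instead of asserting, using the pre-context Get signatures shown in this diff (identifiers are illustrative):

package sketch

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// volumePathForClaim resolves a bound PVC to the vSphere .vmdk path backing
// its PV, using the same client calls as the helper above but propagating
// errors to the caller instead of failing the spec.
func volumePathForClaim(client clientset.Interface, namespace, claimName string) (string, error) {
	pvc, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{})
	if err != nil {
		return "", fmt.Errorf("getting claim %s/%s: %v", namespace, claimName, err)
	}
	pv, err := client.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
	if err != nil {
		return "", fmt.Errorf("getting PV %s: %v", pvc.Spec.VolumeName, err)
	}
	if pv.Spec.VsphereVolume == nil {
		return "", fmt.Errorf("PV %s is not a vSphere volume", pv.Name)
	}
	return pv.Spec.VsphereVolume.VolumePath, nil
}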
@ -628,7 +628,7 @@ func getVMXFilePath(vmObject *object.VirtualMachine) (vmxPath string) {
var nodeVM mo.VirtualMachine var nodeVM mo.VirtualMachine
err := vmObject.Properties(ctx, vmObject.Reference(), []string{"config.files"}, &nodeVM) err := vmObject.Properties(ctx, vmObject.Reference(), []string{"config.files"}, &nodeVM)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
Expect(nodeVM.Config).NotTo(BeNil()) Expect(nodeVM.Config).NotTo(BeNil())
vmxPath = nodeVM.Config.Files.VmPathName vmxPath = nodeVM.Config.Files.VmPathName
@ -660,9 +660,9 @@ func poweroffNodeVM(nodeName string, vm *object.VirtualMachine) {
framework.Logf("Powering off node VM %s", nodeName) framework.Logf("Powering off node VM %s", nodeName)
_, err := vm.PowerOff(ctx) _, err := vm.PowerOff(ctx)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
err = vm.WaitForPowerState(ctx, vim25types.VirtualMachinePowerStatePoweredOff) err = vm.WaitForPowerState(ctx, vim25types.VirtualMachinePowerStatePoweredOff)
Expect(err).NotTo(HaveOccurred(), "Unable to power off the node") framework.ExpectNoError(err, "Unable to power off the node")
} }
// poweron nodeVM and confirm the poweron state // poweron nodeVM and confirm the poweron state
@ -674,7 +674,7 @@ func poweronNodeVM(nodeName string, vm *object.VirtualMachine) {
vm.PowerOn(ctx) vm.PowerOn(ctx)
err := vm.WaitForPowerState(ctx, vim25types.VirtualMachinePowerStatePoweredOn) err := vm.WaitForPowerState(ctx, vim25types.VirtualMachinePowerStatePoweredOn)
Expect(err).NotTo(HaveOccurred(), "Unable to power on the node") framework.ExpectNoError(err, "Unable to power on the node")
} }
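The power-off/power-on helpers above drive the disruptive tests through govmomi. A hedged sketch of the combined power cycle, returning errors to the caller instead of failing the spec:

package sketch

import (
	"context"
	"fmt"

	"github.com/vmware/govmomi/object"
	vim25types "github.com/vmware/govmomi/vim25/types"
)

// powerCycleVM powers a node VM off and back on, waiting for each state
// transition, roughly the sequence the helpers above perform around the
// disruptive tests.
func powerCycleVM(ctx context.Context, vm *object.VirtualMachine) error {
	if _, err := vm.PowerOff(ctx); err != nil {
		return fmt.Errorf("powering off %s: %v", vm.Name(), err)
	}
	if err := vm.WaitForPowerState(ctx, vim25types.VirtualMachinePowerStatePoweredOff); err != nil {
		return fmt.Errorf("waiting for powered-off state: %v", err)
	}
	if _, err := vm.PowerOn(ctx); err != nil {
		return fmt.Errorf("powering on %s: %v", vm.Name(), err)
	}
	return vm.WaitForPowerState(ctx, vim25types.VirtualMachinePowerStatePoweredOn)
}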
// unregister a nodeVM from VC // unregister a nodeVM from VC
@ -686,7 +686,7 @@ func unregisterNodeVM(nodeName string, vm *object.VirtualMachine) {
framework.Logf("Unregistering node VM %s", nodeName) framework.Logf("Unregistering node VM %s", nodeName)
err := vm.Unregister(ctx) err := vm.Unregister(ctx)
Expect(err).NotTo(HaveOccurred(), "Unable to unregister the node") framework.ExpectNoError(err, "Unable to unregister the node")
} }
// register a nodeVM into a VC // register a nodeVM into a VC
@ -700,16 +700,16 @@ func registerNodeVM(nodeName, workingDir, vmxFilePath string, rpool *object.Reso
finder := find.NewFinder(nodeInfo.VSphere.Client.Client, false) finder := find.NewFinder(nodeInfo.VSphere.Client.Client, false)
vmFolder, err := finder.FolderOrDefault(ctx, workingDir) vmFolder, err := finder.FolderOrDefault(ctx, workingDir)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
registerTask, err := vmFolder.RegisterVM(ctx, vmxFilePath, nodeName, false, rpool, host) registerTask, err := vmFolder.RegisterVM(ctx, vmxFilePath, nodeName, false, rpool, host)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
err = registerTask.Wait(ctx) err = registerTask.Wait(ctx)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
vmPath := filepath.Join(workingDir, nodeName) vmPath := filepath.Join(workingDir, nodeName)
vm, err := finder.VirtualMachine(ctx, vmPath) vm, err := finder.VirtualMachine(ctx, vmPath)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
poweronNodeVM(nodeName, vm) poweronNodeVM(nodeName, vm)
} }
@ -812,7 +812,7 @@ func invokeVCenterServiceControl(command, service, host string) error {
// Node, else fails. // Node, else fails.
func expectVolumeToBeAttached(nodeName, volumePath string) { func expectVolumeToBeAttached(nodeName, volumePath string) {
isAttached, err := diskIsAttached(volumePath, nodeName) isAttached, err := diskIsAttached(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node", volumePath)) Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node", volumePath))
} }
@ -850,7 +850,7 @@ func writeContentToPodFile(namespace, podName, filePath, content string) error {
func expectFileContentToMatch(namespace, podName, filePath, content string) { func expectFileContentToMatch(namespace, podName, filePath, content string) {
_, err := framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName, _, err := framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName,
"--", "/bin/sh", "-c", fmt.Sprintf("grep '%s' %s", content, filePath)) "--", "/bin/sh", "-c", fmt.Sprintf("grep '%s' %s", content, filePath))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("failed to match content of file: %q on the pod: %q", filePath, podName)) framework.ExpectNoError(err, fmt.Sprintf("failed to match content of file: %q on the pod: %q", filePath, podName))
} }
// expectFileContentsToMatch checks if the given contents match the ones present // expectFileContentsToMatch checks if the given contents match the ones present

View File

@ -76,7 +76,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v
volumeOptions.Datastore = clusterDatastore volumeOptions.Datastore = clusterDatastore
volumePath, err := nodeInfo.VSphere.CreateVolume(volumeOptions, nodeInfo.DataCenterRef) volumePath, err := nodeInfo.VSphere.CreateVolume(volumeOptions, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
defer func() { defer func() {
By("Deleting the vsphere volume") By("Deleting the vsphere volume")
@ -87,13 +87,13 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v
By("Creating pod") By("Creating pod")
pod, err := client.CoreV1().Pods(namespace).Create(podspec) pod, err := client.CoreV1().Pods(namespace).Create(podspec)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By("Waiting for pod to be ready") By("Waiting for pod to be ready")
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed()) Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
// get fresh pod info // get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
nodeName := pod.Spec.NodeName nodeName := pod.Spec.NodeName
By("Verifying volume is attached") By("Verifying volume is attached")
@ -101,11 +101,11 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v
By("Deleting pod") By("Deleting pod")
err = framework.DeletePodWithWait(f, client, pod) err = framework.DeletePodWithWait(f, client, pod)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By("Waiting for volumes to be detached from the node") By("Waiting for volumes to be detached from the node")
err = waitForVSphereDiskToDetach(volumePath, nodeName) err = waitForVSphereDiskToDetach(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
}) })
/* /*
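waitForVSphereDiskToDetach, used throughout these tests, is essentially a poll-until-detached loop. A sketch of that pattern with the apimachinery wait package, taking the attachment probe as a parameter so the diskIsAttached details stay out of the sketch; interval and timeout are illustrative:

package sketch

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForDetach polls an attachment probe until the disk reports detached or
// the timeout expires. isAttached stands in for the diskIsAttached helper.
func waitForDetach(volumePath, nodeName string, isAttached func(volumePath, nodeName string) (bool, error)) error {
	const (
		pollInterval = 10 * time.Second
		timeout      = 6 * time.Minute
	)
	return wait.Poll(pollInterval, timeout, func() (bool, error) {
		attached, err := isAttached(volumePath, nodeName)
		if err != nil {
			return false, err
		}
		if attached {
			fmt.Printf("volume %q is still attached to node %q, retrying\n", volumePath, nodeName)
			return false, nil
		}
		return true, nil
	})
}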

View File

@ -72,7 +72,7 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]",
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
errorMsg := `Failed to provision volume with StorageClass \"` + DatastoreSCName + `\": The specified datastore ` + InvalidDatastore + ` is not a shared datastore across node VMs` errorMsg := `Failed to provision volume with StorageClass \"` + DatastoreSCName + `\": The specified datastore ` + InvalidDatastore + ` is not a shared datastore across node VMs`
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg) framework.ExpectNoError(err, errorMsg)
} }
}) })
}) })
@ -80,12 +80,12 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]",
func invokeInvalidDatastoreTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error { func invokeInvalidDatastoreTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error {
By("Creating Storage Class With Invalid Datastore") By("Creating Storage Class With Invalid Datastore")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DatastoreSCName, scParameters, nil)) storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DatastoreSCName, scParameters, nil))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class") By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
By("Expect claim to fail provisioning volume") By("Expect claim to fail provisioning volume")

View File

@ -108,14 +108,14 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
By("Creating Storage Class With DiskFormat") By("Creating Storage Class With DiskFormat")
storageClassSpec := getVSphereStorageClassSpec("thinsc", scParameters, nil) storageClassSpec := getVSphereStorageClassSpec("thinsc", scParameters, nil)
storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec) storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class") By("Creating PVC using the Storage Class")
pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass) pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(pvclaimSpec) pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(pvclaimSpec)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
defer func() { defer func() {
client.CoreV1().PersistentVolumeClaims(namespace).Delete(pvclaimSpec.Name, nil) client.CoreV1().PersistentVolumeClaims(namespace).Delete(pvclaimSpec.Name, nil)
@ -123,15 +123,15 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
By("Waiting for claim to be in bound phase") By("Waiting for claim to be in bound phase")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, framework.ClaimProvisionTimeout) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
// Get new copy of the claim // Get new copy of the claim
pvclaim, err = client.CoreV1().PersistentVolumeClaims(pvclaim.Namespace).Get(pvclaim.Name, metav1.GetOptions{}) pvclaim, err = client.CoreV1().PersistentVolumeClaims(pvclaim.Namespace).Get(pvclaim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
// Get the bound PV // Get the bound PV
pv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{}) pv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
/* /*
PV is required to be attached to the Node. so that using govmomi API we can grab Disk's Backing Info PV is required to be attached to the Node. so that using govmomi API we can grab Disk's Backing Info
@ -141,14 +141,14 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
// Create pod to attach Volume to Node // Create pod to attach Volume to Node
podSpec := getVSpherePodSpecWithClaim(pvclaim.Name, nodeKeyValueLabel, "while true ; do sleep 2 ; done") podSpec := getVSpherePodSpecWithClaim(pvclaim.Name, nodeKeyValueLabel, "while true ; do sleep 2 ; done")
pod, err := client.CoreV1().Pods(namespace).Create(podSpec) pod, err := client.CoreV1().Pods(namespace).Create(podSpec)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By("Waiting for pod to be running") By("Waiting for pod to be running")
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed()) Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName) isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName)
Expect(isAttached).To(BeTrue()) Expect(isAttached).To(BeTrue())
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By("Verify Disk Format") By("Verify Disk Format")
Expect(verifyDiskFormat(client, nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat)).To(BeTrue(), "DiskFormat Verification Failed") Expect(verifyDiskFormat(client, nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat)).To(BeTrue(), "DiskFormat Verification Failed")
@ -174,7 +174,7 @@ func verifyDiskFormat(client clientset.Interface, nodeName string, pvVolumePath
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName) nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef) vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
vmDevices, err := vm.Device(ctx) vmDevices, err := vm.Device(ctx)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
disks := vmDevices.SelectByType((*types.VirtualDisk)(nil)) disks := vmDevices.SelectByType((*types.VirtualDisk)(nil))
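verifyDiskFormat selects the VM's virtual disks and then inspects their backing. A hedged sketch of the thin-provisioning part of that check; it only handles the flat VMDK backing type, and the path comparison assumes the same "[datastore] path.vmdk" form on both sides:

package sketch

import (
	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/vim25/types"
)

// isThinProvisioned walks the VM's virtual disks and reports whether the
// disk backed by vmdkPath is thin provisioned (second result: disk found).
// The real verification also distinguishes eager-zeroed from lazy-zeroed thick.
func isThinProvisioned(devices object.VirtualDeviceList, vmdkPath string) (thin bool, found bool) {
	for _, device := range devices.SelectByType((*types.VirtualDisk)(nil)) {
		disk := device.(*types.VirtualDisk)
		backing, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
		if !ok || backing.FileName != vmdkPath {
			continue
		}
		return backing.ThinProvisioned != nil && *backing.ThinProvisioned, true
	}
	return false, false
}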

View File

@ -70,7 +70,7 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() {
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
errorMsg := `Failed to provision volume with StorageClass \"` + DiskSizeSCName + `\": A specified parameter was not correct` errorMsg := `Failed to provision volume with StorageClass \"` + DiskSizeSCName + `\": A specified parameter was not correct`
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg) framework.ExpectNoError(err, errorMsg)
} }
}) })
}) })
@ -78,12 +78,12 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() {
func invokeInvalidDiskSizeTestNeg(client clientset.Interface, namespace string, scParameters map[string]string, diskSize string) error { func invokeInvalidDiskSizeTestNeg(client clientset.Interface, namespace string, scParameters map[string]string, diskSize string) error {
By("Creating Storage Class With invalid disk size") By("Creating Storage Class With invalid disk size")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DiskSizeSCName, scParameters, nil)) storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DiskSizeSCName, scParameters, nil))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class") By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, diskSize, storageclass)) pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, diskSize, storageclass))
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
By("Expect claim to fail provisioning volume") By("Expect claim to fail provisioning volume")

View File

@ -104,7 +104,7 @@ func invokeTestForFstype(f *framework.Framework, client clientset.Interface, nam
// Create Pod and verify the persistent volume is accessible // Create Pod and verify the persistent volume is accessible
pod := createPodAndVerifyVolumeAccessible(client, namespace, pvclaim, persistentvolumes) pod := createPodAndVerifyVolumeAccessible(client, namespace, pvclaim, persistentvolumes)
_, err := framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, expectedContent, time.Minute) _, err := framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, expectedContent, time.Minute)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
// Detach and delete volume // Detach and delete volume
detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath) detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
@ -147,18 +147,18 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa
func createVolume(client clientset.Interface, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) { func createVolume(client clientset.Interface, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) {
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("fstype", scParameters, nil)) storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("fstype", scParameters, nil))
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class") By("Creating PVC using the Storage Class")
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
var pvclaims []*v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)
By("Waiting for claim to be in bound phase") By("Waiting for claim to be in bound phase")
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
return pvclaim, persistentvolumes return pvclaim, persistentvolumes
} }
@ -168,7 +168,7 @@ func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace st
By("Creating pod to attach PV to the node") By("Creating pod to attach PV to the node")
// Create pod to attach Volume to Node // Create pod to attach Volume to Node
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand) pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
// Asserts: Right disk is attached to the pod // Asserts: Right disk is attached to the pod
By("Verify the volume is accessible and available in the pod") By("Verify the volume is accessible and available in the pod")

View File

@ -84,20 +84,20 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
for i := 0; i < numNodes; i++ { for i := 0; i < numNodes; i++ {
By(fmt.Sprintf("%d: Creating a test vsphere volume", i)) By(fmt.Sprintf("%d: Creating a test vsphere volume", i))
volumePath, err := nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) volumePath, err := nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
volumePaths = append(volumePaths, volumePath) volumePaths = append(volumePaths, volumePath)
By(fmt.Sprintf("Creating pod %d on node %v", i, nodeNameList[i])) By(fmt.Sprintf("Creating pod %d on node %v", i, nodeNameList[i]))
podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nodeKeyValueLabelList[i], nil) podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nodeKeyValueLabelList[i], nil)
pod, err := client.CoreV1().Pods(namespace).Create(podspec) pod, err := client.CoreV1().Pods(namespace).Create(podspec)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
defer framework.DeletePodWithWait(f, client, pod) defer framework.DeletePodWithWait(f, client, pod)
By("Waiting for pod to be ready") By("Waiting for pod to be ready")
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed()) Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
pods = append(pods, pod) pods = append(pods, pod)
@ -109,11 +109,11 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
By("Restarting kubelet on master node") By("Restarting kubelet on master node")
masterAddress := framework.GetMasterHost() + ":22" masterAddress := framework.GetMasterHost() + ":22"
err := framework.RestartKubelet(masterAddress) err := framework.RestartKubelet(masterAddress)
Expect(err).NotTo(HaveOccurred(), "Unable to restart kubelet on master node") framework.ExpectNoError(err, "Unable to restart kubelet on master node")
By("Verifying the kubelet on master node is up") By("Verifying the kubelet on master node is up")
err = framework.WaitForKubeletUp(masterAddress) err = framework.WaitForKubeletUp(masterAddress)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
for i, pod := range pods { for i, pod := range pods {
volumePath := volumePaths[i] volumePath := volumePaths[i]
@ -124,15 +124,15 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
By(fmt.Sprintf("Deleting pod on node %s", nodeName)) By(fmt.Sprintf("Deleting pod on node %s", nodeName))
err = framework.DeletePodWithWait(f, client, pod) err = framework.DeletePodWithWait(f, client, pod)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting for volume %s to be detached from the node %s", volumePath, nodeName)) By(fmt.Sprintf("Waiting for volume %s to be detached from the node %s", volumePath, nodeName))
err = waitForVSphereDiskToDetach(volumePath, nodeName) err = waitForVSphereDiskToDetach(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By(fmt.Sprintf("Deleting volume %s", volumePath)) By(fmt.Sprintf("Deleting volume %s", volumePath))
err = nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef) err = nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
} }
}) })
}) })

View File

@ -44,7 +44,7 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
client = f.ClientSet client = f.ClientSet
namespace = f.Namespace.Name namespace = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout)) framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
workingDir = os.Getenv("VSPHERE_WORKING_DIR") workingDir = os.Getenv("VSPHERE_WORKING_DIR")
Expect(workingDir).NotTo(BeEmpty()) Expect(workingDir).NotTo(BeEmpty())
@ -69,10 +69,10 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
defer cancel() defer cancel()
vmHost, err := vmObject.HostSystem(ctx) vmHost, err := vmObject.HostSystem(ctx)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
vmPool, err := vmObject.ResourcePool(ctx) vmPool, err := vmObject.ResourcePool(ctx)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
// Unregister Node VM // Unregister Node VM
By("Unregister a node VM") By("Unregister a node VM")

View File

@ -77,36 +77,36 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
By("Creating a Storage Class") By("Creating a Storage Class")
storageClassSpec := getVSphereStorageClassSpec("test-sc", nil, nil) storageClassSpec := getVSphereStorageClassSpec("test-sc", nil, nil)
storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec) storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class") By("Creating PVC using the Storage Class")
pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "1Gi", storageclass) pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "1Gi", storageclass)
pvclaim, err := framework.CreatePVC(client, namespace, pvclaimSpec) pvclaim, err := framework.CreatePVC(client, namespace, pvclaimSpec)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create PVC with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create PVC with err: %v", err))
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
By("Waiting for PVC to be in bound phase") By("Waiting for PVC to be in bound phase")
pvclaims := []*v1.PersistentVolumeClaim{pvclaim} pvclaims := []*v1.PersistentVolumeClaim{pvclaim}
pvs, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) pvs, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to wait until PVC phase set to bound: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to wait until PVC phase set to bound: %v", err))
volumePath := pvs[0].Spec.VsphereVolume.VolumePath volumePath := pvs[0].Spec.VsphereVolume.VolumePath
By("Creating a Deployment") By("Creating a Deployment")
deployment, err := framework.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "") deployment, err := framework.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "")
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create Deployment with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create Deployment with err: %v", err))
defer client.AppsV1().Deployments(namespace).Delete(deployment.Name, &metav1.DeleteOptions{}) defer client.AppsV1().Deployments(namespace).Delete(deployment.Name, &metav1.DeleteOptions{})
By("Get pod from the deployement") By("Get pod from the deployement")
podList, err := framework.GetPodsForDeployment(client, deployment) podList, err := framework.GetPodsForDeployment(client, deployment)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to get pod from the deployement with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to get pod from the deployement with err: %v", err))
Expect(podList.Items).NotTo(BeEmpty()) Expect(podList.Items).NotTo(BeEmpty())
pod := podList.Items[0] pod := podList.Items[0]
node1 := pod.Spec.NodeName node1 := pod.Spec.NodeName
By(fmt.Sprintf("Verify disk is attached to the node: %v", node1)) By(fmt.Sprintf("Verify disk is attached to the node: %v", node1))
isAttached, err := diskIsAttached(volumePath, node1) isAttached, err := diskIsAttached(volumePath, node1)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
Expect(isAttached).To(BeTrue(), "Disk is not attached to the node") Expect(isAttached).To(BeTrue(), "Disk is not attached to the node")
By(fmt.Sprintf("Power off the node: %v", node1)) By(fmt.Sprintf("Power off the node: %v", node1))
@ -116,28 +116,28 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
_, err = vm.PowerOff(ctx) _, err = vm.PowerOff(ctx)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
defer vm.PowerOn(ctx) defer vm.PowerOn(ctx)
err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOff) err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOff)
Expect(err).NotTo(HaveOccurred(), "Unable to power off the node") framework.ExpectNoError(err, "Unable to power off the node")
// Waiting for the pod to be failed over to a different node // Waiting for the pod to be failed over to a different node
node2, err := waitForPodToFailover(client, deployment, node1) node2, err := waitForPodToFailover(client, deployment, node1)
Expect(err).NotTo(HaveOccurred(), "Pod did not fail over to a different node") framework.ExpectNoError(err, "Pod did not fail over to a different node")
By(fmt.Sprintf("Waiting for disk to be attached to the new node: %v", node2)) By(fmt.Sprintf("Waiting for disk to be attached to the new node: %v", node2))
err = waitForVSphereDiskToAttach(volumePath, node2) err = waitForVSphereDiskToAttach(volumePath, node2)
Expect(err).NotTo(HaveOccurred(), "Disk is not attached to the node") framework.ExpectNoError(err, "Disk is not attached to the node")
By(fmt.Sprintf("Waiting for disk to be detached from the previous node: %v", node1)) By(fmt.Sprintf("Waiting for disk to be detached from the previous node: %v", node1))
err = waitForVSphereDiskToDetach(volumePath, node1) err = waitForVSphereDiskToDetach(volumePath, node1)
Expect(err).NotTo(HaveOccurred(), "Disk is not detached from the node") framework.ExpectNoError(err, "Disk is not detached from the node")
By(fmt.Sprintf("Power on the previous node: %v", node1)) By(fmt.Sprintf("Power on the previous node: %v", node1))
vm.PowerOn(ctx) vm.PowerOn(ctx)
err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOn) err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOn)
Expect(err).NotTo(HaveOccurred(), "Unable to power on the node") framework.ExpectNoError(err, "Unable to power on the node")
}) })
}) })
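waitForPodToFailover, referenced above, boils down to polling the deployment's pod until it lands on a node other than the one that was powered off. A sketch under that assumption, reusing framework.GetPodsForDeployment from the diff; interval and timeout are illustrative:

package sketch

import (
	"fmt"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForFailover polls the deployment's pod until it is scheduled onto a
// node other than oldNode and returns the new node name.
func waitForFailover(client clientset.Interface, deployment *appsv1.Deployment, oldNode string) (string, error) {
	newNode := ""
	err := wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
		pods, err := framework.GetPodsForDeployment(client, deployment)
		if err != nil {
			return false, err
		}
		if len(pods.Items) == 0 {
			return false, nil
		}
		if node := pods.Items[0].Spec.NodeName; node != "" && node != oldNode {
			newNode = node
			return true, nil
		}
		return false, nil
	})
	if err != nil {
		return "", fmt.Errorf("pod did not move off node %s: %v", oldNode, err)
	}
	return newNode, nil
}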

View File

@ -66,7 +66,7 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
Expect(GetReadySchedulableNodeInfos()).NotTo(BeEmpty()) Expect(GetReadySchedulableNodeInfos()).NotTo(BeEmpty())
if os.Getenv("VOLUME_OPS_SCALE") != "" { if os.Getenv("VOLUME_OPS_SCALE") != "" {
volume_ops_scale, err = strconv.Atoi(os.Getenv("VOLUME_OPS_SCALE")) volume_ops_scale, err = strconv.Atoi(os.Getenv("VOLUME_OPS_SCALE"))
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
} else { } else {
volume_ops_scale = DEFAULT_VOLUME_OPS_SCALE volume_ops_scale = DEFAULT_VOLUME_OPS_SCALE
} }
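The VOLUME_OPS_SCALE knob above is read straight from the environment. A small sketch of a parse-with-fallback variant; note the test itself fails the spec on a parse error rather than falling back:

package sketch

import (
	"os"
	"strconv"
)

// volumeOpsScale reads VOLUME_OPS_SCALE and falls back to def when the
// variable is unset or not a positive integer.
func volumeOpsScale(def int) int {
	raw := os.Getenv("VOLUME_OPS_SCALE")
	if raw == "" {
		return def
	}
	n, err := strconv.Atoi(raw)
	if err != nil || n <= 0 {
		return def
	}
	return n
}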
@ -79,7 +79,7 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
} }
By("Deleting StorageClass") By("Deleting StorageClass")
err = client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) err = client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
}) })
It("should create pod with many volumes and verify no attach call fails", func() { It("should create pod with many volumes and verify no attach call fails", func() {
@ -88,23 +88,23 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
scParameters := make(map[string]string) scParameters := make(map[string]string)
scParameters["diskformat"] = "thin" scParameters["diskformat"] = "thin"
storageclass, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("thinsc", scParameters, nil)) storageclass, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("thinsc", scParameters, nil))
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By("Creating PVCs using the Storage Class") By("Creating PVCs using the Storage Class")
count := 0 count := 0
for count < volume_ops_scale { for count < volume_ops_scale {
pvclaims[count], err = framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) pvclaims[count], err = framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
count++ count++
} }
By("Waiting for all claims to be in bound phase") By("Waiting for all claims to be in bound phase")
persistentvolumes, err = framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) persistentvolumes, err = framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By("Creating pod to attach PVs to the node") By("Creating pod to attach PVs to the node")
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "") pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By("Verify all volumes are accessible and available in the pod") By("Verify all volumes are accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes) verifyVSphereVolumesAccessible(client, pod, persistentvolumes)

View File

@ -147,7 +147,7 @@ func getTestStorageClasses(client clientset.Interface, policyName, datastoreName
sc, err = client.StorageV1().StorageClasses().Create(scWithDatastoreSpec) sc, err = client.StorageV1().StorageClasses().Create(scWithDatastoreSpec)
} }
Expect(sc).NotTo(BeNil()) Expect(sc).NotTo(BeNil())
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
scArrays[index] = sc scArrays[index] = sc
} }
return scArrays return scArrays
@ -171,14 +171,14 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
for j := 0; j < volumesPerPod; j++ { for j := 0; j < volumesPerPod; j++ {
currsc := sc[((i*numPods)+j)%len(sc)] currsc := sc[((i*numPods)+j)%len(sc)]
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", currsc)) pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", currsc))
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)
} }
totalpvclaims = append(totalpvclaims, pvclaims) totalpvclaims = append(totalpvclaims, pvclaims)
} }
for _, pvclaims := range totalpvclaims { for _, pvclaims := range totalpvclaims {
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
totalpvs = append(totalpvs, persistentvolumes) totalpvs = append(totalpvs, persistentvolumes)
} }
elapsed := time.Since(start) elapsed := time.Since(start)
@ -189,7 +189,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
for i, pvclaims := range totalpvclaims { for i, pvclaims := range totalpvclaims {
nodeSelector := nodeSelectorList[i%len(nodeSelectorList)] nodeSelector := nodeSelectorList[i%len(nodeSelectorList)]
pod, err := framework.CreatePod(client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, false, "") pod, err := framework.CreatePod(client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, false, "")
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
totalpods = append(totalpods, pod) totalpods = append(totalpods, pod)
defer framework.DeletePodWithWait(f, client, pod) defer framework.DeletePodWithWait(f, client, pod)
@ -205,7 +205,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
start = time.Now() start = time.Now()
for _, pod := range totalpods { for _, pod := range totalpods {
err := framework.DeletePodWithWait(f, client, pod) err := framework.DeletePodWithWait(f, client, pod)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
} }
elapsed = time.Since(start) elapsed = time.Since(start)
latency[DetachOp] = elapsed.Seconds() latency[DetachOp] = elapsed.Seconds()
@ -217,14 +217,14 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
} }
err := waitForVSphereDisksToDetach(nodeVolumeMap) err := waitForVSphereDisksToDetach(nodeVolumeMap)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By("Deleting the PVCs") By("Deleting the PVCs")
start = time.Now() start = time.Now()
for _, pvclaims := range totalpvclaims { for _, pvclaims := range totalpvclaims {
for _, pvc := range pvclaims { for _, pvc := range pvclaims {
err = framework.DeletePersistentVolumeClaim(client, pvc.Name, namespace) err = framework.DeletePersistentVolumeClaim(client, pvc.Name, namespace)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
} }
} }
elapsed = time.Since(start) elapsed = time.Since(start)
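The latency map in this file records wall-clock seconds per operation. The measurement pattern can be factored into a tiny helper, sketched here with an illustrative map key scheme:

package sketch

import "time"

// timeOp runs op and records its wall-clock duration in seconds in the
// latency map under key, e.g. latency["attach"] or latency["detach"].
func timeOp(latency map[string]float64, key string, op func()) {
	start := time.Now()
	op()
	latency[key] = time.Since(start).Seconds()
}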

View File

@ -61,7 +61,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
} }
By("creating vmdk") By("creating vmdk")
volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
volumePaths = append(volumePaths, volumePath) volumePaths = append(volumePaths, volumePath)
}) })
@ -180,7 +180,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
It("should create and delete pod with multiple volumes from same datastore", func() { It("should create and delete pod with multiple volumes from same datastore", func() {
By("creating another vmdk") By("creating another vmdk")
volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
volumePaths = append(volumePaths, volumePath) volumePaths = append(volumePaths, volumePath)
By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
@ -228,7 +228,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
volumeOptions.Datastore = GetAndExpectStringEnvVar(SecondSharedDatastore) volumeOptions.Datastore = GetAndExpectStringEnvVar(SecondSharedDatastore)
volumePath, err := vsp.CreateVolume(volumeOptions, nodeInfo.DataCenterRef) volumePath, err := vsp.CreateVolume(volumeOptions, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
volumePaths = append(volumePaths, volumePath) volumePaths = append(volumePaths, volumePath)
By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
@ -295,7 +295,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
// Create another VMDK Volume // Create another VMDK Volume
By("creating another vmdk") By("creating another vmdk")
volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
volumePaths = append(volumePaths, volumePath) volumePaths = append(volumePaths, volumePath)
testvolumePathsPodB = append(testvolumePathsPodA, volumePath) testvolumePathsPodB = append(testvolumePathsPodA, volumePath)
@ -358,14 +358,14 @@ func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace st
podspec := getVSpherePodSpecWithVolumePaths(volumePaths, nodeKeyValueLabel, nil) podspec := getVSpherePodSpecWithVolumePaths(volumePaths, nodeKeyValueLabel, nil)
pod, err = client.CoreV1().Pods(namespace).Create(podspec) pod, err = client.CoreV1().Pods(namespace).Create(podspec)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By("Waiting for pod to be ready") By("Waiting for pod to be ready")
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed()) Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
By(fmt.Sprintf("Verify volume is attached to the node:%v", nodeName)) By(fmt.Sprintf("Verify volume is attached to the node:%v", nodeName))
for _, volumePath := range volumePaths { for _, volumePath := range volumePaths {
isAttached, err := diskIsAttached(volumePath, nodeName) isAttached, err := diskIsAttached(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
Expect(isAttached).To(BeTrue(), "disk:"+volumePath+" is not attached with the node") Expect(isAttached).To(BeTrue(), "disk:"+volumePath+" is not attached with the node")
} }
return pod return pod
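getVSpherePodSpecWithVolumePaths builds pods that mount pre-created .vmdk files by path and pin themselves to a node with a selector. A sketch of such a spec for a single volume; image, mount path, and label values are illustrative:

package sketch

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// podWithVSphereVolume returns a single-container pod that mounts one
// pre-created vSphere disk by path and is pinned to a node via a selector.
func podWithVSphereVolume(name, volumePath string, nodeSelector map[string]string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.PodSpec{
			NodeSelector: nodeSelector,
			Containers: []v1.Container{{
				Name:    "volume-tester",
				Image:   "busybox",
				Command: []string{"/bin/sh", "-c", "while true; do sleep 2; done"},
				VolumeMounts: []v1.VolumeMount{{
					Name:      "vol-1",
					MountPath: "/mnt/volume1",
				}},
			}},
			Volumes: []v1.Volume{{
				Name: "vol-1",
				VolumeSource: v1.VolumeSource{
					VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
						VolumePath: volumePath,
						FSType:     "ext4",
					},
				},
			}},
		},
	}
}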

View File

@ -110,19 +110,19 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
for i, node := range nodes { for i, node := range nodes {
By(fmt.Sprintf("Creating test vsphere volume %d", i)) By(fmt.Sprintf("Creating test vsphere volume %d", i))
volumePath, err := node.nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, node.nodeInfo.DataCenterRef) volumePath, err := node.nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, node.nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
volumePaths = append(volumePaths, volumePath) volumePaths = append(volumePaths, volumePath)
By(fmt.Sprintf("Creating pod %d on node %v", i, node.name)) By(fmt.Sprintf("Creating pod %d on node %v", i, node.name))
podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, node.kvLabels, nil) podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, node.kvLabels, nil)
pod, err := client.CoreV1().Pods(namespace).Create(podspec) pod, err := client.CoreV1().Pods(namespace).Create(podspec)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting for pod %d to be ready", i)) By(fmt.Sprintf("Waiting for pod %d to be ready", i))
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed()) Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
pods = append(pods, pod) pods = append(pods, pod)
nodeName := pod.Spec.NodeName nodeName := pod.Spec.NodeName
@ -133,7 +133,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
filePath := fmt.Sprintf("/mnt/volume1/%v_vpxd_restart_test_%v.txt", namespace, strconv.FormatInt(time.Now().UnixNano(), 10)) filePath := fmt.Sprintf("/mnt/volume1/%v_vpxd_restart_test_%v.txt", namespace, strconv.FormatInt(time.Now().UnixNano(), 10))
randomContent := fmt.Sprintf("Random Content -- %v", strconv.FormatInt(time.Now().UnixNano(), 10)) randomContent := fmt.Sprintf("Random Content -- %v", strconv.FormatInt(time.Now().UnixNano(), 10))
err = writeContentToPodFile(namespace, pod.Name, filePath, randomContent) err = writeContentToPodFile(namespace, pod.Name, filePath, randomContent)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
filePaths = append(filePaths, filePath) filePaths = append(filePaths, filePath)
fileContents = append(fileContents, randomContent) fileContents = append(fileContents, randomContent)
} }
@ -141,14 +141,14 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
By("Stopping vpxd on the vCenter host") By("Stopping vpxd on the vCenter host")
vcAddress := vcHost + ":22" vcAddress := vcHost + ":22"
err := invokeVCenterServiceControl("stop", vpxdServiceName, vcAddress) err := invokeVCenterServiceControl("stop", vpxdServiceName, vcAddress)
Expect(err).NotTo(HaveOccurred(), "Unable to stop vpxd on the vCenter host") framework.ExpectNoError(err, "Unable to stop vpxd on the vCenter host")
expectFilesToBeAccessible(namespace, pods, filePaths) expectFilesToBeAccessible(namespace, pods, filePaths)
expectFileContentsToMatch(namespace, pods, filePaths, fileContents) expectFileContentsToMatch(namespace, pods, filePaths, fileContents)
By("Starting vpxd on the vCenter host") By("Starting vpxd on the vCenter host")
err = invokeVCenterServiceControl("start", vpxdServiceName, vcAddress) err = invokeVCenterServiceControl("start", vpxdServiceName, vcAddress)
Expect(err).NotTo(HaveOccurred(), "Unable to start vpxd on the vCenter host") framework.ExpectNoError(err, "Unable to start vpxd on the vCenter host")
expectVolumesToBeAttached(pods, volumePaths) expectVolumesToBeAttached(pods, volumePaths)
expectFilesToBeAccessible(namespace, pods, filePaths) expectFilesToBeAccessible(namespace, pods, filePaths)
@ -161,15 +161,15 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
By(fmt.Sprintf("Deleting pod on node %s", nodeName)) By(fmt.Sprintf("Deleting pod on node %s", nodeName))
err = framework.DeletePodWithWait(f, client, pod) err = framework.DeletePodWithWait(f, client, pod)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting for volume %s to be detached from node %s", volumePath, nodeName)) By(fmt.Sprintf("Waiting for volume %s to be detached from node %s", volumePath, nodeName))
err = waitForVSphereDiskToDetach(volumePath, nodeName) err = waitForVSphereDiskToDetach(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By(fmt.Sprintf("Deleting volume %s", volumePath)) By(fmt.Sprintf("Deleting volume %s", volumePath))
err = node.nodeInfo.VSphere.DeleteVolume(volumePath, node.nodeInfo.DataCenterRef) err = node.nodeInfo.VSphere.DeleteVolume(volumePath, node.nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
} }
} }
}) })
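The hunks above, like the rest of this change, swap direct Gomega assertions for the framework helper. As a rough sketch only (the real implementation in test/e2e/framework also logs the error and adjusts the Gomega caller offset so failures point at the test line), the helper behaves approximately like this; the variadic explain arguments are passed straight through to Gomega:

package framework

import (
	. "github.com/onsi/gomega"
)

// Approximate behaviour of framework.ExpectNoError; illustrative sketch, not
// the actual implementation. Callers can supply a plain message or
// format-style arguments via explain.
func ExpectNoError(err error, explain ...interface{}) {
	Expect(err).NotTo(HaveOccurred(), explain...)
}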

View File

@ -161,7 +161,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
errorMsg := "invalid option \\\"objectSpaceReserve\\\" for volume plugin kubernetes.io/vsphere-volume" errorMsg := "invalid option \\\"objectSpaceReserve\\\" for volume plugin kubernetes.io/vsphere-volume"
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg) framework.ExpectNoError(err, errorMsg)
} }
}) })
@ -176,7 +176,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
errorMsg := "Invalid value for " + Policy_DiskStripes + "." errorMsg := "Invalid value for " + Policy_DiskStripes + "."
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg) framework.ExpectNoError(err, errorMsg)
} }
}) })
@ -190,7 +190,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
errorMsg := "Invalid value for " + Policy_HostFailuresToTolerate + "." errorMsg := "Invalid value for " + Policy_HostFailuresToTolerate + "."
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg) framework.ExpectNoError(err, errorMsg)
} }
}) })
@ -207,7 +207,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
errorMsg := "The specified datastore: \\\"" + VmfsDatastore + "\\\" is not a VSAN datastore. " + errorMsg := "The specified datastore: \\\"" + VmfsDatastore + "\\\" is not a VSAN datastore. " +
"The policy parameters will work only with VSAN Datastore." "The policy parameters will work only with VSAN Datastore."
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg) framework.ExpectNoError(err, errorMsg)
} }
}) })
@ -238,7 +238,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + tagPolicy + "\\\"" errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + tagPolicy + "\\\""
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg) framework.ExpectNoError(err, errorMsg)
} }
}) })
@ -251,7 +251,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
errorMsg := "no pbm profile found with name: \\\"" + BronzeStoragePolicy + "\\" errorMsg := "no pbm profile found with name: \\\"" + BronzeStoragePolicy + "\\"
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg) framework.ExpectNoError(err, errorMsg)
} }
}) })
@ -266,7 +266,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
errorMsg := "Cannot specify storage policy capabilities along with storage policy name. Please specify only one" errorMsg := "Cannot specify storage policy capabilities along with storage policy name. Please specify only one"
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg) framework.ExpectNoError(err, errorMsg)
} }
}) })
}) })
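Every negative case in this Describe block follows the same idiom: the provisioning call must fail, and the test only fails hard when the error message is not the expected one. A condensed, illustrative helper capturing that idiom (expectProvisioningError is hypothetical and not part of this change):

package vsphere

import (
	"strings"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"k8s.io/kubernetes/test/e2e/framework"
)

// expectProvisioningError condenses the pattern used in the tests above: err
// must be non-nil, and if it carries an unexpected message it is surfaced as
// an unexpected error via framework.ExpectNoError.
func expectProvisioningError(err error, expectedMsg string) {
	By("Verifying provisioning fails with the expected error")
	Expect(err).To(HaveOccurred())
	if !strings.Contains(err.Error(), expectedMsg) {
		framework.ExpectNoError(err, expectedMsg)
	}
}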
@ -274,24 +274,24 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, namespace string, scParameters map[string]string) { func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, namespace string, scParameters map[string]string) {
By("Creating Storage Class With storage policy params") By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil)) storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class") By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)
By("Waiting for claim to be in bound phase") By("Waiting for claim to be in bound phase")
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By("Creating pod to attach PV to the node") By("Creating pod to attach PV to the node")
// Create pod to attach Volume to Node // Create pod to attach Volume to Node
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "") pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By("Verify the volume is accessible and available in the pod") By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes) verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
@ -306,12 +306,12 @@ func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, n
func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error { func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error {
By("Creating Storage Class With storage policy params") By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil)) storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class") By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
By("Waiting for claim to be in bound phase") By("Waiting for claim to be in bound phase")
@ -325,12 +325,12 @@ func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, sc
func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterNode string, namespace string, clusterName string, scParameters map[string]string) { func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterNode string, namespace string, clusterName string, scParameters map[string]string) {
By("Creating Storage Class With storage policy params") By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil)) storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class") By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
var pvclaims []*v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)
@ -339,7 +339,7 @@ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterN
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
updatedClaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(pvclaim.Name, metav1.GetOptions{}) updatedClaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(pvclaim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
vmName := clusterName + "-dynamic-pvc-" + string(updatedClaim.UID) vmName := clusterName + "-dynamic-pvc-" + string(updatedClaim.UID)
framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
// Wait for 6 minutes to let the vSphere Cloud Provider clean up routine delete the dummy VM // Wait for 6 minutes to let the vSphere Cloud Provider clean up routine delete the dummy VM
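The comment above refers to the vSphere Cloud Provider's periodic cleanup of the dummy VM left behind by the failed provisioning. A minimal sketch of such a wait, assuming a hypothetical vmExists lookup (the actual test's check and timing may differ):

package vsphere

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForDummyVMCleanup polls until the dummy VM named vmName is gone, giving
// the cleanup routine up to six minutes. Illustrative only; vmExists is a
// hypothetical helper standing in for a real vCenter lookup.
func waitForDummyVMCleanup(vmName string, vmExists func(string) (bool, error)) error {
	return wait.Poll(30*time.Second, 6*time.Minute, func() (bool, error) {
		present, err := vmExists(vmName)
		if err != nil {
			return false, fmt.Errorf("checking for dummy VM %s: %v", vmName, err)
		}
		return !present, nil
	})
}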

View File

@ -134,7 +134,7 @@ var _ = utils.SIGDescribe("Zone Support", func() {
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
errorMsg := "Failed to find a shared datastore matching zone [" + zoneD + "]" errorMsg := "Failed to find a shared datastore matching zone [" + zoneD + "]"
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg) framework.ExpectNoError(err, errorMsg)
} }
}) })
@ -165,7 +165,7 @@ var _ = utils.SIGDescribe("Zone Support", func() {
err := verifyPVCCreationFails(client, namespace, scParameters, zones) err := verifyPVCCreationFails(client, namespace, scParameters, zones)
errorMsg := "The specified datastore " + scParameters[Datastore] + " does not match the provided zones : [" + zoneC + "]" errorMsg := "The specified datastore " + scParameters[Datastore] + " does not match the provided zones : [" + zoneC + "]"
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg) framework.ExpectNoError(err, errorMsg)
} }
}) })
@ -183,7 +183,7 @@ var _ = utils.SIGDescribe("Zone Support", func() {
err := verifyPVCCreationFails(client, namespace, scParameters, zones) err := verifyPVCCreationFails(client, namespace, scParameters, zones)
errorMsg := "No compatible datastores found that satisfy the storage policy requirements" errorMsg := "No compatible datastores found that satisfy the storage policy requirements"
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg) framework.ExpectNoError(err, errorMsg)
} }
}) })
@ -203,7 +203,7 @@ var _ = utils.SIGDescribe("Zone Support", func() {
err := verifyPVCCreationFails(client, namespace, scParameters, zones) err := verifyPVCCreationFails(client, namespace, scParameters, zones)
errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + nonCompatPolicy + "\\\"." errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + nonCompatPolicy + "\\\"."
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg) framework.ExpectNoError(err, errorMsg)
} }
}) })
@ -215,7 +215,7 @@ var _ = utils.SIGDescribe("Zone Support", func() {
err := verifyPVCCreationFails(client, namespace, scParameters, zones) err := verifyPVCCreationFails(client, namespace, scParameters, zones)
errorMsg := "The specified datastore " + scParameters[Datastore] + " does not match the provided zones : [" + zoneC + "]" errorMsg := "The specified datastore " + scParameters[Datastore] + " does not match the provided zones : [" + zoneC + "]"
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg) framework.ExpectNoError(err, errorMsg)
} }
}) })
@ -224,7 +224,7 @@ var _ = utils.SIGDescribe("Zone Support", func() {
err := verifyPVCCreationFails(client, namespace, nil, nil) err := verifyPVCCreationFails(client, namespace, nil, nil)
errorMsg := "No shared datastores found in the Kubernetes cluster" errorMsg := "No shared datastores found in the Kubernetes cluster"
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg) framework.ExpectNoError(err, errorMsg)
} }
}) })
@ -234,7 +234,7 @@ var _ = utils.SIGDescribe("Zone Support", func() {
err := verifyPVCCreationFails(client, namespace, scParameters, nil) err := verifyPVCCreationFails(client, namespace, scParameters, nil)
errorMsg := "No shared datastores found in the Kubernetes cluster" errorMsg := "No shared datastores found in the Kubernetes cluster"
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg) framework.ExpectNoError(err, errorMsg)
} }
}) })
@ -244,7 +244,7 @@ var _ = utils.SIGDescribe("Zone Support", func() {
err := verifyPVCCreationFails(client, namespace, scParameters, nil) err := verifyPVCCreationFails(client, namespace, scParameters, nil)
errorMsg := "No shared datastores found in the Kubernetes cluster" errorMsg := "No shared datastores found in the Kubernetes cluster"
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg) framework.ExpectNoError(err, errorMsg)
} }
}) })
@ -255,7 +255,7 @@ var _ = utils.SIGDescribe("Zone Support", func() {
err := verifyPVCCreationFails(client, namespace, scParameters, nil) err := verifyPVCCreationFails(client, namespace, scParameters, nil)
errorMsg := "No shared datastores found in the Kubernetes cluster" errorMsg := "No shared datastores found in the Kubernetes cluster"
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg) framework.ExpectNoError(err, errorMsg)
} }
}) })
@ -265,7 +265,7 @@ var _ = utils.SIGDescribe("Zone Support", func() {
err := verifyPVCCreationFails(client, namespace, nil, zones) err := verifyPVCCreationFails(client, namespace, nil, zones)
errorMsg := "Failed to find a shared datastore matching zone [" + zoneC + "]" errorMsg := "Failed to find a shared datastore matching zone [" + zoneC + "]"
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg) framework.ExpectNoError(err, errorMsg)
} }
}) })
@ -276,7 +276,7 @@ var _ = utils.SIGDescribe("Zone Support", func() {
err := verifyPVCCreationFails(client, namespace, nil, zones) err := verifyPVCCreationFails(client, namespace, nil, zones)
errorMsg := "Failed to find a shared datastore matching zone [" + zoneA + " " + zoneC + "]" errorMsg := "Failed to find a shared datastore matching zone [" + zoneA + " " + zoneC + "]"
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg) framework.ExpectNoError(err, errorMsg)
} }
}) })
@ -287,7 +287,7 @@ var _ = utils.SIGDescribe("Zone Support", func() {
err := verifyPVCCreationFails(client, namespace, scParameters, zones) err := verifyPVCCreationFails(client, namespace, scParameters, zones)
errorMsg := "Invalid value for " + Policy_HostFailuresToTolerate + "." errorMsg := "Invalid value for " + Policy_HostFailuresToTolerate + "."
if !strings.Contains(err.Error(), errorMsg) { if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg) framework.ExpectNoError(err, errorMsg)
} }
}) })
@ -303,23 +303,23 @@ var _ = utils.SIGDescribe("Zone Support", func() {
func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) { func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) {
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("zone-sc", scParameters, zones)) storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("zone-sc", scParameters, zones))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class") By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)
By("Waiting for claim to be in bound phase") By("Waiting for claim to be in bound phase")
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By("Creating pod to attach PV to the node") By("Creating pod to attach PV to the node")
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "") pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By("Verify persistent volume was created on the right zone") By("Verify persistent volume was created on the right zone")
verifyVolumeCreationOnRightZone(persistentvolumes, pod.Spec.NodeName, zones) verifyVolumeCreationOnRightZone(persistentvolumes, pod.Spec.NodeName, zones)
@ -336,12 +336,12 @@ func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace strin
func verifyPVCCreationFails(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) error { func verifyPVCCreationFails(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) error {
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("zone-sc", scParameters, zones)) storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("zone-sc", scParameters, zones))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class") By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim
@ -358,19 +358,19 @@ func verifyPVCCreationFails(client clientset.Interface, namespace string, scPara
func verifyPVZoneLabels(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) { func verifyPVZoneLabels(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) {
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("zone-sc", nil, zones)) storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("zone-sc", nil, zones))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the storage class") By("Creating PVC using the storage class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)
By("Waiting for claim to be in bound phase") By("Waiting for claim to be in bound phase")
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred()) framework.ExpectNoError(err)
By("Verify zone information is present in the volume labels") By("Verify zone information is present in the volume labels")
for _, pv := range persistentvolumes { for _, pv := range persistentvolumes {