Merge pull request #41728 from vmware/e2eTestsUpdate-v3

Automatic merge from submit-queue (batch tested with PRs 41728, 42231)

Adding new tests to e2e/vsphere_volume_placement.go

**What this PR does / why we need it**:
Adding new tests to e2e/vsphere_volume_placement.go

Below are the test descriptions and steps for each.

**Test Back-to-back pod creation/deletion with different volume sources on the same worker node**

1. Create the volumes - vmdk2 (vmdk1 is created in the test setup).
2. Create pod spec pod-SpecA with the volume path of vmdk1 and NodeSelector set to the label assigned to node1.
3. Create pod spec pod-SpecB with the volume path of vmdk2 and NodeSelector set to the label assigned to node1.
4. Create pod-A using pod-SpecA and wait for the pod to become ready.
5. Create pod-B using pod-SpecB and wait for the pod to become ready.
6. Verify the volumes are attached to the node.
7. Create an empty file on the volume to make sure the volume is accessible (perform this step on pod-A and pod-B).
8. Verify the file created in step 7 is present on the volume (perform this step on pod-A and pod-B).
9. Delete pod-A and pod-B.
10. Repeat steps 4 to 9 five times and verify that each volume's contents match what was written in earlier iterations (the core loop is sketched after this list).
11. Wait for vmdk1 and vmdk2 to be detached from the node.
12. Delete vmdk1 and vmdk2.
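
For orientation, the core of this test (steps 4 to 9, repeated five times) maps onto the new helpers roughly as sketched below. This is a condensed sketch, not a drop-in test: `vmdk1Path`, `vmdk2Path`, `podAFiles`, and `podBFiles` are placeholder names, while `f`, `c`, `ns`, `vsp`, `node1Name`, and `node1KeyValueLabel` are the suite-level variables set up in `BeforeEach`.

```go
// Rough sketch of steps 4-9, repeated five times (step 10).
// The helpers are the ones added by this PR; variable names are placeholders.
for i := 0; i < 5; i++ {
	// Steps 4-6: create pod-A and pod-B on node1 and verify their volumes attach.
	podA := createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, []string{vmdk1Path})
	podB := createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, []string{vmdk2Path})

	// Step 7: create one new empty file per pod per iteration.
	fileA := fmt.Sprintf("/mnt/volume1/podA_%v_%v.txt", ns, i+1)
	fileB := fmt.Sprintf("/mnt/volume1/podB_%v_%v.txt", ns, i+1)
	framework.CreateEmptyFileOnPod(ns, podA.Name, fileA)
	framework.CreateEmptyFileOnPod(ns, podB.Name, fileB)
	podAFiles = append(podAFiles, fileA)
	podBFiles = append(podBFiles, fileB)

	// Step 8: files from this and every earlier iteration must still be present.
	verifyFilesExistOnVSphereVolume(ns, podA.Name, podAFiles)
	verifyFilesExistOnVSphereVolume(ns, podB.Name, podBFiles)

	// Step 9: delete both pods before the next iteration.
	framework.DeletePodWithWait(f, c, podA)
	framework.DeletePodWithWait(f, c, podB)
}
```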

**Test multiple volumes from different datastores within the same pod**

1. Create volume vmdk2 on a non-default shared datastore (sketched after this list).
2. Create a pod spec with the volume paths of vmdk1 (created in the test setup on the default datastore) and vmdk2.
3. Create a pod using the spec created in step 2 and wait for the pod to become ready.
4. Verify both volumes are attached to the node on which the pod is created. Write some data to make sure the volumes are accessible.
5. Delete the pod.
6. Wait for vmdk1 and vmdk2 to be detached from the node.
7. Create a pod using the spec created in step 2 and wait for the pod to become ready.
8. Verify both volumes are attached to the node on which the pod is created. Verify the volume contents match the content written in step 4.
9. Delete the pod.
10. Wait for vmdk1 and vmdk2 to be detached from the node.
11. Delete vmdk1 and vmdk2.
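
Step 1 is the part that differs from the same-datastore case: vmdk2 is created through explicit `VolumeOptions` pointing at the second shared datastore, taken from the `VSPHERE_SECOND_SHARED_DATASTORE` environment variable. A minimal sketch, reusing the suite's existing `createVSphereVolume` helper and the `vsp` cloud provider handle from the test setup (`vmdk2Path` is a placeholder name):

```go
// Sketch of step 1: create vmdk2 on the non-default shared datastore.
volumeOptions := new(vsphere.VolumeOptions)
volumeOptions.CapacityKB = 2097152 // 2 GiB
volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
volumeOptions.Datastore = os.Getenv("VSPHERE_SECOND_SHARED_DATASTORE")

vmdk2Path, err := createVSphereVolume(vsp, volumeOptions)
Expect(err).NotTo(HaveOccurred())
```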

**Test multiple volumes from same datastore within the same pod**

1. Create the volumes - vmdk2 (vmdk1 is created in the test setup).
2. Create a pod spec with the volume paths of vmdk1 (created in the test setup) and vmdk2 (see the sketch after this list).
3. Create a pod using the spec created in step 2 and wait for the pod to become ready.
4. Verify both volumes are attached to the node on which the pod is created. Write some data to make sure the volumes are accessible.
5. Delete the pod.
6. Wait for vmdk1 and vmdk2 to be detached from the node.
7. Create a pod using the spec created in step 2 and wait for the pod to become ready.
8. Verify both volumes are attached to the node on which the pod is created. Verify the volume contents match the content written in step 4.
9. Delete the pod.
10. Wait for vmdk1 and vmdk2 to be detached from the node.
11. Delete vmdk1 and vmdk2.
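
Steps 2 to 4 come down to handing both volume paths to a single pod spec; the new `getVSpherePodSpecWithVolumePaths` helper mounts them at `/mnt/volume1` and `/mnt/volume2`. A condensed sketch of the flow, using the suite-level variables and the helpers added in this PR (`vmdk1Path` and `vmdk2Path` are placeholder names):

```go
// Steps 2-4: one pod, two vSphere volumes from the same datastore.
pod := createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel,
	[]string{vmdk1Path, vmdk2Path})

// Write one file per mounted volume and verify both are readable from the pod.
volumeFiles := []string{
	fmt.Sprintf("/mnt/volume1/%v_1.txt", ns),
	fmt.Sprintf("/mnt/volume2/%v_1.txt", ns),
}
createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles)

// Steps 5-6: delete the pod and wait for both vmdks to detach from the node.
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node1Name, []string{vmdk1Path, vmdk2Path})
```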


**Which issue this PR fixes** 
fixes #

**Special notes for your reviewer**:
Executed the tests against the K8s v1.5.3 release.

**Release note**:

```release-note
NONE
```

cc: @kerneltime @abrarshivani @BaluDontu @tusharnt @pdhamdhere
Kubernetes Submit Queue 2017-03-27 11:54:25 -07:00 committed by GitHub
commit c81e99d98d
5 changed files with 393 additions and 168 deletions


@ -100,7 +100,7 @@ func PVPVCMapCleanup(c clientset.Interface, ns string, pvols PVMap, claims PVCMa
func DeletePersistentVolume(c clientset.Interface, pvName string) {
if c != nil && len(pvName) > 0 {
Logf("Deleting PersistentVolume %v", pvName)
err := c.Core().PersistentVolumes().Delete(pvName, nil)
err := c.CoreV1().PersistentVolumes().Delete(pvName, nil)
if err != nil && !apierrs.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
}
@ -111,7 +111,7 @@ func DeletePersistentVolume(c clientset.Interface, pvName string) {
func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns string) {
if c != nil && len(pvcName) > 0 {
Logf("Deleting PersistentVolumeClaim %v", pvcName)
err := c.Core().PersistentVolumeClaims(ns).Delete(pvcName, nil)
err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvcName, nil)
if err != nil && !apierrs.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
}
@ -128,7 +128,7 @@ func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent
DeletePersistentVolumeClaim(c, pvc.Name, ns)
// Check that the PVC is really deleted.
pvc, err := c.Core().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
Expect(apierrs.IsNotFound(err)).To(BeTrue())
// Wait for the PV's phase to return to be `expectPVPhase`
@ -137,7 +137,7 @@ func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent
Expect(err).NotTo(HaveOccurred())
// examine the pv's ClaimRef and UID and compare to expected values
pv, err = c.Core().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
cr := pv.Spec.ClaimRef
if expectPVPhase == v1.VolumeAvailable {
@ -162,7 +162,7 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap,
var boundPVs, deletedPVCs int
for pvName := range pvols {
pv, err := c.Core().PersistentVolumes().Get(pvName, metav1.GetOptions{})
pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
Expect(apierrs.IsNotFound(err)).To(BeFalse())
cr := pv.Spec.ClaimRef
// if pv is bound then delete the pvc it is bound to
@ -173,7 +173,7 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap,
pvcKey := makePvcKey(ns, cr.Name)
_, found := claims[pvcKey]
Expect(found).To(BeTrue())
pvc, err := c.Core().PersistentVolumeClaims(ns).Get(cr.Name, metav1.GetOptions{})
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(cr.Name, metav1.GetOptions{})
Expect(apierrs.IsNotFound(err)).To(BeFalse())
DeletePVCandValidatePV(c, ns, pvc, pv, expectPVPhase)
delete(claims, pvcKey)
@ -186,7 +186,7 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap,
// create the PV resource. Fails test on error.
func createPV(c clientset.Interface, pv *v1.PersistentVolume) *v1.PersistentVolume {
pv, err := c.Core().PersistentVolumes().Create(pv)
pv, err := c.CoreV1().PersistentVolumes().Create(pv)
Expect(err).NotTo(HaveOccurred())
return pv
}
@ -194,7 +194,7 @@ func createPV(c clientset.Interface, pv *v1.PersistentVolume) *v1.PersistentVolu
// create the PVC resource. Fails test on error.
func CreatePVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
pvc, err := c.Core().PersistentVolumeClaims(ns).Create(pvc)
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
Expect(err).NotTo(HaveOccurred())
return pvc
}
@ -316,11 +316,11 @@ func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, p
Expect(err).NotTo(HaveOccurred())
// Re-get the pv and pvc objects
pv, err = c.Core().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Re-get the pvc and
pvc, err = c.Core().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// The pv and pvc are both bound, but to each other?
@ -354,7 +354,7 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
}
Expect(err).NotTo(HaveOccurred())
pv, err := c.Core().PersistentVolumes().Get(pvName, metav1.GetOptions{})
pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
if cr := pv.Spec.ClaimRef; cr != nil && len(cr.Name) > 0 {
// Assert bound pvc is a test resource. Failing assertion could
@ -391,7 +391,7 @@ func DeletePodWithWait(f *Framework, c clientset.Interface, pod *v1.Pod) {
return
}
Logf("Deleting pod %v", pod.Name)
err := c.Core().Pods(pod.Namespace).Delete(pod.Name, nil)
err := c.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil)
if err != nil {
if apierrs.IsNotFound(err) {
return // assume pod was deleted already
@ -418,7 +418,7 @@ func CreateWaitAndDeletePod(f *Framework, c clientset.Interface, ns string, clai
pod := MakeWritePod(ns, claimName)
// Instantiate pod (Create)
runPod, err := c.Core().Pods(ns).Create(pod)
runPod, err := c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
Expect(runPod).NotTo(BeNil())
@ -575,13 +575,13 @@ func MakePod(ns string, pvcName string, isPrivileged bool, command string) *v1.P
// Define and create a pod with a mounted PV. Pod runs infinite loop until killed.
func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod {
clientPod := MakePod(ns, pvc.Name, true, "")
clientPod, err := c.Core().Pods(ns).Create(clientPod)
clientPod, err := c.CoreV1().Pods(ns).Create(clientPod)
Expect(err).NotTo(HaveOccurred())
// Verify the pod is running before returning it
err = WaitForPodRunningInNamespace(c, clientPod)
Expect(err).NotTo(HaveOccurred())
clientPod, err = c.Core().Pods(ns).Get(clientPod.Name, metav1.GetOptions{})
clientPod, err = c.CoreV1().Pods(ns).Get(clientPod.Name, metav1.GetOptions{})
Expect(apierrs.IsNotFound(err)).To(BeFalse())
return clientPod
}


@ -5484,3 +5484,9 @@ func (f *Framework) NewTestPod(name string, requests v1.ResourceList, limits v1.
},
}
}
// CreateEmptyFileOnPod creates an empty file at the given path on the pod.
func CreateEmptyFileOnPod(namespace string, podName string, filePath string) error {
_, err := RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/sh", "-c", fmt.Sprintf("touch %s", filePath))
return err
}


@ -26,6 +26,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api/v1"
storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
@ -223,6 +224,7 @@ func getVSphereClaimSpecWithStorageClassAnnotation(ns string, storageclass *stor
return claim
}
// func to get pod spec with given volume claim, node selector labels and command
func getVSpherePodSpecWithClaim(claimName string, nodeSelectorKV map[string]string, command string) *v1.Pod {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
@ -266,3 +268,67 @@ func getVSpherePodSpecWithClaim(claimName string, nodeSelectorKV map[string]stri
}
return pod
}
// func to get pod spec with given volume paths, node selector labels and container commands
func getVSpherePodSpecWithVolumePaths(volumePaths []string, keyValuelabel map[string]string, commands []string) *v1.Pod {
var volumeMounts []v1.VolumeMount
var volumes []v1.Volume
for index, volumePath := range volumePaths {
name := fmt.Sprintf("volume%v", index+1)
volumeMounts = append(volumeMounts, v1.VolumeMount{Name: name, MountPath: "/mnt/" + name})
vsphereVolume := new(v1.VsphereVirtualDiskVolumeSource)
vsphereVolume.VolumePath = volumePath
vsphereVolume.FSType = "ext4"
volumes = append(volumes, v1.Volume{Name: name})
volumes[index].VolumeSource.VsphereVolume = vsphereVolume
}
if commands == nil || len(commands) == 0 {
commands = []string{
"/bin/sh",
"-c",
"while true; do sleep 2; done",
}
}
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "vsphere-e2e-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "vsphere-e2e-container-" + string(uuid.NewUUID()),
Image: "gcr.io/google_containers/busybox:1.24",
Command: commands,
VolumeMounts: volumeMounts,
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: volumes,
},
}
if keyValuelabel != nil {
pod.Spec.NodeSelector = keyValuelabel
}
return pod
}
func verifyFilesExistOnVSphereVolume(namespace string, podName string, filePaths []string) {
for _, filePath := range filePaths {
_, err := framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/ls", filePath)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("failed to verify file: %q on the pod: %q", filePath, podName))
}
}
func createEmptyFilesOnVSphereVolume(namespace string, podName string, filePaths []string) {
for _, filePath := range filePaths {
err := framework.CreateEmptyFileOnPod(namespace, podName, filePath)
Expect(err).NotTo(HaveOccurred())
}
}


@ -88,19 +88,19 @@ var _ = framework.KubeDescribe("Volume Disk Format [Volumes]", func() {
It("verify disk format type - eagerzeroedthick is honored for dynamically provisioned pv using storageclass", func() {
By("Invoking Test for diskformat: eagerzeroedthick")
invokeTest(client, namespace, nodeName, nodeKeyValueLabel, "eagerzeroedthick")
invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "eagerzeroedthick")
})
It("verify disk format type - zeroedthick is honored for dynamically provisioned pv using storageclass", func() {
By("Invoking Test for diskformat: zeroedthick")
invokeTest(client, namespace, nodeName, nodeKeyValueLabel, "zeroedthick")
invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "zeroedthick")
})
It("verify disk format type - thin is honored for dynamically provisioned pv using storageclass", func() {
By("Invoking Test for diskformat: thin")
invokeTest(client, namespace, nodeName, nodeKeyValueLabel, "thin")
invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "thin")
})
})
func invokeTest(client clientset.Interface, namespace string, nodeName string, nodeKeyValueLabel map[string]string, diskFormat string) {
func invokeTest(f *framework.Framework, client clientset.Interface, namespace string, nodeName string, nodeKeyValueLabel map[string]string, diskFormat string) {
framework.Logf("Invoking Test for DiskFomat: %s", diskFormat)
scParameters := make(map[string]string)
@ -152,8 +152,11 @@ func invokeTest(client clientset.Interface, namespace string, nodeName string, n
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
Expect(verifyDiskFormat(nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat)).To(BeTrue(), "DiskFormat Verification Failed")
var volumePaths []string
volumePaths = append(volumePaths, pv.Spec.VsphereVolume.VolumePath)
By("Delete pod and wait for volume to be detached from node")
deletePodAndWaitForVolumeToDetach(client, namespace, vsp, nodeName, pod, pv.Spec.VsphereVolume.VolumePath)
deletePodAndWaitForVolumeToDetach(f, client, pod, vsp, nodeName, volumePaths)
}


@ -17,9 +17,13 @@ limitations under the License.
package e2e
import (
"fmt"
"os"
"strconv"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/pkg/api/v1"
@ -28,65 +32,42 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
)
var _ = framework.KubeDescribe("Volume Placement [Feature:Volume]", func() {
var _ = framework.KubeDescribe("Volume Placement [Volume]", func() {
f := framework.NewDefaultFramework("volume-placement")
var (
c clientset.Interface
ns string
vsp *vsphere.VSphere
volumePath string
volumePaths []string
node1Name string
node1LabelValue string
node1KeyValueLabel map[string]string
node2Name string
node2LabelValue string
node2KeyValueLabel map[string]string
isNodeLabeled bool
isNodeLabeled bool
err error
)
/*
Steps
1. Create VMDK volume
2. Find two nodes with the status available and ready for scheduling.
3. Add labels to both nodes. - (vsphere_e2e_label: Random UUID)
*/
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
c = f.ClientSet
ns = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
By("creating vmdk")
vsp, err := vsphere.GetVSphere()
Expect(err).NotTo(HaveOccurred())
volumePath, err = createVSphereVolume(vsp, nil)
Expect(err).NotTo(HaveOccurred())
if !isNodeLabeled {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if len(nodeList.Items) != 0 {
node1Name = nodeList.Items[0].Name
node2Name = nodeList.Items[1].Name
} else {
framework.Failf("Unable to find ready and schedulable Node")
}
node1LabelValue = "vsphere_e2e_" + string(uuid.NewUUID())
node1KeyValueLabel = make(map[string]string)
node1KeyValueLabel["vsphere_e2e_label"] = node1LabelValue
framework.AddOrUpdateLabelOnNode(c, node1Name, "vsphere_e2e_label", node1LabelValue)
node2LabelValue = "vsphere_e2e_" + string(uuid.NewUUID())
node2KeyValueLabel = make(map[string]string)
node2KeyValueLabel["vsphere_e2e_label"] = node2LabelValue
framework.AddOrUpdateLabelOnNode(c, node2Name, "vsphere_e2e_label", node2LabelValue)
node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(c, ns)
isNodeLabeled = true
}
By("creating vmdk")
vsp, err = vsphere.GetVSphere()
Expect(err).NotTo(HaveOccurred())
volumePath, err := createVSphereVolume(vsp, nil)
Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath)
})
AfterEach(func() {
for _, volumePath := range volumePaths {
vsp.DeleteVolume(volumePath)
}
volumePaths = nil
})
/*
@ -94,143 +75,312 @@ var _ = framework.KubeDescribe("Volume Placement [Feature:Volume]", func() {
1. Remove labels assigned to node 1 and node 2
2. Delete VMDK volume
*/
AddCleanupAction(func() {
if len(node1LabelValue) > 0 {
if len(node1KeyValueLabel) > 0 {
framework.RemoveLabelOffNode(c, node1Name, "vsphere_e2e_label")
}
if len(node2LabelValue) > 0 {
if len(node2KeyValueLabel) > 0 {
framework.RemoveLabelOffNode(c, node2Name, "vsphere_e2e_label")
}
if len(volumePath) > 0 {
vsp, err := vsphere.GetVSphere()
Expect(err).NotTo(HaveOccurred())
vsp.DeleteVolume(volumePath)
}
})
/*
Steps
1. Create pod Spec with volume path of the vmdk and NodeSelector set to label assigned to node1.
2. Create pod and wait for pod to become ready.
3. Verify volume is attached to the node1.
4. Create empty file on the volume to verify volume is writable.
5. Verify newly created file and previously created files exist on the volume.
6. Delete pod.
7. Wait for volume to be detached from the node1.
8. Repeat Step 1 to 7 and make sure back to back pod creation on same worker node with the same volume is working as expected.
*/
It("should create and delete pod with the same volume source on the same worker node", func() {
var volumeFiles []string
pod := createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod
newEmptyFileName := fmt.Sprintf("/mnt/volume1/%v_1.txt", ns)
volumeFiles = append(volumeFiles, newEmptyFileName)
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node1Name, volumePaths)
By(fmt.Sprintf("Creating pod on the same node: %v", node1Name))
pod = createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod
newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns)
volumeFiles = append(volumeFiles, newEmptyFileName)
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node1Name, volumePaths)
})
framework.KubeDescribe("provision pod on node with matching labels", func() {
/*
Steps
/*
Steps
1. Create pod Spec with volume path of the vmdk1 and NodeSelector set to node1's label.
2. Create pod and wait for POD to become ready.
3. Verify volume is attached to the node1.
4. Create empty file on the volume to verify volume is writable.
5. Verify newly created file and previously created files exist on the volume.
6. Delete pod.
7. Wait for volume to be detached from the node1.
8. Create pod Spec with volume path of the vmdk1 and NodeSelector set to node2's label.
9. Create pod and wait for pod to become ready.
10. Verify volume is attached to the node2.
11. Create empty file on the volume to verify volume is writable.
12. Verify newly created file and previously created files exist on the volume.
13. Delete pod.
*/
1. Create POD Spec with volume path of the vmdk and NodeSelector set to label assigned to node1.
2. Create POD and wait for POD to become ready.
3. Verify volume is attached to the node1.
4. Delete POD.
5. Wait for volume to be detached from the node1.
6. Repeat Step 1 to 5 and make sure back to back pod creation on same worker node with the same volume is working as expected.
It("should create and delete pod with the same volume source attach/detach to different worker nodes", func() {
var volumeFiles []string
pod := createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod
newEmptyFileName := fmt.Sprintf("/mnt/volume1/%v_1.txt", ns)
volumeFiles = append(volumeFiles, newEmptyFileName)
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node1Name, volumePaths)
*/
By(fmt.Sprintf("Creating pod on the another node: %v", node2Name))
pod = createPodWithVolumeAndNodeSelector(c, ns, vsp, node2Name, node2KeyValueLabel, volumePaths)
It("should create and delete pod with the same volume source on the same worker node", func() {
pod := createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePath)
deletePodAndWaitForVolumeToDetach(c, ns, vsp, node1Name, pod, volumePath)
newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns)
volumeFiles = append(volumeFiles, newEmptyFileName)
// Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node2Name, volumePaths)
})
By("Creating pod on the same node: " + node1Name)
pod = createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePath)
deletePodAndWaitForVolumeToDetach(c, ns, vsp, node1Name, pod, volumePath)
})
/*
Test multiple volumes from same datastore within the same pod
1. Create volumes - vmdk2
2. Create pod Spec with volume path of vmdk1 (vmdk1 is created in test setup) and vmdk2.
3. Create pod using spec created in step-2 and wait for pod to become ready.
4. Verify both volumes are attached to the node on which pod are created. Write some data to make sure volume are accessible.
5. Delete pod.
6. Wait for vmdk1 and vmdk2 to be detached from node.
7. Create pod using spec created in step-2 and wait for pod to become ready.
8. Verify both volumes are attached to the node on which PODs are created. Verify volume contents are matching with the content written in step 4.
9. Delete POD.
10. Wait for vmdk1 and vmdk2 to be detached from node.
*/
/*
Steps
It("should create and delete pod with multiple volumes from same datastore", func() {
By("creating another vmdk")
volumePath, err := createVSphereVolume(vsp, nil)
Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath)
1. Create POD Spec with volume path of the vmdk and NodeSelector set to node1's label.
2. Create POD and wait for POD to become ready.
3. Verify volume is attached to the node1.
4. Delete POD.
5. Wait for volume to be detached from the node1.
6. Create POD Spec with volume path of the vmdk and NodeSelector set to node2's label.
7. Create POD and wait for POD to become ready.
8. Verify volume is attached to the node2.
9. Delete POD.
*/
By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
pod := createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod
volumeFiles := []string{
fmt.Sprintf("/mnt/volume1/%v_1.txt", ns),
fmt.Sprintf("/mnt/volume2/%v_1.txt", ns),
}
createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node1Name, volumePaths)
By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
pod = createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod
newEmptyFilesNames := []string{
fmt.Sprintf("/mnt/volume1/%v_2.txt", ns),
fmt.Sprintf("/mnt/volume2/%v_2.txt", ns),
}
volumeFiles = append(volumeFiles, newEmptyFilesNames[0])
volumeFiles = append(volumeFiles, newEmptyFilesNames[1])
createAndVerifyFilesOnVolume(ns, pod.Name, newEmptyFilesNames, volumeFiles)
})
It("should create and delete pod with the same volume source attach/detach to different worker nodes", func() {
pod := createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePath)
deletePodAndWaitForVolumeToDetach(c, ns, vsp, node1Name, pod, volumePath)
/*
Test multiple volumes from different datastore within the same pod
1. Create volumes - vmdk2 on non default shared datastore.
2. Create pod Spec with volume path of vmdk1 (vmdk1 is created in test setup on default datastore) and vmdk2.
3. Create pod using spec created in step-2 and wait for pod to become ready.
4. Verify both volumes are attached to the node on which pod are created. Write some data to make sure volume are accessible.
5. Delete pod.
6. Wait for vmdk1 and vmdk2 to be detached from node.
7. Create pod using spec created in step-2 and wait for pod to become ready.
8. Verify both volumes are attached to the node on which PODs are created. Verify volume contents are matching with the content written in step 4.
9. Delete POD.
10. Wait for vmdk1 and vmdk2 to be detached from node.
*/
It("should create and delete pod with multiple volumes from different datastore", func() {
By("creating another vmdk on non default shared datastore")
var volumeOptions *vsphere.VolumeOptions
volumeOptions = new(vsphere.VolumeOptions)
volumeOptions.CapacityKB = 2097152
volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
volumeOptions.Datastore = os.Getenv("VSPHERE_SECOND_SHARED_DATASTORE")
volumePath, err := createVSphereVolume(vsp, volumeOptions)
Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath)
By("Creating pod on the another node: " + node2Name)
pod = createPodWithVolumeAndNodeSelector(c, ns, vsp, node2Name, node2KeyValueLabel, volumePath)
deletePodAndWaitForVolumeToDetach(c, ns, vsp, node2Name, pod, volumePath)
})
By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
pod := createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod
volumeFiles := []string{
fmt.Sprintf("/mnt/volume1/%v_1.txt", ns),
fmt.Sprintf("/mnt/volume2/%v_1.txt", ns),
}
createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node1Name, volumePaths)
By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
pod = createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod
newEmptyFileNames := []string{
fmt.Sprintf("/mnt/volume1/%v_2.txt", ns),
fmt.Sprintf("/mnt/volume2/%v_2.txt", ns),
}
volumeFiles = append(volumeFiles, newEmptyFileNames[0])
volumeFiles = append(volumeFiles, newEmptyFileNames[1])
createAndVerifyFilesOnVolume(ns, pod.Name, newEmptyFileNames, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node1Name, volumePaths)
})
/*
Test Back-to-back pod creation/deletion with different volume sources on the same worker node
1. Create volumes - vmdk2
2. Create pod Spec - pod-SpecA with volume path of vmdk1 and NodeSelector set to label assigned to node1.
3. Create pod Spec - pod-SpecB with volume path of vmdk2 and NodeSelector set to label assigned to node1.
4. Create pod-A using pod-SpecA and wait for pod to become ready.
5. Create pod-B using pod-SpecB and wait for POD to become ready.
6. Verify volumes are attached to the node.
7. Create empty file on the volume to make sure volume is accessible. (Perform this step on pod-A and pod-B)
8. Verify file created in step 5 is present on the volume. (perform this step on pod-A and pod-B)
9. Delete pod-A and pod-B
10. Repeatedly (5 times) perform step 4 to 9 and verify associated volume's content is matching.
11. Wait for vmdk1 and vmdk2 to be detached from node.
*/
It("test back to back pod creation and deletion with different volume sources on the same worker node", func() {
var (
podA *v1.Pod
podB *v1.Pod
testvolumePathsPodA []string
testvolumePathsPodB []string
podAFiles []string
podBFiles []string
)
defer func() {
By("clean up undeleted pods")
framework.DeletePodWithWait(f, c, podA)
framework.DeletePodWithWait(f, c, podB)
By(fmt.Sprintf("wait for volumes to be detached from the node: %v", node1Name))
for _, volumePath := range volumePaths {
waitForVSphereDiskToDetach(vsp, volumePath, types.NodeName(node1Name))
}
}()
testvolumePathsPodA = append(testvolumePathsPodA, volumePaths[0])
// Create another VMDK Volume
By("creating another vmdk")
volumePath, err := createVSphereVolume(vsp, nil)
Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath)
testvolumePathsPodB = append(testvolumePathsPodB, volumePath)
for index := 0; index < 5; index++ {
By(fmt.Sprintf("Creating pod-A on the node: %v with volume: %v", node1Name, testvolumePathsPodA[0]))
podA = createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, testvolumePathsPodA)
By(fmt.Sprintf("Creating pod-B on the node: %v with volume: %v", node1Name, testvolumePathsPodB[0]))
podB = createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, testvolumePathsPodB)
podAFileName := fmt.Sprintf("/mnt/volume1/podA_%v_%v.txt", ns, index+1)
podBFileName := fmt.Sprintf("/mnt/volume1/podB_%v_%v.txt", ns, index+1)
podAFiles = append(podAFiles, podAFileName)
podBFiles = append(podBFiles, podBFileName)
// Create empty files on the mounted volumes on the pod to verify volume is writable
By("Creating empty file on volume mounted on pod-A")
framework.CreateEmptyFileOnPod(ns, podA.Name, podAFileName)
By("Creating empty file volume mounted on pod-B")
framework.CreateEmptyFileOnPod(ns, podB.Name, podBFileName)
// Verify newly and previously created files present on the volume mounted on the pod
By("Verify newly Created file and previously created files present on volume mounted on pod-A")
verifyFilesExistOnVSphereVolume(ns, podA.Name, podAFiles)
By("Verify newly Created file and previously created files present on volume mounted on pod-B")
verifyFilesExistOnVSphereVolume(ns, podB.Name, podBFiles)
By("Deleting pod-A")
framework.DeletePodWithWait(f, c, podA)
By("Deleting pod-B")
framework.DeletePodWithWait(f, c, podB)
}
})
})
func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace string, vsp *vsphere.VSphere, nodeName string, nodeKeyValueLabel map[string]string, volumePath string) *v1.Pod {
func testSetupVolumePlacement(client clientset.Interface, namespace string) (node1Name string, node1KeyValueLabel map[string]string, node2Name string, node2KeyValueLabel map[string]string) {
nodes := framework.GetReadySchedulableNodesOrDie(client)
if len(nodes.Items) < 2 {
framework.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
}
node1Name = nodes.Items[0].Name
node2Name = nodes.Items[1].Name
node1LabelValue := "vsphere_e2e_" + string(uuid.NewUUID())
node1KeyValueLabel = make(map[string]string)
node1KeyValueLabel["vsphere_e2e_label"] = node1LabelValue
framework.AddOrUpdateLabelOnNode(client, node1Name, "vsphere_e2e_label", node1LabelValue)
node2LabelValue := "vsphere_e2e_" + string(uuid.NewUUID())
node2KeyValueLabel = make(map[string]string)
node2KeyValueLabel["vsphere_e2e_label"] = node2LabelValue
framework.AddOrUpdateLabelOnNode(client, node2Name, "vsphere_e2e_label", node2LabelValue)
return node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel
}
func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace string, vsp *vsphere.VSphere, nodeName string, nodeKeyValueLabel map[string]string, volumePaths []string) *v1.Pod {
var pod *v1.Pod
var err error
By("Creating pod on the node: " + nodeName)
podspec := getPodSpec(volumePath, nodeKeyValueLabel, nil)
By(fmt.Sprintf("Creating pod on the node: %v", nodeName))
podspec := getVSpherePodSpecWithVolumePaths(volumePaths, nodeKeyValueLabel, nil)
pod, err = client.CoreV1().Pods(namespace).Create(podspec)
Expect(err).NotTo(HaveOccurred())
By("Waiting for pod to be ready")
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
By("Verify volume is attached to the node: " + nodeName)
isAttached, err := verifyVSphereDiskAttached(vsp, volumePath, types.NodeName(nodeName))
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), "disk is not attached with the node")
By(fmt.Sprintf("Verify volume is attached to the node:%v", nodeName))
for _, volumePath := range volumePaths {
isAttached, err := verifyVSphereDiskAttached(vsp, volumePath, types.NodeName(nodeName))
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), "disk:"+volumePath+" is not attached with the node")
}
return pod
}
func deletePodAndWaitForVolumeToDetach(client clientset.Interface, namespace string, vsp *vsphere.VSphere, nodeName string, pod *v1.Pod, volumePath string) {
var err error
func createAndVerifyFilesOnVolume(namespace string, podname string, newEmptyfilesToCreate []string, filesToCheck []string) {
// Create empty files on the mounted volumes on the pod to verify volume is writable
By(fmt.Sprintf("Creating empty file on volume mounted on: %v", podname))
createEmptyFilesOnVSphereVolume(namespace, podname, newEmptyfilesToCreate)
// Verify newly and previously created files present on the volume mounted on the pod
By(fmt.Sprintf("Verify newly Created file and previously created files present on volume mounted on: %v", podname))
verifyFilesExistOnVSphereVolume(namespace, podname, filesToCheck)
}
func deletePodAndWaitForVolumeToDetach(f *framework.Framework, c clientset.Interface, pod *v1.Pod, vsp *vsphere.VSphere, nodeName string, volumePaths []string) {
By("Deleting pod")
err = client.CoreV1().Pods(namespace).Delete(pod.Name, nil)
Expect(err).NotTo(HaveOccurred())
framework.DeletePodWithWait(f, c, pod)
By("Waiting for volume to be detached from the node")
waitForVSphereDiskToDetach(vsp, volumePath, types.NodeName(nodeName))
}
func getPodSpec(volumePath string, keyValuelabel map[string]string, commands []string) *v1.Pod {
if commands == nil || len(commands) == 0 {
commands = make([]string, 3)
commands[0] = "/bin/sh"
commands[1] = "-c"
commands[2] = "while true ; do sleep 2 ; done "
}
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "vsphere-e2e-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "vsphere-e2e-container-" + string(uuid.NewUUID()),
Image: "gcr.io/google_containers/busybox:1.24",
Command: commands,
VolumeMounts: []v1.VolumeMount{
{
Name: "vsphere-volume",
MountPath: "/mnt/vsphere-volume",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: "vsphere-volume",
VolumeSource: v1.VolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: volumePath,
FSType: "ext4",
},
},
},
},
},
}
if keyValuelabel != nil {
pod.Spec.NodeSelector = keyValuelabel
}
return pod
for _, volumePath := range volumePaths {
waitForVSphereDiskToDetach(vsp, volumePath, types.NodeName(nodeName))
}
}