Merge pull request #42667 from vmware/e2eTestUpdate-v5

Automatic merge from submit-queue (batch tested with PRs 42667, 43923)

Adding test to perform volume operations storm

**What this PR does / why we need it**:
Adding a new test to perform a volume operations storm.
Test steps (a condensed sketch of the test body follows the list):
1. Create a storage class for thin provisioning.
2. Create 30 PVCs using the above storage class via annotation, each requesting a 2 GB volume.
3. Wait until all disks are ready and all PVs and PVCs are bound. (**CreateVolume** storm)
4. Create a pod that mounts the volumes using the PVCs created in step 2. (**AttachDisk** storm)
5. Wait for the pod status to be Running.
6. Verify all volumes are accessible and available in the pod.
7. Delete the pod.
8. Wait until the volumes get detached. (**DetachDisk** storm)
9. Delete all PVCs. This should delete all disks. (**DeleteVolume** storm)
10. Delete the storage class.
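The sketch below condenses the core of the new test (`vsphere_volume_ops_storm.go`, added below); setup, error handling, and the `AfterEach` cleanup are omitted, and `client`, `namespace`, `volume_ops_scale`, `vsp`, and `f` are assumed to be initialized as in the full test.

```go
// Condensed from the It() block of the new test. framework.WaitForPVClaimBoundPhase
// and framework.CreatePod are helpers added to the e2e framework by this PR.
By("Creating Storage Class")
scParameters := map[string]string{"diskformat": "thin"}
storageclass, err := client.StorageV1beta1().StorageClasses().Create(getVSphereStorageClassSpec("thinsc", scParameters))
Expect(err).NotTo(HaveOccurred())

By("Creating PVCs using the Storage Class")
pvclaims := make([]*v1.PersistentVolumeClaim, volume_ops_scale)
for i := range pvclaims {
	pvclaims[i] = framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, storageclass))
}

By("Waiting for all claims to be in bound phase") // CreateVolume storm
persistentvolumes := framework.WaitForPVClaimBoundPhase(client, pvclaims)

By("Creating pod to attach PVs to the node") // AttachDisk storm
pod := framework.CreatePod(client, namespace, pvclaims, false, "")

By("Verify all volumes are accessible and available in the pod")
verifyVSphereVolumesAccessible(pod, persistentvolumes, vsp)

By("Deleting pod")
framework.DeletePodWithWait(f, client, pod)

By("Waiting for volumes to be detached from the node") // DetachDisk storm
for _, pv := range persistentvolumes {
	waitForVSphereDiskToDetach(vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(pod.Spec.NodeName))
}
```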

This test will help validate the issue reported at https://github.com/vmware/kubernetes/issues/71.

**Which issue this PR fixes**
fixes #

**Special notes for your reviewer**:
Executed the test on the 1.5.3 release with `VOLUME_OPS_SCALE` set to `5`.
Will execute the test with the changes made in PR https://github.com/kubernetes/kubernetes/pull/42422, with `VOLUME_OPS_SCALE` set to `30`.
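For reference, the new test reads this knob from the environment in its `BeforeEach` and falls back to a default of 30; the relevant excerpt from the file added below:

```go
// Excerpt from the new test's BeforeEach: VOLUME_OPS_SCALE overrides the
// default number of volumes (DEFAULT_VOLUME_OPS_SCALE = 30) used by the storm test.
if os.Getenv("VOLUME_OPS_SCALE") != "" {
	volume_ops_scale, err = strconv.Atoi(os.Getenv("VOLUME_OPS_SCALE"))
	Expect(err).NotTo(HaveOccurred())
} else {
	volume_ops_scale = DEFAULT_VOLUME_OPS_SCALE
}
pvclaims = make([]*v1.PersistentVolumeClaim, volume_ops_scale)
```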

**Release note**:

```release-note
None
```

cc: @abrarshivani @BaluDontu @tusharnt @pdhamdhere @luomiao @kerneltime
Kubernetes Submit Queue 2017-03-31 15:11:17 -07:00 committed by GitHub
commit 7a03a095d2
7 changed files with 251 additions and 97 deletions


@@ -89,6 +89,7 @@ go_library(
"volumes.go",
"vsphere_utils.go",
"vsphere_volume_diskformat.go",
"vsphere_volume_ops_storm.go",
"vsphere_volume_placement.go",
],
tags = ["automanaged"],


@@ -422,25 +422,6 @@ func DeletePodWithWait(f *Framework, c clientset.Interface, pod *v1.Pod) {
Logf("Ignore \"not found\" error above. Pod %v successfully deleted", pod.Name)
}
// Create the test pod, wait for (hopefully) success, and then delete the pod.
func CreateWaitAndDeletePod(f *Framework, c clientset.Interface, ns string, claimName string) {
Logf("Creating nfs test pod")
// Make pod spec
pod := MakeWritePod(ns, claimName)
// Instantiate pod (Create)
runPod, err := c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
Expect(runPod).NotTo(BeNil())
defer DeletePodWithWait(f, c, runPod)
// Wait for the test pod to complete its lifecycle
testPodSuccessOrFail(c, ns, runPod)
}
// Sanity check for GCE testing. Verify the persistent disk attached to the node.
func VerifyGCEDiskAttached(diskName string, nodeName types.NodeName) bool {
gceCloud, err := GetGCECloud()
@@ -532,75 +513,6 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.P
}
}
// Returns a pod definition based on the namespace. The pod references the PVC's
// name.
func MakeWritePod(ns string, pvcName string) *v1.Pod {
return MakePod(ns, pvcName, true, "touch /mnt/SUCCESS && (id -G | grep -E '\\b777\\b')")
}
// Returns a pod definition based on the namespace. The pod references the PVC's
// name. A slice of BASH commands can be supplied as args to be run by the pod
func MakePod(ns string, pvcName string, isPrivileged bool, command string) *v1.Pod {
if len(command) == 0 {
command = "while true; do sleep 1; done"
}
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-tester-",
Namespace: ns,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "write-pod",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
VolumeMounts: []v1.VolumeMount{
{
Name: pvcName,
MountPath: "/mnt",
},
},
SecurityContext: &v1.SecurityContext{
Privileged: &isPrivileged,
},
},
},
RestartPolicy: v1.RestartPolicyOnFailure,
Volumes: []v1.Volume{
{
Name: pvcName,
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvcName,
},
},
},
},
},
}
}
// Define and create a pod with a mounted PV. Pod runs infinite loop until killed.
func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod {
clientPod := MakePod(ns, pvc.Name, true, "")
clientPod, err := c.CoreV1().Pods(ns).Create(clientPod)
Expect(err).NotTo(HaveOccurred())
// Verify the pod is running before returning it
err = WaitForPodRunningInNamespace(c, clientPod)
Expect(err).NotTo(HaveOccurred())
clientPod, err = c.CoreV1().Pods(ns).Get(clientPod.Name, metav1.GetOptions{})
Expect(apierrs.IsNotFound(err)).To(BeFalse())
return clientPod
}
func CreatePDWithRetry() (string, error) {
newDiskName := ""
var err error
@@ -703,3 +615,105 @@ func deletePD(pdName string) error {
return fmt.Errorf("Provider does not support volume deletion")
}
}
// Create the test pod, wait for (hopefully) success, and then delete the pod.
func CreateWaitAndDeletePod(f *Framework, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) {
Logf("Creating nfs test pod")
// Make pod spec
pod := MakeWritePod(ns, pvc)
// Instantiate pod (Create)
runPod, err := c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
Expect(runPod).NotTo(BeNil())
defer DeletePodWithWait(f, c, runPod)
// Wait for the test pod to complete its lifecycle
testPodSuccessOrFail(c, ns, runPod)
}
// Returns a pod definition based on the namespace. The pod references the PVC's
// name.
func MakeWritePod(ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod {
return MakePod(ns, []*v1.PersistentVolumeClaim{pvc}, true, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')")
}
// Returns a pod definition based on the namespace. The pod references the PVC's
// name. A slice of BASH commands can be supplied as args to be run by the pod
func MakePod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *v1.Pod {
if len(command) == 0 {
command = "while true; do sleep 1; done"
}
podSpec := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-tester-",
Namespace: ns,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "write-pod",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
SecurityContext: &v1.SecurityContext{
Privileged: &isPrivileged,
},
},
},
RestartPolicy: v1.RestartPolicyOnFailure,
},
}
var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
var volumes = make([]v1.Volume, len(pvclaims))
for index, pvclaim := range pvclaims {
volumename := fmt.Sprintf("volume%v", index+1)
volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
}
podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
podSpec.Spec.Volumes = volumes
return podSpec
}
// create pod with given claims
func CreatePod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *v1.Pod {
podSpec := MakePod(namespace, pvclaims, isPrivileged, command)
pod, err := client.CoreV1().Pods(namespace).Create(podSpec)
Expect(err).NotTo(HaveOccurred())
// Waiting for pod to be running
Expect(WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
return pod
}
// Define and create a pod with a mounted PV. Pod runs infinite loop until killed.
func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod {
return CreatePod(c, ns, []*v1.PersistentVolumeClaim{pvc}, true, "")
}
// wait until all pvcs phase set to bound
func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.PersistentVolumeClaim) []*v1.PersistentVolume {
var persistentvolumes = make([]*v1.PersistentVolume, len(pvclaims))
for index, claim := range pvclaims {
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, Poll, ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
// Get new copy of the claim
claim, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Get the bounded PV
persistentvolumes[index], err = client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
}
return persistentvolumes
}


@@ -202,7 +202,7 @@ func testVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framew
// initTestCase initializes spec resources (pv, pvc, and pod) and returns pointers to be consumed by the test
func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framework.PersistentVolumeConfig, pvcConfig framework.PersistentVolumeClaimConfig, ns, nodeName string) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
pv, pvc := framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
pod := framework.MakePod(ns, pvc.Name, true, "")
pod := framework.MakePod(ns, []*v1.PersistentVolumeClaim{pvc}, true, "")
pod.Spec.NodeName = nodeName
framework.Logf("Creating nfs client Pod %s on node %s", pod.Name, nodeName)
pod, err := c.CoreV1().Pods(ns).Create(pod)


@@ -40,7 +40,7 @@ func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *
// 2. create the nfs writer pod, test if the write was successful,
// then delete the pod and verify that it was deleted
By("Checking pod has write access to PersistentVolume")
framework.CreateWaitAndDeletePod(f, c, ns, pvc.Name)
framework.CreateWaitAndDeletePod(f, c, ns, pvc)
// 3. delete the PVC, wait for PV to become "Released"
By("Deleting the PVC to invoke the reclaim policy.")
@@ -66,7 +66,7 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string,
_, found := pvols[pvc.Spec.VolumeName]
Expect(found).To(BeTrue())
// TODO: currently a serialized test of each PV
framework.CreateWaitAndDeletePod(f, c, pvcKey.Namespace, pvcKey.Name)
framework.CreateWaitAndDeletePod(f, c, pvcKey.Namespace, pvc)
}
// 2. delete each PVC, wait for its bound PV to reach `expectedPhase`
@@ -262,7 +262,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume]", func() {
// (and test) succeed.
It("should test that a PV becomes Available and is clean after the PVC is deleted. [Volume] [Flaky]", func() {
By("Writing to the volume.")
pod := framework.MakeWritePod(ns, pvc.Name)
pod := framework.MakeWritePod(ns, pvc)
pod, err := c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPodSuccessInNamespace(c, pod.Name, ns)
@@ -279,8 +279,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume]", func() {
// If a file is detected in /mnt, fail the pod and do not restart it.
By("Verifying the mount has been cleaned.")
mount := pod.Spec.Containers[0].VolumeMounts[0].MountPath
pod = framework.MakePod(ns, pvc.Name, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
pod = framework.MakePod(ns, []*v1.PersistentVolumeClaim{pvc}, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPodSuccessInNamespace(c, pod.Name, ns)


@@ -99,7 +99,7 @@ func (t *PersistentVolumeUpgradeTest) Teardown(f *framework.Framework) {
// testPod creates a pod that consumes a pv and prints it out. The output is then verified.
func (t *PersistentVolumeUpgradeTest) testPod(f *framework.Framework, cmd string) {
pod := framework.MakePod(f.Namespace.Name, t.pvc.Name, false, cmd)
pod := framework.CreatePod(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{t.pvc}, false, cmd)
expectedOutput := []string{pvTestData}
f.TestContainerOutput("pod consumes pv", pod, 0, expectedOutput)
}


@@ -17,15 +17,16 @@ limitations under the License.
package e2e
import (
"fmt"
"path/filepath"
"strconv"
"time"
"fmt"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
k8stype "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api/v1"
@@ -332,3 +333,19 @@ func createEmptyFilesOnVSphereVolume(namespace string, podName string, filePaths
Expect(err).NotTo(HaveOccurred())
}
}
// verify volumes are attached to the node and are accessible in pod
func verifyVSphereVolumesAccessible(pod *v1.Pod, persistentvolumes []*v1.PersistentVolume, vsp *vsphere.VSphere) {
nodeName := pod.Spec.NodeName
namespace := pod.Namespace
for index, pv := range persistentvolumes {
// Verify disks are attached to the node
isAttached, err := verifyVSphereDiskAttached(vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(nodeName))
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk %v is not attached with the node", pv.Spec.VsphereVolume.VolumePath))
// Verify Volumes are accessible
filepath := filepath.Join("/mnt/", fmt.Sprintf("volume%v", index+1), "/emptyFile.txt")
_, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute)
Expect(err).NotTo(HaveOccurred())
}
}


@@ -0,0 +1,123 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"os"
"strconv"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
k8stype "k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/api/v1"
storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
)
/*
Test to perform Disk Ops storm.
Steps
1. Create a storage class for thin provisioning.
2. Create 30 PVCs using the above storage class via annotation, each requesting a 2 GB volume.
3. Wait until all disks are ready and all PVs and PVCs are bound. (CreateVolume storm)
4. Create a pod that mounts the volumes using the PVCs created in step 2. (AttachDisk storm)
5. Wait for the pod status to be Running.
6. Verify all volumes are accessible and available in the pod.
7. Delete the pod.
8. Wait until the volumes get detached. (DetachDisk storm)
9. Delete all PVCs. This should delete all disks. (DeleteVolume storm)
10. Delete the storage class.
*/
var _ = framework.KubeDescribe("vsphere volume operations storm [Volume]", func() {
f := framework.NewDefaultFramework("volume-ops-storm")
const DEFAULT_VOLUME_OPS_SCALE = 30
var (
client clientset.Interface
namespace string
storageclass *storage.StorageClass
pvclaims []*v1.PersistentVolumeClaim
persistentvolumes []*v1.PersistentVolume
err error
volume_ops_scale int
vsp *vsphere.VSphere
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
client = f.ClientSet
namespace = f.Namespace.Name
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if len(nodeList.Items) == 0 {
framework.Failf("Unable to find ready and schedulable Node")
}
if os.Getenv("VOLUME_OPS_SCALE") != "" {
volume_ops_scale, err = strconv.Atoi(os.Getenv("VOLUME_OPS_SCALE"))
Expect(err).NotTo(HaveOccurred())
} else {
volume_ops_scale = DEFAULT_VOLUME_OPS_SCALE
}
pvclaims = make([]*v1.PersistentVolumeClaim, volume_ops_scale)
vsp, err = vsphere.GetVSphere()
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
By("Deleting PVCs")
for _, claim := range pvclaims {
framework.DeletePersistentVolumeClaim(client, claim.Name, namespace)
}
By("Deleting StorageClass")
err = client.StorageV1beta1().StorageClasses().Delete(storageclass.Name, nil)
Expect(err).NotTo(HaveOccurred())
})
It("should create pod with many volumes and verify no attach call fails", func() {
By(fmt.Sprintf("Running test with VOLUME_OPS_SCALE: %v", volume_ops_scale))
By("Creating Storage Class")
scParameters := make(map[string]string)
scParameters["diskformat"] = "thin"
storageclass, err = client.StorageV1beta1().StorageClasses().Create(getVSphereStorageClassSpec("thinsc", scParameters))
Expect(err).NotTo(HaveOccurred())
By("Creating PVCs using the Storage Class")
count := 0
for count < volume_ops_scale {
pvclaims[count] = framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, storageclass))
count++
}
By("Waiting for all claims to be in bound phase")
persistentvolumes = framework.WaitForPVClaimBoundPhase(client, pvclaims)
By("Creating pod to attach PVs to the node")
pod := framework.CreatePod(client, namespace, pvclaims, false, "")
By("Verify all volumes are accessible and available in the pod")
verifyVSphereVolumesAccessible(pod, persistentvolumes, vsp)
By("Deleting pod")
framework.DeletePodWithWait(f, client, pod)
By("Waiting for volumes to be detached from the node")
for _, pv := range persistentvolumes {
waitForVSphereDiskToDetach(vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(pod.Spec.NodeName))
}
})
})