k3s/test/e2e/storage/csi_volumes.go

/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"context"
"fmt"
"regexp"
"time"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/json"
clientset "k8s.io/client-go/kubernetes"
csiv1alpha1 "k8s.io/csi-api/pkg/apis/csi/v1alpha1"
csiclient "k8s.io/csi-api/pkg/client/clientset/versioned"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/podlogs"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
"crypto/sha256"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
// Name of the node annotation that contains the JSON map of driver names to node IDs
annotationKeyNodeID = "csi.volume.kubernetes.io/nodeid"
)
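// Illustration only (the driver name and node ID below are made up, not taken from a
// real test run): a node running a driver registered as "csi-hostpath" might carry
//
//	csi.volume.kubernetes.io/nodeid: {"csi-hostpath":"worker-node-1"}
//
// i.e. a JSON object keyed by driver name whose values are the node IDs reported by
// each driver. testCSIDriverUpdate below unmarshals this map and checks that an entry
// for the driver under test is present.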
// List of testDrivers to be executed in the loop below
var csiTestDrivers = []func() drivers.TestDriver{
drivers.InitHostPathCSIDriver,
drivers.InitGcePDCSIDriver,
drivers.InitGcePDExternalCSIDriver,
drivers.InitHostV0PathCSIDriver,
}
// List of testSuites to be executed in the loop below
var csiTestSuites = []func() testsuites.TestSuite{
testsuites.InitVolumesTestSuite,
testsuites.InitVolumeIOTestSuite,
testsuites.InitVolumeModeTestSuite,
testsuites.InitSubPathTestSuite,
testsuites.InitProvisioningTestSuite,
}
func csiTunePattern(patterns []testpatterns.TestPattern) []testpatterns.TestPattern {
tunedPatterns := []testpatterns.TestPattern{}
for _, pattern := range patterns {
// Skip inline volume and pre-provisioned PV tests for CSI drivers
if pattern.VolType == testpatterns.InlineVolume || pattern.VolType == testpatterns.PreprovisionedPV {
continue
}
tunedPatterns = append(tunedPatterns, pattern)
}
return tunedPatterns
}
// This executes testSuites for CSI volumes.
var _ = utils.SIGDescribe("CSI Volumes", func() {
f := framework.NewDefaultFramework("csi-volumes")
var (
cancel context.CancelFunc
cs clientset.Interface
ns *v1.Namespace
config framework.VolumeTestConfig
)
BeforeEach(func() {
ctx, c := context.WithCancel(context.Background())
cancel = c
cs = f.ClientSet
ns = f.Namespace
config = framework.VolumeTestConfig{
Namespace: ns.Name,
Prefix: "csi",
}
// Debugging of the following tests heavily depends on the log output
// of the different containers. Therefore include all of that in log
// files (when using --report-dir, as in the CI) or the output stream
// (otherwise).
to := podlogs.LogOutput{
StatusWriter: GinkgoWriter,
}
if framework.TestContext.ReportDir == "" {
to.LogWriter = GinkgoWriter
} else {
test := CurrentGinkgoTestDescription()
reg := regexp.MustCompile("[^a-zA-Z0-9_-]+")
// We end the prefix with a slash to ensure that all logs
// end up in a directory named after the current test.
to.LogPathPrefix = framework.TestContext.ReportDir + "/" +
reg.ReplaceAllString(test.FullTestText, "_") + "/"
}
podlogs.CopyAllLogs(ctx, cs, ns.Name, to)
// pod events are something that the framework already collects itself
// after a failed test. Logging them live is only useful for interactive
// debugging, not when we collect reports.
if framework.TestContext.ReportDir == "" {
podlogs.WatchPods(ctx, cs, ns.Name, GinkgoWriter)
}
})
AfterEach(func() {
cancel()
})
for _, initDriver := range csiTestDrivers {
curDriver := initDriver()
Context(drivers.GetDriverNameWithFeatureTags(curDriver), func() {
driver := curDriver
BeforeEach(func() {
// setupDriver
drivers.SetCommonDriverParameters(driver, f, config)
driver.CreateDriver()
})
AfterEach(func() {
// Cleanup driver
driver.CleanupDriver()
})
testsuites.RunTestSuite(f, config, driver, csiTestSuites, csiTunePattern)
testCSIDriverUpdate(f, driver)
})
}
// The CSIDriverRegistry feature gate is needed for this test in Kubernetes 1.12.
Context("CSI attach test using HostPath driver [Feature:CSIDriverRegistry]", func() {
var (
cs clientset.Interface
csics csiclient.Interface
driver drivers.TestDriver
)
BeforeEach(func() {
cs = f.ClientSet
csics = f.CSIClientSet
driver = drivers.InitHostPathCSIDriver()
drivers.SetCommonDriverParameters(driver, f, config)
driver.CreateDriver()
})
AfterEach(func() {
driver.CleanupDriver()
})
tests := []struct {
name string
driverAttachable bool
driverExists bool
expectVolumeAttachment bool
}{
{
name: "non-attachable volume does not need VolumeAttachment",
driverAttachable: false,
driverExists: true,
expectVolumeAttachment: false,
},
{
name: "attachable volume needs VolumeAttachment",
driverAttachable: true,
driverExists: true,
expectVolumeAttachment: true,
},
{
name: "volume with no CSI driver needs VolumeAttachment",
driverExists: false,
expectVolumeAttachment: true,
},
}
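// Note on the last case: when no CSIDriver object is registered for the driver,
// Kubernetes falls back to the default of requiring attach, so a VolumeAttachment
// is still expected.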
for _, t := range tests {
test := t
It(test.name, func() {
if test.driverExists {
csiDriver := createCSIDriver(csics, drivers.GetUniqueDriverName(driver), test.driverAttachable)
if csiDriver != nil {
defer csics.CsiV1alpha1().CSIDrivers().Delete(csiDriver.Name, nil)
}
}
By("Creating pod")
var sc *storagev1.StorageClass
if dDriver, ok := driver.(drivers.DynamicPVTestDriver); ok {
sc = dDriver.GetDynamicProvisionStorageClass("")
}
nodeName := driver.GetDriverInfo().Config.ClientNodeName
scTest := testsuites.StorageClassTest{
Name: driver.GetDriverInfo().Name,
Provisioner: sc.Provisioner,
Parameters: sc.Parameters,
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
NodeName: nodeName,
}
// Use exec so that sleep replaces the shell and the pod can be killed quickly
class, claim, pod := startPod(cs, scTest, ns.Name, "exec sleep 6000")
if class != nil {
defer cs.StorageV1().StorageClasses().Delete(class.Name, nil)
}
if claim != nil {
defer cs.CoreV1().PersistentVolumeClaims(ns.Name).Delete(claim.Name, nil)
}
if pod != nil {
// Fully delete (=unmount) the pod before deleting CSI driver
defer framework.DeletePodWithWait(f, cs, pod)
}
if pod == nil {
return
}
err := framework.WaitForPodNameRunningInNamespace(cs, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "Failed to start pod: %v", err)
By("Checking if VolumeAttachment was created for the pod")
// Compute the expected VolumeAttachment name and check whether the object exists
handle := getVolumeHandle(cs, claim)
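// The expected object name mirrors the convention used by the Kubernetes CSI
// attacher: "csi-" followed by the hex-encoded SHA-256 of
// volumeHandle + driverName + nodeName, which is what the next two lines compute.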
attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, scTest.Provisioner, nodeName)))
attachmentName := fmt.Sprintf("csi-%x", attachmentHash)
_, err = cs.StorageV1beta1().VolumeAttachments().Get(attachmentName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
if test.expectVolumeAttachment {
framework.ExpectNoError(err, "Expected VolumeAttachment but none was found")
}
} else {
framework.ExpectNoError(err, "Failed to find VolumeAttachment")
}
}
if !test.expectVolumeAttachment {
Expect(err).To(HaveOccurred(), "Unexpected VolumeAttachment found")
}
})
}
})
})
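// createCSIDriver creates a CSIDriver object with the given name and
// attachRequired setting and returns it, failing the test if creation fails.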
func createCSIDriver(csics csiclient.Interface, name string, attachable bool) *csiv1alpha1.CSIDriver {
By("Creating CSIDriver instance")
driver := &csiv1alpha1.CSIDriver{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: csiv1alpha1.CSIDriverSpec{
AttachRequired: &attachable,
},
}
driver, err := csics.CsiV1alpha1().CSIDrivers().Create(driver)
framework.ExpectNoError(err, "Failed to create CSIDriver: %v", err)
return driver
}
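// getVolumeHandle returns the CSI volume handle of the PV bound to the given claim.
// It fails the test if the claim or PV cannot be read or if the PV is not backed by
// a CSI volume.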
func getVolumeHandle(cs clientset.Interface, claim *v1.PersistentVolumeClaim) string {
// Re-read the claim to get its latest state, including the name of the bound volume
claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
if err != nil {
framework.ExpectNoError(err, "Cannot get PVC")
return ""
}
pvName := claim.Spec.VolumeName
pv, err := cs.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
if err != nil {
framework.ExpectNoError(err, "Cannot get PV")
return ""
}
if pv.Spec.CSI == nil {
Expect(pv.Spec.CSI).NotTo(BeNil())
return ""
}
return pv.Spec.CSI.VolumeHandle
}
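// startPod creates a StorageClass and a PVC for the given StorageClassTest and
// starts a pod that mounts the claim and runs cmdline. The caller is responsible
// for deleting the returned objects (see the defers at the call sites).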
func startPod(cs clientset.Interface, t testsuites.StorageClassTest, ns string, cmdline string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
class := newStorageClass(t, ns, "")
class, err := cs.StorageV1().StorageClasses().Create(class)
framework.ExpectNoError(err, "Failed to create class : %v", err)
claim := newClaim(t, ns, "")
claim.Spec.StorageClassName = &class.Name
claim, err = cs.CoreV1().PersistentVolumeClaims(ns).Create(claim)
framework.ExpectNoError(err, "Failed to create claim: %v", err)
pod := getTestPod(t, claim, cmdline)
if len(t.NodeName) != 0 {
pod.Spec.NodeName = t.NodeName
}
pod, err = cs.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err, "Failed to create pod: %v", err)
return class, claim, pod
}
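// getTestPod builds (but does not create) a busybox pod that mounts the given
// claim at /mnt/test, runs cmdline and never restarts.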
func getTestPod(t testsuites.StorageClassTest, claim *v1.PersistentVolumeClaim, cmdline string) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-volume-tester-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "volume-tester",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", cmdline},
VolumeMounts: []v1.VolumeMount{
{
Name: "my-volume",
MountPath: "/mnt/test",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: "my-volume",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: claim.Name,
ReadOnly: false,
},
},
},
},
},
}
if len(t.NodeName) != 0 {
pod.Spec.NodeName = t.NodeName
}
return pod
}
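// testCSIDriverUpdate registers a test that writes data through the driver,
// simulates a driver update by rolling the driver DaemonSet, and then verifies
// that the previously provisioned volume is still usable and that the node ID
// annotation survives the update.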
func testCSIDriverUpdate(f *framework.Framework, driver drivers.TestDriver) {
// NOTE: this test assumes that all CSI drivers in csiTestDrivers deploy exactly one DaemonSet.
It("should work after update", func() {
cs := f.ClientSet
By("Checking the driver works before update")
var sc *storagev1.StorageClass
if dDriver, ok := driver.(drivers.DynamicPVTestDriver); ok {
sc = dDriver.GetDynamicProvisionStorageClass("")
}
nodeName := driver.GetDriverInfo().Config.ClientNodeName
scTest := testsuites.StorageClassTest{
Name: driver.GetDriverInfo().Name,
Provisioner: sc.Provisioner,
Parameters: sc.Parameters,
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
NodeName: nodeName,
}
class, claim, pod := startPod(cs, scTest, f.Namespace.Name, "echo 'test' > /mnt/test/data")
if class != nil {
defer cs.StorageV1().StorageClasses().Delete(class.Name, nil)
}
if claim != nil {
defer cs.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(claim.Name, nil)
}
if pod != nil {
// Fully delete (=unmount) the pod before deleting CSI driver
defer framework.DeletePodWithWait(f, cs, pod)
}
if pod == nil {
return
}
err := framework.WaitForPodSuccessInNamespace(cs, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "Failed to start pod")
err = framework.DeletePodWithWait(f, cs, pod)
framework.ExpectNoError(err, "Failed to delete pod")
By("Finding CSI driver deployment")
ds, err := getDriverDaemonSet(f)
framework.ExpectNoError(err, "Failed to get driver DaemonSets")
By("Updating the driver")
ds, err = cs.AppsV1().DaemonSets(f.Namespace.Name).Get(ds.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get DaemonSet")
expectedUpdatedPods := ds.Status.DesiredNumberScheduled
framework.Logf("DaemonSet with driver has %d scheduled pods", expectedUpdatedPods)
// For debugging:
framework.Logf("DaemonSet status: %+v", ds.Status)
ds, err = framework.UpdateDaemonSetWithRetries(cs, f.Namespace.Name, ds.Name, func(ds *appsv1.DaemonSet) {
// Simulate a driver update by adding a new label to the DaemonSet pod template. This triggers a rolling update.
if ds.Spec.Template.Labels == nil {
ds.Spec.Template.Labels = map[string]string{}
}
ds.Spec.Template.Labels[f.UniqueName] = ""
})
framework.ExpectNoError(err, "Failed to update DaemonSet")
By("Waiting for the update to complete")
selector := labels.SelectorFromSet(labels.Set(map[string]string{f.UniqueName: ""}))
_, err = framework.WaitForPodsWithLabelRunningReady(cs, f.Namespace.Name, selector, int(expectedUpdatedPods), framework.PodStartTimeout)
framework.ExpectNoError(err, "Failed to wait for updated DaemonSet")
By("Checking the driver works after update with the same claim")
pod2 := getTestPod(scTest, claim, "grep test < /mnt/test/data")
pod2, err = cs.CoreV1().Pods(f.Namespace.Name).Create(pod2)
framework.ExpectNoError(err, "Failed to create pod")
defer framework.DeletePodWithWait(f, cs, pod2)
err = framework.WaitForPodSuccessInNamespace(cs, pod2.Name, pod2.Namespace)
framework.ExpectNoError(err, "Failed to start pod2")
pod2, err = cs.CoreV1().Pods(pod2.Namespace).Get(pod2.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get pod")
nodeName = pod2.Spec.NodeName
err = framework.DeletePodWithWait(f, cs, pod2)
framework.ExpectNoError(err, "Failed to delete pod2")
// #71424: check that NodeID annotation is set after update
node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to read node")
ann, found := node.Annotations[annotationKeyNodeID]
Expect(found).To(BeTrue(), "annotation with NodeID not found")
var nodeIDs map[string]string
err = json.Unmarshal([]byte(ann), &nodeIDs)
framework.ExpectNoError(err, "Failed to parse NodeID json")
_, ok := nodeIDs[class.Provisioner]
Expect(ok).To(BeTrue(), "NodeID of driver not found")
// By deleting the PVC and waiting for the PV to be deleted we make sure that the volume is
// detached, since most cloud providers wait for detach before deleting and we have a finalizer on the PVC.
claim, err = cs.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Get(claim.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get PVC")
err = cs.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(claim.Name, nil)
framework.ExpectNoError(err, "Failed to delete PVC")
err = framework.WaitForPersistentVolumeDeleted(cs, claim.Spec.VolumeName, 5*time.Second, 20*time.Minute)
framework.ExpectNoError(err, "Timed out waiting for PV to delete")
})
}
// getDriverDaemonSet finds *any* DaemonSet in the framework's namespace and returns it.
// It must be called right after driver installation, when the only DaemonSet in the
// namespace is the one deployed by the driver.
func getDriverDaemonSet(f *framework.Framework) (*appsv1.DaemonSet, error) {
By("Finding CSI driver DaemonSet")
daemonSets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return nil, err
}
if len(daemonSets.Items) != 1 {
return nil, fmt.Errorf("Got %d DaemonSets in the namespace when only 1 was expected", len(daemonSets.Items))
}
return &daemonSets.Items[0], nil
}