From abf8df7543cf38128aa906ab4a0284f882cdae42 Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Wed, 20 Feb 2019 15:42:12 -0500 Subject: [PATCH 1/2] clean up duplication --- test/e2e/storage/csi_mock_volume_test.go | 280 +++++++++++++++++++++++ test/e2e/storage/csi_volumes.go | 228 ------------------ test/e2e/storage/volume_limits.go | 1 - 3 files changed, 280 insertions(+), 229 deletions(-) create mode 100644 test/e2e/storage/csi_mock_volume_test.go diff --git a/test/e2e/storage/csi_mock_volume_test.go b/test/e2e/storage/csi_mock_volume_test.go new file mode 100644 index 0000000000..664300df40 --- /dev/null +++ b/test/e2e/storage/csi_mock_volume_test.go @@ -0,0 +1,280 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "crypto/sha256" + "fmt" + "time" + + "k8s.io/api/core/v1" + storage "k8s.io/api/storage/v1" + storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/drivers" + "k8s.io/kubernetes/test/e2e/storage/testsuites" + "k8s.io/kubernetes/test/e2e/storage/utils" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +type cleanupFuncs func() + +var _ = utils.SIGDescribe("CSI Mock volumes", func() { + type mockDriverSetup struct { + cs clientset.Interface + config *testsuites.PerTestConfig + testCleanups []cleanupFuncs + pods []*v1.Pod + pvcs []*v1.PersistentVolumeClaim + sc map[string]*storage.StorageClass + driver testsuites.TestDriver + provisioner string + } + var m mockDriverSetup + var attachable bool + var deployCRD bool + var podInfoVersion *string + var scName string + f := framework.NewDefaultFramework("csi-mock-volumes") + + init := func() { + m = mockDriverSetup{cs: f.ClientSet} + csics := f.CSIClientSet + var err error + + m.driver = drivers.InitMockCSIDriver(deployCRD, attachable, podInfoVersion) + config, testCleanup := m.driver.PrepareTest(f) + m.testCleanups = append(m.testCleanups, testCleanup) + m.config = config + + if deployCRD { + err = waitForCSIDriver(csics, m.config.GetUniqueDriverName()) + framework.ExpectNoError(err, "Failed to get CSIDriver : %v", err) + m.testCleanups = append(m.testCleanups, func() { + destroyCSIDriver(csics, m.config.GetUniqueDriverName()) + }) + } + } + + createPod := func() (*storage.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { + By("Creating pod") + var sc *storagev1.StorageClass + if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok { + sc = dDriver.GetDynamicProvisionStorageClass(m.config, "") + } + m.provisioner = sc.Provisioner + nodeName := m.config.ClientNodeName + scTest := testsuites.StorageClassTest{ + Name: m.driver.GetDriverInfo().Name, + Provisioner: sc.Provisioner, + Parameters: sc.Parameters, + ClaimSize: "1Gi", + ExpectedSize: "1Gi", + } + if scName != "" { + scTest.StorageClassName = scName + } + nodeSelection := testsuites.NodeSelection{ + // The mock driver only works when everything runs on a single node. + Name: nodeName, + } + class, claim, pod := startPausePod(f.ClientSet, scTest, nodeSelection, f.Namespace.Name) + if class != nil { + m.sc[class.Name] = class + } + if claim != nil { + m.pvcs = append(m.pvcs, claim) + } + if pod != nil { + m.pods = append(m.pods, pod) + } + return class, claim, pod + } + + resetSharedVariables := func() { + attachable = false + deployCRD = false + scName = "" + podInfoVersion = nil + } + + cleanup := func() { + cs := f.ClientSet + var errs []error + By("Deleting pod") + for _, pod := range m.pods { + errs = append(errs, framework.DeletePodWithWait(f, cs, pod)) + } + + By("Deleting claim") + for _, claim := range m.pvcs { + claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) + if err == nil { + cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil) + framework.WaitForPersistentVolumeDeleted(cs, claim.Spec.VolumeName, framework.Poll, 2*time.Minute) + } + + } + + By("Deleting storageclass") + for _, sc := range m.sc { + cs.StorageV1().StorageClasses().Delete(sc.Name, nil) + } + + By("Cleaning up resources") + for _, cleanupFunc := range m.testCleanups { + cleanupFunc() + } + + // reset some of common variables + resetSharedVariables() + err := utilerrors.NewAggregate(errs) + Expect(err).NotTo(HaveOccurred(), "while cleaning up after test") + } + + // The CSIDriverRegistry feature gate is needed for this test in Kubernetes 1.12. 
+ Context("CSI attach test using mock driver [Feature:CSIDriverRegistry]", func() { + tests := []struct { + name string + driverAttachable bool + deployDriverCRD bool + }{ + { + name: "should not require VolumeAttach for drivers without attachment", + driverAttachable: false, + deployDriverCRD: true, + }, + { + name: "should require VolumeAttach for drivers with attachment", + driverAttachable: true, + deployDriverCRD: true, + }, + { + name: "should preserve attachment policy when no CSIDriver present", + driverAttachable: true, + deployDriverCRD: false, + }, + } + for _, t := range tests { + It(t.name, func() { + deployCRD = t.deployDriverCRD + attachable = t.driverAttachable + var err error + init() + defer cleanup() + + _, claim, pod := createPod() + if pod == nil { + return + } + err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) + framework.ExpectNoError(err, "Failed to start pod: %v", err) + + By("Checking if VolumeAttachment was created for the pod") + handle := getVolumeHandle(m.cs, claim) + attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, m.provisioner, m.config.ClientNodeName))) + attachmentName := fmt.Sprintf("csi-%x", attachmentHash) + _, err = m.cs.StorageV1beta1().VolumeAttachments().Get(attachmentName, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + if t.driverAttachable { + framework.ExpectNoError(err, "Expected VolumeAttachment but none was found") + } + } else { + framework.ExpectNoError(err, "Failed to find VolumeAttachment") + } + } + if !t.driverAttachable { + Expect(err).To(HaveOccurred(), "Unexpected VolumeAttachment found") + } + }) + + } + }) + + Context("CSI workload information using mock driver [Feature:CSIDriverRegistry]", func() { + var ( + err error + podInfoV1 = "v1" + podInfoUnknown = "unknown" + podInfoEmpty = "" + ) + tests := []struct { + name string + podInfoOnMountVersion *string + deployDriverCRD bool + expectPodInfo bool + }{ + { + name: "should not be passed when podInfoOnMountVersion=nil", + podInfoOnMountVersion: nil, + deployDriverCRD: true, + expectPodInfo: false, + }, + { + name: "should be passed when podInfoOnMountVersion=v1", + podInfoOnMountVersion: &podInfoV1, + deployDriverCRD: true, + expectPodInfo: true, + }, + { + name: "should not be passed when podInfoOnMountVersion=", + podInfoOnMountVersion: &podInfoEmpty, + deployDriverCRD: true, + expectPodInfo: false, + }, + { + name: "should not be passed when podInfoOnMountVersion=", + podInfoOnMountVersion: &podInfoUnknown, + deployDriverCRD: true, + expectPodInfo: false, + }, + { + name: "should not be passed when CSIDriver does not exist", + deployDriverCRD: false, + expectPodInfo: false, + }, + } + for _, t := range tests { + It(t.name, func() { + deployCRD = t.deployDriverCRD + scName = "csi-mock-sc-" + f.UniqueName + init() + defer cleanup() + + _, _, pod := createPod() + if pod == nil { + return + } + err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) + framework.ExpectNoError(err, "Failed to start pod: %v", err) + By("Checking CSI driver logs") + + // The driver is deployed as a statefulset with stable pod names + driverPodName := "csi-mockplugin-0" + err = checkPodInfo(m.cs, f.Namespace.Name, driverPodName, "mock", pod, t.expectPodInfo) + framework.ExpectNoError(err) + }) + } + }) +}) diff --git a/test/e2e/storage/csi_volumes.go b/test/e2e/storage/csi_volumes.go index 5ddd597290..a0890ee517 100644 --- a/test/e2e/storage/csi_volumes.go +++ b/test/e2e/storage/csi_volumes.go @@ -35,8 
+35,6 @@ import ( "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" - "crypto/sha256" - . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/util/rand" @@ -115,223 +113,6 @@ var _ = utils.SIGDescribe("CSI Volumes", func() { testTopologyNegative(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */) }) }) - - // The CSIDriverRegistry feature gate is needed for this test in Kubernetes 1.12. - - Context("CSI attach test using mock driver [Feature:CSIDriverRegistry]", func() { - var ( - err error - driver testsuites.TestDriver - ) - - tests := []struct { - name string - driverAttachable bool - deployDriverCRD bool - }{ - { - name: "should not require VolumeAttach for drivers without attachment", - driverAttachable: false, - deployDriverCRD: true, - }, - { - name: "should require VolumeAttach for drivers with attachment", - driverAttachable: true, - deployDriverCRD: true, - }, - { - name: "should preserve attachment policy when no CSIDriver present", - driverAttachable: true, - deployDriverCRD: false, - }, - } - - for _, t := range tests { - test := t - f := framework.NewDefaultFramework("csiattach") - - It(test.name, func() { - cs := f.ClientSet - csics := f.CSIClientSet - ns := f.Namespace - - driver = drivers.InitMockCSIDriver(test.deployDriverCRD, test.driverAttachable, nil) - config, testCleanup := driver.PrepareTest(f) - driverName := config.GetUniqueDriverName() - defer testCleanup() - - if test.deployDriverCRD { - err = waitForCSIDriver(csics, driverName) - framework.ExpectNoError(err, "Failed to get CSIDriver: %v", err) - defer destroyCSIDriver(csics, driverName) - } - - By("Creating pod") - var sc *storagev1.StorageClass - if dDriver, ok := driver.(testsuites.DynamicPVTestDriver); ok { - sc = dDriver.GetDynamicProvisionStorageClass(config, "") - } - nodeName := config.ClientNodeName - scTest := testsuites.StorageClassTest{ - Name: driver.GetDriverInfo().Name, - Provisioner: sc.Provisioner, - Parameters: sc.Parameters, - ClaimSize: "1Gi", - ExpectedSize: "1Gi", - } - nodeSelection := testsuites.NodeSelection{ - Name: nodeName, - } - class, claim, pod := startPausePod(cs, scTest, nodeSelection, ns.Name) - if class != nil { - defer cs.StorageV1().StorageClasses().Delete(class.Name, nil) - } - if claim != nil { - // Fully delete PV before deleting CSI driver - defer deleteVolume(cs, claim) - } - if pod != nil { - // Fully delete (=unmount) the pod before deleting CSI driver - defer framework.DeletePodWithWait(f, cs, pod) - } - if pod == nil { - return - } - - err = framework.WaitForPodNameRunningInNamespace(cs, pod.Name, pod.Namespace) - framework.ExpectNoError(err, "Failed to start pod: %v", err) - - By("Checking if VolumeAttachment was created for the pod") - handle := getVolumeHandle(cs, claim) - attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, scTest.Provisioner, nodeName))) - attachmentName := fmt.Sprintf("csi-%x", attachmentHash) - _, err = cs.StorageV1beta1().VolumeAttachments().Get(attachmentName, metav1.GetOptions{}) - if err != nil { - if errors.IsNotFound(err) { - if test.driverAttachable { - framework.ExpectNoError(err, "Expected VolumeAttachment but none was found") - } - } else { - framework.ExpectNoError(err, "Failed to find VolumeAttachment") - } - } - if !test.driverAttachable { - Expect(err).To(HaveOccurred(), "Unexpected VolumeAttachment found") - } - }) - } - }) - - Context("CSI workload information using mock driver 
[Feature:CSIDriverRegistry]", func() { - var ( - err error - driver testsuites.TestDriver - podInfoV1 = "v1" - podInfoUnknown = "unknown" - podInfoEmpty = "" - ) - - tests := []struct { - name string - podInfoOnMountVersion *string - deployDriverCRD bool - expectPodInfo bool - }{ - { - name: "should not be passed when podInfoOnMountVersion=nil", - podInfoOnMountVersion: nil, - deployDriverCRD: true, - expectPodInfo: false, - }, - { - name: "should be passed when podInfoOnMountVersion=v1", - podInfoOnMountVersion: &podInfoV1, - deployDriverCRD: true, - expectPodInfo: true, - }, - { - name: "should not be passed when podInfoOnMountVersion=<empty string>", - podInfoOnMountVersion: &podInfoEmpty, - deployDriverCRD: true, - expectPodInfo: false, - }, - { - name: "should not be passed when podInfoOnMountVersion=<unknown string>", - podInfoOnMountVersion: &podInfoUnknown, - deployDriverCRD: true, - expectPodInfo: false, - }, - { - name: "should not be passed when CSIDriver does not exist", - deployDriverCRD: false, - expectPodInfo: false, - }, - } - for _, t := range tests { - test := t - f := framework.NewDefaultFramework("csiworkload") - - It(test.name, func() { - cs := f.ClientSet - csics := f.CSIClientSet - ns := f.Namespace - - driver = drivers.InitMockCSIDriver(test.deployDriverCRD, true, test.podInfoOnMountVersion) - config, testCleanup := driver.PrepareTest(f) - driverName := config.GetUniqueDriverName() - defer testCleanup() - - if test.deployDriverCRD { - err = waitForCSIDriver(csics, driverName) - framework.ExpectNoError(err, "Failed to get CSIDriver: %v", err) - defer destroyCSIDriver(csics, driverName) - } - - By("Creating pod") - var sc *storagev1.StorageClass - if dDriver, ok := driver.(testsuites.DynamicPVTestDriver); ok { - sc = dDriver.GetDynamicProvisionStorageClass(config, "") - } - nodeName := config.ClientNodeName - scTest := testsuites.StorageClassTest{ - Name: driver.GetDriverInfo().Name, - Parameters: sc.Parameters, - ClaimSize: "1Gi", - ExpectedSize: "1Gi", - // Provisioner and storage class name must match what's used in - // csi-storageclass.yaml, plus the test-specific suffix. - Provisioner: sc.Provisioner, - StorageClassName: "csi-mock-sc-" + f.UniqueName, - } - nodeSelection := testsuites.NodeSelection{ - // The mock driver only works when everything runs on a single node.
- Name: nodeName, - } - class, claim, pod := startPausePod(cs, scTest, nodeSelection, ns.Name) - if class != nil { - defer cs.StorageV1().StorageClasses().Delete(class.Name, nil) - } - if claim != nil { - // Fully delete PV before deleting CSI driver - defer deleteVolume(cs, claim) - } - if pod != nil { - // Fully delete (=unmount) the pod before deleting CSI driver - defer framework.DeletePodWithWait(f, cs, pod) - } - if pod == nil { - return - } - err = framework.WaitForPodNameRunningInNamespace(cs, pod.Name, pod.Namespace) - framework.ExpectNoError(err, "Failed to start pod: %v", err) - By("Checking CSI driver logs") - // The driver is deployed as a statefulset with stable pod names - driverPodName := "csi-mockplugin-0" - err = checkPodInfo(cs, f.Namespace.Name, driverPodName, "mock", pod, test.expectPodInfo) - framework.ExpectNoError(err) - }) - } - }) }) func testTopologyPositive(cs clientset.Interface, suffix, namespace string, delayBinding, allowedTopologies bool) { @@ -434,15 +215,6 @@ func getVolumeHandle(cs clientset.Interface, claim *v1.PersistentVolumeClaim) st return pv.Spec.CSI.VolumeHandle } -func deleteVolume(cs clientset.Interface, claim *v1.PersistentVolumeClaim) { - // re-get the claim to the latest state with bound volume - claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) - if err == nil { - cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil) - framework.WaitForPersistentVolumeDeleted(cs, claim.Spec.VolumeName, framework.Poll, 2*time.Minute) - } -} - func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node testsuites.NodeSelection, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { class := newStorageClass(t, ns, "") class, err := cs.StorageV1().StorageClasses().Create(class) diff --git a/test/e2e/storage/volume_limits.go b/test/e2e/storage/volume_limits.go index 9c3a179449..3d17d35752 100644 --- a/test/e2e/storage/volume_limits.go +++ b/test/e2e/storage/volume_limits.go @@ -47,7 +47,6 @@ var _ = utils.SIGDescribe("Volume limits", func() { framework.Failf("Expected volume limits to be set") } } - }) }) From 2516cbd4539619ff935c090cb2c75229b6b5d8d6 Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Wed, 20 Feb 2019 17:01:11 -0500 Subject: [PATCH 2/2] Add e2e for CSI volume limit stuff Also use privileged for driver registrar --- test/e2e/storage/BUILD | 1 + test/e2e/storage/csi_mock_volume.go | 559 ++++++++++++++++++ test/e2e/storage/csi_mock_volume_test.go | 280 --------- test/e2e/storage/csi_volumes.go | 164 ----- test/e2e/storage/drivers/csi.go | 9 +- .../storage-csi/mock/csi-mock-driver.yaml | 2 + 6 files changed, 570 insertions(+), 445 deletions(-) create mode 100644 test/e2e/storage/csi_mock_volume.go delete mode 100644 test/e2e/storage/csi_mock_volume_test.go diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD index 7ecc8bf88c..13269e6748 100644 --- a/test/e2e/storage/BUILD +++ b/test/e2e/storage/BUILD @@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ + "csi_mock_volume.go", "csi_volumes.go", "detach_mounted.go", "empty_dir_wrapper.go", diff --git a/test/e2e/storage/csi_mock_volume.go b/test/e2e/storage/csi_mock_volume.go new file mode 100644 index 0000000000..02dd0ea9d7 --- /dev/null +++ b/test/e2e/storage/csi_mock_volume.go @@ -0,0 +1,559 @@ +/* +Copyright 2019 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "crypto/sha256" + "encoding/json" + "fmt" + "regexp" + "strings" + "time" + + "k8s.io/api/core/v1" + storage "k8s.io/api/storage/v1" + storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + csiclient "k8s.io/csi-api/pkg/client/clientset/versioned" + volumeutil "k8s.io/kubernetes/pkg/volume/util" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/drivers" + "k8s.io/kubernetes/test/e2e/storage/testsuites" + "k8s.io/kubernetes/test/e2e/storage/utils" + imageutils "k8s.io/kubernetes/test/utils/image" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +type cleanupFuncs func() + +const ( + csiNodeLimitUpdateTimeout = 5 * time.Minute + csiPodUnschedulableTimeout = 2 * time.Minute +) + +var _ = utils.SIGDescribe("CSI mock volume", func() { + type testParameters struct { + attachable bool + attachLimit int + registerDriver bool + podInfoVersion *string + scName string + nodeSelectorKey string + } + + type mockDriverSetup struct { + cs clientset.Interface + config *testsuites.PerTestConfig + testCleanups []cleanupFuncs + pods []*v1.Pod + pvcs []*v1.PersistentVolumeClaim + sc map[string]*storage.StorageClass + driver testsuites.TestDriver + nodeLabel map[string]string + provisioner string + tp testParameters + } + + var m mockDriverSetup + + f := framework.NewDefaultFramework("csi-mock-volumes") + + init := func(tp testParameters) { + m = mockDriverSetup{ + cs: f.ClientSet, + sc: make(map[string]*storage.StorageClass), + tp: tp, + } + csics := f.CSIClientSet + var err error + + m.driver = drivers.InitMockCSIDriver(tp.registerDriver, tp.attachable, tp.podInfoVersion, tp.attachLimit) + config, testCleanup := m.driver.PrepareTest(f) + m.testCleanups = append(m.testCleanups, testCleanup) + m.config = config + m.provisioner = config.GetUniqueDriverName() + + if tp.nodeSelectorKey != "" { + framework.AddOrUpdateLabelOnNode(m.cs, m.config.ClientNodeName, tp.nodeSelectorKey, f.Namespace.Name) + m.nodeLabel = map[string]string{ + tp.nodeSelectorKey: f.Namespace.Name, + } + } + + if tp.registerDriver { + err = waitForCSIDriver(csics, m.config.GetUniqueDriverName()) + framework.ExpectNoError(err, "Failed to get CSIDriver : %v", err) + m.testCleanups = append(m.testCleanups, func() { + destroyCSIDriver(csics, m.config.GetUniqueDriverName()) + }) + } + } + + createPod := func() (*storage.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { + By("Creating pod") + var sc *storagev1.StorageClass + if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok { + sc = dDriver.GetDynamicProvisionStorageClass(m.config, "") + } + nodeName := m.config.ClientNodeName + scTest := testsuites.StorageClassTest{ + Name: m.driver.GetDriverInfo().Name, + Provisioner: sc.Provisioner, + 
Parameters: sc.Parameters, + ClaimSize: "1Gi", + ExpectedSize: "1Gi", + } + if m.tp.scName != "" { + scTest.StorageClassName = m.tp.scName + } + nodeSelection := testsuites.NodeSelection{ + // The mock driver only works when everything runs on a single node. + Name: nodeName, + } + if len(m.nodeLabel) > 0 { + nodeSelection = testsuites.NodeSelection{ + Selector: m.nodeLabel, + } + } + class, claim, pod := startPausePod(f.ClientSet, scTest, nodeSelection, f.Namespace.Name) + if class != nil { + m.sc[class.Name] = class + } + if claim != nil { + m.pvcs = append(m.pvcs, claim) + } + if pod != nil { + m.pods = append(m.pods, pod) + } + return class, claim, pod + } + + cleanup := func() { + cs := f.ClientSet + var errs []error + + for _, pod := range m.pods { + By(fmt.Sprintf("Deleting pod %s", pod.Name)) + errs = append(errs, framework.DeletePodWithWait(f, cs, pod)) + } + + for _, claim := range m.pvcs { + By(fmt.Sprintf("Deleting claim %s", claim.Name)) + claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) + if err == nil { + cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil) + framework.WaitForPersistentVolumeDeleted(cs, claim.Spec.VolumeName, framework.Poll, 2*time.Minute) + } + + } + + for _, sc := range m.sc { + By(fmt.Sprintf("Deleting storageclass %s", sc.Name)) + cs.StorageV1().StorageClasses().Delete(sc.Name, nil) + } + + By("Cleaning up resources") + for _, cleanupFunc := range m.testCleanups { + cleanupFunc() + } + + if len(m.nodeLabel) > 0 && len(m.tp.nodeSelectorKey) > 0 { + framework.RemoveLabelOffNode(m.cs, m.config.ClientNodeName, m.tp.nodeSelectorKey) + } + + err := utilerrors.NewAggregate(errs) + Expect(err).NotTo(HaveOccurred(), "while cleaning up after test") + } + + // The CSIDriverRegistry feature gate is needed for this test in Kubernetes 1.12. 
+ Context("CSI attach test using mock driver [Feature:CSIDriverRegistry]", func() { + tests := []struct { + name string + driverAttachable bool + deployDriverCRD bool + }{ + { + name: "should not require VolumeAttach for drivers without attachment", + driverAttachable: false, + deployDriverCRD: true, + }, + { + name: "should require VolumeAttach for drivers with attachment", + driverAttachable: true, + deployDriverCRD: true, + }, + { + name: "should preserve attachment policy when no CSIDriver present", + driverAttachable: true, + deployDriverCRD: false, + }, + } + for _, t := range tests { + test := t + It(t.name, func() { + var err error + init(testParameters{registerDriver: test.deployDriverCRD, attachable: test.driverAttachable}) + defer cleanup() + + _, claim, pod := createPod() + if pod == nil { + return + } + err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) + framework.ExpectNoError(err, "Failed to start pod: %v", err) + + By("Checking if VolumeAttachment was created for the pod") + handle := getVolumeHandle(m.cs, claim) + attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, m.provisioner, m.config.ClientNodeName))) + attachmentName := fmt.Sprintf("csi-%x", attachmentHash) + _, err = m.cs.StorageV1beta1().VolumeAttachments().Get(attachmentName, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + if test.driverAttachable { + framework.ExpectNoError(err, "Expected VolumeAttachment but none was found") + } + } else { + framework.ExpectNoError(err, "Failed to find VolumeAttachment") + } + } + if !test.driverAttachable { + Expect(err).To(HaveOccurred(), "Unexpected VolumeAttachment found") + } + }) + + } + }) + + Context("CSI workload information using mock driver [Feature:CSIDriverRegistry]", func() { + var ( + err error + podInfoV1 = "v1" + podInfoUnknown = "unknown" + podInfoEmpty = "" + ) + tests := []struct { + name string + podInfoOnMountVersion *string + deployDriverCRD bool + expectPodInfo bool + }{ + { + name: "should not be passed when podInfoOnMountVersion=nil", + podInfoOnMountVersion: nil, + deployDriverCRD: true, + expectPodInfo: false, + }, + { + name: "should be passed when podInfoOnMountVersion=v1", + podInfoOnMountVersion: &podInfoV1, + deployDriverCRD: true, + expectPodInfo: true, + }, + { + name: "should not be passed when podInfoOnMountVersion=", + podInfoOnMountVersion: &podInfoEmpty, + deployDriverCRD: true, + expectPodInfo: false, + }, + { + name: "should not be passed when podInfoOnMountVersion=", + podInfoOnMountVersion: &podInfoUnknown, + deployDriverCRD: true, + expectPodInfo: false, + }, + { + name: "should not be passed when CSIDriver does not exist", + deployDriverCRD: false, + expectPodInfo: false, + }, + } + for _, t := range tests { + test := t + It(t.name, func() { + init(testParameters{ + registerDriver: test.deployDriverCRD, + scName: "csi-mock-sc-" + f.UniqueName, + podInfoVersion: test.podInfoOnMountVersion}) + + defer cleanup() + + _, _, pod := createPod() + if pod == nil { + return + } + err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) + framework.ExpectNoError(err, "Failed to start pod: %v", err) + By("Checking CSI driver logs") + + // The driver is deployed as a statefulset with stable pod names + driverPodName := "csi-mockplugin-0" + err = checkPodInfo(m.cs, f.Namespace.Name, driverPodName, "mock", pod, test.expectPodInfo) + framework.ExpectNoError(err) + }) + } + }) + + Context("CSI volume limit information using mock driver", func() { + It("should 
report attach limit when limit is bigger than 0", func() { + // define volume limit to be 2 for this test + + var err error + init(testParameters{attachable: true, nodeSelectorKey: "node-attach-limit-csi", attachLimit: 2}) + defer cleanup() + nodeName := m.config.ClientNodeName + attachKey := v1.ResourceName(volumeutil.GetCSIAttachLimitKey(m.provisioner)) + + nodeAttachLimit, err := checkNodeForLimits(nodeName, attachKey, m.cs) + Expect(err).NotTo(HaveOccurred(), "while fetching node %v", err) + + Expect(nodeAttachLimit).To(Equal(2)) + + _, _, pod1 := createPod() + Expect(pod1).NotTo(BeNil(), "while creating first pod") + + err = framework.WaitForPodNameRunningInNamespace(m.cs, pod1.Name, pod1.Namespace) + framework.ExpectNoError(err, "Failed to start pod1: %v", err) + + _, _, pod2 := createPod() + Expect(pod2).NotTo(BeNil(), "while creating second pod") + + err = framework.WaitForPodNameRunningInNamespace(m.cs, pod2.Name, pod2.Namespace) + framework.ExpectNoError(err, "Failed to start pod2: %v", err) + + _, _, pod3 := createPod() + Expect(pod3).NotTo(BeNil(), "while creating third pod") + err = waitForMaxVolumeCondition(pod3, m.cs) + Expect(err).NotTo(HaveOccurred(), "while waiting for max volume condition") + }) + }) + +}) + +func waitForMaxVolumeCondition(pod *v1.Pod, cs clientset.Interface) error { + var err error + waitErr := wait.PollImmediate(10*time.Second, csiPodUnschedulableTimeout, func() (bool, error) { + pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + conditions := pod.Status.Conditions + for _, condition := range conditions { + matched, _ := regexp.MatchString("max.+volume.+count", condition.Message) + if condition.Reason == v1.PodReasonUnschedulable && matched { + return true, nil + } + + } + return false, nil + }) + return waitErr +} + +func checkNodeForLimits(nodeName string, attachKey v1.ResourceName, cs clientset.Interface) (int, error) { + var attachLimit int64 + + waitErr := wait.PollImmediate(10*time.Second, csiNodeLimitUpdateTimeout, func() (bool, error) { + node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + if err != nil { + return false, err + } + limits := getVolumeLimit(node) + var ok bool + if len(limits) > 0 { + attachLimit, ok = limits[attachKey] + if ok { + return true, nil + } + } + return false, nil + }) + return int(attachLimit), waitErr +} + +func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node testsuites.NodeSelection, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { + class := newStorageClass(t, ns, "") + var err error + _, err = cs.StorageV1().StorageClasses().Get(class.Name, metav1.GetOptions{}) + if err != nil { + class, err = cs.StorageV1().StorageClasses().Create(class) + framework.ExpectNoError(err, "Failed to create class : %v", err) + } + + claim := newClaim(t, ns, "") + claim.Spec.StorageClassName = &class.Name + claim, err = cs.CoreV1().PersistentVolumeClaims(ns).Create(claim) + framework.ExpectNoError(err, "Failed to create claim: %v", err) + + pvcClaims := []*v1.PersistentVolumeClaim{claim} + _, err = framework.WaitForPVClaimBoundPhase(cs, pvcClaims, framework.ClaimProvisionTimeout) + Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", err) + + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "pvc-volume-tester-", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "volume-tester", + Image: imageutils.GetE2EImage(imageutils.Pause), + 
VolumeMounts: []v1.VolumeMount{ + { + Name: "my-volume", + MountPath: "/mnt/test", + }, + }, + }, + }, + RestartPolicy: v1.RestartPolicyNever, + Volumes: []v1.Volume{ + { + Name: "my-volume", + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: claim.Name, + ReadOnly: false, + }, + }, + }, + }, + }, + } + + if node.Name != "" { + pod.Spec.NodeName = node.Name + } + if len(node.Selector) != 0 { + pod.Spec.NodeSelector = node.Selector + } + + pod, err = cs.CoreV1().Pods(ns).Create(pod) + framework.ExpectNoError(err, "Failed to create pod: %v", err) + return class, claim, pod +} + +// checkPodInfo tests that NodePublish was called with expected volume_context +func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContainerName string, pod *v1.Pod, expectPodInfo bool) error { + expectedAttributes := map[string]string{ + "csi.storage.k8s.io/pod.name": pod.Name, + "csi.storage.k8s.io/pod.namespace": namespace, + "csi.storage.k8s.io/pod.uid": string(pod.UID), + "csi.storage.k8s.io/serviceAccount.name": "default", + } + // Load logs of driver pod + log, err := framework.GetPodLogs(cs, namespace, driverPodName, driverContainerName) + if err != nil { + return fmt.Errorf("could not load CSI driver logs: %s", err) + } + framework.Logf("CSI driver logs:\n%s", log) + // Find NodePublish in the logs + foundAttributes := sets.NewString() + logLines := strings.Split(log, "\n") + for _, line := range logLines { + if !strings.HasPrefix(line, "gRPCCall:") { + continue + } + line = strings.TrimPrefix(line, "gRPCCall:") + // Dummy structure that parses just volume_attributes out of logged CSI call + type MockCSICall struct { + Method string + Request struct { + VolumeContext map[string]string `json:"volume_context"` + } + } + var call MockCSICall + err := json.Unmarshal([]byte(line), &call) + if err != nil { + framework.Logf("Could not parse CSI driver log line %q: %s", line, err) + continue + } + if call.Method != "/csi.v1.Node/NodePublishVolume" { + continue + } + // Check that NodePublish had expected attributes + for k, v := range expectedAttributes { + vv, found := call.Request.VolumeContext[k] + if found && v == vv { + foundAttributes.Insert(k) + framework.Logf("Found volume attribute %s: %s", k, v) + } + } + // Process just the first NodePublish, the rest of the log is useless. 
+ break + } + if expectPodInfo { + if foundAttributes.Len() != len(expectedAttributes) { + return fmt.Errorf("number of found volume attributes does not match, expected %d, got %d", len(expectedAttributes), foundAttributes.Len()) + } + return nil + } else { + if foundAttributes.Len() != 0 { + return fmt.Errorf("some unexpected volume attributes were found: %+v", foundAttributes.List()) + } + return nil + } +} + +func waitForCSIDriver(csics csiclient.Interface, driverName string) error { + timeout := 2 * time.Minute + + framework.Logf("waiting up to %v for CSIDriver %q", timeout, driverName) + for start := time.Now(); time.Since(start) < timeout; time.Sleep(framework.Poll) { + _, err := csics.CsiV1alpha1().CSIDrivers().Get(driverName, metav1.GetOptions{}) + if !errors.IsNotFound(err) { + return err + } + } + return fmt.Errorf("gave up after waiting %v for CSIDriver %q.", timeout, driverName) +} + +func destroyCSIDriver(csics csiclient.Interface, driverName string) { + driverGet, err := csics.CsiV1alpha1().CSIDrivers().Get(driverName, metav1.GetOptions{}) + if err == nil { + framework.Logf("deleting %s.%s: %s", driverGet.TypeMeta.APIVersion, driverGet.TypeMeta.Kind, driverGet.ObjectMeta.Name) + // Uncomment the following line to get full dump of CSIDriver object + // framework.Logf("%s", framework.PrettyPrint(driverGet)) + csics.CsiV1alpha1().CSIDrivers().Delete(driverName, nil) + } +} + +func getVolumeHandle(cs clientset.Interface, claim *v1.PersistentVolumeClaim) string { + // re-get the claim to the latest state with bound volume + claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) + if err != nil { + framework.ExpectNoError(err, "Cannot get PVC") + return "" + } + pvName := claim.Spec.VolumeName + pv, err := cs.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) + if err != nil { + framework.ExpectNoError(err, "Cannot get PV") + return "" + } + if pv.Spec.CSI == nil { + Expect(pv.Spec.CSI).NotTo(BeNil()) + return "" + } + return pv.Spec.CSI.VolumeHandle +} diff --git a/test/e2e/storage/csi_mock_volume_test.go b/test/e2e/storage/csi_mock_volume_test.go deleted file mode 100644 index 664300df40..0000000000 --- a/test/e2e/storage/csi_mock_volume_test.go +++ /dev/null @@ -1,280 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package storage - -import ( - "crypto/sha256" - "fmt" - "time" - - "k8s.io/api/core/v1" - storage "k8s.io/api/storage/v1" - storagev1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/storage/drivers" - "k8s.io/kubernetes/test/e2e/storage/testsuites" - "k8s.io/kubernetes/test/e2e/storage/utils" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" -) - -type cleanupFuncs func() - -var _ = utils.SIGDescribe("CSI Mock volumes", func() { - type mockDriverSetup struct { - cs clientset.Interface - config *testsuites.PerTestConfig - testCleanups []cleanupFuncs - pods []*v1.Pod - pvcs []*v1.PersistentVolumeClaim - sc map[string]*storage.StorageClass - driver testsuites.TestDriver - provisioner string - } - var m mockDriverSetup - var attachable bool - var deployCRD bool - var podInfoVersion *string - var scName string - f := framework.NewDefaultFramework("csi-mock-volumes") - - init := func() { - m = mockDriverSetup{cs: f.ClientSet} - csics := f.CSIClientSet - var err error - - m.driver = drivers.InitMockCSIDriver(deployCRD, attachable, podInfoVersion) - config, testCleanup := m.driver.PrepareTest(f) - m.testCleanups = append(m.testCleanups, testCleanup) - m.config = config - - if deployCRD { - err = waitForCSIDriver(csics, m.config.GetUniqueDriverName()) - framework.ExpectNoError(err, "Failed to get CSIDriver : %v", err) - m.testCleanups = append(m.testCleanups, func() { - destroyCSIDriver(csics, m.config.GetUniqueDriverName()) - }) - } - } - - createPod := func() (*storage.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { - By("Creating pod") - var sc *storagev1.StorageClass - if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok { - sc = dDriver.GetDynamicProvisionStorageClass(m.config, "") - } - m.provisioner = sc.Provisioner - nodeName := m.config.ClientNodeName - scTest := testsuites.StorageClassTest{ - Name: m.driver.GetDriverInfo().Name, - Provisioner: sc.Provisioner, - Parameters: sc.Parameters, - ClaimSize: "1Gi", - ExpectedSize: "1Gi", - } - if scName != "" { - scTest.StorageClassName = scName - } - nodeSelection := testsuites.NodeSelection{ - // The mock driver only works when everything runs on a single node. - Name: nodeName, - } - class, claim, pod := startPausePod(f.ClientSet, scTest, nodeSelection, f.Namespace.Name) - if class != nil { - m.sc[class.Name] = class - } - if claim != nil { - m.pvcs = append(m.pvcs, claim) - } - if pod != nil { - m.pods = append(m.pods, pod) - } - return class, claim, pod - } - - resetSharedVariables := func() { - attachable = false - deployCRD = false - scName = "" - podInfoVersion = nil - } - - cleanup := func() { - cs := f.ClientSet - var errs []error - By("Deleting pod") - for _, pod := range m.pods { - errs = append(errs, framework.DeletePodWithWait(f, cs, pod)) - } - - By("Deleting claim") - for _, claim := range m.pvcs { - claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) - if err == nil { - cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil) - framework.WaitForPersistentVolumeDeleted(cs, claim.Spec.VolumeName, framework.Poll, 2*time.Minute) - } - - } - - By("Deleting storageclass") - for _, sc := range m.sc { - cs.StorageV1().StorageClasses().Delete(sc.Name, nil) - } - - By("Cleaning up resources") - for _, cleanupFunc := range m.testCleanups { - cleanupFunc() - } - - // reset some of common variables - resetSharedVariables() - err := utilerrors.NewAggregate(errs) - Expect(err).NotTo(HaveOccurred(), "while cleaning up after test") - } - - // The CSIDriverRegistry feature gate is needed for this test in Kubernetes 1.12. 
- Context("CSI attach test using mock driver [Feature:CSIDriverRegistry]", func() { - tests := []struct { - name string - driverAttachable bool - deployDriverCRD bool - }{ - { - name: "should not require VolumeAttach for drivers without attachment", - driverAttachable: false, - deployDriverCRD: true, - }, - { - name: "should require VolumeAttach for drivers with attachment", - driverAttachable: true, - deployDriverCRD: true, - }, - { - name: "should preserve attachment policy when no CSIDriver present", - driverAttachable: true, - deployDriverCRD: false, - }, - } - for _, t := range tests { - It(t.name, func() { - deployCRD = t.deployDriverCRD - attachable = t.driverAttachable - var err error - init() - defer cleanup() - - _, claim, pod := createPod() - if pod == nil { - return - } - err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) - framework.ExpectNoError(err, "Failed to start pod: %v", err) - - By("Checking if VolumeAttachment was created for the pod") - handle := getVolumeHandle(m.cs, claim) - attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, m.provisioner, m.config.ClientNodeName))) - attachmentName := fmt.Sprintf("csi-%x", attachmentHash) - _, err = m.cs.StorageV1beta1().VolumeAttachments().Get(attachmentName, metav1.GetOptions{}) - if err != nil { - if errors.IsNotFound(err) { - if t.driverAttachable { - framework.ExpectNoError(err, "Expected VolumeAttachment but none was found") - } - } else { - framework.ExpectNoError(err, "Failed to find VolumeAttachment") - } - } - if !t.driverAttachable { - Expect(err).To(HaveOccurred(), "Unexpected VolumeAttachment found") - } - }) - - } - }) - - Context("CSI workload information using mock driver [Feature:CSIDriverRegistry]", func() { - var ( - err error - podInfoV1 = "v1" - podInfoUnknown = "unknown" - podInfoEmpty = "" - ) - tests := []struct { - name string - podInfoOnMountVersion *string - deployDriverCRD bool - expectPodInfo bool - }{ - { - name: "should not be passed when podInfoOnMountVersion=nil", - podInfoOnMountVersion: nil, - deployDriverCRD: true, - expectPodInfo: false, - }, - { - name: "should be passed when podInfoOnMountVersion=v1", - podInfoOnMountVersion: &podInfoV1, - deployDriverCRD: true, - expectPodInfo: true, - }, - { - name: "should not be passed when podInfoOnMountVersion=", - podInfoOnMountVersion: &podInfoEmpty, - deployDriverCRD: true, - expectPodInfo: false, - }, - { - name: "should not be passed when podInfoOnMountVersion=", - podInfoOnMountVersion: &podInfoUnknown, - deployDriverCRD: true, - expectPodInfo: false, - }, - { - name: "should not be passed when CSIDriver does not exist", - deployDriverCRD: false, - expectPodInfo: false, - }, - } - for _, t := range tests { - It(t.name, func() { - deployCRD = t.deployDriverCRD - scName = "csi-mock-sc-" + f.UniqueName - init() - defer cleanup() - - _, _, pod := createPod() - if pod == nil { - return - } - err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) - framework.ExpectNoError(err, "Failed to start pod: %v", err) - By("Checking CSI driver logs") - - // The driver is deployed as a statefulset with stable pod names - driverPodName := "csi-mockplugin-0" - err = checkPodInfo(m.cs, f.Namespace.Name, driverPodName, "mock", pod, t.expectPodInfo) - framework.ExpectNoError(err) - }) - } - }) -}) diff --git a/test/e2e/storage/csi_volumes.go b/test/e2e/storage/csi_volumes.go index a0890ee517..d4234434cf 100644 --- a/test/e2e/storage/csi_volumes.go +++ b/test/e2e/storage/csi_volumes.go @@ -17,23 
+17,13 @@ limitations under the License. package storage import ( - "encoding/json" - "fmt" - "strings" - "time" - "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" - csiclient "k8s.io/csi-api/pkg/client/clientset/versioned" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/storage/drivers" "k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/utils" - imageutils "k8s.io/kubernetes/test/utils/image" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -172,160 +162,6 @@ func testTopologyNegative(cs clientset.Interface, suffix, namespace string, dela } } -func waitForCSIDriver(csics csiclient.Interface, driverName string) error { - timeout := 2 * time.Minute - - framework.Logf("waiting up to %v for CSIDriver %q", timeout, driverName) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(framework.Poll) { - _, err := csics.CsiV1alpha1().CSIDrivers().Get(driverName, metav1.GetOptions{}) - if !errors.IsNotFound(err) { - return err - } - } - return fmt.Errorf("gave up after waiting %v for CSIDriver %q.", timeout, driverName) -} - -func destroyCSIDriver(csics csiclient.Interface, driverName string) { - driverGet, err := csics.CsiV1alpha1().CSIDrivers().Get(driverName, metav1.GetOptions{}) - if err == nil { - framework.Logf("deleting %s.%s: %s", driverGet.TypeMeta.APIVersion, driverGet.TypeMeta.Kind, driverGet.ObjectMeta.Name) - // Uncomment the following line to get full dump of CSIDriver object - // framework.Logf("%s", framework.PrettyPrint(driverGet)) - csics.CsiV1alpha1().CSIDrivers().Delete(driverName, nil) - } -} - -func getVolumeHandle(cs clientset.Interface, claim *v1.PersistentVolumeClaim) string { - // re-get the claim to the latest state with bound volume - claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) - if err != nil { - framework.ExpectNoError(err, "Cannot get PVC") - return "" - } - pvName := claim.Spec.VolumeName - pv, err := cs.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) - if err != nil { - framework.ExpectNoError(err, "Cannot get PV") - return "" - } - if pv.Spec.CSI == nil { - Expect(pv.Spec.CSI).NotTo(BeNil()) - return "" - } - return pv.Spec.CSI.VolumeHandle -} - -func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node testsuites.NodeSelection, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { - class := newStorageClass(t, ns, "") - class, err := cs.StorageV1().StorageClasses().Create(class) - framework.ExpectNoError(err, "Failed to create class : %v", err) - claim := newClaim(t, ns, "") - claim.Spec.StorageClassName = &class.Name - claim, err = cs.CoreV1().PersistentVolumeClaims(ns).Create(claim) - framework.ExpectNoError(err, "Failed to create claim: %v", err) - - pod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "pvc-volume-tester-", - }, - Spec: v1.PodSpec{ - NodeName: node.Name, - NodeSelector: node.Selector, - Affinity: node.Affinity, - Containers: []v1.Container{ - { - Name: "volume-tester", - Image: imageutils.GetE2EImage(imageutils.Pause), - VolumeMounts: []v1.VolumeMount{ - { - Name: "my-volume", - MountPath: "/mnt/test", - }, - }, - }, - }, - RestartPolicy: v1.RestartPolicyNever, - Volumes: []v1.Volume{ - { - Name: "my-volume", - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: 
&v1.PersistentVolumeClaimVolumeSource{ - ClaimName: claim.Name, - ReadOnly: false, - }, - }, - }, - }, - }, - } - - pod, err = cs.CoreV1().Pods(ns).Create(pod) - framework.ExpectNoError(err, "Failed to create pod: %v", err) - return class, claim, pod -} - -// checkPodInfo tests that NodePublish was called with expected volume_context -func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContainerName string, pod *v1.Pod, expectPodInfo bool) error { - expectedAttributes := map[string]string{ - "csi.storage.k8s.io/pod.name": pod.Name, - "csi.storage.k8s.io/pod.namespace": namespace, - "csi.storage.k8s.io/pod.uid": string(pod.UID), - "csi.storage.k8s.io/serviceAccount.name": "default", - } - // Load logs of driver pod - log, err := framework.GetPodLogs(cs, namespace, driverPodName, driverContainerName) - if err != nil { - return fmt.Errorf("could not load CSI driver logs: %s", err) - } - framework.Logf("CSI driver logs:\n%s", log) - // Find NodePublish in the logs - foundAttributes := sets.NewString() - logLines := strings.Split(log, "\n") - for _, line := range logLines { - if !strings.HasPrefix(line, "gRPCCall:") { - continue - } - line = strings.TrimPrefix(line, "gRPCCall:") - // Dummy structure that parses just volume_attributes out of logged CSI call - type MockCSICall struct { - Method string - Request struct { - VolumeContext map[string]string `json:"volume_context"` - } - } - var call MockCSICall - err := json.Unmarshal([]byte(line), &call) - if err != nil { - framework.Logf("Could not parse CSI driver log line %q: %s", line, err) - continue - } - if call.Method != "/csi.v1.Node/NodePublishVolume" { - continue - } - // Check that NodePublish had expected attributes - for k, v := range expectedAttributes { - vv, found := call.Request.VolumeContext[k] - if found && v == vv { - foundAttributes.Insert(k) - framework.Logf("Found volume attribute %s: %s", k, v) - } - } - // Process just the first NodePublish, the rest of the log is useless. - break - } - if expectPodInfo { - if foundAttributes.Len() != len(expectedAttributes) { - return fmt.Errorf("number of found volume attributes does not match, expected %d, got %d", len(expectedAttributes), foundAttributes.Len()) - } - return nil - } else { - if foundAttributes.Len() != 0 { - return fmt.Errorf("some unexpected volume attributes were found: %+v", foundAttributes.List()) - } - return nil - } -} - func addSingleCSIZoneAllowedTopologyToStorageClass(c clientset.Interface, sc *storagev1.StorageClass, zone string) { term := v1.TopologySelectorTerm{ MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ diff --git a/test/e2e/storage/drivers/csi.go b/test/e2e/storage/drivers/csi.go index 5afdd770db..7eb310cf02 100644 --- a/test/e2e/storage/drivers/csi.go +++ b/test/e2e/storage/drivers/csi.go @@ -38,6 +38,7 @@ package drivers import ( "fmt" "math/rand" + "strconv" . 
"github.com/onsi/ginkgo" storagev1 "k8s.io/api/storage/v1" @@ -171,13 +172,14 @@ type mockCSIDriver struct { manifests []string podInfoVersion *string attachable bool + attachLimit int } var _ testsuites.TestDriver = &mockCSIDriver{} var _ testsuites.DynamicPVTestDriver = &mockCSIDriver{} // InitMockCSIDriver returns a mockCSIDriver that implements TestDriver interface -func InitMockCSIDriver(registerDriver, driverAttachable bool, podInfoVersion *string) testsuites.TestDriver { +func InitMockCSIDriver(registerDriver, driverAttachable bool, podInfoVersion *string, attachLimit int) testsuites.TestDriver { driverManifests := []string{ "test/e2e/testing-manifests/storage-csi/cluster-driver-registrar/rbac.yaml", "test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml", @@ -213,6 +215,7 @@ func InitMockCSIDriver(registerDriver, driverAttachable bool, podInfoVersion *st manifests: driverManifests, podInfoVersion: podInfoVersion, attachable: driverAttachable, + attachLimit: attachLimit, } } @@ -256,6 +259,10 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest containerArgs = append(containerArgs, "--disable-attach") } + if m.attachLimit > 0 { + containerArgs = append(containerArgs, "--attach-limit", strconv.Itoa(m.attachLimit)) + } + // TODO (?): the storage.csi.image.version and storage.csi.image.registry // settings are ignored for this test. We could patch the image definitions. o := utils.PatchCSIOptions{ diff --git a/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml b/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml index 6aeb02b0aa..e3fcb9f44a 100644 --- a/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml +++ b/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml @@ -44,6 +44,8 @@ spec: apiVersion: v1 fieldPath: spec.nodeName imagePullPolicy: Always + securityContext: + privileged: true volumeMounts: - mountPath: /csi name: socket-dir