Merge pull request #72002 from pohly/storage-volume-testsuites-concurrency

E2E storage: more tests for different pod/node combinations
Kubernetes Prow Robot 2019-02-12 10:04:50 -08:00 committed by GitHub
commit f968499812
8 changed files with 396 additions and 185 deletions

@ -261,9 +261,11 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
Parameters: sc.Parameters,
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
NodeName: nodeName,
}
class, claim, pod := startPausePod(cs, scTest, ns.Name)
nodeSelection := testsuites.NodeSelection{
Name: nodeName,
}
class, claim, pod := startPausePod(cs, scTest, nodeSelection, ns.Name)
if class != nil {
defer cs.StorageV1().StorageClasses().Delete(class.Name, nil)
}
@ -381,16 +383,16 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
Parameters: sc.Parameters,
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
// The mock driver only works when everything runs on a single node.
NodeName: nodeName,
// Provisioner and storage class name must match what's used in
// csi-storageclass.yaml, plus the test-specific suffix.
Provisioner: sc.Provisioner,
StorageClassName: "csi-mock-sc-" + f.UniqueName,
// Mock driver does not provide any persistency.
SkipWriteReadCheck: true,
}
class, claim, pod := startPausePod(cs, scTest, ns.Name)
nodeSelection := testsuites.NodeSelection{
// The mock driver only works when everything runs on a single node.
Name: nodeName,
}
class, claim, pod := startPausePod(cs, scTest, nodeSelection, ns.Name)
if class != nil {
defer cs.StorageV1().StorageClasses().Delete(class.Name, nil)
}
@ -429,7 +431,7 @@ func testTopologyPositive(cs clientset.Interface, suffix, namespace string, dela
claim.Spec.StorageClassName = &class.Name
if delayBinding {
_, node := testsuites.TestBindingWaitForFirstConsumer(test, cs, claim, class)
_, node := testsuites.TestBindingWaitForFirstConsumer(test, cs, claim, class, nil /* node selector */, false /* expect unschedulable */)
Expect(node).ToNot(BeNil(), "Unexpected nil node found")
} else {
testsuites.TestDynamicProvisioning(test, cs, claim, class)
@ -450,16 +452,22 @@ func testTopologyNegative(cs clientset.Interface, suffix, namespace string, dela
test := createGCEPDStorageClassTest()
test.DelayBinding = delayBinding
test.NodeSelector = map[string]string{v1.LabelZoneFailureDomain: podZone}
test.ExpectUnschedulable = true
nodeSelector := map[string]string{v1.LabelZoneFailureDomain: podZone}
class := newStorageClass(test, namespace, suffix)
addSingleCSIZoneAllowedTopologyToStorageClass(cs, class, pvZone)
claim := newClaim(test, namespace, suffix)
claim.Spec.StorageClassName = &class.Name
if delayBinding {
testsuites.TestBindingWaitForFirstConsumer(test, cs, claim, class)
testsuites.TestBindingWaitForFirstConsumer(test, cs, claim, class, nodeSelector, true /* expect unschedulable */)
} else {
test.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
// Ensure that a pod cannot be scheduled in an unsuitable zone.
pod := testsuites.StartInPodWithVolume(cs, namespace, claim.Name, "pvc-tester-unschedulable", "sleep 100000",
testsuites.NodeSelection{Selector: nodeSelector})
defer testsuites.StopPod(cs, pod)
framework.ExpectNoError(framework.WaitForPodNameUnschedulableInNamespace(cs, pod.Name, pod.Namespace), "pod should be unschedulable")
}
testsuites.TestDynamicProvisioning(test, cs, claim, class)
}
}
@ -500,7 +508,7 @@ func getVolumeHandle(cs clientset.Interface, claim *v1.PersistentVolumeClaim) st
return pv.Spec.CSI.VolumeHandle
}
func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node testsuites.NodeSelection, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
class := newStorageClass(t, ns, "")
class, err := cs.StorageV1().StorageClasses().Create(class)
framework.ExpectNoError(err, "Failed to create class : %v", err)
@ -514,6 +522,9 @@ func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, ns str
GenerateName: "pvc-volume-tester-",
},
Spec: v1.PodSpec{
NodeName: node.Name,
NodeSelector: node.Selector,
Affinity: node.Affinity,
Containers: []v1.Container{
{
Name: "volume-tester",
@ -541,9 +552,6 @@ func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, ns str
},
}
if len(t.NodeName) != 0 {
pod.Spec.NodeName = t.NodeName
}
pod, err = cs.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err, "Failed to create pod: %v", err)
return class, claim, pod
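For reference, a minimal sketch of the new call pattern (the wrapper name is hypothetical; startPausePod, clientset, storagev1, v1 and testsuites are the ones used in the hunks above):

// startPausePodOnNode is a hypothetical wrapper illustrating the changed
// signature: callers now pass a testsuites.NodeSelection instead of a bare
// node name, and startPausePod copies it into the pod spec
// (NodeName, NodeSelector, Affinity).
func startPausePodOnNode(cs clientset.Interface, t testsuites.StorageClassTest, ns, nodeName string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
    node := testsuites.NodeSelection{
        Name: nodeName, // pin to one node; Selector/Affinity can be used instead
    }
    return startPausePod(cs, t, node, ns)
}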

@ -84,7 +84,7 @@ var _ testsuites.SnapshottableTestDriver = &hostpathCSIDriver{}
// InitHostPathCSIDriver returns hostpathCSIDriver that implements TestDriver interface
func InitHostPathCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
return initHostPathCSIDriver("csi-hostpath", config,
map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapDataSource: true},
map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapDataSource: true, testsuites.CapMultiPODs: true},
"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
@ -259,7 +259,7 @@ func (m *mockCSIDriver) CleanupDriver() {
// InitHostPathV0CSIDriver returns a variant of hostpathCSIDriver with different manifests.
func InitHostPathV0CSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
return initHostPathCSIDriver("csi-hostpath-v0", config,
map[testsuites.Capability]bool{testsuites.CapPersistence: true},
map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapMultiPODs: true},
"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
@ -297,6 +297,7 @@ func InitGcePDCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
testsuites.CapPersistence: true,
testsuites.CapFsGroup: true,
testsuites.CapExec: true,
testsuites.CapMultiPODs: true,
},
Config: config,
@ -409,6 +410,7 @@ func InitGcePDExternalCSIDriver(config testsuites.TestConfig) testsuites.TestDri
testsuites.CapPersistence: true,
testsuites.CapFsGroup: true,
testsuites.CapExec: true,
testsuites.CapMultiPODs: true,
},
Config: config,

@ -108,12 +108,14 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(volume *v1.PersistentVolume) error {
err := checkGCEPD(volume, "pd-standard")
if err != nil {
return err
}
return verifyZonesInPV(volume, sets.NewString(cloudZones...), true /* match */)
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
var err error
err = checkGCEPD(volume, "pd-standard")
Expect(err).NotTo(HaveOccurred(), "checkGCEPD")
err = verifyZonesInPV(volume, sets.NewString(cloudZones...), true /* match */)
Expect(err).NotTo(HaveOccurred(), "verifyZonesInPV")
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
},
},
{
@ -126,16 +128,16 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(volume *v1.PersistentVolume) error {
err := checkGCEPD(volume, "pd-standard")
if err != nil {
return err
}
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
var err error
err = checkGCEPD(volume, "pd-standard")
Expect(err).NotTo(HaveOccurred(), "checkGCEPD")
zones, err := framework.GetClusterZones(c)
if err != nil {
return err
}
return verifyZonesInPV(volume, zones, false /* match */)
Expect(err).NotTo(HaveOccurred(), "GetClusterZones")
err = verifyZonesInPV(volume, zones, false /* match */)
Expect(err).NotTo(HaveOccurred(), "verifyZonesInPV")
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
},
},
}
@ -317,7 +319,7 @@ func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int)
claim.Spec.StorageClassName = &class.Name
claims = append(claims, claim)
}
pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class)
pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class, nil /* node selector */, false /* expect unschedulable */)
if node == nil {
framework.Failf("unexpected nil node found")
}
@ -374,7 +376,7 @@ func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns s
claim.Spec.StorageClassName = &class.Name
claims = append(claims, claim)
}
pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class)
pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class, nil /* node selector */, false /* expect unschedulable */)
if node == nil {
framework.Failf("unexpected nil node found")
}

@ -91,7 +91,7 @@ func skipUnsupportedTest(suite TestSuite, driver TestDriver, pattern testpattern
var isSupported bool
// 1. Check whether SnapshotType is supported by the driver via its interface
// if isSupported, so it must be a snapshot test case, we just return.
// if isSupported, we still execute the driver and suite tests
if len(pattern.SnapshotType) > 0 {
switch pattern.SnapshotType {
case testpatterns.DynamicCreatedSnapshot:
@ -102,31 +102,30 @@ func skipUnsupportedTest(suite TestSuite, driver TestDriver, pattern testpattern
if !isSupported {
framework.Skipf("Driver %s doesn't support snapshot type %v -- skipping", dInfo.Name, pattern.SnapshotType)
}
return
}
} else {
// 2. Check whether volType is supported by the driver via its interface
switch pattern.VolType {
case testpatterns.InlineVolume:
_, isSupported = driver.(InlineVolumeTestDriver)
case testpatterns.PreprovisionedPV:
_, isSupported = driver.(PreprovisionedPVTestDriver)
case testpatterns.DynamicPV:
_, isSupported = driver.(DynamicPVTestDriver)
default:
isSupported = false
}
// 2. Check whether volType is supported by the driver via its interface
switch pattern.VolType {
case testpatterns.InlineVolume:
_, isSupported = driver.(InlineVolumeTestDriver)
case testpatterns.PreprovisionedPV:
_, isSupported = driver.(PreprovisionedPVTestDriver)
case testpatterns.DynamicPV:
_, isSupported = driver.(DynamicPVTestDriver)
default:
isSupported = false
}
if !isSupported {
framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
}
if !isSupported {
framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
}
// 3. Check if fsType is supported
if !dInfo.SupportedFsType.Has(pattern.FsType) {
framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.FsType)
}
if pattern.FsType == "xfs" && framework.NodeOSDistroIs("gci") {
framework.Skipf("Distro doesn't support xfs -- skipping")
// 3. Check if fsType is supported
if !dInfo.SupportedFsType.Has(pattern.FsType) {
framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.FsType)
}
if pattern.FsType == "xfs" && framework.NodeOSDistroIs("gci") {
framework.Skipf("Distro doesn't support xfs -- skipping")
}
}
// 4. Check with driver specific logic

@ -18,6 +18,7 @@ package testsuites
import (
"fmt"
"sync"
"time"
. "github.com/onsi/ginkgo"
@ -37,22 +38,19 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
)
// StorageClassTest represents parameters to be used by provisioning tests
// StorageClassTest represents parameters to be used by provisioning tests.
// Not all parameters are used by all tests.
type StorageClassTest struct {
Name string
CloudProviders []string
Provisioner string
StorageClassName string
Parameters map[string]string
DelayBinding bool
ClaimSize string
ExpectedSize string
PvCheck func(volume *v1.PersistentVolume) error
NodeName string
SkipWriteReadCheck bool
VolumeMode *v1.PersistentVolumeMode
NodeSelector map[string]string // NodeSelector for the pod
ExpectUnschedulable bool // Whether the test pod is expected to be unschedulable
Name string
CloudProviders []string
Provisioner string
StorageClassName string
Parameters map[string]string
DelayBinding bool
ClaimSize string
ExpectedSize string
PvCheck func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume)
VolumeMode *v1.PersistentVolumeMode
}
type provisioningTestSuite struct {
@ -90,16 +88,13 @@ func createProvisioningTestInput(driver TestDriver, pattern testpatterns.TestPat
ClaimSize: resource.claimSize,
ExpectedSize: resource.claimSize,
},
cs: driver.GetDriverInfo().Config.Framework.ClientSet,
dc: driver.GetDriverInfo().Config.Framework.DynamicClient,
pvc: resource.pvc,
sc: resource.sc,
vsc: resource.vsc,
dInfo: driver.GetDriverInfo(),
}
if driver.GetDriverInfo().Config.ClientNodeName != "" {
input.testCase.NodeName = driver.GetDriverInfo().Config.ClientNodeName
cs: driver.GetDriverInfo().Config.Framework.ClientSet,
dc: driver.GetDriverInfo().Config.Framework.DynamicClient,
pvc: resource.pvc,
sc: resource.sc,
vsc: resource.vsc,
dInfo: driver.GetDriverInfo(),
nodeName: driver.GetDriverInfo().Config.ClientNodeName,
}
return resource, input
@ -182,10 +177,17 @@ type provisioningTestInput struct {
sc *storage.StorageClass
vsc *unstructured.Unstructured
dInfo *DriverInfo
nodeName string
}
func testProvisioning(input *provisioningTestInput) {
// common checker for most of the test cases below
pvcheck := func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
PVWriteReadSingleNodeCheck(input.cs, claim, volume, NodeSelection{Name: input.nodeName})
}
It("should provision storage with defaults", func() {
input.testCase.PvCheck = pvcheck
TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
})
@ -195,6 +197,26 @@ func testProvisioning(input *provisioningTestInput) {
}
input.sc.MountOptions = input.dInfo.SupportedMountOption.Union(input.dInfo.RequiredMountOption).List()
input.testCase.PvCheck = pvcheck
TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
})
It("should access volume from different nodes", func() {
// The assumption is that if the test hasn't been
// locked onto a single node, then the driver is
// usable on all of them *and* supports accessing a volume
// from any node.
if input.nodeName != "" {
framework.Skipf("Driver %q only supports testing on one node - skipping", input.dInfo.Name)
}
// Ensure that we actually have more than one node.
nodes := framework.GetReadySchedulableNodesOrDie(input.cs)
if len(nodes.Items) <= 1 {
framework.Skipf("need more than one node - skipping")
}
input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
PVMultiNodeCheck(input.cs, claim, volume, NodeSelection{Name: input.nodeName})
}
TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
})
@ -204,7 +226,6 @@ func testProvisioning(input *provisioningTestInput) {
}
block := v1.PersistentVolumeBlock
input.testCase.VolumeMode = &block
input.testCase.SkipWriteReadCheck = true
input.pvc.Spec.VolumeMode = &block
TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
})
@ -214,11 +235,59 @@ func testProvisioning(input *provisioningTestInput) {
framework.Skipf("Driver %q does not support populate data from snapshot - skipping", input.dInfo.Name)
}
input.testCase.SkipWriteReadCheck = true
dataSource, cleanupFunc := prepareDataSourceForProvisioning(input.testCase, input.cs, input.dc, input.pvc, input.sc, input.vsc)
dataSource, cleanupFunc := prepareDataSourceForProvisioning(NodeSelection{Name: input.nodeName}, input.cs, input.dc, input.pvc, input.sc, input.vsc)
defer cleanupFunc()
input.pvc.Spec.DataSource = dataSource
input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
By("checking whether the created volume has the pre-populated data")
command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace)
RunInPodWithVolume(input.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, NodeSelection{Name: input.nodeName})
}
TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
})
It("should allow concurrent writes on the same node", func() {
if !input.dInfo.Capabilities[CapMultiPODs] {
framework.Skipf("Driver %q does not support multiple concurrent pods - skipping", input.dInfo.Name)
}
input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
// We start two pods concurrently on the same node,
// using the same PVC. Both wait for the other pod to create a
// file before returning. The pods are forced onto the
// same node via pod affinity.
wg := sync.WaitGroup{}
wg.Add(2)
firstPodName := "pvc-tester-first"
secondPodName := "pvc-tester-second"
run := func(podName, command string) {
defer GinkgoRecover()
defer wg.Done()
node := NodeSelection{
Name: input.nodeName,
}
if podName == secondPodName {
node.Affinity = &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
{LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
// Set by RunInPodWithVolume.
"app": firstPodName,
},
},
TopologyKey: "kubernetes.io/hostname",
},
},
},
}
}
RunInPodWithVolume(input.cs, claim.Namespace, claim.Name, podName, command, node)
}
go run(firstPodName, "touch /mnt/test/first && while ! [ -f /mnt/test/second ]; do sleep 1; done")
go run(secondPodName, "touch /mnt/test/second && while ! [ -f /mnt/test/first ]; do sleep 1; done")
wg.Wait()
}
TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
})
}
@ -291,35 +360,7 @@ func TestDynamicProvisioning(t StorageClassTest, client clientset.Interface, cla
// Run the checker
if t.PvCheck != nil {
err = t.PvCheck(pv)
Expect(err).NotTo(HaveOccurred())
}
if claim.Spec.DataSource != nil {
By("checking the created volume whether has the pre-populated data")
command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace)
runInPodWithVolume(client, claim.Namespace, claim.Name, t.NodeName, command, t.NodeSelector, t.ExpectUnschedulable)
}
if !t.SkipWriteReadCheck {
// We start two pods:
// - The first writes 'hello word' to the /mnt/test (= the volume).
// - The second one runs grep 'hello world' on /mnt/test.
// If both succeed, Kubernetes actually allocated something that is
// persistent across pods.
By("checking the created volume is writable and has the PV's mount options")
command := "echo 'hello world' > /mnt/test/data"
// We give the first pod the secondary responsibility of checking the volume has
// been mounted with the PV's mount options, if the PV was provisioned with any
for _, option := range pv.Spec.MountOptions {
// Get entry, get mount options at 6th word, replace brackets with commas
command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
}
command += " || (mount | grep 'on /mnt/test'; false)"
runInPodWithVolume(client, claim.Namespace, claim.Name, t.NodeName, command, t.NodeSelector, t.ExpectUnschedulable)
By("checking the created volume is readable and retains data")
runInPodWithVolume(client, claim.Namespace, claim.Name, t.NodeName, "grep 'hello world' /mnt/test/data", t.NodeSelector, t.ExpectUnschedulable)
t.PvCheck(claim, pv)
}
By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name))
@ -340,15 +381,125 @@ func TestDynamicProvisioning(t StorageClassTest, client clientset.Interface, cla
return pv
}
func TestBindingWaitForFirstConsumer(t StorageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass) (*v1.PersistentVolume, *v1.Node) {
pvs, node := TestBindingWaitForFirstConsumerMultiPVC(t, client, []*v1.PersistentVolumeClaim{claim}, class)
// PVWriteReadSingleNodeCheck checks that a PV retains data on a single node.
//
// It starts two pods:
// - The first pod writes 'hello world' to /mnt/test (= the volume) on one node.
// - The second pod runs grep 'hello world' on /mnt/test on the same node.
//
// The node is selected by Kubernetes when scheduling the first
// pod. It's then selected via its name for the second pod.
//
// If both succeed, Kubernetes actually allocated something that is
// persistent across pods.
//
// This is a common test that can be called from a StorageClassTest.PvCheck.
func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume, node NodeSelection) {
By(fmt.Sprintf("checking the created volume is writable and has the PV's mount options on node %+v", node))
command := "echo 'hello world' > /mnt/test/data"
// We give the first pod the secondary responsibility of checking the volume has
// been mounted with the PV's mount options, if the PV was provisioned with any
for _, option := range volume.Spec.MountOptions {
// Get entry, get mount options at 6th word, replace brackets with commas
command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
}
command += " || (mount | grep 'on /mnt/test'; false)"
pod := StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node)
defer func() {
// pod might be nil now.
StopPod(client, pod)
}()
framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "get pod")
actualNodeName := runningPod.Spec.NodeName
StopPod(client, pod)
pod = nil // Don't stop twice.
By(fmt.Sprintf("checking the created volume is readable and retains data on the same node %q", actualNodeName))
command = "grep 'hello world' /mnt/test/data"
RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, NodeSelection{Name: actualNodeName})
}
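As the doc comment above notes, this helper is meant to be plugged into StorageClassTest.PvCheck; a minimal usage sketch, assuming test, client and nodeName come from the surrounding suite:

test.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
    // Write on whichever node the scheduler (or the configured nodeName) picks,
    // then read the data back on that same node.
    PVWriteReadSingleNodeCheck(client, claim, volume, NodeSelection{Name: nodeName})
}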
// PVMultiNodeCheck checks that a PV retains data when moved between nodes.
//
// It starts these pods:
// - The first pod writes 'hello world' to /mnt/test (= the volume) on one node.
// - The second pod runs grep 'hello world' on /mnt/test on another node.
//
// The first node is selected by Kubernetes when scheduling the first pod. The second pod uses the same criteria, except that a special anti-affinity
// for the first node gets added. This test can only pass if the cluster has more than one
// suitable node. The caller has to ensure that.
//
// If all succeeds, Kubernetes actually allocated something that is
// persistent across pods and across nodes.
//
// This is a common test that can be called from a StorageClassTest.PvCheck.
func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume, node NodeSelection) {
Expect(node.Name).To(Equal(""), "this test only works when not locked onto a single node")
var pod *v1.Pod
defer func() {
// passing pod = nil is okay.
StopPod(client, pod)
}()
By(fmt.Sprintf("checking the created volume is writable and has the PV's mount options on node %+v", node))
command := "echo 'hello world' > /mnt/test/data"
pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node)
framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "get pod")
actualNodeName := runningPod.Spec.NodeName
StopPod(client, pod)
pod = nil // Don't stop twice.
// Add node-anti-affinity.
secondNode := node
if secondNode.Affinity == nil {
secondNode.Affinity = &v1.Affinity{}
}
if secondNode.Affinity.NodeAffinity == nil {
secondNode.Affinity.NodeAffinity = &v1.NodeAffinity{}
}
if secondNode.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
secondNode.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{}
}
secondNode.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = append(secondNode.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms,
v1.NodeSelectorTerm{
// https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity warns
// that "the value of kubernetes.io/hostname may be the same as the Node name in some environments and a different value in other environments".
// So this might be cleaner:
// MatchFields: []v1.NodeSelectorRequirement{
// {Key: "name", Operator: v1.NodeSelectorOpNotIn, Values: []string{actualNodeName}},
// },
// However, "name", "Name", "ObjectMeta.Name" all got rejected with "not a valid field selector key".
MatchExpressions: []v1.NodeSelectorRequirement{
{Key: "kubernetes.io/hostname", Operator: v1.NodeSelectorOpNotIn, Values: []string{actualNodeName}},
},
})
By(fmt.Sprintf("checking the created volume is readable and retains data on another node %+v", secondNode))
command = "grep 'hello world' /mnt/test/data"
pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-reader-node2", command, secondNode)
framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "get pod")
Expect(runningPod.Spec.NodeName).NotTo(Equal(actualNodeName), "second pod should have run on a different node")
StopPod(client, pod)
pod = nil
}
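Callers have to ensure the multi-node precondition themselves; a minimal sketch of the guard plus check, mirroring the "should access volume from different nodes" test above (test and client assumed from the surrounding suite):

nodes := framework.GetReadySchedulableNodesOrDie(client)
if len(nodes.Items) <= 1 {
    framework.Skipf("need more than one node - skipping")
}
test.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
    // NodeSelection.Name must be empty here; the helper rejects a fixed node.
    PVMultiNodeCheck(client, claim, volume, NodeSelection{})
}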
func TestBindingWaitForFirstConsumer(t StorageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass, nodeSelector map[string]string, expectUnschedulable bool) (*v1.PersistentVolume, *v1.Node) {
pvs, node := TestBindingWaitForFirstConsumerMultiPVC(t, client, []*v1.PersistentVolumeClaim{claim}, class, nodeSelector, expectUnschedulable)
if pvs == nil {
return nil, node
}
return pvs[0], node
}
func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientset.Interface, claims []*v1.PersistentVolumeClaim, class *storage.StorageClass) ([]*v1.PersistentVolume, *v1.Node) {
func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientset.Interface, claims []*v1.PersistentVolumeClaim, class *storage.StorageClass, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) {
var err error
Expect(len(claims)).ToNot(Equal(0))
namespace := claims[0].Namespace
@ -391,8 +542,8 @@ func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientse
By("creating a pod referring to the claims")
// Create a pod referring to the claim and wait for it to get to running
var pod *v1.Pod
if t.ExpectUnschedulable {
pod, err = framework.CreateUnschedulablePod(client, namespace, t.NodeSelector, createdClaims, true /* isPrivileged */, "" /* command */)
if expectUnschedulable {
pod, err = framework.CreateUnschedulablePod(client, namespace, nodeSelector, createdClaims, true /* isPrivileged */, "" /* command */)
} else {
pod, err = framework.CreatePod(client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */)
}
@ -401,7 +552,7 @@ func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientse
framework.DeletePodOrFail(client, pod.Namespace, pod.Name)
framework.WaitForPodToDisappear(client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
}()
if t.ExpectUnschedulable {
if expectUnschedulable {
// Verify that no claims are provisioned.
verifyPVCsPending(client, createdClaims)
return nil, nil
@ -429,17 +580,40 @@ func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientse
return pvs, node
}
// runInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
func runInPodWithVolume(c clientset.Interface, ns, claimName, nodeName, command string, nodeSelector map[string]string, unschedulable bool) {
// NodeSelection specifies where to run a pod, using a combination of fixed node name,
// node selector and/or affinity.
type NodeSelection struct {
Name string
Selector map[string]string
Affinity *v1.Affinity
}
// RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
// It starts, checks, collects output and stops it.
func RunInPodWithVolume(c clientset.Interface, ns, claimName, podName, command string, node NodeSelection) {
pod := StartInPodWithVolume(c, ns, claimName, podName, command, node)
defer StopPod(c, pod)
framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
}
// StartInPodWithVolume starts a command in a pod with given claim mounted to /mnt directory
// The caller is responsible for checking the pod and deleting it.
func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command string, node NodeSelection) *v1.Pod {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-volume-tester-",
GenerateName: podName + "-",
Labels: map[string]string{
"app": podName,
},
},
Spec: v1.PodSpec{
NodeName: node.Name,
NodeSelector: node.Selector,
Affinity: node.Affinity,
Containers: []v1.Container{
{
Name: "volume-tester",
@ -466,30 +640,26 @@ func runInPodWithVolume(c clientset.Interface, ns, claimName, nodeName, command
},
},
},
NodeSelector: nodeSelector,
},
}
if len(nodeName) != 0 {
pod.Spec.NodeName = nodeName
}
pod, err := c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err, "Failed to create pod: %v", err)
defer func() {
body, err := c.CoreV1().Pods(ns).GetLogs(pod.Name, &v1.PodLogOptions{}).Do().Raw()
if err != nil {
framework.Logf("Error getting logs for pod %s: %v", pod.Name, err)
} else {
framework.Logf("Pod %s has the following logs: %s", pod.Name, body)
}
framework.DeletePodOrFail(c, ns, pod.Name)
}()
return pod
}
if unschedulable {
framework.ExpectNoError(framework.WaitForPodNameUnschedulableInNamespace(c, pod.Name, pod.Namespace))
} else {
framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
// StopPod first tries to log the output of the pod's container, then deletes the pod.
func StopPod(c clientset.Interface, pod *v1.Pod) {
if pod == nil {
return
}
body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do().Raw()
if err != nil {
framework.Logf("Error getting logs for pod %s: %v", pod.Name, err)
} else {
framework.Logf("Pod %s has the following logs: %s", pod.Name, body)
}
framework.DeletePodOrFail(c, pod.Namespace, pod.Name)
}
func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) {
@ -502,7 +672,7 @@ func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeCl
}
func prepareDataSourceForProvisioning(
t StorageClassTest,
node NodeSelection,
client clientset.Interface,
dynamicClient dynamic.Interface,
initClaim *v1.PersistentVolumeClaim,
@ -530,7 +700,7 @@ func prepareDataSourceForProvisioning(
// write namespace to the /mnt/test (= the volume).
By("[Initialize dataSource]write data to volume")
command := fmt.Sprintf("echo '%s' > /mnt/test/initialData", updatedClaim.GetNamespace())
runInPodWithVolume(client, updatedClaim.Namespace, updatedClaim.Name, t.NodeName, command, t.NodeSelector, t.ExpectUnschedulable)
RunInPodWithVolume(client, updatedClaim.Namespace, updatedClaim.Name, "pvc-snapshot-writer", command, node)
By("[Initialize dataSource]creating a SnapshotClass")
snapshotClass, err = dynamicClient.Resource(snapshotClassGVR).Create(snapshotClass, metav1.CreateOptions{})

@ -80,6 +80,10 @@ func (s *snapshottableTestSuite) getTestSuiteInfo() TestSuiteInfo {
}
func (s *snapshottableTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) {
dInfo := driver.GetDriverInfo()
if !dInfo.Capabilities[CapDataSource] {
framework.Skipf("Driver %q does not support snapshots - skipping", dInfo.Name)
}
}
func createSnapshottableTestInput(driver TestDriver, pattern testpatterns.TestPattern) (snapshottableTestResource, snapshottableTestInput) {
@ -87,18 +91,17 @@ func createSnapshottableTestInput(driver TestDriver, pattern testpatterns.TestPa
resource := snapshottableTestResource{}
resource.setupResource(driver, pattern)
dInfo := driver.GetDriverInfo()
input := snapshottableTestInput{
testCase: SnapshotClassTest{},
cs: driver.GetDriverInfo().Config.Framework.ClientSet,
dc: driver.GetDriverInfo().Config.Framework.DynamicClient,
pvc: resource.pvc,
sc: resource.sc,
vsc: resource.vsc,
dInfo: driver.GetDriverInfo(),
}
if driver.GetDriverInfo().Config.ClientNodeName != "" {
input.testCase.NodeName = driver.GetDriverInfo().Config.ClientNodeName
testCase: SnapshotClassTest{
NodeName: dInfo.Config.ClientNodeName,
},
cs: dInfo.Config.Framework.ClientSet,
dc: dInfo.Config.Framework.DynamicClient,
pvc: resource.pvc,
sc: resource.sc,
vsc: resource.vsc,
dInfo: dInfo,
}
return resource, input
@ -187,10 +190,7 @@ type snapshottableTestInput struct {
}
func testSnapshot(input *snapshottableTestInput) {
It("should create snapshot with defaults", func() {
if input.dInfo.Name == "csi-hostpath-v0" {
framework.Skipf("skip test when using driver csi-hostpath-v0 - skipping")
}
It("should create snapshot with defaults [Feature:VolumeSnapshotDataSource]", func() {
TestCreateSnapshot(input.testCase, input.cs, input.dc, input.pvc, input.sc, input.vsc)
})
}

@ -97,6 +97,13 @@ const (
CapFsGroup Capability = "fsGroup" // volume ownership via fsGroup
CapExec Capability = "exec" // exec a file in the volume
CapDataSource Capability = "dataSource" // support populate data from snapshot
// multiple pods on a node can use the same volume concurrently;
// for CSI, see:
// - https://github.com/container-storage-interface/spec/pull/150
// - https://github.com/container-storage-interface/spec/issues/178
// - NodeStageVolume in the spec
CapMultiPODs Capability = "multipods"
)
// DriverInfo represents a combination of parameters to be used in implementation of TestDriver
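A driver advertises the new capability in its DriverInfo and a suite gates on it before running concurrency tests; a minimal sketch of both sides, following the hostpath driver and provisioning suite hunks above:

// Driver side (drivers package): declare support for concurrent pods on one node.
Capabilities: map[testsuites.Capability]bool{
    testsuites.CapPersistence: true,
    testsuites.CapMultiPODs:   true,
},

// Suite side (testsuites package): skip drivers that have not opted in.
if !dInfo.Capabilities[CapMultiPODs] {
    framework.Skipf("Driver %q does not support multiple concurrent pods - skipping", dInfo.Name)
}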

@ -226,7 +226,7 @@ func testZonalDelayedBinding(c clientset.Interface, ns string, specifyAllowedTop
claim.Spec.StorageClassName = &class.Name
claims = append(claims, claim)
}
pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class)
pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class, nil /* node selector */, false /* expect unschedulable */)
if node == nil {
framework.Failf("unexpected nil node found")
}
@ -273,8 +273,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(volume *v1.PersistentVolume) error {
return checkGCEPD(volume, "pd-ssd")
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
err := checkGCEPD(volume, "pd-ssd")
Expect(err).NotTo(HaveOccurred(), "checkGCEPD pd-ssd")
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
},
},
{
@ -286,8 +288,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(volume *v1.PersistentVolume) error {
return checkGCEPD(volume, "pd-standard")
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
err := checkGCEPD(volume, "pd-standard")
Expect(err).NotTo(HaveOccurred(), "checkGCEPD pd-standard")
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
},
},
// AWS
@ -301,8 +305,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "gp2", false)
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
err := checkAWSEBS(volume, "gp2", false)
Expect(err).NotTo(HaveOccurred(), "checkAWSEBS gp2")
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
},
},
{
@ -315,8 +321,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
},
ClaimSize: "3.5Gi",
ExpectedSize: "4Gi", // 4 GiB is minimum for io1
PvCheck: func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "io1", false)
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
err := checkAWSEBS(volume, "io1", false)
Expect(err).NotTo(HaveOccurred(), "checkAWSEBS io1")
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
},
},
{
@ -328,8 +336,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
},
ClaimSize: "500Gi", // minimum for sc1
ExpectedSize: "500Gi",
PvCheck: func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "sc1", false)
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
err := checkAWSEBS(volume, "sc1", false)
Expect(err).NotTo(HaveOccurred(), "checkAWSEBS sc1")
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
},
},
{
@ -341,8 +351,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
},
ClaimSize: "500Gi", // minimum for st1
ExpectedSize: "500Gi",
PvCheck: func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "st1", false)
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
err := checkAWSEBS(volume, "st1", false)
Expect(err).NotTo(HaveOccurred(), "checkAWSEBS st1")
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
},
},
{
@ -354,8 +366,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
},
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
PvCheck: func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "gp2", true)
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
err := checkAWSEBS(volume, "gp2", true)
Expect(err).NotTo(HaveOccurred(), "checkAWSEBS gp2 encrypted")
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
},
},
// OpenStack generic tests (works on all OpenStack deployments)
@ -366,7 +380,9 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Parameters: map[string]string{},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: nil, // there is currently nothing to check on OpenStack
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
},
},
{
Name: "Cinder volume with empty volume type and zone on OpenStack",
@ -378,7 +394,9 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: nil, // there is currently nothing to check on OpenStack
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
},
},
// vSphere generic test
{
@ -388,7 +406,9 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Parameters: map[string]string{},
ClaimSize: "1.5Gi",
ExpectedSize: "1.5Gi",
PvCheck: nil,
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
},
},
// Azure
{
@ -398,7 +418,9 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Parameters: map[string]string{},
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
PvCheck: nil,
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
},
},
}
@ -451,8 +473,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
},
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
PvCheck: func(volume *v1.PersistentVolume) error {
return checkGCEPD(volume, "pd-standard")
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
err := checkGCEPD(volume, "pd-standard")
Expect(err).NotTo(HaveOccurred(), "checkGCEPD")
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
},
}
class := newStorageClass(test, ns, "reclaimpolicy")
@ -793,12 +817,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
serverUrl := "https://" + pod.Status.PodIP + ":8081"
By("creating a StorageClass")
test := testsuites.StorageClassTest{
Name: "Gluster Dynamic provisioner test",
Provisioner: "kubernetes.io/glusterfs",
ClaimSize: "2Gi",
ExpectedSize: "2Gi",
Parameters: map[string]string{"resturl": serverUrl},
SkipWriteReadCheck: true,
Name: "Gluster Dynamic provisioner test",
Provisioner: "kubernetes.io/glusterfs",
ClaimSize: "2Gi",
ExpectedSize: "2Gi",
Parameters: map[string]string{"resturl": serverUrl},
}
suffix := fmt.Sprintf("glusterdptest")
class := newStorageClass(test, ns, suffix)