Add e2e tests for multiAttach

k3s-v1.15.3
Masaki Kimura 2019-02-28 01:31:37 +00:00
parent 8bde75e63f
commit b4c88acec6
16 changed files with 638 additions and 180 deletions


@@ -93,6 +93,14 @@ type PersistentVolumeClaimConfig struct {
 	VolumeMode *v1.PersistentVolumeMode
 }
 
+// NodeSelection specifies where to run a pod, using a combination of fixed node name,
+// node selector and/or affinity.
+type NodeSelection struct {
+	Name     string
+	Selector map[string]string
+	Affinity *v1.Affinity
+}
+
 // Clean up a pv and pvc in a single pv/pvc test case.
 // Note: delete errors are appended to []error so that we can attempt to delete both the pvc and pv.
 func PVPVCCleanup(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) []error {
@@ -874,14 +882,16 @@ func CreateNginxPod(client clientset.Interface, namespace string, nodeSelector m
 // create security pod with given claims
 func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, timeout time.Duration) (*v1.Pod, error) {
-	return CreateSecPodWithNodeName(client, namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup, "", timeout)
+	return CreateSecPodWithNodeSelection(client, namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup, NodeSelection{}, timeout)
 }
 
 // create security pod with given claims
-func CreateSecPodWithNodeName(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, nodeName string, timeout time.Duration) (*v1.Pod, error) {
+func CreateSecPodWithNodeSelection(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, node NodeSelection, timeout time.Duration) (*v1.Pod, error) {
 	pod := MakeSecPod(namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup)
-	// Setting nodeName
-	pod.Spec.NodeName = nodeName
+	// Setting node
+	pod.Spec.NodeName = node.Name
+	pod.Spec.NodeSelector = node.Selector
+	pod.Spec.Affinity = node.Affinity
 
 	pod, err := client.CoreV1().Pods(namespace).Create(pod)
 	if err != nil {
@@ -901,6 +911,44 @@ func CreateSecPodWithNodeName(client clientset.Interface, namespace string, pvcl
 	return pod, nil
 }
 
+// SetNodeAffinityRequirement sets an affinity requirement with the specified operator for nodeName on nodeSelection
+func SetNodeAffinityRequirement(nodeSelection *NodeSelection, operator v1.NodeSelectorOperator, nodeName string) {
+	// Add a node affinity/anti-affinity requirement for nodeName.
+	if nodeSelection.Affinity == nil {
+		nodeSelection.Affinity = &v1.Affinity{}
+	}
+	if nodeSelection.Affinity.NodeAffinity == nil {
+		nodeSelection.Affinity.NodeAffinity = &v1.NodeAffinity{}
+	}
+	if nodeSelection.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+		nodeSelection.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{}
+	}
+	nodeSelection.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = append(nodeSelection.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms,
+		v1.NodeSelectorTerm{
+			// https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity warns
+			// that "the value of kubernetes.io/hostname may be the same as the Node name in some environments and a different value in other environments".
+			// So this might be cleaner:
+			// MatchFields: []v1.NodeSelectorRequirement{
+			//	{Key: "name", Operator: v1.NodeSelectorOpNotIn, Values: []string{nodeName}},
+			// },
+			// However, "name", "Name", "ObjectMeta.Name" all got rejected with "not a valid field selector key".
+			MatchExpressions: []v1.NodeSelectorRequirement{
+				{Key: "kubernetes.io/hostname", Operator: operator, Values: []string{nodeName}},
+			},
+		})
+}
+
+// SetAffinity sets affinity to nodeName on nodeSelection
+func SetAffinity(nodeSelection *NodeSelection, nodeName string) {
+	SetNodeAffinityRequirement(nodeSelection, v1.NodeSelectorOpIn, nodeName)
+}
+
+// SetAntiAffinity sets anti-affinity to nodeName on nodeSelection
+func SetAntiAffinity(nodeSelection *NodeSelection, nodeName string) {
+	SetNodeAffinityRequirement(nodeSelection, v1.NodeSelectorOpNotIn, nodeName)
+}
+
 // Define and create a pod with a mounted PV. Pod runs infinite loop until killed.
 func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
 	return CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
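Illustrative sketch (not part of the commit above): how a NodeSelection with anti-affinity ends up on a pod spec, mirroring SetAntiAffinity plus the three assignments in CreateSecPodWithNodeSelection. Only the k8s.io/api/core/v1 types are assumed; the local nodeSelection type and setAntiAffinity function are stand-ins for the framework helpers.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// nodeSelection mirrors the NodeSelection struct added to the framework above.
type nodeSelection struct {
	Name     string
	Selector map[string]string
	Affinity *v1.Affinity
}

// setAntiAffinity appends a required node-affinity term that keeps the pod off nodeName,
// the same way SetNodeAffinityRequirement does with v1.NodeSelectorOpNotIn.
func setAntiAffinity(sel *nodeSelection, nodeName string) {
	if sel.Affinity == nil {
		sel.Affinity = &v1.Affinity{}
	}
	if sel.Affinity.NodeAffinity == nil {
		sel.Affinity.NodeAffinity = &v1.NodeAffinity{}
	}
	if sel.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
		sel.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{}
	}
	req := sel.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution
	req.NodeSelectorTerms = append(req.NodeSelectorTerms, v1.NodeSelectorTerm{
		MatchExpressions: []v1.NodeSelectorRequirement{
			{Key: "kubernetes.io/hostname", Operator: v1.NodeSelectorOpNotIn, Values: []string{nodeName}},
		},
	})
}

func main() {
	node := nodeSelection{}
	setAntiAffinity(&node, "node-1")

	// The same three assignments CreateSecPodWithNodeSelection performs on the pod it creates.
	pod := v1.Pod{}
	pod.Spec.NodeName = node.Name
	pod.Spec.NodeSelector = node.Selector
	pod.Spec.Affinity = node.Affinity

	fmt.Printf("%+v\n", pod.Spec.Affinity.NodeAffinity)
}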


@@ -153,12 +153,12 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
             scTest.AllowVolumeExpansion = true
         }
 
-        nodeSelection := testsuites.NodeSelection{
+        nodeSelection := framework.NodeSelection{
            // The mock driver only works when everything runs on a single node.
            Name: nodeName,
         }
         if len(m.nodeLabel) > 0 {
-            nodeSelection = testsuites.NodeSelection{
+            nodeSelection = framework.NodeSelection{
                 Selector: m.nodeLabel,
             }
         }
@@ -177,11 +177,11 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
     createPodWithPVC := func(pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
         nodeName := m.config.ClientNodeName
-        nodeSelection := testsuites.NodeSelection{
+        nodeSelection := framework.NodeSelection{
             Name: nodeName,
         }
         if len(m.nodeLabel) > 0 {
-            nodeSelection = testsuites.NodeSelection{
+            nodeSelection = framework.NodeSelection{
                 Selector: m.nodeLabel,
             }
         }
@@ -597,7 +597,7 @@ func checkNodeForLimits(nodeName string, attachKey v1.ResourceName, cs clientset
     return int(attachLimit), waitErr
 }
 
-func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node testsuites.NodeSelection, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
+func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node framework.NodeSelection, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
     class := newStorageClass(t, ns, "")
     var err error
     _, err = cs.StorageV1().StorageClasses().Get(class.Name, metav1.GetOptions{})
@@ -659,7 +659,7 @@ func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node t
     return class, claim, pod
 }
 
-func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node testsuites.NodeSelection, ns string) (*v1.Pod, error) {
+func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node framework.NodeSelection, ns string) (*v1.Pod, error) {
     pod := &v1.Pod{
         ObjectMeta: metav1.ObjectMeta{
             GenerateName: "pvc-volume-tester-",


@@ -47,6 +47,7 @@ var csiTestSuites = []func() testsuites.TestSuite{
     testsuites.InitSubPathTestSuite,
     testsuites.InitProvisioningTestSuite,
     testsuites.InitSnapshottableTestSuite,
+    testsuites.InitMultiVolumeTestSuite,
 }
 
 // This executes testSuites for csi volumes.
@@ -155,7 +156,7 @@ func testTopologyNegative(cs clientset.Interface, suffix, namespace string, dela
     test.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
         // Ensure that a pod cannot be scheduled in an unsuitable zone.
         pod := testsuites.StartInPodWithVolume(cs, namespace, claim.Name, "pvc-tester-unschedulable", "sleep 100000",
-            testsuites.NodeSelection{Selector: nodeSelector})
+            framework.NodeSelection{Selector: nodeSelector})
         defer testsuites.StopPod(cs, pod)
         framework.ExpectNoError(framework.WaitForPodNameUnschedulableInNamespace(cs, pod.Name, pod.Namespace), "pod should be unschedulable")
     }


@@ -95,6 +95,7 @@ func InitNFSDriver() testsuites.TestDriver {
             Capabilities: map[testsuites.Capability]bool{
                 testsuites.CapPersistence: true,
                 testsuites.CapExec:        true,
+                testsuites.CapRWX:         true,
             },
         },
     }
@@ -232,6 +233,7 @@ func InitGlusterFSDriver() testsuites.TestDriver {
             Capabilities: map[testsuites.Capability]bool{
                 testsuites.CapPersistence: true,
                 testsuites.CapExec:        true,
+                testsuites.CapRWX:         true,
             },
         },
     }
@@ -586,6 +588,7 @@ func InitCephFSDriver() testsuites.TestDriver {
             Capabilities: map[testsuites.Capability]bool{
                 testsuites.CapPersistence: true,
                 testsuites.CapExec:        true,
+                testsuites.CapRWX:         true,
             },
         },
     }


@@ -55,6 +55,7 @@ var testSuites = []func() testsuites.TestSuite{
     testsuites.InitVolumeModeTestSuite,
     testsuites.InitSubPathTestSuite,
     testsuites.InitProvisioningTestSuite,
+    testsuites.InitMultiVolumeTestSuite,
 }
 
 // This executes testSuites for in-tree volumes.


@@ -30,6 +30,7 @@ import (
     utilerrors "k8s.io/apimachinery/pkg/util/errors"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
+    "k8s.io/kubernetes/test/e2e/storage/testsuites"
     "k8s.io/kubernetes/test/e2e/storage/utils"
     imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -308,68 +309,22 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
     Describe("Default StorageClass", func() {
         Context("pods that use multiple volumes", func() {
 
-            AfterEach(func() {
-                framework.DeleteAllStatefulSets(c, ns)
-            })
-
             It("should be reschedulable [Slow]", func() {
                 // Only run on providers with default storageclass
                 framework.SkipUnlessProviderIs("openstack", "gce", "gke", "vsphere", "azure")
 
                 numVols := 4
-                ssTester := framework.NewStatefulSetTester(c)
 
-                By("Creating a StatefulSet pod to initialize data")
-                writeCmd := "true"
-                for i := 0; i < numVols; i++ {
-                    writeCmd += fmt.Sprintf("&& touch %v", getVolumeFile(i))
-                }
-                writeCmd += "&& sleep 10000"
-
-                probe := &v1.Probe{
-                    Handler: v1.Handler{
-                        Exec: &v1.ExecAction{
-                            // Check that the last file got created
-                            Command: []string{"test", "-f", getVolumeFile(numVols - 1)},
-                        },
-                    },
-                    InitialDelaySeconds: 1,
-                    PeriodSeconds:       1,
-                }
-
-                mounts := []v1.VolumeMount{}
-                claims := []v1.PersistentVolumeClaim{}
+                By("Creating pvcs")
+                claims := []*v1.PersistentVolumeClaim{}
                 for i := 0; i < numVols; i++ {
                     pvc := framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{}, ns)
-                    pvc.Name = getVolName(i)
-                    mounts = append(mounts, v1.VolumeMount{Name: pvc.Name, MountPath: getMountPath(i)})
-                    claims = append(claims, *pvc)
+                    claims = append(claims, pvc)
                 }
 
-                spec := makeStatefulSetWithPVCs(ns, writeCmd, mounts, claims, probe)
-                ss, err := c.AppsV1().StatefulSets(ns).Create(spec)
-                Expect(err).NotTo(HaveOccurred())
-                ssTester.WaitForRunningAndReady(1, ss)
-
-                By("Deleting the StatefulSet but not the volumes")
-                // Scale down to 0 first so that the Delete is quick
-                ss, err = ssTester.Scale(ss, 0)
-                Expect(err).NotTo(HaveOccurred())
-                ssTester.WaitForStatusReplicas(ss, 0)
-                err = c.AppsV1().StatefulSets(ns).Delete(ss.Name, &metav1.DeleteOptions{})
-                Expect(err).NotTo(HaveOccurred())
-
-                By("Creating a new Statefulset and validating the data")
-                validateCmd := "true"
-                for i := 0; i < numVols; i++ {
-                    validateCmd += fmt.Sprintf("&& test -f %v", getVolumeFile(i))
-                }
-                validateCmd += "&& sleep 10000"
-
-                spec = makeStatefulSetWithPVCs(ns, validateCmd, mounts, claims, probe)
-                ss, err = c.AppsV1().StatefulSets(ns).Create(spec)
-                Expect(err).NotTo(HaveOccurred())
-                ssTester.WaitForRunningAndReady(1, ss)
+                By("Testing access to pvcs before and after pod recreation on a different node")
+                testsuites.TestAccessMultipleVolumesAcrossPodRecreation(f, c, ns,
+                    framework.NodeSelection{}, claims, false /* sameNode */)
             })
         })
     })


@@ -116,7 +116,7 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
                 err = verifyZonesInPV(volume, sets.NewString(cloudZones...), true /* match */)
                 Expect(err).NotTo(HaveOccurred(), "verifyZonesInPV")
 
-                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
+                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
             },
         },
         {
@@ -138,7 +138,7 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
                 err = verifyZonesInPV(volume, zones, false /* match */)
                 Expect(err).NotTo(HaveOccurred(), "verifyZonesInPV")
 
-                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
+                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
             },
         },
     }


@@ -5,6 +5,7 @@ go_library(
     srcs = [
         "base.go",
         "driveroperations.go",
+        "multivolume.go",
        "provisioning.go",
        "snapshottable.go",
        "subpath.go",
@@ -28,6 +29,7 @@ go_library(
        "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
        "//staging/src/k8s.io/client-go/dynamic:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//test/e2e/framework:go_default_library",


@@ -188,7 +188,7 @@ func createGenericVolumeTestResource(driver TestDriver, config *PerTestConfig, p
         if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
             pvSource, volumeNodeAffinity := pDriver.GetPersistentVolumeSource(false, fsType, r.volume)
             if pvSource != nil {
-                r.volSource, r.pv, r.pvc = createVolumeSourceWithPVCPV(f, dInfo.Name, pvSource, volumeNodeAffinity, false)
+                r.volSource, r.pv, r.pvc = createVolumeSourceWithPVCPV(f, dInfo.Name, pvSource, volumeNodeAffinity, false, pattern.VolMode)
             }
             r.volType = fmt.Sprintf("%s-preprovisionedPV", dInfo.Name)
         }
@@ -205,7 +205,7 @@ func createGenericVolumeTestResource(driver TestDriver, config *PerTestConfig, p
         if r.sc != nil {
             r.volSource, r.pv, r.pvc = createVolumeSourceWithPVCPVFromDynamicProvisionSC(
-                f, dInfo.Name, claimSize, r.sc, false, nil)
+                f, dInfo.Name, claimSize, r.sc, false, pattern.VolMode)
         }
         r.volType = fmt.Sprintf("%s-dynamicPV", dInfo.Name)
     }
@@ -269,6 +269,7 @@ func createVolumeSourceWithPVCPV(
     pvSource *v1.PersistentVolumeSource,
     volumeNodeAffinity *v1.VolumeNodeAffinity,
     readOnly bool,
+    volMode v1.PersistentVolumeMode,
 ) (*v1.VolumeSource, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
     pvConfig := framework.PersistentVolumeConfig{
         NamePrefix: fmt.Sprintf("%s-", name),
@@ -276,10 +277,16 @@ func createVolumeSourceWithPVCPV(
         PVSource:     *pvSource,
         NodeAffinity: volumeNodeAffinity,
     }
 
     pvcConfig := framework.PersistentVolumeClaimConfig{
         StorageClassName: &f.Namespace.Name,
     }
 
+    if volMode != "" {
+        pvConfig.VolumeMode = &volMode
+        pvcConfig.VolumeMode = &volMode
+    }
+
     framework.Logf("Creating PVC and PV")
     pv, pvc, err := framework.CreatePVCPV(f.ClientSet, pvConfig, pvcConfig, f.Namespace.Name, false)
     Expect(err).NotTo(HaveOccurred(), "PVC, PV creation failed")
@@ -302,7 +309,7 @@ func createVolumeSourceWithPVCPVFromDynamicProvisionSC(
     claimSize string,
     sc *storagev1.StorageClass,
     readOnly bool,
-    volMode *v1.PersistentVolumeMode,
+    volMode v1.PersistentVolumeMode,
 ) (*v1.VolumeSource, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
     cs := f.ClientSet
     ns := f.Namespace.Name
@@ -310,8 +317,8 @@ func createVolumeSourceWithPVCPVFromDynamicProvisionSC(
     By("creating a claim")
     pvc := getClaim(claimSize, ns)
     pvc.Spec.StorageClassName = &sc.Name
-    if volMode != nil {
-        pvc.Spec.VolumeMode = volMode
+    if volMode != "" {
+        pvc.Spec.VolumeMode = &volMode
     }
 
     var err error
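Illustrative sketch (not part of the commit above) of the convention the changed helpers rely on: an empty v1.PersistentVolumeMode value means "leave the default", and the pointer fields on the PV/PVC configs are only set when a mode was actually requested. volumeModePtr is a local helper name for the example.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// volumeModePtr returns nil for the empty value, otherwise a pointer to the
// by-value parameter, which is what the helpers above do inline.
func volumeModePtr(volMode v1.PersistentVolumeMode) *v1.PersistentVolumeMode {
	if volMode == "" {
		return nil // keep the API server's default (Filesystem)
	}
	return &volMode
}

func main() {
	fmt.Println(volumeModePtr(""))                        // <nil>
	fmt.Println(*volumeModePtr(v1.PersistentVolumeBlock)) // Block
}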


@@ -22,6 +22,7 @@ import (
     storagev1 "k8s.io/api/storage/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+    "k8s.io/apiserver/pkg/storage/names"
     "k8s.io/kubernetes/test/e2e/framework"
     "k8s.io/kubernetes/test/e2e/storage/testpatterns"
 )
@@ -71,8 +72,8 @@ func GetStorageClass(
             Kind: "StorageClass",
         },
         ObjectMeta: metav1.ObjectMeta{
-            // Name must be unique, so let's base it on namespace name
-            Name: ns + "-" + suffix,
+            // Name must be unique, so let's base it on namespace name and use GenerateName
+            Name: names.SimpleNameGenerator.GenerateName(ns + "-" + suffix),
         },
         Provisioner: provisioner,
         Parameters:  parameters,
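Illustrative sketch (not part of the commit above): what the switch to names.SimpleNameGenerator.GenerateName buys. Each call appends a random suffix to the base, so several StorageClasses derived from the same namespace/suffix pair no longer collide. The base string here is made up for the example.

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/storage/names"
)

func main() {
	base := "e2e-ns-sc-"
	// Two calls with the same base yield two distinct names.
	fmt.Println(names.SimpleNameGenerator.GenerateName(base))
	fmt.Println(names.SimpleNameGenerator.GenerateName(base))
}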


@@ -0,0 +1,474 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testsuites
import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
type multiVolumeTestSuite struct {
tsInfo TestSuiteInfo
}
var _ TestSuite = &multiVolumeTestSuite{}
// InitMultiVolumeTestSuite returns multiVolumeTestSuite that implements TestSuite interface
func InitMultiVolumeTestSuite() TestSuite {
return &multiVolumeTestSuite{
tsInfo: TestSuiteInfo{
name: "multiVolume",
testPatterns: []testpatterns.TestPattern{
testpatterns.FsVolModePreprovisionedPV,
testpatterns.FsVolModeDynamicPV,
testpatterns.BlockVolModePreprovisionedPV,
testpatterns.BlockVolModeDynamicPV,
},
},
}
}
func (t *multiVolumeTestSuite) getTestSuiteInfo() TestSuiteInfo {
return t.tsInfo
}
func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
type local struct {
config *PerTestConfig
testCleanup func()
cs clientset.Interface
ns *v1.Namespace
driver TestDriver
resources []*genericVolumeTestResource
}
var (
dInfo = driver.GetDriverInfo()
l local
)
BeforeEach(func() {
// Check preconditions.
if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[CapBlock] {
framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolMode)
}
})
// This intentionally comes after checking the preconditions because it
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("multivolume")
init := func() {
l = local{}
l.ns = f.Namespace
l.cs = f.ClientSet
l.driver = driver
// Now do the more expensive test initialization.
l.config, l.testCleanup = driver.PrepareTest(f)
}
cleanup := func() {
for _, resource := range l.resources {
resource.cleanupResource()
}
if l.testCleanup != nil {
l.testCleanup()
l.testCleanup = nil
}
}
// This tests the following configuration:
// [pod1] same node [pod2]
// [ node1 ] ==> [ node1 ]
// / \ <- same volume mode / \
// [volume1] [volume2] [volume1] [volume2]
It("should access to two volumes with the same volume mode and retain data across pod recreation on the same node", func() {
// Currently, multiple volumes are not generally available for pre-provisioned volumes,
// because containerized storage servers, such as iSCSI and rbd, are just returning
// a static volume inside container, not actually creating a new volume per request.
if pattern.VolType == testpatterns.PreprovisionedPV {
framework.Skipf("This test doesn't work with pre-provisioned volume -- skipping")
}
init()
defer cleanup()
var pvcs []*v1.PersistentVolumeClaim
numVols := 2
for i := 0; i < numVols; i++ {
resource := createGenericVolumeTestResource(driver, l.config, pattern)
l.resources = append(l.resources, resource)
pvcs = append(pvcs, resource.pvc)
}
TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
framework.NodeSelection{Name: l.config.ClientNodeName}, pvcs, true /* sameNode */)
})
// This tests the following configuration:
// [pod1] different node [pod2]
// [ node1 ] ==> [ node2 ]
// / \ <- same volume mode / \
// [volume1] [volume2] [volume1] [volume2]
It("should access to two volumes with the same volume mode and retain data across pod recreation on different node", func() {
// Currently, multiple volumes are not generally available for pre-provisioned volumes,
// because containerized storage servers, such as iSCSI and rbd, are just returning
// a static volume inside container, not actually creating a new volume per request.
if pattern.VolType == testpatterns.PreprovisionedPV {
framework.Skipf("This test doesn't work with pre-provisioned volume -- skipping")
}
init()
defer cleanup()
// Check different-node test requirement
nodes := framework.GetReadySchedulableNodesOrDie(l.cs)
if len(nodes.Items) < 2 {
framework.Skipf("Number of available nodes is less than 2 - skipping")
}
if l.config.ClientNodeName != "" {
framework.Skipf("Driver %q requires to deploy on a specific node - skipping", l.driver.GetDriverInfo().Name)
}
var pvcs []*v1.PersistentVolumeClaim
numVols := 2
for i := 0; i < numVols; i++ {
resource := createGenericVolumeTestResource(driver, l.config, pattern)
l.resources = append(l.resources, resource)
pvcs = append(pvcs, resource.pvc)
}
TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
framework.NodeSelection{Name: l.config.ClientNodeName}, pvcs, false /* sameNode */)
})
// This tests the following configuration (only the <block, filesystem> pattern is tested):
// [pod1] same node [pod2]
// [ node1 ] ==> [ node1 ]
// / \ <- different volume mode / \
// [volume1] [volume2] [volume1] [volume2]
It("should access to two volumes with different volume mode and retain data across pod recreation on the same node", func() {
if pattern.VolMode == v1.PersistentVolumeFilesystem {
framework.Skipf("Filesystem volume case should be covered by block volume case -- skipping")
}
// Currently, multiple volumes are not generally available for pre-provisioned volumes,
// because containerized storage servers, such as iSCSI and rbd, are just returning
// a static volume inside container, not actually creating a new volume per request.
if pattern.VolType == testpatterns.PreprovisionedPV {
framework.Skipf("This test doesn't work with pre-provisioned volume -- skipping")
}
init()
defer cleanup()
var pvcs []*v1.PersistentVolumeClaim
numVols := 2
for i := 0; i < numVols; i++ {
curPattern := pattern
if i != 0 {
// The 1st volume should be block; set filesystem for the 2nd and later volumes
curPattern.VolMode = v1.PersistentVolumeFilesystem
}
resource := createGenericVolumeTestResource(driver, l.config, curPattern)
l.resources = append(l.resources, resource)
pvcs = append(pvcs, resource.pvc)
}
TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
framework.NodeSelection{Name: l.config.ClientNodeName}, pvcs, true /* sameNode */)
})
// This tests the following configuration (only the <block, filesystem> pattern is tested):
// [pod1] different node [pod2]
// [ node1 ] ==> [ node2 ]
// / \ <- different volume mode / \
// [volume1] [volume2] [volume1] [volume2]
It("should access to two volumes with different volume mode and retain data across pod recreation on different node", func() {
if pattern.VolMode == v1.PersistentVolumeFilesystem {
framework.Skipf("Filesystem volume case should be covered by block volume case -- skipping")
}
// Currently, multiple volumes are not generally available for pre-provisioned volumes,
// because containerized storage servers, such as iSCSI and rbd, are just returning
// a static volume inside container, not actually creating a new volume per request.
if pattern.VolType == testpatterns.PreprovisionedPV {
framework.Skipf("This test doesn't work with pre-provisioned volume -- skipping")
}
init()
defer cleanup()
// Check different-node test requirement
nodes := framework.GetReadySchedulableNodesOrDie(l.cs)
if len(nodes.Items) < 2 {
framework.Skipf("Number of available nodes is less than 2 - skipping")
}
if l.config.ClientNodeName != "" {
framework.Skipf("Driver %q requires to deploy on a specific node - skipping", l.driver.GetDriverInfo().Name)
}
var pvcs []*v1.PersistentVolumeClaim
numVols := 2
for i := 0; i < numVols; i++ {
curPattern := pattern
if i != 0 {
// The 1st volume should be block; set filesystem for the 2nd and later volumes
curPattern.VolMode = v1.PersistentVolumeFilesystem
}
resource := createGenericVolumeTestResource(driver, l.config, curPattern)
l.resources = append(l.resources, resource)
pvcs = append(pvcs, resource.pvc)
}
TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
framework.NodeSelection{Name: l.config.ClientNodeName}, pvcs, false /* sameNode */)
})
// This tests the following configuration:
// [pod1] [pod2]
// [ node1 ]
// \ / <- same volume mode
// [volume1]
It("should concurrently access the single volume from pods on the same node", func() {
init()
defer cleanup()
numPods := 2
if !l.driver.GetDriverInfo().Capabilities[CapMultiPODs] {
framework.Skipf("Driver %q does not support multiple concurrent pods - skipping", dInfo.Name)
}
// Create volume
resource := createGenericVolumeTestResource(l.driver, l.config, pattern)
l.resources = append(l.resources, resource)
// Test access to the volume from pods on the same node
TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
framework.NodeSelection{Name: l.config.ClientNodeName}, resource.pvc, numPods, true /* sameNode */)
})
// This tests the following configuration:
// [pod1] [pod2]
// [ node1 ] [ node2 ]
// \ / <- same volume mode
// [volume1]
It("should concurrently access the single volume from pods on different node", func() {
init()
defer cleanup()
numPods := 2
if !l.driver.GetDriverInfo().Capabilities[CapRWX] {
framework.Skipf("Driver %s doesn't support %v -- skipping", l.driver.GetDriverInfo().Name, CapRWX)
}
// Check different-node test requirement
nodes := framework.GetReadySchedulableNodesOrDie(l.cs)
if len(nodes.Items) < numPods {
framework.Skipf(fmt.Sprintf("Number of available nodes is less than %d - skipping", numPods))
}
if l.config.ClientNodeName != "" {
framework.Skipf("Driver %q requires to deploy on a specific node - skipping", l.driver.GetDriverInfo().Name)
}
// Create volume
resource := createGenericVolumeTestResource(l.driver, l.config, pattern)
l.resources = append(l.resources, resource)
// Test access to the volume from pods on different nodes
TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
framework.NodeSelection{Name: l.config.ClientNodeName}, resource.pvc, numPods, false /* sameNode */)
})
}
// testAccessMultipleVolumes tests access to multiple volumes from a single pod on the specified node.
// If readSeedBase > 0, read tests are done before the write/read tests, assuming that data has already been written.
func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, ns string,
node framework.NodeSelection, pvcs []*v1.PersistentVolumeClaim, readSeedBase int64, writeSeedBase int64) string {
By(fmt.Sprintf("Creating pod on %+v with multiple volumes", node))
pod, err := framework.CreateSecPodWithNodeSelection(cs, ns, pvcs,
false, "", false, false, framework.SELinuxLabel,
nil, node, framework.PodStartTimeout)
defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
}()
Expect(err).NotTo(HaveOccurred())
byteLen := 64
for i, pvc := range pvcs {
// CreateSecPodWithNodeSelection makes volumes accessible via /mnt/volume({i} + 1)
index := i + 1
path := fmt.Sprintf("/mnt/volume%d", index)
By(fmt.Sprintf("Checking if the volume%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
utils.CheckVolumeModeOfPath(pod, *pvc.Spec.VolumeMode, path)
if readSeedBase > 0 {
By(fmt.Sprintf("Checking if read from the volume%d works properly", index))
utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, readSeedBase+int64(i))
}
By(fmt.Sprintf("Checking if write to the volume%d works properly", index))
utils.CheckWriteToPath(pod, *pvc.Spec.VolumeMode, path, byteLen, writeSeedBase+int64(i))
By(fmt.Sprintf("Checking if read from the volume%d works properly", index))
utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, writeSeedBase+int64(i))
}
pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "get pod")
return pod.Spec.NodeName
}
// TestAccessMultipleVolumesAcrossPodRecreation tests access to multiple volumes from a single pod,
// then recreates the pod on the same or a different node depending on requiresSameNode,
// and rechecks access to the volumes from the recreated pod
func TestAccessMultipleVolumesAcrossPodRecreation(f *framework.Framework, cs clientset.Interface, ns string,
node framework.NodeSelection, pvcs []*v1.PersistentVolumeClaim, requiresSameNode bool) {
// No data has been written to the volumes yet, so pass a negative value
readSeedBase := int64(-1)
writeSeedBase := time.Now().UTC().UnixNano()
// Test access to multiple volumes on the specified node
nodeName := testAccessMultipleVolumes(f, cs, ns, node, pvcs, readSeedBase, writeSeedBase)
// Set affinity depending on requiresSameNode
if requiresSameNode {
framework.SetAffinity(&node, nodeName)
} else {
framework.SetAntiAffinity(&node, nodeName)
}
// Test access to multiple volumes again on the node updated above
// Setting previous writeSeed to current readSeed to check previous data is retained
readSeedBase = writeSeedBase
// Update writeSeed with new value
writeSeedBase = time.Now().UTC().UnixNano()
_ = testAccessMultipleVolumes(f, cs, ns, node, pvcs, readSeedBase, writeSeedBase)
}
// TestConcurrentAccessToSingleVolume tests access to a single volume from multiple pods,
// then deletes the last pod and rechecks access to the volume to verify that deleting one
// pod doesn't affect the others. Pods are deployed on the same node or different nodes depending on requiresSameNode.
// Read/write checks are done across pods, by checking from pod{n} that it can read what both pod{n-1} and pod{n} wrote.
func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Interface, ns string,
node framework.NodeSelection, pvc *v1.PersistentVolumeClaim, numPods int, requiresSameNode bool) {
var pods []*v1.Pod
// Create each pod with pvc
for i := 0; i < numPods; i++ {
index := i + 1
By(fmt.Sprintf("Creating pod%d with a volume on %+v", index, node))
pod, err := framework.CreateSecPodWithNodeSelection(cs, ns,
[]*v1.PersistentVolumeClaim{pvc},
false, "", false, false, framework.SELinuxLabel,
nil, node, framework.PodStartTimeout)
defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
}()
Expect(err).NotTo(HaveOccurred())
pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
pods = append(pods, pod)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("get pod%d", index))
actualNodeName := pod.Spec.NodeName
// Set affinity depending on requiresSameNode
if requiresSameNode {
framework.SetAffinity(&node, actualNodeName)
} else {
framework.SetAntiAffinity(&node, actualNodeName)
}
}
var seed int64
byteLen := 64
path := "/mnt/volume1"
// Check if volume can be accessed from each pod
for i, pod := range pods {
index := i + 1
By(fmt.Sprintf("Checking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
utils.CheckVolumeModeOfPath(pod, *pvc.Spec.VolumeMode, path)
if i != 0 {
By(fmt.Sprintf("From pod%d, checking if reading the data that pod%d write works properly", index, index-1))
// For the 1st pod, no one has written data yet, so skip the read check
utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
}
// Update the seed and check if write/read works properly
seed = time.Now().UTC().UnixNano()
By(fmt.Sprintf("Checking if write to the volume in pod%d works properly", index))
utils.CheckWriteToPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
By(fmt.Sprintf("Checking if read from the volume in pod%d works properly", index))
utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
}
// Delete the last pod and remove from slice of pods
if len(pods) < 2 {
framework.Failf("Number of pods shouldn't be less than 2, but got %d", len(pods))
}
lastPod := pods[len(pods)-1]
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, lastPod))
pods = pods[:len(pods)-1]
// Recheck if pv can be accessed from each pod after the last pod deletion
for i, pod := range pods {
index := i + 1
// pods are checked in the order they were created above
By(fmt.Sprintf("Rechecking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
utils.CheckVolumeModeOfPath(pod, *pvc.Spec.VolumeMode, "/mnt/volume1")
if i == 0 {
// This time there should be data that last pod wrote, for 1st pod
By(fmt.Sprintf("From pod%d, rechecking if reading the data that last pod write works properly", index))
} else {
By(fmt.Sprintf("From pod%d, rechecking if reading the data that pod%d write works properly", index, index-1))
}
utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
// Update the seed and check if write/read works properly
seed = time.Now().UTC().UnixNano()
By(fmt.Sprintf("Rechecking if write to the volume in pod%d works properly", index))
utils.CheckWriteToPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
By(fmt.Sprintf("Rechecking if read from the volume in pod%d works properly", index))
utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
}
}


@@ -18,7 +18,6 @@ package testsuites
 import (
     "fmt"
-    "sync"
     "time"
 
     . "github.com/onsi/ginkgo"
@@ -180,7 +179,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
             framework.Skipf("need more than one node - skipping")
         }
         l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
-            PVMultiNodeCheck(l.cs, claim, volume, NodeSelection{Name: l.config.ClientNodeName})
+            PVMultiNodeCheck(l.cs, claim, volume, framework.NodeSelection{Name: l.config.ClientNodeName})
         }
         l.testCase.TestDynamicProvisioning()
     })
@@ -214,62 +213,14 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
         dc := l.config.Framework.DynamicClient
         vsc := sDriver.GetSnapshotClass(l.config)
-        dataSource, cleanupFunc := prepareDataSourceForProvisioning(NodeSelection{Name: l.config.ClientNodeName}, l.cs, dc, l.pvc, l.sc, vsc)
+        dataSource, cleanupFunc := prepareDataSourceForProvisioning(framework.NodeSelection{Name: l.config.ClientNodeName}, l.cs, dc, l.pvc, l.sc, vsc)
         defer cleanupFunc()
 
         l.pvc.Spec.DataSource = dataSource
         l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
             By("checking whether the created volume has the pre-populated data")
             command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace)
-            RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, NodeSelection{Name: l.config.ClientNodeName})
-        }
-        l.testCase.TestDynamicProvisioning()
-    })
-
-    It("should allow concurrent writes on the same node", func() {
-        if !dInfo.Capabilities[CapMultiPODs] {
-            framework.Skipf("Driver %q does not support multiple concurrent pods - skipping", dInfo.Name)
-        }
-        init()
-        defer cleanup()
-
-        l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
-            // We start two pods concurrently on the same node,
-            // using the same PVC. Both wait for other to create a
-            // file before returning. The pods are forced onto the
-            // same node via pod affinity.
-            wg := sync.WaitGroup{}
-            wg.Add(2)
-            firstPodName := "pvc-tester-first"
-            secondPodName := "pvc-tester-second"
-            run := func(podName, command string) {
-                defer GinkgoRecover()
-                defer wg.Done()
-                node := NodeSelection{
-                    Name: l.config.ClientNodeName,
-                }
-                if podName == secondPodName {
-                    node.Affinity = &v1.Affinity{
-                        PodAffinity: &v1.PodAffinity{
-                            RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
-                                {LabelSelector: &metav1.LabelSelector{
-                                    MatchLabels: map[string]string{
-                                        // Set by RunInPodWithVolume.
-                                        "app": firstPodName,
-                                    },
-                                },
-                                    TopologyKey: "kubernetes.io/hostname",
-                                },
-                            },
-                        },
-                    }
-                }
-                RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, podName, command, node)
-            }
-            go run(firstPodName, "touch /mnt/test/first && while ! [ -f /mnt/test/second ]; do sleep 1; done")
-            go run(secondPodName, "touch /mnt/test/second && while ! [ -f /mnt/test/first ]; do sleep 1; done")
-            wg.Wait()
+            RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, framework.NodeSelection{Name: l.config.ClientNodeName})
         }
         l.testCase.TestDynamicProvisioning()
     })
@@ -384,7 +335,7 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
 // persistent across pods.
 //
 // This is a common test that can be called from a StorageClassTest.PvCheck.
-func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume, node NodeSelection) {
+func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume, node framework.NodeSelection) {
     By(fmt.Sprintf("checking the created volume is writable and has the PV's mount options on node %+v", node))
     command := "echo 'hello world' > /mnt/test/data"
     // We give the first pod the secondary responsibility of checking the volume has
@@ -408,7 +359,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
 
     By(fmt.Sprintf("checking the created volume is readable and retains data on the same node %q", actualNodeName))
     command = "grep 'hello world' /mnt/test/data"
-    RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, NodeSelection{Name: actualNodeName})
+    RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, framework.NodeSelection{Name: actualNodeName})
 }
 
 // PVMultiNodeCheck checks that a PV retains data when moved between nodes.
@@ -425,7 +376,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
 // persistent across pods and across nodes.
 //
 // This is a common test that can be called from a StorageClassTest.PvCheck.
-func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume, node NodeSelection) {
+func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume, node framework.NodeSelection) {
     Expect(node.Name).To(Equal(""), "this test only works when not locked onto a single node")
 
     var pod *v1.Pod
@@ -446,30 +397,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
 
     // Add node-anti-affinity.
     secondNode := node
-    if secondNode.Affinity == nil {
-        secondNode.Affinity = &v1.Affinity{}
-    }
-    if secondNode.Affinity.NodeAffinity == nil {
-        secondNode.Affinity.NodeAffinity = &v1.NodeAffinity{}
-    }
-    if secondNode.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
-        secondNode.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{}
-    }
-    secondNode.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = append(secondNode.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms,
-        v1.NodeSelectorTerm{
-            // https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity warns
-            // that "the value of kubernetes.io/hostname may be the same as the Node name in some environments and a different value in other environments".
-            // So this might be cleaner:
-            // MatchFields: []v1.NodeSelectorRequirement{
-            //	{Key: "name", Operator: v1.NodeSelectorOpNotIn, Values: []string{actualNodeName}},
-            // },
-            // However, "name", "Name", "ObjectMeta.Name" all got rejected with "not a valid field selector key".
-            MatchExpressions: []v1.NodeSelectorRequirement{
-                {Key: "kubernetes.io/hostname", Operator: v1.NodeSelectorOpNotIn, Values: []string{actualNodeName}},
-            },
-        })
+    framework.SetAntiAffinity(&secondNode, actualNodeName)
     By(fmt.Sprintf("checking the created volume is readable and retains data on another node %+v", secondNode))
     command = "grep 'hello world' /mnt/test/data"
     if framework.NodeOSDistroIs("windows") {
@@ -573,17 +501,9 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
     return pvs, node
 }
 
-// NodeSelection specifies where to run a pod, using a combination of fixed node name,
-// node selector and/or affinity.
-type NodeSelection struct {
-    Name     string
-    Selector map[string]string
-    Affinity *v1.Affinity
-}
-
 // RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
 // It starts, checks, collects output and stops it.
-func RunInPodWithVolume(c clientset.Interface, ns, claimName, podName, command string, node NodeSelection) {
+func RunInPodWithVolume(c clientset.Interface, ns, claimName, podName, command string, node framework.NodeSelection) {
     pod := StartInPodWithVolume(c, ns, claimName, podName, command, node)
     defer StopPod(c, pod)
     framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
@@ -591,7 +511,7 @@ func RunInPodWithVolume(c clientset.Interface, ns, claimName, podName, command s
 // StartInPodWithVolume starts a command in a pod with given claim mounted to /mnt directory
 // The caller is responsible for checking the pod and deleting it.
-func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command string, node NodeSelection) *v1.Pod {
+func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command string, node framework.NodeSelection) *v1.Pod {
     pod := &v1.Pod{
         TypeMeta: metav1.TypeMeta{
             Kind: "Pod",
@@ -664,7 +584,7 @@ func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeCl
 }
 
 func prepareDataSourceForProvisioning(
-    node NodeSelection,
+    node framework.NodeSelection,
     client clientset.Interface,
     dynamicClient dynamic.Interface,
     initClaim *v1.PersistentVolumeClaim,


@@ -121,6 +121,8 @@ const (
     // - https://github.com/container-storage-interface/spec/issues/178
     // - NodeStageVolume in the spec
     CapMultiPODs Capability = "multipods"
+
+    CapRWX Capability = "RWX" // support ReadWriteMany access modes
 )
 
 // DriverInfo represents static information about a TestDriver.
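Illustrative sketch (not part of the commit above): how a driver opts into the new multivolume cases. The DriverInfo fields and capability constants are the ones used in this commit; the driver name is made up. CapMultiPODs gates the "two pods, same node" test and CapRWX gates the "two pods, different nodes" test, matching the Skipf checks in multivolume.go.

package drivers

import "k8s.io/kubernetes/test/e2e/storage/testsuites"

// exampleDriverInfo is a hypothetical driver declaration; only drivers that
// set CapRWX will run the single-volume, multi-node concurrency test.
var exampleDriverInfo = testsuites.DriverInfo{
	Name: "example-driver",
	Capabilities: map[testsuites.Capability]bool{
		testsuites.CapPersistence: true,
		testsuites.CapExec:        true,
		testsuites.CapMultiPODs:   true,
		testsuites.CapRWX:         true,
	},
}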


@@ -182,9 +182,9 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
             framework.ExpectNoError(framework.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc))
 
             By("Creating pod")
-            pod, err := framework.CreateSecPodWithNodeName(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
+            pod, err := framework.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
                 false, "", false, false, framework.SELinuxLabel,
-                nil, l.config.ClientNodeName, framework.PodStartTimeout)
+                nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
             defer func() {
                 framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
             }()
@@ -213,9 +213,9 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
             framework.ExpectNoError(framework.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc))
 
             By("Creating pod")
-            pod, err := framework.CreateSecPodWithNodeName(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
+            pod, err := framework.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
                 false, "", false, false, framework.SELinuxLabel,
-                nil, l.config.ClientNodeName, framework.PodStartTimeout)
+                nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
             defer func() {
                 framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
             }()
@@ -273,9 +273,9 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
             Expect(err).NotTo(HaveOccurred())
 
             By("Creating pod")
-            pod, err := framework.CreateSecPodWithNodeName(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
+            pod, err := framework.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
                 false, "", false, false, framework.SELinuxLabel,
-                nil, l.config.ClientNodeName, framework.PodStartTimeout)
+                nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
             defer func() {
                 framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
             }()


@@ -17,7 +17,11 @@ limitations under the License.
 package utils
 
 import (
+    "crypto/sha256"
+    "encoding/base64"
     "fmt"
+    "math/rand"
+    "path/filepath"
     "strings"
     "time"
 
@@ -483,3 +487,43 @@ func CheckReadWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path str
         VerifyExecInPodFail(pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
     }
 }
+
+func genBinDataFromSeed(len int, seed int64) []byte {
+    binData := make([]byte, len)
+    rand.Seed(seed)
+
+    len, err := rand.Read(binData)
+    if err != nil {
+        fmt.Printf("Error: %v", err)
+    }
+
+    return binData
+}
+
+func CheckReadFromPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string, len int, seed int64) {
+    var pathForVolMode string
+    if volMode == v1.PersistentVolumeBlock {
+        pathForVolMode = path
+    } else {
+        pathForVolMode = filepath.Join(path, "file1.txt")
+    }
+
+    sum := sha256.Sum256(genBinDataFromSeed(len, seed))
+
+    VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=%s bs=%d count=1 | sha256sum", pathForVolMode, len))
+    VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=%s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, len, sum))
+}
+
+func CheckWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string, len int, seed int64) {
+    var pathForVolMode string
+    if volMode == v1.PersistentVolumeBlock {
+        pathForVolMode = path
+    } else {
+        pathForVolMode = filepath.Join(path, "file1.txt")
+    }
+
+    encoded := base64.StdEncoding.EncodeToString(genBinDataFromSeed(len, seed))
+
+    VerifyExecInPodSucceed(pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
+    VerifyExecInPodSucceed(pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s bs=%d count=1", encoded, pathForVolMode, len))
+}


@@ -276,7 +276,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
            PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
                err := checkGCEPD(volume, "pd-ssd")
                Expect(err).NotTo(HaveOccurred(), "checkGCEPD pd-ssd")
-                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
+                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
            },
        },
        {
@@ -291,7 +291,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
            PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
                err := checkGCEPD(volume, "pd-standard")
                Expect(err).NotTo(HaveOccurred(), "checkGCEPD pd-standard")
-                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
+                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
            },
        },
        // AWS
@@ -308,7 +308,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
            PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
                err := checkAWSEBS(volume, "gp2", false)
                Expect(err).NotTo(HaveOccurred(), "checkAWSEBS gp2")
-                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
+                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
            },
        },
        {
@@ -324,7 +324,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
            PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
                err := checkAWSEBS(volume, "io1", false)
                Expect(err).NotTo(HaveOccurred(), "checkAWSEBS io1")
-                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
+                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
            },
        },
        {
@@ -339,7 +339,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
            PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
                err := checkAWSEBS(volume, "sc1", false)
                Expect(err).NotTo(HaveOccurred(), "checkAWSEBS sc1")
-                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
+                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
            },
        },
        {
@@ -354,7 +354,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
            PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
                err := checkAWSEBS(volume, "st1", false)
                Expect(err).NotTo(HaveOccurred(), "checkAWSEBS st1")
-                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
+                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
            },
        },
        {
@@ -369,7 +369,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
            PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
                err := checkAWSEBS(volume, "gp2", true)
                Expect(err).NotTo(HaveOccurred(), "checkAWSEBS gp2 encrypted")
-                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
+                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
            },
        },
        // OpenStack generic tests (works on all OpenStack deployments)
@@ -381,7 +381,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
            ClaimSize:    "1.5Gi",
            ExpectedSize: "2Gi",
            PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
-                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
+                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
            },
        },
        {
@@ -395,7 +395,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
            ClaimSize:    "1.5Gi",
            ExpectedSize: "2Gi",
            PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
-                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
+                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
            },
        },
        // vSphere generic test
@@ -407,7 +407,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
            ClaimSize:    "1.5Gi",
            ExpectedSize: "1.5Gi",
            PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
-                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
+                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
            },
        },
        // Azure
@@ -419,7 +419,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
            ClaimSize:    "1Gi",
            ExpectedSize: "1Gi",
            PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
-                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
+                testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
            },
        },
    }
@@ -480,7 +480,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
        PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
            err := checkGCEPD(volume, "pd-standard")
            Expect(err).NotTo(HaveOccurred(), "checkGCEPD")
-            testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
+            testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
        },
    }
    test.Class = newStorageClass(test, ns, "reclaimpolicy")