Merge pull request #72434 from pohly/storage-volume-testsuites-config

e2e/storage: delay CreateDriver, simplify APIs and test definition
Kubernetes Prow Robot 2019-02-20 05:37:45 -08:00 committed by GitHub
commit 07cf5fa08e
17 changed files with 1468 additions and 1764 deletions
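In short: drivers are no longer constructed with a TestConfig up front; all per-test setup moves into a new PrepareTest method that returns a PerTestConfig plus a cleanup callback. A condensed sketch of the new lifecycle, pieced together from the hunks below (illustrative, not a compilable test):

package storage

import (
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/drivers"
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
)

// exampleLifecycle shows the revised TestDriver flow introduced by this PR.
func exampleLifecycle(f *framework.Framework) {
	driver := drivers.InitHostPathCSIDriver() // old: InitHostPathCSIDriver(config)
	config, cleanup := driver.PrepareTest(f)  // old: driver.CreateDriver()
	defer cleanup()                           // old: driver.CleanupDriver()

	// Helpers now take the per-test config explicitly instead of reading
	// it from DriverInfo.Config.
	if dDriver, ok := driver.(testsuites.DynamicPVTestDriver); ok {
		_ = dDriver.GetDynamicProvisionStorageClass(config, "" /* fsType */)
	}
}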

@@ -67,11 +67,9 @@ go_library(
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/podlogs:go_default_library",
"//test/e2e/framework/providers/gce:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
"//test/e2e/storage/drivers:go_default_library",
"//test/e2e/storage/testpatterns:go_default_library",
"//test/e2e/storage/testsuites:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//test/utils/image:go_default_library",

@@ -17,10 +17,8 @@ limitations under the License.
package storage
import (
"context"
"encoding/json"
"fmt"
"regexp"
"strings"
"time"
@@ -32,9 +30,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
csiclient "k8s.io/csi-api/pkg/client/clientset/versioned"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/podlogs"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -47,9 +43,9 @@ import (
)
// List of testDrivers to be executed in the loop below
var csiTestDrivers = []func(config testsuites.TestConfig) testsuites.TestDriver{
var csiTestDrivers = []func() testsuites.TestDriver{
drivers.InitHostPathCSIDriver,
drivers.InitGcePDCSIDriver,
func() testsuites.TestDriver { return drivers.InitGcePDCSIDriver(false /* topology enabled */) },
drivers.InitGcePDExternalCSIDriver,
drivers.InitHostPathV0CSIDriver,
// Don't run tests with the mock driver (drivers.InitMockCSIDriver); it does not provide persistent storage.
@@ -65,133 +61,58 @@ var csiTestSuites = []func() testsuites.TestSuite{
testsuites.InitSnapshottableTestSuite,
}
func csiTunePattern(patterns []testpatterns.TestPattern) []testpatterns.TestPattern {
tunedPatterns := []testpatterns.TestPattern{}
for _, pattern := range patterns {
// Skip inline volume and pre-provisioned PV tests for csi drivers
if pattern.VolType == testpatterns.InlineVolume || pattern.VolType == testpatterns.PreprovisionedPV {
continue
}
tunedPatterns = append(tunedPatterns, pattern)
}
return tunedPatterns
}
// This executes testSuites for csi volumes.
var _ = utils.SIGDescribe("CSI Volumes", func() {
f := framework.NewDefaultFramework("csi-volumes")
var (
cancel context.CancelFunc
cs clientset.Interface
csics csiclient.Interface
ns *v1.Namespace
// Common configuration options for each driver.
config = testsuites.TestConfig{
Framework: f,
Prefix: "csi",
}
)
BeforeEach(func() {
ctx, c := context.WithCancel(context.Background())
cancel = c
cs = f.ClientSet
csics = f.CSIClientSet
ns = f.Namespace
// Debugging of the following tests heavily depends on the log output
// of the different containers. Therefore include all of that in log
// files (when using --report-dir, as in the CI) or the output stream
// (otherwise).
to := podlogs.LogOutput{
StatusWriter: GinkgoWriter,
}
if framework.TestContext.ReportDir == "" {
to.LogWriter = GinkgoWriter
} else {
test := CurrentGinkgoTestDescription()
reg := regexp.MustCompile("[^a-zA-Z0-9_-]+")
// We end the prefix with a slash to ensure that all logs
// end up in a directory named after the current test.
to.LogPathPrefix = framework.TestContext.ReportDir + "/" +
reg.ReplaceAllString(test.FullTestText, "_") + "/"
}
podlogs.CopyAllLogs(ctx, cs, ns.Name, to)
// pod events are something that the framework already collects itself
// after a failed test. Logging them live is only useful for interactive
// debugging, not when we collect reports.
if framework.TestContext.ReportDir == "" {
podlogs.WatchPods(ctx, cs, ns.Name, GinkgoWriter)
}
})
AfterEach(func() {
cancel()
})
for _, initDriver := range csiTestDrivers {
curDriver := initDriver(config)
curConfig := curDriver.GetDriverInfo().Config
curDriver := initDriver()
Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() {
BeforeEach(func() {
// Reset config. The driver might have modified its copy
// in a previous test.
curDriver.GetDriverInfo().Config = curConfig
// setupDriver
curDriver.CreateDriver()
})
AfterEach(func() {
// Cleanup driver
curDriver.CleanupDriver()
})
testsuites.RunTestSuite(f, curDriver, csiTestSuites, csiTunePattern)
testsuites.DefineTestSuite(curDriver, csiTestSuites)
})
}
Context("CSI Topology test using GCE PD driver [Feature:CSINodeInfo]", func() {
newConfig := config
newConfig.TopologyEnabled = true
driver := drivers.InitGcePDCSIDriver(newConfig).(testsuites.DynamicPVTestDriver) // TODO (#71289) eliminate by moving this test to common test suite.
f := framework.NewDefaultFramework("csitopology")
driver := drivers.InitGcePDCSIDriver(true /* topology enabled */).(testsuites.DynamicPVTestDriver) // TODO (#71289) eliminate by moving this test to common test suite.
var (
config *testsuites.PerTestConfig
testCleanup func()
)
BeforeEach(func() {
driver.CreateDriver()
config, testCleanup = driver.PrepareTest(f)
})
AfterEach(func() {
driver.CleanupDriver()
if testCleanup != nil {
testCleanup()
}
})
It("should provision zonal PD with immediate volume binding and AllowedTopologies set and mount the volume to a pod", func() {
suffix := "topology-positive"
testTopologyPositive(cs, suffix, ns.GetName(), false /* delayBinding */, true /* allowedTopologies */)
testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), false /* delayBinding */, true /* allowedTopologies */)
})
It("should provision zonal PD with delayed volume binding and mount the volume to a pod", func() {
suffix := "delayed"
testTopologyPositive(cs, suffix, ns.GetName(), true /* delayBinding */, false /* allowedTopologies */)
testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */, false /* allowedTopologies */)
})
It("should provision zonal PD with delayed volume binding and AllowedTopologies set and mount the volume to a pod", func() {
suffix := "delayed-topology-positive"
testTopologyPositive(cs, suffix, ns.GetName(), true /* delayBinding */, true /* allowedTopologies */)
testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */, true /* allowedTopologies */)
})
It("should fail to schedule a pod with a zone missing from AllowedTopologies; PD is provisioned with immediate volume binding", func() {
framework.SkipUnlessMultizone(cs)
framework.SkipUnlessMultizone(config.Framework.ClientSet)
suffix := "topology-negative"
testTopologyNegative(cs, suffix, ns.GetName(), false /* delayBinding */)
testTopologyNegative(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), false /* delayBinding */)
})
It("should fail to schedule a pod with a zone missing from AllowedTopologies; PD is provisioned with delayed volume binding", func() {
framework.SkipUnlessMultizone(cs)
framework.SkipUnlessMultizone(config.Framework.ClientSet)
suffix := "delayed-topology-negative"
testTopologyNegative(cs, suffix, ns.GetName(), true /* delayBinding */)
testTopologyNegative(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */)
})
})
@@ -227,29 +148,30 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
for _, t := range tests {
test := t
It(test.name, func() {
By("Deploying mock CSI driver")
config := testsuites.TestConfig{
Framework: f,
Prefix: "csi-attach",
}
f := framework.NewDefaultFramework("csiattach")
driver = drivers.InitMockCSIDriver(config, test.deployDriverCRD, test.driverAttachable, nil)
driver.CreateDriver()
defer driver.CleanupDriver()
It(test.name, func() {
cs := f.ClientSet
csics := f.CSIClientSet
ns := f.Namespace
driver = drivers.InitMockCSIDriver(test.deployDriverCRD, test.driverAttachable, nil)
config, testCleanup := driver.PrepareTest(f)
driverName := config.GetUniqueDriverName()
defer testCleanup()
if test.deployDriverCRD {
err = waitForCSIDriver(csics, driver)
err = waitForCSIDriver(csics, driverName)
framework.ExpectNoError(err, "Failed to get CSIDriver: %v", err)
defer destroyCSIDriver(csics, driver)
defer destroyCSIDriver(csics, driverName)
}
By("Creating pod")
var sc *storagev1.StorageClass
if dDriver, ok := driver.(testsuites.DynamicPVTestDriver); ok {
sc = dDriver.GetDynamicProvisionStorageClass("")
sc = dDriver.GetDynamicProvisionStorageClass(config, "")
}
nodeName := driver.GetDriverInfo().Config.ClientNodeName
nodeName := config.ClientNodeName
scTest := testsuites.StorageClassTest{
Name: driver.GetDriverInfo().Name,
Provisioner: sc.Provisioner,
@@ -347,29 +269,30 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
}
for _, t := range tests {
test := t
It(test.name, func() {
By("Deploying mock CSI driver")
config := testsuites.TestConfig{
Framework: f,
Prefix: "csi-workload",
}
f := framework.NewDefaultFramework("csiworkload")
driver = drivers.InitMockCSIDriver(config, test.deployDriverCRD, true, test.podInfoOnMountVersion)
driver.CreateDriver()
defer driver.CleanupDriver()
It(test.name, func() {
cs := f.ClientSet
csics := f.CSIClientSet
ns := f.Namespace
driver = drivers.InitMockCSIDriver(test.deployDriverCRD, true, test.podInfoOnMountVersion)
config, testCleanup := driver.PrepareTest(f)
driverName := config.GetUniqueDriverName()
defer testCleanup()
if test.deployDriverCRD {
err = waitForCSIDriver(csics, driver)
err = waitForCSIDriver(csics, driverName)
framework.ExpectNoError(err, "Failed to get CSIDriver: %v", err)
defer destroyCSIDriver(csics, driver)
defer destroyCSIDriver(csics, driverName)
}
By("Creating pod")
var sc *storagev1.StorageClass
if dDriver, ok := driver.(testsuites.DynamicPVTestDriver); ok {
sc = dDriver.GetDynamicProvisionStorageClass("")
sc = dDriver.GetDynamicProvisionStorageClass(config, "")
}
nodeName := driver.GetDriverInfo().Config.ClientNodeName
nodeName := config.ClientNodeName
scTest := testsuites.StorageClassTest{
Name: driver.GetDriverInfo().Name,
Parameters: sc.Parameters,
@@ -420,14 +343,16 @@ func testTopologyPositive(cs clientset.Interface, suffix, namespace string, dela
topoZone := getRandomClusterZone(cs)
addSingleCSIZoneAllowedTopologyToStorageClass(cs, class, topoZone)
}
claim := newClaim(test, namespace, suffix)
claim.Spec.StorageClassName = &class.Name
test.Client = cs
test.Claim = newClaim(test, namespace, suffix)
test.Claim.Spec.StorageClassName = &class.Name
test.Class = class
if delayBinding {
_, node := testsuites.TestBindingWaitForFirstConsumer(test, cs, claim, class, nil /* node selector */, false /* expect unschedulable */)
_, node := test.TestBindingWaitForFirstConsumer(nil /* node selector */, false /* expect unschedulable */)
Expect(node).ToNot(BeNil(), "Unexpected nil node found")
} else {
testsuites.TestDynamicProvisioning(test, cs, claim, class)
test.TestDynamicProvisioning()
}
}
@@ -447,12 +372,13 @@ func testTopologyNegative(cs clientset.Interface, suffix, namespace string, dela
test.DelayBinding = delayBinding
nodeSelector := map[string]string{v1.LabelZoneFailureDomain: podZone}
class := newStorageClass(test, namespace, suffix)
addSingleCSIZoneAllowedTopologyToStorageClass(cs, class, pvZone)
claim := newClaim(test, namespace, suffix)
claim.Spec.StorageClassName = &class.Name
test.Client = cs
test.Class = newStorageClass(test, namespace, suffix)
addSingleCSIZoneAllowedTopologyToStorageClass(cs, test.Class, pvZone)
test.Claim = newClaim(test, namespace, suffix)
test.Claim.Spec.StorageClassName = &test.Class.Name
if delayBinding {
testsuites.TestBindingWaitForFirstConsumer(test, cs, claim, class, nodeSelector, true /* expect unschedulable */)
test.TestBindingWaitForFirstConsumer(nodeSelector, true /* expect unschedulable */)
} else {
test.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
// Ensure that a pod cannot be scheduled in an unsuitable zone.
@@ -461,13 +387,12 @@ func testTopologyNegative(cs clientset.Interface, suffix, namespace string, dela
defer testsuites.StopPod(cs, pod)
framework.ExpectNoError(framework.WaitForPodNameUnschedulableInNamespace(cs, pod.Name, pod.Namespace), "pod should be unschedulable")
}
testsuites.TestDynamicProvisioning(test, cs, claim, class)
test.TestDynamicProvisioning()
}
}
func waitForCSIDriver(csics csiclient.Interface, driver testsuites.TestDriver) error {
func waitForCSIDriver(csics csiclient.Interface, driverName string) error {
timeout := 2 * time.Minute
driverName := testsuites.GetUniqueDriverName(driver)
framework.Logf("waiting up to %v for CSIDriver %q", timeout, driverName)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(framework.Poll) {
@@ -479,8 +404,7 @@ func waitForCSIDriver(csics csiclient.Interface, driver testsuites.TestDriver) e
return fmt.Errorf("gave up after waiting %v for CSIDriver %q.", timeout, driverName)
}
func destroyCSIDriver(csics csiclient.Interface, driver testsuites.TestDriver) {
driverName := testsuites.GetUniqueDriverName(driver)
func destroyCSIDriver(csics csiclient.Interface, driverName string) {
driverGet, err := csics.CsiV1alpha1().CSIDrivers().Get(driverName, metav1.GetOptions{})
if err == nil {
framework.Logf("deleting %s.%s: %s", driverGet.TypeMeta.APIVersion, driverGet.TypeMeta.Kind, driverGet.ObjectMeta.Name)

@@ -56,12 +56,11 @@ const (
// hostpathCSI
type hostpathCSIDriver struct {
cleanup func()
driverInfo testsuites.DriverInfo
manifests []string
}
func initHostPathCSIDriver(name string, config testsuites.TestConfig, capabilities map[testsuites.Capability]bool, manifests ...string) testsuites.TestDriver {
func initHostPathCSIDriver(name string, capabilities map[testsuites.Capability]bool, manifests ...string) testsuites.TestDriver {
return &hostpathCSIDriver{
driverInfo: testsuites.DriverInfo{
Name: name,
@@ -71,7 +70,6 @@ func initHostPathCSIDriver(name string, config testsuites.TestConfig, capabiliti
"", // Default fsType
),
Capabilities: capabilities,
Config: config,
},
manifests: manifests,
}
@@ -82,8 +80,8 @@ var _ testsuites.DynamicPVTestDriver = &hostpathCSIDriver{}
var _ testsuites.SnapshottableTestDriver = &hostpathCSIDriver{}
// InitHostPathCSIDriver returns hostpathCSIDriver that implements TestDriver interface
func InitHostPathCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
return initHostPathCSIDriver("csi-hostpath", config,
func InitHostPathCSIDriver() testsuites.TestDriver {
return initHostPathCSIDriver("csi-hostpath",
map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapDataSource: true, testsuites.CapMultiPODs: true},
"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
@@ -104,19 +102,19 @@ func (h *hostpathCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
func (h *hostpathCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
}
func (h *hostpathCSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
provisioner := testsuites.GetUniqueDriverName(h)
func (h *hostpathCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
provisioner := config.GetUniqueDriverName()
parameters := map[string]string{}
ns := h.driverInfo.Config.Framework.Namespace.Name
ns := config.Framework.Namespace.Name
suffix := fmt.Sprintf("%s-sc", provisioner)
return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
}
func (h *hostpathCSIDriver) GetSnapshotClass() *unstructured.Unstructured {
snapshotter := testsuites.GetUniqueDriverName(h)
func (h *hostpathCSIDriver) GetSnapshotClass(config *testsuites.PerTestConfig) *unstructured.Unstructured {
snapshotter := config.GetUniqueDriverName()
parameters := map[string]string{}
ns := h.driverInfo.Config.Framework.Namespace.Name
ns := config.Framework.Namespace.Name
suffix := fmt.Sprintf("%s-vsc", snapshotter)
return testsuites.GetSnapshotClass(snapshotter, parameters, ns, suffix)
@@ -126,57 +124,60 @@ func (h *hostpathCSIDriver) GetClaimSize() string {
return "5Gi"
}
func (h *hostpathCSIDriver) CreateDriver() {
func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
By(fmt.Sprintf("deploying %s driver", h.driverInfo.Name))
f := h.driverInfo.Config.Framework
cancelLogging := testsuites.StartPodLogs(f)
cs := f.ClientSet
// The hostpath CSI driver only works when everything runs on the same node.
nodes := framework.GetReadySchedulableNodesOrDie(cs)
nodeName := nodes.Items[rand.Intn(len(nodes.Items))].Name
h.driverInfo.Config.ClientNodeName = nodeName
config := &testsuites.PerTestConfig{
Driver: h,
Prefix: "hostpath",
Framework: f,
ClientNodeName: nodeName,
}
// TODO (?): the storage.csi.image.version and storage.csi.image.registry
// settings are ignored for this test. We could patch the image definitions.
o := utils.PatchCSIOptions{
OldDriverName: h.driverInfo.Name,
NewDriverName: testsuites.GetUniqueDriverName(h),
NewDriverName: config.GetUniqueDriverName(),
DriverContainerName: "hostpath",
DriverContainerArguments: []string{"--drivername=csi-hostpath-" + f.UniqueName},
DriverContainerArguments: []string{"--drivername=" + config.GetUniqueDriverName()},
ProvisionerContainerName: "csi-provisioner",
SnapshotterContainerName: "csi-snapshotter",
NodeName: nodeName,
}
cleanup, err := h.driverInfo.Config.Framework.CreateFromManifests(func(item interface{}) error {
return utils.PatchCSIDeployment(h.driverInfo.Config.Framework, o, item)
cleanup, err := config.Framework.CreateFromManifests(func(item interface{}) error {
return utils.PatchCSIDeployment(config.Framework, o, item)
},
h.manifests...)
h.cleanup = cleanup
if err != nil {
framework.Failf("deploying %s driver: %v", h.driverInfo.Name, err)
}
}
func (h *hostpathCSIDriver) CleanupDriver() {
if h.cleanup != nil {
return config, func() {
By(fmt.Sprintf("uninstalling %s driver", h.driverInfo.Name))
h.cleanup()
cleanup()
cancelLogging()
}
}
// mockCSI
type mockCSIDriver struct {
cleanup func()
driverInfo testsuites.DriverInfo
manifests []string
podInfoVersion *string
attachable bool
}
var _ testsuites.TestDriver = &mockCSIDriver{}
var _ testsuites.DynamicPVTestDriver = &mockCSIDriver{}
// InitMockCSIDriver returns a mockCSIDriver that implements TestDriver interface
func InitMockCSIDriver(config testsuites.TestConfig, registerDriver, driverAttachable bool, podInfoVersion *string) testsuites.TestDriver {
func InitMockCSIDriver(registerDriver, driverAttachable bool, podInfoVersion *string) testsuites.TestDriver {
driverManifests := []string{
"test/e2e/testing-manifests/storage-csi/cluster-driver-registrar/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
@@ -187,16 +188,12 @@ func InitMockCSIDriver(config testsuites.TestConfig, registerDriver, driverAttac
"test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml",
}
config.ServerConfig = &framework.VolumeTestConfig{}
if registerDriver {
driverManifests = append(driverManifests, "test/e2e/testing-manifests/storage-csi/mock/csi-mock-cluster-driver-registrar.yaml")
}
if driverAttachable {
driverManifests = append(driverManifests, "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-attacher.yaml")
} else {
config.ServerConfig.ServerArgs = append(config.ServerConfig.ServerArgs, "--disable-attach")
}
return &mockCSIDriver{
@@ -212,10 +209,10 @@ func InitMockCSIDriver(config testsuites.TestConfig, registerDriver, driverAttac
testsuites.CapFsGroup: false,
testsuites.CapExec: false,
},
Config: config,
},
manifests: driverManifests,
podInfoVersion: podInfoVersion,
attachable: driverAttachable,
}
}
@@ -226,10 +223,10 @@ func (m *mockCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
func (m *mockCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
}
func (m *mockCSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
provisioner := testsuites.GetUniqueDriverName(m)
func (m *mockCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
provisioner := config.GetUniqueDriverName()
parameters := map[string]string{}
ns := m.driverInfo.Config.Framework.Namespace.Name
ns := config.Framework.Namespace.Name
suffix := fmt.Sprintf("%s-sc", provisioner)
return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
@@ -239,20 +236,24 @@ func (m *mockCSIDriver) GetClaimSize() string {
return "5Gi"
}
func (m *mockCSIDriver) CreateDriver() {
func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
By("deploying csi mock driver")
f := m.driverInfo.Config.Framework
cancelLogging := testsuites.StartPodLogs(f)
cs := f.ClientSet
// pods should be scheduled on the node
nodes := framework.GetReadySchedulableNodesOrDie(cs)
node := nodes.Items[rand.Intn(len(nodes.Items))]
m.driverInfo.Config.ClientNodeName = node.Name
config := &testsuites.PerTestConfig{
Driver: m,
Prefix: "mock",
Framework: f,
ClientNodeName: node.Name,
}
containerArgs := []string{"--name=csi-mock-" + f.UniqueName}
if m.driverInfo.Config.ServerConfig != nil && m.driverInfo.Config.ServerConfig.ServerArgs != nil {
containerArgs = append(containerArgs, m.driverInfo.Config.ServerConfig.ServerArgs...)
if !m.attachable {
containerArgs = append(containerArgs, "--disable-attach")
}
// TODO (?): the storage.csi.image.version and storage.csi.image.registry
@@ -264,29 +265,27 @@ func (m *mockCSIDriver) CreateDriver() {
DriverContainerArguments: containerArgs,
ProvisionerContainerName: "csi-provisioner",
ClusterRegistrarContainerName: "csi-cluster-driver-registrar",
NodeName: m.driverInfo.Config.ClientNodeName,
NodeName: config.ClientNodeName,
PodInfoVersion: m.podInfoVersion,
}
cleanup, err := f.CreateFromManifests(func(item interface{}) error {
return utils.PatchCSIDeployment(f, o, item)
},
m.manifests...)
m.cleanup = cleanup
if err != nil {
framework.Failf("deploying csi mock driver: %v", err)
}
}
func (m *mockCSIDriver) CleanupDriver() {
if m.cleanup != nil {
return config, func() {
By("uninstalling csi mock driver")
m.cleanup()
cleanup()
cancelLogging()
}
}
// InitHostPathV0CSIDriver returns a variant of hostpathCSIDriver with different manifests.
func InitHostPathV0CSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
return initHostPathCSIDriver("csi-hostpath-v0", config,
func InitHostPathV0CSIDriver() testsuites.TestDriver {
return initHostPathCSIDriver("csi-hostpath-v0",
map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapMultiPODs: true},
"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
@@ -300,7 +299,7 @@ func InitHostPathV0CSIDriver(config testsuites.TestConfig) testsuites.TestDriver
// gce-pd
type gcePDCSIDriver struct {
cleanup func()
topologyEnabled bool
driverInfo testsuites.DriverInfo
}
@@ -308,8 +307,9 @@ var _ testsuites.TestDriver = &gcePDCSIDriver{}
var _ testsuites.DynamicPVTestDriver = &gcePDCSIDriver{}
// InitGcePDCSIDriver returns gcePDCSIDriver that implements TestDriver interface
func InitGcePDCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
func InitGcePDCSIDriver(topologyEnabled bool) testsuites.TestDriver {
return &gcePDCSIDriver{
topologyEnabled: topologyEnabled,
driverInfo: testsuites.DriverInfo{
Name: GCEPDCSIProvisionerName,
FeatureTag: "[Serial]",
@@ -327,8 +327,6 @@ func InitGcePDCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
testsuites.CapExec: true,
testsuites.CapMultiPODs: true,
},
Config: config,
},
}
}
@@ -338,21 +336,14 @@ func (g *gcePDCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
}
func (g *gcePDCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
f := g.driverInfo.Config.Framework
framework.SkipUnlessProviderIs("gce", "gke")
if !g.driverInfo.Config.TopologyEnabled {
// Topology is disabled in external-provisioner, so in a multizone cluster, a pod could be
// scheduled in a different zone from the provisioned volume, causing basic provisioning
// tests to fail.
framework.SkipIfMultizone(f.ClientSet)
}
if pattern.FsType == "xfs" {
framework.SkipUnlessNodeOSDistroIs("ubuntu", "custom")
}
}
func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
ns := g.driverInfo.Config.Framework.Namespace.Name
func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
ns := config.Framework.Namespace.Name
provisioner := g.driverInfo.Name
suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name)
@@ -368,8 +359,16 @@ func (g *gcePDCSIDriver) GetClaimSize() string {
return "5Gi"
}
func (g *gcePDCSIDriver) CreateDriver() {
func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
if !g.topologyEnabled {
// Topology is disabled in external-provisioner, so in a multizone cluster, a pod could be
// scheduled in a different zone from the provisioned volume, causing basic provisioning
// tests to fail.
framework.SkipIfMultizone(f.ClientSet)
}
By("deploying csi gce-pd driver")
cancelLogging := testsuites.StartPodLogs(f)
// It would be safer to rename the gcePD driver, but that
// hasn't been done before either and attempts to do so now led to
// errors during driver registration, therefore it is disabled
@@ -382,7 +381,7 @@ func (g *gcePDCSIDriver) CreateDriver() {
// DriverContainerName: "gce-driver",
// ProvisionerContainerName: "csi-external-provisioner",
// }
createGCESecrets(g.driverInfo.Config.Framework.ClientSet, g.driverInfo.Config.Framework.Namespace.Name)
createGCESecrets(f.ClientSet, f.Namespace.Name)
manifests := []string{
"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
@@ -392,22 +391,24 @@ func (g *gcePDCSIDriver) CreateDriver() {
"test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml",
}
if g.driverInfo.Config.TopologyEnabled {
if g.topologyEnabled {
manifests = append(manifests, "test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss_alpha.yaml")
} else {
manifests = append(manifests, "test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml")
}
cleanup, err := g.driverInfo.Config.Framework.CreateFromManifests(nil, manifests...)
g.cleanup = cleanup
cleanup, err := f.CreateFromManifests(nil, manifests...)
if err != nil {
framework.Failf("deploying csi gce-pd driver: %v", err)
}
}
func (g *gcePDCSIDriver) CleanupDriver() {
return &testsuites.PerTestConfig{
Driver: g,
Prefix: "gcepd",
Framework: f,
}, func() {
By("uninstalling gce-pd driver")
if g.cleanup != nil {
g.cleanup()
cleanup()
cancelLogging()
}
}
@@ -420,7 +421,7 @@ var _ testsuites.TestDriver = &gcePDExternalCSIDriver{}
var _ testsuites.DynamicPVTestDriver = &gcePDExternalCSIDriver{}
// InitGcePDExternalCSIDriver returns gcePDExternalCSIDriver that implements TestDriver interface
func InitGcePDExternalCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
func InitGcePDExternalCSIDriver() testsuites.TestDriver {
return &gcePDExternalCSIDriver{
driverInfo: testsuites.DriverInfo{
Name: GCEPDCSIProvisionerName,
@@ -440,8 +441,6 @@ func InitGcePDExternalCSIDriver(config testsuites.TestConfig) testsuites.TestDri
testsuites.CapExec: true,
testsuites.CapMultiPODs: true,
},
Config: config,
},
}
}
@@ -452,14 +451,13 @@ func (g *gcePDExternalCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
func (g *gcePDExternalCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
framework.SkipUnlessProviderIs("gce", "gke")
framework.SkipIfMultizone(g.driverInfo.Config.Framework.ClientSet)
if pattern.FsType == "xfs" {
framework.SkipUnlessNodeOSDistroIs("ubuntu", "custom")
}
}
func (g *gcePDExternalCSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
ns := g.driverInfo.Config.Framework.Namespace.Name
func (g *gcePDExternalCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
ns := config.Framework.Namespace.Name
provisioner := g.driverInfo.Name
suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name)
@@ -475,8 +473,12 @@ func (g *gcePDExternalCSIDriver) GetClaimSize() string {
return "5Gi"
}
func (g *gcePDExternalCSIDriver) CreateDriver() {
}
func (g *gcePDExternalCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
framework.SkipIfMultizone(f.ClientSet)
func (g *gcePDExternalCSIDriver) CleanupDriver() {
return &testsuites.PerTestConfig{
Driver: g,
Prefix: "gcepdext",
Framework: f,
}, func() {}
}

File diff suppressed because it is too large

@@ -18,15 +18,13 @@ package storage
import (
. "github.com/onsi/ginkgo"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
// List of testDrivers to be executed in the loop below
var testDrivers = []func(config testsuites.TestConfig) testsuites.TestDriver{
var testDrivers = []func() testsuites.TestDriver{
drivers.InitNFSDriver,
drivers.InitGlusterFSDriver,
drivers.InitISCSIDriver,
@@ -59,41 +57,13 @@ var testSuites = []func() testsuites.TestSuite{
testsuites.InitProvisioningTestSuite,
}
func intreeTunePattern(patterns []testpatterns.TestPattern) []testpatterns.TestPattern {
return patterns
}
// This executes testSuites for in-tree volumes.
var _ = utils.SIGDescribe("In-tree Volumes", func() {
f := framework.NewDefaultFramework("volumes")
var (
// Common configuration options for all drivers.
config = testsuites.TestConfig{
Framework: f,
Prefix: "in-tree",
}
)
for _, initDriver := range testDrivers {
curDriver := initDriver(config)
curConfig := curDriver.GetDriverInfo().Config
curDriver := initDriver()
Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() {
BeforeEach(func() {
// Reset config. The driver might have modified its copy
// in a previous test.
curDriver.GetDriverInfo().Config = curConfig
// setupDriver
curDriver.CreateDriver()
})
AfterEach(func() {
// Cleanup driver
curDriver.CleanupDriver()
})
testsuites.RunTestSuite(f, curDriver, testSuites, intreeTunePattern)
testsuites.DefineTestSuite(curDriver, testSuites)
})
}
})

@@ -143,10 +143,11 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
}
for _, test := range tests {
class := newStorageClass(test, ns, "" /* suffix */)
claim := newClaim(test, ns, "" /* suffix */)
claim.Spec.StorageClassName = &class.Name
testsuites.TestDynamicProvisioning(test, c, claim, class)
test.Client = c
test.Class = newStorageClass(test, ns, "" /* suffix */)
test.Claim = newClaim(test, ns, "" /* suffix */)
test.Claim.Spec.StorageClassName = &test.Class.Name
test.TestDynamicProvisioning()
}
}
@@ -301,6 +302,7 @@ func addTaint(c clientset.Interface, ns string, nodes []v1.Node, podZone string)
func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int) {
test := testsuites.StorageClassTest{
Client: c,
Name: "Regional PD storage class with waitForFirstConsumer test on GCE",
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{
@@ -312,14 +314,14 @@ }
}
suffix := "delayed-regional"
class := newStorageClass(test, ns, suffix)
test.Class = newStorageClass(test, ns, suffix)
var claims []*v1.PersistentVolumeClaim
for i := 0; i < pvcCount; i++ {
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
claim.Spec.StorageClassName = &test.Class.Name
claims = append(claims, claim)
}
pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class, nil /* node selector */, false /* expect unschedulable */)
pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */)
if node == nil {
framework.Failf("unexpected nil node found")
}
@@ -345,17 +347,20 @@ func testRegionalAllowedTopologies(c clientset.Interface, ns string) {
}
suffix := "topo-regional"
class := newStorageClass(test, ns, suffix)
test.Client = c
test.Class = newStorageClass(test, ns, suffix)
zones := getTwoRandomZones(c)
addAllowedTopologiesToStorageClass(c, class, zones)
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
pv := testsuites.TestDynamicProvisioning(test, c, claim, class)
addAllowedTopologiesToStorageClass(c, test.Class, zones)
test.Claim = newClaim(test, ns, suffix)
test.Claim.Spec.StorageClassName = &test.Class.Name
pv := test.TestDynamicProvisioning()
checkZonesFromLabelAndAffinity(pv, sets.NewString(zones...), true)
}
func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns string, pvcCount int) {
test := testsuites.StorageClassTest{
Client: c,
Name: "Regional PD storage class with allowedTopologies and waitForFirstConsumer test on GCE",
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{
@@ -367,16 +372,16 @@ }
}
suffix := "topo-delayed-regional"
class := newStorageClass(test, ns, suffix)
test.Class = newStorageClass(test, ns, suffix)
topoZones := getTwoRandomZones(c)
addAllowedTopologiesToStorageClass(c, class, topoZones)
addAllowedTopologiesToStorageClass(c, test.Class, topoZones)
var claims []*v1.PersistentVolumeClaim
for i := 0; i < pvcCount; i++ {
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
claim.Spec.StorageClassName = &test.Class.Name
claims = append(claims, claim)
}
pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class, nil /* node selector */, false /* expect unschedulable */)
pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */)
if node == nil {
framework.Failf("unexpected nil node found")
}

@@ -31,6 +31,7 @@ go_library(
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/podlogs:go_default_library",
"//test/e2e/storage/testpatterns:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//test/utils/image:go_default_library",

@@ -17,7 +17,9 @@ limitations under the License.
package testsuites
import (
"context"
"fmt"
"regexp"
"time"
. "github.com/onsi/ginkgo"
@@ -32,6 +34,7 @@ import (
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/podlogs"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
@@ -39,10 +42,10 @@ import (
type TestSuite interface {
// getTestSuiteInfo returns the TestSuiteInfo for this TestSuite
getTestSuiteInfo() TestSuiteInfo
// skipUnsupportedTest skips the test if this TestSuite is not suitable to be tested with the combination of TestPattern and TestDriver
skipUnsupportedTest(testpatterns.TestPattern, TestDriver)
// execTest executes test of the testpattern for the driver
execTest(TestDriver, testpatterns.TestPattern)
// defineTest defines tests of the testpattern for the driver.
// Called inside a Ginkgo context that reflects the current driver and test pattern,
// so the test suite can define tests directly with ginkgo.It.
defineTests(TestDriver, testpatterns.TestPattern)
}
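Under the new contract, a suite might look like the following skeleton (exampleTestSuite is hypothetical; the delayed init/cleanup-per-It pattern mirrors the rewritten provisioning suite later in this diff):

func (s *exampleTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
	// defineTests runs inside the Context that DefineTestSuite creates for this
	// driver/pattern combination, so ginkgo.It can be used directly.
	f := framework.NewDefaultFramework("example")

	It("should exercise the volume", func() {
		// Expensive setup is delayed until the test actually runs.
		config, cleanup := driver.PrepareTest(f)
		defer cleanup()
		_ = config
		// ... test logic using config ...
	})
}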
// TestSuiteInfo represents a set of parameters for TestSuite
@@ -54,11 +57,8 @@ type TestSuiteInfo struct {
// TestResource represents an interface for resources that is used by TestSuite
type TestResource interface {
// setupResource sets up test resources to be used for the tests with the
// combination of TestDriver and TestPattern
setupResource(TestDriver, testpatterns.TestPattern)
// cleanupResource clean up the test resources created in SetupResource
cleanupResource(TestDriver, testpatterns.TestPattern)
// cleanupResource cleans up the test resources created when setting up the resource
cleanupResource()
}
func getTestNameStr(suite TestSuite, pattern testpatterns.TestPattern) string {
@@ -66,27 +66,34 @@ func getTestNameStr(suite TestSuite, pattern testpatterns.TestPattern) string {
return fmt.Sprintf("[Testpattern: %s]%s %s%s", pattern.Name, pattern.FeatureTag, tsInfo.name, tsInfo.featureTag)
}
// RunTestSuite runs all testpatterns of all testSuites for a driver
func RunTestSuite(f *framework.Framework, driver TestDriver, tsInits []func() TestSuite, tunePatternFunc func([]testpatterns.TestPattern) []testpatterns.TestPattern) {
// DefineTestSuite defines tests for all testpatterns and all testSuites for a driver
func DefineTestSuite(driver TestDriver, tsInits []func() TestSuite) {
for _, testSuiteInit := range tsInits {
suite := testSuiteInit()
patterns := tunePatternFunc(suite.getTestSuiteInfo().testPatterns)
for _, pattern := range patterns {
suite.execTest(driver, pattern)
for _, pattern := range suite.getTestSuiteInfo().testPatterns {
p := pattern
Context(getTestNameStr(suite, p), func() {
BeforeEach(func() {
// Skip unsupported tests to avoid unnecessary resource initialization
skipUnsupportedTest(driver, p)
})
suite.defineTests(driver, p)
})
}
}
}
// skipUnsupportedTest will skip tests if the combination of driver, testsuite, and testpattern
// skipUnsupportedTest will skip tests if the combination of driver and testpattern
// is not suitable to be tested.
// Whether it needs to be skipped is checked by the following steps:
// 1. Check whether SnapshotType is supported by the driver from its interface
// 2. Check whether volType is supported by the driver from its interface
// 3. Check whether fsType is supported
// 4. Check with driver-specific logic
// 5. Check with testSuite-specific logic
func skipUnsupportedTest(suite TestSuite, driver TestDriver, pattern testpatterns.TestPattern) {
//
// Test suites can also skip tests inside their own defineTests function or in
// individual tests.
func skipUnsupportedTest(driver TestDriver, pattern testpatterns.TestPattern) {
dInfo := driver.GetDriverInfo()
var isSupported bool
@@ -130,9 +137,6 @@ func skipUnsupportedTest(suite TestSuite, driver TestDriver, pattern testpattern
// 4. Check with driver specific logic
driver.SkipUnsupportedTest(pattern)
// 5. Check with testSuite specific logic
suite.skipUnsupportedTest(pattern, driver)
}
// genericVolumeTestResource is a generic implementation of TestResource that will be able to
@@ -141,40 +145,45 @@ func skipUnsupportedTest(suite TestSuite, driver TestDriver, pattern testpattern
// Also, see subpath.go in the same directory for how to extend and use it.
type genericVolumeTestResource struct {
driver TestDriver
config *PerTestConfig
pattern testpatterns.TestPattern
volType string
volSource *v1.VolumeSource
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
sc *storagev1.StorageClass
driverTestResource interface{}
volume TestVolume
}
var _ TestResource = &genericVolumeTestResource{}
// setupResource sets up genericVolumeTestResource
func (r *genericVolumeTestResource) setupResource(driver TestDriver, pattern testpatterns.TestPattern) {
r.driver = driver
func createGenericVolumeTestResource(driver TestDriver, config *PerTestConfig, pattern testpatterns.TestPattern) *genericVolumeTestResource {
r := genericVolumeTestResource{
driver: driver,
config: config,
pattern: pattern,
}
dInfo := driver.GetDriverInfo()
f := dInfo.Config.Framework
f := config.Framework
cs := f.ClientSet
fsType := pattern.FsType
volType := pattern.VolType
// Create volume for pre-provisioned volume tests
r.driverTestResource = CreateVolume(driver, volType)
r.volume = CreateVolume(driver, config, volType)
switch volType {
case testpatterns.InlineVolume:
framework.Logf("Creating resource for inline volume")
if iDriver, ok := driver.(InlineVolumeTestDriver); ok {
r.volSource = iDriver.GetVolumeSource(false, fsType, r.driverTestResource)
r.volSource = iDriver.GetVolumeSource(false, fsType, r.volume)
r.volType = dInfo.Name
}
case testpatterns.PreprovisionedPV:
framework.Logf("Creating resource for pre-provisioned PV")
if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
pvSource, volumeNodeAffinity := pDriver.GetPersistentVolumeSource(false, fsType, r.driverTestResource)
pvSource, volumeNodeAffinity := pDriver.GetPersistentVolumeSource(false, fsType, r.volume)
if pvSource != nil {
r.volSource, r.pv, r.pvc = createVolumeSourceWithPVCPV(f, dInfo.Name, pvSource, volumeNodeAffinity, false)
}
@@ -184,7 +193,7 @@ func (r *genericVolumeTestResource) setupResource(driver TestDriver, pattern tes
framework.Logf("Creating resource for dynamic PV")
if dDriver, ok := driver.(DynamicPVTestDriver); ok {
claimSize := dDriver.GetClaimSize()
r.sc = dDriver.GetDynamicProvisionStorageClass(fsType)
r.sc = dDriver.GetDynamicProvisionStorageClass(r.config, fsType)
By("creating a StorageClass " + r.sc.Name)
var err error
@@ -204,13 +213,14 @@ func (r *genericVolumeTestResource) setupResource(driver TestDriver, pattern tes
if r.volSource == nil {
framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, volType)
}
return &r
}
// cleanupResource cleans up genericVolumeTestResource
func (r *genericVolumeTestResource) cleanupResource(driver TestDriver, pattern testpatterns.TestPattern) {
dInfo := driver.GetDriverInfo()
f := dInfo.Config.Framework
volType := pattern.VolType
func (r *genericVolumeTestResource) cleanupResource() {
f := r.config.Framework
volType := r.pattern.VolType
if r.pvc != nil || r.pv != nil {
switch volType {
@@ -241,7 +251,9 @@ }
}
// Cleanup volume for pre-provisioned volume tests
DeleteVolume(driver, volType, r.driverTestResource)
if r.volume != nil {
r.volume.DeleteVolume()
}
}
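With setup folded into a constructor and cleanupResource made parameterless, call sites would now look roughly like this (sketch; driver, config, and pattern come from the enclosing test):

resource := createGenericVolumeTestResource(driver, config, pattern)
defer resource.cleanupResource() // driver, config, and pattern are captured in the struct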
func createVolumeSourceWithPVCPV(
@@ -354,7 +366,7 @@ func deleteStorageClass(cs clientset.Interface, className string) {
// the testsuites package whereas framework.VolumeTestConfig is merely
// an implementation detail. It contains fields that have no effect,
which makes it unsuitable for use in the testsuites public API.
func convertTestConfig(in *TestConfig) framework.VolumeTestConfig {
func convertTestConfig(in *PerTestConfig) framework.VolumeTestConfig {
if in.ServerConfig != nil {
return *in.ServerConfig
}
@@ -388,3 +400,42 @@ func getSnapshot(claimName string, ns, snapshotClassName string) *unstructured.U
return snapshot
}
// StartPodLogs begins capturing log output and events from current
// and future pods running in the namespace of the framework. That
// ends when the returned cleanup function is called.
//
// The output goes to log files (when using --report-dir, as in the
// CI) or the output stream (otherwise).
func StartPodLogs(f *framework.Framework) func() {
ctx, cancel := context.WithCancel(context.Background())
cs := f.ClientSet
ns := f.Namespace
to := podlogs.LogOutput{
StatusWriter: GinkgoWriter,
}
if framework.TestContext.ReportDir == "" {
to.LogWriter = GinkgoWriter
} else {
test := CurrentGinkgoTestDescription()
reg := regexp.MustCompile("[^a-zA-Z0-9_-]+")
// We end the prefix with a slash to ensure that all logs
// end up in a directory named after the current test.
//
// TODO: use a deeper directory hierarchy once gubernator
// supports that (https://github.com/kubernetes/test-infra/issues/10289).
to.LogPathPrefix = framework.TestContext.ReportDir + "/" +
reg.ReplaceAllString(test.FullTestText, "_") + "/"
}
podlogs.CopyAllLogs(ctx, cs, ns.Name, to)
// pod events are something that the framework already collects itself
// after a failed test. Logging them live is only useful for interactive
// debugging, not when we collect reports.
if framework.TestContext.ReportDir == "" {
podlogs.WatchPods(ctx, cs, ns.Name, GinkgoWriter)
}
return cancel
}
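The drivers updated in this PR pair StartPodLogs with the cleanup callback returned from PrepareTest. A trimmed sketch of that pattern (someDriver is illustrative):

func (d *someDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
	cancelLogging := testsuites.StartPodLogs(f) // begin capturing pod logs and events
	config := &testsuites.PerTestConfig{Driver: d, Prefix: "some", Framework: f}
	// ... deploy the driver ...
	return config, func() {
		// ... undeploy the driver ...
		cancelLogging() // stop log capture once the test is done
	}
}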

@@ -37,13 +37,13 @@ func GetDriverNameWithFeatureTags(driver TestDriver) string {
}
// CreateVolume creates a volume for the test, unless it is a dynamicPV test
func CreateVolume(driver TestDriver, volType testpatterns.TestVolType) interface{} {
func CreateVolume(driver TestDriver, config *PerTestConfig, volType testpatterns.TestVolType) TestVolume {
switch volType {
case testpatterns.InlineVolume:
fallthrough
case testpatterns.PreprovisionedPV:
if pDriver, ok := driver.(PreprovisionedVolumeTestDriver); ok {
return pDriver.CreateVolume(volType)
return pDriver.CreateVolume(config, volType)
}
case testpatterns.DynamicPV:
// No need to create volume
@@ -53,22 +53,6 @@ func CreateVolume(driver TestDriver, volType testpatterns.TestVolType) interface
return nil
}
// DeleteVolume deletes a volume for the test, unless it is a dynamicPV test
func DeleteVolume(driver TestDriver, volType testpatterns.TestVolType, testResource interface{}) {
switch volType {
case testpatterns.InlineVolume:
fallthrough
case testpatterns.PreprovisionedPV:
if pDriver, ok := driver.(PreprovisionedVolumeTestDriver); ok {
pDriver.DeleteVolume(volType, testResource)
}
case testpatterns.DynamicPV:
// No need to delete volume
default:
framework.Failf("Invalid volType specified: %v", volType)
}
}
// GetStorageClass constructs a new StorageClass instance
// with a unique name that is based on namespace + suffix.
func GetStorageClass(
@@ -119,8 +103,3 @@ func GetSnapshotClass(
return snapshotClass
}
// GetUniqueDriverName returns a unique driver name that can be used in parallel tests
func GetUniqueDriverName(driver TestDriver) string {
return fmt.Sprintf("%s-%s", driver.GetDriverInfo().Name, driver.GetDriverInfo().Config.Framework.UniqueName)
}
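The free function is replaced by a method on the per-test config; per the hunks above, call sites now read:

provisioner := config.GetUniqueDriverName() // config is the *PerTestConfig returned by PrepareTest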

@@ -41,6 +41,9 @@ import (
// StorageClassTest represents parameters to be used by provisioning tests.
// Not all parameters are used by all tests.
type StorageClassTest struct {
Client clientset.Interface
Claim *v1.PersistentVolumeClaim
Class *storage.StorageClass
Name string
CloudProviders []string
Provisioner string
@@ -76,183 +79,162 @@ func (p *provisioningTestSuite) getTestSuiteInfo() TestSuiteInfo {
return p.tsInfo
}
func (p *provisioningTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) {
func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
type local struct {
config *PerTestConfig
testCleanup func()
testCase *StorageClassTest
cs clientset.Interface
pvc *v1.PersistentVolumeClaim
sc *storage.StorageClass
}
func createProvisioningTestInput(driver TestDriver, pattern testpatterns.TestPattern) (provisioningTestResource, provisioningTestInput) {
// Setup test resource for driver and testpattern
resource := provisioningTestResource{}
resource.setupResource(driver, pattern)
input := provisioningTestInput{
testCase: StorageClassTest{
ClaimSize: resource.claimSize,
ExpectedSize: resource.claimSize,
},
cs: driver.GetDriverInfo().Config.Framework.ClientSet,
dc: driver.GetDriverInfo().Config.Framework.DynamicClient,
pvc: resource.pvc,
sc: resource.sc,
vsc: resource.vsc,
dInfo: driver.GetDriverInfo(),
nodeName: driver.GetDriverInfo().Config.ClientNodeName,
}
return resource, input
}
func (p *provisioningTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) {
Context(getTestNameStr(p, pattern), func() {
var (
resource provisioningTestResource
input provisioningTestInput
needsCleanup bool
dInfo = driver.GetDriverInfo()
dDriver DynamicPVTestDriver
l local
)
BeforeEach(func() {
needsCleanup = false
// Skip unsupported tests to avoid unnecessary resource initialization
skipUnsupportedTest(p, driver, pattern)
needsCleanup = true
// Create test input
resource, input = createProvisioningTestInput(driver, pattern)
})
AfterEach(func() {
if needsCleanup {
resource.cleanupResource(driver, pattern)
// Check preconditions.
if pattern.VolType != testpatterns.DynamicPV {
framework.Skipf("Suite %q does not support %v", p.tsInfo.name, pattern.VolType)
}
ok := false
dDriver, ok = driver.(DynamicPVTestDriver)
if !ok {
framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
}
})
// Ginkgo's "Global Shared Behaviors" require arguments for a shared function
// to be a single struct and to be passed as a pointer.
// Please see https://onsi.github.io/ginkgo/#global-shared-behaviors for details.
testProvisioning(&input)
})
}
// This intentionally comes after checking the preconditions because it
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("provisioning")
type provisioningTestResource struct {
driver TestDriver
init := func() {
l = local{}
claimSize string
sc *storage.StorageClass
pvc *v1.PersistentVolumeClaim
// the following parameter is used to test provisioning a volume from a snapshot
vsc *unstructured.Unstructured
// Now do the more expensive test initialization.
l.config, l.testCleanup = driver.PrepareTest(f)
l.cs = l.config.Framework.ClientSet
claimSize := dDriver.GetClaimSize()
l.sc = dDriver.GetDynamicProvisionStorageClass(l.config, "")
if l.sc == nil {
framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
}
var _ TestResource = &provisioningTestResource{}
func (p *provisioningTestResource) setupResource(driver TestDriver, pattern testpatterns.TestPattern) {
// Setup provisioningTest resource
switch pattern.VolType {
case testpatterns.DynamicPV:
if dDriver, ok := driver.(DynamicPVTestDriver); ok {
p.sc = dDriver.GetDynamicProvisionStorageClass("")
if p.sc == nil {
framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", driver.GetDriverInfo().Name)
}
p.driver = driver
p.claimSize = dDriver.GetClaimSize()
p.pvc = getClaim(p.claimSize, driver.GetDriverInfo().Config.Framework.Namespace.Name)
p.pvc.Spec.StorageClassName = &p.sc.Name
framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", p.sc, p.pvc)
if sDriver, ok := driver.(SnapshottableTestDriver); ok {
p.vsc = sDriver.GetSnapshotClass()
}
}
default:
framework.Failf("Dynamic Provision test doesn't support: %s", pattern.VolType)
l.pvc = getClaim(claimSize, l.config.Framework.Namespace.Name)
l.pvc.Spec.StorageClassName = &l.sc.Name
framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", l.sc, l.pvc)
l.testCase = &StorageClassTest{
Client: l.config.Framework.ClientSet,
Claim: l.pvc,
Class: l.sc,
ClaimSize: claimSize,
ExpectedSize: claimSize,
}
}
func (p *provisioningTestResource) cleanupResource(driver TestDriver, pattern testpatterns.TestPattern) {
cleanup := func() {
if l.testCleanup != nil {
l.testCleanup()
l.testCleanup = nil
}
type provisioningTestInput struct {
testCase StorageClassTest
cs clientset.Interface
dc dynamic.Interface
pvc *v1.PersistentVolumeClaim
sc *storage.StorageClass
vsc *unstructured.Unstructured
dInfo *DriverInfo
nodeName string
}
func testProvisioning(input *provisioningTestInput) {
// common checker for most of the test cases below
pvcheck := func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
PVWriteReadSingleNodeCheck(input.cs, claim, volume, NodeSelection{Name: input.nodeName})
}
It("should provision storage with defaults", func() {
input.testCase.PvCheck = pvcheck
TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
init()
defer cleanup()
l.testCase.TestDynamicProvisioning()
})
It("should provision storage with mount options", func() {
if input.dInfo.SupportedMountOption == nil {
framework.Skipf("Driver %q does not define supported mount option - skipping", input.dInfo.Name)
if dInfo.SupportedMountOption == nil {
framework.Skipf("Driver %q does not define supported mount option - skipping", dInfo.Name)
}
input.sc.MountOptions = input.dInfo.SupportedMountOption.Union(input.dInfo.RequiredMountOption).List()
input.testCase.PvCheck = pvcheck
TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
init()
defer cleanup()
l.testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List()
l.testCase.TestDynamicProvisioning()
})
It("should access volume from different nodes", func() {
init()
defer cleanup()
// The assumption is that if the test hasn't been
// locked onto a single node, then the driver is
// usable on all of them *and* supports accessing a volume
// from any node.
if input.nodeName != "" {
framework.Skipf("Driver %q only supports testing on one node - skipping", input.dInfo.Name)
if l.config.ClientNodeName != "" {
framework.Skipf("Driver %q only supports testing on one node - skipping", dInfo.Name)
}
// Ensure that we actually have more than one node.
nodes := framework.GetReadySchedulableNodesOrDie(input.cs)
nodes := framework.GetReadySchedulableNodesOrDie(l.cs)
if len(nodes.Items) <= 1 {
framework.Skipf("need more than one node - skipping")
}
input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
PVMultiNodeCheck(input.cs, claim, volume, NodeSelection{Name: input.nodeName})
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
PVMultiNodeCheck(l.cs, claim, volume, NodeSelection{Name: l.config.ClientNodeName})
}
TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
l.testCase.TestDynamicProvisioning()
})
It("should create and delete block persistent volumes", func() {
if !input.dInfo.Capabilities[CapBlock] {
framework.Skipf("Driver %q does not support BlockVolume - skipping", input.dInfo.Name)
if !dInfo.Capabilities[CapBlock] {
framework.Skipf("Driver %q does not support BlockVolume - skipping", dInfo.Name)
}
init()
defer cleanup()
block := v1.PersistentVolumeBlock
input.testCase.VolumeMode = &block
input.pvc.Spec.VolumeMode = &block
TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
l.testCase.VolumeMode = &block
l.pvc.Spec.VolumeMode = &block
l.testCase.TestDynamicProvisioning()
})
It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func() {
if !input.dInfo.Capabilities[CapDataSource] {
framework.Skipf("Driver %q does not support populate data from snapshot - skipping", input.dInfo.Name)
if !dInfo.Capabilities[CapDataSource] {
framework.Skipf("Driver %q does not support populate data from snapshot - skipping", dInfo.Name)
}
dataSource, cleanupFunc := prepareDataSourceForProvisioning(NodeSelection{Name: input.nodeName}, input.cs, input.dc, input.pvc, input.sc, input.vsc)
sDriver, ok := driver.(SnapshottableTestDriver)
if !ok {
framework.Failf("Driver %q has CapDataSource but does not implement SnapshottableTestDriver", dInfo.Name)
}
init()
defer cleanup()
dc := l.config.Framework.DynamicClient
vsc := sDriver.GetSnapshotClass(l.config)
dataSource, cleanupFunc := prepareDataSourceForProvisioning(NodeSelection{Name: l.config.ClientNodeName}, l.cs, dc, l.pvc, l.sc, vsc)
defer cleanupFunc()
input.pvc.Spec.DataSource = dataSource
input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
l.pvc.Spec.DataSource = dataSource
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
By("checking whether the created volume has the pre-populated data")
command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace)
RunInPodWithVolume(input.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, NodeSelection{Name: input.nodeName})
RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, NodeSelection{Name: l.config.ClientNodeName})
}
TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
l.testCase.TestDynamicProvisioning()
})
It("should allow concurrent writes on the same node", func() {
if !input.dInfo.Capabilities[CapMultiPODs] {
framework.Skipf("Driver %q does not support multiple concurrent pods - skipping", input.dInfo.Name)
if !dInfo.Capabilities[CapMultiPODs] {
framework.Skipf("Driver %q does not support multiple concurrent pods - skipping", dInfo.Name)
}
input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
init()
defer cleanup()
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
// We start two pods concurrently on the same node,
using the same PVC. Both wait for the other to create a
// file before returning. The pods are forced onto the
@@ -265,7 +247,7 @@ func testProvisioning(input *provisioningTestInput) {
defer GinkgoRecover()
defer wg.Done()
node := NodeSelection{
Name: input.nodeName,
Name: l.config.ClientNodeName,
}
if podName == secondPodName {
node.Affinity = &v1.Affinity{
@@ -283,18 +265,24 @@ func testProvisioning(input *provisioningTestInput) {
},
}
}
RunInPodWithVolume(input.cs, claim.Namespace, claim.Name, podName, command, node)
RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, podName, command, node)
}
go run(firstPodName, "touch /mnt/test/first && while ! [ -f /mnt/test/second ]; do sleep 1; done")
go run(secondPodName, "touch /mnt/test/second && while ! [ -f /mnt/test/first ]; do sleep 1; done")
wg.Wait()
}
TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
l.testCase.TestDynamicProvisioning()
})
}
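The init()/cleanup() pattern above recurs in every suite this PR touches: cheap precondition checks first, then framework creation, then per-It setup. A minimal sketch of that skeleton under the new API, assuming it lives in the testsuites package; exampleTestSuite and the "example" prefix are illustrative only, not part of this PR:

package testsuites

import (
	. "github.com/onsi/ginkgo"

	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)

type exampleTestSuite struct{}

func (s *exampleTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
	// 1. Cheap precondition checks come first, before any cluster access.
	dInfo := driver.GetDriverInfo()
	if !dInfo.Capabilities[CapBlock] {
		framework.Skipf("Driver %q does not support BlockVolume - skipping", dInfo.Name)
	}

	// 2. The framework registers its own BeforeEach/AfterEach, so it is
	// created after the precondition checks; f may only be used inside
	// an It or Context callback.
	f := framework.NewDefaultFramework("example")

	// 3. The expensive setup moves into init()/cleanup(), invoked per It.
	var config *PerTestConfig
	var testCleanup func()
	init := func() {
		config, testCleanup = driver.PrepareTest(f)
	}
	cleanup := func() {
		if testCleanup != nil {
			testCleanup()
			testCleanup = nil
		}
	}

	It("runs one test case", func() {
		init()
		defer cleanup()
		framework.Logf("testing with driver %s", config.GetUniqueDriverName())
	})
}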
// TestDynamicProvisioning tests dynamic provisioning with specified StorageClassTest and storageClass
func TestDynamicProvisioning(t StorageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass) *v1.PersistentVolume {
// TestDynamicProvisioning tests dynamic provisioning with specified StorageClassTest
func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
client := t.Client
Expect(client).NotTo(BeNil(), "StorageClassTest.Client is required")
claim := t.Claim
Expect(claim).NotTo(BeNil(), "StorageClassTest.Claim is required")
class := t.Class
var err error
if class != nil {
Expect(*claim.Spec.StorageClassName).To(Equal(class.Name))
@@ -493,29 +481,29 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
pod = nil
}
func TestBindingWaitForFirstConsumer(t StorageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass, nodeSelector map[string]string, expectUnschedulable bool) (*v1.PersistentVolume, *v1.Node) {
pvs, node := TestBindingWaitForFirstConsumerMultiPVC(t, client, []*v1.PersistentVolumeClaim{claim}, class, nodeSelector, expectUnschedulable)
func (t StorageClassTest) TestBindingWaitForFirstConsumer(nodeSelector map[string]string, expectUnschedulable bool) (*v1.PersistentVolume, *v1.Node) {
pvs, node := t.TestBindingWaitForFirstConsumerMultiPVC([]*v1.PersistentVolumeClaim{t.Claim}, nodeSelector, expectUnschedulable)
if pvs == nil {
return nil, node
}
return pvs[0], node
}
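With the receiver-based API, callers populate a StorageClassTest once and invoke its methods directly. A hedged sketch of the new call shape; exampleWFFC is hypothetical, and claim and class are assumed to be prepared by the caller with a VolumeBindingWaitForFirstConsumer StorageClass:

// exampleWFFC is illustrative only, relying on imports already present
// in this file (clientset, v1, storage, framework).
func exampleWFFC(cs clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass) {
	t := StorageClassTest{
		Client: cs,
		Claim:  claim,
		Class:  class,
	}
	pv, node := t.TestBindingWaitForFirstConsumer(nil /* nodeSelector */, false /* expectUnschedulable */)
	framework.Logf("claim bound to PV %q on node %q", pv.Name, node.Name)
}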
func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientset.Interface, claims []*v1.PersistentVolumeClaim, class *storage.StorageClass, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) {
func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.PersistentVolumeClaim, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) {
var err error
Expect(len(claims)).ToNot(Equal(0))
namespace := claims[0].Namespace
By("creating a storage class " + class.Name)
class, err = client.StorageV1().StorageClasses().Create(class)
By("creating a storage class " + t.Class.Name)
class, err := t.Client.StorageV1().StorageClasses().Create(t.Class)
Expect(err).NotTo(HaveOccurred())
defer deleteStorageClass(client, class.Name)
defer deleteStorageClass(t.Client, class.Name)
By("creating claims")
var claimNames []string
var createdClaims []*v1.PersistentVolumeClaim
for _, claim := range claims {
c, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
c, err := t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
claimNames = append(claimNames, c.Name)
createdClaims = append(createdClaims, c)
Expect(err).NotTo(HaveOccurred())
@@ -523,7 +511,7 @@ func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientse
defer func() {
var errors map[string]error
for _, claim := range createdClaims {
err := framework.DeletePersistentVolumeClaim(client, claim.Name, claim.Namespace)
err := framework.DeletePersistentVolumeClaim(t.Client, claim.Name, claim.Namespace)
if err != nil {
errors[claim.Name] = err
}
@@ -537,44 +525,44 @@ func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientse
// Wait for ClaimProvisionTimeout (across all PVCs in parallel) and make sure the phase did not become Bound, i.e. the Wait errors out
By("checking the claims are in pending state")
err = framework.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, client, namespace, claimNames, 2*time.Second /* Poll */, framework.ClaimProvisionShortTimeout, true)
err = framework.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, framework.ClaimProvisionShortTimeout, true)
Expect(err).To(HaveOccurred())
verifyPVCsPending(client, createdClaims)
verifyPVCsPending(t.Client, createdClaims)
By("creating a pod referring to the claims")
// Create a pod referring to the claim and wait for it to get to running
var pod *v1.Pod
if expectUnschedulable {
pod, err = framework.CreateUnschedulablePod(client, namespace, nodeSelector, createdClaims, true /* isPrivileged */, "" /* command */)
pod, err = framework.CreateUnschedulablePod(t.Client, namespace, nodeSelector, createdClaims, true /* isPrivileged */, "" /* command */)
} else {
pod, err = framework.CreatePod(client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */)
pod, err = framework.CreatePod(t.Client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */)
}
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.DeletePodOrFail(client, pod.Namespace, pod.Name)
framework.WaitForPodToDisappear(client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
framework.DeletePodOrFail(t.Client, pod.Namespace, pod.Name)
framework.WaitForPodToDisappear(t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
}()
if expectUnschedulable {
// Verify that no claims are provisioned.
verifyPVCsPending(client, createdClaims)
verifyPVCsPending(t.Client, createdClaims)
return nil, nil
}
// collect node details
node, err := client.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
node, err := t.Client.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("re-checking the claims to see they binded")
var pvs []*v1.PersistentVolume
for _, claim := range createdClaims {
// Get new copy of the claim
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
claim, err = t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// make sure claim did bind
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
pv, err := t.Client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pvs = append(pvs, pv)
}


@@ -24,13 +24,10 @@ import (
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
@@ -54,7 +51,6 @@ type SnapshotClassTest struct {
Parameters map[string]string
NodeName string
NodeSelector map[string]string // NodeSelector for the pod
SnapshotContentCheck func(snapshotContent *unstructured.Unstructured) error
}
type snapshottableTestSuite struct {
@@ -79,198 +75,116 @@ func (s *snapshottableTestSuite) getTestSuiteInfo() TestSuiteInfo {
return s.tsInfo
}
func (s *snapshottableTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) {
dInfo := driver.GetDriverInfo()
if !dInfo.Capabilities[CapDataSource] {
framework.Skipf("Driver %q does not support snapshots - skipping", dInfo.Name)
}
}
func createSnapshottableTestInput(driver TestDriver, pattern testpatterns.TestPattern) (snapshottableTestResource, snapshottableTestInput) {
// Setup test resource for driver and testpattern
resource := snapshottableTestResource{}
resource.setupResource(driver, pattern)
dInfo := driver.GetDriverInfo()
input := snapshottableTestInput{
testCase: SnapshotClassTest{
NodeName: dInfo.Config.ClientNodeName,
},
cs: dInfo.Config.Framework.ClientSet,
dc: dInfo.Config.Framework.DynamicClient,
pvc: resource.pvc,
sc: resource.sc,
vsc: resource.vsc,
dInfo: dInfo,
}
return resource, input
}
func (s *snapshottableTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) {
Context(getTestNameStr(s, pattern), func() {
func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
var (
resource snapshottableTestResource
input snapshottableTestInput
needsCleanup bool
sDriver SnapshottableTestDriver
dDriver DynamicPVTestDriver
)
BeforeEach(func() {
needsCleanup = false
// Skip unsupported tests to avoid unnecessary resource initialization
skipUnsupportedTest(s, driver, pattern)
needsCleanup = true
// Create test input
resource, input = createSnapshottableTestInput(driver, pattern)
})
AfterEach(func() {
if needsCleanup {
resource.cleanupResource(driver, pattern)
// Check preconditions.
Expect(pattern.SnapshotType).To(Equal(testpatterns.DynamicCreatedSnapshot))
dInfo := driver.GetDriverInfo()
ok := false
sDriver, ok = driver.(SnapshottableTestDriver)
if !dInfo.Capabilities[CapDataSource] || !ok {
framework.Skipf("Driver %q does not support snapshots - skipping", dInfo.Name)
}
dDriver, ok = driver.(DynamicPVTestDriver)
if !ok {
framework.Skipf("Driver %q does not support dynamic provisioning - skipping", driver.GetDriverInfo().Name)
}
})
// Ginkgo's "Global Shared Behaviors" require arguments for a shared function
// to be a single struct and to be passed as a pointer.
// Please see https://onsi.github.io/ginkgo/#global-shared-behaviors for details.
testSnapshot(&input)
})
}
// This intentionally comes after checking the preconditions because it
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("snapshotting")
type snapshottableTestResource struct {
driver TestDriver
claimSize string
It("should create snapshot with defaults [Feature:VolumeSnapshotDataSource]", func() {
cs := f.ClientSet
dc := f.DynamicClient
sc *storage.StorageClass
pvc *v1.PersistentVolumeClaim
// volume snapshot class
vsc *unstructured.Unstructured
}
// Now do the more expensive test initialization.
config, testCleanup := driver.PrepareTest(f)
defer testCleanup()
var _ TestResource = &snapshottableTestResource{}
func (s *snapshottableTestResource) setupResource(driver TestDriver, pattern testpatterns.TestPattern) {
// Setup snapshottableTest resource
switch pattern.SnapshotType {
case testpatterns.DynamicCreatedSnapshot:
if dDriver, ok := driver.(DynamicPVTestDriver); ok {
s.sc = dDriver.GetDynamicProvisionStorageClass("")
if s.sc == nil {
vsc := sDriver.GetSnapshotClass(config)
class := dDriver.GetDynamicProvisionStorageClass(config, "")
if class == nil {
framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", driver.GetDriverInfo().Name)
}
s.driver = driver
s.claimSize = dDriver.GetClaimSize()
s.pvc = getClaim(s.claimSize, driver.GetDriverInfo().Config.Framework.Namespace.Name)
s.pvc.Spec.StorageClassName = &s.sc.Name
framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", s.sc, s.pvc)
if sDriver, ok := driver.(SnapshottableTestDriver); ok {
s.vsc = sDriver.GetSnapshotClass()
}
}
claimSize := dDriver.GetClaimSize()
pvc := getClaim(claimSize, config.Framework.Namespace.Name)
pvc.Spec.StorageClassName = &class.Name
framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", class, pvc)
default:
framework.Failf("Dynamic Snapshot test doesn't support: %s", pattern.SnapshotType)
}
}
func (s *snapshottableTestResource) cleanupResource(driver TestDriver, pattern testpatterns.TestPattern) {
}
type snapshottableTestInput struct {
testCase SnapshotClassTest
cs clientset.Interface
dc dynamic.Interface
pvc *v1.PersistentVolumeClaim
sc *storage.StorageClass
// volume snapshot class
vsc *unstructured.Unstructured
dInfo *DriverInfo
}
func testSnapshot(input *snapshottableTestInput) {
It("should create snapshot with defaults [Feature:VolumeSnapshotDataSource]", func() {
TestCreateSnapshot(input.testCase, input.cs, input.dc, input.pvc, input.sc, input.vsc)
})
}
// TestCreateSnapshot tests dynamically creating a snapshot with the specified SnapshotClassTest and snapshotClass
func TestCreateSnapshot(
t SnapshotClassTest,
client clientset.Interface,
dynamicClient dynamic.Interface,
claim *v1.PersistentVolumeClaim,
class *storage.StorageClass,
snapshotClass *unstructured.Unstructured,
) *unstructured.Unstructured {
var err error
if class != nil {
By("creating a StorageClass " + class.Name)
class, err = client.StorageV1().StorageClasses().Create(class)
class, err := cs.StorageV1().StorageClasses().Create(class)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.Logf("deleting storage class %s", class.Name)
framework.ExpectNoError(client.StorageV1().StorageClasses().Delete(class.Name, nil))
framework.ExpectNoError(cs.StorageV1().StorageClasses().Delete(class.Name, nil))
}()
}
By("creating a claim")
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
framework.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name)
// typically this claim has already been deleted
err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)
err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil)
if err != nil && !apierrs.IsNotFound(err) {
framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err)
framework.Failf("Error deleting claim %q. Error: %v", pvc.Name, err)
}
}()
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
By("checking the claim")
// Get new copy of the claim
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Get the bound PV
pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
pv, err := cs.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("creating a SnapshotClass")
snapshotClass, err = dynamicClient.Resource(snapshotClassGVR).Create(snapshotClass, metav1.CreateOptions{})
vsc, err = dc.Resource(snapshotClassGVR).Create(vsc, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.Logf("deleting SnapshotClass %s", snapshotClass.GetName())
framework.ExpectNoError(dynamicClient.Resource(snapshotClassGVR).Delete(snapshotClass.GetName(), nil))
framework.Logf("deleting SnapshotClass %s", vsc.GetName())
framework.ExpectNoError(dc.Resource(snapshotClassGVR).Delete(vsc.GetName(), nil))
}()
By("creating a snapshot")
snapshot := getSnapshot(claim.Name, claim.Namespace, snapshotClass.GetName())
snapshot := getSnapshot(pvc.Name, pvc.Namespace, vsc.GetName())
snapshot, err = dynamicClient.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Create(snapshot, metav1.CreateOptions{})
snapshot, err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Create(snapshot, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.Logf("deleting snapshot %q/%q", snapshot.GetNamespace(), snapshot.GetName())
// typically this snapshot has already been deleted
err = dynamicClient.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Delete(snapshot.GetName(), nil)
err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Delete(snapshot.GetName(), nil)
if err != nil && !apierrs.IsNotFound(err) {
framework.Failf("Error deleting snapshot %q. Error: %v", claim.Name, err)
framework.Failf("Error deleting snapshot %q. Error: %v", pvc.Name, err)
}
}()
err = WaitForSnapshotReady(dynamicClient, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
err = WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
Expect(err).NotTo(HaveOccurred())
By("checking the snapshot")
// Get new copy of the snapshot
snapshot, err = dynamicClient.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Get(snapshot.GetName(), metav1.GetOptions{})
snapshot, err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Get(snapshot.GetName(), metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Get the bound snapshotContent
snapshotSpec := snapshot.Object["spec"].(map[string]interface{})
snapshotContentName := snapshotSpec["snapshotContentName"].(string)
snapshotContent, err := dynamicClient.Resource(snapshotContentGVR).Get(snapshotContentName, metav1.GetOptions{})
snapshotContent, err := dc.Resource(snapshotContentGVR).Get(snapshotContentName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
snapshotContentSpec := snapshotContent.Object["spec"].(map[string]interface{})
@@ -279,18 +193,11 @@ func TestCreateSnapshot(
// Check SnapshotContent properties
By("checking the SnapshotContent")
Expect(snapshotContentSpec["snapshotClassName"]).To(Equal(snapshotClass.GetName()))
Expect(snapshotContentSpec["snapshotClassName"]).To(Equal(vsc.GetName()))
Expect(volumeSnapshotRef["name"]).To(Equal(snapshot.GetName()))
Expect(volumeSnapshotRef["namespace"]).To(Equal(snapshot.GetNamespace()))
Expect(persistentVolumeRef["name"]).To(Equal(pv.Name))
// Run the checker
if t.SnapshotContentCheck != nil {
err = t.SnapshotContentCheck(snapshotContent)
Expect(err).NotTo(HaveOccurred())
}
return snapshotContent
})
}
// WaitForSnapshotReady waits for a VolumeSnapshot to be ready to use or until timeout occurs, whichever comes first.


@@ -71,98 +71,54 @@ func (s *subPathTestSuite) getTestSuiteInfo() TestSuiteInfo {
return s.tsInfo
}
func (s *subPathTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) {
}
func createSubPathTestInput(pattern testpatterns.TestPattern, resource subPathTestResource) subPathTestInput {
driver := resource.driver
dInfo := driver.GetDriverInfo()
f := dInfo.Config.Framework
subPath := f.Namespace.Name
subPathDir := filepath.Join(volumePath, subPath)
return subPathTestInput{
f: f,
subPathDir: subPathDir,
filePathInSubpath: filepath.Join(volumePath, fileName),
filePathInVolume: filepath.Join(subPathDir, fileName),
volType: resource.volType,
pod: resource.pod,
formatPod: resource.formatPod,
volSource: resource.genericVolumeTestResource.volSource,
roVol: resource.roVolSource,
}
}
func (s *subPathTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) {
Context(getTestNameStr(s, pattern), func() {
var (
resource subPathTestResource
input subPathTestInput
needsCleanup bool
)
BeforeEach(func() {
needsCleanup = false
// Skip unsupported tests to avoid unnecessary resource initialization
skipUnsupportedTest(s, driver, pattern)
needsCleanup = true
// Setup test resource for driver and testpattern
resource = subPathTestResource{}
resource.setupResource(driver, pattern)
// Create test input
input = createSubPathTestInput(pattern, resource)
})
AfterEach(func() {
if needsCleanup {
resource.cleanupResource(driver, pattern)
}
})
testSubPath(&input)
})
}
type subPathTestResource struct {
genericVolumeTestResource
func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
type local struct {
config *PerTestConfig
testCleanup func()
resource *genericVolumeTestResource
roVolSource *v1.VolumeSource
pod *v1.Pod
formatPod *v1.Pod
subPathDir string
filePathInSubpath string
filePathInVolume string
}
var l local
var _ TestResource = &subPathTestResource{}
// No preconditions to test. Normally they would be in a BeforeEach here.
func (s *subPathTestResource) setupResource(driver TestDriver, pattern testpatterns.TestPattern) {
s.driver = driver
dInfo := s.driver.GetDriverInfo()
f := dInfo.Config.Framework
fsType := pattern.FsType
volType := pattern.VolType
// This intentionally comes after checking the preconditions because it
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("provisioning")
// Setup generic test resource
s.genericVolumeTestResource.setupResource(driver, pattern)
init := func() {
l = local{}
// Now do the more expensive test initialization.
l.config, l.testCleanup = driver.PrepareTest(f)
l.resource = createGenericVolumeTestResource(driver, l.config, pattern)
// Setup subPath test dependent resource
volType := pattern.VolType
switch volType {
case testpatterns.InlineVolume:
if iDriver, ok := driver.(InlineVolumeTestDriver); ok {
s.roVolSource = iDriver.GetVolumeSource(true, fsType, s.genericVolumeTestResource.driverTestResource)
l.roVolSource = iDriver.GetVolumeSource(true, pattern.FsType, l.resource.volume)
}
case testpatterns.PreprovisionedPV:
s.roVolSource = &v1.VolumeSource{
l.roVolSource = &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: s.genericVolumeTestResource.pvc.Name,
ClaimName: l.resource.pvc.Name,
ReadOnly: true,
},
}
case testpatterns.DynamicPV:
s.roVolSource = &v1.VolumeSource{
l.roVolSource = &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: s.genericVolumeTestResource.pvc.Name,
ClaimName: l.resource.pvc.Name,
ReadOnly: true,
},
}
@@ -171,246 +127,295 @@ func (s *subPathTestResource) setupResource(driver TestDriver, pattern testpatte
}
subPath := f.Namespace.Name
config := dInfo.Config
s.pod = SubpathTestPod(f, subPath, s.volType, s.volSource, true)
s.pod.Spec.NodeName = config.ClientNodeName
s.pod.Spec.NodeSelector = config.ClientNodeSelector
l.pod = SubpathTestPod(f, subPath, l.resource.volType, l.resource.volSource, true)
l.pod.Spec.NodeName = l.config.ClientNodeName
l.pod.Spec.NodeSelector = l.config.ClientNodeSelector
s.formatPod = volumeFormatPod(f, s.volSource)
s.formatPod.Spec.NodeName = config.ClientNodeName
s.formatPod.Spec.NodeSelector = config.ClientNodeSelector
l.formatPod = volumeFormatPod(f, l.resource.volSource)
l.formatPod.Spec.NodeName = l.config.ClientNodeName
l.formatPod.Spec.NodeSelector = l.config.ClientNodeSelector
l.subPathDir = filepath.Join(volumePath, subPath)
l.filePathInSubpath = filepath.Join(volumePath, fileName)
l.filePathInVolume = filepath.Join(l.subPathDir, fileName)
}
func (s *subPathTestResource) cleanupResource(driver TestDriver, pattern testpatterns.TestPattern) {
dInfo := driver.GetDriverInfo()
f := dInfo.Config.Framework
// Cleanup subPath test dependent resource
cleanup := func() {
if l.pod != nil {
By("Deleting pod")
err := framework.DeletePodWithWait(f, f.ClientSet, s.pod)
err := framework.DeletePodWithWait(f, f.ClientSet, l.pod)
Expect(err).ToNot(HaveOccurred(), "while deleting pod")
// Cleanup generic test resource
s.genericVolumeTestResource.cleanupResource(driver, pattern)
l.pod = nil
}
type subPathTestInput struct {
f *framework.Framework
subPathDir string
filePathInSubpath string
filePathInVolume string
volType string
pod *v1.Pod
formatPod *v1.Pod
volSource *v1.VolumeSource
roVol *v1.VolumeSource
if l.resource != nil {
l.resource.cleanupResource()
l.resource = nil
}
if l.testCleanup != nil {
l.testCleanup()
l.testCleanup = nil
}
}
func testSubPath(input *subPathTestInput) {
It("should support non-existent path", func() {
init()
defer cleanup()
// Write the file in the subPath from init container 1
setWriteCommand(input.filePathInSubpath, &input.pod.Spec.InitContainers[1])
setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1])
// Read it from outside the subPath from container 1
testReadFile(input.f, input.filePathInVolume, input.pod, 1)
testReadFile(f, l.filePathInVolume, l.pod, 1)
})
It("should support existing directory", func() {
init()
defer cleanup()
// Create the directory
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s", input.subPathDir))
setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir))
// Write the file in the subPath from init container 1
setWriteCommand(input.filePathInSubpath, &input.pod.Spec.InitContainers[1])
setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1])
// Read it from outside the subPath from container 1
testReadFile(input.f, input.filePathInVolume, input.pod, 1)
testReadFile(f, l.filePathInVolume, l.pod, 1)
})
It("should support existing single file", func() {
init()
defer cleanup()
// Create the file in the init container
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", input.subPathDir, input.filePathInVolume))
setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", l.subPathDir, l.filePathInVolume))
// Read it from inside the subPath from container 0
testReadFile(input.f, input.filePathInSubpath, input.pod, 0)
testReadFile(f, l.filePathInSubpath, l.pod, 0)
})
It("should support file as subpath", func() {
// Create the file in the init container
setInitCommand(input.pod, fmt.Sprintf("echo %s > %s", input.f.Namespace.Name, input.subPathDir))
init()
defer cleanup()
TestBasicSubpath(input.f, input.f.Namespace.Name, input.pod)
// Create the file in the init container
setInitCommand(l.pod, fmt.Sprintf("echo %s > %s", f.Namespace.Name, l.subPathDir))
TestBasicSubpath(f, f.Namespace.Name, l.pod)
})
It("should fail if subpath directory is outside the volume [Slow]", func() {
init()
defer cleanup()
// Create the subpath outside the volume
setInitCommand(input.pod, fmt.Sprintf("ln -s /bin %s", input.subPathDir))
setInitCommand(l.pod, fmt.Sprintf("ln -s /bin %s", l.subPathDir))
// Pod should fail
testPodFailSubpath(input.f, input.pod, false)
testPodFailSubpath(f, l.pod, false)
})
It("should fail if subpath file is outside the volume [Slow]", func() {
init()
defer cleanup()
// Create the subpath outside the volume
setInitCommand(input.pod, fmt.Sprintf("ln -s /bin/sh %s", input.subPathDir))
setInitCommand(l.pod, fmt.Sprintf("ln -s /bin/sh %s", l.subPathDir))
// Pod should fail
testPodFailSubpath(input.f, input.pod, false)
testPodFailSubpath(f, l.pod, false)
})
It("should fail if non-existent subpath is outside the volume [Slow]", func() {
init()
defer cleanup()
// Create the subpath outside the volume
setInitCommand(input.pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", input.subPathDir))
setInitCommand(l.pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", l.subPathDir))
// Pod should fail
testPodFailSubpath(input.f, input.pod, false)
testPodFailSubpath(f, l.pod, false)
})
It("should fail if subpath with backstepping is outside the volume [Slow]", func() {
init()
defer cleanup()
// Create the subpath outside the volume
setInitCommand(input.pod, fmt.Sprintf("ln -s ../ %s", input.subPathDir))
setInitCommand(l.pod, fmt.Sprintf("ln -s ../ %s", l.subPathDir))
// Pod should fail
testPodFailSubpath(input.f, input.pod, false)
testPodFailSubpath(f, l.pod, false)
})
It("should support creating multiple subpath from same volumes [Slow]", func() {
init()
defer cleanup()
subpathDir1 := filepath.Join(volumePath, "subpath1")
subpathDir2 := filepath.Join(volumePath, "subpath2")
filepath1 := filepath.Join("/test-subpath1", fileName)
filepath2 := filepath.Join("/test-subpath2", fileName)
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2))
setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2))
addSubpathVolumeContainer(&input.pod.Spec.Containers[0], v1.VolumeMount{
addSubpathVolumeContainer(&l.pod.Spec.Containers[0], v1.VolumeMount{
Name: volumeName,
MountPath: "/test-subpath1",
SubPath: "subpath1",
})
addSubpathVolumeContainer(&input.pod.Spec.Containers[0], v1.VolumeMount{
addSubpathVolumeContainer(&l.pod.Spec.Containers[0], v1.VolumeMount{
Name: volumeName,
MountPath: "/test-subpath2",
SubPath: "subpath2",
})
// Write the files from container 0 and instantly read them back
addMultipleWrites(&input.pod.Spec.Containers[0], filepath1, filepath2)
testMultipleReads(input.f, input.pod, 0, filepath1, filepath2)
addMultipleWrites(&l.pod.Spec.Containers[0], filepath1, filepath2)
testMultipleReads(f, l.pod, 0, filepath1, filepath2)
})
It("should support restarting containers using directory as subpath [Slow]", func() {
// Create the directory
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %v; touch %v", input.subPathDir, probeFilePath))
init()
defer cleanup()
testPodContainerRestart(input.f, input.pod)
// Create the directory
setInitCommand(l.pod, fmt.Sprintf("mkdir -p %v; touch %v", l.subPathDir, probeFilePath))
testPodContainerRestart(f, l.pod)
})
It("should support restarting containers using file as subpath [Slow]", func() {
// Create the file
setInitCommand(input.pod, fmt.Sprintf("touch %v; touch %v", input.subPathDir, probeFilePath))
init()
defer cleanup()
testPodContainerRestart(input.f, input.pod)
// Create the file
setInitCommand(l.pod, fmt.Sprintf("touch %v; touch %v", l.subPathDir, probeFilePath))
testPodContainerRestart(f, l.pod)
})
It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow]", func() {
testSubpathReconstruction(input.f, input.pod, false)
init()
defer cleanup()
testSubpathReconstruction(f, l.pod, false)
})
It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow]", func() {
if strings.HasPrefix(input.volType, "hostPath") || strings.HasPrefix(input.volType, "csi-hostpath") {
init()
defer cleanup()
if strings.HasPrefix(l.resource.volType, "hostPath") || strings.HasPrefix(l.resource.volType, "csi-hostpath") {
// TODO: This skip should be removed once #61446 is fixed
framework.Skipf("%s volume type does not support reconstruction, skipping", input.volType)
framework.Skipf("%s volume type does not support reconstruction, skipping", l.resource.volType)
}
testSubpathReconstruction(input.f, input.pod, true)
testSubpathReconstruction(f, l.pod, true)
})
It("should support readOnly directory specified in the volumeMount", func() {
init()
defer cleanup()
// Create the directory
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s", input.subPathDir))
setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir))
// Write the file in the volume from init container 2
setWriteCommand(input.filePathInVolume, &input.pod.Spec.InitContainers[2])
setWriteCommand(l.filePathInVolume, &l.pod.Spec.InitContainers[2])
// Read it from inside the subPath from container 0
input.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
testReadFile(input.f, input.filePathInSubpath, input.pod, 0)
l.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
testReadFile(f, l.filePathInSubpath, l.pod, 0)
})
It("should support readOnly file specified in the volumeMount", func() {
init()
defer cleanup()
// Create the file
setInitCommand(input.pod, fmt.Sprintf("touch %s", input.subPathDir))
setInitCommand(l.pod, fmt.Sprintf("touch %s", l.subPathDir))
// Write the file in the volume from init container 2
setWriteCommand(input.subPathDir, &input.pod.Spec.InitContainers[2])
setWriteCommand(l.subPathDir, &l.pod.Spec.InitContainers[2])
// Read it from inside the subPath from container 0
input.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
testReadFile(input.f, volumePath, input.pod, 0)
l.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
testReadFile(f, volumePath, l.pod, 0)
})
It("should support existing directories when readOnly specified in the volumeSource", func() {
if input.roVol == nil {
framework.Skipf("Volume type %v doesn't support readOnly source", input.volType)
init()
defer cleanup()
if l.roVolSource == nil {
framework.Skipf("Volume type %v doesn't support readOnly source", l.resource.volType)
}
pod := input.pod.DeepCopy()
origpod := l.pod.DeepCopy()
// Create the directory
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s", input.subPathDir))
setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir))
// Write the file in the subPath from init container 1
setWriteCommand(input.filePathInSubpath, &input.pod.Spec.InitContainers[1])
setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1])
// Read it from inside the subPath from container 0
testReadFile(input.f, input.filePathInSubpath, input.pod, 0)
testReadFile(f, l.filePathInSubpath, l.pod, 0)
// Reset the pod
input.pod = pod
l.pod = origpod
// Set volume source to read only
input.pod.Spec.Volumes[0].VolumeSource = *input.roVol
l.pod.Spec.Volumes[0].VolumeSource = *l.roVolSource
// Read it from inside the subPath from container 0
testReadFile(input.f, input.filePathInSubpath, input.pod, 0)
testReadFile(f, l.filePathInSubpath, l.pod, 0)
})
It("should verify container cannot write to subpath readonly volumes", func() {
if input.roVol == nil {
framework.Skipf("Volume type %v doesn't support readOnly source", input.volType)
init()
defer cleanup()
if l.roVolSource == nil {
framework.Skipf("Volume type %v doesn't support readOnly source", l.resource.volType)
}
// Format the volume while it's writable
formatVolume(input.f, input.formatPod)
formatVolume(f, l.formatPod)
// Set volume source to read only
input.pod.Spec.Volumes[0].VolumeSource = *input.roVol
l.pod.Spec.Volumes[0].VolumeSource = *l.roVolSource
// Write the file in the volume from container 0
setWriteCommand(input.subPathDir, &input.pod.Spec.Containers[0])
setWriteCommand(l.subPathDir, &l.pod.Spec.Containers[0])
// Pod should fail
testPodFailSubpath(input.f, input.pod, true)
testPodFailSubpath(f, l.pod, true)
})
It("should be able to unmount after the subpath directory is deleted", func() {
// Change volume container to busybox so we can exec later
input.pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox)
input.pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
init()
defer cleanup()
By(fmt.Sprintf("Creating pod %s", input.pod.Name))
removeUnusedContainers(input.pod)
pod, err := input.f.ClientSet.CoreV1().Pods(input.f.Namespace.Name).Create(input.pod)
// Change volume container to busybox so we can exec later
l.pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox)
l.pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
By(fmt.Sprintf("Creating pod %s", l.pod.Name))
removeUnusedContainers(l.pod)
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(l.pod)
Expect(err).ToNot(HaveOccurred(), "while creating pod")
defer func() {
By(fmt.Sprintf("Deleting pod %s", pod.Name))
framework.DeletePodWithWait(input.f, input.f.ClientSet, pod)
framework.DeletePodWithWait(f, f.ClientSet, pod)
}()
// Wait for pod to be running
err = framework.WaitForPodRunningInNamespace(input.f.ClientSet, pod)
err = framework.WaitForPodRunningInNamespace(f.ClientSet, l.pod)
Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running")
// Exec into container that mounted the volume, delete subpath directory
rmCmd := fmt.Sprintf("rm -rf %s", input.subPathDir)
_, err = podContainerExec(pod, 1, rmCmd)
rmCmd := fmt.Sprintf("rm -rf %s", l.subPathDir)
_, err = podContainerExec(l.pod, 1, rmCmd)
Expect(err).ToNot(HaveOccurred(), "while removing subpath directory")
// Delete pod (from defer) and wait for it to be successfully deleted


@@ -25,35 +25,53 @@ import (
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
// TestDriver represents an interface for a driver to be tested in TestSuite
// TestDriver represents an interface for a driver to be tested in TestSuite.
// Except for GetDriverInfo, all methods will be called at test runtime and thus
// can use framework.Skipf, framework.Fatal, Gomega assertions, etc.
type TestDriver interface {
// GetDriverInfo returns DriverInfo for the TestDriver
// GetDriverInfo returns DriverInfo for the TestDriver. This must be static
// information.
GetDriverInfo() *DriverInfo
// CreateDriver creates all driver resources that are required for the TestDriver methods,
// except CreateVolume
CreateDriver()
// CleanupDriver cleans up all the resources that were created in CreateDriver
CleanupDriver()
// SkipUnsupportedTest skips the test if the Testpattern is not suitable to test with the TestDriver
// SkipUnsupportedTest skips test if Testpattern is not
// suitable to test with the TestDriver. It gets called after
// parsing parameters of the test suite and before the
// framework is initialized. Cheap tests that just check
// parameters like the cloud provider can and should be
// done in SkipUnsupportedTest to avoid setting up more
// expensive resources like framework.Framework. Tests that
// depend on a connection to the cluster can be done in
// PrepareTest once the framework is ready.
SkipUnsupportedTest(testpatterns.TestPattern)
// PrepareTest is called at test execution time each time a new test case is about to start.
// It sets up all necessary resources and returns the per-test configuration
// plus a cleanup function that frees all allocated resources.
PrepareTest(f *framework.Framework) (*PerTestConfig, func())
}
// TestVolume is the result of PreprovisionedVolumeTestDriver.CreateVolume.
// The only common functionality is to delete it. Individual driver interfaces
// have additional methods that work with volumes created by them.
type TestVolume interface {
DeleteVolume()
}
// PreprovisionedVolumeTestDriver represents an interface for a TestDriver that has pre-provisioned volume
type PreprovisionedVolumeTestDriver interface {
TestDriver
// CreateVolume creates a pre-provisioned volume.
CreateVolume(testpatterns.TestVolType) interface{}
// DeleteVolume deletes a volume that is created in CreateVolume
DeleteVolume(testpatterns.TestVolType, interface{})
// CreateVolume creates a pre-provisioned volume of the desired volume type.
CreateVolume(config *PerTestConfig, volumeType testpatterns.TestVolType) TestVolume
}
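Taken together, these interfaces now separate static driver information from per-test state. A minimal sketch of a pre-provisioned driver against the new API; every name here (exampleDriver, exampleVolume, the "example" prefix) is hypothetical, and the PerTestConfig fields are assumed from their use elsewhere in this PR:

// exampleDriver is illustrative only.
type exampleDriver struct {
	info DriverInfo
}

// Compile-time check that the sketch satisfies the new interface.
var _ PreprovisionedVolumeTestDriver = &exampleDriver{}

func (d *exampleDriver) GetDriverInfo() *DriverInfo { return &d.info }

func (d *exampleDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
	// Keep this cheap: only parameter checks, no cluster access.
	if pattern.VolType == testpatterns.DynamicPV {
		framework.Skipf("Driver %q does not support dynamic provisioning - skipping", d.info.Name)
	}
}

func (d *exampleDriver) PrepareTest(f *framework.Framework) (*PerTestConfig, func()) {
	config := &PerTestConfig{
		Driver:    d,
		Prefix:    "example",
		Framework: f,
	}
	// Allocate per-test resources here and free them in the returned cleanup.
	return config, func() {}
}

// exampleVolume implements TestVolume; DeleteVolume would remove whatever
// backing storage CreateVolume allocated.
type exampleVolume struct{}

func (v *exampleVolume) DeleteVolume() {}

func (d *exampleDriver) CreateVolume(config *PerTestConfig, volType testpatterns.TestVolType) TestVolume {
	return &exampleVolume{}
}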
// InlineVolumeTestDriver represents an interface for a TestDriver that supports InlineVolume
type InlineVolumeTestDriver interface {
PreprovisionedVolumeTestDriver
// GetVolumeSource returns a volumeSource for an inline volume.
// It will set readOnly and fsType in the volumeSource if the TestDriver supports both of them.
// It will return nil if the TestDriver doesn't support either of the parameters.
GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource
GetVolumeSource(readOnly bool, fsType string, testVolume TestVolume) *v1.VolumeSource
}
// PreprovisionedPVTestDriver represents an interface for a TestDriver that supports PreprovisionedPV
@@ -62,8 +80,7 @@ type PreprovisionedPVTestDriver interface {
// GetPersistentVolumeSource returns a PersistentVolumeSource with volume node affinity for a pre-provisioned Persistent Volume.
// It will set readOnly and fsType in the PersistentVolumeSource if the TestDriver supports both of them.
// It will return nil if the TestDriver doesn't support either of the parameters.
// Volume node affinity is optional; it will be nil for volumes that do not have volume node affinity.
GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity)
GetPersistentVolumeSource(readOnly bool, fsType string, testVolume TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity)
}
// DynamicPVTestDriver represents an interface for a TestDriver that supports DynamicPV
@@ -72,7 +89,7 @@ type DynamicPVTestDriver interface {
// GetDynamicProvisionStorageClass returns a StorageClass for dynamically provisioning a Persistent Volume.
// It will set fsType in the StorageClass if the TestDriver supports it.
// It will return nil if the TestDriver doesn't support it.
GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass
GetDynamicProvisionStorageClass(config *PerTestConfig, fsType string) *storagev1.StorageClass
// GetClaimSize returns the size of the volume that is to be provisioned ("5Gi", "1Mi").
// The size must be chosen so that the resulting volume is large enough for all
@@ -85,7 +102,7 @@ type SnapshottableTestDriver interface {
TestDriver
// GetSnapshotClass returns a SnapshotClass to create snapshot.
// It will return nil, if the TestDriver doesn't support it.
GetSnapshotClass() *unstructured.Unstructured
GetSnapshotClass(config *PerTestConfig) *unstructured.Unstructured
}
// Capability represents a feature that a volume plugin supports
@@ -106,7 +123,7 @@ const (
CapMultiPODs Capability = "multipods"
)
// DriverInfo represents a combination of parameters to be used in implementation of TestDriver
// DriverInfo represents static information about a TestDriver.
type DriverInfo struct {
Name string // Name of the driver
FeatureTag string // FeatureTag for the driver
@@ -116,14 +133,15 @@ type DriverInfo struct {
SupportedMountOption sets.String // Map of string for supported mount option
RequiredMountOption sets.String // Map of string for required mount option (Optional)
Capabilities map[Capability]bool // Map that represents plugin capabilities
Config TestConfig // Test configuration for the current test.
}
// TestConfig represents parameters that control test execution.
// They can still be modified after defining tests, for example
// in a BeforeEach or when creating the driver.
type TestConfig struct {
// PerTestConfig represents parameters that control test execution.
// One instance gets allocated for each test and is then passed
// via pointer to functions involved in the test.
type PerTestConfig struct {
// The test driver for the test.
Driver TestDriver
// Some short word that gets inserted into dynamically
// generated entities (pods, paths) as first part of the name
// to make debugging easier. Can be the same for different
@@ -148,8 +166,9 @@ type TestConfig struct {
// the configuration that then has to be used to run tests.
// The values above are ignored for such tests.
ServerConfig *framework.VolumeTestConfig
// TopologyEnabled indicates that the Topology feature gate
// should be enabled in external-provisioner
TopologyEnabled bool
}
// GetUniqueDriverName returns a unique driver name that can be used in parallel tests
func (config *PerTestConfig) GetUniqueDriverName() string {
return config.Driver.GetDriverInfo().Name + "-" + config.Framework.UniqueName
}
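Because tests can run in parallel against the same cluster, names of cluster-scoped objects should be derived from this unique name. A hypothetical helper, assuming the fmt import; exampleSCName is not part of this PR:

// exampleSCName is illustrative only: StorageClasses are cluster-scoped,
// so their names must stay unique across parallel runs of the same driver.
func exampleSCName(config *PerTestConfig) string {
	return fmt.Sprintf("%s-sc", config.GetUniqueDriverName())
}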


@@ -74,87 +74,65 @@ func (t *volumeIOTestSuite) getTestSuiteInfo() TestSuiteInfo {
return t.tsInfo
}
func (t *volumeIOTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) {
func (t *volumeIOTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
type local struct {
config *PerTestConfig
testCleanup func()
resource *genericVolumeTestResource
}
var (
dInfo = driver.GetDriverInfo()
l local
)
func createVolumeIOTestInput(pattern testpatterns.TestPattern, resource genericVolumeTestResource) volumeIOTestInput {
var fsGroup *int64
driver := resource.driver
dInfo := driver.GetDriverInfo()
f := dInfo.Config.Framework
fileSizes := createFileSizes(dInfo.MaxFileSize)
volSource := resource.volSource
// No preconditions to test. Normally they would be in a BeforeEach here.
if volSource == nil {
// This intentionally comes after checking the preconditions because it
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("volumeio")
init := func() {
l = local{}
// Now do the more expensive test initialization.
l.config, l.testCleanup = driver.PrepareTest(f)
l.resource = createGenericVolumeTestResource(driver, l.config, pattern)
if l.resource.volSource == nil {
framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
}
}
cleanup := func() {
if l.resource != nil {
l.resource.cleanupResource()
l.resource = nil
}
if l.testCleanup != nil {
l.testCleanup()
l.testCleanup = nil
}
}
It("should write files of various sizes, verify size, validate content [Slow]", func() {
init()
defer cleanup()
cs := f.ClientSet
fileSizes := createFileSizes(dInfo.MaxFileSize)
testFile := fmt.Sprintf("%s_io_test_%s", dInfo.Name, f.Namespace.Name)
var fsGroup *int64
if dInfo.Capabilities[CapFsGroup] {
fsGroupVal := int64(1234)
fsGroup = &fsGroupVal
}
return volumeIOTestInput{
f: f,
name: dInfo.Name,
config: &dInfo.Config,
volSource: *volSource,
testFile: fmt.Sprintf("%s_io_test_%s", dInfo.Name, f.Namespace.Name),
podSec: v1.PodSecurityContext{
podSec := v1.PodSecurityContext{
FSGroup: fsGroup,
},
fileSizes: fileSizes,
}
}
func (t *volumeIOTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) {
Context(getTestNameStr(t, pattern), func() {
var (
resource genericVolumeTestResource
input volumeIOTestInput
needsCleanup bool
)
BeforeEach(func() {
needsCleanup = false
// Skip unsupported tests to avoid unnecessary resource initialization
skipUnsupportedTest(t, driver, pattern)
needsCleanup = true
// Setup test resource for driver and testpattern
resource = genericVolumeTestResource{}
resource.setupResource(driver, pattern)
// Create test input
input = createVolumeIOTestInput(pattern, resource)
})
AfterEach(func() {
if needsCleanup {
resource.cleanupResource(driver, pattern)
}
})
execTestVolumeIO(&input)
})
}
type volumeIOTestInput struct {
f *framework.Framework
name string
config *TestConfig
volSource v1.VolumeSource
testFile string
podSec v1.PodSecurityContext
fileSizes []int64
}
func execTestVolumeIO(input *volumeIOTestInput) {
It("should write files of various sizes, verify size, validate content [Slow]", func() {
f := input.f
cs := f.ClientSet
err := testVolumeIO(f, cs, convertTestConfig(input.config), input.volSource, &input.podSec, input.testFile, input.fileSizes)
err := testVolumeIO(f, cs, convertTestConfig(l.config), *l.resource.volSource, &podSec, testFile, fileSizes)
Expect(err).NotTo(HaveOccurred())
})
}


@@ -26,6 +26,7 @@ import (
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -61,105 +62,41 @@ func (t *volumeModeTestSuite) getTestSuiteInfo() TestSuiteInfo {
return t.tsInfo
}
func (t *volumeModeTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) {
}
func createVolumeModeTestInput(pattern testpatterns.TestPattern, resource volumeModeTestResource) volumeModeTestInput {
driver := resource.driver
dInfo := driver.GetDriverInfo()
f := dInfo.Config.Framework
return volumeModeTestInput{
f: f,
sc: resource.sc,
pvc: resource.pvc,
pv: resource.pv,
testVolType: pattern.VolType,
nodeName: dInfo.Config.ClientNodeName,
volMode: pattern.VolMode,
isBlockSupported: dInfo.Capabilities[CapBlock],
}
}
func getVolumeModeTestFunc(pattern testpatterns.TestPattern, driver TestDriver) func(*volumeModeTestInput) {
dInfo := driver.GetDriverInfo()
isBlockSupported := dInfo.Capabilities[CapBlock]
volMode := pattern.VolMode
volType := pattern.VolType
switch volType {
case testpatterns.PreprovisionedPV:
if volMode == v1.PersistentVolumeBlock && !isBlockSupported {
return testVolumeModeFailForPreprovisionedPV
}
return testVolumeModeSuccessForPreprovisionedPV
case testpatterns.DynamicPV:
if volMode == v1.PersistentVolumeBlock && !isBlockSupported {
return testVolumeModeFailForDynamicPV
}
return testVolumeModeSuccessForDynamicPV
default:
framework.Failf("Volume mode test doesn't support volType: %v", volType)
}
return nil
}
func (t *volumeModeTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) {
Context(getTestNameStr(t, pattern), func() {
var (
resource volumeModeTestResource
input volumeModeTestInput
testFunc func(*volumeModeTestInput)
needsCleanup bool
)
testFunc = getVolumeModeTestFunc(pattern, driver)
BeforeEach(func() {
needsCleanup = false
// Skip unsupported tests to avoid unnecessary resource initialization
skipUnsupportedTest(t, driver, pattern)
needsCleanup = true
// Setup test resource for driver and testpattern
resource = volumeModeTestResource{}
resource.setupResource(driver, pattern)
// Create test input
input = createVolumeModeTestInput(pattern, resource)
})
AfterEach(func() {
if needsCleanup {
resource.cleanupResource(driver, pattern)
}
})
testFunc(&input)
})
}
type volumeModeTestResource struct {
driver TestDriver
func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
type local struct {
config *PerTestConfig
testCleanup func()
cs clientset.Interface
ns *v1.Namespace
sc *storagev1.StorageClass
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
driverTestResource interface{}
volume TestVolume
}
var (
dInfo = driver.GetDriverInfo()
l local
)
var _ TestResource = &volumeModeTestResource{}
// No preconditions to test. Normally they would be in a BeforeEach here.
// This intentionally comes after checking the preconditions because it
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("volumemode")
init := func() {
l = local{}
l.ns = f.Namespace
l.cs = f.ClientSet
// Now do the more expensive test initialization.
l.config, l.testCleanup = driver.PrepareTest(f)
func (s *volumeModeTestResource) setupResource(driver TestDriver, pattern testpatterns.TestPattern) {
s.driver = driver
dInfo := driver.GetDriverInfo()
f := dInfo.Config.Framework
ns := f.Namespace
fsType := pattern.FsType
volBindMode := storagev1.VolumeBindingImmediate
volMode := pattern.VolMode
volType := pattern.VolType
var (
scName string
@@ -168,210 +105,211 @@ func (s *volumeModeTestResource) setupResource(driver TestDriver, pattern testpa
)
// Create volume for pre-provisioned volume tests
s.driverTestResource = CreateVolume(driver, volType)
l.volume = CreateVolume(driver, l.config, pattern.VolType)
switch volType {
switch pattern.VolType {
case testpatterns.PreprovisionedPV:
if volMode == v1.PersistentVolumeBlock {
scName = fmt.Sprintf("%s-%s-sc-for-block", ns.Name, dInfo.Name)
} else if volMode == v1.PersistentVolumeFilesystem {
scName = fmt.Sprintf("%s-%s-sc-for-file", ns.Name, dInfo.Name)
if pattern.VolMode == v1.PersistentVolumeBlock {
scName = fmt.Sprintf("%s-%s-sc-for-block", l.ns.Name, dInfo.Name)
} else if pattern.VolMode == v1.PersistentVolumeFilesystem {
scName = fmt.Sprintf("%s-%s-sc-for-file", l.ns.Name, dInfo.Name)
}
if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, s.driverTestResource)
pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, l.volume)
if pvSource == nil {
framework.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name)
}
sc, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, volMode, *pvSource, volumeNodeAffinity)
s.sc = sc
s.pv = framework.MakePersistentVolume(pvConfig)
s.pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns.Name)
storageClass, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, pattern.VolMode, *pvSource, volumeNodeAffinity)
l.sc = storageClass
l.pv = framework.MakePersistentVolume(pvConfig)
l.pvc = framework.MakePersistentVolumeClaim(pvcConfig, l.ns.Name)
}
case testpatterns.DynamicPV:
if dDriver, ok := driver.(DynamicPVTestDriver); ok {
s.sc = dDriver.GetDynamicProvisionStorageClass(fsType)
if s.sc == nil {
l.sc = dDriver.GetDynamicProvisionStorageClass(l.config, fsType)
if l.sc == nil {
framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
}
s.sc.VolumeBindingMode = &volBindMode
l.sc.VolumeBindingMode = &volBindMode
claimSize := dDriver.GetClaimSize()
s.pvc = getClaim(claimSize, ns.Name)
s.pvc.Spec.StorageClassName = &s.sc.Name
s.pvc.Spec.VolumeMode = &volMode
l.pvc = getClaim(claimSize, l.ns.Name)
l.pvc.Spec.StorageClassName = &l.sc.Name
l.pvc.Spec.VolumeMode = &pattern.VolMode
}
default:
framework.Failf("Volume mode test doesn't support: %s", volType)
framework.Failf("Volume mode test doesn't support: %s", pattern.VolType)
}
}
func (s *volumeModeTestResource) cleanupResource(driver TestDriver, pattern testpatterns.TestPattern) {
dInfo := driver.GetDriverInfo()
f := dInfo.Config.Framework
cs := f.ClientSet
ns := f.Namespace
volType := pattern.VolType
cleanup := func() {
if l.pv != nil || l.pvc != nil {
By("Deleting pv and pvc")
errs := framework.PVPVCCleanup(cs, ns.Name, s.pv, s.pvc)
errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, l.pv, l.pvc)
if len(errs) > 0 {
framework.Failf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs))
framework.Logf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs))
}
l.pv = nil
l.pvc = nil
}
if l.sc != nil {
By("Deleting sc")
if s.sc != nil {
deleteStorageClass(cs, s.sc.Name)
deleteStorageClass(f.ClientSet, l.sc.Name)
l.sc = nil
}
// Cleanup volume for pre-provisioned volume tests
DeleteVolume(driver, volType, s.driverTestResource)
if l.volume != nil {
l.volume.DeleteVolume()
l.volume = nil
}
type volumeModeTestInput struct {
f *framework.Framework
sc *storagev1.StorageClass
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
testVolType testpatterns.TestVolType
nodeName string
volMode v1.PersistentVolumeMode
isBlockSupported bool
if l.testCleanup != nil {
l.testCleanup()
l.testCleanup = nil
}
}
func testVolumeModeFailForPreprovisionedPV(input *volumeModeTestInput) {
// We register different tests depending on the driver
isBlockSupported := dInfo.Capabilities[CapBlock]
switch pattern.VolType {
case testpatterns.PreprovisionedPV:
if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
It("should fail to create pod by failing to mount volume", func() {
f := input.f
cs := f.ClientSet
ns := f.Namespace
init()
defer cleanup()
var err error
By("Creating sc")
input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc)
l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
input.pv, err = cs.CoreV1().PersistentVolumes().Create(input.pv)
l.pv, err = l.cs.CoreV1().PersistentVolumes().Create(l.pv)
Expect(err).NotTo(HaveOccurred())
// Prebind pv
input.pvc.Spec.VolumeName = input.pv.Name
input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc)
l.pvc.Spec.VolumeName = l.pv.Name
l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, input.pv, input.pvc))
framework.ExpectNoError(framework.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc))
By("Creating pod")
pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc},
pod, err := framework.CreateSecPodWithNodeName(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
false, "", false, false, framework.SELinuxLabel,
nil, input.nodeName, framework.PodStartTimeout)
nil, l.config.ClientNodeName, framework.PodStartTimeout)
defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
}()
Expect(err).To(HaveOccurred())
})
}
func testVolumeModeSuccessForPreprovisionedPV(input *volumeModeTestInput) {
} else {
It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {
f := input.f
cs := f.ClientSet
ns := f.Namespace
init()
defer cleanup()
var err error
By("Creating sc")
input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc)
l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
input.pv, err = cs.CoreV1().PersistentVolumes().Create(input.pv)
l.pv, err = l.cs.CoreV1().PersistentVolumes().Create(l.pv)
Expect(err).NotTo(HaveOccurred())
// Prebind pv
input.pvc.Spec.VolumeName = input.pv.Name
input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc)
l.pvc.Spec.VolumeName = l.pv.Name
l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, input.pv, input.pvc))
framework.ExpectNoError(framework.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc))
By("Creating pod")
pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc},
pod, err := framework.CreateSecPodWithNodeName(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
false, "", false, false, framework.SELinuxLabel,
nil, input.nodeName, framework.PodStartTimeout)
nil, l.config.ClientNodeName, framework.PodStartTimeout)
defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
}()
Expect(err).NotTo(HaveOccurred())
By("Checking if persistent volume exists as expected volume mode")
utils.CheckVolumeModeOfPath(pod, input.volMode, "/mnt/volume1")
utils.CheckVolumeModeOfPath(pod, pattern.VolMode, "/mnt/volume1")
By("Checking if read/write to persistent volume works properly")
utils.CheckReadWriteToPath(pod, input.volMode, "/mnt/volume1")
utils.CheckReadWriteToPath(pod, pattern.VolMode, "/mnt/volume1")
})
// TODO(mkimuram): Add more tests
}
func testVolumeModeFailForDynamicPV(input *volumeModeTestInput) {
case testpatterns.DynamicPV:
if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
It("should fail in binding dynamic provisioned PV to PVC", func() {
f := input.f
cs := f.ClientSet
ns := f.Namespace
init()
defer cleanup()
var err error
By("Creating sc")
input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc)
l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc)
l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, input.pvc.Namespace, input.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, l.cs, l.pvc.Namespace, l.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).To(HaveOccurred())
})
}
func testVolumeModeSuccessForDynamicPV(input *volumeModeTestInput) {
} else {
It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {
f := input.f
cs := f.ClientSet
ns := f.Namespace
init()
defer cleanup()
var err error
By("Creating sc")
input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc)
l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc)
l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, input.pvc.Namespace, input.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, l.cs, l.pvc.Namespace, l.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
input.pvc, err = cs.CoreV1().PersistentVolumeClaims(input.pvc.Namespace).Get(input.pvc.Name, metav1.GetOptions{})
l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.pvc.Namespace).Get(l.pvc.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
input.pv, err = cs.CoreV1().PersistentVolumes().Get(input.pvc.Spec.VolumeName, metav1.GetOptions{})
l.pv, err = l.cs.CoreV1().PersistentVolumes().Get(l.pvc.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("Creating pod")
pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc},
pod, err := framework.CreateSecPodWithNodeName(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
false, "", false, false, framework.SELinuxLabel,
nil, input.nodeName, framework.PodStartTimeout)
nil, l.config.ClientNodeName, framework.PodStartTimeout)
defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
}()
Expect(err).NotTo(HaveOccurred())
By("Checking if persistent volume exists as expected volume mode")
utils.CheckVolumeModeOfPath(pod, input.volMode, "/mnt/volume1")
utils.CheckVolumeModeOfPath(pod, pattern.VolMode, "/mnt/volume1")
By("Checking if read/write to persistent volume works properly")
utils.CheckReadWriteToPath(pod, input.volMode, "/mnt/volume1")
utils.CheckReadWriteToPath(pod, pattern.VolMode, "/mnt/volume1")
})
// TODO(mkimuram): Add more tests
}
default:
framework.Failf("Volume mode test doesn't support volType: %v", pattern.VolType)
}
}
func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1.VolumeBindingMode,
volMode v1.PersistentVolumeMode, pvSource v1.PersistentVolumeSource, volumeNodeAffinity *v1.VolumeNodeAffinity) (*storagev1.StorageClass,
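The net effect of the hunk above: the old `volumeModeTestInput` struct and the four per-volType helper functions collapse into one `defineTests` body, where preconditions are evaluated at registration time and all expensive setup moves into `init()`/`cleanup()` inside each `It`. A minimal sketch of that lifecycle, assuming the file's existing imports and the `TestDriver`/`PerTestConfig` API shown in the diff (the suite name and the empty test body are placeholders):

```go
// Sketch only: illustrates the deferred-setup pattern, not a real suite.
func defineTestsSketch(driver TestDriver, pattern testpatterns.TestPattern) {
	type local struct {
		config      *PerTestConfig
		testCleanup func()
	}
	var l local

	// Registers its own BeforeEach/AfterEach, so f is only safe to use
	// inside an It or Context callback.
	f := framework.NewDefaultFramework("sketch")

	init := func() {
		l = local{}
		// Driver setup happens lazily, inside the It: a skipped
		// driver/pattern combination never creates any resources.
		l.config, l.testCleanup = driver.PrepareTest(f)
	}
	cleanup := func() {
		if l.testCleanup != nil {
			l.testCleanup()
			l.testCleanup = nil
		}
	}

	It("does something with the volume", func() {
		init()
		defer cleanup()
		// ... test body using l.config and f.ClientSet ...
	})
}
```

This is what lets `CreateDriver`-style work be delayed until a test actually runs, which is the point of the PR.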


@@ -89,101 +89,80 @@ func skipExecTest(driver TestDriver) {
}
}
-func createVolumesTestInput(pattern testpatterns.TestPattern, resource genericVolumeTestResource) volumesTestInput {
-    var fsGroup *int64
-    driver := resource.driver
-    dInfo := driver.GetDriverInfo()
-    f := dInfo.Config.Framework
-    volSource := resource.volSource
+func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
+    type local struct {
+        config      *PerTestConfig
+        testCleanup func()
-    if volSource == nil {
+        resource *genericVolumeTestResource
+    }
+    var dInfo = driver.GetDriverInfo()
+    var l local
+    // No preconditions to test. Normally they would be in a BeforeEach here.
+    // This intentionally comes after checking the preconditions because it
+    // registers its own BeforeEach which creates the namespace. Beware that it
+    // also registers an AfterEach which renders f unusable. Any code using
+    // f must run inside an It or Context callback.
+    f := framework.NewDefaultFramework("volumeio")
+    init := func() {
+        l = local{}
+        // Now do the more expensive test initialization.
+        l.config, l.testCleanup = driver.PrepareTest(f)
+        l.resource = createGenericVolumeTestResource(driver, l.config, pattern)
+        if l.resource.volSource == nil {
             framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
         }
-    if dInfo.Capabilities[CapFsGroup] {
-        fsGroupVal := int64(1234)
-        fsGroup = &fsGroupVal
-    }
-    return volumesTestInput{
-        f:        f,
-        name:     dInfo.Name,
-        config:   &dInfo.Config,
-        fsGroup:  fsGroup,
-        resource: resource,
-        fsType:   pattern.FsType,
-        tests: []framework.VolumeTest{
+    cleanup := func() {
+        if l.resource != nil {
+            l.resource.cleanupResource()
+            l.resource = nil
         }
+        if l.testCleanup != nil {
+            l.testCleanup()
+            l.testCleanup = nil
+        }
+    }
+    It("should be mountable", func() {
+        skipPersistenceTest(driver)
+        init()
+        defer func() {
+            framework.VolumeTestCleanup(f, convertTestConfig(l.config))
+            cleanup()
+        }()
+        tests := []framework.VolumeTest{
             {
-                Volume: *volSource,
+                Volume: *l.resource.volSource,
                 File:   "index.html",
                 // Must match content
                 ExpectedContent: fmt.Sprintf("Hello from %s from namespace %s",
                     dInfo.Name, f.Namespace.Name),
             },
-        },
         }
+        config := convertTestConfig(l.config)
+        framework.InjectHtml(f.ClientSet, config, tests[0].Volume, tests[0].ExpectedContent)
+        var fsGroup *int64
+        if dInfo.Capabilities[CapFsGroup] {
+            fsGroupVal := int64(1234)
+            fsGroup = &fsGroupVal
+        }
-func (t *volumesTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) {
-    Context(getTestNameStr(t, pattern), func() {
-        var (
-            resource     genericVolumeTestResource
-            input        volumesTestInput
-            needsCleanup bool
-        )
-        BeforeEach(func() {
-            needsCleanup = false
-            // Skip unsupported tests to avoid unnecessary resource initialization
-            skipUnsupportedTest(t, driver, pattern)
-            needsCleanup = true
-            // Setup test resource for driver and testpattern
-            resource = genericVolumeTestResource{}
-            resource.setupResource(driver, pattern)
-            // Create test input
-            input = createVolumesTestInput(pattern, resource)
+        framework.TestVolumeClient(f.ClientSet, config, fsGroup, pattern.FsType, tests)
         })
-        AfterEach(func() {
-            if needsCleanup {
-                resource.cleanupResource(driver, pattern)
-            }
-        })
-        testVolumes(&input)
-    })
-}
-type volumesTestInput struct {
-    f        *framework.Framework
-    name     string
-    config   *TestConfig
-    fsGroup  *int64
-    fsType   string
-    tests    []framework.VolumeTest
-    resource genericVolumeTestResource
-}
-func testVolumes(input *volumesTestInput) {
-    It("should be mountable", func() {
-        f := input.f
-        cs := f.ClientSet
-        defer framework.VolumeTestCleanup(f, convertTestConfig(input.config))
-        skipPersistenceTest(input.resource.driver)
-        volumeTest := input.tests
-        config := convertTestConfig(input.config)
-        framework.InjectHtml(cs, config, volumeTest[0].Volume, volumeTest[0].ExpectedContent)
-        framework.TestVolumeClient(cs, config, input.fsGroup, input.fsType, input.tests)
-    })
     It("should allow exec of files on the volume", func() {
-        f := input.f
-        skipExecTest(input.resource.driver)
+        skipExecTest(driver)
+        init()
+        defer cleanup()
-        testScriptInPod(f, input.resource.volType, input.resource.volSource, input.resource.driver.GetDriverInfo().Config.ClientNodeSelector)
+        testScriptInPod(f, l.resource.volType, l.resource.volSource, l.config.ClientNodeSelector)
     })
 }
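Both `It` blocks above follow the same ordering convention: cheap skip checks run before `init()`, so an unsupported driver/pattern combination bails out before any volume is created. A hedged sketch of that ordering, assuming this file's imports and the `skipExecTest`/`init`/`cleanup` names from the diff (the comment stands in for the real test body):

```go
It("should allow exec of files on the volume", func() {
	skipExecTest(driver) // cheap capability check; may skip the test
	init()               // expensive: PrepareTest plus volume creation
	defer cleanup()
	// ... run the script in a pod against l.resource.volSource ...
})
```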


@@ -212,21 +212,22 @@ func testZonalDelayedBinding(c clientset.Interface, ns string, specifyAllowedTop
     action := "creating claims with class with waitForFirstConsumer"
     suffix := "delayed"
     var topoZone string
-    class := newStorageClass(test, ns, suffix)
+    test.Client = c
+    test.Class = newStorageClass(test, ns, suffix)
     if specifyAllowedTopology {
         action += " and allowedTopologies"
         suffix += "-topo"
         topoZone = getRandomClusterZone(c)
-        addSingleZoneAllowedTopologyToStorageClass(c, class, topoZone)
+        addSingleZoneAllowedTopologyToStorageClass(c, test.Class, topoZone)
     }
     By(action)
     var claims []*v1.PersistentVolumeClaim
     for i := 0; i < pvcCount; i++ {
         claim := newClaim(test, ns, suffix)
-        claim.Spec.StorageClassName = &class.Name
+        claim.Spec.StorageClassName = &test.Class.Name
         claims = append(claims, claim)
     }
-    pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class, nil /* node selector */, false /* expect unschedulable */)
+    pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */)
     if node == nil {
         framework.Failf("unexpected nil node found")
     }
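After this hunk, per-test configuration lives on the `StorageClassTest` value itself, so the binding helper takes only per-call arguments. A hedged sketch of a caller, assuming this file's helpers (`newStorageClass`, `newClaim`), a clientset `c`, and a namespace name `ns` as in `testZonalDelayedBinding`; the single claim and the "delayed" suffix are illustrative:

```go
// Sketch only: one claim instead of the loop used above.
test.Client = c
test.Class = newStorageClass(test, ns, "delayed")
claim := newClaim(test, ns, "delayed")
claim.Spec.StorageClassName = &test.Class.Name
pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(
	[]*v1.PersistentVolumeClaim{claim},
	nil,   /* node selector */
	false, /* expect unschedulable */
)
_, _ = pvs, node // checked by the real test
```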
@@ -439,10 +440,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
                 By("Testing " + test.Name)
                 suffix := fmt.Sprintf("%d", i)
-                class := newStorageClass(test, ns, suffix)
-                claim := newClaim(test, ns, suffix)
-                claim.Spec.StorageClassName = &class.Name
-                testsuites.TestDynamicProvisioning(test, c, claim, class)
+                test.Client = c
+                test.Class = newStorageClass(test, ns, suffix)
+                test.Claim = newClaim(test, ns, suffix)
+                test.Claim.Spec.StorageClassName = &test.Class.Name
+                test.TestDynamicProvisioning()
             }
             // Run the last test with storage.k8s.io/v1beta1 on pvc
@@ -454,9 +456,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
             Expect(err).NotTo(HaveOccurred())
             defer deleteStorageClass(c, class.Name)
-            claim := newClaim(*betaTest, ns, "beta")
-            claim.Spec.StorageClassName = &(class.Name)
-            testsuites.TestDynamicProvisioning(*betaTest, c, claim, nil)
+            betaTest.Client = c
+            betaTest.Class = nil
+            betaTest.Claim = newClaim(*betaTest, ns, "beta")
+            betaTest.Claim.Spec.StorageClassName = &(class.Name)
+            (*betaTest).TestDynamicProvisioning()
         }
     })
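The pattern repeated through these hunks: `TestDynamicProvisioning` becomes a method, and everything it used to take as parameters (client, claim, class) is now carried as fields of the `StorageClassTest`. A hedged sketch of one complete call, assuming this file's imports and helpers; the name, sizes, and "sketch" suffix are illustrative:

```go
// Sketch only: a self-configured StorageClassTest driving provisioning.
test := testsuites.StorageClassTest{
	Client:       c,
	Name:         "example provisioner test",
	ClaimSize:    "2Gi",
	ExpectedSize: "2Gi",
}
test.Class = newStorageClass(test, ns, "sketch")
test.Claim = newClaim(test, ns, "sketch")
test.Claim.Spec.StorageClassName = &test.Class.Name
pv := test.TestDynamicProvisioning() // returns the provisioned PV
_ = pv
```

Leaving `test.Class` nil (as the v1beta1 case above does) exercises provisioning through a pre-created class instead.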
@@ -464,6 +468,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
         framework.SkipUnlessProviderIs("gce", "gke")
         test := testsuites.StorageClassTest{
+            Client:         c,
             Name:           "HDD PD on GCE/GKE",
             CloudProviders: []string{"gce", "gke"},
             Provisioner:    "kubernetes.io/gce-pd",
@@ -478,12 +483,12 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
                 testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
             },
         }
-        class := newStorageClass(test, ns, "reclaimpolicy")
+        test.Class = newStorageClass(test, ns, "reclaimpolicy")
         retain := v1.PersistentVolumeReclaimRetain
-        class.ReclaimPolicy = &retain
-        claim := newClaim(test, ns, "reclaimpolicy")
-        claim.Spec.StorageClassName = &class.Name
-        pv := testsuites.TestDynamicProvisioning(test, c, claim, class)
+        test.Class.ReclaimPolicy = &retain
+        test.Claim = newClaim(test, ns, "reclaimpolicy")
+        test.Claim.Spec.StorageClassName = &test.Class.Name
+        pv := test.TestDynamicProvisioning()
         By(fmt.Sprintf("waiting for the provisioned PV %q to enter phase %s", pv.Name, v1.VolumeReleased))
         framework.ExpectNoError(framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 1*time.Second, 30*time.Second))
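Because the class in this hunk sets `ReclaimPolicy` to `Retain`, deleting the claim leaves the PV behind in the `Released` phase, which the test then waits for. A hedged sketch of that tail end, assuming this file's imports; the poll interval and timeout are the ones used in the diff:

```go
// Sketch only: Retain keeps the PV after the claim goes away.
retain := v1.PersistentVolumeReclaimRetain
test.Class.ReclaimPolicy = &retain
pv := test.TestDynamicProvisioning()
framework.ExpectNoError(framework.WaitForPersistentVolumePhase(
	v1.VolumeReleased, c, pv.Name, 1*time.Second, 30*time.Second))
```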
@@ -717,17 +722,18 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
         By("creating a StorageClass")
         test := testsuites.StorageClassTest{
+            Client:       c,
             Name:         "external provisioner test",
             Provisioner:  externalPluginName,
             ClaimSize:    "1500Mi",
             ExpectedSize: "1500Mi",
         }
-        class := newStorageClass(test, ns, "external")
-        claim := newClaim(test, ns, "external")
-        claim.Spec.StorageClassName = &(class.Name)
+        test.Class = newStorageClass(test, ns, "external")
+        test.Claim = newClaim(test, ns, "external")
+        test.Claim.Spec.StorageClassName = &test.Class.Name
         By("creating a claim with a external provisioning annotation")
-        testsuites.TestDynamicProvisioning(test, c, claim, class)
+        test.TestDynamicProvisioning()
     })
 })
@@ -737,13 +743,14 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
         By("creating a claim with no annotation")
         test := testsuites.StorageClassTest{
+            Client:       c,
             Name:         "default",
             ClaimSize:    "2Gi",
             ExpectedSize: "2Gi",
         }
-        claim := newClaim(test, ns, "default")
-        testsuites.TestDynamicProvisioning(test, c, claim, nil)
+        test.Claim = newClaim(test, ns, "default")
+        test.TestDynamicProvisioning()
     })
     // Modifying the default storage class can be disruptive to other tests that depend on it
@@ -816,6 +823,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
         serverUrl := "http://" + pod.Status.PodIP + ":8081"
         By("creating a StorageClass")
         test := testsuites.StorageClassTest{
+            Client:      c,
             Name:        "Gluster Dynamic provisioner test",
             Provisioner: "kubernetes.io/glusterfs",
             ClaimSize:   "2Gi",
@@ -823,13 +831,13 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
             Parameters: map[string]string{"resturl": serverUrl},
         }
         suffix := fmt.Sprintf("glusterdptest")
-        class := newStorageClass(test, ns, suffix)
+        test.Class = newStorageClass(test, ns, suffix)
         By("creating a claim object with a suffix for gluster dynamic provisioner")
-        claim := newClaim(test, ns, suffix)
-        claim.Spec.StorageClassName = &class.Name
+        test.Claim = newClaim(test, ns, suffix)
+        test.Claim.Spec.StorageClassName = &test.Class.Name
-        testsuites.TestDynamicProvisioning(test, c, claim, class)
+        test.TestDynamicProvisioning()
     })
 })
@@ -928,12 +936,13 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
         }
         By("creating a claim with class with allowedTopologies set")
         suffix := "topology"
-        class := newStorageClass(test, ns, suffix)
+        test.Client = c
+        test.Class = newStorageClass(test, ns, suffix)
         zone := getRandomClusterZone(c)
-        addSingleZoneAllowedTopologyToStorageClass(c, class, zone)
-        claim := newClaim(test, ns, suffix)
-        claim.Spec.StorageClassName = &class.Name
-        pv := testsuites.TestDynamicProvisioning(test, c, claim, class)
+        addSingleZoneAllowedTopologyToStorageClass(c, test.Class, zone)
+        test.Claim = newClaim(test, ns, suffix)
+        test.Claim.Spec.StorageClassName = &test.Class.Name
+        pv := test.TestDynamicProvisioning()
         checkZoneFromLabelAndAffinity(pv, zone, true)
     }
 })