Refactor volume tests in a similar way to the CSI tests

This refactors the non-CSI e2e tests in the same way as the CSI e2e tests were refactored in the commit below.
4d11dab272 (diff-0d9ecaa3e6a0297186ad33f57aad472e)

The scope of this refactoring is the following four files:
  - test/e2e/storage/volumes.go
  - test/e2e/storage/volume_io.go
  - test/e2e/storage/persistent_volumes-volumemode.go
  - test/e2e/storage/subpath.go

fixes: #66571
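After this change, per-plugin test logic is split into drivers (test/e2e/storage/drivers) and reusable suites (test/e2e/storage/testsuites), and every registered driver is run against every registered suite. A condensed sketch of the resulting loop, based on the new in_tree_volumes.go shown later in this diff:

// Condensed sketch of the new driver x suite loop (see in_tree_volumes.go below).
var _ = utils.SIGDescribe("In-tree Volumes", func() {
	f := framework.NewDefaultFramework("volumes")
	var config framework.VolumeTestConfig

	BeforeEach(func() {
		config = framework.VolumeTestConfig{Namespace: f.Namespace.Name, Prefix: "volume"}
	})

	for _, initDriver := range testDrivers {
		curDriver := initDriver()
		Context(drivers.GetDriverNameWithFeatureTags(curDriver), func() {
			driver := curDriver
			BeforeEach(func() {
				drivers.SetCommonDriverParameters(driver, f, config)
				driver.CreateDriver()
			})
			AfterEach(func() {
				driver.CleanupDriver()
			})
			testsuites.RunTestSuite(f, config, driver, testSuites)
		})
	}
})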
Masaki Kimura 2018-08-29 14:39:01 +00:00
parent 113872798d
commit 877ed5c22d
22 changed files with 4114 additions and 2356 deletions


@@ -751,6 +751,8 @@ test/e2e/scalability
test/e2e/scheduling
test/e2e/servicecatalog
test/e2e/storage
test/e2e/storage/drivers
test/e2e/storage/testsuites
test/e2e/storage/utils
test/e2e/storage/vsphere
test/e2e/ui


@@ -507,7 +507,6 @@ func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) error {
// Deletes the passed-in pod and waits for the pod to be terminated. Resilient to the pod
// not existing.
func DeletePodWithWait(f *Framework, c clientset.Interface, pod *v1.Pod) error {
const maxWait = 5 * time.Minute
if pod == nil {
return nil
}
@@ -519,8 +518,8 @@ func DeletePodWithWait(f *Framework, c clientset.Interface, pod *v1.Pod) error {
}
return fmt.Errorf("pod Delete API error: %v", err)
}
Logf("Wait up to %v for pod %q to be fully deleted", maxWait, pod.Name)
err = f.WaitForPodNotFound(pod.Name, maxWait)
Logf("Wait up to %v for pod %q to be fully deleted", PodDeleteTimeout, pod.Name)
err = f.WaitForPodNotFound(pod.Name, PodDeleteTimeout)
if err != nil {
return fmt.Errorf("pod %q was not deleted: %v", pod.Name, err)
}
@@ -1006,11 +1005,19 @@ func CreateNginxPod(client clientset.Interface, namespace string, nodeSelector m
// CreateSecPod creates a security pod with the given claims
func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, timeout time.Duration) (*v1.Pod, error) {
return CreateSecPodWithNodeName(client, namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup, "", timeout)
}
// CreateSecPodWithNodeName creates a security pod with the given claims on the specified node
func CreateSecPodWithNodeName(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, nodeName string, timeout time.Duration) (*v1.Pod, error) {
pod := MakeSecPod(namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup)
// Setting nodeName must happen before the Create call to have any effect
pod.Spec.NodeName = nodeName
pod, err := client.CoreV1().Pods(namespace).Create(pod)
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
}
// Waiting for pod to be running
err = WaitTimeoutForPodRunningInNamespace(client, pod.Name, namespace, timeout)
if err != nil {


@@ -68,7 +68,7 @@ const (
TiB int64 = 1024 * GiB
// Waiting period for volume server (Ceph, ...) to initialize itself.
VolumeServerPodStartupTimeout = 1 * time.Minute
VolumeServerPodStartupTimeout = 3 * time.Minute
// Waiting period for pod to be cleaned up and unmount its volumes so we
// don't tear down containers with NFS/Ceph/Gluster server too early.
@@ -356,16 +356,42 @@ func StartVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1.
return pod
}
// CleanUpVolumeServer is a wrapper of the cleanup function for a volume server without a secret created by a specific CreateStorageServer function.
func CleanUpVolumeServer(f *Framework, serverPod *v1.Pod) {
CleanUpVolumeServerWithSecret(f, serverPod, nil)
}
// CleanUpVolumeServerWithSecret is a wrapper of the cleanup function for a volume server with a secret created by a specific CreateStorageServer function.
func CleanUpVolumeServerWithSecret(f *Framework, serverPod *v1.Pod, secret *v1.Secret) {
cs := f.ClientSet
ns := f.Namespace
if secret != nil {
Logf("Deleting server secret %q...", secret.Name)
err := cs.CoreV1().Secrets(ns.Name).Delete(secret.Name, &metav1.DeleteOptions{})
if err != nil {
Logf("Delete secret failed: %v", err)
}
}
Logf("Deleting server pod %q...", serverPod.Name)
err := DeletePodWithWait(f, cs, serverPod)
if err != nil {
Logf("Server pod delete failed: %v", err)
}
}
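A caller without a server secret (e.g. an NFS-backed test) can then tear everything down from AfterEach; a minimal sketch, assuming the suite's framework.Framework instance f and the existing framework.NewNFSServer helper:

var serverPod *v1.Pod

BeforeEach(func() {
	// NewNFSServer returns (config, pod, ip); only the pod is needed for cleanup.
	_, serverPod, _ = framework.NewNFSServer(f.ClientSet, f.Namespace.Name, []string{})
})

AfterEach(func() {
	// No secret is created for NFS, so the secret-less wrapper suffices.
	framework.CleanUpVolumeServer(f, serverPod)
})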
// Clean both server and client pods.
func VolumeTestCleanup(f *Framework, config VolumeTestConfig) {
By(fmt.Sprint("cleaning the environment after ", config.Prefix))
defer GinkgoRecover()
client := f.ClientSet
podClient := client.CoreV1().Pods(config.Namespace)
cs := f.ClientSet
err := podClient.Delete(config.Prefix+"-client", nil)
pod, err := cs.CoreV1().Pods(config.Namespace).Get(config.Prefix+"-client", metav1.GetOptions{})
ExpectNoError(err, "Failed to get client pod: %v", err)
err = DeletePodWithWait(f, cs, pod)
if err != nil {
// Log the error before failing test: if the test has already failed,
// framework.ExpectNoError() won't print anything to logs!
@@ -374,15 +400,9 @@ func VolumeTestCleanup(f *Framework, config VolumeTestConfig) {
}
if config.ServerImage != "" {
if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrs.IsNotFound(err) {
ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
}
// See issue #24100.
// Prevent umount errors by making sure the client pod exits cleanly *before* the volume server pod exits.
By("sleeping a bit so kubelet can unmount and detach the volume")
time.Sleep(PodCleanupTimeout)
err = podClient.Delete(config.Prefix+"-server", nil)
pod, err := cs.CoreV1().Pods(config.Namespace).Get(config.Prefix+"-server", metav1.GetOptions{})
ExpectNoError(err, "Failed to get server pod: %v", err)
err = DeletePodWithWait(f, cs, pod)
if err != nil {
glog.Warningf("Failed to delete server pod: %v", err)
ExpectNoError(err, "Failed to delete server pod: %v", err)
@@ -438,9 +458,7 @@ func TestVolumeClient(client clientset.Interface, config VolumeTestConfig, fsGro
}
podsNamespacer := client.CoreV1().Pods(config.Namespace)
if fsGroup != nil {
clientPod.Spec.SecurityContext.FSGroup = fsGroup
}
clientPod.Spec.SecurityContext.FSGroup = fsGroup
for i, test := range tests {
volumeName := fmt.Sprintf("%s-%s-%d", config.Prefix, "volume", i)
@@ -520,6 +538,7 @@ func InjectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.V
VolumeSource: volume,
},
},
NodeName: config.ClientNodeName,
NodeSelector: config.NodeSelector,
},
}


@@ -9,19 +9,18 @@ go_library(
"ephemeral_volume.go",
"flexvolume.go",
"generic_persistent_volume-disruptive.go",
"in_tree_volumes.go",
"mounted_volume_resize.go",
"nfs_persistent_volume-disruptive.go",
"pd.go",
"persistent_volumes.go",
"persistent_volumes-gce.go",
"persistent_volumes-local.go",
"persistent_volumes-volumemode.go",
"pv_protection.go",
"pvc_protection.go",
"regional_pd.go",
"subpath.go",
"volume_expand.go",
"volume_io.go",
"volume_metrics.go",
"volume_provisioning.go",
"volumes.go",
@@ -67,8 +66,9 @@ go_library(
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/generated:go_default_library",
"//test/e2e/manifest:go_default_library",
"//test/e2e/storage/drivers:go_default_library",
"//test/e2e/storage/testsuites:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//test/e2e/storage/vsphere:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library",
@@ -92,6 +92,9 @@ filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//test/e2e/storage/drivers:all-srcs",
"//test/e2e/storage/testpatterns:all-srcs",
"//test/e2e/storage/testsuites:all-srcs",
"//test/e2e/storage/utils:all-srcs",
"//test/e2e/storage/vsphere:all-srcs",
],


@@ -0,0 +1,42 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"base.go",
"in_tree.go",
],
importpath = "k8s.io/kubernetes/test/e2e/storage/drivers",
visibility = ["//visibility:public"],
deps = [
"//pkg/kubelet/apis:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/storage/testpatterns:go_default_library",
"//test/e2e/storage/vsphere:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -0,0 +1,175 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package drivers
import (
"fmt"
"k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
// TestDriver represents an interface for a driver to be tested in TestSuite
type TestDriver interface {
// GetDriverInfo returns DriverInfo for the TestDriver
GetDriverInfo() *DriverInfo
// CreateDriver creates all driver resources that are required for the TestDriver methods,
// except CreateVolume
CreateDriver()
// CleanupDriver cleans up all the resources that were created in CreateDriver
CleanupDriver()
// SkipUnsupportedTest skips the test if the TestPattern is not suitable to test with the TestDriver
SkipUnsupportedTest(testpatterns.TestPattern)
}
// PreprovisionedVolumeTestDriver represents an interface for a TestDriver that has pre-provisioned volume
type PreprovisionedVolumeTestDriver interface {
TestDriver
// CreateVolume creates a pre-provisioned volume.
CreateVolume(testpatterns.TestVolType)
// DeleteVolume deletes a volume that is created in CreateVolume
DeleteVolume(testpatterns.TestVolType)
}
// InlineVolumeTestDriver represents an interface for a TestDriver that supports InlineVolume
type InlineVolumeTestDriver interface {
PreprovisionedVolumeTestDriver
// GetVolumeSource returns a volumeSource for inline volume.
// It will set readOnly and fsType on the volumeSource if the TestDriver supports both of them.
// It will return nil if the TestDriver doesn't support either of the parameters.
GetVolumeSource(readOnly bool, fsType string) *v1.VolumeSource
}
// PreprovisionedPVTestDriver represents an interface for a TestDriver that supports PreprovisionedPV
type PreprovisionedPVTestDriver interface {
PreprovisionedVolumeTestDriver
// GetPersistentVolumeSource returns a PersistentVolumeSource for pre-provisioned Persistent Volume.
// It will set readOnly and fsType on the PersistentVolumeSource if the TestDriver supports both of them.
// It will return nil if the TestDriver doesn't support either of the parameters.
GetPersistentVolumeSource(readOnly bool, fsType string) *v1.PersistentVolumeSource
}
// DynamicPVTestDriver represents an interface for a TestDriver that supports DynamicPV
type DynamicPVTestDriver interface {
TestDriver
// GetDynamicProvisionStorageClass returns a StorageClass for dynamically provisioning a Persistent Volume.
// It will set fsType on the StorageClass if the TestDriver supports it.
// It will return nil if the TestDriver doesn't support it.
GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass
}
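For illustration, a minimal inline-only driver could be written as below. This is a hypothetical sketch (noopDriver, backed by emptyDir); the real in-tree drivers live in the new in_tree.go, whose diff is suppressed in this view.

// noopDriver is a hypothetical emptyDir-backed driver, shown only as a sketch.
type noopDriver struct {
	driverInfo DriverInfo
}

var _ InlineVolumeTestDriver = &noopDriver{}

// InitNoopDriver returns the sketch driver; real drivers follow the same shape.
func InitNoopDriver() TestDriver {
	return &noopDriver{
		driverInfo: DriverInfo{
			Name:            "noop",
			MaxFileSize:     testpatterns.FileSizeSmall,
			SupportedFsType: sets.NewString(""), // default fs only
		},
	}
}

func (d *noopDriver) GetDriverInfo() *DriverInfo { return &d.driverInfo }

func (d *noopDriver) CreateDriver() {}

func (d *noopDriver) CleanupDriver() {}

func (d *noopDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
	if pattern.VolMode == v1.PersistentVolumeBlock {
		framework.Skipf("noop driver does not support block volumes -- skipping")
	}
}

// emptyDir needs no pre-provisioning, so volume create/delete are no-ops.
func (d *noopDriver) CreateVolume(volType testpatterns.TestVolType) {}

func (d *noopDriver) DeleteVolume(volType testpatterns.TestVolType) {}

func (d *noopDriver) GetVolumeSource(readOnly bool, fsType string) *v1.VolumeSource {
	return &v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}
}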
// DriverInfo represents a combination of parameters to be used in implementation of TestDriver
type DriverInfo struct {
Name string // Name of the driver
FeatureTag string // FeatureTag for the driver
MaxFileSize int64 // Max file size to be tested for this driver
SupportedFsType sets.String // Set of supported fs types
IsPersistent bool // Flag to represent whether it provides persistence
IsFsGroupSupported bool // Flag to represent whether it supports fsGroup
IsBlockSupported bool // Flag to represent whether it supports Block Volume
// The parameters below are set inside the test loop by SetCommonDriverParameters.
// Drivers that implement TestDriver are required to set all the above parameters
// and to return the DriverInfo from the GetDriverInfo() call.
Framework *framework.Framework // Framework for the test
Config framework.VolumeTestConfig // VolumeTestConfig for the test
}
// GetDriverNameWithFeatureTags returns driver name with feature tags
// For example:
// - [Driver: nfs]
// - [Driver: rbd][Feature:Volumes]
func GetDriverNameWithFeatureTags(driver TestDriver) string {
dInfo := driver.GetDriverInfo()
return fmt.Sprintf("[Driver: %s]%s", dInfo.Name, dInfo.FeatureTag)
}
func CreateVolume(driver TestDriver, volType testpatterns.TestVolType) {
// Create a volume for the test, unless it is a DynamicPV test
switch volType {
case testpatterns.InlineVolume:
fallthrough
case testpatterns.PreprovisionedPV:
if pDriver, ok := driver.(PreprovisionedVolumeTestDriver); ok {
pDriver.CreateVolume(volType)
}
case testpatterns.DynamicPV:
// No need to create volume
default:
framework.Failf("Invalid volType specified: %v", volType)
}
}
func DeleteVolume(driver TestDriver, volType testpatterns.TestVolType) {
// Delete the volume for the test, unless it is a DynamicPV test
switch volType {
case testpatterns.InlineVolume:
fallthrough
case testpatterns.PreprovisionedPV:
if pDriver, ok := driver.(PreprovisionedVolumeTestDriver); ok {
pDriver.DeleteVolume(volType)
}
case testpatterns.DynamicPV:
// No need to delete volume
default:
framework.Failf("Invalid volType specified: %v", volType)
}
}
// SetCommonDriverParameters sets common driver parameters on the TestDriver.
// This function is intended to be called in BeforeEach() inside the test loop.
func SetCommonDriverParameters(
driver TestDriver,
f *framework.Framework,
config framework.VolumeTestConfig,
) {
dInfo := driver.GetDriverInfo()
dInfo.Framework = f
dInfo.Config = config
}
func getStorageClass(
provisioner string,
parameters map[string]string,
bindingMode *storagev1.VolumeBindingMode,
ns string,
suffix string,
) *storagev1.StorageClass {
if bindingMode == nil {
defaultBindingMode := storagev1.VolumeBindingImmediate
bindingMode = &defaultBindingMode
}
return &storagev1.StorageClass{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: metav1.ObjectMeta{
// Name must be unique, so let's base it on namespace name
Name: ns + "-" + suffix,
},
Provisioner: provisioner,
Parameters: parameters,
VolumeBindingMode: bindingMode,
}
}
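A driver's GetDynamicProvisionStorageClass can delegate to this helper; a hypothetical call building an immediate-binding GCE PD class (assuming the enclosing test's framework instance f):

// Hypothetical usage: a nil bindingMode falls back to VolumeBindingImmediate.
parameters := map[string]string{"type": "pd-standard"}
sc := getStorageClass("kubernetes.io/gce-pd", parameters, nil, f.Namespace.Name, "sc")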

File diff suppressed because it is too large


@@ -27,11 +27,18 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var (
volumePath = "/test-volume"
volumeName = "test-volume"
mountImage = imageutils.GetE2EImage(imageutils.Mounttest)
)
var _ = utils.SIGDescribe("Ephemeralstorage", func() {
var (
c clientset.Interface


@@ -0,0 +1,91 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
. "github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
// List of testDrivers to be executed in the loop below
var testDrivers = []func() drivers.TestDriver{
drivers.InitNFSDriver,
drivers.InitGlusterFSDriver,
drivers.InitISCSIDriver,
drivers.InitRbdDriver,
drivers.InitCephFSDriver,
drivers.InitHostpathDriver,
drivers.InitHostpathSymlinkDriver,
drivers.InitEmptydirDriver,
drivers.InitCinderDriver,
drivers.InitGceDriver,
drivers.InitVSphereDriver,
drivers.InitAzureDriver,
drivers.InitAwsDriver,
}
// List of testSuites to be executed in the loop below
var testSuites = []func() testsuites.TestSuite{
testsuites.InitVolumesTestSuite,
testsuites.InitVolumeIOTestSuite,
testsuites.InitVolumeModeTestSuite,
testsuites.InitSubPathTestSuite,
}
// This executes testSuites for in-tree volumes.
var _ = utils.SIGDescribe("In-tree Volumes", func() {
f := framework.NewDefaultFramework("volumes")
var (
ns *v1.Namespace
config framework.VolumeTestConfig
)
BeforeEach(func() {
ns = f.Namespace
config = framework.VolumeTestConfig{
Namespace: ns.Name,
Prefix: "volume",
}
})
for _, initDriver := range testDrivers {
curDriver := initDriver()
Context(fmt.Sprintf(drivers.GetDriverNameWithFeatureTags(curDriver)), func() {
driver := curDriver
BeforeEach(func() {
// setupDriver
drivers.SetCommonDriverParameters(driver, f, config)
driver.CreateDriver()
})
AfterEach(func() {
// Cleanup driver
driver.CleanupDriver()
})
testsuites.RunTestSuite(f, config, driver, testSuites)
})
}
})
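Adding coverage for a new plugin then reduces to implementing drivers.TestDriver and registering its init function in the testDrivers list above; hypothetically:

var testDrivers = []func() drivers.TestDriver{
	// ...existing drivers...
	drivers.InitMyNewDriver, // hypothetical new driver
}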


@@ -1,429 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
noProvisioner = "kubernetes.io/no-provisioner"
pvNamePrefix = "pv"
)
func generateConfigsForStaticProvisionPVTest(scName string, volBindMode storagev1.VolumeBindingMode,
volMode v1.PersistentVolumeMode, pvSource v1.PersistentVolumeSource) (*storagev1.StorageClass,
framework.PersistentVolumeConfig, framework.PersistentVolumeClaimConfig) {
// StorageClass
scConfig := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
Name: scName,
},
Provisioner: noProvisioner,
VolumeBindingMode: &volBindMode,
}
// PV
pvConfig := framework.PersistentVolumeConfig{
PVSource: pvSource,
NamePrefix: pvNamePrefix,
StorageClassName: scName,
VolumeMode: &volMode,
}
// PVC
pvcConfig := framework.PersistentVolumeClaimConfig{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &scName,
VolumeMode: &volMode,
}
return scConfig, pvConfig, pvcConfig
}
func createPVTestResource(cs clientset.Interface, ns string,
scConfig *storagev1.StorageClass, pvConfig framework.PersistentVolumeConfig,
pvcConfig framework.PersistentVolumeClaimConfig) (*storagev1.StorageClass, *v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
By("Creating sc")
sc, err := cs.StorageV1().StorageClasses().Create(scConfig)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
pv, pvc, err := framework.CreatePVPVC(cs, pvConfig, pvcConfig, ns, false)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns, pv, pvc))
By("Creating a pod")
// TODO(mkimuram): Need to set anti-affinity with storage server pod.
// Otherwise, the storage server pod can also be affected by destructive tests.
pod, err := framework.CreateSecPod(cs, ns, []*v1.PersistentVolumeClaim{pvc}, false, "", false, false, framework.SELinuxLabel, nil, framework.PodStartTimeout)
Expect(err).NotTo(HaveOccurred())
return sc, pod, pv, pvc
}
func createPVTestResourceWithFailure(cs clientset.Interface, ns string,
scConfig *storagev1.StorageClass, pvConfig framework.PersistentVolumeConfig,
pvcConfig framework.PersistentVolumeClaimConfig) (*storagev1.StorageClass, *v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
By("Creating sc")
sc, err := cs.StorageV1().StorageClasses().Create(scConfig)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
pv, pvc, err := framework.CreatePVPVC(cs, pvConfig, pvcConfig, ns, false)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns, pv, pvc))
By("Creating a pod")
pod, err := framework.CreateSecPod(cs, ns, []*v1.PersistentVolumeClaim{pvc}, false, "", false, false, framework.SELinuxLabel, nil, framework.PodStartTimeout)
Expect(err).To(HaveOccurred())
return sc, pod, pv, pvc
}
func deletePVTestResource(f *framework.Framework, cs clientset.Interface, ns string, sc *storagev1.StorageClass,
pod *v1.Pod, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
By("Deleting pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
By("Deleting pv and pvc")
errs := framework.PVPVCCleanup(cs, ns, pv, pvc)
if len(errs) > 0 {
framework.Failf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs))
}
By("Deleting sc")
framework.ExpectNoError(cs.StorageV1().StorageClasses().Delete(sc.Name, nil))
}
func checkVolumeModeOfPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
if volMode == v1.PersistentVolumeBlock {
// Check if block exists
utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("test -b %s", path))
// Double check that it's not directory
utils.VerifyExecInPodFail(pod, fmt.Sprintf("test -d %s", path), 1)
} else {
// Check if directory exists
utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("test -d %s", path))
// Double check that it's not block
utils.VerifyExecInPodFail(pod, fmt.Sprintf("test -b %s", path), 1)
}
}
func checkReadWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
if volMode == v1.PersistentVolumeBlock {
// random -> file1
utils.VerifyExecInPodSucceed(pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
// file1 -> dev (write to dev)
utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
// dev -> file2 (read from dev)
utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
// file1 == file2 (check contents)
utils.VerifyExecInPodSucceed(pod, "diff /tmp/file1 /tmp/file2")
// Clean up temp files
utils.VerifyExecInPodSucceed(pod, "rm -f /tmp/file1 /tmp/file2")
// Check that writing file to block volume fails
utils.VerifyExecInPodFail(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
} else {
// text -> file1 (write to file)
utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
// grep file1 (read from file and check contents)
utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("grep 'Hello world.' %s/file1.txt", path))
// Check that writing to directory as block volume fails
utils.VerifyExecInPodFail(pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
}
}
func skipBlockSupportTestIfUnsupported(volMode v1.PersistentVolumeMode, isBlockSupported bool) {
if volMode == v1.PersistentVolumeBlock && !isBlockSupported {
framework.Skipf("Skipping test that requires block volume support (block is unsupported by this plugin)")
}
}
func skipBlockUnsupportTestUnlessUnsupported(volMode v1.PersistentVolumeMode, isBlockSupported bool) {
if !(volMode == v1.PersistentVolumeBlock && !isBlockSupported) {
framework.Skipf("Skipping test that requires block volume to be unsupported (block is supported, or this is a filesystem test)")
}
}
var _ = utils.SIGDescribe("PersistentVolumes-volumeMode", func() {
f := framework.NewDefaultFramework("pv-volmode")
const (
pvTestSCPrefix = "pvtest"
)
var (
cs clientset.Interface
ns string
scName string
isBlockSupported bool
serverIP string
secret *v1.Secret
serverPod *v1.Pod
pvSource v1.PersistentVolumeSource
sc *storagev1.StorageClass
pod *v1.Pod
pv *v1.PersistentVolume
pvc *v1.PersistentVolumeClaim
volMode v1.PersistentVolumeMode
volBindMode storagev1.VolumeBindingMode
)
BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
volBindMode = storagev1.VolumeBindingImmediate
})
AssertCreateDeletePodAndReadWriteVolume := func() {
// For block supported plugins
It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {
skipBlockSupportTestIfUnsupported(volMode, isBlockSupported)
scConfig, pvConfig, pvcConfig := generateConfigsForStaticProvisionPVTest(scName, volBindMode, volMode, pvSource)
sc, pod, pv, pvc = createPVTestResource(cs, ns, scConfig, pvConfig, pvcConfig)
defer deletePVTestResource(f, cs, ns, sc, pod, pv, pvc)
By("Checking if persistent volume exists as expected volume mode")
checkVolumeModeOfPath(pod, volMode, "/mnt/volume1")
By("Checking if read/write to persistent volume works properly")
checkReadWriteToPath(pod, volMode, "/mnt/volume1")
})
// For block unsupported plugins
It("should fail to create pod by failing to mount volume", func() {
skipBlockUnsupportTestUnlessUnsupported(volMode, isBlockSupported)
scConfig, pvConfig, pvcConfig := generateConfigsForStaticProvisionPVTest(scName, volBindMode, volMode, pvSource)
sc, pod, pv, pvc = createPVTestResourceWithFailure(cs, ns, scConfig, pvConfig, pvcConfig)
deletePVTestResource(f, cs, ns, sc, pod, pv, pvc)
})
}
verifyAll := func() {
AssertCreateDeletePodAndReadWriteVolume()
// TODO(mkimuram): Add more tests
}
Describe("NFS", func() {
const pvTestNFSSCSuffix = "nfs"
BeforeEach(func() {
isBlockSupported = false
scName = fmt.Sprintf("%v-%v-%v", pvTestSCPrefix, ns, pvTestNFSSCSuffix)
_, serverPod, serverIP = framework.NewNFSServer(cs, ns, []string{})
pvSource = v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{
Server: serverIP,
Path: "/",
ReadOnly: false,
},
}
})
AfterEach(func() {
framework.Logf("AfterEach: deleting NFS server pod %q...", serverPod.Name)
err := framework.DeletePodWithWait(f, cs, serverPod)
Expect(err).NotTo(HaveOccurred(), "AfterEach: NFS server pod failed to delete")
})
Context("FileSystem volume Test", func() {
BeforeEach(func() {
volMode = v1.PersistentVolumeFilesystem
})
verifyAll()
})
Context("Block volume Test[Feature:BlockVolume]", func() {
BeforeEach(func() {
volMode = v1.PersistentVolumeBlock
})
verifyAll()
})
})
Describe("iSCSI [Feature:Volumes]", func() {
const pvTestISCSISCSuffix = "iscsi"
BeforeEach(func() {
isBlockSupported = true
scName = fmt.Sprintf("%v-%v-%v", pvTestSCPrefix, ns, pvTestISCSISCSuffix)
_, serverPod, serverIP = framework.NewISCSIServer(cs, ns)
pvSource = v1.PersistentVolumeSource{
ISCSI: &v1.ISCSIPersistentVolumeSource{
TargetPortal: serverIP + ":3260",
IQN: "iqn.2003-01.org.linux-iscsi.f21.x8664:sn.4b0aae584f7c",
Lun: 0,
},
}
})
AfterEach(func() {
framework.Logf("AfterEach: deleting iSCSI server pod %q...", serverPod.Name)
err := framework.DeletePodWithWait(f, cs, serverPod)
Expect(err).NotTo(HaveOccurred(), "AfterEach: iSCSI server pod failed to delete")
})
Context("FileSystem volume Test", func() {
BeforeEach(func() {
volMode = v1.PersistentVolumeFilesystem
})
verifyAll()
})
Context("Block volume Test[Feature:BlockVolume]", func() {
BeforeEach(func() {
volMode = v1.PersistentVolumeBlock
})
verifyAll()
})
})
Describe("Ceph-RBD [Feature:Volumes]", func() {
const pvTestRBDSCSuffix = "rbd"
BeforeEach(func() {
isBlockSupported = true
scName = fmt.Sprintf("%v-%v-%v", pvTestSCPrefix, ns, pvTestRBDSCSuffix)
_, serverPod, secret, serverIP = framework.NewRBDServer(cs, ns)
framework.Logf("namespace: %v, secret.Name: %v", ns, secret.Name)
pvSource = v1.PersistentVolumeSource{
RBD: &v1.RBDPersistentVolumeSource{
CephMonitors: []string{serverIP},
RBDPool: "rbd",
RBDImage: "foo",
RadosUser: "admin",
SecretRef: &v1.SecretReference{
Name: secret.Name,
Namespace: ns,
},
ReadOnly: false,
},
}
})
AfterEach(func() {
framework.Logf("AfterEach: deleting Ceph-RDB server secret %q...", secret.Name)
secErr := cs.CoreV1().Secrets(ns).Delete(secret.Name, &metav1.DeleteOptions{})
framework.Logf("AfterEach: deleting Ceph-RDB server pod %q...", serverPod.Name)
err := framework.DeletePodWithWait(f, cs, serverPod)
if secErr != nil || err != nil {
if secErr != nil {
framework.Logf("AfterEach: Ceph-RDB delete secret failed: %v", secErr)
}
if err != nil {
framework.Logf("AfterEach: Ceph-RDB server pod delete failed: %v", err)
}
framework.Failf("AfterEach: cleanup failed")
}
})
Context("FileSystem volume Test", func() {
BeforeEach(func() {
volMode = v1.PersistentVolumeFilesystem
})
verifyAll()
})
Context("Block volume Test[Feature:BlockVolume]", func() {
BeforeEach(func() {
volMode = v1.PersistentVolumeBlock
})
verifyAll()
})
})
Describe("CephFS [Feature:Volumes]", func() {
const pvTestCephFSSCSuffix = "cephfs"
BeforeEach(func() {
isBlockSupported = false
scName = fmt.Sprintf("%v-%v-%v", pvTestSCPrefix, ns, pvTestCephFSSCSuffix)
_, serverPod, secret, serverIP = framework.NewRBDServer(cs, ns)
pvSource = v1.PersistentVolumeSource{
CephFS: &v1.CephFSPersistentVolumeSource{
Monitors: []string{serverIP + ":6789"},
User: "kube",
SecretRef: &v1.SecretReference{
Name: secret.Name,
Namespace: ns,
},
ReadOnly: false,
},
}
})
AfterEach(func() {
framework.Logf("AfterEach: deleting CephFS server secret %q...", secret.Name)
secErr := cs.CoreV1().Secrets(ns).Delete(secret.Name, &metav1.DeleteOptions{})
framework.Logf("AfterEach: deleting CephFS server pod %q...", serverPod.Name)
err := framework.DeletePodWithWait(f, cs, serverPod)
if secErr != nil || err != nil {
if secErr != nil {
framework.Logf("AfterEach: CephFS delete secret failed: %v", secErr)
}
if err != nil {
framework.Logf("AfterEach: CephFS server pod delete failed: %v", err)
}
framework.Failf("AfterEach: cleanup failed")
}
})
Context("FileSystem volume Test", func() {
BeforeEach(func() {
volMode = v1.PersistentVolumeFilesystem
})
verifyAll()
})
Context("Block volume Test[Feature:BlockVolume]", func() {
BeforeEach(func() {
volMode = v1.PersistentVolumeBlock
})
verifyAll()
})
})
})

File diff suppressed because it is too large


@@ -0,0 +1,26 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["testpattern.go"],
importpath = "k8s.io/kubernetes/test/e2e/storage/testpatterns",
visibility = ["//visibility:public"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//test/e2e/framework:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -0,0 +1,168 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testpatterns
import (
v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
// MinFileSize represents minimum file size (1 MiB) for testing
MinFileSize = 1 * framework.MiB
// FileSizeSmall represents small file size (1 MiB) for testing
FileSizeSmall = 1 * framework.MiB
// FileSizeMedium represents medium file size (100 MiB) for testing
FileSizeMedium = 100 * framework.MiB
// FileSizeLarge represents large file size (1 GiB) for testing
FileSizeLarge = 1 * framework.GiB
)
// TestVolType represents a volume type to be tested in a TestSuite
type TestVolType string
var (
// InlineVolume represents a volume type that is used inline in volumeSource
InlineVolume TestVolType = "InlineVolume"
// PreprovisionedPV represents a volume type for pre-provisioned Persistent Volume
PreprovisionedPV TestVolType = "PreprovisionedPV"
// DynamicPV represents a volume type for dynamic provisioned Persistent Volume
DynamicPV TestVolType = "DynamicPV"
)
// TestPattern represents a combination of parameters to be tested in a TestSuite
type TestPattern struct {
Name string // Name of TestPattern
FeatureTag string // featureTag for the TestSuite
VolType TestVolType // Volume type of the volume
FsType string // Fstype of the volume
VolMode v1.PersistentVolumeMode // PersistentVolumeMode of the volume
}
var (
// Definitions for default fsType
// DefaultFsInlineVolume is TestPattern for "Inline-volume (default fs)"
DefaultFsInlineVolume = TestPattern{
Name: "Inline-volume (default fs)",
VolType: InlineVolume,
}
// DefaultFsPreprovisionedPV is TestPattern for "Pre-provisioned PV (default fs)"
DefaultFsPreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (default fs)",
VolType: PreprovisionedPV,
}
// DefaultFsDynamicPV is TestPattern for "Dynamic PV (default fs)"
DefaultFsDynamicPV = TestPattern{
Name: "Dynamic PV (default fs)",
VolType: DynamicPV,
}
// Definitions for ext3
// Ext3InlineVolume is TestPattern for "Inline-volume (ext3)"
Ext3InlineVolume = TestPattern{
Name: "Inline-volume (ext3)",
VolType: InlineVolume,
FsType: "ext3",
}
// Ext3PreprovisionedPV is TestPattern for "Pre-provisioned PV (ext3)"
Ext3PreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (ext3)",
VolType: PreprovisionedPV,
FsType: "ext3",
}
// Ext3DynamicPV is TestPattern for "Dynamic PV (ext3)"
Ext3DynamicPV = TestPattern{
Name: "Dynamic PV (ext3)",
VolType: DynamicPV,
FsType: "ext3",
}
// Definitions for ext4
// Ext4InlineVolume is TestPattern for "Inline-volume (ext4)"
Ext4InlineVolume = TestPattern{
Name: "Inline-volume (ext4)",
VolType: InlineVolume,
FsType: "ext4",
}
// Ext4PreprovisionedPV is TestPattern for "Pre-provisioned PV (ext4)"
Ext4PreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (ext4)",
VolType: PreprovisionedPV,
FsType: "ext4",
}
// Ext4DynamicPV is TestPattern for "Dynamic PV (ext4)"
Ext4DynamicPV = TestPattern{
Name: "Dynamic PV (ext4)",
VolType: DynamicPV,
FsType: "ext4",
}
// Definitions for xfs
// XfsInlineVolume is TestPattern for "Inline-volume (xfs)"
XfsInlineVolume = TestPattern{
Name: "Inline-volume (xfs)",
VolType: InlineVolume,
FsType: "xfs",
}
// XfsPreprovisionedPV is TestPattern for "Pre-provisioned PV (xfs)"
XfsPreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (xfs)",
VolType: PreprovisionedPV,
FsType: "xfs",
}
// XfsDynamicPV is TestPattern for "Dynamic PV (xfs)"
XfsDynamicPV = TestPattern{
Name: "Dynamic PV (xfs)",
VolType: DynamicPV,
FsType: "xfs",
}
// Definitions for Filesystem volume mode
// FsVolModePreprovisionedPV is TestPattern for "Pre-provisioned PV (filesystem)"
FsVolModePreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (filesystem volmode)",
VolType: PreprovisionedPV,
VolMode: v1.PersistentVolumeFilesystem,
}
// FsVolModeDynamicPV is TestPattern for "Dynamic PV (filesystem)"
FsVolModeDynamicPV = TestPattern{
Name: "Dynamic PV (filesystem volmode)",
VolType: DynamicPV,
VolMode: v1.PersistentVolumeFilesystem,
}
// Definitions for block volume mode
// BlockVolModePreprovisionedPV is TestPattern for "Pre-provisioned PV (block)"
BlockVolModePreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (block volmode)",
VolType: PreprovisionedPV,
VolMode: v1.PersistentVolumeBlock,
}
// BlockVolModeDynamicPV is TestPattern for "Dynamic PV (block)(immediate bind)"
BlockVolModeDynamicPV = TestPattern{
Name: "Dynamic PV (block volmode)",
VolType: DynamicPV,
VolMode: v1.PersistentVolumeBlock,
}
)


@@ -0,0 +1,47 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"base.go",
"subpath.go",
"volume_io.go",
"volumemode.go",
"volumes.go",
],
importpath = "k8s.io/kubernetes/test/e2e/storage/testsuites",
visibility = ["//visibility:public"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/storage/drivers:go_default_library",
"//test/e2e/storage/testpatterns:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -0,0 +1,306 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testsuites
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
// TestSuite represents an interface for a set of tests which works with TestDriver
type TestSuite interface {
// getTestSuiteInfo returns the TestSuiteInfo for this TestSuite
getTestSuiteInfo() TestSuiteInfo
// skipUnsupportedTest skips the test if this TestSuite is not suitable to be tested with the combination of TestPattern and TestDriver
skipUnsupportedTest(testpatterns.TestPattern, drivers.TestDriver)
// execTest executes test of the testpattern for the driver
execTest(drivers.TestDriver, testpatterns.TestPattern)
}
type TestSuiteInfo struct {
name string // name of the TestSuite
featureTag string // featureTag for the TestSuite
testPatterns []testpatterns.TestPattern // Slice of TestPattern for the TestSuite
}
// TestResource represents an interface for resources that are used by TestSuite
type TestResource interface {
// setupResource sets up test resources to be used for the tests with the
// combination of TestDriver and TestPattern
setupResource(drivers.TestDriver, testpatterns.TestPattern)
// cleanupResource cleans up the test resources created in setupResource
cleanupResource(drivers.TestDriver, testpatterns.TestPattern)
}
func getTestNameStr(suite TestSuite, pattern testpatterns.TestPattern) string {
tsInfo := suite.getTestSuiteInfo()
return fmt.Sprintf("[Testpattern: %s]%s %s%s", pattern.Name, pattern.FeatureTag, tsInfo.name, tsInfo.featureTag)
}
// RunTestSuite runs all testpatterns of all testSuites for a driver
func RunTestSuite(f *framework.Framework, config framework.VolumeTestConfig, driver drivers.TestDriver, tsInits []func() TestSuite) {
for _, testSuiteInit := range tsInits {
suite := testSuiteInit()
tsInfo := suite.getTestSuiteInfo()
for _, pattern := range tsInfo.testPatterns {
suite.execTest(driver, pattern)
}
}
}
// skipUnsupportedTest will skip tests if the combination of driver, testsuite, and testpattern
// is not suitable to be tested.
// Whether it needs to be skipped is checked by the following steps:
// 1. Check whether volType is supported by the driver from its interface
// 2. Check if fsType is supported by driver
// 3. Check with driver specific logic
// 4. Check with testSuite specific logic
func skipUnsupportedTest(suite TestSuite, driver drivers.TestDriver, pattern testpatterns.TestPattern) {
dInfo := driver.GetDriverInfo()
// 1. Check whether volType is supported by the driver from its interface
var isSupported bool
switch pattern.VolType {
case testpatterns.InlineVolume:
_, isSupported = driver.(drivers.InlineVolumeTestDriver)
case testpatterns.PreprovisionedPV:
_, isSupported = driver.(drivers.PreprovisionedPVTestDriver)
case testpatterns.DynamicPV:
_, isSupported = driver.(drivers.DynamicPVTestDriver)
default:
isSupported = false
}
if !isSupported {
framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
}
// 2. Check if fsType is supported by driver
if !dInfo.SupportedFsType.Has(pattern.FsType) {
framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.FsType)
}
// 3. Check with driver specific logic
driver.SkipUnsupportedTest(pattern)
// 4. Check with testSuite specific logic
suite.skipUnsupportedTest(pattern, driver)
}
// genericVolumeTestResource is a generic implementation of TestResource that can be
// used in most TestSuites.
// See volume_io.go or volumes.go in test/e2e/storage/testsuites/ for how to use this resource.
// Also, see subpath.go in the same directory for how to extend and use it.
type genericVolumeTestResource struct {
driver drivers.TestDriver
volType string
volSource *v1.VolumeSource
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
sc *storagev1.StorageClass
}
var _ TestResource = &genericVolumeTestResource{}
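A suite typically declares this resource in its execTest Context and drives it from BeforeEach/AfterEach. A condensed sketch of that wiring (the volumesTestSuite name is an assumption, since the volumes.go diff is suppressed in this view; the subpath suite later in this diff follows the same shape):

func (t *volumesTestSuite) execTest(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
	Context(getTestNameStr(t, pattern), func() {
		var (
			resource     genericVolumeTestResource
			needsCleanup bool
		)

		BeforeEach(func() {
			needsCleanup = false
			// Skip first so unsupported combinations allocate nothing.
			skipUnsupportedTest(t, driver, pattern)
			needsCleanup = true
			resource = genericVolumeTestResource{}
			resource.setupResource(driver, pattern)
		})

		AfterEach(func() {
			if needsCleanup {
				resource.cleanupResource(driver, pattern)
			}
		})

		// It(...) blocks consume resource.volSource / resource.pvc here.
	})
}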
// setupResource sets up genericVolumeTestResource
func (r *genericVolumeTestResource) setupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
r.driver = driver
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
cs := f.ClientSet
fsType := pattern.FsType
volType := pattern.VolType
// Create volume for pre-provisioned volume tests
drivers.CreateVolume(driver, volType)
switch volType {
case testpatterns.InlineVolume:
framework.Logf("Creating resource for inline volume")
if iDriver, ok := driver.(drivers.InlineVolumeTestDriver); ok {
r.volSource = iDriver.GetVolumeSource(false, fsType)
r.volType = dInfo.Name
}
case testpatterns.PreprovisionedPV:
framework.Logf("Creating resource for pre-provisioned PV")
if pDriver, ok := driver.(drivers.PreprovisionedPVTestDriver); ok {
pvSource := pDriver.GetPersistentVolumeSource(false, fsType)
if pvSource != nil {
r.volSource, r.pv, r.pvc = createVolumeSourceWithPVCPV(f, dInfo.Name, pvSource, false)
}
r.volType = fmt.Sprintf("%s-preprovisionedPV", dInfo.Name)
}
case testpatterns.DynamicPV:
framework.Logf("Creating resource for dynamic PV")
if dDriver, ok := driver.(drivers.DynamicPVTestDriver); ok {
claimSize := "2Gi"
r.sc = dDriver.GetDynamicProvisionStorageClass(fsType)
By("creating a StorageClass " + r.sc.Name)
var err error
r.sc, err = cs.StorageV1().StorageClasses().Create(r.sc)
Expect(err).NotTo(HaveOccurred())
if r.sc != nil {
r.volSource, r.pv, r.pvc = createVolumeSourceWithPVCPVFromDynamicProvisionSC(
f, dInfo.Name, claimSize, r.sc, false, nil)
}
r.volType = fmt.Sprintf("%s-dynamicPV", dInfo.Name)
}
default:
framework.Failf("genericVolumeTestResource doesn't support: %s", volType)
}
if r.volSource == nil {
framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, volType)
}
}
// cleanupResource cleans up genericVolumeTestResource
func (r *genericVolumeTestResource) cleanupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
volType := pattern.VolType
if r.pvc != nil || r.pv != nil {
By("Deleting pv and pvc")
if errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.pv, r.pvc); len(errs) != 0 {
framework.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs))
}
}
if r.sc != nil {
By("Deleting sc")
deleteStorageClass(f.ClientSet, r.sc.Name)
}
// Cleanup volume for pre-provisioned volume tests
drivers.DeleteVolume(driver, volType)
}
func createVolumeSourceWithPVCPV(
f *framework.Framework,
name string,
pvSource *v1.PersistentVolumeSource,
readOnly bool,
) (*v1.VolumeSource, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
pvConfig := framework.PersistentVolumeConfig{
NamePrefix: fmt.Sprintf("%s-", name),
StorageClassName: f.Namespace.Name,
PVSource: *pvSource,
}
pvcConfig := framework.PersistentVolumeClaimConfig{
StorageClassName: &f.Namespace.Name,
}
framework.Logf("Creating PVC and PV")
pv, pvc, err := framework.CreatePVCPV(f.ClientSet, pvConfig, pvcConfig, f.Namespace.Name, false)
Expect(err).NotTo(HaveOccurred(), "PVC, PV creation failed")
err = framework.WaitOnPVandPVC(f.ClientSet, f.Namespace.Name, pv, pvc)
Expect(err).NotTo(HaveOccurred(), "PVC, PV failed to bind")
volSource := &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
ReadOnly: readOnly,
},
}
return volSource, pv, pvc
}
func createVolumeSourceWithPVCPVFromDynamicProvisionSC(
f *framework.Framework,
name string,
claimSize string,
sc *storagev1.StorageClass,
readOnly bool,
volMode *v1.PersistentVolumeMode,
) (*v1.VolumeSource, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
cs := f.ClientSet
ns := f.Namespace.Name
By("creating a claim")
pvc := getClaim(claimSize, ns)
pvc.Spec.StorageClassName = &sc.Name
if volMode != nil {
pvc.Spec.VolumeMode = volMode
}
var err error
pvc, err = cs.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pv, err := cs.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
volSource := &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
ReadOnly: readOnly,
},
}
return volSource, pv, pvc
}
func getClaim(claimSize string, ns string) *v1.PersistentVolumeClaim {
claim := v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-",
Namespace: ns,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(claimSize),
},
},
},
}
return &claim
}
// deleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found"
func deleteStorageClass(cs clientset.Interface, className string) {
err := cs.StorageV1().StorageClasses().Delete(className, nil)
if err != nil && !apierrs.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
}
}


@@ -0,0 +1,747 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testsuites
import (
"fmt"
"path/filepath"
"strings"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var (
volumePath = "/test-volume"
volumeName = "test-volume"
probeVolumePath = "/probe-volume"
probeFilePath = probeVolumePath + "/probe-file"
fileName = "test-file"
retryDuration = 20
mountImage = imageutils.GetE2EImage(imageutils.Mounttest)
)
type subPathTestSuite struct {
tsInfo TestSuiteInfo
}
var _ TestSuite = &subPathTestSuite{}
// InitSubPathTestSuite returns subPathTestSuite that implements TestSuite interface
func InitSubPathTestSuite() TestSuite {
return &subPathTestSuite{
tsInfo: TestSuiteInfo{
name: "subPath",
testPatterns: []testpatterns.TestPattern{
testpatterns.DefaultFsInlineVolume,
testpatterns.DefaultFsPreprovisionedPV,
testpatterns.DefaultFsDynamicPV,
},
},
}
}
func (s *subPathTestSuite) getTestSuiteInfo() TestSuiteInfo {
return s.tsInfo
}
func (s *subPathTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver drivers.TestDriver) {
}
func createSubPathTestInput(pattern testpatterns.TestPattern, resource subPathTestResource) subPathTestInput {
driver := resource.driver
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
subPath := f.Namespace.Name
subPathDir := filepath.Join(volumePath, subPath)
return subPathTestInput{
f: f,
subPathDir: subPathDir,
filePathInSubpath: filepath.Join(volumePath, fileName),
filePathInVolume: filepath.Join(subPathDir, fileName),
volType: resource.volType,
pod: resource.pod,
volSource: resource.genericVolumeTestResource.volSource,
roVol: resource.roVolSource,
}
}
func (s *subPathTestSuite) execTest(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
Context(getTestNameStr(s, pattern), func() {
var (
resource subPathTestResource
input subPathTestInput
needsCleanup bool
)
BeforeEach(func() {
needsCleanup = false
// Skip unsupported tests to avoid unnecessary resource initialization
skipUnsupportedTest(s, driver, pattern)
needsCleanup = true
// Setup test resource for driver and testpattern
resource = subPathTestResource{} // assign (not :=) so AfterEach cleans up the initialized resource
resource.setupResource(driver, pattern)
// Create test input
input = createSubPathTestInput(pattern, resource)
})
AfterEach(func() {
if needsCleanup {
resource.cleanupResource(driver, pattern)
}
})
testSubPath(&input)
})
}
type subPathTestResource struct {
genericVolumeTestResource
roVolSource *v1.VolumeSource
pod *v1.Pod
}
var _ TestResource = &subPathTestResource{}
func (s *subPathTestResource) setupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
s.driver = driver
dInfo := s.driver.GetDriverInfo()
f := dInfo.Framework
fsType := pattern.FsType
volType := pattern.VolType
// Setup generic test resource
s.genericVolumeTestResource.setupResource(driver, pattern)
// Setup subPath test dependent resource
switch volType {
case testpatterns.InlineVolume:
if iDriver, ok := driver.(drivers.InlineVolumeTestDriver); ok {
s.roVolSource = iDriver.GetVolumeSource(true, fsType)
}
case testpatterns.PreprovisionedPV:
s.roVolSource = &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: s.genericVolumeTestResource.pvc.Name,
ReadOnly: true,
},
}
case testpatterns.DynamicPV:
s.roVolSource = &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: s.genericVolumeTestResource.pvc.Name,
ReadOnly: true,
},
}
default:
framework.Failf("SubPath test doesn't support: %s", volType)
}
subPath := f.Namespace.Name
config := dInfo.Config
s.pod = TestPodSubpath(f, subPath, s.volType, s.volSource, true)
s.pod.Spec.NodeName = config.ClientNodeName
s.pod.Spec.NodeSelector = config.NodeSelector
}
func (s *subPathTestResource) cleanupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
// Cleanup subPath test dependent resource
By("Deleting pod")
err := framework.DeletePodWithWait(f, f.ClientSet, s.pod)
Expect(err).ToNot(HaveOccurred(), "while deleting pod")
// Cleanup generic test resource
s.genericVolumeTestResource.cleanupResource(driver, pattern)
}
type subPathTestInput struct {
f *framework.Framework
subPathDir string
filePathInSubpath string
filePathInVolume string
volType string
pod *v1.Pod
volSource *v1.VolumeSource
roVol *v1.VolumeSource
}
func testSubPath(input *subPathTestInput) {
It("should support non-existent path", func() {
// Write the file in the subPath from container 0
setWriteCommand(input.filePathInSubpath, &input.pod.Spec.Containers[0])
// Read it from outside the subPath from container 1
testReadFile(input.f, input.filePathInVolume, input.pod, 1)
})
It("should support existing directory", func() {
// Create the directory
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s", input.subPathDir))
// Write the file in the subPath from container 0
setWriteCommand(input.filePathInSubpath, &input.pod.Spec.Containers[0])
// Read it from outside the subPath from container 1
testReadFile(input.f, input.filePathInVolume, input.pod, 1)
})
It("should support existing single file", func() {
// Create the file in the init container
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", input.subPathDir, input.filePathInVolume))
// Read it from inside the subPath from container 0
testReadFile(input.f, input.filePathInSubpath, input.pod, 0)
})
It("should support file as subpath", func() {
// Create the file in the init container
setInitCommand(input.pod, fmt.Sprintf("echo %s > %s", input.f.Namespace.Name, input.subPathDir))
TestBasicSubpath(input.f, input.f.Namespace.Name, input.pod)
})
It("should fail if subpath directory is outside the volume [Slow]", func() {
// Create the subpath outside the volume
setInitCommand(input.pod, fmt.Sprintf("ln -s /bin %s", input.subPathDir))
// Pod should fail
testPodFailSubpath(input.f, input.pod)
})
It("should fail if subpath file is outside the volume [Slow]", func() {
// Create the subpath outside the volume
setInitCommand(input.pod, fmt.Sprintf("ln -s /bin/sh %s", input.subPathDir))
// Pod should fail
testPodFailSubpath(input.f, input.pod)
})
It("should fail if non-existent subpath is outside the volume [Slow]", func() {
// Create the subpath outside the volume
setInitCommand(input.pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", input.subPathDir))
// Pod should fail
testPodFailSubpath(input.f, input.pod)
})
It("should fail if subpath with backstepping is outside the volume [Slow]", func() {
// Create the subpath outside the volume
setInitCommand(input.pod, fmt.Sprintf("ln -s ../ %s", input.subPathDir))
// Pod should fail
testPodFailSubpath(input.f, input.pod)
})
It("should support creating multiple subpath from same volumes [Slow]", func() {
subpathDir1 := filepath.Join(volumePath, "subpath1")
subpathDir2 := filepath.Join(volumePath, "subpath2")
filepath1 := filepath.Join("/test-subpath1", fileName)
filepath2 := filepath.Join("/test-subpath2", fileName)
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2))
addSubpathVolumeContainer(&input.pod.Spec.Containers[0], v1.VolumeMount{
Name: volumeName,
MountPath: "/test-subpath1",
SubPath: "subpath1",
})
addSubpathVolumeContainer(&input.pod.Spec.Containers[0], v1.VolumeMount{
Name: volumeName,
MountPath: "/test-subpath2",
SubPath: "subpath2",
})
addMultipleWrites(&input.pod.Spec.Containers[0], filepath1, filepath2)
testMultipleReads(input.f, input.pod, 0, filepath1, filepath2)
})
It("should support restarting containers using directory as subpath [Slow]", func() {
// Create the directory
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %v; touch %v", input.subPathDir, probeFilePath))
testPodContainerRestart(input.f, input.pod)
})
It("should support restarting containers using file as subpath [Slow]", func() {
// Create the file
setInitCommand(input.pod, fmt.Sprintf("touch %v; touch %v", input.subPathDir, probeFilePath))
testPodContainerRestart(input.f, input.pod)
})
It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow]", func() {
testSubpathReconstruction(input.f, input.pod, false)
})
It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow]", func() {
if input.volType == "hostPath" || input.volType == "hostPathSymlink" {
framework.Skipf("%s volume type does not support reconstruction, skipping", input.volType)
}
testSubpathReconstruction(input.f, input.pod, true)
})
It("should support readOnly directory specified in the volumeMount", func() {
// Create the directory
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s", input.subPathDir))
// Write the file in the volume from container 1
setWriteCommand(input.filePathInVolume, &input.pod.Spec.Containers[1])
// Read it from inside the subPath from container 0
input.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
testReadFile(input.f, input.filePathInSubpath, input.pod, 0)
})
It("should support readOnly file specified in the volumeMount", func() {
// Create the file
setInitCommand(input.pod, fmt.Sprintf("touch %s", input.subPathDir))
// Write the file in the volume from container 1
setWriteCommand(input.subPathDir, &input.pod.Spec.Containers[1])
// Read it from inside the subPath from container 0
input.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
testReadFile(input.f, volumePath, input.pod, 0)
})
It("should support existing directories when readOnly specified in the volumeSource", func() {
if input.roVol == nil {
framework.Skipf("Volume type %v doesn't support readOnly source", input.volType)
}
// Initialize content in the volume while it's writable
initVolumeContent(input.f, input.pod, input.filePathInVolume, input.filePathInSubpath)
// Set volume source to read only
input.pod.Spec.Volumes[0].VolumeSource = *input.roVol
// Read it from inside the subPath from container 0
testReadFile(input.f, input.filePathInSubpath, input.pod, 0)
})
It("should fail for new directories when readOnly specified in the volumeSource", func() {
if input.roVol == nil {
framework.Skipf("Volume type %v doesn't support readOnly source", input.volType)
}
// Format the volume while it's writable
formatVolume(input.f, input.volSource)
// Set volume source to read only
input.pod.Spec.Volumes[0].VolumeSource = *input.roVol
// Pod should fail
testPodFailSubpathError(input.f, input.pod, "")
})
// TODO: add a test case for the same disk with two partitions
}
// TestBasicSubpath runs basic subpath test
func TestBasicSubpath(f *framework.Framework, contents string, pod *v1.Pod) {
TestBasicSubpathFile(f, contents, pod, volumePath)
}
// TestBasicSubpathFile runs basic subpath file test
func TestBasicSubpathFile(f *framework.Framework, contents string, pod *v1.Pod, filepath string) {
setReadCommand(filepath, &pod.Spec.Containers[0])
By(fmt.Sprintf("Creating pod %s", pod.Name))
f.TestContainerOutput("atomic-volume-subpath", pod, 0, []string{contents})
By(fmt.Sprintf("Deleting pod %s", pod.Name))
err := framework.DeletePodWithWait(f, f.ClientSet, pod)
Expect(err).NotTo(HaveOccurred(), "while deleting pod")
}
// TestPodSubpath returns a pod spec for subpath testing: an init container prepares the volume, container 0 mounts it via SubPath, and container 1 mounts the full volume.
func TestPodSubpath(f *framework.Framework, subpath, volumeType string, source *v1.VolumeSource, privilegedSecurityContext bool) *v1.Pod {
var (
suffix = strings.ToLower(fmt.Sprintf("%s-%s", volumeType, rand.String(4)))
gracePeriod = int64(1)
probeVolumeName = "liveness-probe-volume"
)
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("pod-subpath-test-%s", suffix),
Namespace: f.Namespace.Name,
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: fmt.Sprintf("init-volume-%s", suffix),
Image: imageutils.GetE2EImage(imageutils.BusyBox),
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumePath,
},
{
Name: probeVolumeName,
MountPath: probeVolumePath,
},
},
SecurityContext: &v1.SecurityContext{
Privileged: &privilegedSecurityContext,
},
},
},
Containers: []v1.Container{
{
Name: fmt.Sprintf("test-container-subpath-%s", suffix),
Image: mountImage,
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumePath,
SubPath: subpath,
},
{
Name: probeVolumeName,
MountPath: probeVolumePath,
},
},
SecurityContext: &v1.SecurityContext{
Privileged: &privilegedSecurityContext,
},
},
{
Name: fmt.Sprintf("test-container-volume-%s", suffix),
Image: mountImage,
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumePath,
},
{
Name: probeVolumeName,
MountPath: probeVolumePath,
},
},
SecurityContext: &v1.SecurityContext{
Privileged: &privilegedSecurityContext,
},
},
},
RestartPolicy: v1.RestartPolicyNever,
TerminationGracePeriodSeconds: &gracePeriod,
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: *source,
},
{
Name: probeVolumeName,
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
SecurityContext: &v1.PodSecurityContext{
SELinuxOptions: &v1.SELinuxOptions{
Level: "s0:c0,c1",
},
},
},
}
}
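// clearSubpathPodCommands resets the commands on a pod spec that is reused
// across cases (see initVolumeContent below).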
func clearSubpathPodCommands(pod *v1.Pod) {
pod.Spec.InitContainers[0].Command = nil
pod.Spec.Containers[0].Args = nil
pod.Spec.Containers[1].Args = nil
}
func setInitCommand(pod *v1.Pod, command string) {
pod.Spec.InitContainers[0].Command = []string{"/bin/sh", "-ec", command}
}
func setWriteCommand(file string, container *v1.Container) {
container.Args = []string{
fmt.Sprintf("--new_file_0644=%v", file),
fmt.Sprintf("--file_mode=%v", file),
}
}
func addSubpathVolumeContainer(container *v1.Container, volumeMount v1.VolumeMount) {
existingMounts := container.VolumeMounts
container.VolumeMounts = append(existingMounts, volumeMount)
}
func addMultipleWrites(container *v1.Container, file1 string, file2 string) {
container.Args = []string{
fmt.Sprintf("--new_file_0644=%v", file1),
fmt.Sprintf("--new_file_0666=%v", file2),
}
}
func testMultipleReads(f *framework.Framework, pod *v1.Pod, containerIndex int, file1 string, file2 string) {
By(fmt.Sprintf("Creating pod %s", pod.Name))
f.TestContainerOutput("multi_subpath", pod, containerIndex, []string{
"content of file \"" + file1 + "\": mount-tester new file",
"content of file \"" + file2 + "\": mount-tester new file",
})
}
func setReadCommand(file string, container *v1.Container) {
container.Args = []string{
fmt.Sprintf("--file_content_in_loop=%v", file),
fmt.Sprintf("--retry_time=%d", retryDuration),
}
}
func testReadFile(f *framework.Framework, file string, pod *v1.Pod, containerIndex int) {
setReadCommand(file, &pod.Spec.Containers[containerIndex])
By(fmt.Sprintf("Creating pod %s", pod.Name))
f.TestContainerOutput("subpath", pod, containerIndex, []string{
"content of file \"" + file + "\": mount-tester new file",
})
By(fmt.Sprintf("Deleting pod %s", pod.Name))
err := framework.DeletePodWithWait(f, f.ClientSet, pod)
Expect(err).NotTo(HaveOccurred(), "while deleting pod")
}
func testPodFailSubpath(f *framework.Framework, pod *v1.Pod) {
testPodFailSubpathError(f, pod, "subPath")
}
func testPodFailSubpathError(f *framework.Framework, pod *v1.Pod, errorMsg string) {
By(fmt.Sprintf("Creating pod %s", pod.Name))
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).ToNot(HaveOccurred(), "while creating pod")
defer func() {
framework.DeletePodWithWait(f, f.ClientSet, pod)
}()
err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
Expect(err).To(HaveOccurred(), "while waiting for pod to be running")
By("Checking for subpath error event")
selector := fields.Set{
"involvedObject.kind": "Pod",
"involvedObject.name": pod.Name,
"involvedObject.namespace": f.Namespace.Name,
"reason": "Failed",
}.AsSelector().String()
options := metav1.ListOptions{FieldSelector: selector}
events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(options)
Expect(err).NotTo(HaveOccurred(), "while getting pod events")
Expect(len(events.Items)).NotTo(Equal(0), "no events found")
Expect(events.Items[0].Message).To(ContainSubstring(errorMsg), fmt.Sprintf("%q error not found", errorMsg))
}
// Tests that the existing subpath mount is detected when a container restarts
func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure
pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.BusyBox)
pod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox)
pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
// Add liveness probe to subpath container
pod.Spec.Containers[0].LivenessProbe = &v1.Probe{
Handler: v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"cat", probeFilePath},
},
},
InitialDelaySeconds: 1,
FailureThreshold: 1,
PeriodSeconds: 2,
}
// Start pod
By(fmt.Sprintf("Creating pod %s", pod.Name))
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).ToNot(HaveOccurred(), "while creating pod")
err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running")
By("Failing liveness probe")
out, err := podContainerExec(pod, 1, fmt.Sprintf("rm %v", probeFilePath))
framework.Logf("Pod exec output: %v", out)
Expect(err).ToNot(HaveOccurred(), "while failing liveness probe")
// Check that container has restarted
By("Waiting for container to restart")
restarts := int32(0)
err = wait.PollImmediate(10*time.Second, 2*time.Minute, func() (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
for _, status := range pod.Status.ContainerStatuses {
if status.Name == pod.Spec.Containers[0].Name {
framework.Logf("Container %v, restarts: %v", status.Name, status.RestartCount)
restarts = status.RestartCount
if restarts > 0 {
framework.Logf("Container has restart count: %v", restarts)
return true, nil
}
}
}
return false, nil
})
Expect(err).ToNot(HaveOccurred(), "while waiting for container to restart")
// Fix liveness probe
By("Rewriting the file")
writeCmd := fmt.Sprintf("echo test-after > %v", probeFilePath)
out, err = podContainerExec(pod, 1, writeCmd)
framework.Logf("Pod exec output: %v", out)
Expect(err).ToNot(HaveOccurred(), "while rewriting the probe file")
// Wait for container restarts to stabilize
By("Waiting for container to stop restarting")
stableCount := int(0)
stableThreshold := int(time.Minute / framework.Poll)
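// The container counts as stable once its restart count has stayed unchanged
// for stableThreshold consecutive polls, i.e. a full minute of polling.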
err = wait.PollImmediate(framework.Poll, 2*time.Minute, func() (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
for _, status := range pod.Status.ContainerStatuses {
if status.Name == pod.Spec.Containers[0].Name {
if status.RestartCount == restarts {
stableCount++
if stableCount > stableThreshold {
framework.Logf("Container restart has stabilized")
return true, nil
}
} else {
restarts = status.RestartCount
stableCount = 0
framework.Logf("Container has restart count: %v", restarts)
}
break
}
}
return false, nil
})
Expect(err).ToNot(HaveOccurred(), "while waiting for container to stabilize")
}
func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete bool) {
// This is mostly copied from TestVolumeUnmountsFromDeletedPodWithForceOption()
// Change to busybox
pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.BusyBox)
pod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox)
pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
// If the grace period is too short, the volume manager does not get enough
// time to clean up the volumes before the pod is gone
gracePeriod := int64(30)
pod.Spec.TerminationGracePeriodSeconds = &gracePeriod
By(fmt.Sprintf("Creating pod %s", pod.Name))
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).ToNot(HaveOccurred(), "while creating pod")
err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred(), "while getting pod")
utils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true)
}
func formatVolume(f *framework.Framework, volumeSource *v1.VolumeSource) {
var err error
// Launch pod to format the volume
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("volume-prep-%s", f.Namespace.Name),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: fmt.Sprintf("init-volume-%s", f.Namespace.Name),
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-ec", "echo nothing"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: "/vol",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: *volumeSource,
},
},
},
}
By(fmt.Sprintf("Creating pod to format volume %s", pod.Name))
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).ToNot(HaveOccurred(), "while creating volume init pod")
err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
Expect(err).ToNot(HaveOccurred(), "while waiting for volume init pod to succeed")
err = framework.DeletePodWithWait(f, f.ClientSet, pod)
Expect(err).ToNot(HaveOccurred(), "while deleting volume init pod")
}
func initVolumeContent(f *framework.Framework, pod *v1.Pod, volumeFilepath, subpathFilepath string) {
setWriteCommand(volumeFilepath, &pod.Spec.Containers[1])
setReadCommand(subpathFilepath, &pod.Spec.Containers[0])
By(fmt.Sprintf("Creating pod to write volume content %s", pod.Name))
f.TestContainerOutput("subpath", pod, 0, []string{
"content of file \"" + subpathFilepath + "\": mount-tester new file",
})
By(fmt.Sprintf("Deleting pod %s", pod.Name))
err := framework.DeletePodWithWait(f, f.ClientSet, pod)
Expect(err).NotTo(HaveOccurred(), "while deleting pod")
// This pod spec is going to be reused; reset all the commands
clearSubpathPodCommands(pod)
}
func podContainerExec(pod *v1.Pod, containerIndex int, bashExec string) (string, error) {
return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--container", pod.Spec.Containers[containerIndex].Name, "--", "/bin/sh", "-c", bashExec)
}

View File

@ -0,0 +1,359 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* This test checks that the plugin VolumeSources are working when pseudo-streaming
* various write sizes to mounted files.
*/
package testsuites
import (
"fmt"
"math"
"path"
"strconv"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
// MD5 hashes of the test file corresponding to each file size.
// Test files are generated in testVolumeIO()
// If test file generation algorithm changes, these must be recomputed.
var md5hashes = map[int64]string{
testpatterns.FileSizeSmall: "5c34c2813223a7ca05a3c2f38c0d1710",
testpatterns.FileSizeMedium: "f2fa202b1ffeedda5f3a58bd1ae81104",
testpatterns.FileSizeLarge: "8d763edc71bd16217664793b5a15e403",
}
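// A minimal sketch (not part of this change) of how these hashes can be
// recomputed if the file generation in testVolumeIO() changes: the test file
// is just the 1 KiB block below repeated up to the target size, so hashing the
// repeated block reproduces the file hash. recomputeMD5Hash is a hypothetical
// helper; it assumes "crypto/md5" is imported and that sizes are 1 KiB multiples.
func recomputeMD5Hash(size int64) string {
block := []byte(strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32)) // 1 KiB, as built in testVolumeIO()
h := md5.New()
for written := int64(0); written < size; written += int64(len(block)) {
h.Write(block) // one block per iteration, mirroring the dd append loop
}
return fmt.Sprintf("%x", h.Sum(nil))
}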
type volumeIOTestSuite struct {
tsInfo TestSuiteInfo
}
var _ TestSuite = &volumeIOTestSuite{}
// InitVolumeIOTestSuite returns volumeIOTestSuite that implements TestSuite interface
func InitVolumeIOTestSuite() TestSuite {
return &volumeIOTestSuite{
tsInfo: TestSuiteInfo{
name: "volumeIO",
testPatterns: []testpatterns.TestPattern{
testpatterns.DefaultFsInlineVolume,
testpatterns.DefaultFsPreprovisionedPV,
testpatterns.DefaultFsDynamicPV,
},
},
}
}
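// Assumed wiring (mirroring the csi testsuites, not shown in this hunk): a
// driver test file lists the suites it wants and runs each one against the
// driver, e.g.:
//   var testSuites = []func() TestSuite{
//       InitVolumesTestSuite,
//       InitVolumeIOTestSuite,
//       InitVolumeModeTestSuite,
//   }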
func (t *volumeIOTestSuite) getTestSuiteInfo() TestSuiteInfo {
return t.tsInfo
}
func (t *volumeIOTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver drivers.TestDriver) {
}
func createVolumeIOTestInput(pattern testpatterns.TestPattern, resource genericVolumeTestResource) volumeIOTestInput {
var fsGroup *int64
driver := resource.driver
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
fileSizes := createFileSizes(dInfo.MaxFileSize)
volSource := resource.volSource
if volSource == nil {
framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
}
if dInfo.IsFsGroupSupported {
fsGroupVal := int64(1234)
fsGroup = &fsGroupVal
}
return volumeIOTestInput{
f: f,
name: dInfo.Name,
config: dInfo.Config,
volSource: *volSource,
testFile: fmt.Sprintf("%s_io_test_%s", dInfo.Name, f.Namespace.Name),
podSec: v1.PodSecurityContext{
FSGroup: fsGroup,
},
fileSizes: fileSizes,
}
}
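// execTest registers one Ginkgo Context per suite/pattern pair. Resource setup
// happens in BeforeEach rather than at registration time, so unsupported
// patterns are skipped before any server pod or volume is provisioned.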
func (t *volumeIOTestSuite) execTest(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
Context(getTestNameStr(t, pattern), func() {
var (
resource genericVolumeTestResource
input volumeIOTestInput
needsCleanup bool
)
BeforeEach(func() {
needsCleanup = false
// Skip unsupported tests to avoid unnecessary resource initialization
skipUnsupportedTest(t, driver, pattern)
needsCleanup = true
// Setup test resource for driver and testpattern
resource = genericVolumeTestResource{}
resource.setupResource(driver, pattern)
// Create test input
input = createVolumeIOTestInput(pattern, resource)
})
AfterEach(func() {
if needsCleanup {
resource.cleanupResource(driver, pattern)
}
})
execTestVolumeIO(&input)
})
}
type volumeIOTestInput struct {
f *framework.Framework
name string
config framework.VolumeTestConfig
volSource v1.VolumeSource
testFile string
podSec v1.PodSecurityContext
fileSizes []int64
}
func execTestVolumeIO(input *volumeIOTestInput) {
It("should write files of various sizes, verify size, validate content [Slow]", func() {
f := input.f
cs := f.ClientSet
err := testVolumeIO(f, cs, input.config, input.volSource, &input.podSec, input.testFile, input.fileSizes)
Expect(err).NotTo(HaveOccurred())
})
}
func createFileSizes(maxFileSize int64) []int64 {
allFileSizes := []int64{
testpatterns.FileSizeSmall,
testpatterns.FileSizeMedium,
testpatterns.FileSizeLarge,
}
fileSizes := []int64{}
for _, size := range allFileSizes {
if size <= maxFileSize {
fileSizes = append(fileSizes, size)
}
}
return fileSizes
}
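// For example, a driver whose MaxFileSize is testpatterns.FileSizeMedium gets
// []int64{testpatterns.FileSizeSmall, testpatterns.FileSizeMedium}; larger
// sizes are dropped automatically for drivers that cannot handle them.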
// Return the plugin's client pod spec. Use an InitContainer to set up the file i/o test env.
func makePodSpec(config framework.VolumeTestConfig, dir, initCmd string, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext) *v1.Pod {
volName := fmt.Sprintf("%s-%s", config.Prefix, "io-volume")
var gracePeriod int64 = 1
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-io-client",
Labels: map[string]string{
"role": config.Prefix + "-io-client",
},
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: config.Prefix + "-io-init",
Image: framework.BusyBoxImage,
Command: []string{
"/bin/sh",
"-c",
initCmd,
},
VolumeMounts: []v1.VolumeMount{
{
Name: volName,
MountPath: dir,
},
},
},
},
Containers: []v1.Container{
{
Name: config.Prefix + "-io-client",
Image: framework.BusyBoxImage,
Command: []string{
"/bin/sh",
"-c",
"sleep 3600", // keep pod alive until explicitly deleted
},
VolumeMounts: []v1.VolumeMount{
{
Name: volName,
MountPath: dir,
},
},
},
},
TerminationGracePeriodSeconds: &gracePeriod,
SecurityContext: podSecContext,
Volumes: []v1.Volume{
{
Name: volName,
VolumeSource: volsrc,
},
},
RestartPolicy: v1.RestartPolicyNever, // want pod to fail if init container fails
NodeName: config.ClientNodeName,
NodeSelector: config.NodeSelector,
},
}
}
// Write `fsize` bytes to `fpath` in the pod, using dd and the `ddInput` file.
func writeToFile(pod *v1.Pod, fpath, ddInput string, fsize int64) error {
By(fmt.Sprintf("writing %d bytes to test file %s", fsize, fpath))
loopCnt := fsize / testpatterns.MinFileSize
writeCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done", loopCnt, ddInput, testpatterns.MinFileSize, fpath)
_, err := utils.PodExec(pod, writeCmd)
return err
}
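// For a 100 MiB fsize (and assuming testpatterns.MinFileSize stays at 1 MiB,
// which the md5hashes above depend on), loopCnt is 100 and writeCmd expands to:
//   i=0; while [ $i -lt 100 ]; do dd if=<dir>/dd_if bs=1048576 >><fpath> 2>/dev/null; let i+=1; done
// i.e. the 1 MiB dd input file is appended to the test file 100 times.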
// Verify that the test file is the expected size and contains the expected content.
func verifyFile(pod *v1.Pod, fpath string, expectSize int64, ddInput string) error {
By("verifying file size")
rtnstr, err := utils.PodExec(pod, fmt.Sprintf("stat -c %%s %s", fpath))
if err != nil || rtnstr == "" {
return fmt.Errorf("unable to get file size via `stat %s`: %v", fpath, err)
}
size, err := strconv.Atoi(strings.TrimSuffix(rtnstr, "\n"))
if err != nil {
return fmt.Errorf("unable to convert string %q to int: %v", rtnstr, err)
}
if int64(size) != expectSize {
return fmt.Errorf("size of file %s is %d, expected %d", fpath, size, expectSize)
}
By("verifying file hash")
rtnstr, err = utils.PodExec(pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath))
if err != nil {
return fmt.Errorf("unable to test file hash via `md5sum %s`: %v", fpath, err)
}
actualHash := strings.TrimSuffix(rtnstr, "\n")
expectedHash, ok := md5hashes[expectSize]
if !ok {
return fmt.Errorf("File hash is unknown for file size %d. Was a new file size added to the test suite?",
expectSize)
}
if actualHash != expectedHash {
return fmt.Errorf("MD5 hash is incorrect for file %s with size %d. Expected: `%s`; Actual: `%s`",
fpath, expectSize, expectedHash, actualHash)
}
return nil
}
// Delete `fpath` to save some disk space on host. Delete errors are logged but ignored.
func deleteFile(pod *v1.Pod, fpath string) {
By(fmt.Sprintf("deleting test file %s...", fpath))
_, err := utils.PodExec(pod, fmt.Sprintf("rm -f %s", fpath))
if err != nil {
// keep going, the test dir will be deleted when the volume is unmounted
framework.Logf("unable to delete test file %s: %v\nerror ignored, continuing test", fpath, err)
}
}
// Create the client pod and create files of the sizes passed in by the `fsizes` parameter. Delete the
// client pod and the new files when done.
// Note: the file name is appended to "/opt/<Prefix>/<namespace>", e.g. "/opt/nfs/e2e-.../<file>".
// Note: nil can be passed for the podSecContext param, in which case it is ignored.
// Note: `fsizes` values are enforced to each be at least `MinFileSize` and a multiple of `MinFileSize`
// bytes.
func testVolumeIO(f *framework.Framework, cs clientset.Interface, config framework.VolumeTestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) {
dir := path.Join("/opt", config.Prefix, config.Namespace)
ddInput := path.Join(dir, "dd_if")
writeBlk := strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32) // 1KiB value
loopCnt := testpatterns.MinFileSize / int64(len(writeBlk))
// initContainer cmd to create and fill dd's input file. The initContainer is used to create
// the `dd` input file which is currently 1MiB. Rather than store a 1MiB go value, a loop is
// used to create a 1MiB file in the target directory.
initCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do echo -n %s >>%s; let i+=1; done", loopCnt, writeBlk, ddInput)
clientPod := makePodSpec(config, dir, initCmd, volsrc, podSecContext)
By(fmt.Sprintf("starting %s", clientPod.Name))
podsNamespacer := cs.CoreV1().Pods(config.Namespace)
clientPod, err = podsNamespacer.Create(clientPod)
if err != nil {
return fmt.Errorf("failed to create client pod %q: %v", clientPod.Name, err)
}
defer func() {
// note the test dir will be removed when the kubelet unmounts it
By(fmt.Sprintf("deleting client pod %q...", clientPod.Name))
e := framework.DeletePodWithWait(f, cs, clientPod)
if e != nil {
framework.Logf("client pod failed to delete: %v", e)
if err == nil { // delete err is returned if err is not set
err = e
}
} else {
framework.Logf("sleeping a bit so kubelet can unmount and detach the volume")
time.Sleep(framework.PodCleanupTimeout)
}
}()
err = framework.WaitForPodRunningInNamespace(cs, clientPod)
if err != nil {
return fmt.Errorf("client pod %q not running: %v", clientPod.Name, err)
}
// create files of the passed-in file sizes and verify test file size and content
for _, fsize := range fsizes {
// file sizes must be at least `MinFileSize` and a multiple of `MinFileSize`;
// round down to the nearest multiple and clamp to the minimum
if math.Mod(float64(fsize), float64(testpatterns.MinFileSize)) != 0 {
fsize = fsize / testpatterns.MinFileSize * testpatterns.MinFileSize
if fsize < testpatterns.MinFileSize {
fsize = testpatterns.MinFileSize
}
}
fpath := path.Join(dir, fmt.Sprintf("%s-%d", file, fsize))
if err = writeToFile(clientPod, fpath, ddInput, fsize); err != nil {
return err
}
if err = verifyFile(clientPod, fpath, fsize, ddInput); err != nil {
return err
}
deleteFile(clientPod, fpath)
}
return
}

View File

@ -0,0 +1,443 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testsuites
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
noProvisioner = "kubernetes.io/no-provisioner"
pvNamePrefix = "pv"
)
type volumeModeTestSuite struct {
tsInfo TestSuiteInfo
}
var _ TestSuite = &volumeModeTestSuite{}
// InitVolumeModeTestSuite returns volumeModeTestSuite that implements TestSuite interface
func InitVolumeModeTestSuite() TestSuite {
return &volumeModeTestSuite{
tsInfo: TestSuiteInfo{
name: "volumeMode",
featureTag: "[Feature:BlockVolume]",
testPatterns: []testpatterns.TestPattern{
testpatterns.FsVolModePreprovisionedPV,
testpatterns.FsVolModeDynamicPV,
testpatterns.BlockVolModePreprovisionedPV,
testpatterns.BlockVolModeDynamicPV,
},
},
}
}
func (t *volumeModeTestSuite) getTestSuiteInfo() TestSuiteInfo {
return t.tsInfo
}
func (t *volumeModeTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver drivers.TestDriver) {
}
func createVolumeModeTestInput(pattern testpatterns.TestPattern, resource volumeModeTestResource) volumeModeTestInput {
driver := resource.driver
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
return volumeModeTestInput{
f: f,
sc: resource.sc,
pvc: resource.pvc,
pv: resource.pv,
testVolType: pattern.VolType,
nodeName: dInfo.Config.ClientNodeName,
volMode: pattern.VolMode,
isBlockSupported: dInfo.IsBlockSupported,
}
}
func getVolumeModeTestFunc(pattern testpatterns.TestPattern, driver drivers.TestDriver) func(*volumeModeTestInput) {
dInfo := driver.GetDriverInfo()
isBlockSupported := dInfo.IsBlockSupported
volMode := pattern.VolMode
volType := pattern.VolType
switch volType {
case testpatterns.PreprovisionedPV:
if volMode == v1.PersistentVolumeBlock && !isBlockSupported {
return testVolumeModeFailForPreprovisionedPV
}
return testVolumeModeSuccessForPreprovisionedPV
case testpatterns.DynamicPV:
if volMode == v1.PersistentVolumeBlock && !isBlockSupported {
return testVolumeModeFailForDynamicPV
}
return testVolumeModeSuccessForDynamicPV
default:
framework.Failf("Volume mode test doesn't support volType: %v", volType)
}
return nil
}
func (t *volumeModeTestSuite) execTest(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
Context(getTestNameStr(t, pattern), func() {
var (
resource volumeModeTestResource
input volumeModeTestInput
testFunc func(*volumeModeTestInput)
needsCleanup bool
)
testFunc = getVolumeModeTestFunc(pattern, driver)
BeforeEach(func() {
needsCleanup = false
// Skip unsupported tests to avoid unnecessary resource initialization
skipUnsupportedTest(t, driver, pattern)
needsCleanup = true
// Setup test resource for driver and testpattern
resource = volumeModeTestResource{}
resource.setupResource(driver, pattern)
// Create test input
input = createVolumeModeTestInput(pattern, resource)
})
AfterEach(func() {
if needsCleanup {
resource.cleanupResource(driver, pattern)
}
})
testFunc(&input)
})
}
type volumeModeTestResource struct {
driver drivers.TestDriver
sc *storagev1.StorageClass
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
}
var _ TestResource = &volumeModeTestResource{}
func (s *volumeModeTestResource) setupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
s.driver = driver
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
ns := f.Namespace
fsType := pattern.FsType
volBindMode := storagev1.VolumeBindingImmediate
volMode := pattern.VolMode
volType := pattern.VolType
var (
scName string
pvSource *v1.PersistentVolumeSource
)
// Create volume for pre-provisioned volume tests
drivers.CreateVolume(driver, volType)
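// (drivers are expected to make CreateVolume a no-op for volume types, such as
// DynamicPV, that need no pre-provisioned backing volume)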
switch volType {
case testpatterns.PreprovisionedPV:
if volMode == v1.PersistentVolumeBlock {
scName = fmt.Sprintf("%s-%s-sc-for-block", ns.Name, dInfo.Name)
} else if volMode == v1.PersistentVolumeFilesystem {
scName = fmt.Sprintf("%s-%s-sc-for-file", ns.Name, dInfo.Name)
}
if pDriver, ok := driver.(drivers.PreprovisionedPVTestDriver); ok {
pvSource = pDriver.GetPersistentVolumeSource(false, fsType)
if pvSource == nil {
framework.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name)
}
sc, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, volMode, *pvSource)
s.sc = sc
s.pv = framework.MakePersistentVolume(pvConfig)
s.pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns.Name)
}
case testpatterns.DynamicPV:
if dDriver, ok := driver.(drivers.DynamicPVTestDriver); ok {
s.sc = dDriver.GetDynamicProvisionStorageClass(fsType)
if s.sc == nil {
framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
}
s.sc.VolumeBindingMode = &volBindMode
claimSize := "2Gi"
s.pvc = getClaim(claimSize, ns.Name)
s.pvc.Spec.StorageClassName = &s.sc.Name
s.pvc.Spec.VolumeMode = &volMode
}
default:
framework.Failf("Volume mode test doesn't support: %s", volType)
}
}
func (s *volumeModeTestResource) cleanupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
cs := f.ClientSet
ns := f.Namespace
volType := pattern.VolType
By("Deleting pv and pvc")
errs := framework.PVPVCCleanup(cs, ns.Name, s.pv, s.pvc)
if len(errs) > 0 {
framework.Failf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs))
}
By("Deleting sc")
if s.sc != nil {
deleteStorageClass(cs, s.sc.Name)
}
// Cleanup volume for pre-provisioned volume tests
drivers.DeleteVolume(driver, volType)
}
type volumeModeTestInput struct {
f *framework.Framework
sc *storagev1.StorageClass
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
testVolType testpatterns.TestVolType
nodeName string
volMode v1.PersistentVolumeMode
isBlockSupported bool
}
func testVolumeModeFailForPreprovisionedPV(input *volumeModeTestInput) {
It("should fail to create pod by failing to mount volume", func() {
f := input.f
cs := f.ClientSet
ns := f.Namespace
var err error
By("Creating sc")
input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
input.pv, err = cs.CoreV1().PersistentVolumes().Create(input.pv)
Expect(err).NotTo(HaveOccurred())
// Prebind pv
input.pvc.Spec.VolumeName = input.pv.Name
input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, input.pv, input.pvc))
By("Creating pod")
pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc},
false, "", false, false, framework.SELinuxLabel,
nil, input.nodeName, framework.PodStartTimeout)
defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
}()
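// The delete is deferred before asserting on err so cleanup also runs when pod
// creation fails; DeletePodWithWait is a no-op for a nil pod.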
Expect(err).To(HaveOccurred())
})
}
func testVolumeModeSuccessForPreprovisionedPV(input *volumeModeTestInput) {
It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {
f := input.f
cs := f.ClientSet
ns := f.Namespace
var err error
By("Creating sc")
input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
input.pv, err = cs.CoreV1().PersistentVolumes().Create(input.pv)
Expect(err).NotTo(HaveOccurred())
// Prebind pv
input.pvc.Spec.VolumeName = input.pv.Name
input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, input.pv, input.pvc))
By("Creating pod")
pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc},
false, "", false, false, framework.SELinuxLabel,
nil, input.nodeName, framework.PodStartTimeout)
defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
}()
Expect(err).NotTo(HaveOccurred())
By("Checking if persistent volume exists as expected volume mode")
checkVolumeModeOfPath(pod, input.volMode, "/mnt/volume1")
By("Checking if read/write to persistent volume works properly")
checkReadWriteToPath(pod, input.volMode, "/mnt/volume1")
})
// TODO(mkimuram): Add more tests
}
func testVolumeModeFailForDynamicPV(input *volumeModeTestInput) {
It("should fail in binding dynamic provisioned PV to PVC", func() {
f := input.f
cs := f.ClientSet
ns := f.Namespace
var err error
By("Creating sc")
input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, input.pvc.Namespace, input.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).To(HaveOccurred())
})
}
func testVolumeModeSuccessForDynamicPV(input *volumeModeTestInput) {
It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {
f := input.f
cs := f.ClientSet
ns := f.Namespace
var err error
By("Creating sc")
input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, input.pvc.Namespace, input.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
input.pvc, err = cs.CoreV1().PersistentVolumeClaims(input.pvc.Namespace).Get(input.pvc.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
input.pv, err = cs.CoreV1().PersistentVolumes().Get(input.pvc.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("Creating pod")
pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc},
false, "", false, false, framework.SELinuxLabel,
nil, input.nodeName, framework.PodStartTimeout)
defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
}()
Expect(err).NotTo(HaveOccurred())
By("Checking if persistent volume exists as expected volume mode")
checkVolumeModeOfPath(pod, input.volMode, "/mnt/volume1")
By("Checking if read/write to persistent volume works properly")
checkReadWriteToPath(pod, input.volMode, "/mnt/volume1")
})
// TODO(mkimuram): Add more tests
}
func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1.VolumeBindingMode,
volMode v1.PersistentVolumeMode, pvSource v1.PersistentVolumeSource) (*storagev1.StorageClass,
framework.PersistentVolumeConfig, framework.PersistentVolumeClaimConfig) {
// StorageClass
scConfig := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
Name: scName,
},
Provisioner: noProvisioner,
VolumeBindingMode: &volBindMode,
}
// PV
pvConfig := framework.PersistentVolumeConfig{
PVSource: pvSource,
NamePrefix: pvNamePrefix,
StorageClassName: scName,
VolumeMode: &volMode,
}
// PVC
pvcConfig := framework.PersistentVolumeClaimConfig{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &scName,
VolumeMode: &volMode,
}
return scConfig, pvConfig, pvcConfig
}
func checkVolumeModeOfPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
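// `test -b` succeeds only for block special files and `test -d` only for
// directories, so each branch also asserts exit code 1 for the opposite check.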
if volMode == v1.PersistentVolumeBlock {
// Check if block exists
utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("test -b %s", path))
// Double check that it's not directory
utils.VerifyExecInPodFail(pod, fmt.Sprintf("test -d %s", path), 1)
} else {
// Check if directory exists
utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("test -d %s", path))
// Double check that it's not block
utils.VerifyExecInPodFail(pod, fmt.Sprintf("test -b %s", path), 1)
}
}
func checkReadWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
if volMode == v1.PersistentVolumeBlock {
// random -> file1
utils.VerifyExecInPodSucceed(pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
// file1 -> dev (write to dev)
utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
// dev -> file2 (read from dev)
utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
// file1 == file2 (check contents)
utils.VerifyExecInPodSucceed(pod, "diff /tmp/file1 /tmp/file2")
// Clean up temp files
utils.VerifyExecInPodSucceed(pod, "rm -f /tmp/file1 /tmp/file2")
// Check that creating a file under the block device path fails
utils.VerifyExecInPodFail(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
} else {
// text -> file1 (write to file)
utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
// grep file1 (read from file and check contents)
utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("grep 'Hello world.' %s/file1.txt", path))
// Check that writing raw data over the directory path fails
utils.VerifyExecInPodFail(pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
}
}

View File

@ -0,0 +1,160 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This test checks that various VolumeSources are working.
// test/e2e/common/volumes.go duplicates the GlusterFS test from this file. Any changes made to this
// test should be made there as well.
package testsuites
import (
"fmt"
. "github.com/onsi/ginkgo"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
type volumesTestSuite struct {
tsInfo TestSuiteInfo
}
var _ TestSuite = &volumesTestSuite{}
// InitVolumesTestSuite returns volumesTestSuite that implements TestSuite interface
func InitVolumesTestSuite() TestSuite {
return &volumesTestSuite{
tsInfo: TestSuiteInfo{
name: "volumes",
testPatterns: []testpatterns.TestPattern{
// Default fsType
testpatterns.DefaultFsInlineVolume,
testpatterns.DefaultFsPreprovisionedPV,
testpatterns.DefaultFsDynamicPV,
// ext3
testpatterns.Ext3InlineVolume,
testpatterns.Ext3PreprovisionedPV,
testpatterns.Ext3DynamicPV,
// ext4
testpatterns.Ext4InlineVolume,
testpatterns.Ext4PreprovisionedPV,
testpatterns.Ext4DynamicPV,
// xfs
testpatterns.XfsInlineVolume,
testpatterns.XfsPreprovisionedPV,
testpatterns.XfsDynamicPV,
},
},
}
}
func (t *volumesTestSuite) getTestSuiteInfo() TestSuiteInfo {
return t.tsInfo
}
func (t *volumesTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver drivers.TestDriver) {
dInfo := driver.GetDriverInfo()
if !dInfo.IsPersistent {
framework.Skipf("Driver %q does not provide persistency - skipping", dInfo.Name)
}
}
func createVolumesTestInput(pattern testpatterns.TestPattern, resource genericVolumeTestResource) volumesTestInput {
var fsGroup *int64
driver := resource.driver
dInfo := driver.GetDriverInfo()
f := dInfo.Framework
volSource := resource.volSource
if volSource == nil {
framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
}
if dInfo.IsFsGroupSupported {
fsGroupVal := int64(1234)
fsGroup = &fsGroupVal
}
return volumesTestInput{
f: f,
name: dInfo.Name,
config: dInfo.Config,
fsGroup: fsGroup,
tests: []framework.VolumeTest{
{
Volume: *volSource,
File: "index.html",
// Must match content
ExpectedContent: fmt.Sprintf("Hello from %s from namespace %s",
dInfo.Name, f.Namespace.Name),
},
},
}
}
func (t *volumesTestSuite) execTest(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
Context(getTestNameStr(t, pattern), func() {
var (
resource genericVolumeTestResource
input volumesTestInput
needsCleanup bool
)
BeforeEach(func() {
needsCleanup = false
// Skip unsupported tests to avoid unnecessary resource initialization
skipUnsupportedTest(t, driver, pattern)
needsCleanup = true
// Setup test resource for driver and testpattern
resource = genericVolumeTestResource{}
resource.setupResource(driver, pattern)
// Create test input
input = createVolumesTestInput(pattern, resource)
})
AfterEach(func() {
if needsCleanup {
resource.cleanupResource(driver, pattern)
}
})
testVolumes(&input)
})
}
type volumesTestInput struct {
f *framework.Framework
name string
config framework.VolumeTestConfig
fsGroup *int64
tests []framework.VolumeTest
}
func testVolumes(input *volumesTestInput) {
It("should be mountable", func() {
f := input.f
cs := f.ClientSet
defer framework.VolumeTestCleanup(f, input.config)
volumeTest := input.tests
framework.InjectHtml(cs, input.config, volumeTest[0].Volume, volumeTest[0].ExpectedContent)
framework.TestVolumeClient(cs, input.config, input.fsGroup, input.tests)
})
}

View File

@ -1,434 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* This test checks that the plugin VolumeSources are working when pseudo-streaming
* various write sizes to mounted files. Note that the plugin is defined inline in
* the pod spec, not via a persistent volume and claim.
*
* These tests work only when privileged containers are allowed, exporting various
* filesystems (NFS, GlusterFS, ...) usually needs some mounting or other privileged
* magic in the server pod. Note that the server containers are for testing purposes
* only and should not be used in production.
*/
package storage
import (
"fmt"
"math"
"path"
"strconv"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
minFileSize = 1 * framework.MiB
fileSizeSmall = 1 * framework.MiB
fileSizeMedium = 100 * framework.MiB
fileSizeLarge = 1 * framework.GiB
)
// MD5 hashes of the test file corresponding to each file size.
// Test files are generated in testVolumeIO()
// If test file generation algorithm changes, these must be recomputed.
var md5hashes = map[int64]string{
fileSizeSmall: "5c34c2813223a7ca05a3c2f38c0d1710",
fileSizeMedium: "f2fa202b1ffeedda5f3a58bd1ae81104",
fileSizeLarge: "8d763edc71bd16217664793b5a15e403",
}
// Return the plugin's client pod spec. Use an InitContainer to setup the file i/o test env.
func makePodSpec(config framework.VolumeTestConfig, dir, initCmd string, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext) *v1.Pod {
volName := fmt.Sprintf("%s-%s", config.Prefix, "io-volume")
var gracePeriod int64 = 1
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-io-client",
Labels: map[string]string{
"role": config.Prefix + "-io-client",
},
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: config.Prefix + "-io-init",
Image: framework.BusyBoxImage,
Command: []string{
"/bin/sh",
"-c",
initCmd,
},
VolumeMounts: []v1.VolumeMount{
{
Name: volName,
MountPath: dir,
},
},
},
},
Containers: []v1.Container{
{
Name: config.Prefix + "-io-client",
Image: framework.BusyBoxImage,
Command: []string{
"/bin/sh",
"-c",
"sleep 3600", // keep pod alive until explicitly deleted
},
VolumeMounts: []v1.VolumeMount{
{
Name: volName,
MountPath: dir,
},
},
},
},
TerminationGracePeriodSeconds: &gracePeriod,
SecurityContext: podSecContext,
Volumes: []v1.Volume{
{
Name: volName,
VolumeSource: volsrc,
},
},
RestartPolicy: v1.RestartPolicyNever, // want pod to fail if init container fails
},
}
}
// Write `fsize` bytes to `fpath` in the pod, using dd and the `dd_input` file.
func writeToFile(pod *v1.Pod, fpath, dd_input string, fsize int64) error {
By(fmt.Sprintf("writing %d bytes to test file %s", fsize, fpath))
loopCnt := fsize / minFileSize
writeCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done", loopCnt, dd_input, minFileSize, fpath)
_, err := utils.PodExec(pod, writeCmd)
return err
}
// Verify that the test file is the expected size and contains the expected content.
func verifyFile(pod *v1.Pod, fpath string, expectSize int64, dd_input string) error {
By("verifying file size")
rtnstr, err := utils.PodExec(pod, fmt.Sprintf("stat -c %%s %s", fpath))
if err != nil || rtnstr == "" {
return fmt.Errorf("unable to get file size via `stat %s`: %v", fpath, err)
}
size, err := strconv.Atoi(strings.TrimSuffix(rtnstr, "\n"))
if err != nil {
return fmt.Errorf("unable to convert string %q to int: %v", rtnstr, err)
}
if int64(size) != expectSize {
return fmt.Errorf("size of file %s is %d, expected %d", fpath, size, expectSize)
}
By("verifying file hash")
rtnstr, err = utils.PodExec(pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath))
if err != nil {
return fmt.Errorf("unable to test file hash via `md5sum %s`: %v", fpath, err)
}
actualHash := strings.TrimSuffix(rtnstr, "\n")
expectedHash, ok := md5hashes[expectSize]
if !ok {
return fmt.Errorf("File hash is unknown for file size %d. Was a new file size added to the test suite?",
expectSize)
}
if actualHash != expectedHash {
return fmt.Errorf("MD5 hash is incorrect for file %s with size %d. Expected: `%s`; Actual: `%s`",
fpath, expectSize, expectedHash, actualHash)
}
return nil
}
// Delete `fpath` to save some disk space on host. Delete errors are logged but ignored.
func deleteFile(pod *v1.Pod, fpath string) {
By(fmt.Sprintf("deleting test file %s...", fpath))
_, err := utils.PodExec(pod, fmt.Sprintf("rm -f %s", fpath))
if err != nil {
// keep going, the test dir will be deleted when the volume is unmounted
framework.Logf("unable to delete test file %s: %v\nerror ignored, continuing test", fpath, err)
}
}
// Create the client pod and create files of the sizes passed in by the `fsizes` parameter. Delete the
// client pod and the new files when done.
// Note: the file name is appended to "/opt/<Prefix>/<namespace>", eg. "/opt/nfs/e2e-.../<file>".
// Note: nil can be passed for the podSecContext parm, in which case it is ignored.
// Note: `fsizes` values are enforced to each be at least `minFileSize` and a multiple of `minFileSize`
// bytes.
func testVolumeIO(f *framework.Framework, cs clientset.Interface, config framework.VolumeTestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) {
dir := path.Join("/opt", config.Prefix, config.Namespace)
dd_input := path.Join(dir, "dd_if")
writeBlk := strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32) // 1KiB value
loopCnt := minFileSize / int64(len(writeBlk))
// initContainer cmd to create and fill dd's input file. The initContainer is used to create
// the `dd` input file which is currently 1MiB. Rather than store a 1MiB go value, a loop is
// used to create a 1MiB file in the target directory.
initCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do echo -n %s >>%s; let i+=1; done", loopCnt, writeBlk, dd_input)
clientPod := makePodSpec(config, dir, initCmd, volsrc, podSecContext)
By(fmt.Sprintf("starting %s", clientPod.Name))
podsNamespacer := cs.CoreV1().Pods(config.Namespace)
clientPod, err = podsNamespacer.Create(clientPod)
if err != nil {
return fmt.Errorf("failed to create client pod %q: %v", clientPod.Name, err)
}
defer func() {
// note the test dir will be removed when the kubelet unmounts it
By(fmt.Sprintf("deleting client pod %q...", clientPod.Name))
e := framework.DeletePodWithWait(f, cs, clientPod)
if e != nil {
framework.Logf("client pod failed to delete: %v", e)
if err == nil { // delete err is returned if err is not set
err = e
}
} else {
framework.Logf("sleeping a bit so kubelet can unmount and detach the volume")
time.Sleep(framework.PodCleanupTimeout)
}
}()
err = framework.WaitForPodRunningInNamespace(cs, clientPod)
if err != nil {
return fmt.Errorf("client pod %q not running: %v", clientPod.Name, err)
}
// create files of the passed-in file sizes and verify test file size and content
for _, fsize := range fsizes {
// file sizes must be a multiple of `minFileSize`
if math.Mod(float64(fsize), float64(minFileSize)) != 0 {
fsize = fsize/minFileSize + minFileSize
}
fpath := path.Join(dir, fmt.Sprintf("%s-%d", file, fsize))
if err = writeToFile(clientPod, fpath, dd_input, fsize); err != nil {
return err
}
if err = verifyFile(clientPod, fpath, fsize, dd_input); err != nil {
return err
}
deleteFile(clientPod, fpath)
}
return
}
// These tests need privileged containers which are disabled by default.
// TODO: support all of the plugins tested in storage/volumes.go
var _ = utils.SIGDescribe("Volume plugin streaming [Slow]", func() {
f := framework.NewDefaultFramework("volume-io")
var (
config framework.VolumeTestConfig
cs clientset.Interface
ns string
serverIP string
serverPod *v1.Pod
volSource v1.VolumeSource
)
BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
})
////////////////////////////////////////////////////////////////////////
// NFS
////////////////////////////////////////////////////////////////////////
Describe("NFS", func() {
testFile := "nfs_io_test"
// client pod uses selinux
podSec := v1.PodSecurityContext{
SELinuxOptions: &v1.SELinuxOptions{
Level: "s0:c0,c1",
},
}
BeforeEach(func() {
config, serverPod, serverIP = framework.NewNFSServer(cs, ns, []string{})
volSource = v1.VolumeSource{
NFS: &v1.NFSVolumeSource{
Server: serverIP,
Path: "/",
ReadOnly: false,
},
}
})
AfterEach(func() {
framework.Logf("AfterEach: deleting NFS server pod %q...", serverPod.Name)
err := framework.DeletePodWithWait(f, cs, serverPod)
Expect(err).NotTo(HaveOccurred(), "AfterEach: NFS server pod failed to delete")
})
It("should write files of various sizes, verify size, validate content", func() {
fileSizes := []int64{fileSizeSmall, fileSizeMedium, fileSizeLarge}
err := testVolumeIO(f, cs, config, volSource, &podSec, testFile, fileSizes)
Expect(err).NotTo(HaveOccurred())
})
})
////////////////////////////////////////////////////////////////////////
// Gluster
////////////////////////////////////////////////////////////////////////
Describe("GlusterFS", func() {
var name string
testFile := "gluster_io_test"
BeforeEach(func() {
framework.SkipUnlessNodeOSDistroIs("gci")
// create gluster server and endpoints
config, serverPod, serverIP = framework.NewGlusterfsServer(cs, ns)
name = config.Prefix + "-server"
volSource = v1.VolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{
EndpointsName: name,
// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
Path: "test_vol",
ReadOnly: false,
},
}
})
AfterEach(func() {
framework.Logf("AfterEach: deleting Gluster endpoints %q...", name)
epErr := cs.CoreV1().Endpoints(ns).Delete(name, nil)
framework.Logf("AfterEach: deleting Gluster server pod %q...", serverPod.Name)
err := framework.DeletePodWithWait(f, cs, serverPod)
if epErr != nil || err != nil {
if epErr != nil {
framework.Logf("AfterEach: Gluster delete endpoints failed: %v", err)
}
if err != nil {
framework.Logf("AfterEach: Gluster server pod delete failed: %v", err)
}
framework.Failf("AfterEach: cleanup failed")
}
})
It("should write files of various sizes, verify size, validate content", func() {
fileSizes := []int64{fileSizeSmall, fileSizeMedium}
err := testVolumeIO(f, cs, config, volSource, nil /*no secContext*/, testFile, fileSizes)
Expect(err).NotTo(HaveOccurred())
})
})
////////////////////////////////////////////////////////////////////////
// iSCSI
// The iscsiadm utility and iscsi target kernel modules must be installed on all nodes.
////////////////////////////////////////////////////////////////////////
Describe("iSCSI [Feature:Volumes]", func() {
testFile := "iscsi_io_test"
BeforeEach(func() {
config, serverPod, serverIP = framework.NewISCSIServer(cs, ns)
volSource = v1.VolumeSource{
ISCSI: &v1.ISCSIVolumeSource{
TargetPortal: serverIP + ":3260",
// from test/images/volumes-tester/iscsi/initiatorname.iscsi
IQN: "iqn.2003-01.org.linux-iscsi.f21.x8664:sn.4b0aae584f7c",
Lun: 0,
FSType: "ext2",
ReadOnly: false,
},
}
})
AfterEach(func() {
framework.Logf("AfterEach: deleting iSCSI server pod %q...", serverPod.Name)
err := framework.DeletePodWithWait(f, cs, serverPod)
Expect(err).NotTo(HaveOccurred(), "AfterEach: iSCSI server pod failed to delete")
})
It("should write files of various sizes, verify size, validate content", func() {
fileSizes := []int64{fileSizeSmall, fileSizeMedium}
fsGroup := int64(1234)
podSec := v1.PodSecurityContext{
FSGroup: &fsGroup,
}
err := testVolumeIO(f, cs, config, volSource, &podSec, testFile, fileSizes)
Expect(err).NotTo(HaveOccurred())
})
})
////////////////////////////////////////////////////////////////////////
// Ceph RBD
////////////////////////////////////////////////////////////////////////
Describe("Ceph-RBD [Feature:Volumes]", func() {
var (
secret *v1.Secret
)
testFile := "ceph-rbd_io_test"
BeforeEach(func() {
config, serverPod, secret, serverIP = framework.NewRBDServer(cs, ns)
volSource = v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{serverIP},
RBDPool: "rbd",
RBDImage: "foo",
RadosUser: "admin",
SecretRef: &v1.LocalObjectReference{
Name: secret.Name,
},
FSType: "ext2",
ReadOnly: false,
},
}
})
AfterEach(func() {
framework.Logf("AfterEach: deleting Ceph-RDB server secret %q...", secret.Name)
secErr := cs.CoreV1().Secrets(ns).Delete(secret.Name, &metav1.DeleteOptions{})
framework.Logf("AfterEach: deleting Ceph-RDB server pod %q...", serverPod.Name)
err := framework.DeletePodWithWait(f, cs, serverPod)
if secErr != nil || err != nil {
if secErr != nil {
framework.Logf("AfterEach: Ceph-RDB delete secret failed: %v", secErr)
}
if err != nil {
framework.Logf("AfterEach: Ceph-RDB server pod delete failed: %v", err)
}
framework.Failf("AfterEach: cleanup failed")
}
})
It("should write files of various sizes, verify size, validate content", func() {
fileSizes := []int64{fileSizeSmall, fileSizeMedium}
fsGroup := int64(1234)
podSec := v1.PodSecurityContext{
FSGroup: &fsGroup,
}
err := testVolumeIO(f, cs, config, volSource, &podSec, testFile, fileSizes)
Expect(err).NotTo(HaveOccurred())
})
})
})

View File

@ -1105,7 +1105,7 @@ func updateDefaultStorageClass(c clientset.Interface, scName string, defaultStr
verifyDefaultStorageClass(c, scName, expectedDefault)
}
func newClaim(t storageClassTest, ns, suffix string) *v1.PersistentVolumeClaim {
func getClaim(claimSize string, ns string) *v1.PersistentVolumeClaim {
claim := v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-",
@ -1117,7 +1117,7 @@ func newClaim(t storageClassTest, ns, suffix string) *v1.PersistentVolumeClaim {
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(t.claimSize),
v1.ResourceName(v1.ResourceStorage): resource.MustParse(claimSize),
},
},
},
@ -1126,6 +1126,10 @@ func newClaim(t storageClassTest, ns, suffix string) *v1.PersistentVolumeClaim {
return &claim
}
func newClaim(t storageClassTest, ns, suffix string) *v1.PersistentVolumeClaim {
return getClaim(t.claimSize, ns)
}
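A minimal hedged sketch of calling the extracted getClaim helper directly, as callers outside the storageClassTest-based tests can now do; the "2Gi" size and error handling are illustrative, and c (a clientset.Interface) and ns (a namespace name) are assumed to be in scope:
// Illustrative only: build a PVC spec via the extracted helper and submit it.
claim := getClaim("2Gi", ns)
claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim)
if err != nil {
	framework.Failf("PVC Create API error: %v", err)
}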
// runInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
func runInPodWithVolume(c clientset.Interface, ns, claimName, nodeName, command string) {
pod := &v1.Pod{
@ -1217,6 +1221,20 @@ func newStorageClass(t storageClassTest, ns string, suffix string) *storage.Stor
if t.delayBinding {
bindingMode = storage.VolumeBindingWaitForFirstConsumer
}
return getStorageClass(pluginName, t.parameters, &bindingMode, ns, suffix)
}
func getStorageClass(
provisioner string,
parameters map[string]string,
bindingMode *storage.VolumeBindingMode,
ns string,
suffix string,
) *storage.StorageClass {
if bindingMode == nil {
defaultBindingMode := storage.VolumeBindingImmediate
bindingMode = &defaultBindingMode
}
return &storage.StorageClass{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
@ -1225,9 +1243,9 @@ func newStorageClass(t storageClassTest, ns string, suffix string) *storage.Stor
// Name must be unique, so let's base it on namespace name
Name: ns + "-" + suffix,
},
Provisioner: pluginName,
Parameters: t.parameters,
VolumeBindingMode: &bindingMode,
Provisioner: provisioner,
Parameters: parameters,
VolumeBindingMode: bindingMode,
}
}
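A hedged usage sketch of the new helper: a nil bindingMode falls back to storage.VolumeBindingImmediate per the default above, so callers other than newStorageClass can omit it. The provisioner name and parameters are illustrative, not taken from this diff; c and ns are assumed to be in scope:
// Illustrative only: nil bindingMode yields VolumeBindingImmediate.
sc := getStorageClass("kubernetes.io/gce-pd", map[string]string{"type": "pd-standard"}, nil, ns, "sc")
sc, err := c.StorageV1().StorageClasses().Create(sc)
if err != nil {
	framework.Failf("StorageClass Create API error: %v", err)
}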

View File

@ -14,72 +14,18 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* This test checks that various VolumeSources are working.
*
* There are two ways to test the volumes:
* 1) With containerized server (NFS, Ceph, Gluster, iSCSI, ...)
* The test creates a server pod, exporting simple 'index.html' file.
* Then it uses appropriate VolumeSource to import this file into a client pod
* and checks that the pod can see the file. It does so by importing the file
* into the web server root and loading the index.html from it.
*
* These tests work only when privileged containers are allowed; exporting
* various filesystems (NFS, GlusterFS, ...) usually needs some mounting or
* other privileged magic in the server pod.
*
* Note that the server containers are for testing purposes only and should not
* be used in production.
*
* 2) With server outside of Kubernetes (Cinder, ...)
* An appropriate server (e.g. OpenStack Cinder) must exist somewhere outside
* the tested Kubernetes cluster. The test itself creates a new volume
* and checks that Kubernetes can use it as a volume.
*/
// test/e2e/common/volumes.go duplicates the GlusterFS test from this file. Any changes made to this
// test should be made there as well.
// This test is the volumes test for ConfigMap.
package storage
import (
"os/exec"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
vspheretest "k8s.io/kubernetes/test/e2e/storage/vsphere"
)
func DeleteCinderVolume(name string) error {
// Try to delete the volume for several seconds - it takes
// a while for the plugin to detach it.
var output []byte
var err error
timeout := time.Second * 120
framework.Logf("Waiting up to %v for removal of cinder volume %s", timeout, name)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
output, err = exec.Command("cinder", "delete", name).CombinedOutput()
if err == nil {
framework.Logf("Cinder volume %s deleted", name)
return nil
} else {
framework.Logf("Failed to delete volume %s: %v", name, err)
}
}
framework.Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:]))
return err
}
// These tests need privileged containers, which are disabled by default.
var _ = utils.SIGDescribe("Volumes", func() {
f := framework.NewDefaultFramework("volume")
@ -94,277 +40,6 @@ var _ = utils.SIGDescribe("Volumes", func() {
namespace = f.Namespace
})
////////////////////////////////////////////////////////////////////////
// NFS
////////////////////////////////////////////////////////////////////////
Describe("NFS", func() {
It("should be mountable", func() {
config, _, serverIP := framework.NewNFSServer(cs, namespace.Name, []string{})
defer framework.VolumeTestCleanup(f, config)
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
NFS: &v1.NFSVolumeSource{
Server: serverIP,
Path: "/",
ReadOnly: true,
},
},
File: "index.html",
// Must match content of test/images/volumes-tester/nfs/index.html
ExpectedContent: "Hello from NFS!",
},
}
framework.TestVolumeClient(cs, config, nil, tests)
})
})
////////////////////////////////////////////////////////////////////////
// Gluster
////////////////////////////////////////////////////////////////////////
Describe("GlusterFS", func() {
It("should be mountable", func() {
// TODO (copejon): GFS is not supported on debian image.
framework.SkipUnlessNodeOSDistroIs("gci", "ubuntu", "custom")
// create gluster server and endpoints
config, _, _ := framework.NewGlusterfsServer(cs, namespace.Name)
name := config.Prefix + "-server"
defer func() {
framework.VolumeTestCleanup(f, config)
err := cs.CoreV1().Endpoints(namespace.Name).Delete(name, nil)
Expect(err).NotTo(HaveOccurred(), "defer: Gluster delete endpoints failed")
}()
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{
EndpointsName: name,
// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
Path: "test_vol",
ReadOnly: true,
},
},
File: "index.html",
// Must match content of test/images/volumes-tester/gluster/index.html
ExpectedContent: "Hello from GlusterFS!",
},
}
framework.TestVolumeClient(cs, config, nil, tests)
})
})
////////////////////////////////////////////////////////////////////////
// iSCSI
////////////////////////////////////////////////////////////////////////
// The test needs privileged containers, which are disabled by default.
// Also, make sure that iscsiadm utility and iscsi target kernel modules
// are installed on all nodes!
// Run the test with "go run hack/e2e.go ... --ginkgo.focus=iSCSI"
Describe("iSCSI [Feature:Volumes]", func() {
It("should be mountable", func() {
config, _, serverIP := framework.NewISCSIServer(cs, namespace.Name)
defer framework.VolumeTestCleanup(f, config)
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
ISCSI: &v1.ISCSIVolumeSource{
TargetPortal: serverIP + ":3260",
// from test/images/volumes-tester/iscsi/initiatorname.iscsi
IQN: "iqn.2003-01.org.linux-iscsi.f21.x8664:sn.4b0aae584f7c",
Lun: 0,
FSType: "ext2",
},
},
File: "index.html",
// Must match content of test/images/volumes-tester/iscsi/block.tar.gz
ExpectedContent: "Hello from iSCSI",
},
}
fsGroup := int64(1234)
framework.TestVolumeClient(cs, config, &fsGroup, tests)
})
})
////////////////////////////////////////////////////////////////////////
// Ceph RBD
////////////////////////////////////////////////////////////////////////
Describe("Ceph RBD [Feature:Volumes]", func() {
It("should be mountable", func() {
config, _, secret, serverIP := framework.NewRBDServer(cs, namespace.Name)
defer framework.VolumeTestCleanup(f, config)
defer cs.CoreV1().Secrets(config.Namespace).Delete(secret.Name, nil)
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{serverIP},
RBDPool: "rbd",
RBDImage: "foo",
RadosUser: "admin",
SecretRef: &v1.LocalObjectReference{
Name: secret.Name,
},
FSType: "ext2",
},
},
File: "index.html",
// Must match content of test/images/volumes-tester/rbd/create_block.sh
ExpectedContent: "Hello from RBD",
},
}
fsGroup := int64(1234)
framework.TestVolumeClient(cs, config, &fsGroup, tests)
})
})
////////////////////////////////////////////////////////////////////////
// Ceph
////////////////////////////////////////////////////////////////////////
Describe("CephFS [Feature:Volumes]", func() {
It("should be mountable", func() {
config, _, secret, serverIP := framework.NewRBDServer(cs, namespace.Name)
defer framework.VolumeTestCleanup(f, config)
defer cs.CoreV1().Secrets(config.Namespace).Delete(secret.Name, nil)
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
CephFS: &v1.CephFSVolumeSource{
Monitors: []string{serverIP + ":6789"},
User: "kube",
SecretRef: &v1.LocalObjectReference{Name: secret.Name},
ReadOnly: true,
},
},
File: "index.html",
// Must match content of test/images/volumes-tester/ceph/index.html
ExpectedContent: "Hello Ceph!",
},
}
framework.TestVolumeClient(cs, config, nil, tests)
})
})
////////////////////////////////////////////////////////////////////////
// OpenStack Cinder
////////////////////////////////////////////////////////////////////////
// This test assumes that OpenStack client tools are installed
// (/usr/bin/nova, /usr/bin/cinder and /usr/bin/keystone)
// and that the usual OpenStack authentication env. variables are set
// (OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME at least).
Describe("Cinder", func() {
It("should be mountable", func() {
framework.SkipUnlessProviderIs("openstack")
config := framework.VolumeTestConfig{
Namespace: namespace.Name,
Prefix: "cinder",
}
// We assume that namespace.Name is a random string
volumeName := namespace.Name
By("creating a test Cinder volume")
output, err := exec.Command("cinder", "create", "--display-name="+volumeName, "1").CombinedOutput()
outputString := string(output[:])
framework.Logf("cinder output:\n%s", outputString)
Expect(err).NotTo(HaveOccurred())
defer DeleteCinderVolume(volumeName)
// Parse 'id' from stdout. Expected format:
// | attachments | [] |
// | availability_zone | nova |
// ...
// | id | 1d6ff08f-5d1c-41a4-ad72-4ef872cae685 |
volumeID := ""
for _, line := range strings.Split(outputString, "\n") {
fields := strings.Fields(line)
if len(fields) != 5 {
continue
}
if fields[1] != "id" {
continue
}
volumeID = fields[3]
break
}
framework.Logf("Volume ID: %s", volumeID)
Expect(volumeID).NotTo(Equal(""))
defer func() {
framework.Logf("Running volumeTestCleanup")
framework.VolumeTestCleanup(f, config)
}()
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
Cinder: &v1.CinderVolumeSource{
VolumeID: volumeID,
FSType: "ext3",
ReadOnly: false,
},
},
File: "index.html",
// Randomize index.html to make sure we don't see the
// content from previous test runs.
ExpectedContent: "Hello from Cinder from namespace " + volumeName,
},
}
framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)
fsGroup := int64(1234)
framework.TestVolumeClient(cs, config, &fsGroup, tests)
})
})
////////////////////////////////////////////////////////////////////////
// GCE PD
////////////////////////////////////////////////////////////////////////
Describe("PD", func() {
var config framework.VolumeTestConfig
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
config = framework.VolumeTestConfig{
Namespace: namespace.Name,
Prefix: "pd",
// PD will be created in framework.TestContext.CloudConfig.Zone zone,
// so pods should also be scheduled there.
NodeSelector: map[string]string{
kubeletapis.LabelZoneFailureDomain: framework.TestContext.CloudConfig.Zone,
},
}
})
It("should be mountable with ext3", func() {
testGCEPD(f, config, cs, "ext3")
})
It("should be mountable with ext4", func() {
testGCEPD(f, config, cs, "ext4")
})
It("should be mountable with xfs", func() {
// xfs is not supported on gci
// and not installed by default on debian
framework.SkipUnlessNodeOSDistroIs("ubuntu", "custom")
testGCEPD(f, config, cs, "xfs")
})
})
////////////////////////////////////////////////////////////////////////
// ConfigMap
////////////////////////////////////////////////////////////////////////
Describe("ConfigMap", func() {
It("should be mountable", func() {
config := framework.VolumeTestConfig{
@ -434,139 +109,4 @@ var _ = utils.SIGDescribe("Volumes", func() {
framework.TestVolumeClient(cs, config, nil, tests)
})
})
////////////////////////////////////////////////////////////////////////
// vSphere
////////////////////////////////////////////////////////////////////////
Describe("vsphere", func() {
It("should be mountable", func() {
framework.SkipUnlessProviderIs("vsphere")
vspheretest.Bootstrap(f)
nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo()
var volumePath string
config := framework.VolumeTestConfig{
Namespace: namespace.Name,
Prefix: "vsphere",
}
volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
defer func() {
nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}()
defer func() {
framework.Logf("Running volumeTestCleanup")
framework.VolumeTestCleanup(f, config)
}()
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: volumePath,
FSType: "ext4",
},
},
File: "index.html",
// Randomize index.html to make sure we don't see the
// content from previous test runs.
ExpectedContent: "Hello from vSphere from namespace " + namespace.Name,
},
}
framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)
fsGroup := int64(1234)
framework.TestVolumeClient(cs, config, &fsGroup, tests)
})
})
////////////////////////////////////////////////////////////////////////
// Azure Disk
////////////////////////////////////////////////////////////////////////
Describe("Azure Disk", func() {
It("should be mountable [Slow]", func() {
framework.SkipUnlessProviderIs("azure")
config := framework.VolumeTestConfig{
Namespace: namespace.Name,
Prefix: "azure",
}
By("creating a test azure disk volume")
volumeName, err := framework.CreatePDWithRetry()
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.DeletePDWithRetry(volumeName)
}()
defer func() {
framework.Logf("Running volumeTestCleanup")
framework.VolumeTestCleanup(f, config)
}()
fsType := "ext4"
readOnly := false
diskName := volumeName[(strings.LastIndex(volumeName, "/") + 1):]
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
DiskName: diskName,
DataDiskURI: volumeName,
FSType: &fsType,
ReadOnly: &readOnly,
},
},
File: "index.html",
// Randomize index.html to make sure we don't see the
// content from previous test runs.
ExpectedContent: "Hello from Azure from namespace " + volumeName,
},
}
framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)
fsGroup := int64(1234)
framework.TestVolumeClient(cs, config, &fsGroup, tests)
})
})
})
func testGCEPD(f *framework.Framework, config framework.VolumeTestConfig, cs clientset.Interface, fs string) {
By("creating a test gce pd volume")
volumeName, err := framework.CreatePDWithRetry()
Expect(err).NotTo(HaveOccurred())
defer func() {
// - Get NodeName from the pod spec to which the volume is mounted.
// - Force detach and delete.
pod, err := f.PodClient().Get(config.Prefix+"-client", metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed getting pod %q.", config.Prefix+"-client")
detachAndDeletePDs(volumeName, []types.NodeName{types.NodeName(pod.Spec.NodeName)})
}()
defer func() {
framework.Logf("Running volumeTestCleanup")
framework.VolumeTestCleanup(f, config)
}()
tests := []framework.VolumeTest{
{
Volume: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: volumeName,
FSType: fs,
ReadOnly: false,
},
},
File: "index.html",
// Randomize index.html to make sure we don't see the
// content from previous test runs.
ExpectedContent: "Hello from GCE from namespace " + volumeName,
},
}
framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)
fsGroup := int64(1234)
framework.TestVolumeClient(cs, config, &fsGroup, tests)
}