Merge pull request #73525 from cofyc/fix71237-intree

Implement localDriver with various local volume types
Kubernetes Prow Robot 2019-02-06 10:44:37 -08:00 committed by GitHub
commit 3dedf4b7f6
8 changed files with 696 additions and 29 deletions

View File

@@ -39,6 +39,7 @@ import (
"fmt"
"math/rand"
"os/exec"
"strconv"
"strings"
"time"
@@ -120,7 +121,7 @@ func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, testResource i
}
}
func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.PersistentVolumeSource {
func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
ntr, ok := testResource.(*nfsTestResource)
Expect(ok).To(BeTrue(), "Failed to cast test resource to NFS Test Resource")
return &v1.PersistentVolumeSource{
@@ -129,7 +130,7 @@ func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, test
Path: "/",
ReadOnly: readOnly,
},
}
}, nil
}
func (n *nfsDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
@@ -276,7 +277,7 @@ func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, testReso
}
}
func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.PersistentVolumeSource {
func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
gtr, ok := testResource.(*glusterTestResource)
Expect(ok).To(BeTrue(), "Failed to cast test resource to Gluster Test Resource")
@@ -288,7 +289,7 @@ func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string
Path: "test_vol",
ReadOnly: readOnly,
},
}
}, nil
}
func (g *glusterFSDriver) CreateDriver() {
@@ -402,7 +403,7 @@ func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, testResource
return &volSource
}
func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.PersistentVolumeSource {
func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
itr, ok := testResource.(*iSCSITestResource)
Expect(ok).To(BeTrue(), "Failed to cast test resource to iSCSI Test Resource")
@@ -417,7 +418,7 @@ func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, te
if fsType != "" {
pvSource.ISCSI.FSType = fsType
}
return &pvSource
return &pvSource, nil
}
func (i *iSCSIDriver) CreateDriver() {
@@ -519,7 +520,7 @@ func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, testResource i
return &volSource
}
func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.PersistentVolumeSource {
func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
f := r.driverInfo.Config.Framework
ns := f.Namespace
@@ -542,7 +543,7 @@ func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, test
if fsType != "" {
pvSource.RBD.FSType = fsType
}
return &pvSource
return &pvSource, nil
}
func (r *rbdDriver) CreateDriver() {
@@ -637,7 +638,7 @@ func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, testResourc
}
}
func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.PersistentVolumeSource {
func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
f := c.driverInfo.Config.Framework
ns := f.Namespace
@@ -654,7 +655,7 @@ func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, t
},
ReadOnly: readOnly,
},
}
}, nil
}
func (c *cephFSDriver) CreateDriver() {
@@ -1026,7 +1027,7 @@ func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, testResourc
return &volSource
}
func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.PersistentVolumeSource {
func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
ctr, ok := testResource.(*cinderTestResource)
Expect(ok).To(BeTrue(), "Failed to cast test resource to Cinder Test Resource")
@@ -1039,7 +1040,7 @@ func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, t
if fsType != "" {
pvSource.Cinder.FSType = fsType
}
return &pvSource
return &pvSource, nil
}
func (c *cinderDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
@@ -1192,7 +1193,7 @@ func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, testResource
return &volSource
}
func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.PersistentVolumeSource {
func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
gtr, ok := testResource.(*gcePdTestResource)
Expect(ok).To(BeTrue(), "Failed to cast test resource to GCE PD Test Resource")
pvSource := v1.PersistentVolumeSource{
@@ -1204,7 +1205,7 @@ func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, te
if fsType != "" {
pvSource.GCEPersistentDisk.FSType = fsType
}
return &pvSource
return &pvSource, nil
}
func (g *gcePdDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
@@ -1315,14 +1316,14 @@ func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, testResour
return &volSource
}
func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.PersistentVolumeSource {
func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
vtr, ok := testResource.(*vSphereTestResource)
Expect(ok).To(BeTrue(), "Failed to cast test resource to vSphere Test Resource")
// vSphere driver doesn't seem to support readOnly volume
// TODO: check if it is correct
if readOnly {
return nil
return nil, nil
}
pvSource := v1.PersistentVolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
@@ -1332,7 +1333,7 @@ func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string,
if fsType != "" {
pvSource.VsphereVolume.FSType = fsType
}
return &pvSource
return &pvSource, nil
}
func (v *vSphereDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
@@ -1440,7 +1441,7 @@ func (a *azureDriver) GetVolumeSource(readOnly bool, fsType string, testResource
return &volSource
}
func (a *azureDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.PersistentVolumeSource {
func (a *azureDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
atr, ok := testResource.(*azureTestResource)
Expect(ok).To(BeTrue(), "Failed to cast test resource to Azure Test Resource")
@@ -1456,7 +1457,7 @@ func (a *azureDriver) GetPersistentVolumeSource(readOnly bool, fsType string, te
if fsType != "" {
pvSource.AzureDisk.FSType = &fsType
}
return &pvSource
return &pvSource, nil
}
func (a *azureDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
@@ -1558,7 +1559,7 @@ func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, testResource i
return &volSource
}
func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.PersistentVolumeSource {
func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
pvSource := v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: a.volumeName,
@@ -1607,3 +1608,184 @@ func (a *awsDriver) DeleteVolume(volType testpatterns.TestVolType, testResource
framework.DeletePDWithRetry(a.volumeName)
}
*/
// local
type localDriver struct {
driverInfo testsuites.DriverInfo
node *v1.Node
hostExec utils.HostExec
// volumeType represents the local volume type we are testing, e.g. tmpfs,
// directory, block device.
volumeType utils.LocalVolumeType
ltrMgr utils.LocalTestResourceManager
}
var (
// capabilities
defaultLocalVolumeCapabilities = map[testsuites.Capability]bool{
testsuites.CapPersistence: true,
testsuites.CapFsGroup: true,
testsuites.CapBlock: false,
testsuites.CapExec: true,
}
localVolumeCapabilities = map[utils.LocalVolumeType]map[testsuites.Capability]bool{
utils.LocalVolumeBlock: {
testsuites.CapPersistence: true,
testsuites.CapFsGroup: true,
testsuites.CapBlock: true,
testsuites.CapExec: true,
},
}
// fstype
defaultLocalVolumeSupportedFsTypes = sets.NewString("")
localVolumeSupportedFsTypes = map[utils.LocalVolumeType]sets.String{
utils.LocalVolumeBlock: sets.NewString(
"", // Default fsType
"ext2",
"ext3",
"ext4",
"xfs",
),
}
// max file size
defaultLocalVolumeMaxFileSize = testpatterns.FileSizeSmall
localVolumeMaxFileSizes = map[utils.LocalVolumeType]int64{}
)
var _ testsuites.TestDriver = &localDriver{}
var _ testsuites.PreprovisionedVolumeTestDriver = &localDriver{}
var _ testsuites.PreprovisionedPVTestDriver = &localDriver{}
func InitLocalDriverWithVolumeType(volumeType utils.LocalVolumeType) func(config testsuites.TestConfig) testsuites.TestDriver {
maxFileSize := defaultLocalVolumeMaxFileSize
if maxFileSizeByVolType, ok := localVolumeMaxFileSizes[volumeType]; ok {
maxFileSize = maxFileSizeByVolType
}
supportedFsTypes := defaultLocalVolumeSupportedFsTypes
if supportedFsTypesByType, ok := localVolumeSupportedFsTypes[volumeType]; ok {
supportedFsTypes = supportedFsTypesByType
}
capabilities := defaultLocalVolumeCapabilities
if capabilitiesByType, ok := localVolumeCapabilities[volumeType]; ok {
capabilities = capabilitiesByType
}
return func(config testsuites.TestConfig) testsuites.TestDriver {
hostExec := utils.NewHostExec(config.Framework)
// custom tag to distinguish from tests of other volume types
featureTag := fmt.Sprintf("[LocalVolumeType: %s]", volumeType)
// For GCE Local SSD volumes, we must run serially
if volumeType == utils.LocalVolumeGCELocalSSD {
featureTag += " [Serial]"
}
return &localDriver{
driverInfo: testsuites.DriverInfo{
Name: "local",
FeatureTag: featureTag,
MaxFileSize: maxFileSize,
SupportedFsType: supportedFsTypes,
Capabilities: capabilities,
Config: config,
},
hostExec: hostExec,
volumeType: volumeType,
ltrMgr: utils.NewLocalResourceManager("local-driver", hostExec, "/tmp"),
}
}
}
func (l *localDriver) GetDriverInfo() *testsuites.DriverInfo {
return &l.driverInfo
}
func (l *localDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
if l.volumeType == utils.LocalVolumeGCELocalSSD {
ssdInterface := "scsi"
filesystemType := "fs"
ssdCmd := fmt.Sprintf("ls -1 /mnt/disks/by-uuid/google-local-ssds-%s-%s/ | wc -l", ssdInterface, filesystemType)
res, err := l.hostExec.IssueCommandWithResult(ssdCmd, l.node)
Expect(err).NotTo(HaveOccurred())
num, err := strconv.Atoi(strings.TrimSpace(res))
Expect(err).NotTo(HaveOccurred())
if num < 1 {
framework.Skipf("Requires at least 1 %s %s localSSD", ssdInterface, filesystemType)
}
}
}
func (l *localDriver) CreateDriver() {
// choose a random node to test against
l.node = l.randomNode()
}
func (l *localDriver) CleanupDriver() {
l.hostExec.Cleanup()
}
func (l *localDriver) randomNode() *v1.Node {
f := l.driverInfo.Config.Framework
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
node := nodes.Items[rand.Intn(len(nodes.Items))]
return &node
}
func (l *localDriver) CreateVolume(volType testpatterns.TestVolType) interface{} {
switch volType {
case testpatterns.PreprovisionedPV:
node := l.node
// assign this to schedule the pod on this node
l.driverInfo.Config.ClientNodeName = node.Name
return l.ltrMgr.Create(node, l.volumeType, nil)
default:
framework.Failf("Unsupported volType: %v is specified", volType)
}
return nil
}
func (l *localDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) {
ltr, ok := testResource.(*utils.LocalTestResource)
Expect(ok).To(BeTrue(), "Failed to cast test resource to local Test Resource")
switch volType {
case testpatterns.PreprovisionedPV:
l.ltrMgr.Remove(ltr)
default:
framework.Failf("Unsupported volType: %v is specified", volType)
}
return
}
func (l *localDriver) nodeAffinityForNode(node *v1.Node) *v1.VolumeNodeAffinity {
nodeKey := "kubernetes.io/hostname"
if node.Labels == nil {
framework.Failf("Node does not have labels")
}
nodeValue, found := node.Labels[nodeKey]
if !found {
framework.Failf("Node does not have required label %q", nodeKey)
}
return &v1.VolumeNodeAffinity{
Required: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeKey,
Operator: v1.NodeSelectorOpIn,
Values: []string{nodeValue},
},
},
},
},
},
}
}
func (l *localDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
ltr, ok := testResource.(*utils.LocalTestResource)
Expect(ok).To(BeTrue(), "Failed to cast test resource to local Test Resource")
return &v1.PersistentVolumeSource{
Local: &v1.LocalVolumeSource{
Path: ltr.Path,
FSType: &fsType,
},
}, l.nodeAffinityForNode(ltr.Node)
}

View File

@@ -40,6 +40,14 @@ var testDrivers = []func(config testsuites.TestConfig) testsuites.TestDriver{
drivers.InitVSphereDriver,
drivers.InitAzureDriver,
drivers.InitAwsDriver,
drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeDirectory),
drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeDirectoryLink),
drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeDirectoryBindMounted),
drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeDirectoryLinkBindMounted),
drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeTmpfs),
drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeBlock),
drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeBlockFS),
drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeGCELocalSSD),
}
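
For context, a minimal sketch of how such an init-function list is typically consumed; the defineTests wrapper and the loop body are illustrative assumptions, not part of this diff:

// Sketch: each entry is a factory; instantiate it with the shared test
// config, then register the test suites against the resulting driver.
func defineTests(config testsuites.TestConfig) {
	for _, initDriver := range testDrivers {
		curDriver := initDriver(config)
		_ = curDriver // ... register each test suite for curDriver here ...
	}
}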
// List of testSuites to be executed in below loop

View File

@@ -157,9 +157,9 @@ func (r *genericVolumeTestResource) setupResource(driver TestDriver, pattern tes
case testpatterns.PreprovisionedPV:
framework.Logf("Creating resource for pre-provisioned PV")
if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
pvSource := pDriver.GetPersistentVolumeSource(false, fsType, r.driverTestResource)
pvSource, volumeNodeAffinity := pDriver.GetPersistentVolumeSource(false, fsType, r.driverTestResource)
if pvSource != nil {
r.volSource, r.pv, r.pvc = createVolumeSourceWithPVCPV(f, dInfo.Name, pvSource, false)
r.volSource, r.pv, r.pvc = createVolumeSourceWithPVCPV(f, dInfo.Name, pvSource, volumeNodeAffinity, false)
}
r.volType = fmt.Sprintf("%s-preprovisionedPV", dInfo.Name)
}
@@ -231,12 +231,14 @@ func createVolumeSourceWithPVCPV(
f *framework.Framework,
name string,
pvSource *v1.PersistentVolumeSource,
volumeNodeAffinity *v1.VolumeNodeAffinity,
readOnly bool,
) (*v1.VolumeSource, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
pvConfig := framework.PersistentVolumeConfig{
NamePrefix: fmt.Sprintf("%s-", name),
StorageClassName: f.Namespace.Name,
PVSource: *pvSource,
NodeAffinity: volumeNodeAffinity,
}
pvcConfig := framework.PersistentVolumeClaimConfig{
StorageClassName: &f.Namespace.Name,

View File

@@ -58,10 +58,11 @@ type InlineVolumeTestDriver interface {
// PreprovisionedPVTestDriver represents an interface for a TestDriver that supports PreprovisionedPV
type PreprovisionedPVTestDriver interface {
PreprovisionedVolumeTestDriver
// GetPersistentVolumeSource returns a PersistentVolumeSource for pre-provisioned Persistent Volume.
// GetPersistentVolumeSource returns a PersistentVolumeSource with volume node affinity for pre-provisioned Persistent Volume.
// It will set readOnly and fsType to the PersistentVolumeSource, if TestDriver supports both of them.
// It will return nil if the TestDriver doesn't support either of the parameters.
GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.PersistentVolumeSource
// Volume node affinity is optional; it will be nil for volumes that do not have node affinity.
GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity)
}
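
To illustrate the updated contract (a hedged sketch, assuming the file's existing v1 import; the driver type and path are hypothetical): a driver whose volumes are reachable from any node returns nil affinity, while topology-constrained drivers such as local return a non-nil value.

// hypotheticalDriver is illustrative only and not part of this change.
type hypotheticalDriver struct{}

func (d *hypotheticalDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
	// No topology constraint, so the second return value is nil.
	return &v1.PersistentVolumeSource{
		HostPath: &v1.HostPathVolumeSource{Path: "/tmp/hypothetical"},
	}, nil
}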
// DynamicPVTestDriver represents an interface for a TestDriver that supports DynamicPV

View File

@@ -162,8 +162,9 @@ func (s *volumeModeTestResource) setupResource(driver TestDriver, pattern testpa
volType := pattern.VolType
var (
scName string
pvSource *v1.PersistentVolumeSource
scName string
pvSource *v1.PersistentVolumeSource
volumeNodeAffinity *v1.VolumeNodeAffinity
)
// Create volume for pre-provisioned volume tests
@@ -177,12 +178,12 @@ func (s *volumeModeTestResource) setupResource(driver TestDriver, pattern testpa
scName = fmt.Sprintf("%s-%s-sc-for-file", ns.Name, dInfo.Name)
}
if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
pvSource = pDriver.GetPersistentVolumeSource(false, fsType, s.driverTestResource)
pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, s.driverTestResource)
if pvSource == nil {
framework.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name)
}
sc, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, volMode, *pvSource)
sc, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, volMode, *pvSource, volumeNodeAffinity)
s.sc = sc
s.pv = framework.MakePersistentVolume(pvConfig)
s.pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns.Name)
@@ -373,7 +374,7 @@ func testVolumeModeSuccessForDynamicPV(input *volumeModeTestInput) {
}
func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1.VolumeBindingMode,
volMode v1.PersistentVolumeMode, pvSource v1.PersistentVolumeSource) (*storagev1.StorageClass,
volMode v1.PersistentVolumeMode, pvSource v1.PersistentVolumeSource, volumeNodeAffinity *v1.VolumeNodeAffinity) (*storagev1.StorageClass,
framework.PersistentVolumeConfig, framework.PersistentVolumeClaimConfig) {
// StorageClass
scConfig := &storagev1.StorageClass{
@@ -386,6 +387,7 @@ func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1
// PV
pvConfig := framework.PersistentVolumeConfig{
PVSource: pvSource,
NodeAffinity: volumeNodeAffinity,
NamePrefix: pvNamePrefix,
StorageClassName: scName,
VolumeMode: &volMode,

View File

@@ -10,6 +10,8 @@ go_library(
srcs = [
"deployment.go",
"framework.go",
"host_exec.go",
"local.go",
"utils.go",
],
importpath = "k8s.io/kubernetes/test/e2e/storage/utils",
@@ -20,6 +22,7 @@ go_library(
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",

View File

@@ -0,0 +1,124 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"fmt"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
)
// HostExec represents the interface we require to execute commands on a remote host.
type HostExec interface {
IssueCommandWithResult(cmd string, node *v1.Node) (string, error)
IssueCommand(cmd string, node *v1.Node) error
Cleanup()
}
// hostExecutor implements HostExec
type hostExecutor struct {
*framework.Framework
nodeExecPods map[string]*v1.Pod
}
// NewHostExec returns a HostExec
func NewHostExec(framework *framework.Framework) HostExec {
return &hostExecutor{
Framework: framework,
nodeExecPods: make(map[string]*v1.Pod),
}
}
// launchNodeExecPod launches a hostexec pod for local PV and waits
// until it's Running.
func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod {
f := h.Framework
cs := f.ClientSet
ns := f.Namespace
hostExecPod := framework.NewHostExecPodSpec(ns.Name, fmt.Sprintf("hostexec-%s", node))
hostExecPod.Spec.NodeName = node
hostExecPod.Spec.Volumes = []v1.Volume{
{
// Required to enter the host mount namespace via nsenter.
Name: "rootfs",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/",
},
},
},
}
hostExecPod.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{
{
Name: "rootfs",
MountPath: "/rootfs",
ReadOnly: true,
},
}
hostExecPod.Spec.Containers[0].SecurityContext = &v1.SecurityContext{
Privileged: func(privileged bool) *bool {
return &privileged
}(true),
}
pod, err := cs.CoreV1().Pods(ns.Name).Create(hostExecPod)
framework.ExpectNoError(err)
err = framework.WaitForPodRunningInNamespace(cs, pod)
framework.ExpectNoError(err)
return pod
}
// IssueCommandWithResult issues the command on the given node and returns stdout.
func (h *hostExecutor) IssueCommandWithResult(cmd string, node *v1.Node) (string, error) {
pod, ok := h.nodeExecPods[node.Name]
if !ok {
pod = h.launchNodeExecPod(node.Name)
if pod == nil {
return "", fmt.Errorf("failed to create hostexec pod for node %q", node)
}
h.nodeExecPods[node.Name] = pod
}
args := []string{
"exec",
fmt.Sprintf("--namespace=%v", pod.Namespace),
pod.Name,
"--",
"nsenter",
"--mount=/rootfs/proc/1/ns/mnt",
"--",
"sh",
"-c",
cmd,
}
return framework.RunKubectl(args...)
}
// IssueCommand works like IssueCommandWithResult, but discards the result.
func (h *hostExecutor) IssueCommand(cmd string, node *v1.Node) error {
_, err := h.IssueCommandWithResult(cmd, node)
return err
}
// Cleanup cleans up the resources created during the test.
// Note that in most cases it is not necessary to call this, because we create
// pods under the test namespace, which is destroyed in the teardown phase.
func (h *hostExecutor) Cleanup() {
for _, pod := range h.nodeExecPods {
framework.DeletePodOrFail(h.Framework.ClientSet, pod.Namespace, pod.Name)
}
h.nodeExecPods = make(map[string]*v1.Pod)
}
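
A short usage sketch, assuming a *framework.Framework and a target *v1.Node from the surrounding test plus a strings import; the demo function and command are illustrative:

// demoHostExec runs one command in the node's host mount namespace.
func demoHostExec(f *framework.Framework, node *v1.Node) {
	hostExec := NewHostExec(f)
	// Pods are created in the test namespace, so Cleanup is optional but tidy.
	defer hostExec.Cleanup()
	out, err := hostExec.IssueCommandWithResult("uname -r", node)
	framework.ExpectNoError(err)
	framework.Logf("kernel on node %q: %s", node.Name, strings.TrimSpace(out))
}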

View File

@@ -0,0 +1,345 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
/*
* Various local test resource implementations.
*/
import (
"fmt"
"path/filepath"
"strings"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
)
// LocalVolumeType represents the type of local volume, e.g. tmpfs, directory,
// block, etc.
type LocalVolumeType string
const (
// A simple directory as local volume
LocalVolumeDirectory LocalVolumeType = "dir"
// Like LocalVolumeDirectory but it's a symbolic link to the directory
LocalVolumeDirectoryLink LocalVolumeType = "dir-link"
// Like LocalVolumeDirectory but bind mounted
LocalVolumeDirectoryBindMounted LocalVolumeType = "dir-bindmounted"
// Like LocalVolumeDirectory but it's a symbolic link to a self bind-mounted directory
// Note that bind mounting a symbolic link actually mounts the directory it
// links to
LocalVolumeDirectoryLinkBindMounted LocalVolumeType = "dir-link-bindmounted"
// Use temporary filesystem as local volume
LocalVolumeTmpfs LocalVolumeType = "tmpfs"
// Block device: creates a local file and maps it as a block device
LocalVolumeBlock LocalVolumeType = "block"
// Filesystem backed by a block device
LocalVolumeBlockFS LocalVolumeType = "blockfs"
// Use a GCE Local SSD as the local volume; this is a filesystem
LocalVolumeGCELocalSSD LocalVolumeType = "gce-localssd-scsi-fs"
)
// LocalTestResource represents a test resource of a local volume.
type LocalTestResource struct {
VolumeType LocalVolumeType
Node *v1.Node
// Volume path: the path to the filesystem or block device on the node
Path string
// If the volume is backed by a loop device, we create the loop device storage file
// under this directory.
loopDir string
}
// LocalTestResourceManager represents an interface to create/destroy local test resources on a node
type LocalTestResourceManager interface {
Create(node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource
Remove(ltr *LocalTestResource)
}
// ltrMgr implements LocalTestResourceManager
type ltrMgr struct {
prefix string
hostExec HostExec
// hostBase represents a writable directory on the host under which we
// create test directories
hostBase string
}
// NewLocalResourceManager returns an instance of LocalTestResourceManager
func NewLocalResourceManager(prefix string, hostExec HostExec, hostBase string) LocalTestResourceManager {
return &ltrMgr{
prefix: prefix,
hostExec: hostExec,
hostBase: hostBase,
}
}
// getTestDir returns a test dir under the `hostBase` directory with a random name.
func (l *ltrMgr) getTestDir() string {
testDirName := fmt.Sprintf("%s-%s", l.prefix, string(uuid.NewUUID()))
return filepath.Join(l.hostBase, testDirName)
}
func (l *ltrMgr) setupLocalVolumeTmpfs(node *v1.Node, parameters map[string]string) *LocalTestResource {
hostDir := l.getTestDir()
By(fmt.Sprintf("Creating tmpfs mount point on node %q at path %q", node.Name, hostDir))
err := l.hostExec.IssueCommand(fmt.Sprintf("mkdir -p %q && sudo mount -t tmpfs -o size=10m tmpfs-%q %q", hostDir, hostDir, hostDir), node)
Expect(err).NotTo(HaveOccurred())
return &LocalTestResource{
Node: node,
Path: hostDir,
}
}
func (l *ltrMgr) cleanupLocalVolumeTmpfs(ltr *LocalTestResource) {
By(fmt.Sprintf("Unmount tmpfs mount point on node %q at path %q", ltr.Node.Name, ltr.Path))
err := l.hostExec.IssueCommand(fmt.Sprintf("sudo umount %q", ltr.Path), ltr.Node)
Expect(err).NotTo(HaveOccurred())
By("Removing the test directory")
err = l.hostExec.IssueCommand(fmt.Sprintf("rm -r %s", ltr.Path), ltr.Node)
Expect(err).NotTo(HaveOccurred())
}
// createAndSetupLoopDevice creates an empty file and associates a loop device with it.
func (l *ltrMgr) createAndSetupLoopDevice(dir string, node *v1.Node, size int) {
By(fmt.Sprintf("Creating block device on node %q using path %q", node.Name, dir))
mkdirCmd := fmt.Sprintf("mkdir -p %s", dir)
count := size / 4096
// xfs requires at least 4096 blocks
if count < 4096 {
count = 4096
}
ddCmd := fmt.Sprintf("dd if=/dev/zero of=%s/file bs=4096 count=%d", dir, count)
losetupCmd := fmt.Sprintf("sudo losetup -f %s/file", dir)
err := l.hostExec.IssueCommand(fmt.Sprintf("%s && %s && %s", mkdirCmd, ddCmd, losetupCmd), node)
Expect(err).NotTo(HaveOccurred())
}
// findLoopDevice finds the loop device path by its associated storage directory.
func (l *ltrMgr) findLoopDevice(dir string, node *v1.Node) string {
cmd := fmt.Sprintf("E2E_LOOP_DEV=$(sudo losetup | grep %s/file | awk '{ print $1 }') 2>&1 > /dev/null && echo ${E2E_LOOP_DEV}", dir)
loopDevResult, err := l.hostExec.IssueCommandWithResult(cmd, node)
Expect(err).NotTo(HaveOccurred())
return strings.TrimSpace(loopDevResult)
}
func (l *ltrMgr) setupLocalVolumeBlock(node *v1.Node, parameters map[string]string) *LocalTestResource {
loopDir := l.getTestDir()
l.createAndSetupLoopDevice(loopDir, node, 20*1024*1024)
loopDev := l.findLoopDevice(loopDir, node)
return &LocalTestResource{
Node: node,
Path: loopDev,
loopDir: loopDir,
}
}
// teardownLoopDevice tears down the loop device by its associated storage directory.
func (l *ltrMgr) teardownLoopDevice(dir string, node *v1.Node) {
loopDev := l.findLoopDevice(dir, node)
By(fmt.Sprintf("Tear down block device %q on node %q at path %s/file", loopDev, node.Name, dir))
losetupDeleteCmd := fmt.Sprintf("sudo losetup -d %s", loopDev)
err := l.hostExec.IssueCommand(losetupDeleteCmd, node)
Expect(err).NotTo(HaveOccurred())
return
}
func (l *ltrMgr) cleanupLocalVolumeBlock(ltr *LocalTestResource) {
l.teardownLoopDevice(ltr.loopDir, ltr.Node)
By(fmt.Sprintf("Removing the test directory %s", ltr.loopDir))
removeCmd := fmt.Sprintf("rm -r %s", ltr.loopDir)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
Expect(err).NotTo(HaveOccurred())
}
func (l *ltrMgr) setupLocalVolumeBlockFS(node *v1.Node, parameters map[string]string) *LocalTestResource {
ltr := l.setupLocalVolumeBlock(node, parameters)
loopDev := ltr.Path
loopDir := ltr.loopDir
// Format and mount at loopDir and give others rwx for read/write testing
cmd := fmt.Sprintf("sudo mkfs -t ext4 %s && sudo mount -t ext4 %s %s && sudo chmod o+rwx %s", loopDev, loopDev, loopDir, loopDir)
err := l.hostExec.IssueCommand(cmd, node)
Expect(err).NotTo(HaveOccurred())
return &LocalTestResource{
Node: node,
Path: loopDir,
loopDir: loopDir,
}
}
func (l *ltrMgr) cleanupLocalVolumeBlockFS(ltr *LocalTestResource) {
umountCmd := fmt.Sprintf("sudo umount %s", ltr.Path)
err := l.hostExec.IssueCommand(umountCmd, ltr.Node)
Expect(err).NotTo(HaveOccurred())
l.cleanupLocalVolumeBlock(ltr)
}
func (l *ltrMgr) setupLocalVolumeDirectory(node *v1.Node, parameters map[string]string) *LocalTestResource {
hostDir := l.getTestDir()
mkdirCmd := fmt.Sprintf("mkdir -p %s", hostDir)
err := l.hostExec.IssueCommand(mkdirCmd, node)
Expect(err).NotTo(HaveOccurred())
return &LocalTestResource{
Node: node,
Path: hostDir,
}
}
func (l *ltrMgr) cleanupLocalVolumeDirectory(ltr *LocalTestResource) {
By("Removing the test directory")
removeCmd := fmt.Sprintf("rm -r %s", ltr.Path)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
Expect(err).NotTo(HaveOccurred())
}
func (l *ltrMgr) setupLocalVolumeDirectoryLink(node *v1.Node, parameters map[string]string) *LocalTestResource {
hostDir := l.getTestDir()
hostDirBackend := hostDir + "-backend"
cmd := fmt.Sprintf("mkdir %s && sudo ln -s %s %s", hostDirBackend, hostDirBackend, hostDir)
err := l.hostExec.IssueCommand(cmd, node)
Expect(err).NotTo(HaveOccurred())
return &LocalTestResource{
Node: node,
Path: hostDir,
}
}
func (l *ltrMgr) cleanupLocalVolumeDirectoryLink(ltr *LocalTestResource) {
By("Removing the test directory")
hostDir := ltr.Path
hostDirBackend := hostDir + "-backend"
removeCmd := fmt.Sprintf("sudo rm -r %s && rm -r %s", hostDir, hostDirBackend)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
Expect(err).NotTo(HaveOccurred())
}
func (l *ltrMgr) setupLocalVolumeDirectoryBindMounted(node *v1.Node, parameters map[string]string) *LocalTestResource {
hostDir := l.getTestDir()
cmd := fmt.Sprintf("mkdir %s && sudo mount --bind %s %s", hostDir, hostDir, hostDir)
err := l.hostExec.IssueCommand(cmd, node)
Expect(err).NotTo(HaveOccurred())
return &LocalTestResource{
Node: node,
Path: hostDir,
}
}
func (l *ltrMgr) cleanupLocalVolumeDirectoryBindMounted(ltr *LocalTestResource) {
By("Removing the test directory")
hostDir := ltr.Path
removeCmd := fmt.Sprintf("sudo umount %s && rm -r %s", hostDir, hostDir)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
Expect(err).NotTo(HaveOccurred())
}
func (l *ltrMgr) setupLocalVolumeDirectoryLinkBindMounted(node *v1.Node, parameters map[string]string) *LocalTestResource {
hostDir := l.getTestDir()
hostDirBackend := hostDir + "-backend"
cmd := fmt.Sprintf("mkdir %s && sudo mount --bind %s %s && sudo ln -s %s %s", hostDirBackend, hostDirBackend, hostDirBackend, hostDirBackend, hostDir)
err := l.hostExec.IssueCommand(cmd, node)
Expect(err).NotTo(HaveOccurred())
return &LocalTestResource{
Node: node,
Path: hostDir,
}
}
func (l *ltrMgr) cleanupLocalVolumeDirectoryLinkBindMounted(ltr *LocalTestResource) {
By("Removing the test directory")
hostDir := ltr.Path
hostDirBackend := hostDir + "-backend"
removeCmd := fmt.Sprintf("sudo rm %s && sudo umount %s && rm -r %s", hostDir, hostDirBackend, hostDirBackend)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
Expect(err).NotTo(HaveOccurred())
}
func (l *ltrMgr) setupLocalVolumeGCELocalSSD(node *v1.Node, parameters map[string]string) *LocalTestResource {
res, err := l.hostExec.IssueCommandWithResult("ls /mnt/disks/by-uuid/google-local-ssds-scsi-fs/", node)
Expect(err).NotTo(HaveOccurred())
dirName := strings.Fields(res)[0]
hostDir := "/mnt/disks/by-uuid/google-local-ssds-scsi-fs/" + dirName
return &LocalTestResource{
Node: node,
Path: hostDir,
}
}
func (l *ltrMgr) cleanupLocalVolumeGCELocalSSD(ltr *LocalTestResource) {
// This filesystem is attached during cluster initialization; we clean all files to make it reusable.
removeCmd := fmt.Sprintf("find '%s' -mindepth 1 -maxdepth 1 -print0 | xargs -r -0 rm -rf", ltr.Path)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
Expect(err).NotTo(HaveOccurred())
}
func (l *ltrMgr) Create(node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource {
var ltr *LocalTestResource
switch volumeType {
case LocalVolumeDirectory:
ltr = l.setupLocalVolumeDirectory(node, parameters)
case LocalVolumeDirectoryLink:
ltr = l.setupLocalVolumeDirectoryLink(node, parameters)
case LocalVolumeDirectoryBindMounted:
ltr = l.setupLocalVolumeDirectoryBindMounted(node, parameters)
case LocalVolumeDirectoryLinkBindMounted:
ltr = l.setupLocalVolumeDirectoryLinkBindMounted(node, parameters)
case LocalVolumeTmpfs:
ltr = l.setupLocalVolumeTmpfs(node, parameters)
case LocalVolumeBlock:
ltr = l.setupLocalVolumeBlock(node, parameters)
case LocalVolumeBlockFS:
ltr = l.setupLocalVolumeBlockFS(node, parameters)
case LocalVolumeGCELocalSSD:
ltr = l.setupLocalVolumeGCELocalSSD(node, parameters)
default:
framework.Failf("Failed to create local test resource on node %q, unsupported volume type: %v is specified", node.Name, volumeType)
return nil
}
if ltr == nil {
framework.Failf("Failed to create local test resource on node %q, volume type: %v, parameters: %v", node.Name, volumeType, parameters)
}
ltr.VolumeType = volumeType
return ltr
}
func (l *ltrMgr) Remove(ltr *LocalTestResource) {
switch ltr.VolumeType {
case LocalVolumeDirectory:
l.cleanupLocalVolumeDirectory(ltr)
case LocalVolumeDirectoryLink:
l.cleanupLocalVolumeDirectoryLink(ltr)
case LocalVolumeDirectoryBindMounted:
l.cleanupLocalVolumeDirectoryBindMounted(ltr)
case LocalVolumeDirectoryLinkBindMounted:
l.cleanupLocalVolumeDirectoryLinkBindMounted(ltr)
case LocalVolumeTmpfs:
l.cleanupLocalVolumeTmpfs(ltr)
case LocalVolumeBlock:
l.cleanupLocalVolumeBlock(ltr)
case LocalVolumeBlockFS:
l.cleanupLocalVolumeBlockFS(ltr)
case LocalVolumeGCELocalSSD:
l.cleanupLocalVolumeGCELocalSSD(ltr)
default:
framework.Failf("Failed to remove local test resource, unsupported volume type: %v is specified", ltr.VolumeType)
}
return
}
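
Putting it together, a hedged end-to-end sketch of the manager's lifecycle, reusing this package's existing imports; demoLocalVolume, the "demo" prefix, and the /tmp base directory are illustrative assumptions:

// demoLocalVolume provisions a tmpfs-backed local volume on a node, logs its
// location, and tears it down again.
func demoLocalVolume(f *framework.Framework, node *v1.Node) {
	hostExec := NewHostExec(f)
	defer hostExec.Cleanup()
	ltrMgr := NewLocalResourceManager("demo", hostExec, "/tmp")
	ltr := ltrMgr.Create(node, LocalVolumeTmpfs, nil)
	defer ltrMgr.Remove(ltr)
	framework.Logf("local volume of type %q ready at %q on node %q", ltr.VolumeType, ltr.Path, ltr.Node.Name)
}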