e2e: CSI Volume tests

This e2e test exercises the CSI volume plugin in Kubernetes with
a CSI hostPath driver. It is also set up so that additional
drivers can be tested in the future.
Luis Pabón 2017-11-20 23:43:22 -05:00
parent 3d652cae03
commit d1eb8a6163
4 changed files with 544 additions and 94 deletions
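
For orientation before the diffs: the new test deploys the CSI hostPath driver per test run and removes it afterwards. A condensed sketch of that flow, using the helper names introduced in the files below (the ordering mirrors the BeforeEach/AfterEach blocks; this is a sketch, not code from the diff):

// Deploy: RBAC objects first, then the pod hosting the CSI driver and its sidecars.
clusterRole := externalAttacherClusterRole(cs, config, false)
serviceAccount := externalAttacherServiceAccount(cs, config, false)
externalAttacherClusterRoleBinding(cs, config, false, serviceAccount, clusterRole)
csiHostPathPod(cs, config, false, f, serviceAccount)
// Teardown: the same helpers run with teardown=true, in reverse order; each one
// deletes its object, waits for the deletion, and returns nil instead of creating.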

@@ -8,6 +8,8 @@ load(
go_library(
name = "go_default_library",
srcs = [
"csi_hostpath.go",
"csi_volumes.go",
"empty_dir_wrapper.go",
"flexvolume.go",
"pd.go",
@@ -47,6 +49,7 @@ go_library(
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",

@@ -0,0 +1,199 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file is used to deploy the CSI hostPath plugin
// More Information: https://github.com/kubernetes-csi/drivers/tree/master/pkg/hostpath
package storage
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
csiHostPathPluginImage string = "docker.io/k8scsi/hostpathplugin:0.1"
)
func csiHostPathPod(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
f *framework.Framework,
sa *v1.ServiceAccount,
) *v1.Pod {
podClient := client.CoreV1().Pods(config.Namespace)
priv := true
mountPropagation := v1.MountPropagationBidirectional
hostPathType := v1.HostPathDirectoryOrCreate
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-pod",
Namespace: config.Namespace,
Labels: map[string]string{
"app": "hostpath-driver",
},
},
Spec: v1.PodSpec{
ServiceAccountName: sa.GetName(),
NodeName: config.ServerNodeName,
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Name: "external-provisioner",
Image: csiExternalProvisionerImage,
ImagePullPolicy: v1.PullAlways,
Args: []string{
"--v=5",
"--provisioner=csi-hostpath",
"--csi-address=/csi/csi.sock",
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
},
},
{
Name: "driver-registrar",
Image: csiDriverRegistrarImage,
ImagePullPolicy: v1.PullAlways,
Args: []string{
"--v=5",
"--csi-address=/csi/csi.sock",
},
Env: []v1.EnvVar{
{
Name: "KUBE_NODE_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
},
},
{
Name: "external-attacher",
Image: csiExternalAttacherImage,
ImagePullPolicy: v1.PullAlways,
Args: []string{
"--v=5",
"--csi-address=$(ADDRESS)",
},
Env: []v1.EnvVar{
{
Name: "ADDRESS",
Value: "/csi/csi.sock",
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
},
},
{
Name: "hostpath-driver",
Image: csiHostPathPluginImage,
ImagePullPolicy: v1.PullAlways,
SecurityContext: &v1.SecurityContext{
Privileged: &priv,
},
Args: []string{
"--v=5",
"--endpoint=$(CSI_ENDPOINT)",
"--nodeid=$(KUBE_NODE_NAME)",
},
Env: []v1.EnvVar{
{
Name: "CSI_ENDPOINT",
Value: "unix://" + "/csi/csi.sock",
},
{
Name: "KUBE_NODE_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
{
Name: "mountpoint-dir",
MountPath: "/var/lib/kubelet/pods",
MountPropagation: &mountPropagation,
},
},
},
},
Volumes: []v1.Volume{
{
Name: "socket-dir",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/var/lib/kubelet/plugins/csi-hostpath",
Type: &hostPathType,
},
},
},
{
Name: "mountpoint-dir",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/var/lib/kubelet/pods",
Type: &hostPathType,
},
},
},
},
},
}
err := framework.DeletePodWithWait(f, client, pod)
framework.ExpectNoError(err, "Failed to delete pod %s/%s: %v",
pod.GetNamespace(), pod.GetName(), err)
if teardown {
return nil
}
ret, err := podClient.Create(pod)
if err != nil {
framework.ExpectNoError(err, "Failed to create %q pod: %v", pod.GetName(), err)
}
// Wait for pod to come up
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, ret))
return ret
}
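
All four containers in the pod above mount the same socket-dir volume because the sidecars and the driver rendezvous on a single unix socket, /csi/csi.sock. As an illustration of what travels over that socket (this snippet is not part of the commit, and it assumes the CSI spec's current Go bindings; the v0.1 API this commit targets differs in its details):

package main

import (
	"context"
	"fmt"

	csi "github.com/container-storage-interface/spec/lib/go/csi"
	"google.golang.org/grpc"
)

func main() {
	// Dial the shared socket that every container in the driver pod mounts.
	conn, err := grpc.Dial("unix:///csi/csi.sock", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// The Identity service is what driver-registrar queries to learn the
	// driver name ("csi-hostpath" here) before registering it with the kubelet.
	identity := csi.NewIdentityClient(conn)
	info, err := identity.GetPluginInfo(context.Background(), &csi.GetPluginInfoRequest{})
	if err != nil {
		panic(err)
	}
	fmt.Println("driver:", info.GetName())
}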

@@ -0,0 +1,243 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"math/rand"
"time"
"k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
. "github.com/onsi/ginkgo"
)
const (
csiExternalAttacherImage string = "docker.io/k8scsi/csi-attacher:0.1"
csiExternalProvisionerImage string = "docker.io/k8scsi/csi-provisioner:0.1"
csiDriverRegistrarImage string = "docker.io/k8scsi/driver-registrar"
)
func externalAttacherServiceAccount(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
) *v1.ServiceAccount {
serviceAccountName := config.Prefix + "-service-account"
serviceAccountClient := client.CoreV1().ServiceAccounts(config.Namespace)
sa := &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: serviceAccountName,
},
}
serviceAccountClient.Delete(sa.GetName(), &metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
_, err := serviceAccountClient.Get(sa.GetName(), metav1.GetOptions{})
return apierrs.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
if teardown {
return nil
}
ret, err := serviceAccountClient.Create(sa)
if err != nil {
framework.ExpectNoError(err, "Failed to create %s service account: %v", sa.GetName(), err)
}
return ret
}
func externalAttacherClusterRole(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
) *rbacv1.ClusterRole {
clusterRoleClient := client.RbacV1().ClusterRoles()
role := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-cluster-role",
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"persistentvolumes"},
Verbs: []string{"create", "delete", "get", "list", "watch", "update"},
},
{
APIGroups: []string{""},
Resources: []string{"persistentvolumesclaims"},
Verbs: []string{"get", "list", "watch", "update"},
},
{
APIGroups: []string{""},
Resources: []string{"nodes"},
Verbs: []string{"get", "list", "watch", "update"},
},
{
APIGroups: []string{"storage.k8s.io"},
Resources: []string{"volumeattachments"},
Verbs: []string{"get", "list", "watch", "update"},
},
{
APIGroups: []string{"storage.k8s.io"},
Resources: []string{"storageclasses"},
Verbs: []string{"get", "list", "watch"},
},
},
}
clusterRoleClient.Delete(role.GetName(), &metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
_, err := clusterRoleClient.Get(role.GetName(), metav1.GetOptions{})
return apierrs.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
if teardown {
return nil
}
ret, err := clusterRoleClient.Create(role)
if err != nil {
framework.ExpectNoError(err, "Failed to create %s cluster role: %v", role.GetName(), err)
}
return ret
}
func externalAttacherClusterRoleBinding(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
sa *v1.ServiceAccount,
clusterRole *rbacv1.ClusterRole,
) *rbacv1.ClusterRoleBinding {
clusterRoleBindingClient := client.RbacV1().ClusterRoleBindings()
binding := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-role-binding",
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: sa.GetName(),
Namespace: sa.GetNamespace(),
},
},
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
Name: clusterRole.GetName(),
APIGroup: "rbac.authorization.k8s.io",
},
}
clusterRoleBindingClient.Delete(binding.GetName(), &metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
_, err := clusterRoleBindingClient.Get(binding.GetName(), metav1.GetOptions{})
return apierrs.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
if teardown {
return nil
}
ret, err := clusterRoleBindingClient.Create(binding)
if err != nil {
framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err)
}
return ret
}
var _ = utils.SIGDescribe("CSI Volumes [Feature:CSI]", func() {
f := framework.NewDefaultFramework("csi-mock-plugin")
var (
cs clientset.Interface
ns *v1.Namespace
node v1.Node
config framework.VolumeTestConfig
suffix string
)
BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
node = nodes.Items[rand.Intn(len(nodes.Items))]
config = framework.VolumeTestConfig{
Namespace: ns.Name,
Prefix: "csi",
ClientNodeName: node.Name,
ServerNodeName: node.Name,
WaitForCompletion: true,
}
suffix = ns.Name
})
// Create one of these for each of the drivers to be tested
// CSI hostPath driver test
Describe("Sanity CSI plugin test using hostPath CSI driver", func() {
var (
clusterRole *rbacv1.ClusterRole
serviceAccount *v1.ServiceAccount
)
BeforeEach(func() {
By("deploying csi hostpath driver")
clusterRole = externalAttacherClusterRole(cs, config, false)
serviceAccount = externalAttacherServiceAccount(cs, config, false)
externalAttacherClusterRoleBinding(cs, config, false, serviceAccount, clusterRole)
csiHostPathPod(cs, config, false, f, serviceAccount)
})
AfterEach(func() {
By("uninstalling csi hostpath driver")
csiHostPathPod(cs, config, true, f, serviceAccount)
externalAttacherClusterRoleBinding(cs, config, true, serviceAccount, clusterRole)
serviceAccount = externalAttacherServiceAccount(cs, config, true)
clusterRole = externalAttacherClusterRole(cs, config, true)
})
It("should provision storage with a hostPath CSI driver", func() {
t := storageClassTest{
name: "csi-hostpath",
provisioner: "csi-hostpath",
parameters: map[string]string{},
claimSize: "1Gi",
expectedSize: "1Gi",
nodeName: node.Name,
}
claim := newClaim(t, ns.GetName(), "")
class := newStorageClass(t, ns.GetName(), "")
testDynamicProvisioning(t, cs, claim, class)
})
})
})
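
Per the comment in the spec above ("Create one of these for each of the drivers to be tested"), extending the suite means adding another Describe block. A sketch of what a second driver's block could look like — the deployMyCSIDriver helper and the my-csi-driver provisioner name are hypothetical; only the hostPath block exists in this commit:

// Hypothetical second driver, mirroring the hostPath block above.
Describe("Sanity CSI plugin test using a hypothetical MyCSI driver", func() {
	BeforeEach(func() {
		By("deploying my-csi driver")
		// Placeholder for driver-specific setup: its own service
		// account, RBAC objects, and driver pod.
		deployMyCSIDriver(cs, config, false)
	})

	AfterEach(func() {
		By("uninstalling my-csi driver")
		deployMyCSIDriver(cs, config, true)
	})

	It("should provision storage with the MyCSI driver", func() {
		t := storageClassTest{
			name:         "my-csi-driver",
			provisioner:  "my-csi-driver", // must match the name the driver reports over CSI
			parameters:   map[string]string{},
			claimSize:    "1Gi",
			expectedSize: "1Gi",
			nodeName:     node.Name,
		}
		claim := newClaim(t, ns.GetName(), "")
		class := newStorageClass(t, ns.GetName(), "")
		testDynamicProvisioning(t, cs, claim, class)
	})
})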

@@ -56,6 +56,7 @@ type storageClassTest struct {
claimSize string
expectedSize string
pvCheck func(volume *v1.PersistentVolume) error
+nodeName string
}
const (
@@ -139,10 +140,10 @@ func testDynamicProvisioning(t storageClassTest, client clientset.Interface, cla
// Get entry, get mount options at 6th word, replace brackets with commas
command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
}
-runInPodWithVolume(client, claim.Namespace, claim.Name, command)
+runInPodWithVolume(client, claim.Namespace, claim.Name, t.nodeName, command)
By("checking the created volume is readable and retains data")
-runInPodWithVolume(client, claim.Namespace, claim.Name, "grep 'hello world' /mnt/test/data")
+runInPodWithVolume(client, claim.Namespace, claim.Name, t.nodeName, "grep 'hello world' /mnt/test/data")
By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name))
framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))
@@ -250,140 +251,140 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
// that can be used to persist data among pods.
tests := []storageClassTest{
{
"SSD PD on GCE/GKE",
[]string{"gce", "gke"},
"kubernetes.io/gce-pd",
map[string]string{
name: "SSD PD on GCE/GKE",
cloudProviders: []string{"gce", "gke"},
provisioner: "kubernetes.io/gce-pd",
parameters: map[string]string{
"type": "pd-ssd",
"zone": cloudZone,
},
"1.5G",
"2G",
func(volume *v1.PersistentVolume) error {
claimSize: "1.5G",
expectedSize: "2G",
pvCheck: func(volume *v1.PersistentVolume) error {
return checkGCEPD(volume, "pd-ssd")
},
},
{
"HDD PD on GCE/GKE",
[]string{"gce", "gke"},
"kubernetes.io/gce-pd",
map[string]string{
name: "HDD PD on GCE/GKE",
cloudProviders: []string{"gce", "gke"},
provisioner: "kubernetes.io/gce-pd",
parameters: map[string]string{
"type": "pd-standard",
},
"1.5G",
"2G",
func(volume *v1.PersistentVolume) error {
claimSize: "1.5G",
expectedSize: "2G",
pvCheck: func(volume *v1.PersistentVolume) error {
return checkGCEPD(volume, "pd-standard")
},
},
// AWS
{
"gp2 EBS on AWS",
[]string{"aws"},
"kubernetes.io/aws-ebs",
map[string]string{
name: "gp2 EBS on AWS",
cloudProviders: []string{"aws"},
provisioner: "kubernetes.io/aws-ebs",
parameters: map[string]string{
"type": "gp2",
"zone": cloudZone,
},
"1.5Gi",
"2Gi",
func(volume *v1.PersistentVolume) error {
claimSize: "1.5Gi",
expectedSize: "2Gi",
pvCheck: func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "gp2", false)
},
},
{
"io1 EBS on AWS",
[]string{"aws"},
"kubernetes.io/aws-ebs",
map[string]string{
name: "io1 EBS on AWS",
cloudProviders: []string{"aws"},
provisioner: "kubernetes.io/aws-ebs",
parameters: map[string]string{
"type": "io1",
"iopsPerGB": "50",
},
"3.5Gi",
"4Gi", // 4 GiB is minimum for io1
func(volume *v1.PersistentVolume) error {
claimSize: "3.5Gi",
expectedSize: "4Gi", // 4 GiB is minimum for io1
pvCheck: func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "io1", false)
},
},
{
"sc1 EBS on AWS",
[]string{"aws"},
"kubernetes.io/aws-ebs",
map[string]string{
name: "sc1 EBS on AWS",
cloudProviders: []string{"aws"},
provisioner: "kubernetes.io/aws-ebs",
parameters: map[string]string{
"type": "sc1",
},
"500Gi", // minimum for sc1
"500Gi",
func(volume *v1.PersistentVolume) error {
claimSize: "500Gi", // minimum for sc1
expectedSize: "500Gi",
pvCheck: func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "sc1", false)
},
},
{
"st1 EBS on AWS",
[]string{"aws"},
"kubernetes.io/aws-ebs",
map[string]string{
name: "st1 EBS on AWS",
cloudProviders: []string{"aws"},
provisioner: "kubernetes.io/aws-ebs",
parameters: map[string]string{
"type": "st1",
},
"500Gi", // minimum for st1
"500Gi",
func(volume *v1.PersistentVolume) error {
claimSize: "500Gi", // minimum for st1
expectedSize: "500Gi",
pvCheck: func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "st1", false)
},
},
{
"encrypted EBS on AWS",
[]string{"aws"},
"kubernetes.io/aws-ebs",
map[string]string{
name: "encrypted EBS on AWS",
cloudProviders: []string{"aws"},
provisioner: "kubernetes.io/aws-ebs",
parameters: map[string]string{
"encrypted": "true",
},
"1Gi",
"1Gi",
func(volume *v1.PersistentVolume) error {
claimSize: "1Gi",
expectedSize: "1Gi",
pvCheck: func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "gp2", true)
},
},
// OpenStack generic tests (works on all OpenStack deployments)
{
"generic Cinder volume on OpenStack",
[]string{"openstack"},
"kubernetes.io/cinder",
map[string]string{},
"1.5Gi",
"2Gi",
nil, // there is currently nothing to check on OpenStack
name: "generic Cinder volume on OpenStack",
cloudProviders: []string{"openstack"},
provisioner: "kubernetes.io/cinder",
parameters: map[string]string{},
claimSize: "1.5Gi",
expectedSize: "2Gi",
pvCheck: nil, // there is currently nothing to check on OpenStack
},
{
"Cinder volume with empty volume type and zone on OpenStack",
[]string{"openstack"},
"kubernetes.io/cinder",
map[string]string{
name: "Cinder volume with empty volume type and zone on OpenStack",
cloudProviders: []string{"openstack"},
provisioner: "kubernetes.io/cinder",
parameters: map[string]string{
"type": "",
"availability": "",
},
"1.5Gi",
"2Gi",
nil, // there is currently nothing to check on OpenStack
claimSize: "1.5Gi",
expectedSize: "2Gi",
pvCheck: nil, // there is currently nothing to check on OpenStack
},
// vSphere generic test
{
"generic vSphere volume",
[]string{"vsphere"},
"kubernetes.io/vsphere-volume",
map[string]string{},
"1.5Gi",
"1.5Gi",
nil,
name: "generic vSphere volume",
cloudProviders: []string{"vsphere"},
provisioner: "kubernetes.io/vsphere-volume",
parameters: map[string]string{},
claimSize: "1.5Gi",
expectedSize: "1.5Gi",
pvCheck: nil,
},
{
"Azure disk volume with empty sku and location",
[]string{"azure"},
"kubernetes.io/azure-disk",
map[string]string{},
"1Gi",
"1Gi",
nil,
name: "Azure disk volume with empty sku and location",
cloudProviders: []string{"azure"},
provisioner: "kubernetes.io/azure-disk",
parameters: map[string]string{},
claimSize: "1Gi",
expectedSize: "1Gi",
pvCheck: nil,
},
}
@@ -430,15 +431,15 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
framework.SkipUnlessProviderIs("gce", "gke")
test := storageClassTest{
"HDD PD on GCE/GKE",
[]string{"gce", "gke"},
"kubernetes.io/gce-pd",
map[string]string{
name: "HDD PD on GCE/GKE",
cloudProviders: []string{"gce", "gke"},
provisioner: "kubernetes.io/gce-pd",
parameters: map[string]string{
"type": "pd-standard",
},
"1G",
"1G",
func(volume *v1.PersistentVolume) error {
claimSize: "1G",
expectedSize: "1G",
pvCheck: func(volume *v1.PersistentVolume) error {
return checkGCEPD(volume, "pd-standard")
},
}
@@ -464,15 +465,15 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
framework.SkipUnlessProviderIs("gce", "gke")
test := storageClassTest{
"HDD PD on GCE/GKE",
[]string{"gce", "gke"},
"kubernetes.io/gce-pd",
map[string]string{
name: "HDD PD on GCE/GKE",
cloudProviders: []string{"gce", "gke"},
provisioner: "kubernetes.io/gce-pd",
parameters: map[string]string{
"type": "pd-standard",
},
"1G",
"1G",
func(volume *v1.PersistentVolume) error {
claimSize: "1G",
expectedSize: "1G",
pvCheck: func(volume *v1.PersistentVolume) error {
return checkGCEPD(volume, "pd-standard")
},
}
@@ -791,7 +792,7 @@ func newClaim(t storageClassTest, ns, suffix string) *v1.PersistentVolumeClaim {
}
// runInPodWithVolume runs a command in a pod with the given claim mounted to the /mnt directory.
-func runInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
+func runInPodWithVolume(c clientset.Interface, ns, claimName, nodeName, command string) {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
@@ -829,6 +830,10 @@ func runInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
},
},
}
+if len(nodeName) != 0 {
+pod.Spec.NodeName = nodeName
+}
pod, err := c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err, "Failed to create pod: %v", err)
defer func() {
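
The nodeName threading above exists because the hostPath driver provisions storage on the node where its pod runs, so the pod that writes the test data and the pod that re-reads it must be pinned to that same node; an empty nodeName preserves the old behavior of letting the scheduler choose. An illustrative call with the new signature (the claim name is made up):

runInPodWithVolume(cs, ns.Name, "csi-claim", node.Name, "grep 'hello world' /mnt/test/data")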