Merge pull request #62433 from davidz627/feature/csiGCETest

Automatic merge from submit-queue (batch tested with PRs 62694, 62569, 62646, 61633, 62433). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Add GCE-PD CSI Driver test to E2E test suite

Fixes: #60462

/sig storage
/kind technical-debt
/assign @saad-ali @msau42 

**What this PR does / why we need it**:
This PR adds an E2E test for the GCE-PD CSI driver. The test deploys the driver in a production-like setting and verifies that dynamic provisioning through it succeeds.

```release-note
NONE
```
Kubernetes Submit Queue 2018-04-18 14:44:24 -07:00 committed by GitHub
commit 17fec00b89
9 changed files with 747 additions and 372 deletions


@@ -382,6 +382,22 @@ func SkipUnlessNodeOSDistroIs(supportedNodeOsDistros ...string) {
}
}
func SkipUnlessSecretExistsAfterWait(c clientset.Interface, name, namespace string, timeout time.Duration) {
Logf("Waiting for secret %v in namespace %v to exist in duration %v", name, namespace, timeout)
start := time.Now()
if wait.PollImmediate(15*time.Second, timeout, func() (bool, error) {
_, err := c.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{})
if err != nil {
Logf("Secret %v in namespace %v still does not exist after duration %v", name, namespace, time.Since(start))
return false, nil
}
return true, nil
}) != nil {
Skipf("Secret %v in namespace %v did not exist after timeout of %v", name, namespace, timeout)
}
Logf("Secret %v in namespace %v found after duration %v", name, namespace, time.Since(start))
}
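
A minimal usage sketch, not part of this diff: it assumes the same framework package, dot-imported Ginkgo, and `time` import used by the storage e2e tests. A suite that depends on pre-provisioned credentials can gate itself on the secret and skip rather than fail, as the GCE-PD CSI test in this PR does with `cloud-sa`.

```go
// Sketch only: gate a suite on a pre-provisioned secret.
var _ = Describe("driver that needs credentials", func() {
	f := framework.NewDefaultFramework("csi-secrets")
	BeforeEach(func() {
		// Skips (rather than fails) the test if the secret never appears.
		framework.SkipUnlessSecretExistsAfterWait(f.ClientSet, "cloud-sa", f.Namespace.Name, 3*time.Minute)
	})
	It("runs only once the secret exists", func() { /* ... */ })
})
```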
func SkipIfContainerRuntimeIs(runtimes ...string) {
for _, runtime := range runtimes {
if runtime == TestContext.ContainerRuntime {


@@ -125,3 +125,20 @@ func StatefulSetFromManifest(fileName, ns string) (*apps.StatefulSet, error) {
}
return &ss, nil
}
// DaemonSetFromManifest returns a DaemonSet from a manifest stored in fileName in the Namespace indicated by ns.
func DaemonSetFromManifest(fileName, ns string) (*apps.DaemonSet, error) {
var ds apps.DaemonSet
data := generated.ReadOrDie(fileName)
json, err := utilyaml.ToJSON(data)
if err != nil {
return nil, err
}
err = runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), json, &ds)
if err != nil {
return nil, err
}
ds.Namespace = ns
return &ds, nil
}
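
For illustration only, using the manifest path added by this PR (`client` and `ns` are placeholders): load a DaemonSet manifest and create it through the typed client, as deployGCEPDCSIDriver does later in this diff.

```go
// Sketch only: DaemonSetFromManifest has already rewritten the object's
// namespace to ns, so it can be created directly.
ds, err := manifest.DaemonSetFromManifest(
	"test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml", ns)
framework.ExpectNoError(err, "Failed to parse DaemonSet manifest")
_, err = client.AppsV1().DaemonSets(ns).Create(ds)
framework.ExpectNoError(err, "Failed to create DaemonSet %q", ds.Name)
```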


@@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"csi_hostpath.go",
"csi_objects.go",
"csi_volumes.go",
"empty_dir_wrapper.go",
"flexvolume.go",
@@ -39,6 +39,7 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/generated:go_default_library",
"//test/e2e/manifest:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//test/e2e/storage/vsphere:go_default_library",
"//test/utils/image:go_default_library",


@@ -1,199 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file is used to deploy the CSI hostPath plugin
// More Information: https://github.com/kubernetes-csi/drivers/tree/master/pkg/hostpath
package storage
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
csiHostPathPluginImage string = "quay.io/k8scsi/hostpathplugin:v0.2.0"
)
func csiHostPathPod(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
f *framework.Framework,
sa *v1.ServiceAccount,
) *v1.Pod {
podClient := client.CoreV1().Pods(config.Namespace)
priv := true
mountPropagation := v1.MountPropagationBidirectional
hostPathType := v1.HostPathDirectoryOrCreate
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-pod",
Namespace: config.Namespace,
Labels: map[string]string{
"app": "hostpath-driver",
},
},
Spec: v1.PodSpec{
ServiceAccountName: sa.GetName(),
NodeName: config.ServerNodeName,
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Name: "external-provisioner",
Image: csiExternalProvisionerImage,
ImagePullPolicy: v1.PullAlways,
Args: []string{
"--v=5",
"--provisioner=csi-hostpath",
"--csi-address=/csi/csi.sock",
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
},
},
{
Name: "driver-registrar",
Image: csiDriverRegistrarImage,
ImagePullPolicy: v1.PullAlways,
Args: []string{
"--v=5",
"--csi-address=/csi/csi.sock",
},
Env: []v1.EnvVar{
{
Name: "KUBE_NODE_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
},
},
{
Name: "external-attacher",
Image: csiExternalAttacherImage,
ImagePullPolicy: v1.PullAlways,
Args: []string{
"--v=5",
"--csi-address=$(ADDRESS)",
},
Env: []v1.EnvVar{
{
Name: "ADDRESS",
Value: "/csi/csi.sock",
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
},
},
{
Name: "hostpath-driver",
Image: csiHostPathPluginImage,
ImagePullPolicy: v1.PullAlways,
SecurityContext: &v1.SecurityContext{
Privileged: &priv,
},
Args: []string{
"--v=5",
"--endpoint=$(CSI_ENDPOINT)",
"--nodeid=$(KUBE_NODE_NAME)",
},
Env: []v1.EnvVar{
{
Name: "CSI_ENDPOINT",
Value: "unix://" + "/csi/csi.sock",
},
{
Name: "KUBE_NODE_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
{
Name: "mountpoint-dir",
MountPath: "/var/lib/kubelet/pods",
MountPropagation: &mountPropagation,
},
},
},
},
Volumes: []v1.Volume{
{
Name: "socket-dir",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/var/lib/kubelet/plugins/csi-hostpath",
Type: &hostPathType,
},
},
},
{
Name: "mountpoint-dir",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/var/lib/kubelet/pods",
Type: &hostPathType,
},
},
},
},
},
}
err := framework.DeletePodWithWait(f, client, pod)
framework.ExpectNoError(err, "Failed to delete pod %s/%s: %v",
pod.GetNamespace(), pod.GetName(), err)
if teardown {
return nil
}
ret, err := podClient.Create(pod)
if err != nil {
framework.ExpectNoError(err, "Failed to create %q pod: %v", pod.GetName(), err)
}
// Wait for pod to come up
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, ret))
return ret
}


@@ -0,0 +1,404 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file contains helpers for deploying the CSI drivers under test (the hostPath plugin and the GCE-PD driver)
// More information on the hostPath plugin: https://github.com/kubernetes-csi/drivers/tree/master/pkg/hostpath
package storage
import (
"fmt"
"time"
"k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/manifest"
. "github.com/onsi/ginkgo"
)
const (
csiHostPathPluginImage string = "quay.io/k8scsi/hostpathplugin:v0.2.0"
csiExternalAttacherImage string = "quay.io/k8scsi/csi-attacher:v0.2.0"
csiExternalProvisionerImage string = "quay.io/k8scsi/csi-provisioner:v0.2.0"
csiDriverRegistrarImage string = "quay.io/k8scsi/driver-registrar:v0.2.0"
)
// Create the driver registrar cluster role if it doesn't exist. No teardown is performed so that the tests
// remain parallelizable; this role is shared by many of the CSI tests.
func csiDriverRegistrarClusterRole(
config framework.VolumeTestConfig,
) *rbacv1.ClusterRole {
// TODO(Issue: #62237) Remove impersonation workaround and cluster role when issue resolved
By("Creating an impersonating superuser kubernetes clientset to define cluster role")
rc, err := framework.LoadConfig()
framework.ExpectNoError(err)
rc.Impersonate = restclient.ImpersonationConfig{
UserName: "superuser",
Groups: []string{"system:masters"},
}
superuserClientset, err := clientset.NewForConfig(rc)
framework.ExpectNoError(err, "Failed to create superuser clientset: %v", err)
By("Creating the CSI driver registrar cluster role")
clusterRoleClient := superuserClientset.RbacV1().ClusterRoles()
role := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: csiDriverRegistrarClusterRoleName,
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"events"},
Verbs: []string{"get", "list", "watch", "create", "update", "patch"},
},
{
APIGroups: []string{""},
Resources: []string{"nodes"},
Verbs: []string{"get", "update", "patch"},
},
},
}
ret, err := clusterRoleClient.Create(role)
if err != nil {
if apierrs.IsAlreadyExists(err) {
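// The role already existed, so Create returned a nil object; callers must not rely on the returned value here.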
return ret
}
framework.ExpectNoError(err, "Failed to create %s cluster role: %v", role.GetName(), err)
}
return ret
}
func csiServiceAccount(
client clientset.Interface,
config framework.VolumeTestConfig,
componentName string,
teardown bool,
) *v1.ServiceAccount {
creatingString := "Creating"
if teardown {
creatingString = "Deleting"
}
By(fmt.Sprintf("%v a CSI service account for %v", creatingString, componentName))
serviceAccountName := config.Prefix + "-" + componentName + "-service-account"
serviceAccountClient := client.CoreV1().ServiceAccounts(config.Namespace)
sa := &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: serviceAccountName,
},
}
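// Best-effort delete of any leftover service account from a previous run; the poll below confirms it is gone.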
serviceAccountClient.Delete(sa.GetName(), &metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
_, err := serviceAccountClient.Get(sa.GetName(), metav1.GetOptions{})
return apierrs.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
if teardown {
return nil
}
ret, err := serviceAccountClient.Create(sa)
if err != nil {
framework.ExpectNoError(err, "Failed to create %s service account: %v", sa.GetName(), err)
}
return ret
}
func csiClusterRoleBindings(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
sa *v1.ServiceAccount,
clusterRolesNames []string,
) {
bindingString := "Binding"
if teardown {
bindingString = "Unbinding"
}
By(fmt.Sprintf("%v cluster roles %v to the CSI service account %v", bindingString, clusterRolesNames, sa.GetName()))
clusterRoleBindingClient := client.RbacV1().ClusterRoleBindings()
for _, clusterRoleName := range clusterRolesNames {
binding := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-" + clusterRoleName + "-" + config.Namespace + "-role-binding",
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: sa.GetName(),
Namespace: sa.GetNamespace(),
},
},
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
Name: clusterRoleName,
APIGroup: "rbac.authorization.k8s.io",
},
}
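// Best-effort delete of any leftover binding from a previous run; the poll below confirms it is gone.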
clusterRoleBindingClient.Delete(binding.GetName(), &metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
_, err := clusterRoleBindingClient.Get(binding.GetName(), metav1.GetOptions{})
return apierrs.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
if teardown {
return
}
_, err = clusterRoleBindingClient.Create(binding)
if err != nil {
framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err)
}
}
}
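
Both helpers above repeat the same delete-then-poll-then-recreate dance. A possible shared helper, sketched here only as a refactoring idea and not part of this PR (it reuses the `wait` and `apierrs` imports already in this file):

```go
// Sketch only: the delete-then-wait pattern extracted into one helper.
func waitForDeletion(get func() error) error {
	return wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
		return apierrs.IsNotFound(get()), nil
	})
}

// Usage:
//   serviceAccountClient.Delete(name, &metav1.DeleteOptions{})
//   err := waitForDeletion(func() error {
//       _, err := serviceAccountClient.Get(name, metav1.GetOptions{})
//       return err
//   })
```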
func csiHostPathPod(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
f *framework.Framework,
sa *v1.ServiceAccount,
) *v1.Pod {
podClient := client.CoreV1().Pods(config.Namespace)
priv := true
mountPropagation := v1.MountPropagationBidirectional
hostPathType := v1.HostPathDirectoryOrCreate
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-pod",
Namespace: config.Namespace,
Labels: map[string]string{
"app": "hostpath-driver",
},
},
Spec: v1.PodSpec{
ServiceAccountName: sa.GetName(),
NodeName: config.ServerNodeName,
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Name: "external-provisioner",
Image: csiExternalProvisionerImage,
ImagePullPolicy: v1.PullAlways,
Args: []string{
"--v=5",
"--provisioner=csi-hostpath",
"--csi-address=/csi/csi.sock",
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
},
},
{
Name: "driver-registrar",
Image: csiDriverRegistrarImage,
ImagePullPolicy: v1.PullAlways,
Args: []string{
"--v=5",
"--csi-address=/csi/csi.sock",
},
Env: []v1.EnvVar{
{
Name: "KUBE_NODE_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
},
},
{
Name: "external-attacher",
Image: csiExternalAttacherImage,
ImagePullPolicy: v1.PullAlways,
Args: []string{
"--v=5",
"--csi-address=$(ADDRESS)",
},
Env: []v1.EnvVar{
{
Name: "ADDRESS",
Value: "/csi/csi.sock",
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
},
},
{
Name: "hostpath-driver",
Image: csiHostPathPluginImage,
ImagePullPolicy: v1.PullAlways,
SecurityContext: &v1.SecurityContext{
Privileged: &priv,
},
Args: []string{
"--v=5",
"--endpoint=$(CSI_ENDPOINT)",
"--nodeid=$(KUBE_NODE_NAME)",
},
Env: []v1.EnvVar{
{
Name: "CSI_ENDPOINT",
Value: "unix://" + "/csi/csi.sock",
},
{
Name: "KUBE_NODE_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "socket-dir",
MountPath: "/csi",
},
{
Name: "mountpoint-dir",
MountPath: "/var/lib/kubelet/pods",
MountPropagation: &mountPropagation,
},
},
},
},
Volumes: []v1.Volume{
{
Name: "socket-dir",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/var/lib/kubelet/plugins/csi-hostpath",
Type: &hostPathType,
},
},
},
{
Name: "mountpoint-dir",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/var/lib/kubelet/pods",
Type: &hostPathType,
},
},
},
},
},
}
err := framework.DeletePodWithWait(f, client, pod)
framework.ExpectNoError(err, "Failed to delete pod %s/%s: %v",
pod.GetNamespace(), pod.GetName(), err)
if teardown {
return nil
}
ret, err := podClient.Create(pod)
if err != nil {
framework.ExpectNoError(err, "Failed to create %q pod: %v", pod.GetName(), err)
}
// Wait for pod to come up
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, ret))
return ret
}
func deployGCEPDCSIDriver(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
f *framework.Framework,
nodeSA *v1.ServiceAccount,
controllerSA *v1.ServiceAccount,
) {
// Get API Objects from manifests
nodeds, err := manifest.DaemonSetFromManifest("test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml", config.Namespace)
framework.ExpectNoError(err, "Failed to create DaemonSet from manifest")
nodeds.Spec.Template.Spec.ServiceAccountName = nodeSA.GetName()
controllerss, err := manifest.StatefulSetFromManifest("test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml", config.Namespace)
framework.ExpectNoError(err, "Failed to create StatefulSet from manifest")
controllerss.Spec.Template.Spec.ServiceAccountName = controllerSA.GetName()
controllerservice, err := manifest.SvcFromManifest("test/e2e/testing-manifests/storage-csi/gce-pd/controller_service.yaml")
framework.ExpectNoError(err, "Failed to create Service from manifest")
// All objects have been parsed from their manifests; delete any existing copies before creating fresh ones
err = client.CoreV1().Services(config.Namespace).Delete(controllerservice.GetName(), nil)
if err != nil {
if !apierrs.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to delete Service: %v", controllerservice.GetName())
}
}
err = client.AppsV1().StatefulSets(config.Namespace).Delete(controllerss.Name, nil)
if err != nil {
if !apierrs.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to delete StatefulSet: %v", controllerss.GetName())
}
}
err = client.AppsV1().DaemonSets(config.Namespace).Delete(nodeds.Name, nil)
if err != nil {
if !apierrs.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to delete DaemonSet: %v", nodeds.GetName())
}
}
if teardown {
return
}
// Create new API Objects through client
_, err = client.CoreV1().Services(config.Namespace).Create(controllerservice)
framework.ExpectNoError(err, "Failed to create Service: %v", controllerservice.Name)
_, err = client.AppsV1().StatefulSets(config.Namespace).Create(controllerss)
framework.ExpectNoError(err, "Failed to create StatefulSet: %v", controllerss.Name)
_, err = client.AppsV1().DaemonSets(config.Namespace).Create(nodeds)
framework.ExpectNoError(err, "Failed to create DaemonSet: %v", nodeds.Name)
}
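
For illustration, a hedged sketch of how deployment and teardown pair up; in this PR the pairing actually happens in gcePDCSIDriver.createCSIDriver/cleanupCSIDriver further down, driven by Ginkgo BeforeEach/AfterEach:

```go
// Illustrative call pairing only; nodeSA and controllerSA are the
// service accounts created via csiServiceAccount.
deployGCEPDCSIDriver(cs, config, false /* teardown */, f, nodeSA, controllerSA)
defer deployGCEPDCSIDriver(cs, config, true /* teardown */, f, nodeSA, controllerSA)
```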


@@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,157 +17,37 @@ limitations under the License.
package storage
import (
"fmt"
"math/rand"
"time"
"k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
csiExternalAttacherImage string = "quay.io/k8scsi/csi-attacher:v0.2.0"
csiExternalProvisionerImage string = "quay.io/k8scsi/csi-provisioner:v0.2.0"
csiDriverRegistrarImage string = "quay.io/k8scsi/driver-registrar:v0.2.0"
csiExternalProvisionerClusterRoleName string = "system:csi-external-provisioner"
csiExternalAttacherClusterRoleName string = "system:csi-external-attacher"
csiDriverRegistrarClusterRoleName string = "csi-driver-registrar"
)
// Create the driver registrar cluster role if it doesn't exist, no teardown so that tests
// are parallelizable. This role will be shared with many of the CSI tests.
func csiDriverRegistrarClusterRole(
config framework.VolumeTestConfig,
) *rbacv1.ClusterRole {
// TODO(Issue: #62237) Remove impersonation workaround and cluster role when issue resolved
By("Creating an impersonating superuser kubernetes clientset to define cluster role")
rc, err := framework.LoadConfig()
framework.ExpectNoError(err)
rc.Impersonate = restclient.ImpersonationConfig{
UserName: "superuser",
Groups: []string{"system:masters"},
}
superuserClientset, err := clientset.NewForConfig(rc)
By("Creating the CSI driver registrar cluster role")
clusterRoleClient := superuserClientset.RbacV1().ClusterRoles()
role := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: csiDriverRegistrarClusterRoleName,
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"events"},
Verbs: []string{"get", "list", "watch", "create", "update", "patch"},
},
{
APIGroups: []string{""},
Resources: []string{"nodes"},
Verbs: []string{"get", "update", "patch"},
},
},
}
ret, err := clusterRoleClient.Create(role)
if err != nil {
if apierrs.IsAlreadyExists(err) {
return ret
}
framework.ExpectNoError(err, "Failed to create %s cluster role: %v", role.GetName(), err)
}
return ret
type csiTestDriver interface {
createCSIDriver()
cleanupCSIDriver()
createStorageClassTest(node v1.Node) storageClassTest
}
func csiServiceAccount(
client clientset.Interface,
config framework.VolumeTestConfig,
componentName string,
teardown bool,
) *v1.ServiceAccount {
By("Creating a CSI service account")
serviceAccountName := config.Prefix + "-" + componentName + "-service-account"
serviceAccountClient := client.CoreV1().ServiceAccounts(config.Namespace)
sa := &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: serviceAccountName,
},
}
serviceAccountClient.Delete(sa.GetName(), &metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
_, err := serviceAccountClient.Get(sa.GetName(), metav1.GetOptions{})
return apierrs.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
if teardown {
return nil
}
ret, err := serviceAccountClient.Create(sa)
if err != nil {
framework.ExpectNoError(err, "Failed to create %s service account: %v", sa.GetName(), err)
}
return ret
}
func csiClusterRoleBindings(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
sa *v1.ServiceAccount,
clusterRolesNames []string,
) {
By("Binding cluster roles to the CSI service account")
clusterRoleBindingClient := client.RbacV1().ClusterRoleBindings()
for _, clusterRoleName := range clusterRolesNames {
binding := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-" + clusterRoleName + "-" + config.Namespace + "-role-binding",
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: sa.GetName(),
Namespace: sa.GetNamespace(),
},
},
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
Name: clusterRoleName,
APIGroup: "rbac.authorization.k8s.io",
},
}
clusterRoleBindingClient.Delete(binding.GetName(), &metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
_, err := clusterRoleBindingClient.Get(binding.GetName(), metav1.GetOptions{})
return apierrs.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
if teardown {
return
}
_, err = clusterRoleBindingClient.Create(binding)
if err != nil {
framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err)
}
}
var csiTestDrivers = map[string]func(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver{
"hostPath": initCSIHostpath,
// Feature tag to skip test in CI, pending fix of #62237
"[Feature: GCE PD CSI Plugin] gcePD": initCSIgcePD,
}
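
To show how this table extends, here is a hypothetical third driver; `myCSIDriver` and everything in this sketch are illustrative only and do not exist in this PR. A driver plugs in by implementing the csiTestDriver interface introduced above and adding one map entry:

```go
// Hypothetical sketch: none of these names exist in this PR.
type myCSIDriver struct {
	f      *framework.Framework
	config framework.VolumeTestConfig
}

func (d *myCSIDriver) createCSIDriver()  { /* deploy the driver components */ }
func (d *myCSIDriver) cleanupCSIDriver() { /* tear them down */ }
func (d *myCSIDriver) createStorageClassTest(node v1.Node) storageClassTest {
	return storageClassTest{
		name:         "csi-my-driver",
		provisioner:  "csi-my-driver",
		claimSize:    "1Gi",
		expectedSize: "1Gi",
		nodeName:     node.Name,
	}
}

// Registered alongside the built-in drivers:
//   "myDriver": func(f *framework.Framework, c framework.VolumeTestConfig) csiTestDriver {
//       return &myCSIDriver{f: f, config: c}
//   },
```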
var _ = utils.SIGDescribe("CSI Volumes [Flaky]", func() {
@@ -195,46 +75,151 @@ var _ = utils.SIGDescribe("CSI Volumes [Flaky]", func() {
csiDriverRegistrarClusterRole(config)
})
// Create one of these for each of the drivers to be tested
// CSI hostPath driver test
Describe("Sanity CSI plugin test using hostPath CSI driver", func() {
var (
serviceAccount *v1.ServiceAccount
combinedClusterRoleNames []string = []string{
csiExternalAttacherClusterRoleName,
csiExternalProvisionerClusterRoleName,
csiDriverRegistrarClusterRoleName,
}
)
for driverName, initCSIDriver := range csiTestDrivers {
curDriverName := driverName
curInitCSIDriver := initCSIDriver
BeforeEach(func() {
By("deploying csi hostpath driver")
serviceAccount = csiServiceAccount(cs, config, "hostpath", false)
csiClusterRoleBindings(cs, config, false, serviceAccount, combinedClusterRoleNames)
csiHostPathPod(cs, config, false, f, serviceAccount)
Context(fmt.Sprintf("CSI plugin test using CSI driver: %s", curDriverName), func() {
var (
driver csiTestDriver
)
BeforeEach(func() {
driver = curInitCSIDriver(f, config)
driver.createCSIDriver()
})
AfterEach(func() {
driver.cleanupCSIDriver()
})
It("should provision storage", func() {
t := driver.createStorageClassTest(node)
claim := newClaim(t, ns.GetName(), "")
class := newStorageClass(t, ns.GetName(), "")
claim.Spec.StorageClassName = &class.ObjectMeta.Name
testDynamicProvisioning(t, cs, claim, class)
})
})
AfterEach(func() {
By("uninstalling csi hostpath driver")
csiHostPathPod(cs, config, true, f, serviceAccount)
csiClusterRoleBindings(cs, config, true, serviceAccount, combinedClusterRoleNames)
csiServiceAccount(cs, config, "hostpath", true)
})
It("should provision storage with a hostPath CSI driver", func() {
t := storageClassTest{
name: "csi-hostpath",
provisioner: "csi-hostpath",
parameters: map[string]string{},
claimSize: "1Gi",
expectedSize: "1Gi",
nodeName: node.Name,
}
claim := newClaim(t, ns.GetName(), "")
class := newStorageClass(t, ns.GetName(), "")
claim.Spec.StorageClassName = &class.ObjectMeta.Name
testDynamicProvisioning(t, cs, claim, class)
})
})
}
})
type hostpathCSIDriver struct {
combinedClusterRoleNames []string
serviceAccount *v1.ServiceAccount
f *framework.Framework
config framework.VolumeTestConfig
}
func initCSIHostpath(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver {
return &hostpathCSIDriver{
combinedClusterRoleNames: []string{
csiExternalAttacherClusterRoleName,
csiExternalProvisionerClusterRoleName,
csiDriverRegistrarClusterRoleName,
},
f: f,
config: config,
}
}
func (h *hostpathCSIDriver) createStorageClassTest(node v1.Node) storageClassTest {
return storageClassTest{
name: "csi-hostpath",
provisioner: "csi-hostpath",
parameters: map[string]string{},
claimSize: "1Gi",
expectedSize: "1Gi",
nodeName: node.Name,
}
}
func (h *hostpathCSIDriver) createCSIDriver() {
By("deploying csi hostpath driver")
f := h.f
cs := f.ClientSet
config := h.config
h.serviceAccount = csiServiceAccount(cs, config, "hostpath", false)
csiClusterRoleBindings(cs, config, false, h.serviceAccount, h.combinedClusterRoleNames)
csiHostPathPod(cs, config, false, f, h.serviceAccount)
}
func (h *hostpathCSIDriver) cleanupCSIDriver() {
By("uninstalling csi hostpath driver")
f := h.f
cs := f.ClientSet
config := h.config
csiHostPathPod(cs, config, true, f, h.serviceAccount)
csiClusterRoleBindings(cs, config, true, h.serviceAccount, h.combinedClusterRoleNames)
csiServiceAccount(cs, config, "hostpath", true)
}
type gcePDCSIDriver struct {
controllerClusterRoles []string
nodeClusterRoles []string
controllerServiceAccount *v1.ServiceAccount
nodeServiceAccount *v1.ServiceAccount
f *framework.Framework
config framework.VolumeTestConfig
}
func initCSIgcePD(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver {
cs := f.ClientSet
framework.SkipUnlessProviderIs("gce", "gke")
// Currently you will need to manually add the required GCP credentials as a secret "cloud-sa":
// kubectl create secret generic cloud-sa --from-file=PATH/TO/cloud-sa.json --namespace={{config.Namespace}}
// TODO(#62561): Inject the necessary credentials automatically to the driver containers in e2e test
framework.SkipUnlessSecretExistsAfterWait(cs, "cloud-sa", config.Namespace, 3*time.Minute)
return &gcePDCSIDriver{
nodeClusterRoles: []string{
csiDriverRegistrarClusterRoleName,
},
controllerClusterRoles: []string{
csiExternalAttacherClusterRoleName,
csiExternalProvisionerClusterRoleName,
},
f: f,
config: config,
}
}
func (g *gcePDCSIDriver) createStorageClassTest(node v1.Node) storageClassTest {
nodeZone, ok := node.GetLabels()[kubeletapis.LabelZoneFailureDomain]
Expect(ok).To(BeTrue(), "Could not get label %v from node %v", kubeletapis.LabelZoneFailureDomain, node.GetName())
return storageClassTest{
name: "csi-gce-pd",
provisioner: "csi-gce-pd",
parameters: map[string]string{"type": "pd-standard", "zone": nodeZone},
claimSize: "5Gi",
expectedSize: "5Gi",
nodeName: node.Name,
}
}
func (g *gcePDCSIDriver) createCSIDriver() {
By("deploying gce-pd driver")
f := g.f
cs := f.ClientSet
config := g.config
g.controllerServiceAccount = csiServiceAccount(cs, config, "gce-controller", false /* teardown */)
g.nodeServiceAccount = csiServiceAccount(cs, config, "gce-node", false /* teardown */)
csiClusterRoleBindings(cs, config, false /* teardown */, g.controllerServiceAccount, g.controllerClusterRoles)
csiClusterRoleBindings(cs, config, false /* teardown */, g.nodeServiceAccount, g.nodeClusterRoles)
deployGCEPDCSIDriver(cs, config, false /* teardown */, f, g.nodeServiceAccount, g.controllerServiceAccount)
}
func (g *gcePDCSIDriver) cleanupCSIDriver() {
By("uninstalling gce-pd driver")
f := g.f
cs := f.ClientSet
config := g.config
deployGCEPDCSIDriver(cs, config, true /* teardown */, f, g.nodeServiceAccount, g.controllerServiceAccount)
csiClusterRoleBindings(cs, config, true /* teardown */, g.controllerServiceAccount, g.controllerClusterRoles)
csiClusterRoleBindings(cs, config, true /* teardown */, g.nodeServiceAccount, g.nodeClusterRoles)
csiServiceAccount(cs, config, "gce-controller", true /* teardown */)
csiServiceAccount(cs, config, "gce-node", true /* teardown */)
}


@@ -0,0 +1,12 @@
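# Governing Service for the controller StatefulSet (its serviceName references csi-gce-pd); the port is a placeholder and is not used.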
kind: Service
apiVersion: v1
metadata:
name: csi-gce-pd
labels:
app: csi-gce-pd
spec:
selector:
app: csi-gce-pd
ports:
- name: dummy
port: 12345


@@ -0,0 +1,70 @@
kind: StatefulSet
apiVersion: apps/v1beta1
metadata:
name: csi-gce-controller
spec:
serviceName: "csi-gce-pd"
replicas: 1
selector:
matchLabels:
app: csi-gce-pd-driver
template:
metadata:
labels:
app: csi-gce-pd-driver
spec:
serviceAccount: csi-gce-pd
containers:
- name: csi-external-provisioner
imagePullPolicy: Always
image: quay.io/k8scsi/csi-provisioner:v0.2.0
args:
- "--v=5"
- "--provisioner=csi-gce-pd"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-attacher
imagePullPolicy: Always
image: quay.io/k8scsi/csi-attacher:v0.2.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: gce-driver
imagePullPolicy: Always
image: gcr.io/google-containers/volume-csi/compute-persistent-disk-csi-driver:v0.2.0.alpha
args:
- "--v=5"
- "--endpoint=$(CSI_ENDPOINT)"
- "--nodeid=$(KUBE_NODE_NAME)"
env:
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: GOOGLE_APPLICATION_CREDENTIALS
value: "/etc/service-account/cloud-sa.json"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: cloud-sa-volume
readOnly: true
mountPath: "/etc/service-account"
volumes:
- name: socket-dir
emptyDir: {}
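# Assumes a pre-created "cloud-sa" secret holding the GCP service-account key; see the kubectl command in csi_objects.go.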
- name: cloud-sa-volume
secret:
secretName: cloud-sa


@@ -0,0 +1,69 @@
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: csi-gce-node
spec:
selector:
matchLabels:
app: csi-gce-driver
template:
metadata:
labels:
app: csi-gce-driver
spec:
serviceAccount: csi-gce-pd
containers:
- name: csi-driver-registrar
imagePullPolicy: Always
image: quay.io/k8scsi/driver-registrar:v0.2.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /csi/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: plugin-dir
mountPath: /csi
- name: gce-driver
securityContext:
privileged: true
imagePullPolicy: Always
image: gcr.io/google-containers/volume-csi/compute-persistent-disk-csi-driver:v0.2.0.alpha
args:
- "--v=5"
- "--endpoint=$(CSI_ENDPOINT)"
- "--nodeid=$(KUBE_NODE_NAME)"
env:
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
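# Bidirectional propagation so that mounts the driver creates under /var/lib/kubelet inside the container propagate back to the host kubelet.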
- name: kubelet-dir
mountPath: /var/lib/kubelet
mountPropagation: "Bidirectional"
- name: plugin-dir
mountPath: /csi
- name: device-dir
mountPath: /host/dev
volumes:
- name: kubelet-dir
hostPath:
path: /var/lib/kubelet
type: Directory
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins/com.google.csi.gcepd/
type: DirectoryOrCreate
- name: device-dir
hostPath:
path: /dev
type: Directory