Merge pull request #70941 from verult/e2e-csi-topology

CSI topology e2e tests
Kubernetes Prow Robot 2019-01-11 20:10:28 -08:00 committed by GitHub
commit 60e0c2f37e
10 changed files with 332 additions and 117 deletions

View File

@@ -43,6 +43,8 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/util/rand"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
)
// List of testDrivers to be executed in the loop below
@@ -153,6 +155,46 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
})
}
Context("CSI Topology test using GCE PD driver [Feature: CSINodeInfo]", func() {
newConfig := config
newConfig.TopologyEnabled = true
driver := drivers.InitGcePDCSIDriver(newConfig).(testsuites.DynamicPVTestDriver) // TODO (#71289) eliminate by moving this test to common test suite.
BeforeEach(func() {
driver.CreateDriver()
})
AfterEach(func() {
driver.CleanupDriver()
})
It("should provision zonal PD with immediate volume binding and AllowedTopologies set and mount the volume to a pod", func() {
suffix := "topology-positive"
testTopologyPositive(cs, suffix, ns.GetName(), false /* delayBinding */, true /* allowedTopologies */)
})
It("should provision zonal PD with delayed volume binding and mount the volume to a pod", func() {
suffix := "delayed"
testTopologyPositive(cs, suffix, ns.GetName(), true /* delayBinding */, false /* allowedTopologies */)
})
It("should provision zonal PD with delayed volume binding and AllowedTopologies set and mount the volume to a pod", func() {
suffix := "delayed-topology-positive"
testTopologyPositive(cs, suffix, ns.GetName(), true /* delayBinding */, true /* allowedTopologies */)
})
It("should fail to schedule a pod with a zone missing from AllowedTopologies; PD is provisioned with immediate volume binding", func() {
framework.SkipUnlessMultizone(cs)
suffix := "topology-negative"
testTopologyNegative(cs, suffix, ns.GetName(), false /* delayBinding */)
})
It("should fail to schedule a pod with a zone missing from AllowedTopologies; PD is provisioned with delayed volume binding", func() {
framework.SkipUnlessMultizone(cs)
suffix := "delayed-topology-negative"
testTopologyNegative(cs, suffix, ns.GetName(), true /* delayBinding */)
})
})
// The CSIDriverRegistry feature gate is needed for this test in Kubernetes 1.12.
Context("CSI attach test using HostPath driver [Feature:CSIDriverRegistry]", func() {
var (
@@ -374,6 +416,54 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
})
})
func testTopologyPositive(cs clientset.Interface, suffix, namespace string, delayBinding, allowedTopologies bool) {
test := createGCEPDStorageClassTest()
test.DelayBinding = delayBinding
class := newStorageClass(test, namespace, suffix)
if allowedTopologies {
topoZone := getRandomClusterZone(cs)
addSingleCSIZoneAllowedTopologyToStorageClass(cs, class, topoZone)
}
claim := newClaim(test, namespace, suffix)
claim.Spec.StorageClassName = &class.Name
if delayBinding {
_, node := testsuites.TestBindingWaitForFirstConsumer(test, cs, claim, class)
Expect(node).ToNot(BeNil(), "Unexpected nil node found")
} else {
testsuites.TestDynamicProvisioning(test, cs, claim, class)
}
}
func testTopologyNegative(cs clientset.Interface, suffix, namespace string, delayBinding bool) {
framework.SkipUnlessMultizone(cs)
// Use different zones for pod and PV
zones, err := framework.GetClusterZones(cs)
Expect(err).ToNot(HaveOccurred())
Expect(zones.Len()).To(BeNumerically(">=", 2))
zonesList := zones.UnsortedList()
podZoneIndex := rand.Intn(zones.Len())
podZone := zonesList[podZoneIndex]
pvZone := zonesList[(podZoneIndex+1)%zones.Len()]
test := createGCEPDStorageClassTest()
test.DelayBinding = delayBinding
test.NodeSelector = map[string]string{kubeletapis.LabelZoneFailureDomain: podZone}
test.ExpectUnschedulable = true
class := newStorageClass(test, namespace, suffix)
addSingleCSIZoneAllowedTopologyToStorageClass(cs, class, pvZone)
claim := newClaim(test, namespace, suffix)
claim.Spec.StorageClassName = &class.Name
if delayBinding {
testsuites.TestBindingWaitForFirstConsumer(test, cs, claim, class)
} else {
testsuites.TestDynamicProvisioning(test, cs, claim, class)
}
}
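The negative cases hinge on two constraints that cannot both be satisfied: the pod is pinned to podZone through the legacy zone label, while the StorageClass only allows provisioning in pvZone, so the pod must stay Pending. A minimal annotated sketch of the two conflicting halves, assuming illustrative zone names (kubeletapis.LabelZoneFailureDomain resolves to "failure-domain.beta.kubernetes.io/zone"):
// Pod side: the node selector pins scheduling to podZone.
test.NodeSelector = map[string]string{kubeletapis.LabelZoneFailureDomain: "us-central1-b"} // podZone
test.ExpectUnschedulable = true // the pod must stay Pending and no PV may be provisioned
// StorageClass side: AllowedTopologies restricts provisioning to a different zone.
addSingleCSIZoneAllowedTopologyToStorageClass(cs, class, "us-central1-c") // pvZone != podZone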
func createCSIDriver(csics csiclient.Interface, name string, attachable bool, podInfoOnMountVersion *string) *csiv1alpha1.CSIDriver {
By("Creating CSIDriver instance")
driver := &csiv1alpha1.CSIDriver{
@@ -520,3 +610,25 @@ func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContai
return nil
}
}
func addSingleCSIZoneAllowedTopologyToStorageClass(c clientset.Interface, sc *storagev1.StorageClass, zone string) {
term := v1.TopologySelectorTerm{
MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
{
Key: drivers.GCEPDCSIZoneTopologyKey,
Values: []string{zone},
},
},
}
sc.AllowedTopologies = append(sc.AllowedTopologies, term)
}
func createGCEPDStorageClassTest() testsuites.StorageClassTest {
return testsuites.StorageClassTest{
Name: drivers.GCEPDCSIProvisionerName,
Provisioner: drivers.GCEPDCSIProvisionerName,
Parameters: map[string]string{"type": "pd-standard"},
ClaimSize: "5Gi",
ExpectedSize: "5Gi",
}
}
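Putting the helpers together: a minimal sketch of the StorageClass that createGCEPDStorageClassTest plus addSingleCSIZoneAllowedTopologyToStorageClass effectively yield for the delayed-binding, AllowedTopologies-set case. The object name and zone are illustrative, and the suffix wiring inside newStorageClass is assumed.
package example

import (
	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func exampleTopologyStorageClass() *storagev1.StorageClass {
	delayed := storagev1.VolumeBindingWaitForFirstConsumer // test.DelayBinding = true
	return &storagev1.StorageClass{
		ObjectMeta:        metav1.ObjectMeta{Name: "csi-delayed-topology-positive"}, // illustrative
		Provisioner:       "pd.csi.storage.gke.io", // drivers.GCEPDCSIProvisionerName
		Parameters:        map[string]string{"type": "pd-standard"},
		VolumeBindingMode: &delayed,
		// Added by addSingleCSIZoneAllowedTopologyToStorageClass:
		AllowedTopologies: []v1.TopologySelectorTerm{{
			MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{{
				Key:    "topology.gke.io/zone",    // drivers.GCEPDCSIZoneTopologyKey
				Values: []string{"us-central1-b"}, // getRandomClusterZone(cs)
			}},
		}},
	}
}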

View File

@@ -48,6 +48,11 @@ import (
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
GCEPDCSIProvisionerName = "pd.csi.storage.gke.io"
GCEPDCSIZoneTopologyKey = "topology.gke.io/zone"
)
// hostpathCSI
type hostpathCSIDriver struct {
cleanup func()
@@ -265,7 +270,7 @@ var _ testsuites.DynamicPVTestDriver = &gcePDCSIDriver{}
func InitGcePDCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
return &gcePDCSIDriver{
driverInfo: testsuites.DriverInfo{
Name: "pd.csi.storage.gke.io",
Name: GCEPDCSIProvisionerName,
FeatureTag: "[Serial]",
MaxFileSize: testpatterns.FileSizeMedium,
SupportedFsType: sets.NewString(
@@ -293,7 +298,12 @@ func (g *gcePDCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
func (g *gcePDCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
f := g.driverInfo.Config.Framework
framework.SkipUnlessProviderIs("gce", "gke")
framework.SkipIfMultizone(f.ClientSet)
if !g.driverInfo.Config.TopologyEnabled {
// Topology is disabled in external-provisioner, so in a multizone cluster, a pod could be
// scheduled in a different zone from the provisioned volume, causing basic provisioning
// tests to fail.
framework.SkipIfMultizone(f.ClientSet)
}
}
func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
@@ -326,14 +336,20 @@ func (g *gcePDCSIDriver) CreateDriver() {
// }
createGCESecrets(g.driverInfo.Config.Framework.ClientSet, g.driverInfo.Config.Framework.Namespace.Name)
cleanup, err := g.driverInfo.Config.Framework.CreateFromManifests(nil,
manifests := []string{
"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/gce-pd/csi-controller-rbac.yaml",
"test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml",
"test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml",
)
}
if g.driverInfo.Config.TopologyEnabled {
manifests = append(manifests, "test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss_alpha.yaml")
} else {
manifests = append(manifests, "test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml")
}
cleanup, err := g.driverInfo.Config.Framework.CreateFromManifests(nil, manifests...)
g.cleanup = cleanup
if err != nil {
framework.Failf("deploying csi gce-pd driver: %v", err)
@@ -359,8 +375,7 @@ var _ testsuites.DynamicPVTestDriver = &gcePDExternalCSIDriver{}
func InitGcePDExternalCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
return &gcePDExternalCSIDriver{
driverInfo: testsuites.DriverInfo{
Name: "pd.csi.storage.gke.io",
Name: GCEPDCSIProvisionerName,
// TODO(#70258): this is temporary until we can figure out how to make e2e tests a library
FeatureTag: "[Feature: gcePD-external]",
MaxFileSize: testpatterns.FileSizeMedium,

View File

@@ -319,7 +319,7 @@ func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int)
claim.Spec.StorageClassName = &class.Name
claims = append(claims, claim)
}
pvs, node := testBindingWaitForFirstConsumerMultiPVC(c, claims, class)
pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class)
if node == nil {
framework.Failf("unexpected nil node found")
}
@@ -376,7 +376,7 @@ func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns s
claim.Spec.StorageClassName = &class.Name
claims = append(claims, claim)
}
pvs, node := testBindingWaitForFirstConsumerMultiPVC(c, claims, class)
pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class)
if node == nil {
framework.Failf("unexpected nil node found")
}

View File

@@ -20,6 +20,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",

View File

@@ -29,6 +29,7 @@ import (
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
@@ -37,18 +38,20 @@ import (
// StorageClassTest represents parameters to be used by provisioning tests
type StorageClassTest struct {
Name string
CloudProviders []string
Provisioner string
StorageClassName string
Parameters map[string]string
DelayBinding bool
ClaimSize string
ExpectedSize string
PvCheck func(volume *v1.PersistentVolume) error
NodeName string
SkipWriteReadCheck bool
VolumeMode *v1.PersistentVolumeMode
Name string
CloudProviders []string
Provisioner string
StorageClassName string
Parameters map[string]string
DelayBinding bool
ClaimSize string
ExpectedSize string
PvCheck func(volume *v1.PersistentVolume) error
NodeName string
SkipWriteReadCheck bool
VolumeMode *v1.PersistentVolumeMode
NodeSelector map[string]string // NodeSelector for the pod
ExpectUnschedulable bool // Whether the test pod is expected to be unschedulable
}
type provisioningTestSuite struct {
@@ -280,10 +283,10 @@ func TestDynamicProvisioning(t StorageClassTest, client clientset.Interface, cla
command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
}
command += " || (mount | grep 'on /mnt/test'; false)"
runInPodWithVolume(client, claim.Namespace, claim.Name, t.NodeName, command)
runInPodWithVolume(client, claim.Namespace, claim.Name, t.NodeName, command, t.NodeSelector, t.ExpectUnschedulable)
By("checking the created volume is readable and retains data")
runInPodWithVolume(client, claim.Namespace, claim.Name, t.NodeName, "grep 'hello world' /mnt/test/data")
runInPodWithVolume(client, claim.Namespace, claim.Name, t.NodeName, "grep 'hello world' /mnt/test/data", t.NodeSelector, t.ExpectUnschedulable)
}
By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name))
framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))
@@ -303,8 +306,97 @@ func TestDynamicProvisioning(t StorageClassTest, client clientset.Interface, cla
return pv
}
func TestBindingWaitForFirstConsumer(t StorageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass) (*v1.PersistentVolume, *v1.Node) {
pvs, node := TestBindingWaitForFirstConsumerMultiPVC(t, client, []*v1.PersistentVolumeClaim{claim}, class)
if pvs == nil {
return nil, node
}
return pvs[0], node
}
func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientset.Interface, claims []*v1.PersistentVolumeClaim, class *storage.StorageClass) ([]*v1.PersistentVolume, *v1.Node) {
var err error
Expect(len(claims)).ToNot(Equal(0))
namespace := claims[0].Namespace
By("creating a storage class " + class.Name)
class, err = client.StorageV1().StorageClasses().Create(class)
Expect(err).NotTo(HaveOccurred())
defer deleteStorageClass(client, class.Name)
By("creating claims")
var claimNames []string
var createdClaims []*v1.PersistentVolumeClaim
for _, claim := range claims {
c, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
claimNames = append(claimNames, c.Name)
createdClaims = append(createdClaims, c)
Expect(err).NotTo(HaveOccurred())
}
defer func() {
errors := map[string]error{}
for _, claim := range createdClaims {
err := framework.DeletePersistentVolumeClaim(client, claim.Name, claim.Namespace)
if err != nil {
errors[claim.Name] = err
}
}
if len(errors) > 0 {
for claimName, err := range errors {
framework.Logf("Failed to delete PVC: %s due to error: %v", claimName, err)
}
}
}()
// Wait for ClaimProvisionShortTimeout (across all PVCs in parallel) and make sure the phase does not become Bound, i.e. the wait errors out
By("checking the claims are in pending state")
err = framework.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, client, namespace, claimNames, 2*time.Second /* Poll */, framework.ClaimProvisionShortTimeout, true)
Expect(err).To(HaveOccurred())
verifyPVCsPending(client, createdClaims)
By("creating a pod referring to the claims")
// Create a pod referring to the claim and wait for it to get to running
var pod *v1.Pod
if t.ExpectUnschedulable {
pod, err = framework.CreateUnschedulablePod(client, namespace, t.NodeSelector, createdClaims, true /* isPrivileged */, "" /* command */)
} else {
pod, err = framework.CreatePod(client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */)
}
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.DeletePodOrFail(client, pod.Namespace, pod.Name)
framework.WaitForPodToDisappear(client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
}()
if t.ExpectUnschedulable {
// Verify that no claims are provisioned.
verifyPVCsPending(client, createdClaims)
return nil, nil
}
// collect node details
node, err := client.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("re-checking the claims to see they binded")
var pvs []*v1.PersistentVolume
for _, claim := range createdClaims {
// Get new copy of the claim
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// make sure claim did bind
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pvs = append(pvs, pv)
}
Expect(len(pvs)).To(Equal(len(createdClaims)))
return pvs, node
}
// runInPodWithVolume runs a command in a pod with the given claim mounted to the /mnt directory. If unschedulable is true, it instead verifies that the pod never schedules.
func runInPodWithVolume(c clientset.Interface, ns, claimName, nodeName, command string) {
func runInPodWithVolume(c clientset.Interface, ns, claimName, nodeName, command string, nodeSelector map[string]string, unschedulable bool) {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
@@ -340,6 +432,7 @@ func runInPodWithVolume(c clientset.Interface, ns, claimName, nodeName, command
},
},
},
NodeSelector: nodeSelector,
},
}
@@ -357,5 +450,19 @@ func runInPodWithVolume(c clientset.Interface, ns, claimName, nodeName, command
}
framework.DeletePodOrFail(c, ns, pod.Name)
}()
framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
if unschedulable {
framework.ExpectNoError(framework.WaitForPodNameUnschedulableInNamespace(c, pod.Name, pod.Namespace))
} else {
framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
}
}
func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) {
for _, claim := range pvcs {
// Get new copy of the claim
claim, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(claim.Status.Phase).To(Equal(v1.ClaimPending))
}
}
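A minimal usage sketch of the two call patterns the newly exported helper supports, assuming the variables from the topology tests earlier in this diff (test, client, claim/claims, class, podZone):
// Happy path: delayed binding; expect a node back and the claims bound.
pv, node := testsuites.TestBindingWaitForFirstConsumer(test, client, claim, class)
Expect(node).ToNot(BeNil())
Expect(pv).ToNot(BeNil())
// Negative path: NodeSelector and AllowedTopologies conflict, so the pod stays
// unschedulable and the helper returns (nil, nil) with all claims still Pending.
test.NodeSelector = map[string]string{kubeletapis.LabelZoneFailureDomain: podZone}
test.ExpectUnschedulable = true
pvs, node2 := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, client, claims, class)
Expect(pvs).To(BeNil())
Expect(node2).To(BeNil())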

View File

@@ -130,4 +130,8 @@ type TestConfig struct {
// the configuration that then has to be used to run tests.
// The values above are ignored for such tests.
ServerConfig *framework.VolumeTestConfig
// TopologyEnabled indicates that the Topology feature gate
// should be enabled in external-provisioner
TopologyEnabled bool
}
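A minimal usage sketch, mirroring the GCE PD topology Context earlier in this diff: copying the suite-level config and flipping the flag makes CreateDriver deploy controller_ss_alpha.yaml, whose external-provisioner runs with --feature-gates=Topology=true.
newConfig := config // the suite's existing testsuites.TestConfig
newConfig.TopologyEnabled = true
driver := drivers.InitGcePDCSIDriver(newConfig).(testsuites.DynamicPVTestDriver)
driver.CreateDriver() // selects controller_ss_alpha.yaml instead of controller_ss.yaml
defer driver.CleanupDriver()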

View File

@@ -39,6 +39,7 @@ import (
storagebeta "k8s.io/api/storage/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
clientset "k8s.io/client-go/kubernetes"
@@ -58,85 +59,6 @@ const (
multiPVCcount = 3
)
func testBindingWaitForFirstConsumer(client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass) (*v1.PersistentVolume, *v1.Node) {
pvs, node := testBindingWaitForFirstConsumerMultiPVC(client, []*v1.PersistentVolumeClaim{claim}, class)
return pvs[0], node
}
func testBindingWaitForFirstConsumerMultiPVC(client clientset.Interface, claims []*v1.PersistentVolumeClaim, class *storage.StorageClass) ([]*v1.PersistentVolume, *v1.Node) {
var err error
Expect(len(claims)).ToNot(Equal(0))
namespace := claims[0].Namespace
By("creating a storage class " + class.Name)
class, err = client.StorageV1().StorageClasses().Create(class)
Expect(err).NotTo(HaveOccurred())
defer deleteStorageClass(client, class.Name)
By("creating claims")
var claimNames []string
var createdClaims []*v1.PersistentVolumeClaim
for _, claim := range claims {
c, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
claimNames = append(claimNames, c.Name)
createdClaims = append(createdClaims, c)
Expect(err).NotTo(HaveOccurred())
}
defer func() {
errors := map[string]error{}
for _, claim := range createdClaims {
err := framework.DeletePersistentVolumeClaim(client, claim.Name, claim.Namespace)
if err != nil {
errors[claim.Name] = err
}
}
if len(errors) > 0 {
for claimName, err := range errors {
framework.Logf("Failed to delete PVC: %s due to error: %v", claimName, err)
}
}
}()
// Wait for ClaimProvisionShortTimeout (across all PVCs in parallel) and make sure the phase does not become Bound, i.e. the wait errors out
By("checking the claims are in pending state")
err = framework.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, client, namespace, claimNames, 2*time.Second, framework.ClaimProvisionShortTimeout, true)
Expect(err).To(HaveOccurred())
for _, claim := range createdClaims {
// Get new copy of the claim
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(claim.Status.Phase).To(Equal(v1.ClaimPending))
}
By("creating a pod referring to the claims")
// Create a pod referring to the claim and wait for it to get to running
pod, err := framework.CreatePod(client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.DeletePodOrFail(client, pod.Namespace, pod.Name)
}()
// collect node details
node, err := client.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("re-checking the claims to see they binded")
var pvs []*v1.PersistentVolume
for _, claim := range createdClaims {
// Get new copy of the claim
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// make sure claim did bind
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pvs = append(pvs, pv)
}
Expect(len(pvs)).ToNot(Equal(0))
return pvs, node
}
func checkZoneFromLabelAndAffinity(pv *v1.PersistentVolume, zone string, matchZone bool) {
checkZonesFromLabelAndAffinity(pv, sets.NewString(zone), matchZone)
}
@@ -295,7 +217,7 @@ func testZonalDelayedBinding(c clientset.Interface, ns string, specifyAllowedTop
if specifyAllowedTopology {
action += " and allowedTopologies"
suffix += "-topo"
topoZone = getRandomCloudZone(c)
topoZone = getRandomClusterZone(c)
addSingleZoneAllowedTopologyToStorageClass(c, class, topoZone)
}
By(action)
@@ -305,7 +227,7 @@ func testZonalDelayedBinding(c clientset.Interface, ns string, specifyAllowedTop
claim.Spec.StorageClassName = &class.Name
claims = append(claims, claim)
}
pvs, node := testBindingWaitForFirstConsumerMultiPVC(c, claims, class)
pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class)
if node == nil {
framework.Failf("unexpected nil node found")
}
@@ -336,7 +258,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Describe("DynamicProvisioner [Slow]", func() {
It("should provision storage with different parameters", func() {
cloudZone := getRandomCloudZone(c)
cloudZone := getRandomClusterZone(c)
// This test checks that dynamic provisioning can provision a volume
// that can be used to persist data among pods.
@@ -968,7 +890,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
By("creating a claim with class with allowedTopologies set")
suffix := "topology"
class := newStorageClass(test, ns, suffix)
zone := getRandomCloudZone(c)
zone := getRandomClusterZone(c)
addSingleZoneAllowedTopologyToStorageClass(c, class, zone)
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
@@ -1244,10 +1166,11 @@ func deleteProvisionedVolumesAndDisks(c clientset.Interface, pvs []*v1.Persisten
}
}
func getRandomCloudZone(c clientset.Interface) string {
func getRandomClusterZone(c clientset.Interface) string {
zones, err := framework.GetClusterZones(c)
Expect(err).ToNot(HaveOccurred())
// return "" in case that no node has zone label
zone, _ := zones.PopAny()
return zone
Expect(len(zones)).ToNot(Equal(0))
zonesList := zones.UnsortedList()
return zonesList[rand.Intn(zones.Len())]
}

View File

@@ -16,16 +16,15 @@ spec:
serviceAccountName: csi-controller-sa
containers:
- name: csi-provisioner
image: gcr.io/gke-release/csi-provisioner:v1.0.0-gke.0
image: gcr.io/gke-release/csi-provisioner:v1.0.1-gke.0
args:
- "--v=5"
- "--provisioner=pd.csi.storage.gke.io"
- "--csi-address=/csi/csi.sock"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-attacher
image: gcr.io/gke-release/csi-attacher:v1.0.0-gke.0
image: gcr.io/gke-release/csi-attacher:v1.0.1-gke.0
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
@@ -33,7 +32,7 @@ spec:
- name: socket-dir
mountPath: /csi
- name: gce-pd-driver
image: gcr.io/gke-release/gcp-compute-persistent-disk-csi-driver:v0.3.0-gke.0
image: gcr.io/gke-release/gcp-compute-persistent-disk-csi-driver:v0.3.1-gke.0
args:
- "--v=5"
- "--endpoint=unix:/csi/csi.sock"

View File

@@ -0,0 +1,54 @@
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-gce-pd-controller
spec:
serviceName: "csi-gce-pd"
replicas: 1
selector:
matchLabels:
app: gcp-compute-persistent-disk-csi-driver
template:
metadata:
labels:
app: gcp-compute-persistent-disk-csi-driver
spec:
serviceAccountName: csi-controller-sa
containers:
- name: csi-provisioner
image: gcr.io/gke-release/csi-provisioner:v1.0.1-gke.0
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
- "--feature-gates=Topology=true"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-attacher
image: gcr.io/gke-release/csi-attacher:v1.0.1-gke.0
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: gce-pd-driver
image: gcr.io/gke-release/gcp-compute-persistent-disk-csi-driver:v0.3.1-gke.0
args:
- "--v=5"
- "--endpoint=unix:/csi/csi.sock"
env:
- name: GOOGLE_APPLICATION_CREDENTIALS
value: "/etc/cloud-sa/cloud-sa.json"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: cloud-sa-volume
readOnly: true
mountPath: "/etc/cloud-sa"
volumes:
- name: socket-dir
emptyDir: {}
- name: cloud-sa-volume
secret:
secretName: cloud-sa

View File

@@ -36,7 +36,7 @@ spec:
- name: gce-pd-driver
securityContext:
privileged: true
image: gcr.io/gke-release/gcp-compute-persistent-disk-csi-driver:v0.3.0-gke.0
image: gcr.io/gke-release/gcp-compute-persistent-disk-csi-driver:v0.3.1-gke.0
args:
- "--v=5"
- "--endpoint=unix:/csi/csi.sock"