Clean up local volume provisioner e2e tests

pull/564/head
Yecheng Fu 2019-01-06 22:30:56 +08:00
parent 5c4b536f48
commit b7a33511e5
2 changed files with 0 additions and 674 deletions


@@ -82,7 +82,6 @@ go_library(
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/prometheus/common/model:go_default_library",
"//vendor/google.golang.org/api/googleapi:go_default_library",
"//vendor/sigs.k8s.io/yaml:go_default_library",
],
)


@@ -18,24 +18,19 @@ package storage
import (
"fmt"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"sigs.k8s.io/yaml"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
@@ -137,24 +132,6 @@ const (
testFileContent = "test-file-content"
testSCPrefix = "local-volume-test-storageclass"
// Following are constants used for provisioner e2e tests.
//
// testServiceAccount is the service account for bootstrapper
testServiceAccount = "local-storage-admin"
// volumeConfigName is the configmap passed to bootstrapper and provisioner
volumeConfigName = "local-volume-config"
// provisioner image used for e2e tests
provisionerImageName = "quay.io/external_storage/local-volume-provisioner:v2.1.0"
// provisioner daemonSet name
daemonSetName = "local-volume-provisioner"
// provisioner default mount point folder
provisionerDefaultMountRoot = "/mnt/local-storage"
// provisioner node/pv cluster role binding
nodeBindingName = "local-storage:provisioner-node-binding"
pvBindingName = "local-storage:provisioner-pv-binding"
systemRoleNode = "system:node"
systemRolePVProvisioner = "system:persistent-volume-provisioner"
// A sample request size
testRequestSize = "10Mi"
@@ -398,107 +375,6 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
})
})
Context("Local volume provisioner [Serial]", func() {
var volumePath string
BeforeEach(func() {
setupStorageClass(config, &immediateMode)
setupLocalVolumeProvisioner(config)
volumePath = path.Join(config.discoveryDir, fmt.Sprintf("vol-%v", string(uuid.NewUUID())))
setupLocalVolumeProvisionerMountPoint(config, volumePath, config.node0)
})
AfterEach(func() {
cleanupLocalVolumeProvisionerMountPoint(config, volumePath, config.node0)
cleanupLocalVolumeProvisioner(config)
cleanupStorageClass(config)
})
It("should create and recreate local persistent volume", func() {
By("Starting a provisioner daemonset")
createProvisionerDaemonset(config)
By("Waiting for a PersistentVolume to be created")
oldPV, err := waitForLocalPersistentVolume(config.client, volumePath)
Expect(err).NotTo(HaveOccurred())
// Create a persistent volume claim for local volume: the above volume will be bound.
By("Creating a persistent volume claim")
claim, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(newLocalClaim(config))
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPersistentVolumeClaimPhase(
v1.ClaimBound, config.client, claim.Namespace, claim.Name, framework.Poll, 1*time.Minute)
Expect(err).NotTo(HaveOccurred())
claim, err = config.client.CoreV1().PersistentVolumeClaims(config.ns).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(claim.Spec.VolumeName).To(Equal(oldPV.Name))
// Delete the persistent volume claim: the file will be cleaned up and the volume re-created.
By("Deleting the persistent volume claim to clean up persistent volume and re-create one")
writeCmd := createWriteCmd(volumePath, testFile, testFileContent, DirectoryLocalVolumeType)
err = issueNodeCommand(config, writeCmd, config.node0)
Expect(err).NotTo(HaveOccurred())
err = config.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
By("Waiting for a new PersistentVolume to be re-created")
newPV, err := waitForLocalPersistentVolume(config.client, volumePath)
Expect(err).NotTo(HaveOccurred())
Expect(newPV.UID).NotTo(Equal(oldPV.UID))
fileDoesntExistCmd := createFileDoesntExistCmd(volumePath, testFile)
err = issueNodeCommand(config, fileDoesntExistCmd, config.node0)
Expect(err).NotTo(HaveOccurred())
By("Deleting provisioner daemonset")
deleteProvisionerDaemonset(config)
})
It("should not create local persistent volume for filesystem volume that was not bind mounted", func() {
directoryPath := filepath.Join(config.discoveryDir, "notbindmount")
By("Creating a directory, not bind mounted, in discovery directory")
mkdirCmd := fmt.Sprintf("mkdir -p %v -m 777", directoryPath)
err := issueNodeCommand(config, mkdirCmd, config.node0)
Expect(err).NotTo(HaveOccurred())
By("Starting a provisioner daemonset")
createProvisionerDaemonset(config)
By("Allowing provisioner to run for 30s and discover potential local PVs")
time.Sleep(30 * time.Second)
By("Examining provisioner logs for not an actual mountpoint message")
provisionerPodName := findProvisionerDaemonsetPodName(config)
logs, err := framework.GetPodLogs(config.client, config.ns, provisionerPodName, "" /*containerName*/)
Expect(err).NotTo(HaveOccurred(),
"Error getting logs from pod %s in namespace %s", provisionerPodName, config.ns)
expectedLogMessage := "Path \"/mnt/local-storage/notbindmount\" is not an actual mountpoint"
Expect(strings.Contains(logs, expectedLogMessage)).To(BeTrue())
By("Deleting provisioner daemonset")
deleteProvisionerDaemonset(config)
})
It("should discover dynamically created local persistent volume mountpoint in discovery directory", func() {
By("Starting a provisioner daemonset")
createProvisionerDaemonset(config)
By("Creating a volume in discovery directory")
dynamicVolumePath := path.Join(config.discoveryDir, fmt.Sprintf("vol-%v", string(uuid.NewUUID())))
setupLocalVolumeProvisionerMountPoint(config, dynamicVolumePath, config.node0)
By("Waiting for the PersistentVolume to be created")
_, err := waitForLocalPersistentVolume(config.client, dynamicVolumePath)
Expect(err).NotTo(HaveOccurred())
By("Deleting provisioner daemonset")
deleteProvisionerDaemonset(config)
By("Deleting volume in discovery directory")
cleanupLocalVolumeProvisionerMountPoint(config, dynamicVolumePath, config.node0)
})
})
Context("StatefulSet with pod affinity [Slow]", func() {
var testVols map[string][]*localTestVolume
const (
@@ -556,158 +432,8 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
})
})
Context("Stress with local volume provisioner [Serial]", func() {
var testVols [][]string
const (
volsPerNode = 10 // Make this non-divisible by volsPerPod to increase chances of partial binding failure
volsPerPod = 3
podsFactor = 4
)
BeforeEach(func() {
setupStorageClass(config, &waitMode)
setupLocalVolumeProvisioner(config)
testVols = [][]string{}
for i, node := range config.nodes {
By(fmt.Sprintf("Setting up local volumes on node %q", node.Name))
paths := []string{}
for j := 0; j < volsPerNode; j++ {
volumePath := path.Join(config.discoveryDir, fmt.Sprintf("vol-%v", string(uuid.NewUUID())))
setupLocalVolumeProvisionerMountPoint(config, volumePath, &config.nodes[i])
paths = append(paths, volumePath)
}
testVols = append(testVols, paths)
}
By("Starting the local volume provisioner")
createProvisionerDaemonset(config)
})
AfterEach(func() {
By("Deleting provisioner daemonset")
deleteProvisionerDaemonset(config)
for i, paths := range testVols {
for _, volumePath := range paths {
cleanupLocalVolumeProvisionerMountPoint(config, volumePath, &config.nodes[i])
}
}
cleanupLocalVolumeProvisioner(config)
cleanupStorageClass(config)
})
It("should use be able to process many pods and reuse local volumes", func() {
var (
podsLock sync.Mutex
// Have one extra pod pending
numConcurrentPods = volsPerNode/volsPerPod*len(config.nodes) + 1
totalPods = numConcurrentPods * podsFactor
numCreated = 0
numFinished = 0
pods = map[string]*v1.Pod{}
)
// Create pods gradually instead of all at once because scheduler has
// exponential backoff
// TODO: this is still a bit slow because of the provisioner polling period
By(fmt.Sprintf("Creating %v pods periodically", numConcurrentPods))
stop := make(chan struct{})
go wait.Until(func() {
podsLock.Lock()
defer podsLock.Unlock()
if numCreated >= totalPods {
// Created all the pods for the test
return
}
if len(pods) > numConcurrentPods/2 {
// Too many outstanding pods
return
}
for i := 0; i < numConcurrentPods; i++ {
pvcs := []*v1.PersistentVolumeClaim{}
for j := 0; j < volsPerPod; j++ {
pvc := framework.MakePersistentVolumeClaim(makeLocalPVCConfig(config, DirectoryLocalVolumeType), config.ns)
pvc, err := framework.CreatePVC(config.client, config.ns, pvc)
framework.ExpectNoError(err)
pvcs = append(pvcs, pvc)
}
pod := framework.MakeSecPod(config.ns, pvcs, false, "sleep 1", false, false, selinuxLabel, nil)
pod, err := config.client.CoreV1().Pods(config.ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
pods[pod.Name] = pod
numCreated++
}
}, 2*time.Second, stop)
defer func() {
close(stop)
podsLock.Lock()
defer podsLock.Unlock()
for _, pod := range pods {
if err := deletePodAndPVCs(config, pod); err != nil {
framework.Logf("Deleting pod %v failed: %v", pod.Name, err)
}
}
}()
By("Waiting for all pods to complete successfully")
err := wait.PollImmediate(time.Second, 5*time.Minute, func() (done bool, err error) {
podsList, err := config.client.CoreV1().Pods(config.ns).List(metav1.ListOptions{})
if err != nil {
return false, err
}
podsLock.Lock()
defer podsLock.Unlock()
for _, pod := range podsList.Items {
switch pod.Status.Phase {
case v1.PodSucceeded:
// Delete pod and its PVCs
if err := deletePodAndPVCs(config, &pod); err != nil {
return false, err
}
delete(pods, pod.Name)
numFinished++
framework.Logf("%v/%v pods finished", numFinished, totalPods)
case v1.PodFailed, v1.PodUnknown:
// Fail the test early if a pod ended up in a failed or unknown phase.
return false, fmt.Errorf("pod %v is in %v phase", pod.Name, pod.Status.Phase)
}
}
return numFinished == totalPods, nil
})
Expect(err).ToNot(HaveOccurred())
})
})
})
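// deletePodAndPVCs deletes the given pod and then deletes every PVC referenced
// by the pod's volumes.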
func deletePodAndPVCs(config *localTestConfig, pod *v1.Pod) error {
framework.Logf("Deleting pod %v", pod.Name)
if err := config.client.CoreV1().Pods(config.ns).Delete(pod.Name, nil); err != nil {
return err
}
// Delete PVCs
for _, vol := range pod.Spec.Volumes {
pvcSource := vol.VolumeSource.PersistentVolumeClaim
if pvcSource != nil {
if err := framework.DeletePersistentVolumeClaim(config.client, pvcSource.ClaimName, config.ns); err != nil {
return err
}
}
}
return nil
}
type makeLocalPodWith func(config *localTestConfig, volume *localTestVolume, nodeName string) *v1.Pod
func testPodWithNodeConflict(config *localTestConfig, testVolType localVolumeType, nodeName string, makeLocalPodFunc makeLocalPodWith, bindingMode storagev1.VolumeBindingMode) {
@@ -1226,18 +952,6 @@ func createLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume, mod
}
}
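// makeLocalPod returns a pod that mounts the test volume's PVC and runs cmd.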
func makeLocalPod(config *localTestConfig, volume *localTestVolume, cmd string) *v1.Pod {
pod := framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, cmd, false, false, selinuxLabel, nil)
if pod == nil {
return pod
}
if volume.localVolumeType == BlockLocalVolumeType {
// Block e2e tests require utilities for writing to block devices (e.g. dd), and the nginx image has these utilities.
pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.Nginx)
}
return pod
}
func makeLocalPodWithNodeAffinity(config *localTestConfig, volume *localTestVolume, nodeName string) (pod *v1.Pod) {
pod = framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", false, false, selinuxLabel, nil)
if pod == nil {
@@ -1285,16 +999,6 @@ func makeLocalPodWithNodeName(config *localTestConfig, volume *localTestVolume,
return
}
// createSecPod should be used when a pod requires non-default SELinux labels
func createSecPod(config *localTestConfig, volume *localTestVolume, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions) (*v1.Pod, error) {
pod, err := framework.CreateSecPod(config.client, config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", hostIPC, hostPID, seLinuxLabel, nil, framework.PodStartShortTimeout)
podNodeName, podNodeNameErr := podNodeName(config, pod)
Expect(podNodeNameErr).NotTo(HaveOccurred())
framework.Logf("Security Context POD %q created on Node %q", pod.Name, podNodeName)
Expect(podNodeName).To(Equal(config.node0.Name))
return pod, err
}
func createLocalPod(config *localTestConfig, volume *localTestVolume, fsGroup *int64) (*v1.Pod, error) {
By("Creating a pod")
return framework.CreateSecPod(config.client, config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", false, false, selinuxLabel, fsGroup, framework.PodStartShortTimeout)
@@ -1368,13 +1072,6 @@ func testReadFileContent(testFileDir string, testFile string, testFileContent st
Expect(readOut).To(ContainSubstring(testFileContent))
}
// Create command to verify that the file doesn't exist
// to be executed via hostexec Pod on the node with the local PV
func createFileDoesntExistCmd(testFileDir string, testFile string) string {
testFilePath := filepath.Join(testFileDir, testFile)
return fmt.Sprintf("[ ! -e %s ]", testFilePath)
}
// Execute a read or write command in a pod.
// Fail on error
func podRWCmdExec(pod *v1.Pod, cmd string) string {
@@ -1402,332 +1099,6 @@ func setupLocalVolumesPVCsPVs(
return testVols
}
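// setupLocalVolumeProvisioner bootstraps the provisioner prerequisites:
// service account, cluster role bindings, PSP binding, the volume ConfigMap,
// and the discovery base directory on every test node.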
func setupLocalVolumeProvisioner(config *localTestConfig) {
By("Bootstrapping local volume provisioner")
createServiceAccount(config)
createProvisionerClusterRoleBinding(config)
utils.PrivilegedTestPSPClusterRoleBinding(config.client, config.ns, false /* teardown */, []string{testServiceAccount})
createVolumeConfigMap(config)
for _, node := range config.nodes {
By(fmt.Sprintf("Initializing local volume discovery base path on node %v", node.Name))
mkdirCmd := fmt.Sprintf("mkdir -p %v -m 777", config.discoveryDir)
err := issueNodeCommand(config, mkdirCmd, &node)
Expect(err).NotTo(HaveOccurred())
}
}
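// cleanupLocalVolumeProvisioner removes the RBAC/PSP bindings and the test
// discovery directory on every node.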
func cleanupLocalVolumeProvisioner(config *localTestConfig) {
By("Cleaning up cluster role binding")
deleteClusterRoleBinding(config)
utils.PrivilegedTestPSPClusterRoleBinding(config.client, config.ns, true /* teardown */, []string{testServiceAccount})
for _, node := range config.nodes {
By(fmt.Sprintf("Removing the test discovery directory on node %v", node.Name))
removeCmd := fmt.Sprintf("[ ! -e %v ] || rm -r %v", config.discoveryDir, config.discoveryDir)
err := issueNodeCommand(config, removeCmd, &node)
Expect(err).NotTo(HaveOccurred())
}
}
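// setupLocalVolumeProvisionerMountPoint creates a directory under the
// discovery directory and bind-mounts it onto itself, so the provisioner
// sees it as an actual mount point and discovers it as a local volume.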
func setupLocalVolumeProvisionerMountPoint(config *localTestConfig, volumePath string, node *v1.Node) {
By(fmt.Sprintf("Creating local directory at path %q", volumePath))
mkdirCmd := fmt.Sprintf("mkdir %v -m 777", volumePath)
err := issueNodeCommand(config, mkdirCmd, node)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Mounting local directory at path %q", volumePath))
mntCmd := fmt.Sprintf("sudo mount --bind %v %v", volumePath, volumePath)
err = issueNodeCommand(config, mntCmd, node)
Expect(err).NotTo(HaveOccurred())
}
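// cleanupLocalVolumeProvisionerMountPoint unmounts and removes the
// bind-mounted directory, then deletes the PV provisioned for it, if any.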
func cleanupLocalVolumeProvisionerMountPoint(config *localTestConfig, volumePath string, node *v1.Node) {
By(fmt.Sprintf("Unmounting the test mount point from %q", volumePath))
umountCmd := fmt.Sprintf("[ ! -e %v ] || sudo umount %v", volumePath, volumePath)
err := issueNodeCommand(config, umountCmd, node)
Expect(err).NotTo(HaveOccurred())
By("Removing the test mount point")
removeCmd := fmt.Sprintf("[ ! -e %v ] || rm -r %v", volumePath, volumePath)
err = issueNodeCommand(config, removeCmd, node)
Expect(err).NotTo(HaveOccurred())
By("Cleaning up persistent volume")
pv, err := findLocalPersistentVolume(config.client, volumePath)
Expect(err).NotTo(HaveOccurred())
if pv != nil {
err = config.client.CoreV1().PersistentVolumes().Delete(pv.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
}
}
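// createServiceAccount creates the service account used by the provisioner daemonset.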
func createServiceAccount(config *localTestConfig) {
serviceAccount := v1.ServiceAccount{
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "ServiceAccount"},
ObjectMeta: metav1.ObjectMeta{Name: testServiceAccount, Namespace: config.ns},
}
_, err := config.client.CoreV1().ServiceAccounts(config.ns).Create(&serviceAccount)
Expect(err).NotTo(HaveOccurred())
}
// createProvisionerClusterRoleBinding creates two cluster role bindings for the local volume
// provisioner's service account: systemRoleNode and systemRolePVProvisioner. These are required
// for the provisioner to get node information and create persistent volumes.
func createProvisionerClusterRoleBinding(config *localTestConfig) {
subjects := []rbacv1beta1.Subject{
{
Kind: rbacv1beta1.ServiceAccountKind,
Name: testServiceAccount,
Namespace: config.ns,
},
}
pvBinding := rbacv1beta1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1beta1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: pvBindingName,
},
RoleRef: rbacv1beta1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: systemRolePVProvisioner,
},
Subjects: subjects,
}
nodeBinding := rbacv1beta1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1beta1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: nodeBindingName,
},
RoleRef: rbacv1beta1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: systemRoleNode,
},
Subjects: subjects,
}
deleteClusterRoleBinding(config)
_, err := config.client.RbacV1beta1().ClusterRoleBindings().Create(&pvBinding)
Expect(err).NotTo(HaveOccurred())
_, err = config.client.RbacV1beta1().ClusterRoleBindings().Create(&nodeBinding)
Expect(err).NotTo(HaveOccurred())
}
func deleteClusterRoleBinding(config *localTestConfig) {
// These role bindings are created for the provisioner; here we just ensure they are
// deleted and do not fail on error.
config.client.RbacV1beta1().ClusterRoleBindings().Delete(nodeBindingName, metav1.NewDeleteOptions(0))
config.client.RbacV1beta1().ClusterRoleBindings().Delete(pvBindingName, metav1.NewDeleteOptions(0))
}
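// createVolumeConfigMap creates the ConfigMap that configures the provisioner.
// Its "storageClassMap" key maps the test storage class to the host discovery
// directory and the mount root inside the provisioner container; the rendered
// YAML looks roughly like (actual values depend on the test config):
//
//   <scName>:
//     hostDir: <config.discoveryDir>
//     mountDir: /mnt/local-storage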
func createVolumeConfigMap(config *localTestConfig) {
// MountConfig and ProvisionerConfiguration from
// https://github.com/kubernetes-incubator/external-storage/blob/master/local-volume/provisioner/pkg/common/common.go
type MountConfig struct {
// The hostpath directory
HostDir string `json:"hostDir" yaml:"hostDir"`
MountDir string `json:"mountDir" yaml:"mountDir"`
}
type ProvisionerConfiguration struct {
// StorageClassConfig defines configuration of Provisioner's storage classes
StorageClassConfig map[string]MountConfig `json:"storageClassMap" yaml:"storageClassMap"`
}
var provisionerConfig ProvisionerConfiguration
provisionerConfig.StorageClassConfig = map[string]MountConfig{
config.scName: {
HostDir: config.discoveryDir,
MountDir: provisionerDefaultMountRoot,
},
}
data, err := yaml.Marshal(&provisionerConfig.StorageClassConfig)
Expect(err).NotTo(HaveOccurred())
configMap := v1.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ConfigMap",
},
ObjectMeta: metav1.ObjectMeta{
Name: volumeConfigName,
Namespace: config.ns,
},
Data: map[string]string{
"storageClassMap": string(data),
},
}
_, err = config.client.CoreV1().ConfigMaps(config.ns).Create(&configMap)
Expect(err).NotTo(HaveOccurred())
}
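// createProvisionerDaemonset deploys the provisioner image as a privileged
// daemonset that mounts the volume ConfigMap and the host discovery directory
// (with HostToContainer mount propagation), then waits for its pods to run.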
func createProvisionerDaemonset(config *localTestConfig) {
provisionerPrivileged := true
mountProp := v1.MountPropagationHostToContainer
provisioner := &appsv1.DaemonSet{
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: daemonSetName,
},
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"app": "local-volume-provisioner"},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"app": "local-volume-provisioner"},
},
Spec: v1.PodSpec{
ServiceAccountName: testServiceAccount,
Containers: []v1.Container{
{
Name: "provisioner",
Image: provisionerImageName,
ImagePullPolicy: "Always",
SecurityContext: &v1.SecurityContext{
Privileged: &provisionerPrivileged,
},
Env: []v1.EnvVar{
{
Name: "MY_NODE_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
{
Name: "MY_NAMESPACE",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.namespace",
},
},
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeConfigName,
MountPath: "/etc/provisioner/config/",
},
{
Name: "local-disks",
MountPath: provisionerDefaultMountRoot,
MountPropagation: &mountProp,
},
},
},
},
Volumes: []v1.Volume{
{
Name: volumeConfigName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: volumeConfigName,
},
},
},
},
{
Name: "local-disks",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: config.discoveryDir,
},
},
},
},
},
},
},
}
_, err := config.client.AppsV1().DaemonSets(config.ns).Create(provisioner)
Expect(err).NotTo(HaveOccurred())
kind := schema.GroupKind{Group: "extensions", Kind: "DaemonSet"}
framework.WaitForControlledPodsRunning(config.client, config.ns, daemonSetName, kind)
}
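// findProvisionerDaemonsetPodName returns the name of the provisioner pod
// running on node0, or fails the test if none is found.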
func findProvisionerDaemonsetPodName(config *localTestConfig) string {
podList, err := config.client.CoreV1().Pods(config.ns).List(metav1.ListOptions{})
if err != nil {
framework.Failf("could not get the pod list: %v", err)
return ""
}
pods := podList.Items
for _, pod := range pods {
if strings.HasPrefix(pod.Name, daemonSetName) && pod.Spec.NodeName == config.node0.Name {
return pod.Name
}
}
framework.Failf("Unable to find provisioner daemonset pod on node0")
return ""
}
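// deleteProvisionerDaemonset deletes the provisioner daemonset and waits up to
// a minute for all of its pods to terminate.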
func deleteProvisionerDaemonset(config *localTestConfig) {
ds, err := config.client.AppsV1().DaemonSets(config.ns).Get(daemonSetName, metav1.GetOptions{})
if err != nil || ds == nil {
return
}
err = config.client.AppsV1().DaemonSets(config.ns).Delete(daemonSetName, nil)
Expect(err).NotTo(HaveOccurred())
err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
pods, err := config.client.CoreV1().Pods(config.ns).List(metav1.ListOptions{})
if err != nil {
return false, err
}
for _, pod := range pods.Items {
if metav1.IsControlledBy(&pod, ds) {
// DaemonSet pod still exists
return false, nil
}
}
// All DaemonSet pods are deleted
return true, nil
})
Expect(err).NotTo(HaveOccurred())
}
// newLocalClaim creates a new persistent volume claim.
func newLocalClaim(config *localTestConfig) *v1.PersistentVolumeClaim {
claim := v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "local-pvc-",
Namespace: config.ns,
},
Spec: v1.PersistentVolumeClaimSpec{
StorageClassName: &config.scName,
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(testRequestSize),
},
},
},
}
return &claim
}
// newLocalClaimWithName creates a new persistent volume claim with the given name.
func newLocalClaimWithName(config *localTestConfig, name string) *v1.PersistentVolumeClaim {
claim := v1.PersistentVolumeClaim{
@@ -1751,50 +1122,6 @@ func newLocalClaimWithName(config *localTestConfig, name string) *v1.PersistentV
return &claim
}
// waitForLocalPersistentVolume waits for a local persistent volume with 'volumePath' to become available.
func waitForLocalPersistentVolume(c clientset.Interface, volumePath string) (*v1.PersistentVolume, error) {
var pv *v1.PersistentVolume
for start := time.Now(); time.Since(start) < 10*time.Minute && pv == nil; time.Sleep(5 * time.Second) {
pvs, err := c.CoreV1().PersistentVolumes().List(metav1.ListOptions{})
if err != nil {
return nil, err
}
if len(pvs.Items) == 0 {
continue
}
for _, p := range pvs.Items {
if p.Spec.PersistentVolumeSource.Local == nil || p.Spec.PersistentVolumeSource.Local.Path != volumePath {
continue
}
if p.Status.Phase != v1.VolumeAvailable {
continue
}
pv = &p
break
}
}
if pv == nil {
return nil, fmt.Errorf("Timeout while waiting for local persistent volume with path %v to be available", volumePath)
}
return pv, nil
}
// findLocalPersistentVolume finds the persistent volume whose 'spec.local.path' equals 'volumePath'.
func findLocalPersistentVolume(c clientset.Interface, volumePath string) (*v1.PersistentVolume, error) {
pvs, err := c.CoreV1().PersistentVolumes().List(metav1.ListOptions{})
if err != nil {
return nil, err
}
for _, p := range pvs.Items {
if p.Spec.PersistentVolumeSource.Local != nil && p.Spec.PersistentVolumeSource.Local.Path == volumePath {
return &p, nil
}
}
// The volume doesn't exist; that's fine, since this may be called during early cleanup
return nil, nil
}
func createStatefulSet(config *localTestConfig, ssReplicas int32, volumeCount int, anti, parallel bool) *appsv1.StatefulSet {
mounts := []v1.VolumeMount{}
claims := []v1.PersistentVolumeClaim{}