Merge pull request #59761 from vmware/remove-production-code-usage-in-e2e-tests

Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Removing Production Code usage from vSphere E2E tests

**What this PR does / why we need it**: 

The vSphere E2E test cases use VCP (vSphere Cloud Provider) production code for their bootstrap logic, for example to obtain an instance of the vSphere struct. Once a test has that vSphere instance, it uses the object to call methods in the VCP code such as DiskIsAttached(), DisksAreAttached(), etc.

This direct dependency on VCP production code should be removed, since any change to the production code can also break the E2E test cases.

This PR addresses the above-stated need.
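
For illustration, the recurring pattern in the diffs below looks roughly like the following sketch. This is not code from the PR itself: the function name `exampleVolumeLifecycle` is made up, error handling and pod creation are abbreviated, and it assumes it lives inside the `test/e2e/storage/vsphere` package where `Bootstrap`, `GetReadySchedulableRandomNodeInfo`, `VolumeOptions`, `diskIsAttached`, and `waitForVSphereDiskToDetach` are defined by this change.

```go
package vsphere

import (
	. "github.com/onsi/gomega"

	"k8s.io/kubernetes/test/e2e/framework"
)

// exampleVolumeLifecycle is a hypothetical helper showing the test-local flow
// that replaces calls into VCP production code (getVSphere, vsp.CreateVolume,
// vsp.DiskIsAttached, vsp.DeleteVolume).
func exampleVolumeLifecycle(f *framework.Framework) {
	// Bootstrap the test-owned vSphere context and pick a ready, schedulable node.
	Bootstrap(f)
	nodeInfo := GetReadySchedulableRandomNodeInfo()

	// Create a vmdk through the test-local VSphere helper instead of the cloud provider.
	volumePath, err := nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
	Expect(err).NotTo(HaveOccurred())
	defer nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)

	// Once a pod using the volume is running on nodeInfo.Name (pod creation
	// omitted here), attach/detach checks go through the test helpers rather
	// than vsp.DiskIsAttached()/vsp.DisksAreAttached().
	isAttached, err := diskIsAttached(volumePath, nodeInfo.Name)
	Expect(err).NotTo(HaveOccurred())
	Expect(isAttached).To(BeTrue())
	framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, nodeInfo.Name))
}
```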

**Which issue(s) this PR fixes** 
Fixes https://github.com/vmware/kubernetes/issues/434

**Special notes for your reviewer**:

**Release note**:

```release-note
NONE
```
Kubernetes Submit Queue 2018-02-14 00:54:20 -08:00 committed by GitHub
commit 050bddd36a
23 changed files with 632 additions and 323 deletions


@ -503,24 +503,18 @@ var _ = utils.SIGDescribe("Volumes", func() {
Describe("vsphere [Feature:Volumes]", func() {
It("should be mountable", func() {
framework.SkipUnlessProviderIs("vsphere")
vspheretest.Bootstrap(f)
nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo()
var volumePath string
config := framework.VolumeTestConfig{
Namespace: namespace.Name,
Prefix: "vsphere",
}
By("creating a test vsphere volume")
c, err := framework.LoadClientset()
if err != nil {
return
}
vsp, err := vspheretest.GetVSphere(c)
Expect(err).NotTo(HaveOccurred())
volumePath, err = vspheretest.CreateVSphereVolume(vsp, nil)
volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
defer func() {
vsp.DeleteVolume(volumePath)
nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}()
defer func() {


@ -37,8 +37,6 @@ go_library(
],
importpath = "k8s.io/kubernetes/test/e2e/storage/vsphere",
deps = [
"//pkg/cloudprovider/providers/vsphere:go_default_library",
"//pkg/cloudprovider/providers/vsphere/vclib:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/storage/utils:go_default_library",
@ -51,6 +49,7 @@ go_library(
"//vendor/github.com/vmware/govmomi/session:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/soap:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/types:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/gopkg.in/gcfg.v1:go_default_library",
@ -61,7 +60,6 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",


@ -17,6 +17,7 @@ limitations under the License.
package vsphere
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
"sync"
)
@ -43,10 +44,12 @@ func bootstrapOnce() {
if err != nil {
framework.Failf("Failed to bootstrap vSphere with error: %v", err)
}
// 2. Get all ready nodes
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
// 2. Get all nodes
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
framework.Failf("Failed to get nodes: %v", err)
}
TestContext = VSphereContext{NodeMapper: &NodeMapper{}, VSphereInstances: vsphereInstances}
// 3. Get Node to VSphere mapping
err = TestContext.NodeMapper.GenerateNodeMap(vsphereInstances, *nodeList)
if err != nil {


@ -42,6 +42,7 @@ type Config struct {
Datacenters string
RoundTripperCount uint
DefaultDatastore string
Folder string
}
// ConfigFile represents the content of vsphere.conf file.
@ -166,6 +167,7 @@ func populateInstanceMap(cfg *ConfigFile) (map[string]*VSphere, error) {
}
vcConfig.DefaultDatastore = cfg.Workspace.DefaultDatastore
vcConfig.Folder = cfg.Workspace.Folder
vsphereIns := VSphere{
Config: vcConfig,


@ -90,9 +90,8 @@ func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, node
for _, node := range nodeList.Items {
n := node
go func() {
nodeUUID := n.Status.NodeInfo.SystemUUID
nodeUUID := getUUIDFromProviderID(n.Spec.ProviderID)
framework.Logf("Searching for node with UUID: %s", nodeUUID)
for _, res := range queueChannel {
ctx, cancel := context.WithCancel(context.Background())
@ -107,7 +106,7 @@ func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, node
framework.Logf("Found node %s as vm=%+v in vc=%s and datacenter=%s",
n.Name, vm, res.vs.Config.Hostname, res.datacenter.Name())
nodeInfo := &NodeInfo{Name: n.Name, DataCenterRef: res.datacenter.Reference(), VirtualMachineRef: vm.Reference(), VSphere: res.vs}
nameToNodeInfo[n.Name] = nodeInfo
nm.SetNodeInfo(n.Name, nodeInfo)
break
}
}


@ -24,9 +24,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -42,11 +40,11 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
clientPod *v1.Pod
pvConfig framework.PersistentVolumeConfig
pvcConfig framework.PersistentVolumeClaimConfig
vsp *vsphere.VSphere
err error
node types.NodeName
node string
volLabel labels.Set
selector *metav1.LabelSelector
nodeInfo *NodeInfo
)
f := framework.NewDefaultFramework("pv")
@ -66,16 +64,17 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
clientPod = nil
pvc = nil
pv = nil
nodes := framework.GetReadySchedulableNodesOrDie(c)
if len(nodes.Items) < 1 {
framework.Skipf("Requires at least %d node", 1)
}
nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodes.Items[0].Name)
volLabel = labels.Set{framework.VolumeSelectorKey: ns}
selector = metav1.SetAsLabelSelector(volLabel)
if vsp == nil {
vsp, err = getVSphere(c)
Expect(err).NotTo(HaveOccurred())
}
if volumePath == "" {
volumePath, err = createVSphereVolume(vsp, nil)
volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
pvConfig = framework.PersistentVolumeConfig{
NamePrefix: "vspherepv-",
@ -103,10 +102,10 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
By("Creating the Client Pod")
clientPod, err = framework.CreateClientPod(c, ns, pvc)
Expect(err).NotTo(HaveOccurred())
node = types.NodeName(clientPod.Spec.NodeName)
node = clientPod.Spec.NodeName
By("Verify disk should be attached to the node")
isAttached, err := verifyVSphereDiskAttached(c, vsp, volumePath, node)
isAttached, err := diskIsAttached(volumePath, node)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), "disk is not attached with the node")
})
@ -134,12 +133,8 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
framework.AddCleanupAction(func() {
// Cleanup actions will be called even when the tests are skipped and leaves namespace unset.
if len(ns) > 0 && len(volumePath) > 0 {
client, err := framework.LoadClientset()
if err != nil {
return
}
framework.ExpectNoError(waitForVSphereDiskToDetach(client, vsp, volumePath, node))
vsp.DeleteVolume(volumePath)
framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, node))
nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}
})
@ -218,6 +213,6 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
Expect(err).NotTo(HaveOccurred())
By("Verifying Persistent Disk detaches")
waitForVSphereDiskToDetach(c, vsp, volumePath, node)
waitForVSphereDiskToDetach(volumePath, node)
})
})


@ -25,9 +25,7 @@ import (
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -40,6 +38,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
volumePath string
pv *v1.PersistentVolume
pvc *v1.PersistentVolumeClaim
nodeInfo *NodeInfo
)
BeforeEach(func() {
@ -51,15 +50,15 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
utils.SIGDescribe("persistentvolumereclaim:vsphere", func() {
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
nodeInfo = GetReadySchedulableRandomNodeInfo()
pv = nil
pvc = nil
volumePath = ""
})
AfterEach(func() {
vsp, err := getVSphere(c)
Expect(err).NotTo(HaveOccurred())
testCleanupVSpherePersistentVolumeReclaim(vsp, c, ns, volumePath, pv, pvc)
testCleanupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, volumePath, pv, pvc)
})
/*
@ -75,10 +74,8 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
6. Verify PV is deleted automatically.
*/
It("should delete persistent volume when reclaimPolicy set to delete and associated claim is deleted", func() {
vsp, err := getVSphere(c)
Expect(err).NotTo(HaveOccurred())
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(vsp, c, ns, v1.PersistentVolumeReclaimDelete)
var err error
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete)
Expect(err).NotTo(HaveOccurred())
deletePVCAfterBind(c, ns, pvc, pv)
@ -105,10 +102,9 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
9. Verify PV should be detached from the node and automatically deleted.
*/
It("should not detach and unmount PV when associated pvc with delete as reclaimPolicy is deleted when it is in use by the pod", func() {
vsp, err := getVSphere(c)
Expect(err).NotTo(HaveOccurred())
var err error
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(vsp, c, ns, v1.PersistentVolumeReclaimDelete)
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete)
Expect(err).NotTo(HaveOccurred())
// Wait for PV and PVC to Bind
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
@ -116,7 +112,6 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
By("Creating the Pod")
pod, err := framework.CreateClientPod(c, ns, pvc)
Expect(err).NotTo(HaveOccurred())
node := types.NodeName(pod.Spec.NodeName)
By("Deleting the Claim")
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
@ -128,19 +123,19 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
Expect(framework.WaitForPersistentVolumePhase(v1.VolumeFailed, c, pv.Name, 1*time.Second, 60*time.Second)).NotTo(HaveOccurred())
By("Verify the volume is attached to the node")
isVolumeAttached, verifyDiskAttachedError := verifyVSphereDiskAttached(c, vsp, pv.Spec.VsphereVolume.VolumePath, node)
isVolumeAttached, verifyDiskAttachedError := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
Expect(verifyDiskAttachedError).NotTo(HaveOccurred())
Expect(isVolumeAttached).To(BeTrue())
By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(c, pod, []*v1.PersistentVolume{pv}, vsp)
verifyVSphereVolumesAccessible(c, pod, []*v1.PersistentVolume{pv})
framework.Logf("Verified that Volume is accessible in the POD after deleting PV claim")
By("Deleting the Pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name)
By("Verify PV is detached from the node after Pod is deleted")
Expect(waitForVSphereDiskToDetach(c, vsp, pv.Spec.VsphereVolume.VolumePath, types.NodeName(pod.Spec.NodeName))).NotTo(HaveOccurred())
Expect(waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)).NotTo(HaveOccurred())
By("Verify PV should be deleted automatically")
framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second))
@ -167,11 +162,10 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
*/
It("should retain persistent volume when reclaimPolicy set to retain when associated claim is deleted", func() {
var err error
var volumeFileContent = "hello from vsphere cloud provider, Random Content is :" + strconv.FormatInt(time.Now().UnixNano(), 10)
vsp, err := getVSphere(c)
Expect(err).NotTo(HaveOccurred())
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(vsp, c, ns, v1.PersistentVolumeReclaimRetain)
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimRetain)
Expect(err).NotTo(HaveOccurred())
writeContentToVSpherePV(c, pvc, volumeFileContent)
@ -205,10 +199,10 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
})
// Test Setup for persistentvolumereclaim tests for vSphere Provider
func testSetupVSpherePersistentVolumeReclaim(vsp *vsphere.VSphere, c clientset.Interface, ns string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy) (volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim, err error) {
func testSetupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *NodeInfo, ns string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy) (volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim, err error) {
By("running testSetupVSpherePersistentVolumeReclaim")
By("creating vmdk")
volumePath, err = createVSphereVolume(vsp, nil)
volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
if err != nil {
return
}
@ -225,10 +219,11 @@ func testSetupVSpherePersistentVolumeReclaim(vsp *vsphere.VSphere, c clientset.I
}
// Test Cleanup for persistentvolumereclaim tests for vSphere Provider
func testCleanupVSpherePersistentVolumeReclaim(vsp *vsphere.VSphere, c clientset.Interface, ns string, volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
func testCleanupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *NodeInfo, ns string, volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
By("running testCleanupVSpherePersistentVolumeReclaim")
if len(volumePath) > 0 {
vsp.DeleteVolume(volumePath)
err := nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
}
if pv != nil {
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)


@ -56,11 +56,14 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
ssdlabels map[string]string
vvollabels map[string]string
err error
nodeInfo *NodeInfo
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
c = f.ClientSet
ns = f.Namespace.Name
Bootstrap(f)
nodeInfo = GetReadySchedulableRandomNodeInfo()
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
ssdlabels = make(map[string]string)
ssdlabels["volume-type"] = "ssd"
@ -73,11 +76,11 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
AfterEach(func() {
By("Running clean up actions")
if framework.ProviderIs("vsphere") {
testCleanupVSpherePVClabelselector(c, ns, volumePath, pv_ssd, pvc_ssd, pvc_vvol)
testCleanupVSpherePVClabelselector(c, ns, nodeInfo, volumePath, pv_ssd, pvc_ssd, pvc_vvol)
}
})
It("should bind volume with claim for given label", func() {
volumePath, pv_ssd, pvc_ssd, pvc_vvol, err = testSetupVSpherePVClabelselector(c, ns, ssdlabels, vvollabels)
volumePath, pv_ssd, pvc_ssd, pvc_vvol, err = testSetupVSpherePVClabelselector(c, nodeInfo, ns, ssdlabels, vvollabels)
Expect(err).NotTo(HaveOccurred())
By("wait for the pvc_ssd to bind with pv_ssd")
@ -101,12 +104,11 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
})
})
func testSetupVSpherePVClabelselector(c clientset.Interface, ns string, ssdlabels map[string]string, vvollabels map[string]string) (volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim, err error) {
func testSetupVSpherePVClabelselector(c clientset.Interface, nodeInfo *NodeInfo, ns string, ssdlabels map[string]string, vvollabels map[string]string) (volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim, err error) {
volumePath = ""
By("creating vmdk")
vsp, err := getVSphere(c)
Expect(err).NotTo(HaveOccurred())
volumePath, err = createVSphereVolume(vsp, nil)
volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
if err != nil {
return
}
@ -131,12 +133,10 @@ func testSetupVSpherePVClabelselector(c clientset.Interface, ns string, ssdlabel
return
}
func testCleanupVSpherePVClabelselector(c clientset.Interface, ns string, volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim) {
func testCleanupVSpherePVClabelselector(c clientset.Interface, ns string, nodeInfo *NodeInfo, volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim) {
By("running testCleanupVSpherePVClabelselector")
if len(volumePath) > 0 {
vsp, err := getVSphere(c)
Expect(err).NotTo(HaveOccurred())
vsp.DeleteVolume(volumePath)
nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}
if pvc_ssd != nil {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns), "Failed to delete PVC ", pvc_ssd.Name)


@ -17,11 +17,26 @@ limitations under the License.
package vsphere
import (
"fmt"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/soap"
"github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context"
"k8s.io/kubernetes/test/e2e/framework"
"path/filepath"
"strconv"
"strings"
"time"
)
const (
VolDir = "kubevols"
DefaultDiskCapacityKB = 2097152
DefaultDiskFormat = "thin"
DefaultSCSIControllerType = "lsiLogic"
VirtualMachineType = "VirtualMachine"
)
// Represents a vSphere instance where one or more kubernetes nodes are running.
@ -30,6 +45,15 @@ type VSphere struct {
Client *govmomi.Client
}
// VolumeOptions specifies various options for a volume.
type VolumeOptions struct {
Name string
CapacityKB int
DiskFormat string
SCSIControllerType string
Datastore string
}
// GetDatacenter returns the DataCenter Object for the given datacenterPath
func (vs *VSphere) GetDatacenter(ctx context.Context, datacenterPath string) (*object.Datacenter, error) {
Connect(ctx, vs)
@ -37,6 +61,12 @@ func (vs *VSphere) GetDatacenter(ctx context.Context, datacenterPath string) (*o
return finder.Datacenter(ctx, datacenterPath)
}
// GetDatacenter returns the DataCenter Object for the given datacenterPath
func (vs *VSphere) GetDatacenterFromObjectReference(ctx context.Context, dc object.Reference) *object.Datacenter {
Connect(ctx, vs)
return object.NewDatacenter(vs.Client.Client, dc.Reference())
}
// GetAllDatacenter returns all the DataCenter Objects
func (vs *VSphere) GetAllDatacenter(ctx context.Context) ([]*object.Datacenter, error) {
Connect(ctx, vs)
@ -44,11 +74,167 @@ func (vs *VSphere) GetAllDatacenter(ctx context.Context) ([]*object.Datacenter,
return finder.DatacenterList(ctx, "*")
}
// GetVMByUUID gets the VM object from the given vmUUID
// GetVMByUUID gets the VM object Reference from the given vmUUID
func (vs *VSphere) GetVMByUUID(ctx context.Context, vmUUID string, dc object.Reference) (object.Reference, error) {
Connect(ctx, vs)
datacenter := object.NewDatacenter(vs.Client.Client, dc.Reference())
datacenter := vs.GetDatacenterFromObjectReference(ctx, dc)
s := object.NewSearchIndex(vs.Client.Client)
vmUUID = strings.ToLower(strings.TrimSpace(vmUUID))
return s.FindByUuid(ctx, datacenter, vmUUID, true, nil)
}
// GetFolderByPath gets the Folder Object Reference from the given folder path
// folderPath should be the full path to folder
func (vs *VSphere) GetFolderByPath(ctx context.Context, dc object.Reference, folderPath string) (vmFolderMor types.ManagedObjectReference, err error) {
Connect(ctx, vs)
datacenter := object.NewDatacenter(vs.Client.Client, dc.Reference())
finder := find.NewFinder(datacenter.Client(), true)
finder.SetDatacenter(datacenter)
vmFolder, err := finder.Folder(ctx, folderPath)
if err != nil {
framework.Logf("Failed to get the folder reference for %s. err: %+v", folderPath, err)
return vmFolderMor, err
}
return vmFolder.Reference(), nil
}
// CreateVolume creates a vsphere volume using given volume paramemters specified in VolumeOptions.
// If volume is created successfully the canonical disk path is returned else error is returned.
func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions, dataCenterRef types.ManagedObjectReference) (string, error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
Connect(ctx, vs)
datacenter := object.NewDatacenter(vs.Client.Client, dataCenterRef)
var (
err error
directoryAlreadyPresent = false
)
if datacenter == nil {
return "", fmt.Errorf("datacenter is nil")
}
vs.initVolumeOptions(volumeOptions)
finder := find.NewFinder(datacenter.Client(), true)
finder.SetDatacenter(datacenter)
ds, err := finder.Datastore(ctx, volumeOptions.Datastore)
if err != nil {
return "", fmt.Errorf("Failed while searching for datastore: %s. err: %+v", volumeOptions.Datastore, err)
}
directoryPath := filepath.Clean(ds.Path(VolDir)) + "/"
fileManager := object.NewFileManager(ds.Client())
err = fileManager.MakeDirectory(ctx, directoryPath, datacenter, false)
if err != nil {
if soap.IsSoapFault(err) {
soapFault := soap.ToSoapFault(err)
if _, ok := soapFault.VimFault().(types.FileAlreadyExists); ok {
directoryAlreadyPresent = true
framework.Logf("Directory with the path %+q is already present", directoryPath)
}
}
if !directoryAlreadyPresent {
framework.Logf("Cannot create dir %#v. err %s", directoryPath, err)
return "", err
}
}
framework.Logf("Created dir with path as %+q", directoryPath)
vmdkPath := directoryPath + volumeOptions.Name + ".vmdk"
// Create a virtual disk manager
vdm := object.NewVirtualDiskManager(ds.Client())
// Create specification for new virtual disk
vmDiskSpec := &types.FileBackedVirtualDiskSpec{
VirtualDiskSpec: types.VirtualDiskSpec{
AdapterType: volumeOptions.SCSIControllerType,
DiskType: volumeOptions.DiskFormat,
},
CapacityKb: int64(volumeOptions.CapacityKB),
}
// Create virtual disk
task, err := vdm.CreateVirtualDisk(ctx, vmdkPath, datacenter, vmDiskSpec)
if err != nil {
framework.Logf("Failed to create virtual disk: %s. err: %+v", vmdkPath, err)
return "", err
}
taskInfo, err := task.WaitForResult(ctx, nil)
if err != nil {
framework.Logf("Failed to complete virtual disk creation: %s. err: %+v", vmdkPath, err)
return "", err
}
volumePath := taskInfo.Result.(string)
canonicalDiskPath, err := getCanonicalVolumePath(ctx, datacenter, volumePath)
if err != nil {
return "", err
}
return canonicalDiskPath, nil
}
// DeleteVolume deletes the vmdk file specified in the volumePath.
// if an error is encountered while deleting volume, error is returned.
func (vs *VSphere) DeleteVolume(volumePath string, dataCenterRef types.ManagedObjectReference) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
Connect(ctx, vs)
datacenter := object.NewDatacenter(vs.Client.Client, dataCenterRef)
virtualDiskManager := object.NewVirtualDiskManager(datacenter.Client())
diskPath := removeStorageClusterORFolderNameFromVDiskPath(volumePath)
// Delete virtual disk
task, err := virtualDiskManager.DeleteVirtualDisk(ctx, diskPath, datacenter)
if err != nil {
framework.Logf("Failed to delete virtual disk. err: %v", err)
return err
}
err = task.Wait(ctx)
if err != nil {
framework.Logf("Failed to delete virtual disk. err: %v", err)
return err
}
return nil
}
// IsVMPresent checks if VM with the name specified in the vmName argument, is present in the vCenter inventory.
// if VM is present, function returns true else false.
func (vs *VSphere) IsVMPresent(vmName string, dataCenterRef types.ManagedObjectReference) (isVMPresent bool, err error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
Connect(ctx, vs)
folderMor, err := vs.GetFolderByPath(ctx, dataCenterRef, vs.Config.Folder)
if err != nil {
return
}
vmFolder := object.NewFolder(vs.Client.Client, folderMor)
vmFoldersChildren, err := vmFolder.Children(ctx)
if err != nil {
framework.Logf("Failed to get children from Folder: %s. err: %+v", vmFolder.InventoryPath, err)
return
}
for _, vmFoldersChild := range vmFoldersChildren {
if vmFoldersChild.Reference().Type == VirtualMachineType {
if object.NewVirtualMachine(vs.Client.Client, vmFoldersChild.Reference()).Name() == vmName {
return true, nil
}
}
}
return
}
// initVolumeOptions function sets default values for volumeOptions parameters if not set
func (vs *VSphere) initVolumeOptions(volumeOptions *VolumeOptions) {
if volumeOptions == nil {
volumeOptions = &VolumeOptions{}
}
if volumeOptions.Datastore == "" {
volumeOptions.Datastore = vs.Config.DefaultDatastore
}
if volumeOptions.CapacityKB == 0 {
volumeOptions.CapacityKB = DefaultDiskCapacityKB
}
if volumeOptions.Name == "" {
volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
}
if volumeOptions.DiskFormat == "" {
volumeOptions.DiskFormat = DefaultDiskFormat
}
if volumeOptions.SCSIControllerType == "" {
volumeOptions.SCSIControllerType = DefaultSCSIControllerType
}
}


@ -25,9 +25,7 @@ import (
"k8s.io/api/core/v1"
storageV1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -71,6 +69,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
nodeVolumeMapChan = make(chan map[string][]string)
@ -111,7 +110,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
It("vsphere scale tests", func() {
var pvcClaimList []string
nodeVolumeMap := make(map[k8stypes.NodeName][]string)
nodeVolumeMap := make(map[string][]string)
// Volumes will be provisioned with each different types of Storage Class
scArrays := make([]*storageV1.StorageClass, len(scNames))
for index, scname := range scNames {
@ -137,22 +136,19 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
scArrays[index] = sc
}
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
volumeCountPerInstance := volumeCount / numberOfInstances
for instanceCount := 0; instanceCount < numberOfInstances; instanceCount++ {
if instanceCount == numberOfInstances-1 {
volumeCountPerInstance = volumeCount
}
volumeCount = volumeCount - volumeCountPerInstance
go VolumeCreateAndAttach(client, namespace, scArrays, volumeCountPerInstance, volumesPerPod, nodeSelectorList, nodeVolumeMapChan, vsp)
go VolumeCreateAndAttach(client, namespace, scArrays, volumeCountPerInstance, volumesPerPod, nodeSelectorList, nodeVolumeMapChan)
}
// Get the list of all volumes attached to each node from the go routines by reading the data from the channel
for instanceCount := 0; instanceCount < numberOfInstances; instanceCount++ {
for node, volumeList := range <-nodeVolumeMapChan {
nodeVolumeMap[k8stypes.NodeName(node)] = append(nodeVolumeMap[k8stypes.NodeName(node)], volumeList...)
nodeVolumeMap[node] = append(nodeVolumeMap[node], volumeList...)
}
}
podList, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{})
@ -163,7 +159,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
Expect(err).NotTo(HaveOccurred())
}
By("Waiting for volumes to be detached from the node")
err = waitForVSphereDisksToDetach(client, vsp, nodeVolumeMap)
err = waitForVSphereDisksToDetach(nodeVolumeMap)
Expect(err).NotTo(HaveOccurred())
for _, pvcClaim := range pvcClaimList {
@ -185,7 +181,7 @@ func getClaimsForPod(pod *v1.Pod, volumesPerPod int) []string {
}
// VolumeCreateAndAttach peforms create and attach operations of vSphere persistent volumes at scale
func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*storageV1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string, vsp *vsphere.VSphere) {
func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*storageV1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string) {
defer GinkgoRecover()
nodeVolumeMap := make(map[string][]string)
nodeSelectorIndex := 0
@ -215,7 +211,7 @@ func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*s
nodeVolumeMap[pod.Spec.NodeName] = append(nodeVolumeMap[pod.Spec.NodeName], pv.Spec.VsphereVolume.VolumePath)
}
By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes, vsp)
verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
nodeSelectorIndex++
}
nodeVolumeMapChan <- nodeVolumeMap


@ -22,7 +22,6 @@ import (
. "github.com/onsi/gomega"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
@ -60,6 +59,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
framework.SkipUnlessProviderIs("vsphere")
namespace = f.Namespace.Name
client = f.ClientSet
Bootstrap(f)
})
AfterEach(func() {
framework.Logf("Deleting all statefulset in namespace: %v", namespace)
@ -104,9 +104,6 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
Expect(scaledownErr).NotTo(HaveOccurred())
statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas-1)
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
// After scale down, verify vsphere volumes are detached from deleted pods
By("Verify Volumes are detached from Nodes after Statefulsets is scaled down")
for _, sspod := range ssPodsBeforeScaleDown.Items {
@ -117,7 +114,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
if volumespec.PersistentVolumeClaim != nil {
vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
framework.Logf("Waiting for Volume: %q to detach from Node: %q", vSpherediskPath, sspod.Spec.NodeName)
Expect(waitForVSphereDiskToDetach(client, vsp, vSpherediskPath, types.NodeName(sspod.Spec.NodeName))).NotTo(HaveOccurred())
Expect(waitForVSphereDiskToDetach(vSpherediskPath, sspod.Spec.NodeName)).NotTo(HaveOccurred())
}
}
}
@ -146,7 +143,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
framework.Logf("Verify Volume: %q is attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName)
// Verify scale up has re-attached the same volumes and not introduced new volume
Expect(volumesBeforeScaleDown[vSpherediskPath] == "").To(BeFalse())
isVolumeAttached, verifyDiskAttachedError := verifyVSphereDiskAttached(client, vsp, vSpherediskPath, types.NodeName(sspod.Spec.NodeName))
isVolumeAttached, verifyDiskAttachedError := diskIsAttached(vSpherediskPath, sspod.Spec.NodeName)
Expect(isVolumeAttached).To(BeTrue())
Expect(verifyDiskAttachedError).NotTo(HaveOccurred())
}


@ -25,7 +25,6 @@ import (
"k8s.io/api/core/v1"
storageV1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
@ -125,8 +124,7 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.Interface, namespace string, instanceId string, sc *storageV1.StorageClass, iterations int, wg *sync.WaitGroup) {
defer wg.Done()
defer GinkgoRecover()
vsp, err := getVSphere(f.ClientSet)
Expect(err).NotTo(HaveOccurred())
for iterationCount := 0; iterationCount < iterations; iterationCount++ {
logPrefix := fmt.Sprintf("Instance: [%v], Iteration: [%v] :", instanceId, iterationCount+1)
By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name))
@ -153,19 +151,19 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("%v Verifing the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
isVolumeAttached, verifyDiskAttachedError := verifyVSphereDiskAttached(client, vsp, persistentvolumes[0].Spec.VsphereVolume.VolumePath, types.NodeName(pod.Spec.NodeName))
isVolumeAttached, verifyDiskAttachedError := diskIsAttached(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
Expect(isVolumeAttached).To(BeTrue())
Expect(verifyDiskAttachedError).NotTo(HaveOccurred())
By(fmt.Sprintf("%v Verifing the volume: %v is accessible in the pod: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Name))
verifyVSphereVolumesAccessible(client, pod, persistentvolumes, vsp)
verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
By(fmt.Sprintf("%v Deleting pod: %v", logPrefix, pod.Name))
err = framework.DeletePodWithWait(f, client, pod)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("%v Waiting for volume: %v to be detached from the node: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
err = waitForVSphereDiskToDetach(client, vsp, persistentvolumes[0].Spec.VsphereVolume.VolumePath, types.NodeName(pod.Spec.NodeName))
err = waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("%v Deleting the Claim: %v", logPrefix, pvclaim.Name))


@ -18,31 +18,33 @@ package vsphere
import (
"fmt"
"math/rand"
"path/filepath"
"strconv"
"time"
"github.com/golang/glog"
. "github.com/onsi/gomega"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/mo"
vim25types "github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
"context"
"github.com/vmware/govmomi/find"
vimtypes "github.com/vmware/govmomi/vim25/types"
"regexp"
"strings"
)
const (
@ -51,6 +53,8 @@ const (
storageclass2 = "sc-vsan"
storageclass3 = "sc-spbm"
storageclass4 = "sc-user-specified-ds"
DummyDiskName = "kube-dummyDisk.vmdk"
ProviderPrefix = "vsphere://"
)
// volumeState represents the state of a volume.
@ -61,37 +65,16 @@ const (
volumeStateAttached volumeState = 2
)
// Sanity check for vSphere testing. Verify the persistent disk attached to the node.
func verifyVSphereDiskAttached(c clientset.Interface, vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) (bool, error) {
var (
isAttached bool
err error
)
if vsp == nil {
vsp, err = getVSphere(c)
Expect(err).NotTo(HaveOccurred())
}
isAttached, err = vsp.DiskIsAttached(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
return isAttached, err
}
// Wait until vsphere volumes are detached from the list of nodes or time out after 5 minutes
func waitForVSphereDisksToDetach(c clientset.Interface, vsp *vsphere.VSphere, nodeVolumes map[types.NodeName][]string) error {
func waitForVSphereDisksToDetach(nodeVolumes map[string][]string) error {
var (
err error
disksAttached = true
detachTimeout = 5 * time.Minute
detachPollTime = 10 * time.Second
)
if vsp == nil {
vsp, err = getVSphere(c)
if err != nil {
return err
}
}
err = wait.Poll(detachPollTime, detachTimeout, func() (bool, error) {
attachedResult, err := vsp.DisksAreAttached(nodeVolumes)
attachedResult, err := disksAreAttached(nodeVolumes)
if err != nil {
return false, err
}
@ -117,7 +100,7 @@ func waitForVSphereDisksToDetach(c clientset.Interface, vsp *vsphere.VSphere, no
}
// Wait until vsphere vmdk moves to expected state on the given node, or time out after 6 minutes
func waitForVSphereDiskStatus(c clientset.Interface, vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName, expectedState volumeState) error {
func waitForVSphereDiskStatus(volumePath string, nodeName string, expectedState volumeState) error {
var (
err error
diskAttached bool
@ -137,7 +120,7 @@ func waitForVSphereDiskStatus(c clientset.Interface, vsp *vsphere.VSphere, volum
}
err = wait.Poll(pollTime, timeout, func() (bool, error) {
diskAttached, err = verifyVSphereDiskAttached(c, vsp, volumePath, nodeName)
diskAttached, err = diskIsAttached(volumePath, nodeName)
if err != nil {
return true, err
}
@ -161,13 +144,13 @@ func waitForVSphereDiskStatus(c clientset.Interface, vsp *vsphere.VSphere, volum
}
// Wait until vsphere vmdk is attached from the given node or time out after 6 minutes
func waitForVSphereDiskToAttach(c clientset.Interface, vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) error {
return waitForVSphereDiskStatus(c, vsp, volumePath, nodeName, volumeStateAttached)
func waitForVSphereDiskToAttach(volumePath string, nodeName string) error {
return waitForVSphereDiskStatus(volumePath, nodeName, volumeStateAttached)
}
// Wait until vsphere vmdk is detached from the given node or time out after 6 minutes
func waitForVSphereDiskToDetach(c clientset.Interface, vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) error {
return waitForVSphereDiskStatus(c, vsp, volumePath, nodeName, volumeStateDetached)
func waitForVSphereDiskToDetach(volumePath string, nodeName string) error {
return waitForVSphereDiskStatus(volumePath, nodeName, volumeStateDetached)
}
// function to create vsphere volume spec with given VMDK volume path, Reclaim Policy and labels
@ -241,27 +224,6 @@ func getVSpherePersistentVolumeClaimSpec(namespace string, labels map[string]str
return pvc
}
// function to create vmdk volume
func createVSphereVolume(vsp *vsphere.VSphere, volumeOptions *vclib.VolumeOptions) (string, error) {
var (
volumePath string
err error
)
if volumeOptions == nil {
volumeOptions = new(vclib.VolumeOptions)
volumeOptions.CapacityKB = 2097152
volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
}
volumePath, err = vsp.CreateVolume(volumeOptions)
Expect(err).NotTo(HaveOccurred())
return volumePath, nil
}
// CreateVSphereVolume creates a vmdk volume
func CreateVSphereVolume(vsp *vsphere.VSphere, volumeOptions *vclib.VolumeOptions) (string, error) {
return createVSphereVolume(vsp, volumeOptions)
}
// function to write content to the volume backed by given PVC
func writeContentToVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
utils.RunInPodWithVolume(client, pvc.Namespace, pvc.Name, "echo "+expectedContent+" > /mnt/test/data")
@ -426,12 +388,12 @@ func createEmptyFilesOnVSphereVolume(namespace string, podName string, filePaths
}
// verify volumes are attached to the node and are accessible in pod
func verifyVSphereVolumesAccessible(c clientset.Interface, pod *v1.Pod, persistentvolumes []*v1.PersistentVolume, vsp *vsphere.VSphere) {
func verifyVSphereVolumesAccessible(c clientset.Interface, pod *v1.Pod, persistentvolumes []*v1.PersistentVolume) {
nodeName := pod.Spec.NodeName
namespace := pod.Namespace
for index, pv := range persistentvolumes {
// Verify disks are attached to the node
isAttached, err := verifyVSphereDiskAttached(c, vsp, pv.Spec.VsphereVolume.VolumePath, types.NodeName(nodeName))
isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk %v is not attached with the node", pv.Spec.VsphereVolume.VolumePath))
// Verify Volumes are accessible
@ -450,29 +412,173 @@ func getvSphereVolumePathFromClaim(client clientset.Interface, namespace string,
return pv.Spec.VsphereVolume.VolumePath
}
func addNodesToVCP(vsp *vsphere.VSphere, c clientset.Interface) error {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
// Get canonical volume path for volume Path.
// Example1: The canonical path for volume path - [vsanDatastore] kubevols/volume.vmdk will be [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk
// Example2: The canonical path for volume path - [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk will be same as volume Path.
func getCanonicalVolumePath(ctx context.Context, dc *object.Datacenter, volumePath string) (string, error) {
var folderID string
canonicalVolumePath := volumePath
dsPathObj, err := getDatastorePathObjFromVMDiskPath(volumePath)
if err != nil {
return err
return "", err
}
for _, node := range nodes.Items {
vsp.NodeAdded(&node)
dsPath := strings.Split(strings.TrimSpace(dsPathObj.Path), "/")
if len(dsPath) <= 1 {
return canonicalVolumePath, nil
}
return nil
datastore := dsPathObj.Datastore
dsFolder := dsPath[0]
// Get the datastore folder ID if datastore or folder doesn't exist in datastoreFolderIDMap
if !isValidUUID(dsFolder) {
dummyDiskVolPath := "[" + datastore + "] " + dsFolder + "/" + DummyDiskName
// Querying a non-existent dummy disk on the datastore folder.
// It would fail and return an folder ID in the error message.
_, err := getVirtualDiskPage83Data(ctx, dc, dummyDiskVolPath)
if err != nil {
re := regexp.MustCompile("File (.*?) was not found")
match := re.FindStringSubmatch(err.Error())
canonicalVolumePath = match[1]
}
}
diskPath := getPathFromVMDiskPath(canonicalVolumePath)
if diskPath == "" {
return "", fmt.Errorf("Failed to parse canonicalVolumePath: %s in getcanonicalVolumePath method", canonicalVolumePath)
}
folderID = strings.Split(strings.TrimSpace(diskPath), "/")[0]
canonicalVolumePath = strings.Replace(volumePath, dsFolder, folderID, 1)
return canonicalVolumePath, nil
}
func getVSphere(c clientset.Interface) (*vsphere.VSphere, error) {
vsp, err := vsphere.GetVSphere()
// getPathFromVMDiskPath retrieves the path from VM Disk Path.
// Example: For vmDiskPath - [vsanDatastore] kubevols/volume.vmdk, the path is kubevols/volume.vmdk
func getPathFromVMDiskPath(vmDiskPath string) string {
datastorePathObj := new(object.DatastorePath)
isSuccess := datastorePathObj.FromString(vmDiskPath)
if !isSuccess {
framework.Logf("Failed to parse vmDiskPath: %s", vmDiskPath)
return ""
}
return datastorePathObj.Path
}
//getDatastorePathObjFromVMDiskPath gets the datastorePathObj from VM disk path.
func getDatastorePathObjFromVMDiskPath(vmDiskPath string) (*object.DatastorePath, error) {
datastorePathObj := new(object.DatastorePath)
isSuccess := datastorePathObj.FromString(vmDiskPath)
if !isSuccess {
framework.Logf("Failed to parse volPath: %s", vmDiskPath)
return nil, fmt.Errorf("Failed to parse volPath: %s", vmDiskPath)
}
return datastorePathObj, nil
}
// getVirtualDiskPage83Data gets the virtual disk UUID by diskPath
func getVirtualDiskPage83Data(ctx context.Context, dc *object.Datacenter, diskPath string) (string, error) {
if len(diskPath) > 0 && filepath.Ext(diskPath) != ".vmdk" {
diskPath += ".vmdk"
}
vdm := object.NewVirtualDiskManager(dc.Client())
// Returns uuid of vmdk virtual disk
diskUUID, err := vdm.QueryVirtualDiskUuid(ctx, diskPath, dc)
if err != nil {
glog.Warningf("QueryVirtualDiskUuid failed for diskPath: %q. err: %+v", diskPath, err)
return "", err
}
diskUUID = formatVirtualDiskUUID(diskUUID)
return diskUUID, nil
}
// formatVirtualDiskUUID removes any spaces and hyphens in UUID
// Example UUID input is 42375390-71f9-43a3-a770-56803bcd7baa and output after format is 4237539071f943a3a77056803bcd7baa
func formatVirtualDiskUUID(uuid string) string {
uuidwithNoSpace := strings.Replace(uuid, " ", "", -1)
uuidWithNoHypens := strings.Replace(uuidwithNoSpace, "-", "", -1)
return strings.ToLower(uuidWithNoHypens)
}
//isValidUUID checks if the string is a valid UUID.
func isValidUUID(uuid string) bool {
r := regexp.MustCompile("^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$")
return r.MatchString(uuid)
}
// removeStorageClusterORFolderNameFromVDiskPath removes the cluster or folder path from the vDiskPath
// for vDiskPath [DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, return value is [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk
// for vDiskPath [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, return value remains same [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk
func removeStorageClusterORFolderNameFromVDiskPath(vDiskPath string) string {
datastore := regexp.MustCompile("\\[(.*?)\\]").FindStringSubmatch(vDiskPath)[1]
if filepath.Base(datastore) != datastore {
vDiskPath = strings.Replace(vDiskPath, datastore, filepath.Base(datastore), 1)
}
return vDiskPath
}
// getVirtualDeviceByPath gets the virtual device by path
func getVirtualDeviceByPath(ctx context.Context, vm *object.VirtualMachine, diskPath string) (vim25types.BaseVirtualDevice, error) {
vmDevices, err := vm.Device(ctx)
if err != nil {
framework.Logf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err)
return nil, err
}
addNodesToVCP(vsp, c)
return vsp, nil
// filter vm devices to retrieve device for the given vmdk file identified by disk path
for _, device := range vmDevices {
if vmDevices.TypeName(device) == "VirtualDisk" {
virtualDevice := device.GetVirtualDevice()
if backing, ok := virtualDevice.Backing.(*vim25types.VirtualDiskFlatVer2BackingInfo); ok {
if matchVirtualDiskAndVolPath(backing.FileName, diskPath) {
framework.Logf("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath)
return device, nil
} else {
framework.Logf("VirtualDisk backing filename %q does not match with diskPath %q", backing.FileName, diskPath)
}
}
}
}
return nil, nil
}
// GetVSphere returns vsphere cloud provider
func GetVSphere(c clientset.Interface) (*vsphere.VSphere, error) {
return getVSphere(c)
func matchVirtualDiskAndVolPath(diskPath, volPath string) bool {
fileExt := ".vmdk"
diskPath = strings.TrimSuffix(diskPath, fileExt)
volPath = strings.TrimSuffix(volPath, fileExt)
return diskPath == volPath
}
// convertVolPathsToDevicePaths removes cluster or folder path from volPaths and convert to canonicalPath
func convertVolPathsToDevicePaths(ctx context.Context, nodeVolumes map[string][]string) (map[string][]string, error) {
vmVolumes := make(map[string][]string)
for nodeName, volPaths := range nodeVolumes {
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
datacenter := nodeInfo.VSphere.GetDatacenterFromObjectReference(ctx, nodeInfo.DataCenterRef)
for i, volPath := range volPaths {
deviceVolPath, err := convertVolPathToDevicePath(ctx, datacenter, volPath)
if err != nil {
framework.Logf("Failed to convert vsphere volume path %s to device path for volume %s. err: %+v", volPath, deviceVolPath, err)
return nil, err
}
volPaths[i] = deviceVolPath
}
vmVolumes[nodeName] = volPaths
}
return vmVolumes, nil
}
// convertVolPathToDevicePath takes volPath and returns canonical volume path
func convertVolPathToDevicePath(ctx context.Context, dc *object.Datacenter, volPath string) (string, error) {
volPath = removeStorageClusterORFolderNameFromVDiskPath(volPath)
// Get the canonical volume path for volPath.
canonicalVolumePath, err := getCanonicalVolumePath(ctx, dc, volPath)
if err != nil {
framework.Logf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err)
return "", err
}
// Check if the volume path contains .vmdk extension. If not, add the extension and update the nodeVolumes Map
if len(canonicalVolumePath) > 0 && filepath.Ext(canonicalVolumePath) != ".vmdk" {
canonicalVolumePath += ".vmdk"
}
return canonicalVolumePath, nil
}
// get .vmx file path for a virtual machine
@ -567,3 +673,86 @@ func registerNodeVM(nodeName, workingDir, vmxFilePath string, rpool *object.Reso
poweronNodeVM(nodeName, vm)
}
// disksAreAttached takes map of node and it's volumes and returns map of node, its volumes and attachment state
func disksAreAttached(nodeVolumes map[string][]string) (nodeVolumesAttachMap map[string]map[string]bool, err error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
disksAttached := make(map[string]map[string]bool)
if len(nodeVolumes) == 0 {
return disksAttached, nil
}
// Convert VolPaths into canonical form so that it can be compared with the VM device path.
vmVolumes, err := convertVolPathsToDevicePaths(ctx, nodeVolumes)
if err != nil {
framework.Logf("Failed to convert volPaths to devicePaths: %+v. err: %+v", nodeVolumes, err)
return nil, err
}
for vm, volumes := range vmVolumes {
volumeAttachedMap := make(map[string]bool)
for _, volume := range volumes {
attached, err := diskIsAttached(volume, vm)
if err != nil {
return nil, err
}
volumeAttachedMap[volume] = attached
}
nodeVolumesAttachMap[vm] = volumeAttachedMap
}
return disksAttached, nil
}
// diskIsAttached returns if disk is attached to the VM using controllers supported by the plugin.
func diskIsAttached(volPath string, nodeName string) (bool, error) {
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
Connect(ctx, nodeInfo.VSphere)
vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
volPath = removeStorageClusterORFolderNameFromVDiskPath(volPath)
device, err := getVirtualDeviceByPath(ctx, vm, volPath)
if err != nil {
framework.Logf("diskIsAttached failed to determine whether disk %q is still attached on node %q",
volPath,
nodeName)
return false, err
}
if device != nil {
framework.Logf("diskIsAttached found the disk %q attached on node %q",
volPath,
nodeName)
}
return true, nil
}
// getUUIDFromProviderID strips ProviderPrefix - "vsphere://" from the providerID
// this gives the VM UUID which can be used to find Node VM from vCenter
func getUUIDFromProviderID(providerID string) string {
return strings.TrimPrefix(providerID, ProviderPrefix)
}
// GetAllReadySchedulableNodeInfos returns NodeInfo objects for all nodes with Ready and schedulable state
func GetReadySchedulableNodeInfos() []*NodeInfo {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
var nodesInfo []*NodeInfo
for _, node := range nodeList.Items {
nodeInfo := TestContext.NodeMapper.GetNodeInfo(node.Name)
if nodeInfo != nil {
nodesInfo = append(nodesInfo, nodeInfo)
}
}
return nodesInfo
}
// GetReadySchedulableRandomNodeInfo returns NodeInfo object for one of the Ready and Schedulable Node.
// if multiple nodes are present with Ready and Scheduable state then one of the Node is selected randomly
// and it's associated NodeInfo object is returned.
func GetReadySchedulableRandomNodeInfo() *NodeInfo {
nodesInfo := GetReadySchedulableNodeInfos()
rand.Seed(time.Now().Unix())
Expect(nodesInfo).NotTo(BeEmpty())
return nodesInfo[rand.Int()%len(nodesInfo)]
}


@ -18,13 +18,10 @@ package vsphere
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -47,6 +44,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v
namespace string
scParameters map[string]string
clusterDatastore string
nodeInfo *NodeInfo
)
BeforeEach(func() {
@ -54,6 +52,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
nodeInfo = GetReadySchedulableRandomNodeInfo()
scParameters = make(map[string]string)
clusterDatastore = GetAndExpectStringEnvVar(VCPClusterDatastore)
})
@ -70,21 +69,19 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v
It("verify static provisioning on clustered datastore", func() {
var volumePath string
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
By("creating a test vsphere volume")
volumeOptions := new(vclib.VolumeOptions)
volumeOptions := new(VolumeOptions)
volumeOptions.CapacityKB = 2097152
volumeOptions.Name = "e2e-vmdk-" + namespace
volumeOptions.Datastore = clusterDatastore
volumePath, err = createVSphereVolume(vsp, volumeOptions)
volumePath, err := nodeInfo.VSphere.CreateVolume(volumeOptions, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
defer func() {
By("Deleting the vsphere volume")
vsp.DeleteVolume(volumePath)
nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}()
podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nil, nil)
@ -98,10 +95,10 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
nodeName := types.NodeName(pod.Spec.NodeName)
nodeName := pod.Spec.NodeName
By("Verifying volume is attached")
isAttached, err := verifyVSphereDiskAttached(client, vsp, volumePath, nodeName)
isAttached, err := diskIsAttached(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node: %v", volumePath, nodeName))
@ -110,7 +107,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v
Expect(err).NotTo(HaveOccurred())
By("Waiting for volumes to be detached from the node")
err = waitForVSphereDiskToDetach(client, vsp, volumePath, nodeName)
err = waitForVSphereDiskToDetach(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
})


@ -21,11 +21,11 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stype "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
@ -68,13 +68,8 @@ var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() {
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if len(nodeList.Items) != 0 {
nodeName = nodeList.Items[0].Name
} else {
framework.Failf("Unable to find ready and schedulable Node")
}
if !isNodeLabeled {
nodeName = GetReadySchedulableRandomNodeInfo().Name
nodeLabelValue = "vsphere_e2e_" + string(uuid.NewUUID())
nodeKeyValueLabel = make(map[string]string)
nodeKeyValueLabel[NodeLabelKey] = nodeLabelValue
@ -147,19 +142,21 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
pod, err := client.CoreV1().Pods(namespace).Create(podSpec)
Expect(err).NotTo(HaveOccurred())
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
verifyVSphereDiskAttached(client, vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(nodeName))
By("Waiting for pod to be running")
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName)
Expect(isAttached).To(BeTrue())
Expect(err).NotTo(HaveOccurred())
By("Verify Disk Format")
Expect(verifyDiskFormat(client, nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat)).To(BeTrue(), "DiskFormat Verification Failed")
var volumePaths []string
volumePaths = append(volumePaths, pv.Spec.VsphereVolume.VolumePath)
By("Delete pod and wait for volume to be detached from node")
deletePodAndWaitForVolumeToDetach(f, client, pod, vsp, nodeName, volumePaths)
deletePodAndWaitForVolumeToDetach(f, client, pod, nodeName, volumePaths)
}
@ -173,12 +170,9 @@ func verifyDiskFormat(client clientset.Interface, nodeName string, pvVolumePath
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
nodeInfo, err := vsp.NodeManager().GetNodeInfo(k8stype.NodeName(nodeName))
Expect(err).NotTo(HaveOccurred())
vmDevices, err := nodeInfo.VM().Device(ctx)
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
vmDevices, err := vm.Device(ctx)
Expect(err).NotTo(HaveOccurred())
disks := vmDevices.SelectByType((*types.VirtualDisk)(nil))


@ -59,10 +59,6 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() {
namespace = f.Namespace.Name
scParameters = make(map[string]string)
datastore = GetAndExpectStringEnvVar(StorageClassDatastoreName)
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if !(len(nodeList.Items) > 0) {
framework.Failf("Unable to find ready and schedulable Node")
}
})
It("verify dynamically provisioned pv using storageclass with an invalid disk size fails", func() {


@ -24,9 +24,7 @@ import (
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stype "k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -75,8 +73,7 @@ var _ = utils.SIGDescribe("Volume FStype [Feature:vsphere]", func() {
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodeList.Items)).NotTo(BeZero(), "Unable to find ready and schedulable Node")
Expect(GetReadySchedulableNodeInfos()).NotTo(BeEmpty())
})
It("verify fstype - ext3 formatted volume", func() {
@ -99,28 +96,25 @@ func invokeTestForFstype(f *framework.Framework, client clientset.Interface, nam
framework.Logf("Invoking Test for fstype: %s", fstype)
scParameters := make(map[string]string)
scParameters["fstype"] = fstype
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
// Create Persistent Volume
By("Creating Storage Class With Fstype")
pvclaim, persistentvolumes := createVolume(client, namespace, scParameters)
// Create Pod and verify the persistent volume is accessible
pod := createPodAndVerifyVolumeAccessible(client, namespace, pvclaim, persistentvolumes, vsp)
_, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, expectedContent, time.Minute)
pod := createPodAndVerifyVolumeAccessible(client, namespace, pvclaim, persistentvolumes)
_, err := framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, expectedContent, time.Minute)
Expect(err).NotTo(HaveOccurred())
// Detach and delete volume
detachVolume(f, client, vsp, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
deleteVolume(client, pvclaim.Name, namespace)
detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
err = framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
Expect(err).To(BeNil())
}
func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interface, namespace string, fstype string) {
scParameters := make(map[string]string)
scParameters["fstype"] = fstype
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
// Create Persistent Volume
By("Creating Storage Class With Invalid Fstype")
@ -136,8 +130,9 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa
eventList, err := client.CoreV1().Events(namespace).List(metav1.ListOptions{})
// Detach and delete volume
detachVolume(f, client, vsp, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
deleteVolume(client, pvclaim.Name, namespace)
detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
err = framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
Expect(err).To(BeNil())
Expect(eventList.Items).NotTo(BeEmpty())
errorMsg := `MountVolume.MountDevice failed for volume "` + persistentvolumes[0].Name + `" : executable file not found`
@ -167,7 +162,7 @@ func createVolume(client clientset.Interface, namespace string, scParameters map
return pvclaim, persistentvolumes
}
func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace string, pvclaim *v1.PersistentVolumeClaim, persistentvolumes []*v1.PersistentVolume, vsp *vsphere.VSphere) *v1.Pod {
func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace string, pvclaim *v1.PersistentVolumeClaim, persistentvolumes []*v1.PersistentVolume) *v1.Pod {
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
By("Creating pod to attach PV to the node")
@ -177,18 +172,18 @@ func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace st
// Asserts: Right disk is attached to the pod
By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes, vsp)
verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
return pod
}
func detachVolume(f *framework.Framework, client clientset.Interface, vsp *vsphere.VSphere, pod *v1.Pod, volPath string) {
// detachVolume deletes the pod and waits until the volume is detached from the node.
func detachVolume(f *framework.Framework, client clientset.Interface, pod *v1.Pod, volPath string) {
pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).To(BeNil())
nodeName := pod.Spec.NodeName
By("Deleting pod")
framework.DeletePodWithWait(f, client, pod)
By("Waiting for volumes to be detached from the node")
waitForVSphereDiskToDetach(client, vsp, volPath, k8stype.NodeName(pod.Spec.NodeName))
}
func deleteVolume(client clientset.Interface, pvclaimName string, namespace string) {
framework.DeletePersistentVolumeClaim(client, pvclaimName, namespace)
waitForVSphereDiskToDetach(volPath, nodeName)
}
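
A condensed sketch of the refactored helper flow in this file (createVolume → createPodAndVerifyVolumeAccessible → detachVolume → PVC cleanup), assuming the `f`, `client`, and `namespace` variables from the surrounding test; it is not the exact test body.

```go
scParameters := map[string]string{"fstype": "ext4"}

// Storage class + PVC + bound PV, as done by createVolume above.
pvclaim, persistentvolumes := createVolume(client, namespace, scParameters)

// The pod helper no longer needs a *vsphere.VSphere handle.
pod := createPodAndVerifyVolumeAccessible(client, namespace, pvclaim, persistentvolumes)

// detachVolume resolves the node from the pod spec, deletes the pod, and
// waits for the disk to detach via waitForVSphereDiskToDetach.
detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace))
```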


@ -24,7 +24,6 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
@ -54,6 +53,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
numNodes int
nodeKeyValueLabelList []map[string]string
nodeNameList []string
nodeInfo *NodeInfo
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
@ -67,7 +67,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
if numNodes < 2 {
framework.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
}
nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodes.Items[0].Name)
for i := 0; i < numNodes; i++ {
nodeName := nodes.Items[i].Name
nodeNameList = append(nodeNameList, nodeName)
@ -80,15 +80,11 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
})
It("verify volume remains attached after master kubelet restart", func() {
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
// Create pod on each node
for i := 0; i < numNodes; i++ {
By(fmt.Sprintf("%d: Creating a test vsphere volume", i))
volumePath, err := createVSphereVolume(vsp, nil)
volumePath, err := nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath)
By(fmt.Sprintf("Creating pod %d on node %v", i, nodeNameList[i]))
@ -105,9 +101,9 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
pods = append(pods, pod)
nodeName := types.NodeName(pod.Spec.NodeName)
By(fmt.Sprintf("Verify volume %s is attached to the pod %v", volumePath, nodeName))
isAttached, err := verifyVSphereDiskAttached(client, vsp, volumePath, types.NodeName(nodeName))
nodeName := pod.Spec.NodeName
By(fmt.Sprintf("Verify volume %s is attached to the pod %s", volumePath, nodeName))
isAttached, err := diskIsAttached(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached to the node", volumePath))
@ -115,7 +111,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
By("Restarting kubelet on master node")
masterAddress := framework.GetMasterHost() + ":22"
err = framework.RestartKubelet(masterAddress)
err := framework.RestartKubelet(masterAddress)
Expect(err).NotTo(HaveOccurred(), "Unable to restart kubelet on master node")
By("Verifying the kubelet on master node is up")
@ -124,23 +120,22 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
for i, pod := range pods {
volumePath := volumePaths[i]
nodeName := types.NodeName(pod.Spec.NodeName)
nodeName := pod.Spec.NodeName
By(fmt.Sprintf("After master restart, verify volume %v is attached to the pod %v", volumePath, nodeName))
isAttached, err := verifyVSphereDiskAttached(client, vsp, volumePaths[i], types.NodeName(nodeName))
isAttached, err := diskIsAttached(volumePaths[i], nodeName)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached to the node", volumePath))
By(fmt.Sprintf("Deleting pod on node %v", nodeName))
By(fmt.Sprintf("Deleting pod on node %s", nodeName))
err = framework.DeletePodWithWait(f, client, pod)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Waiting for volume %s to be detached from the node %v", volumePath, nodeName))
err = waitForVSphereDiskToDetach(client, vsp, volumePath, types.NodeName(nodeName))
By(fmt.Sprintf("Waiting for volume %s to be detached from the node %s", volumePath, nodeName))
err = waitForVSphereDiskToDetach(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Deleting volume %s", volumePath))
err = vsp.DeleteVolume(volumePath)
err = nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
}
})


@ -24,14 +24,13 @@ import (
. "github.com/onsi/gomega"
"golang.org/x/net/context"
"github.com/vmware/govmomi/object"
vimtypes "github.com/vmware/govmomi/vim25/types"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -46,8 +45,6 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
var (
client clientset.Interface
namespace string
vsp *vsphere.VSphere
err error
)
BeforeEach(func() {
@ -59,8 +56,6 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
Expect(len(nodeList.Items) > 1).To(BeTrue(), "At least 2 nodes are required for this test")
vsp, err = getVSphere(client)
Expect(err).NotTo(HaveOccurred())
})
/*
@ -105,17 +100,17 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
podList, err := framework.GetPodsForDeployment(client, deployment)
Expect(podList.Items).NotTo(BeEmpty())
pod := podList.Items[0]
node1 := types.NodeName(pod.Spec.NodeName)
node1 := pod.Spec.NodeName
By(fmt.Sprintf("Verify disk is attached to the node: %v", node1))
isAttached, err := verifyVSphereDiskAttached(client, vsp, volumePath, node1)
isAttached, err := diskIsAttached(volumePath, node1)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), "Disk is not attached to the node")
By(fmt.Sprintf("Power off the node: %v", node1))
nodeInfo, err := vsp.NodeManager().GetNodeInfo(node1)
Expect(err).NotTo(HaveOccurred())
vm := nodeInfo.VM()
nodeInfo := TestContext.NodeMapper.GetNodeInfo(node1)
vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
ctx, _ := context.WithCancel(context.Background())
_, err = vm.PowerOff(ctx)
Expect(err).NotTo(HaveOccurred())
@ -129,11 +124,11 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
Expect(err).NotTo(HaveOccurred(), "Pod did not fail over to a different node")
By(fmt.Sprintf("Waiting for disk to be attached to the new node: %v", node2))
err = waitForVSphereDiskToAttach(client, vsp, volumePath, node2)
err = waitForVSphereDiskToAttach(volumePath, node2)
Expect(err).NotTo(HaveOccurred(), "Disk is not attached to the node")
By(fmt.Sprintf("Waiting for disk to be detached from the previous node: %v", node1))
err = waitForVSphereDiskToDetach(client, vsp, volumePath, node1)
err = waitForVSphereDiskToDetach(volumePath, node1)
Expect(err).NotTo(HaveOccurred(), "Disk is not detached from the node")
By(fmt.Sprintf("Power on the previous node: %v", node1))
@ -144,10 +139,10 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
})
// Wait until the pod failed over to a different node, or time out after 3 minutes
func waitForPodToFailover(client clientset.Interface, deployment *extensions.Deployment, oldNode types.NodeName) (types.NodeName, error) {
func waitForPodToFailover(client clientset.Interface, deployment *extensions.Deployment, oldNode string) (string, error) {
var (
err error
newNode types.NodeName
newNode string
timeout = 3 * time.Minute
pollTime = 10 * time.Second
)
@ -178,10 +173,11 @@ func waitForPodToFailover(client clientset.Interface, deployment *extensions.Dep
return getNodeForDeployment(client, deployment)
}
func getNodeForDeployment(client clientset.Interface, deployment *extensions.Deployment) (types.NodeName, error) {
// getNodeForDeployment returns the node name for the Deployment's pod
func getNodeForDeployment(client clientset.Interface, deployment *extensions.Deployment) (string, error) {
podList, err := framework.GetPodsForDeployment(client, deployment)
if err != nil {
return "", err
}
return types.NodeName(podList.Items[0].Spec.NodeName), nil
return podList.Items[0].Spec.NodeName, nil
}
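
Putting this file's pieces together, a hedged sketch of the power-off/failover sequence after the refactor; `node1`, `deployment`, `volumePath`, and `client` are assumed from the surrounding test, and every call mirrors a signature shown in the hunks above.

```go
// Resolve the VM for node1 through the test-side NodeMapper (populated by
// Bootstrap) instead of the production NodeManager.
nodeInfo := TestContext.NodeMapper.GetNodeInfo(node1)
vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_, err := vm.PowerOff(ctx)
Expect(err).NotTo(HaveOccurred())

// Node names are plain strings now, so no types.NodeName conversions.
node2, err := waitForPodToFailover(client, deployment, node1)
Expect(err).NotTo(HaveOccurred(), "Pod did not fail over to a different node")

Expect(waitForVSphereDiskToAttach(volumePath, node2)).To(Succeed())
Expect(waitForVSphereDiskToDetach(volumePath, node1)).To(Succeed())
```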


@ -25,9 +25,7 @@ import (
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
k8stype "k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -59,17 +57,13 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
persistentvolumes []*v1.PersistentVolume
err error
volume_ops_scale int
vsp *vsphere.VSphere
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if len(nodeList.Items) == 0 {
framework.Failf("Unable to find ready and schedulable Node")
}
Expect(GetReadySchedulableNodeInfos()).NotTo(BeEmpty())
if os.Getenv("VOLUME_OPS_SCALE") != "" {
volume_ops_scale, err = strconv.Atoi(os.Getenv("VOLUME_OPS_SCALE"))
Expect(err).NotTo(HaveOccurred())
@ -77,8 +71,6 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
volume_ops_scale = DEFAULT_VOLUME_OPS_SCALE
}
pvclaims = make([]*v1.PersistentVolumeClaim, volume_ops_scale)
vsp, err = getVSphere(client)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
By("Deleting PVCs")
@ -115,14 +107,14 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
Expect(err).NotTo(HaveOccurred())
By("Verify all volumes are accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes, vsp)
verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
By("Deleting pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, client, pod))
By("Waiting for volumes to be detached from the node")
for _, pv := range persistentvolumes {
waitForVSphereDiskToDetach(client, vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(pod.Spec.NodeName))
waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
}
})
})


@ -24,7 +24,6 @@ import (
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
storageV1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
@ -161,7 +160,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
totalpvs [][]*v1.PersistentVolume
totalpods []*v1.Pod
)
nodeVolumeMap := make(map[types.NodeName][]string)
nodeVolumeMap := make(map[string][]string)
latency = make(map[string]float64)
numPods := volumeCount / volumesPerPod
@ -198,18 +197,14 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
elapsed = time.Since(start)
latency[AttachOp] = elapsed.Seconds()
// Verify access to the volumes
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
for i, pod := range totalpods {
verifyVSphereVolumesAccessible(client, pod, totalpvs[i], vsp)
verifyVSphereVolumesAccessible(client, pod, totalpvs[i])
}
By("Deleting pods")
start = time.Now()
for _, pod := range totalpods {
err = framework.DeletePodWithWait(f, client, pod)
err := framework.DeletePodWithWait(f, client, pod)
Expect(err).NotTo(HaveOccurred())
}
elapsed = time.Since(start)
@ -217,12 +212,11 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
for i, pod := range totalpods {
for _, pv := range totalpvs[i] {
nodeName := types.NodeName(pod.Spec.NodeName)
nodeVolumeMap[nodeName] = append(nodeVolumeMap[nodeName], pv.Spec.VsphereVolume.VolumePath)
nodeVolumeMap[pod.Spec.NodeName] = append(nodeVolumeMap[pod.Spec.NodeName], pv.Spec.VsphereVolume.VolumePath)
}
}
err = waitForVSphereDisksToDetach(client, vsp, nodeVolumeMap)
err := waitForVSphereDisksToDetach(nodeVolumeMap)
Expect(err).NotTo(HaveOccurred())
By("Deleting the PVCs")


@ -24,11 +24,8 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -41,14 +38,14 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
var (
c clientset.Interface
ns string
vsp *vsphere.VSphere
volumePaths []string
node1Name string
node1KeyValueLabel map[string]string
node2Name string
node2KeyValueLabel map[string]string
isNodeLabeled bool
err error
nodeInfo *NodeInfo
vsp *VSphere
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
@ -59,18 +56,18 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
if !isNodeLabeled {
node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(c, ns)
isNodeLabeled = true
nodeInfo = TestContext.NodeMapper.GetNodeInfo(node1Name)
vsp = nodeInfo.VSphere
}
By("creating vmdk")
vsp, err = getVSphere(c)
Expect(err).NotTo(HaveOccurred())
volumePath, err := createVSphereVolume(vsp, nil)
volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath)
})
AfterEach(func() {
for _, volumePath := range volumePaths {
vsp.DeleteVolume(volumePath)
vsp.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}
volumePaths = nil
})
@ -107,24 +104,24 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
It("should create and delete pod with the same volume source on the same worker node", func() {
var volumeFiles []string
pod := createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths)
pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod
newEmptyFileName := fmt.Sprintf("/mnt/volume1/%v_1.txt", ns)
volumeFiles = append(volumeFiles, newEmptyFileName)
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node1Name, volumePaths)
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
By(fmt.Sprintf("Creating pod on the same node: %v", node1Name))
pod = createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths)
pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod
newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns)
volumeFiles = append(volumeFiles, newEmptyFileName)
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node1Name, volumePaths)
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
})
/*
@ -147,23 +144,23 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
It("should create and delete pod with the same volume source attach/detach to different worker nodes", func() {
var volumeFiles []string
pod := createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths)
pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod
newEmptyFileName := fmt.Sprintf("/mnt/volume1/%v_1.txt", ns)
volumeFiles = append(volumeFiles, newEmptyFileName)
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node1Name, volumePaths)
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
By(fmt.Sprintf("Creating pod on the another node: %v", node2Name))
pod = createPodWithVolumeAndNodeSelector(c, ns, vsp, node2Name, node2KeyValueLabel, volumePaths)
pod = createPodWithVolumeAndNodeSelector(c, ns, node2Name, node2KeyValueLabel, volumePaths)
newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns)
volumeFiles = append(volumeFiles, newEmptyFileName)
// Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node2Name, volumePaths)
deletePodAndWaitForVolumeToDetach(f, c, pod, node2Name, volumePaths)
})
/*
@ -182,12 +179,12 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
It("should create and delete pod with multiple volumes from same datastore", func() {
By("creating another vmdk")
volumePath, err := createVSphereVolume(vsp, nil)
volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath)
By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
pod := createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths)
pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod
volumeFiles := []string{
@ -195,9 +192,9 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
fmt.Sprintf("/mnt/volume2/%v_1.txt", ns),
}
createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node1Name, volumePaths)
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
pod = createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths)
pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod
newEmptyFilesNames := []string{
@ -224,17 +221,18 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
*/
It("should create and delete pod with multiple volumes from different datastore", func() {
By("creating another vmdk on non default shared datastore")
var volumeOptions *vclib.VolumeOptions
volumeOptions = new(vclib.VolumeOptions)
var volumeOptions *VolumeOptions
volumeOptions = new(VolumeOptions)
volumeOptions.CapacityKB = 2097152
volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
volumeOptions.Datastore = GetAndExpectStringEnvVar(SecondSharedDatastore)
volumePath, err := createVSphereVolume(vsp, volumeOptions)
volumePath, err := vsp.CreateVolume(volumeOptions, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath)
By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
pod := createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths)
pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod
@ -243,10 +241,10 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
fmt.Sprintf("/mnt/volume2/%v_1.txt", ns),
}
createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node1Name, volumePaths)
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
pod = createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths)
pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod
newEmptyFileNames := []string{
@ -256,7 +254,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
volumeFiles = append(volumeFiles, newEmptyFileNames[0])
volumeFiles = append(volumeFiles, newEmptyFileNames[1])
createAndVerifyFilesOnVolume(ns, pod.Name, newEmptyFileNames, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node1Name, volumePaths)
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
})
/*
@ -289,24 +287,24 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, c, podB), "defer: Failed to delete pod ", podB.Name)
By(fmt.Sprintf("wait for volumes to be detached from the node: %v", node1Name))
for _, volumePath := range volumePaths {
framework.ExpectNoError(waitForVSphereDiskToDetach(c, vsp, volumePath, types.NodeName(node1Name)))
framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, node1Name))
}
}()
testvolumePathsPodA = append(testvolumePathsPodA, volumePaths[0])
// Create another VMDK Volume
By("creating another vmdk")
volumePath, err := createVSphereVolume(vsp, nil)
volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath)
testvolumePathsPodB = append(testvolumePathsPodA, volumePath)
for index := 0; index < 5; index++ {
By(fmt.Sprintf("Creating pod-A on the node: %v with volume: %v", node1Name, testvolumePathsPodA[0]))
podA = createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, testvolumePathsPodA)
podA = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodA)
By(fmt.Sprintf("Creating pod-B on the node: %v with volume: %v", node1Name, testvolumePathsPodB[0]))
podB = createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, testvolumePathsPodB)
podB = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodB)
podAFileName := fmt.Sprintf("/mnt/volume1/podA_%v_%v.txt", ns, index+1)
podBFileName := fmt.Sprintf("/mnt/volume1/podB_%v_%v.txt", ns, index+1)
@ -353,7 +351,7 @@ func testSetupVolumePlacement(client clientset.Interface, namespace string) (nod
return node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel
}
func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace string, vsp *vsphere.VSphere, nodeName string, nodeKeyValueLabel map[string]string, volumePaths []string) *v1.Pod {
func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace string, nodeName string, nodeKeyValueLabel map[string]string, volumePaths []string) *v1.Pod {
var pod *v1.Pod
var err error
By(fmt.Sprintf("Creating pod on the node: %v", nodeName))
@ -366,7 +364,7 @@ func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace st
By(fmt.Sprintf("Verify volume is attached to the node:%v", nodeName))
for _, volumePath := range volumePaths {
isAttached, err := verifyVSphereDiskAttached(client, vsp, volumePath, types.NodeName(nodeName))
isAttached, err := diskIsAttached(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), "disk:"+volumePath+" is not attached with the node")
}
@ -383,12 +381,12 @@ func createAndVerifyFilesOnVolume(namespace string, podname string, newEmptyfile
verifyFilesExistOnVSphereVolume(namespace, podname, filesToCheck)
}
func deletePodAndWaitForVolumeToDetach(f *framework.Framework, c clientset.Interface, pod *v1.Pod, vsp *vsphere.VSphere, nodeName string, volumePaths []string) {
func deletePodAndWaitForVolumeToDetach(f *framework.Framework, c clientset.Interface, pod *v1.Pod, nodeName string, volumePaths []string) {
By("Deleting pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name)
By("Waiting for volume to be detached from the node")
for _, volumePath := range volumePaths {
framework.ExpectNoError(waitForVSphereDiskToDetach(c, vsp, volumePath, types.NodeName(nodeName)))
framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, nodeName))
}
}
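
A short usage sketch for the two helpers above now that the *vsphere.VSphere parameter is gone; `c`, `ns`, `f`, `node1Name`, `node1KeyValueLabel`, `volumePaths`, and `volumeFiles` are assumed from the surrounding test.

```go
// Schedule a pod onto the labeled node with the given VMDKs attached, verify
// the mounts are writable, then tear down and wait for detach by node name.
pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
```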


@ -27,7 +27,6 @@ import (
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stype "k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
@ -95,6 +94,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
scParameters map[string]string
policyName string
tagPolicy string
masterNode string
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
@ -109,6 +109,9 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
if !(len(nodeList.Items) > 0) {
framework.Failf("Unable to find ready and schedulable Node")
}
masternodes, _ := framework.GetMasterAndWorkerNodesOrDie(client)
Expect(masternodes).NotTo(BeEmpty())
masterNode = masternodes.List()[0]
})
// Valid policy.
@ -222,7 +225,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
scParameters[Datastore] = VsanDatastore
framework.Logf("Invoking test for SPBM storage policy: %+v", scParameters)
kubernetesClusterName := GetAndExpectStringEnvVar(KubernetesClusterName)
invokeStaleDummyVMTestWithStoragePolicy(client, namespace, kubernetesClusterName, scParameters)
invokeStaleDummyVMTestWithStoragePolicy(client, masterNode, namespace, kubernetesClusterName, scParameters)
})
It("verify if a SPBM policy is not honored on a non-compatible datastore for dynamically provisioned pvc using storageclass", func() {
@ -290,16 +293,14 @@ func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, n
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
Expect(err).NotTo(HaveOccurred())
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes, vsp)
verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
By("Deleting pod")
framework.DeletePodWithWait(f, client, pod)
By("Waiting for volumes to be detached from the node")
waitForVSphereDiskToDetach(client, vsp, persistentvolumes[0].Spec.VsphereVolume.VolumePath, k8stype.NodeName(pod.Spec.NodeName))
waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
}
func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error {
@ -321,7 +322,7 @@ func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, sc
return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)
}
func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, namespace string, clusterName string, scParameters map[string]string) {
func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterNode string, namespace string, clusterName string, scParameters map[string]string) {
By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
@ -348,7 +349,6 @@ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, namespa
fnvHash.Write([]byte(vmName))
dummyVMFullName := DummyVMPrefixName + "-" + fmt.Sprint(fnvHash.Sum32())
errorMsg := "Dummy VM - " + vmName + "is still present. Failing the test.."
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
Expect(vsp.IsDummyVMPresent(dummyVMFullName)).NotTo(BeTrue(), errorMsg)
nodeInfo := TestContext.NodeMapper.GetNodeInfo(masterNode)
Expect(nodeInfo.VSphere.IsVMPresent(dummyVMFullName, nodeInfo.DataCenterRef)).NotTo(BeTrue(), errorMsg)
}