Addressed review comments

pull/6/head
Divyen Patel 2018-02-13 12:39:54 -08:00
parent c0490fa623
commit 8823c229d5
9 changed files with 37 additions and 53 deletions

View File

@@ -504,10 +504,7 @@ var _ = utils.SIGDescribe("Volumes", func() {
     It("should be mountable", func() {
         framework.SkipUnlessProviderIs("vsphere")
         vspheretest.Bootstrap(f)
-        nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-        Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
-        nodeInfo := vspheretest.TestContext.NodeMapper.GetNodeInfo(nodeList.Items[0].Name)
+        nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo()
         var volumePath string
         config := framework.VolumeTestConfig{
             Namespace: namespace.Name,
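Note: the three dropped lines above (list nodes, assert non-empty, pick the first) collapse into one call to the new helper. A sketch of how the outer volumes test reaches it, assuming the vSphere test utilities live at k8s.io/kubernetes/test/e2e/storage/vsphere as the vspheretest identifiers here suggest:

    import (
        vspheretest "k8s.io/kubernetes/test/e2e/storage/vsphere"
    )

    // inside the It block, after vspheretest.Bootstrap(f):
    nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo()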

View File

@@ -17,6 +17,7 @@ limitations under the License.
 package vsphere

 import (
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/kubernetes/test/e2e/framework"
     "sync"
 )
@@ -43,10 +44,12 @@ func bootstrapOnce() {
     if err != nil {
         framework.Failf("Failed to bootstrap vSphere with error: %v", err)
     }
-    // 2. Get all ready nodes
-    nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
+    // 2. Get all nodes
+    nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
+    if err != nil {
+        framework.Failf("Failed to get nodes: %v", err)
+    }
     TestContext = VSphereContext{NodeMapper: &NodeMapper{}, VSphereInstances: vsphereInstances}
     // 3. Get Node to VSphere mapping
     err = TestContext.NodeMapper.GenerateNodeMap(vsphereInstances, *nodeList)
     if err != nil {
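Note: bootstrapOnce now maps every node rather than only the ready and schedulable ones, so the node-to-vSphere map is built for the whole cluster; readiness filtering moves to the per-test helper introduced later in this commit. The same client-go call in isolation, as a minimal sketch (the helper name is illustrative; client-go of this era takes no context argument):

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        clientset "k8s.io/client-go/kubernetes"
    )

    // listAllNodeNames mirrors the List call bootstrapOnce makes above and
    // returns the name of every node in the cluster, ready or not.
    func listAllNodeNames(c clientset.Interface) ([]string, error) {
        nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
        if err != nil {
            return nil, err
        }
        names := make([]string, 0, len(nodeList.Items))
        for _, node := range nodeList.Items {
            names = append(names, node.Name)
        }
        return names, nil
    }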

View File

@@ -51,11 +51,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
     BeforeEach(func() {
         framework.SkipUnlessProviderIs("vsphere")
         Bootstrap(f)
-        nodes := framework.GetReadySchedulableNodesOrDie(c)
-        if len(nodes.Items) < 1 {
-            framework.Skipf("Requires at least %d node", 1)
-        }
-        nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodes.Items[0].Name)
+        nodeInfo = GetReadySchedulableRandomNodeInfo()
         pv = nil
         pvc = nil
         volumePath = ""

View File

@@ -63,11 +63,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
     c = f.ClientSet
     ns = f.Namespace.Name
     Bootstrap(f)
-    nodes := framework.GetReadySchedulableNodesOrDie(c)
-    if len(nodes.Items) < 1 {
-        framework.Skipf("Requires at least %d node", 1)
-    }
-    nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodes.Items[0].Name)
+    nodeInfo = GetReadySchedulableRandomNodeInfo()
     framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
     ssdlabels = make(map[string]string)
     ssdlabels["volume-type"] = "ssd"

View File

@@ -18,6 +18,7 @@ package vsphere

 import (
     "fmt"
+    "math/rand"
     "path/filepath"
     "time"
@@ -513,18 +514,6 @@ func removeStorageClusterORFolderNameFromVDiskPath(vDiskPath string) string {
     return vDiskPath
 }

-// isDiskAttached checks if disk is attached to the VM.
-func isDiskAttached(ctx context.Context, vm *object.VirtualMachine, diskPath string) (bool, error) {
-    device, err := getVirtualDeviceByPath(ctx, vm, diskPath)
-    if err != nil {
-        return false, err
-    }
-    if device != nil {
-        return true, nil
-    }
-    return false, nil
-}
-
 // getVirtualDeviceByPath gets the virtual device by path
 func getVirtualDeviceByPath(ctx context.Context, vm *object.VirtualMachine, diskPath string) (vim25types.BaseVirtualDevice, error) {
     vmDevices, err := vm.Device(ctx)
@@ -541,6 +530,8 @@ func getVirtualDeviceByPath(ctx context.Context, vm *object.VirtualMachine, diskPath string) (vim25types.BaseVirtualDevice, error) {
             if matchVirtualDiskAndVolPath(backing.FileName, diskPath) {
                 framework.Logf("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath)
                 return device, nil
+            } else {
+                framework.Logf("VirtualDisk backing filename %q does not match with diskPath %q", backing.FileName, diskPath)
             }
         }
     }
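Note: getVirtualDeviceByPath answers (nil, nil) when no backing file matches, and the added else branch logs every non-matching backing, which makes a "disk not found" run debuggable from the logs alone. A hedged sketch of a caller honoring that contract (the helper name is hypothetical; context and object come from the standard library and govmomi):

    // diskPathExistsOnVM reports whether any virtual device on the VM is backed
    // by the given disk path; a nil device with a nil error means not found.
    func diskPathExistsOnVM(ctx context.Context, vm *object.VirtualMachine, diskPath string) (bool, error) {
        device, err := getVirtualDeviceByPath(ctx, vm, diskPath)
        if err != nil {
            return false, err // the device walk itself failed
        }
        return device != nil, nil
    }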
@@ -715,21 +706,34 @@ func diskIsAttached(volPath string, nodeName string) (bool, error) {
     // Create context
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
     nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
     Connect(ctx, nodeInfo.VSphere)
     vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
     volPath = removeStorageClusterORFolderNameFromVDiskPath(volPath)
-    attached, err := isDiskAttached(ctx, vm, volPath)
+    device, err := getVirtualDeviceByPath(ctx, vm, volPath)
     if err != nil {
+        framework.Logf("diskIsAttached failed to determine whether disk %q is still attached on node %q",
+            volPath,
+            nodeName)
         return false, err
     }
-    framework.Logf("diskIsAttached result: %v and error: %v, for volume: %s", attached, err, volPath)
-    return attached, err
+    if device == nil {
+        return false, nil
+    }
+    framework.Logf("diskIsAttached found the disk %q attached on node %q",
+        volPath,
+        nodeName)
+    return true, nil
 }

 func getUUIDFromProviderID(providerID string) string {
     return strings.TrimPrefix(providerID, ProviderPrefix)
 }

+func GetReadySchedulableRandomNodeInfo() *NodeInfo {
+    nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
+    Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
+    rand.Seed(time.Now().Unix())
+    nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeList.Items[rand.Int()%len(nodeList.Items)].Name)
+    Expect(nodeInfo).NotTo(BeNil())
+    return nodeInfo
+}
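Note: with isDiskAttached removed, diskIsAttached is the one entry point tests poll while waiting for a detach, and GetReadySchedulableRandomNodeInfo folds the list/assert/seed/pick sequence that each suite previously inlined. An illustrative wait loop over diskIsAttached, assuming wait.Poll from k8s.io/apimachinery/pkg/util/wait; the helper name and intervals are made up:

    // waitForDetach polls diskIsAttached until the volume is no longer
    // attached to the node, or until the timeout expires.
    func waitForDetach(volPath, nodeName string) error {
        return wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) {
            attached, err := diskIsAttached(volPath, nodeName)
            if err != nil {
                return false, err
            }
            return !attached, nil
        })
    }

Re-seeding rand with time.Now().Unix() on every helper call is harmless for test purposes, though a single seed at bootstrap would serve equally well.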

View File

@@ -18,9 +18,6 @@ package vsphere

 import (
     "fmt"
-    "math/rand"
-    "time"
     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -55,11 +52,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:vsphere]", func() {
     Bootstrap(f)
     client = f.ClientSet
     namespace = f.Namespace.Name
-    nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-    Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
-    rand.Seed(time.Now().Unix())
-    nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodeList.Items[rand.Int()%len(nodeList.Items)].Name)
+    nodeInfo = GetReadySchedulableRandomNodeInfo()
     scParameters = make(map[string]string)
     clusterDatastore = GetAndExpectStringEnvVar(VCPClusterDatastore)
 })

View File

@@ -17,9 +17,7 @@ limitations under the License.
 package vsphere

 import (
-    "math/rand"
     "path/filepath"
-    "time"

     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
@@ -70,14 +68,8 @@ var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() {
     Bootstrap(f)
     client = f.ClientSet
     namespace = f.Namespace.Name
-    nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-    if len(nodeList.Items) != 0 {
-        rand.Seed(time.Now().Unix())
-        nodeName = nodeList.Items[rand.Int()%len(nodeList.Items)].Name
-    } else {
-        framework.Failf("Unable to find ready and schedulable Node")
-    }
     if !isNodeLabeled {
+        nodeName = GetReadySchedulableRandomNodeInfo().Name
         nodeLabelValue = "vsphere_e2e_" + string(uuid.NewUUID())
         nodeKeyValueLabel = make(map[string]string)
         nodeKeyValueLabel[NodeLabelKey] = nodeLabelValue
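Note: node selection now happens inside the isNodeLabeled guard, so the suite picks and labels one random node on the first BeforeEach and reuses it afterwards. A sketch of the complete label-once pattern, including the AddOrUpdateLabelOnNode call that presumably follows in the elided lines (hedged; it is not shown in this hunk):

    if !isNodeLabeled {
        nodeName = GetReadySchedulableRandomNodeInfo().Name
        nodeLabelValue = "vsphere_e2e_" + string(uuid.NewUUID())
        nodeKeyValueLabel = map[string]string{NodeLabelKey: nodeLabelValue}
        framework.AddOrUpdateLabelOnNode(client, nodeName, NodeLabelKey, nodeLabelValue)
        isNodeLabeled = true
    }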

View File

@@ -175,11 +175,14 @@ func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace st
 }

 func detachVolume(f *framework.Framework, client clientset.Interface, pod *v1.Pod, volPath string) {
+    pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
+    Expect(err).To(BeNil())
+    nodeName := pod.Spec.NodeName
     By("Deleting pod")
     framework.DeletePodWithWait(f, client, pod)

     By("Waiting for volumes to be detached from the node")
-    waitForVSphereDiskToDetach(volPath, pod.Spec.NodeName)
+    waitForVSphereDiskToDetach(volPath, nodeName)
 }

 func deleteVolume(client clientset.Interface, pvclaimName string, namespace string) {
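Note: the detachVolume fix re-fetches the pod so that Spec.NodeName is populated by the scheduler (the caller's copy may predate binding), and captures the node name before DeletePodWithWait removes the object from the API. The same idea as a standalone sketch (the helper name is hypothetical; client-go of this era takes no context argument):

    // deleteAndReturnNodeName deletes the pod and reports which node it ran on.
    func deleteAndReturnNodeName(f *framework.Framework, client clientset.Interface, pod *v1.Pod) (string, error) {
        pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
        if err != nil {
            return "", err
        }
        nodeName := pod.Spec.NodeName // read before the pod object is gone
        if err := framework.DeletePodWithWait(f, client, pod); err != nil {
            return "", err
        }
        return nodeName, nil
    }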

View File

@@ -56,9 +56,9 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
     if !isNodeLabeled {
         node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(c, ns)
         isNodeLabeled = true
-        nodeInfo = TestContext.NodeMapper.GetNodeInfo(node1Name)
-        vsp = nodeInfo.VSphere
     }
+    nodeInfo = TestContext.NodeMapper.GetNodeInfo(node1Name)
+    vsp = nodeInfo.VSphere
     By("creating vmdk")
     volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
     Expect(err).NotTo(HaveOccurred())
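Note: moving the GetNodeInfo and vsp assignments out of the isNodeLabeled guard means they are set on every BeforeEach run; previously they were only assigned when the labeling branch ran, so runs that found the nodes already labeled could observe them unset. For the vmdk creation that follows, passing a zero-value VolumeOptions picks defaults; a hedged sketch of requesting specifics instead (the field names are assumed from the suite's VolumeOptions type and may differ):

    volumeOptions := &VolumeOptions{
        Name:       "e2e-vmdk-placement", // illustrative name
        CapacityKB: 2097152,              // 2 GiB
        DiskFormat: "thin",
    }
    volumePath, err := vsp.CreateVolume(volumeOptions, nodeInfo.DataCenterRef)
    Expect(err).NotTo(HaveOccurred())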