mirror of https://github.com/k3s-io/k3s

Merge pull request #78961 from subramanian-neelakantan/automated-cherry-pick-of-#75187-upstream-release-1.14

Automated cherry pick of #75187: Use any host that mounts the datastore to create Volume (tag: k3s-v1.14.4)

commit 8daa63bd22
@@ -32,6 +32,7 @@ go_library(
         "//staging/src/k8s.io/cloud-provider:go_default_library",
         "//staging/src/k8s.io/cloud-provider/node/helpers:go_default_library",
         "//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library",
+        "//vendor/github.com/vmware/govmomi/find:go_default_library",
         "//vendor/github.com/vmware/govmomi/object:go_default_library",
         "//vendor/github.com/vmware/govmomi/property:go_default_library",
         "//vendor/github.com/vmware/govmomi/vapi/rest:go_default_library",
@@ -39,6 +39,11 @@ type NodeInfo struct {
     zone *cloudprovider.Zone
 }
 
+func (n NodeInfo) String() string {
+    return fmt.Sprintf("{datacenter: %v, vm: %v, vcServer: %s, vmUUID: %s, zone: %v}",
+        *n.dataCenter, n.vm.Reference(), n.vcServer, n.vmUUID, *n.zone)
+}
+
 type NodeManager struct {
     // TODO: replace map with concurrent map when k8s supports go v1.9
@@ -52,6 +52,7 @@ const (
     DatacenterType             = "Datacenter"
     ClusterComputeResourceType = "ClusterComputeResource"
     HostSystemType             = "HostSystem"
+    NameProperty               = "name"
 )
 
 // Test Constants
@@ -187,6 +187,28 @@ func (dc *Datacenter) GetDatastoreByName(ctx context.Context, name string) (*Dat
     return &datastore, nil
 }
 
+// GetDatastoreInfoByName gets the Datastore object for the given datastore name
+func (dc *Datacenter) GetDatastoreInfoByName(ctx context.Context, name string) (*DatastoreInfo, error) {
+    finder := getFinder(dc)
+    ds, err := finder.Datastore(ctx, name)
+    if err != nil {
+        klog.Errorf("Failed while searching for datastore: %s. err: %+v", name, err)
+        return nil, err
+    }
+    datastore := Datastore{ds, dc}
+    var dsMo mo.Datastore
+    pc := property.DefaultCollector(dc.Client())
+    properties := []string{DatastoreInfoProperty}
+    err = pc.RetrieveOne(ctx, ds.Reference(), properties, &dsMo)
+    if err != nil {
+        klog.Errorf("Failed to get Datastore managed objects from datastore reference."+
+            " dsRef: %+v, err: %+v", ds.Reference(), err)
+        return nil, err
+    }
+    klog.V(9).Infof("Result dsMo: %+v", dsMo)
+    return &DatastoreInfo{Datastore: &datastore, Info: dsMo.Info.GetDatastoreInfo()}, nil
+}
+
 // GetResourcePool gets the resource pool for the given path
 func (dc *Datacenter) GetResourcePool(ctx context.Context, resourcePoolPath string) (*object.ResourcePool, error) {
     finder := getFinder(dc)
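For orientation, the new helper returns a `DatastoreInfo`, which pairs the govmomi datastore object (usable for further API calls) with its retrieved summary properties. A hedged usage sketch, assuming `dc` is a `*vclib.Datacenter` obtained from an established connection (the datastore name is hypothetical):

```go
// Sketch only: requires a live vCenter connection.
dsInfo, err := dc.GetDatastoreInfoByName(ctx, "sharedVmfs-0") // hypothetical name
if err != nil {
    return err
}
// Info carries the summary fields; Url is what the CreateVolume changes
// below use to match the "same" datastore across datacenters.
klog.V(4).Infof("datastore %s has URL %s", dsInfo.Info.Name, dsInfo.Info.Url)
```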
@@ -96,8 +96,14 @@ func (ds *Datastore) GetDatastoreHostMounts(ctx context.Context) ([]types.Manage
         return nil, err
     }
     hosts := make([]types.ManagedObjectReference, len(dsMo.Host))
-    for _, dsHostMount := range dsMo.Host {
-        hosts = append(hosts, dsHostMount.Key)
+    for i, dsHostMount := range dsMo.Host {
+        hosts[i] = dsHostMount.Key
     }
     return hosts, nil
 }
+
+// Exists returns whether the given file exists in this datastore
+func (ds *Datastore) Exists(ctx context.Context, file string) bool {
+    _, err := ds.Datastore.Stat(ctx, file)
+    return err == nil
+}
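The loop rewrite above fixes a classic Go slice bug: `make` with a nonzero length already creates that many zero-valued elements, so a following `append` grows the slice past its intended size instead of filling it, leaving empty references at the front. A standalone illustration:

```go
package main

import "fmt"

func main() {
    src := []string{"host-1", "host-2"}

    // Buggy pattern: make() created len(src) zero-valued elements,
    // so append() adds after them, yielding ["", "", "host-1", "host-2"].
    buggy := make([]string, len(src))
    for _, s := range src {
        buggy = append(buggy, s)
    }
    fmt.Println(len(buggy), buggy) // 4 [  host-1 host-2]

    // Fixed pattern (as in the diff): assign by index so the slice
    // holds exactly the source elements.
    fixed := make([]string, len(src))
    for i, s := range src {
        fixed[i] = s
    }
    fmt.Println(len(fixed), fixed) // 2 [host-1 host-2]
}
```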
@@ -211,7 +211,7 @@ func (vmdisk vmDiskManager) createDummyVM(ctx context.Context, datacenter *vclib
 }
 
 // CleanUpDummyVMs deletes stale dummyVM's
-func CleanUpDummyVMs(ctx context.Context, folder *vclib.Folder, dc *vclib.Datacenter) error {
+func CleanUpDummyVMs(ctx context.Context, folder *vclib.Folder) error {
     vmList, err := folder.GetVirtualMachines(ctx)
     if err != nil {
         klog.V(4).Infof("Failed to get virtual machines in the kubernetes cluster: %s, err: %+v", folder.InventoryPath, err)

@@ -230,7 +230,7 @@ func CleanUpDummyVMs(ctx context.Context, folder *vclib.Folder, dc *vclib.Datace
         continue
     }
     if strings.HasPrefix(vmName, vclib.DummyVMPrefixName) {
-        vmObj := vclib.VirtualMachine{VirtualMachine: object.NewVirtualMachine(dc.Client(), vm.Reference()), Datacenter: dc}
+        vmObj := vclib.VirtualMachine{VirtualMachine: object.NewVirtualMachine(folder.Client(), vm.Reference())}
         dummyVMList = append(dummyVMList, &vmObj)
     }
 }
@@ -85,7 +85,7 @@ func (pbmClient *PbmClient) IsDatastoreCompatible(ctx context.Context, storagePo
 
 // GetCompatibleDatastores filters and returns compatible list of datastores for given storage policy id
 // For Non Compatible Datastores, fault message with the Datastore Name is also returned
-func (pbmClient *PbmClient) GetCompatibleDatastores(ctx context.Context, dc *Datacenter, storagePolicyID string, datastores []*DatastoreInfo) ([]*DatastoreInfo, string, error) {
+func (pbmClient *PbmClient) GetCompatibleDatastores(ctx context.Context, storagePolicyID string, datastores []*DatastoreInfo) ([]*DatastoreInfo, string, error) {
     var (
         dsMorNameMap = getDsMorNameMap(ctx, datastores)
         localizedMessagesForNotCompatibleDatastores = ""
@@ -235,7 +235,7 @@ func (vm *VirtualMachine) GetAllAccessibleDatastores(ctx context.Context) ([]*Da
 
     var dsMoList []mo.Datastore
     pc := property.DefaultCollector(vm.Client())
-    properties := []string{DatastoreInfoProperty}
+    properties := []string{DatastoreInfoProperty, NameProperty}
     err = pc.Retrieve(ctx, dsRefList, properties, &dsMoList)
     if err != nil {
         klog.Errorf("Failed to get Datastore managed objects from datastore objects."+

@@ -243,12 +243,19 @@ func (vm *VirtualMachine) GetAllAccessibleDatastores(ctx context.Context) ([]*Da
         return nil, err
     }
     klog.V(9).Infof("Result dsMoList: %+v", dsMoList)
+    finder := getFinder(vm.Datacenter)
     var dsObjList []*DatastoreInfo
     for _, dsMo := range dsMoList {
+        // use the finder so that InventoryPath is set correctly in ds
+        ds, err := finder.Datastore(ctx, dsMo.Name)
+        if err != nil {
+            klog.Errorf("Failed finding datastore: %s. err: %+v", dsMo.Name, err)
+            return nil, err
+        }
+        datastore := Datastore{ds, vm.Datacenter}
         dsObjList = append(dsObjList,
             &DatastoreInfo{
-                &Datastore{object.NewDatastore(vm.Client(), dsMo.Reference()),
-                    vm.Datacenter},
+                &datastore,
                 dsMo.Info.GetDatastoreInfo()})
     }
     return dsObjList, nil
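The detour through the finder matters because a datastore object built straight from a managed object reference has an empty `InventoryPath`, which later path-dependent helpers rely on. A hedged sketch of the difference, assuming a connected `*vim25.Client` `c`, a datacenter-scoped `*find.Finder`, and a retrieved `mo.Datastore` `dsMo`:

```go
// Sketch only: requires a live vCenter connection.
raw := object.NewDatastore(c, dsMo.Reference())
fmt.Println(raw.InventoryPath) // "" — constructed from the bare MoRef

// Resolved via the finder, InventoryPath is populated, e.g.
// "/DC0/datastore/LocalDS_0" (hypothetical), which path-building
// helpers depend on.
ds, err := finder.Datastore(ctx, dsMo.Name)
if err != nil {
    return nil, err
}
fmt.Println(ds.InventoryPath)
```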
@@ -1164,15 +1164,9 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string)
 func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVolumePath string, err error) {
     klog.V(1).Infof("Starting to create a vSphere volume with volumeOptions: %+v", volumeOptions)
     createVolumeInternal := func(volumeOptions *vclib.VolumeOptions) (canonicalVolumePath string, err error) {
-        var datastore string
+        var datastoreInfo *vclib.DatastoreInfo
         var dsList []*vclib.DatastoreInfo
-        // If datastore not specified, then use default datastore
-        if volumeOptions.Datastore == "" {
-            datastore = vs.cfg.Workspace.DefaultDatastore
-        } else {
-            datastore = volumeOptions.Datastore
-        }
-        datastore = strings.TrimSpace(datastore)
 
         // Create context
         ctx, cancel := context.WithCancel(context.Background())
         defer cancel()
@@ -1180,16 +1174,29 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo
         if err != nil {
             return "", err
         }
-        dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter)
+        // If datastore not specified, then use default datastore
+        datastoreName := strings.TrimSpace(volumeOptions.Datastore)
+        if datastoreName == "" {
+            datastoreName = strings.TrimSpace(vs.cfg.Workspace.DefaultDatastore)
+        }
+        // The given datastoreName may be present in more than one datacenter
+        candidateDatastoreInfos, err := vs.FindDatastoreByName(ctx, datastoreName)
         if err != nil {
             return "", err
         }
+        // Each of the datastores found is a candidate for Volume creation.
+        // One of these will be selected based on given policy and/or zone.
+        candidateDatastores := make(map[string]*vclib.DatastoreInfo)
+        for _, dsInfo := range candidateDatastoreInfos {
+            candidateDatastores[dsInfo.Info.Url] = dsInfo
+        }
 
         var vmOptions *vclib.VMOptions
         if volumeOptions.VSANStorageProfileData != "" || volumeOptions.StoragePolicyName != "" {
             // If datastore and zone are specified, first validate if the datastore is in the provided zone.
             if len(volumeOptions.Zone) != 0 && volumeOptions.Datastore != "" {
                 klog.V(4).Infof("Specified zone : %s, datastore : %s", volumeOptions.Zone, volumeOptions.Datastore)
-                dsList, err = getDatastoresForZone(ctx, dc, vs.nodeManager, volumeOptions.Zone)
+                dsList, err = getDatastoresForZone(ctx, vs.nodeManager, volumeOptions.Zone)
                 if err != nil {
                     return "", err
                 }
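The membership tests that follow match by datastore URL rather than by display name, using Go's comma-ok map index inside the `if` header so that `datastoreInfo` and `found` are assigned in one step. A standalone illustration of the idiom, with hypothetical URLs:

```go
package main

import "fmt"

func main() {
    // Hypothetical URL -> name mapping, as in candidateDatastores above.
    candidates := map[string]string{
        "ds:///vmfs/volumes/5e4a-example/": "sharedVmfs-0",
    }

    var picked string
    found := false
    for _, url := range []string{"ds:///vmfs/volumes/abcd-other/", "ds:///vmfs/volumes/5e4a-example/"} {
        // Comma-ok assignment in the if header: picked and found are set
        // together; the branch is taken only on a hit.
        if picked, found = candidates[url]; found {
            break
        }
    }
    fmt.Println(found, picked) // true sharedVmfs-0
}
```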
@@ -1197,8 +1204,7 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo
                 // Validate if the datastore provided belongs to the zone. If not, fail the operation.
                 found := false
                 for _, ds := range dsList {
-                    if ds.Info.Name == volumeOptions.Datastore {
-                        found = true
+                    if datastoreInfo, found = candidateDatastores[ds.Info.Url]; found {
                         break
                     }
                 }
@@ -1220,19 +1226,14 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo
             cleanUpRoutineInitialized = true
         }
         cleanUpRoutineInitLock.Unlock()
-        vmOptions, err = vs.setVMOptions(ctx, dc, vs.cfg.Workspace.ResourcePoolPath)
-        if err != nil {
-            klog.Errorf("Failed to set VM options requires to create a vsphere volume. err: %+v", err)
-            return "", err
-        }
     }
     if volumeOptions.StoragePolicyName != "" && volumeOptions.Datastore == "" {
         if len(volumeOptions.Zone) == 0 {
             klog.V(4).Infof("Selecting a shared datastore as per the storage policy %s", volumeOptions.StoragePolicyName)
-            datastore, err = getPbmCompatibleDatastore(ctx, dc, volumeOptions.StoragePolicyName, vs.nodeManager)
+            datastoreInfo, err = getPbmCompatibleDatastore(ctx, vsi.conn.Client, volumeOptions.StoragePolicyName, vs.nodeManager)
         } else {
             // If zone is specified, first get the datastores in the zone.
-            dsList, err = getDatastoresForZone(ctx, dc, vs.nodeManager, volumeOptions.Zone)
+            dsList, err = getDatastoresForZone(ctx, vs.nodeManager, volumeOptions.Zone)
 
             if err != nil {
                 klog.Errorf("Failed to find a shared datastore matching zone %s. err: %+v", volumeOptions.Zone, err)
@@ -1248,18 +1249,18 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo
             klog.V(4).Infof("Specified zone : %s. Picking a datastore as per the storage policy %s among the zoned datastores : %s", volumeOptions.Zone,
                 volumeOptions.StoragePolicyName, dsList)
             // Among the compatible datastores, select the one based on the maximum free space.
-            datastore, err = getPbmCompatibleZonedDatastore(ctx, dc, volumeOptions.StoragePolicyName, dsList)
+            datastoreInfo, err = getPbmCompatibleZonedDatastore(ctx, vsi.conn.Client, volumeOptions.StoragePolicyName, dsList)
         }
-        klog.V(1).Infof("Datastore selected as per policy : %s", datastore)
         if err != nil {
             klog.Errorf("Failed to get pbm compatible datastore with storagePolicy: %s. err: %+v", volumeOptions.StoragePolicyName, err)
             return "", err
         }
+        klog.V(1).Infof("Datastore selected as per policy : %s", datastoreInfo.Info.Name)
     } else {
         // If zone is specified, pick the datastore in the zone with maximum free space within the zone.
         if volumeOptions.Datastore == "" && len(volumeOptions.Zone) != 0 {
             klog.V(4).Infof("Specified zone : %s", volumeOptions.Zone)
-            dsList, err = getDatastoresForZone(ctx, dc, vs.nodeManager, volumeOptions.Zone)
+            dsList, err = getDatastoresForZone(ctx, vs.nodeManager, volumeOptions.Zone)
 
             if err != nil {
                 klog.Errorf("Failed to find a shared datastore matching zone %s. err: %+v", volumeOptions.Zone, err)
@@ -1272,40 +1273,40 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo
                 return "", err
             }
 
-            datastore, err = getMostFreeDatastoreName(ctx, nil, dsList)
+            datastoreInfo, err = getMostFreeDatastore(ctx, nil, dsList)
             if err != nil {
                 klog.Errorf("Failed to get shared datastore: %+v", err)
                 return "", err
             }
-            klog.V(1).Infof("Specified zone : %s. Selected datastore : %s", volumeOptions.StoragePolicyName, datastore)
+            klog.V(1).Infof("Specified zone : %s. Selected datastore : %s", volumeOptions.Zone, datastoreInfo.Info.Name)
         } else {
             var sharedDsList []*vclib.DatastoreInfo
             var err error
             if len(volumeOptions.Zone) == 0 {
                 // If zone is not provided, get the shared datastore across all node VMs.
-                klog.V(4).Infof("Validating if datastore %s is shared across all node VMs", datastore)
-                sharedDsList, err = getSharedDatastoresInK8SCluster(ctx, dc, vs.nodeManager)
+                klog.V(4).Infof("Validating if datastore %s is shared across all node VMs", datastoreName)
+                sharedDsList, err = getSharedDatastoresInK8SCluster(ctx, vs.nodeManager)
                 if err != nil {
                     klog.Errorf("Failed to get shared datastore: %+v", err)
                     return "", err
                 }
                 // Prepare error msg to be used later, if required.
-                err = fmt.Errorf("The specified datastore %s is not a shared datastore across node VMs", datastore)
+                err = fmt.Errorf("The specified datastore %s is not a shared datastore across node VMs", datastoreName)
             } else {
                 // If zone is provided, get the shared datastores in that zone.
-                klog.V(4).Infof("Validating if datastore %s is in zone %s ", datastore, volumeOptions.Zone)
-                sharedDsList, err = getDatastoresForZone(ctx, dc, vs.nodeManager, volumeOptions.Zone)
+                klog.V(4).Infof("Validating if datastore %s is in zone %s ", datastoreName, volumeOptions.Zone)
+                sharedDsList, err = getDatastoresForZone(ctx, vs.nodeManager, volumeOptions.Zone)
                 if err != nil {
                     klog.Errorf("Failed to find a shared datastore matching zone %s. err: %+v", volumeOptions.Zone, err)
                     return "", err
                 }
                 // Prepare error msg to be used later, if required.
-                err = fmt.Errorf("The specified datastore %s does not match the provided zones : %s", datastore, volumeOptions.Zone)
+                err = fmt.Errorf("The specified datastore %s does not match the provided zones : %s", datastoreName, volumeOptions.Zone)
             }
             found := false
             // Check if the selected datastore belongs to the list of shared datastores computed.
             for _, sharedDs := range sharedDsList {
-                if datastore == sharedDs.Info.Name {
+                if datastoreInfo, found = candidateDatastores[sharedDs.Info.Url]; found {
                     klog.V(4).Infof("Datastore validation succeeded")
-                    found = true
                     break
@@ -1317,11 +1318,19 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo
                 }
             }
         }
-        ds, err := dc.GetDatastoreByName(ctx, datastore)
+
+        // if datastoreInfo is still not determined, it is an error condition
+        if datastoreInfo == nil {
+            klog.Errorf("Ambigous datastore name %s, cannot be found among: %v", datastoreName, candidateDatastoreInfos)
+            return "", fmt.Errorf("Ambigous datastore name %s", datastoreName)
+        }
+        ds := datastoreInfo.Datastore
+        volumeOptions.Datastore = datastoreInfo.Info.Name
+        vmOptions, err = vs.setVMOptions(ctx, vsi.conn, ds)
         if err != nil {
+            klog.Errorf("Failed to set VM options required to create a vsphere volume. err: %+v", err)
             return "", err
         }
-        volumeOptions.Datastore = datastore
         kubeVolsPath := filepath.Clean(ds.Path(VolDir)) + "/"
         err = ds.CreateDirectory(ctx, kubeVolsPath, false)
         if err != nil && err != vclib.ErrFileAlreadyExist {
@@ -1336,18 +1345,18 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo
         }
         volumePath, err = disk.Create(ctx, ds)
         if err != nil {
-            klog.Errorf("Failed to create a vsphere volume with volumeOptions: %+v on datastore: %s. err: %+v", volumeOptions, datastore, err)
+            klog.Errorf("Failed to create a vsphere volume with volumeOptions: %+v on datastore: %s. err: %+v", volumeOptions, ds, err)
             return "", err
         }
         // Get the canonical path for the volume path.
-        canonicalVolumePath, err = getcanonicalVolumePath(ctx, dc, volumePath)
+        canonicalVolumePath, err = getcanonicalVolumePath(ctx, datastoreInfo.Datacenter, volumePath)
         if err != nil {
-            klog.Errorf("Failed to get canonical vsphere volume path for volume: %s with volumeOptions: %+v on datastore: %s. err: %+v", volumePath, volumeOptions, datastore, err)
+            klog.Errorf("Failed to get canonical vsphere volume path for volume: %s with volumeOptions: %+v on datastore: %s. err: %+v", volumePath, volumeOptions, ds, err)
             return "", err
         }
-        if filepath.Base(datastore) != datastore {
+        if filepath.Base(datastoreName) != datastoreName {
             // If datastore is within cluster, add cluster path to the volumePath
-            canonicalVolumePath = strings.Replace(canonicalVolumePath, filepath.Base(datastore), datastore, 1)
+            canonicalVolumePath = strings.Replace(canonicalVolumePath, filepath.Base(datastoreName), datastoreName, 1)
         }
         return canonicalVolumePath, nil
     }
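The trailing `filepath.Base` check handles datastores nested under a datastore cluster: the configured name then has a `cluster/datastore` shape while the canonical path carries only the base name, so the cluster prefix is spliced back in. A standalone illustration with hypothetical names:

```go
package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

func main() {
    // Hypothetical datastore nested under a datastore cluster: the
    // configured name carries the cluster prefix, the canonical volume
    // path only the datastore's own name.
    datastoreName := "dsCluster/sharedVmfs-0"
    canonical := "[sharedVmfs-0] kubevols/pv.vmdk"

    if filepath.Base(datastoreName) != datastoreName {
        // Re-insert the cluster prefix, as CreateVolume does.
        canonical = strings.Replace(canonical, filepath.Base(datastoreName), datastoreName, 1)
    }
    fmt.Println(canonical) // [dsCluster/sharedVmfs-0] kubevols/pv.vmdk
}
```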
@@ -1576,12 +1585,29 @@ func (vs *VSphere) GetVolumeLabels(volumePath string) (map[string]string, error)
         return nil, nil
     }
 
     // Find the datastore on which this volume resides
     datastorePathObj, err := vclib.GetDatastorePathObjFromVMDiskPath(volumePath)
     if err != nil {
         klog.Errorf("Failed to get datastore for volume: %v: %+v", volumePath, err)
         return nil, err
     }
-    dsZones, err := vs.GetZonesForDatastore(ctx, datastorePathObj.Datastore)
+    dsInfos, err := vs.FindDatastoreByName(ctx, datastorePathObj.Datastore)
+    if err != nil {
+        klog.Errorf("Failed to get datastore by name: %v: %+v", datastorePathObj.Datastore, err)
+        return nil, err
+    }
+    var datastore *vclib.Datastore
+    for _, dsInfo := range dsInfos {
+        if dsInfo.Datastore.Exists(ctx, datastorePathObj.Path) {
+            datastore = dsInfo.Datastore
+        }
+    }
+    if datastore == nil {
+        klog.Errorf("Could not find %s among %v", volumePath, dsInfos)
+        return nil, fmt.Errorf("Could not find the datastore for volume: %s", volumePath)
+    }
+
+    dsZones, err := vs.GetZonesForDatastore(ctx, datastore)
     if err != nil {
         klog.Errorf("Failed to get zones for datastore %v: %+v", datastorePathObj.Datastore, err)
         return nil, err
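For context, `vclib.GetDatastorePathObjFromVMDiskPath` splits a vSphere disk path of the form `[datastore] folder/file.vmdk` into the datastore name and the datastore-relative path; the name is then resolved across datacenters, and the path is what the new `Exists` probes on each candidate. A standalone sketch of that split (a simplified parser for illustration, not the vclib implementation):

```go
package main

import (
    "fmt"
    "strings"
)

// splitVMDiskPath mirrors what vclib.GetDatastorePathObjFromVMDiskPath
// extracts from a vSphere disk path: "[datastore] folder/file.vmdk".
func splitVMDiskPath(volPath string) (datastore, path string, ok bool) {
    if !strings.HasPrefix(volPath, "[") {
        return "", "", false
    }
    end := strings.Index(volPath, "]")
    if end < 0 {
        return "", "", false
    }
    return volPath[1:end], strings.TrimSpace(volPath[end+1:]), true
}

func main() {
    ds, p, ok := splitVMDiskPath("[sharedVmfs-0] kubevols/pv.vmdk")
    fmt.Println(ok, ds, p) // true sharedVmfs-0 kubevols/pv.vmdk
}
```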
@@ -1619,25 +1645,16 @@ func (vs *VSphere) collapseZonesInRegion(ctx context.Context, zones []cloudprovi
 }
 
 // GetZonesForDatastore returns all the zones from which this datastore is visible
-func (vs *VSphere) GetZonesForDatastore(ctx context.Context, datastore string) ([]cloudprovider.Zone, error) {
+func (vs *VSphere) GetZonesForDatastore(ctx context.Context, datastore *vclib.Datastore) ([]cloudprovider.Zone, error) {
     vsi, err := vs.getVSphereInstanceForServer(vs.cfg.Workspace.VCenterIP, ctx)
     if err != nil {
         klog.Errorf("Failed to get vSphere instance: %+v", err)
         return nil, err
     }
-    dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter)
-    if err != nil {
-        klog.Errorf("Failed to get datacenter: %+v", err)
-        return nil, err
-    }
 
     // get the hosts mounted on this datastore
     // datastore -> ["host-1", "host-2", "host-3", ...]
-    ds, err := dc.GetDatastoreByName(ctx, datastore)
-    if err != nil {
-        klog.Errorf("Failed to get datastore by name: %v: %+v", datastore, err)
-        return nil, err
-    }
-    dsHosts, err := ds.GetDatastoreHostMounts(ctx)
+    dsHosts, err := datastore.GetDatastoreHostMounts(ctx)
     if err != nil {
         klog.Errorf("Failed to get datastore host mounts for %v: %+v", datastore, err)
         return nil, err
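With the signature change, GetZonesForDatastore receives an already-resolved `*vclib.Datastore` and goes straight from datastore to host mounts to per-host zone tags. A hedged sketch of the first hop, assuming `ds` came from `FindDatastoreByName`:

```go
// Sketch only: requires a live vCenter connection.
// datastore -> ["host-1", "host-2", ...]; each host is then matched
// against its zone/region tags elsewhere in the zone lookup.
hostRefs, err := ds.GetDatastoreHostMounts(ctx)
if err != nil {
    return nil, err
}
for _, ref := range hostRefs {
    klog.V(4).Infof("datastore is mounted on host %s", ref.Value)
}
```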
@@ -20,20 +20,22 @@ import (
     "context"
     "errors"
     "fmt"
+    "math/rand"
     "os"
     "path/filepath"
     "strings"
     "time"
 
+    "github.com/vmware/govmomi/find"
     "github.com/vmware/govmomi/object"
     "github.com/vmware/govmomi/property"
     "github.com/vmware/govmomi/vim25"
     "github.com/vmware/govmomi/vim25/mo"
     "github.com/vmware/govmomi/vim25/soap"
     "github.com/vmware/govmomi/vim25/types"
+    v1 "k8s.io/api/core/v1"
     "k8s.io/klog"
 
-    "k8s.io/api/core/v1"
     k8stypes "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/version"
     "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
@@ -43,6 +45,7 @@ import (
 const (
     DatastoreProperty     = "datastore"
     DatastoreInfoProperty = "info"
+    DatastoreNameProperty = "name"
     Folder                = "Folder"
     VirtualMachine        = "VirtualMachine"
     DummyDiskName         = "kube-dummyDisk.vmdk"
@@ -121,7 +124,7 @@ func getAccessibleDatastores(ctx context.Context, nodeVmDetail *NodeDetails, nod
 }
 
 // Get all datastores accessible for the virtual machine object.
-func getSharedDatastoresInK8SCluster(ctx context.Context, dc *vclib.Datacenter, nodeManager *NodeManager) ([]*vclib.DatastoreInfo, error) {
+func getSharedDatastoresInK8SCluster(ctx context.Context, nodeManager *NodeManager) ([]*vclib.DatastoreInfo, error) {
     nodeVmDetails, err := nodeManager.GetNodeDetails()
     if err != nil {
         klog.Errorf("Error while obtaining Kubernetes node nodeVmDetail details. error : %+v", err)
@@ -155,12 +158,6 @@ func getSharedDatastoresInK8SCluster(ctx context.Context, dc *vclib.Datacenter,
         }
     }
     klog.V(9).Infof("sharedDatastores : %+v", sharedDatastores)
-    sharedDatastores, err = getDatastoresForEndpointVC(ctx, dc, sharedDatastores)
-    if err != nil {
-        klog.Errorf("Failed to get shared datastores from endpoint VC. err: %+v", err)
-        return nil, err
-    }
-    klog.V(9).Infof("sharedDatastores at endpoint VC: %+v", sharedDatastores)
     return sharedDatastores, nil
 }
@@ -182,7 +179,7 @@ func intersect(list1 []*vclib.DatastoreInfo, list2 []*vclib.DatastoreInfo) []*vc
 }
 
 // getMostFreeDatastore gets the best fit compatible datastore by free space.
-func getMostFreeDatastoreName(ctx context.Context, client *vim25.Client, dsInfoList []*vclib.DatastoreInfo) (string, error) {
+func getMostFreeDatastore(ctx context.Context, client *vim25.Client, dsInfoList []*vclib.DatastoreInfo) (*vclib.DatastoreInfo, error) {
     var curMax int64
     curMax = -1
     var index int
@@ -193,65 +190,46 @@ func getMostFreeDatastoreName(ctx context.Context, client *vim25.Client, dsInfoL
             index = i
         }
     }
-    return dsInfoList[index].Info.GetDatastoreInfo().Name, nil
+    return dsInfoList[index], nil
 }
 
-// Returns the datastores in the given datacenter by performing lookup based on datastore URL.
-func getDatastoresForEndpointVC(ctx context.Context, dc *vclib.Datacenter, sharedDsInfos []*vclib.DatastoreInfo) ([]*vclib.DatastoreInfo, error) {
-    var datastores []*vclib.DatastoreInfo
-    allDsInfoMap, err := dc.GetAllDatastores(ctx)
+func getPbmCompatibleDatastore(ctx context.Context, vcClient *vim25.Client, storagePolicyName string, nodeManager *NodeManager) (*vclib.DatastoreInfo, error) {
+    pbmClient, err := vclib.NewPbmClient(ctx, vcClient)
     if err != nil {
         return nil, err
     }
-    for _, sharedDsInfo := range sharedDsInfos {
-        dsInfo, ok := allDsInfoMap[sharedDsInfo.Info.Url]
-        if ok {
-            datastores = append(datastores, dsInfo)
-        } else {
-            klog.V(4).Infof("Warning: Shared datastore with URL %s does not exist in endpoint VC", sharedDsInfo.Info.Url)
-        }
-    }
-    klog.V(9).Infof("Datastore from endpoint VC: %+v", datastores)
-    return datastores, nil
-}
-
-func getPbmCompatibleDatastore(ctx context.Context, dc *vclib.Datacenter, storagePolicyName string, nodeManager *NodeManager) (string, error) {
-    pbmClient, err := vclib.NewPbmClient(ctx, dc.Client())
-    if err != nil {
-        return "", err
-    }
     storagePolicyID, err := pbmClient.ProfileIDByName(ctx, storagePolicyName)
     if err != nil {
         klog.Errorf("Failed to get Profile ID by name: %s. err: %+v", storagePolicyName, err)
-        return "", err
+        return nil, err
     }
-    sharedDs, err := getSharedDatastoresInK8SCluster(ctx, dc, nodeManager)
+    sharedDs, err := getSharedDatastoresInK8SCluster(ctx, nodeManager)
     if err != nil {
         klog.Errorf("Failed to get shared datastores. err: %+v", err)
-        return "", err
+        return nil, err
     }
     if len(sharedDs) == 0 {
         msg := "No shared datastores found in the endpoint virtual center"
         klog.Errorf(msg)
-        return "", errors.New(msg)
+        return nil, errors.New(msg)
     }
-    compatibleDatastores, _, err := pbmClient.GetCompatibleDatastores(ctx, dc, storagePolicyID, sharedDs)
+    compatibleDatastores, _, err := pbmClient.GetCompatibleDatastores(ctx, storagePolicyID, sharedDs)
     if err != nil {
         klog.Errorf("Failed to get compatible datastores from datastores : %+v with storagePolicy: %s. err: %+v",
             sharedDs, storagePolicyID, err)
-        return "", err
+        return nil, err
     }
     klog.V(9).Infof("compatibleDatastores : %+v", compatibleDatastores)
-    datastore, err := getMostFreeDatastoreName(ctx, dc.Client(), compatibleDatastores)
+    datastore, err := getMostFreeDatastore(ctx, vcClient, compatibleDatastores)
     if err != nil {
         klog.Errorf("Failed to get most free datastore from compatible datastores: %+v. err: %+v", compatibleDatastores, err)
-        return "", err
+        return nil, err
     }
-    klog.V(4).Infof("Most free datastore : %+s", datastore)
+    klog.V(4).Infof("Most free datastore : %+s", datastore.Info.Name)
     return datastore, err
 }
 
-func getDatastoresForZone(ctx context.Context, dc *vclib.Datacenter, nodeManager *NodeManager, selectedZones []string) ([]*vclib.DatastoreInfo, error) {
+func getDatastoresForZone(ctx context.Context, nodeManager *NodeManager, selectedZones []string) ([]*vclib.DatastoreInfo, error) {
 
     var sharedDatastores []*vclib.DatastoreInfo
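`getMostFreeDatastore` keeps the familiar max-scan but now returns the whole `*vclib.DatastoreInfo`, so callers retain the datacenter binding instead of re-resolving a bare name. A standalone sketch of the selection logic, with a stand-in type:

```go
package main

import "fmt"

// dsInfo is a stand-in for the summary fields of vclib.DatastoreInfo.
type dsInfo struct {
    Name      string
    FreeSpace int64
}

// mostFree mirrors the loop in getMostFreeDatastore: track the index
// of the largest FreeSpace seen and return that entry.
func mostFree(list []dsInfo) dsInfo {
    curMax := int64(-1)
    index := 0
    for i, ds := range list {
        if ds.FreeSpace > curMax {
            curMax = ds.FreeSpace
            index = i
        }
    }
    return list[index]
}

func main() {
    dss := []dsInfo{{"ds-a", 10 << 30}, {"ds-b", 42 << 30}, {"ds-c", 7 << 30}}
    fmt.Println(mostFree(dss).Name) // ds-b
}
```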
@@ -279,7 +257,7 @@ func getDatastoresForZone(ctx context.Context, dc *vclib.Datacenter, nodeManager
 
             var dsMoList []mo.Datastore
             pc := property.DefaultCollector(host.Client())
-            properties := []string{DatastoreInfoProperty}
+            properties := []string{DatastoreInfoProperty, DatastoreNameProperty}
             err = pc.Retrieve(ctx, dsRefList, properties, &dsMoList)
             if err != nil {
                 klog.Errorf("Failed to get Datastore managed objects from datastore objects."+
@@ -288,12 +266,37 @@ func getDatastoresForZone(ctx context.Context, dc *vclib.Datacenter, nodeManager
             }
             klog.V(9).Infof("Datastore mo details: %+v", dsMoList)
 
+            // find the Datacenter parent for this host
+            mes, err := mo.Ancestors(ctx, host.Client(), pc.Reference(), host.Reference())
+            if err != nil {
+                return nil, err
+            }
+            var dcMoref *types.ManagedObjectReference
+            for i := len(mes) - 1; i > 0; i-- {
+                if mes[i].Self.Type == "Datacenter" {
+                    dcMoref = &mes[i].Self
+                    break
+                }
+            }
+            if dcMoref == nil {
+                return nil, fmt.Errorf("Failed to find the Datacenter of host %s", host)
+            }
+
+            dc := object.NewDatacenter(host.Client(), *dcMoref)
+            finder := find.NewFinder(host.Client(), false)
+            finder.SetDatacenter(dc)
             var dsObjList []*vclib.DatastoreInfo
             for _, dsMo := range dsMoList {
+                // use the finder so that InventoryPath is set correctly in dsObj
+                dsObj, err := finder.Datastore(ctx, dsMo.Name)
+                if err != nil {
+                    klog.Errorf("Failed to find datastore named %s in datacenter %s", dsMo.Name, dc)
+                    return nil, err
+                }
                 dsObjList = append(dsObjList,
                     &vclib.DatastoreInfo{
-                        Datastore: &vclib.Datastore{Datastore: object.NewDatastore(host.Client(), dsMo.Reference()),
-                            Datacenter: nil},
+                        Datastore: &vclib.Datastore{Datastore: dsObj,
+                            Datacenter: &vclib.Datacenter{Datacenter: dc}},
                         Info: dsMo.Info.GetDatastoreInfo()})
             }
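`mo.Ancestors` returns the inventory chain from the root folder down to the entity itself, so scanning it from the end picks the innermost enclosing Datacenter. Schematically, for a hypothetical inventory:

```go
// Hypothetical result of mo.Ancestors for a host managed object:
//   [0] Folder                  "Datacenters" (root)
//   [1] Datacenter              "DC0"          <- found by the reverse scan
//   [2] Folder                  "host"
//   [3] ClusterComputeResource  "cluster1"
//   [4] HostSystem              "host-42"
// Scanning from the end yields the nearest Datacenter even when extra
// folders sit between the root and the datacenter.
```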
@@ -317,40 +320,48 @@ func getDatastoresForZone(ctx context.Context, dc *vclib.Datacenter, nodeManager
     return sharedDatastores, nil
 }
 
-func getPbmCompatibleZonedDatastore(ctx context.Context, dc *vclib.Datacenter, storagePolicyName string, zonedDatastores []*vclib.DatastoreInfo) (string, error) {
-    pbmClient, err := vclib.NewPbmClient(ctx, dc.Client())
+func getPbmCompatibleZonedDatastore(ctx context.Context, vcClient *vim25.Client, storagePolicyName string, zonedDatastores []*vclib.DatastoreInfo) (*vclib.DatastoreInfo, error) {
+    pbmClient, err := vclib.NewPbmClient(ctx, vcClient)
     if err != nil {
-        return "", err
+        return nil, err
     }
     storagePolicyID, err := pbmClient.ProfileIDByName(ctx, storagePolicyName)
     if err != nil {
         klog.Errorf("Failed to get Profile ID by name: %s. err: %+v", storagePolicyName, err)
-        return "", err
+        return nil, err
     }
-    compatibleDatastores, _, err := pbmClient.GetCompatibleDatastores(ctx, dc, storagePolicyID, zonedDatastores)
+    compatibleDatastores, _, err := pbmClient.GetCompatibleDatastores(ctx, storagePolicyID, zonedDatastores)
     if err != nil {
         klog.Errorf("Failed to get compatible datastores from datastores : %+v with storagePolicy: %s. err: %+v",
             zonedDatastores, storagePolicyID, err)
-        return "", err
+        return nil, err
    }
     klog.V(9).Infof("compatibleDatastores : %+v", compatibleDatastores)
-    datastore, err := getMostFreeDatastoreName(ctx, dc.Client(), compatibleDatastores)
+    datastore, err := getMostFreeDatastore(ctx, vcClient, compatibleDatastores)
     if err != nil {
         klog.Errorf("Failed to get most free datastore from compatible datastores: %+v. err: %+v", compatibleDatastores, err)
-        return "", err
+        return nil, err
     }
-    klog.V(4).Infof("Most free datastore : %+s", datastore)
+    klog.V(4).Infof("Most free datastore : %+s", datastore.Info.Name)
     return datastore, err
 }
 
-func (vs *VSphere) setVMOptions(ctx context.Context, dc *vclib.Datacenter, resourcePoolPath string) (*vclib.VMOptions, error) {
+func (vs *VSphere) setVMOptions(ctx context.Context, connection *vclib.VSphereConnection, ds *vclib.Datastore) (*vclib.VMOptions, error) {
     var vmOptions vclib.VMOptions
-    resourcePool, err := dc.GetResourcePool(ctx, resourcePoolPath)
+    dsHosts, err := ds.GetDatastoreHostMounts(ctx)
     if err != nil {
+        klog.Errorf("Failed to get datastore host mounts for %v: %+v", ds, err)
         return nil, err
     }
-    klog.V(9).Infof("Resource pool path %s, resourcePool %+v", resourcePoolPath, resourcePool)
-    folder, err := dc.GetFolderByPath(ctx, vs.cfg.Workspace.Folder)
+    // pick a host at random to use for Volume creation
+    dsHostMoref := dsHosts[rand.New(rand.NewSource(time.Now().UnixNano())).Intn(len(dsHosts))]
+    dummyVMHost := object.NewHostSystem(connection.Client, dsHostMoref)
+    resourcePool, err := dummyVMHost.ResourcePool(ctx)
+    if err != nil {
+        klog.Errorf("Failed to get resource pool from host %v", dummyVMHost)
+        return nil, err
+    }
+    folder, err := ds.Datacenter.GetFolderByPath(ctx, vs.cfg.Workspace.Folder)
     if err != nil {
         return nil, err
     }
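Since any host that mounts the datastore can place the dummy VM, setVMOptions now picks one uniformly at random instead of requiring a configured resource-pool path. A standalone sketch of the pick (the diff seeds a local source explicitly; on Go 1.20+ the global source is already seeded, so `rand.Intn` alone would do):

```go
package main

import (
    "fmt"
    "math/rand"
    "time"
)

func main() {
    // Stand-ins for the []types.ManagedObjectReference returned by
    // GetDatastoreHostMounts.
    hosts := []string{"host-12", "host-34", "host-56"}

    // Same pattern as the diff: a locally seeded source, then a
    // uniform pick over the hosts that mount the datastore.
    r := rand.New(rand.NewSource(time.Now().UnixNano()))
    picked := hosts[r.Intn(len(hosts))]
    fmt.Println("creating volume via", picked)
}
```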
@@ -366,32 +377,30 @@ func (vs *VSphere) cleanUpDummyVMs(dummyVMPrefix string) {
     defer cancel()
     for {
         time.Sleep(CleanUpDummyVMRoutineInterval * time.Minute)
-        vsi, err := vs.getVSphereInstanceForServer(vs.cfg.Workspace.VCenterIP, ctx)
+        datacenters, err := vs.GetWorkspaceDatacenters(ctx)
         if err != nil {
-            klog.V(4).Infof("Failed to get VSphere instance with err: %+v. Retrying again...", err)
+            klog.V(4).Infof("Failed to get datacenters from VC. err: %+v", err)
             continue
         }
-        dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter)
-        if err != nil {
-            klog.V(4).Infof("Failed to get the datacenter: %s from VC. err: %+v", vs.cfg.Workspace.Datacenter, err)
-            continue
-        }
-        // Get the folder reference for global working directory where the dummy VM needs to be created.
-        vmFolder, err := dc.GetFolderByPath(ctx, vs.cfg.Workspace.Folder)
-        if err != nil {
-            klog.V(4).Infof("Unable to get the kubernetes folder: %q reference. err: %+v", vs.cfg.Workspace.Folder, err)
-            continue
-        }
-        // A write lock is acquired to make sure the cleanUp routine doesn't delete any VM's created by ongoing PVC requests.
-        cleanUpDummyVMs := func() {
-            cleanUpDummyVMLock.Lock()
-            defer cleanUpDummyVMLock.Unlock()
-            err = diskmanagers.CleanUpDummyVMs(ctx, vmFolder, dc)
+        // Clean up dummy VMs in each datacenter
+        for _, dc := range datacenters {
+            // Get the folder reference for global working directory where the dummy VM needs to be created.
+            vmFolder, err := dc.GetFolderByPath(ctx, vs.cfg.Workspace.Folder)
             if err != nil {
-                klog.V(4).Infof("Unable to clean up dummy VM's in the kubernetes cluster: %q. err: %+v", vs.cfg.Workspace.Folder, err)
+                klog.V(4).Infof("Unable to get the kubernetes folder: %q reference. err: %+v", vs.cfg.Workspace.Folder, err)
+                continue
             }
+            // A write lock is acquired to make sure the cleanUp routine doesn't delete any VM's created by ongoing PVC requests.
+            cleanUpDummyVMs := func() {
+                cleanUpDummyVMLock.Lock()
+                defer cleanUpDummyVMLock.Unlock()
+                err = diskmanagers.CleanUpDummyVMs(ctx, vmFolder)
+                if err != nil {
+                    klog.V(4).Infof("Unable to clean up dummy VM's in the kubernetes cluster: %q. err: %+v", vs.cfg.Workspace.Folder, err)
+                }
+            }
+            cleanUpDummyVMs()
         }
-        cleanUpDummyVMs()
     }
 }
@@ -586,43 +595,6 @@ func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeN
     return nodesToRetry, nil
 }
 
-func (vs *VSphere) IsDummyVMPresent(vmName string) (bool, error) {
-    isDummyVMPresent := false
-
-    // Create context
-    ctx, cancel := context.WithCancel(context.Background())
-    defer cancel()
-
-    vsi, err := vs.getVSphereInstanceForServer(vs.cfg.Workspace.VCenterIP, ctx)
-    if err != nil {
-        return isDummyVMPresent, err
-    }
-
-    dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter)
-    if err != nil {
-        return isDummyVMPresent, err
-    }
-
-    vmFolder, err := dc.GetFolderByPath(ctx, vs.cfg.Workspace.Folder)
-    if err != nil {
-        return isDummyVMPresent, err
-    }
-
-    vms, err := vmFolder.GetVirtualMachines(ctx)
-    if err != nil {
-        return isDummyVMPresent, err
-    }
-
-    for _, vm := range vms {
-        if vm.Name() == vmName {
-            isDummyVMPresent = true
-            break
-        }
-    }
-
-    return isDummyVMPresent, nil
-}
-
 func (vs *VSphere) GetNodeNameFromProviderID(providerID string) (string, error) {
     var nodeName string
     nodes, err := vs.nodeManager.GetNodeDetails()
@@ -698,3 +670,63 @@ func GetVMUUID() (string, error) {
     uuid = fmt.Sprintf("%s-%s-%s-%s-%s", uuid[0:8], uuid[8:12], uuid[12:16], uuid[16:20], uuid[20:32])
     return uuid, nil
 }
+
+// GetWorkspaceDatacenters returns the Datacenter objects that VCP has access to.
+// User can configure the list of datacenters in vsphere.conf. Otherwise all the
+// Datacenters in the configured list of VCs are returned.
+func (vs *VSphere) GetWorkspaceDatacenters(ctx context.Context) ([]*vclib.Datacenter, error) {
+    var datacenterObjs []*vclib.Datacenter
+    for vc, vsi := range vs.vsphereInstanceMap {
+        // ensure connection to VC
+        err := vs.nodeManager.vcConnect(ctx, vsi)
+        if err != nil {
+            return nil, err
+        }
+        if vsi.cfg.Datacenters == "" {
+            vcDatacenterObjs, err := vclib.GetAllDatacenter(ctx, vsi.conn)
+            if err != nil {
+                klog.Errorf("Error fetching list of datacenters from VC %s: %+v", vc, err)
+                return nil, err
+            }
+            datacenterObjs = append(datacenterObjs, vcDatacenterObjs...)
+        } else {
+            datacenters := strings.Split(vsi.cfg.Datacenters, ",")
+            for _, dc := range datacenters {
+                dc = strings.TrimSpace(dc)
+                if dc == "" {
+                    continue
+                }
+                datacenterObj, err := vclib.GetDatacenter(ctx, vsi.conn, dc)
+                if err != nil {
+                    klog.Errorf("Error fetching datacenter %s from VC %s: %+v", dc, vc, err)
+                    return nil, err
+                }
+                datacenterObjs = append(datacenterObjs, datacenterObj)
+            }
+        }
+    }
+    return datacenterObjs, nil
+}
+
+// FindDatastoreByName looks for the given datastore by name across all available datacenters.
+// If more than one Datacenter has a Datastore with the given name, then returns reference to all of them.
+func (vs *VSphere) FindDatastoreByName(ctx context.Context, datastoreName string) ([]*vclib.DatastoreInfo, error) {
+    datacenters, err := vs.GetWorkspaceDatacenters(ctx)
+    if err != nil {
+        return nil, err
+    }
+    var datastoreInfos []*vclib.DatastoreInfo
+    for _, dc := range datacenters {
+        datastoreInfo, err := dc.GetDatastoreInfoByName(ctx, datastoreName)
+        if err != nil {
+            klog.V(9).Infof("Did not find datastore %s in datacenter %s, still looking.", datastoreName, dc.Name())
+            continue
+        }
+        datastoreInfos = append(datastoreInfos, datastoreInfo)
+    }
+    if len(datastoreInfos) == 0 {
+        return nil, fmt.Errorf("Datastore '%s' not found", datastoreName)
+    }
+    klog.V(4).Infof("Found datastore infos %v for datastore %s", datastoreInfos, datastoreName)
+    return datastoreInfos, nil
+}
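Putting the two new helpers together, a caller can resolve one display name to every backing datastore and then dedupe by URL, which is the stable identity when several datacenters expose the same name. A hedged sketch, assuming `vs` is a configured `*VSphere` instance (the datastore name is hypothetical):

```go
// Sketch only: requires a live vCenter connection.
dsInfos, err := vs.FindDatastoreByName(ctx, "sharedVmfs-0") // hypothetical name
if err != nil {
    return err
}
// Two datacenters can expose the same display name; the backing
// datastore URL is unique, as in candidateDatastores in CreateVolume.
byURL := make(map[string]*vclib.DatastoreInfo, len(dsInfos))
for _, info := range dsInfos {
    byURL[info.Info.Url] = info
}
klog.V(4).Infof("%d distinct datastores share that name", len(byURL))
```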
@@ -70,7 +70,7 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]",
     scParameters[DiskFormat] = ThinDisk
     err := invokeInvalidDatastoreTestNeg(client, namespace, scParameters)
     Expect(err).To(HaveOccurred())
-    errorMsg := `Failed to provision volume with StorageClass \"` + DatastoreSCName + `\": The specified datastore ` + InvalidDatastore + ` is not a shared datastore across node VMs`
+    errorMsg := `Failed to provision volume with StorageClass \"` + DatastoreSCName + `\": Datastore ` + InvalidDatastore + ` not found`
     if !strings.Contains(err.Error(), errorMsg) {
         Expect(err).NotTo(HaveOccurred(), errorMsg)
     }
@@ -176,6 +176,13 @@ var _ = utils.SIGDescribe("Zone Support", func() {
         verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones)
     })
 
+    It("Verify a pod is created on a non-Workspace zone and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func() {
+        By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneB, compatPolicy))
+        scParameters[SpbmStoragePolicy] = compatPolicy
+        zones = append(zones, zoneB)
+        verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones)
+    })
+
     It("Verify PVC creation with incompatible storagePolicy and zone combination specified in storage class fails", func() {
         By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, nonCompatPolicy))
         scParameters[SpbmStoragePolicy] = nonCompatPolicy