Merge pull request #43545 from luomiao/vsphere-remove-loginInfo-on-workers-update
Automatic merge from submit-queue (batch tested with PRs 43545, 44293, 44221, 43888)

Remove credentials on worker nodes for vSphere cloud provider.

**What this PR does / why we need it**: Removes the dependency on login information for worker nodes in the vSphere cloud provider:
1. The VM name is required to be set in the cloud provider configuration file.
2. The Instances functions no longer require a login when querying local node information.

**Which issue this PR fixes**: fixes https://github.com/kubernetes/kubernetes/issues/35339

**Release note**:
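With this change, a worker node's vsphere.conf only needs the VM identity, not vCenter credentials. Below is a minimal sketch of parsing such a config with gcfg, in the same style as the test file later in this diff. The `vm-uuid` and `vm-name` tags come from the diff; the `working-dir` tag, the import path, and the concrete values (`us-west`, `kubernetes/`, `worker-node-1`) are illustrative assumptions.

```go
package main

import (
	"fmt"
	"log"

	gcfg "gopkg.in/gcfg.v1"
)

// Subset of VSphereConfig's Global section; tags as in (or assumed from) the diff.
type vsphereConfig struct {
	Global struct {
		User       string `gcfg:"user"`
		Password   string `gcfg:"password"`
		Datacenter string `gcfg:"datacenter"`
		WorkingDir string `gcfg:"working-dir"`
		VMUUID     string `gcfg:"vm-uuid"`
		VMName     string `gcfg:"vm-name"`
	}
}

func main() {
	// Worker-node config: vm-name is set, so user/password may be omitted.
	conf := `[Global]
datacenter = us-west
working-dir = kubernetes/
vm-name = worker-node-1`

	var cfg vsphereConfig
	if err := gcfg.ReadStringInto(&cfg, conf); err != nil {
		log.Fatal(err)
	}
	// Per the new config comments, WorkingDir + VMName forms a unique InstanceID.
	fmt.Println("InstanceID:", cfg.Global.WorkingDir+cfg.Global.VMName)
}
```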
commit 6283077fb5
@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
+	"net"
 	"net/url"
 	"path"
 	"path/filepath"
@@ -68,6 +69,8 @@ const (
 	RoundTripperDefaultCount = 3
 	DummyVMName              = "kubernetes-helper-vm"
 	VSANDatastoreType        = "vsan"
+	MAC_OUI_VC               = "00:50:56"
+	MAC_OUI_ESX              = "00:0c:29"
 )

 // Controller types that are currently supported for hot attach of disks
@@ -128,6 +131,10 @@ type VSphereConfig struct {
 		// property in VmConfigInfo, or also set as vc.uuid in VMX file.
 		// If not set, will be fetched from the machine via sysfs (requires root)
 		VMUUID string `gcfg:"vm-uuid"`
+		// VMName is the VM name of virtual machine
+		// Combining the WorkingDir and VMName can form a unique InstanceID.
+		// When vm-name is set, no username/password is required on worker nodes.
+		VMName string `gcfg:"vm-name"`
 	}

 	Network struct {
@@ -280,15 +287,23 @@ func newVSphere(cfg VSphereConfig) (*VSphere, error) {
 		glog.Warningf("port is a deprecated field in vsphere.conf and will be removed in future release.")
 	}

+	var c *govmomi.Client
+	var id string
+	if cfg.Global.VMName == "" {
+		// If VMName is not set in the cloud config file, each node (including worker nodes) needs credentials to obtain VMName from vCenter
+		glog.V(4).Infof("Cannot find VMName from cloud config file, start obtaining it from vCenter")
 		c, err := newClient(context.TODO(), &cfg)
 		if err != nil {
 			return nil, err
 		}

-		id, err := getVMName(c, &cfg)
+		id, err = getVMName(c, &cfg)
 		if err != nil {
 			return nil, err
 		}
+	} else {
+		id = cfg.Global.VMName
+	}

 	vs := VSphere{
 		client: c,
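This hunk is the heart of the change: when `vm-name` is present, `newVSphere` never creates a govmomi client at startup, so worker nodes need no credentials. A toy, self-contained sketch of that decision (the names and bodies here are illustrative stand-ins, not the provider's code):

```go
package main

import (
	"errors"
	"fmt"
)

type config struct {
	vmName, user, password string
}

// resolveInstanceID mimics the new branch in newVSphere: prefer the
// configured vm-name; only fall back to vCenter when it is absent.
func resolveInstanceID(cfg config) (string, error) {
	if cfg.vmName != "" {
		return cfg.vmName, nil // worker-node path: no vCenter login
	}
	if cfg.user == "" || cfg.password == "" {
		return "", errors.New("vm-name unset: credentials required to query vCenter")
	}
	return "", errors.New("stand-in for newClient + getVMName against vCenter")
}

func main() {
	id, err := resolveInstanceID(config{vmName: "worker-node-1"})
	fmt.Println(id, err) // worker-node-1 <nil>
}
```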
@@ -311,8 +326,10 @@ func checkControllerSupported(ctrlType string) bool {
 }

 func logout(vs *VSphere) {
+	if vs.client != nil {
 		vs.client.Logout(context.TODO())
+	}
 }

 func newClient(ctx context.Context, cfg *VSphereConfig) (*govmomi.Client, error) {
 	// Parse URL from string
@@ -396,119 +413,102 @@ func getVirtualMachineByName(ctx context.Context, cfg *VSphereConfig, c *govmomi
 	return vm, nil
 }

-func getVirtualMachineManagedObjectReference(ctx context.Context, c *govmomi.Client, vm *object.VirtualMachine, field string, dst interface{}) error {
-	collector := property.DefaultCollector(c.Client)
-
-	// Retrieve required field from VM object
-	err := collector.RetrieveOne(ctx, vm.Reference(), []string{field}, dst)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// Returns names of running VMs inside VM folder.
-func getInstances(ctx context.Context, cfg *VSphereConfig, c *govmomi.Client, filter string) ([]string, error) {
-	f := find.NewFinder(c.Client, true)
-	dc, err := f.Datacenter(ctx, cfg.Global.Datacenter)
-	if err != nil {
-		return nil, err
-	}
-
-	f.SetDatacenter(dc)
-
-	vmRegex := cfg.Global.WorkingDir + filter
-
-	//TODO: get all vms inside subfolders
-	vms, err := f.VirtualMachineList(ctx, vmRegex)
-	if err != nil {
-		return nil, err
-	}
-
-	var vmRef []types.ManagedObjectReference
-	for _, vm := range vms {
-		vmRef = append(vmRef, vm.Reference())
-	}
-
-	pc := property.DefaultCollector(c.Client)
-
-	var vmt []mo.VirtualMachine
-	err = pc.Retrieve(ctx, vmRef, []string{"name", "summary"}, &vmt)
-	if err != nil {
-		return nil, err
-	}
-
-	var vmList []string
-	for _, vm := range vmt {
-		if vm.Summary.Runtime.PowerState == ActivePowerState {
-			vmList = append(vmList, vm.Name)
-		} else if vm.Summary.Config.Template == false {
-			glog.Warningf("VM %s, is not in %s state", vm.Name, ActivePowerState)
-		}
-	}
-	return vmList, nil
-}
-
-type Instances struct {
-	client          *govmomi.Client
-	cfg             *VSphereConfig
-	localInstanceID string
-}
-
 // Instances returns an implementation of Instances for vSphere.
 func (vs *VSphere) Instances() (cloudprovider.Instances, bool) {
-	// Ensure client is logged in and session is valid
-	err := vSphereLogin(context.TODO(), vs)
-	if err != nil {
-		glog.Errorf("Failed to login into vCenter - %v", err)
-		return nil, false
-	}
-	return &Instances{vs.client, vs.cfg, vs.localInstanceID}, true
+	return vs, true
 }

-// List returns names of VMs (inside vm folder) by applying filter and which are currently running.
-func (vs *VSphere) list(filter string) ([]k8stypes.NodeName, error) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	vmList, err := getInstances(ctx, vs.cfg, vs.client, filter)
-	if err != nil {
-		return nil, err
-	}
-
-	glog.V(3).Infof("Found %d instances matching %s: %s",
-		len(vmList), filter, vmList)
-
-	var nodeNames []k8stypes.NodeName
-	for _, n := range vmList {
-		nodeNames = append(nodeNames, k8stypes.NodeName(n))
-	}
-	return nodeNames, nil
-}
+func getLocalIP() ([]v1.NodeAddress, error) {
+	addrs := []v1.NodeAddress{}
+
+	ifaces, err := net.Interfaces()
+	if err != nil {
+		glog.Errorf("net.Interfaces() failed for NodeAddresses - %v", err)
+		return nil, err
+	}
+
+	for _, i := range ifaces {
+		localAddrs, err := i.Addrs()
+		if err != nil {
+			glog.Warningf("Failed to extract addresses for NodeAddresses - %v", err)
+		} else {
+			for _, addr := range localAddrs {
+				if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
+					if ipnet.IP.To4() != nil {
+						// Filter external IP by MAC address OUIs from vCenter and from ESX
+						var addressType v1.NodeAddressType
+						if strings.HasPrefix(i.HardwareAddr.String(), MAC_OUI_VC) ||
+							strings.HasPrefix(i.HardwareAddr.String(), MAC_OUI_ESX) {
+							addressType = v1.NodeExternalIP
+						} else {
+							addressType = v1.NodeInternalIP
+						}
+						v1.AddToNodeAddresses(&addrs,
+							v1.NodeAddress{
+								Type:    addressType,
+								Address: ipnet.IP.String(),
+							},
+						)
+						glog.V(4).Infof("Find local IP address %v and set type to %v", ipnet.IP.String(), addressType)
+					}
+				}
+			}
+		}
+	}
+	return addrs, nil
+}
+
+// getVMandMO returns the VM object and required field from the VM object
+func (vs *VSphere) getVMandMO(ctx context.Context, nodeName k8stypes.NodeName, field string) (vm *object.VirtualMachine, mvm *mo.VirtualMachine, err error) {
+	// Ensure client is logged in and session is valid
+	err = vSphereLogin(ctx, vs)
+	if err != nil {
+		glog.Errorf("Failed to login into vCenter - %v", err)
+		return nil, nil, err
+	}
+
+	vm, err = getVirtualMachineByName(ctx, vs.cfg, vs.client, nodeName)
+	if err != nil {
+		if _, ok := err.(*find.NotFoundError); ok {
+			return nil, nil, cloudprovider.InstanceNotFound
+		}
+		return nil, nil, err
+	}
+
+	// Retrieve required field from VM object
+	var movm mo.VirtualMachine
+	collector := property.DefaultCollector(vs.client.Client)
+	err = collector.RetrieveOne(ctx, vm.Reference(), []string{field}, &movm)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return vm, &movm, nil
+}

 // NodeAddresses is an implementation of Instances.NodeAddresses.
-func (i *Instances) NodeAddresses(nodeName k8stypes.NodeName) ([]v1.NodeAddress, error) {
+func (vs *VSphere) NodeAddresses(nodeName k8stypes.NodeName) ([]v1.NodeAddress, error) {
+	if vs.localInstanceID == nodeNameToVMName(nodeName) {
+		/* Get local IP addresses if node is local node */
+		return getLocalIP()
+	}
+
 	addrs := []v1.NodeAddress{}

 	// Create context
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

-	vm, err := getVirtualMachineByName(ctx, i.cfg, i.client, nodeName)
-	if err != nil {
-		return nil, err
-	}
-
-	var mvm mo.VirtualMachine
-	err = getVirtualMachineManagedObjectReference(ctx, i.client, vm, "guest.net", &mvm)
+	_, mvm, err := vs.getVMandMO(ctx, nodeName, "guest.net")
 	if err != nil {
-		return nil, err
+		glog.Errorf("Failed to getVMandMO for NodeAddresses: err %v", err)
+		return addrs, err
 	}

 	// retrieve VM's ip(s)
 	for _, v := range mvm.Guest.Net {
 		var addressType v1.NodeAddressType
-		if i.cfg.Network.PublicNetwork == v.Network {
+		if vs.cfg.Network.PublicNetwork == v.Network {
 			addressType = v1.NodeExternalIP
 		} else {
 			addressType = v1.NodeInternalIP
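`getLocalIP` classifies addresses without talking to vCenter at all: an interface whose MAC starts with a VMware OUI (`00:50:56` for vCenter-assigned NICs, `00:0c:29` for ESX-assigned ones) is treated as carrying the external IP. The standalone sketch below reproduces that classification with only the standard library; it is an illustration, not the provider code.

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

const (
	macOUIVC  = "00:50:56" // vCenter-assigned NICs (MAC_OUI_VC in the diff)
	macOUIESX = "00:0c:29" // ESX-assigned NICs (MAC_OUI_ESX in the diff)
)

func main() {
	ifaces, err := net.Interfaces()
	if err != nil {
		panic(err)
	}
	for _, iface := range ifaces {
		// Same rule as getLocalIP: a VMware OUI on the MAC marks the
		// interface whose IPv4 address is reported as the external one.
		kind := "InternalIP"
		mac := iface.HardwareAddr.String()
		if strings.HasPrefix(mac, macOUIVC) || strings.HasPrefix(mac, macOUIESX) {
			kind = "ExternalIP"
		}
		addrs, err := iface.Addrs()
		if err != nil {
			continue // mirror the provider's tolerant handling of per-interface errors
		}
		for _, addr := range addrs {
			ipnet, ok := addr.(*net.IPNet)
			if ok && !ipnet.IP.IsLoopback() && ipnet.IP.To4() != nil {
				fmt.Printf("%-8s %-10s %s\n", iface.Name, kind, ipnet.IP)
			}
		}
	}
}
```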
@@ -528,16 +528,16 @@ func (i *Instances) NodeAddresses(nodeName k8stypes.NodeName) ([]v1.NodeAddress,
 // NodeAddressesByProviderID returns the node addresses of an instances with the specified unique providerID
 // This method will not be called from the node that is requesting this ID. i.e. metadata service
 // and other local methods cannot be used here
-func (i *Instances) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) {
+func (vs *VSphere) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) {
 	return []v1.NodeAddress{}, errors.New("unimplemented")
 }

-func (i *Instances) AddSSHKeyToAllInstances(user string, keyData []byte) error {
+func (vs *VSphere) AddSSHKeyToAllInstances(user string, keyData []byte) error {
 	return errors.New("unimplemented")
 }

-func (i *Instances) CurrentNodeName(hostname string) (k8stypes.NodeName, error) {
-	return k8stypes.NodeName(i.localInstanceID), nil
+func (vs *VSphere) CurrentNodeName(hostname string) (k8stypes.NodeName, error) {
+	return vmNameToNodeName(vs.localInstanceID), nil
 }

 // nodeNameToVMName maps a NodeName to the vmware infrastructure name
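The trailing context above references `nodeNameToVMName` and `vmNameToNodeName`, whose bodies are not part of this diff. Judging by the call sites here, which treat the node name and the VM name as the same string, they are presumably plain type conversions; a hypothetical sketch:

```go
package main

import "fmt"

// NodeName mimics k8s.io/apimachinery/pkg/types.NodeName, a string type.
type NodeName string

// Hypothetical bodies: the diff only shows call sites, and those use the
// node name and the VM name interchangeably, so a plain conversion.
func nodeNameToVMName(nodeName NodeName) string { return string(nodeName) }
func vmNameToNodeName(vmName string) NodeName   { return NodeName(vmName) }

func main() {
	n := NodeName("worker-node-1")
	fmt.Println(nodeNameToVMName(n))                    // worker-node-1
	fmt.Println(vmNameToNodeName("worker-node-1") == n) // true
}
```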
@@ -551,22 +551,18 @@ func vmNameToNodeName(vmName string) k8stypes.NodeName {
 }

 // ExternalID returns the cloud provider ID of the node with the specified Name (deprecated).
-func (i *Instances) ExternalID(nodeName k8stypes.NodeName) (string, error) {
+func (vs *VSphere) ExternalID(nodeName k8stypes.NodeName) (string, error) {
+	if vs.localInstanceID == nodeNameToVMName(nodeName) {
+		return vs.cfg.Global.WorkingDir + vs.localInstanceID, nil
+	}
+
 	// Create context
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

-	vm, err := getVirtualMachineByName(ctx, i.cfg, i.client, nodeName)
-	if err != nil {
-		if _, ok := err.(*find.NotFoundError); ok {
-			return "", cloudprovider.InstanceNotFound
-		}
-		return "", err
-	}
-
-	var mvm mo.VirtualMachine
-	err = getVirtualMachineManagedObjectReference(ctx, i.client, vm, "summary", &mvm)
+	vm, mvm, err := vs.getVMandMO(ctx, nodeName, "summary")
 	if err != nil {
+		glog.Errorf("Failed to getVMandMO for ExternalID: err %v", err)
 		return "", err
 	}

@@ -584,22 +580,18 @@ func (i *Instances) ExternalID(nodeName k8stypes.NodeName) (string, error) {
 }

 // InstanceID returns the cloud provider ID of the node with the specified Name.
-func (i *Instances) InstanceID(nodeName k8stypes.NodeName) (string, error) {
+func (vs *VSphere) InstanceID(nodeName k8stypes.NodeName) (string, error) {
+	if vs.localInstanceID == nodeNameToVMName(nodeName) {
+		return vs.cfg.Global.WorkingDir + vs.localInstanceID, nil
+	}
+
 	// Create context
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

-	vm, err := getVirtualMachineByName(ctx, i.cfg, i.client, nodeName)
-	if err != nil {
-		if _, ok := err.(*find.NotFoundError); ok {
-			return "", cloudprovider.InstanceNotFound
-		}
-		return "", err
-	}
-
-	var mvm mo.VirtualMachine
-	err = getVirtualMachineManagedObjectReference(ctx, i.client, vm, "summary", &mvm)
+	vm, mvm, err := vs.getVMandMO(ctx, nodeName, "summary")
 	if err != nil {
+		glog.Errorf("Failed to getVMandMO for InstanceID: err %v", err)
 		return "", err
 	}

@@ -619,11 +611,11 @@ func (i *Instances) InstanceID(nodeName k8stypes.NodeName) (string, error) {
 // InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID
 // This method will not be called from the node that is requesting this ID. i.e. metadata service
 // and other local methods cannot be used here
-func (i *Instances) InstanceTypeByProviderID(providerID string) (string, error) {
+func (vs *VSphere) InstanceTypeByProviderID(providerID string) (string, error) {
 	return "", errors.New("unimplemented")
 }

-func (i *Instances) InstanceType(name k8stypes.NodeName) (string, error) {
+func (vs *VSphere) InstanceType(name k8stypes.NodeName) (string, error) {
 	return "", nil
 }

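After this batch of receiver changes, every `cloudprovider.Instances` method hangs off `*VSphere` directly, which is why `Instances()` can simply `return vs, true` and the separate `Instances` wrapper struct is deleted. A self-contained sketch of the pattern with a reduced interface (types and method bodies here are stand-ins, not the real ones):

```go
package main

import "fmt"

type NodeName string

// Reduced stand-in for cloudprovider.Instances (two methods from the diff).
type instancesSubset interface {
	CurrentNodeName(hostname string) (NodeName, error)
	InstanceID(nodeName NodeName) (string, error)
}

// Stand-in provider struct; after this PR the methods live on *VSphere
// itself, so no separate Instances wrapper type is needed.
type VSphere struct {
	localInstanceID string
	workingDir      string
}

func (vs *VSphere) CurrentNodeName(hostname string) (NodeName, error) {
	return NodeName(vs.localInstanceID), nil
}

func (vs *VSphere) InstanceID(nodeName NodeName) (string, error) {
	if vs.localInstanceID == string(nodeName) {
		// Local fast path from the diff: no vCenter session required.
		return vs.workingDir + vs.localInstanceID, nil
	}
	return "", fmt.Errorf("looking up %q would need a vCenter session", nodeName)
}

// Compile-time proof that *VSphere satisfies the (reduced) interface,
// which is what lets Instances() simply return vs.
var _ instancesSubset = (*VSphere)(nil)

func main() {
	vs := &VSphere{localInstanceID: "worker-node-1", workingDir: "kubernetes/"}
	id, _ := vs.InstanceID("worker-node-1")
	fmt.Println(id) // kubernetes/worker-node-1
}
```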
@@ -940,7 +932,7 @@ func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (b
 		vSphereInstance = nodeNameToVMName(nodeName)
 	}

-	nodeExist, err := vs.NodeExists(vs.client, nodeName)
+	nodeExist, err := vs.NodeExists(nodeName)
 	if err != nil {
 		glog.Errorf("Failed to check whether node exist. err: %s.", err)
 		return false, err
@@ -991,7 +983,7 @@ func (vs *VSphere) DisksAreAttached(volPaths []string, nodeName k8stypes.NodeNam
 		vSphereInstance = nodeNameToVMName(nodeName)
 	}

-	nodeExist, err := vs.NodeExists(vs.client, nodeName)
+	nodeExist, err := vs.NodeExists(nodeName)

 	if err != nil {
 		glog.Errorf("Failed to check whether node exist. err: %s.", err)
@@ -1332,7 +1324,7 @@ func (vs *VSphere) DeleteVolume(vmDiskPath string) error {

 // NodeExists checks if the node with given nodeName exist.
 // Returns false if VM doesn't exist or VM is in powerOff state.
-func (vs *VSphere) NodeExists(c *govmomi.Client, nodeName k8stypes.NodeName) (bool, error) {
+func (vs *VSphere) NodeExists(nodeName k8stypes.NodeName) (bool, error) {
 	if nodeName == "" {
 		return false, nil
 	}
@@ -1340,19 +1332,9 @@ func (vs *VSphere) NodeExists(c *govmomi.Client, nodeName k8stypes.NodeName) (bo
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

-	vm, err := getVirtualMachineByName(ctx, vs.cfg, c, nodeName)
+	_, mvm, err := vs.getVMandMO(ctx, nodeName, "summary")
 	if err != nil {
-		if _, ok := err.(*find.NotFoundError); ok {
-			return false, nil
-		}
-		glog.Errorf("Failed to get virtual machine object for node %+q. err %s", nodeName, err)
-		return false, err
-	}
-
-	var mvm mo.VirtualMachine
-	err = getVirtualMachineManagedObjectReference(ctx, c, vm, "summary", &mvm)
-	if err != nil {
-		glog.Errorf("Failed to get virtual machine object reference for node %+q. err %s", nodeName, err)
+		glog.Errorf("Failed to getVMandMO for NodeExists: err %v", err)
 		return false, err
 	}

@@ -40,6 +40,7 @@ func configFromEnv() (cfg VSphereConfig, ok bool) {
 	cfg.Global.Datastore = os.Getenv("VSPHERE_DATASTORE")
 	cfg.Disk.SCSIControllerType = os.Getenv("VSPHERE_SCSICONTROLLER_TYPE")
 	cfg.Global.WorkingDir = os.Getenv("VSPHERE_WORKING_DIR")
+	cfg.Global.VMName = os.Getenv("VSPHERE_VM_NAME")
 	if os.Getenv("VSPHERE_INSECURE") != "" {
 		InsecureFlag, err = strconv.ParseBool(os.Getenv("VSPHERE_INSECURE"))
 	} else {
@@ -71,6 +72,7 @@ password = password
 insecure-flag = true
 datacenter = us-west
 vm-uuid = 1234
+vm-name = vmname
 `))
 	if err != nil {
 		t.Fatalf("Should succeed when a valid config is provided: %s", err)
@@ -87,6 +89,10 @@ vm-uuid = 1234
 	if cfg.Global.VMUUID != "1234" {
 		t.Errorf("incorrect vm-uuid: %s", cfg.Global.VMUUID)
 	}
+
+	if cfg.Global.VMName != "vmname" {
+		t.Errorf("incorrect vm-name: %s", cfg.Global.VMName)
+	}
 }

 func TestNewVSphere(t *testing.T) {
@@ -156,21 +162,16 @@ func TestInstances(t *testing.T) {
 		t.Fatalf("Instances() returned false")
 	}

-	srvs, err := vs.list("*")
+	nodeName, err := vs.CurrentNodeName("")
 	if err != nil {
-		t.Fatalf("list() failed: %s", err)
+		t.Fatalf("CurrentNodeName() failed: %s", err)
 	}

-	if len(srvs) == 0 {
-		t.Fatalf("list() returned zero servers")
-	}
-	t.Logf("Found servers (%d): %s\n", len(srvs), srvs)
-
-	externalId, err := i.ExternalID(srvs[0])
+	externalId, err := i.ExternalID(nodeName)
 	if err != nil {
-		t.Fatalf("Instances.ExternalID(%s) failed: %s", srvs[0], err)
+		t.Fatalf("Instances.ExternalID(%s) failed: %s", nodeName, err)
 	}
-	t.Logf("Found ExternalID(%s) = %s\n", srvs[0], externalId)
+	t.Logf("Found ExternalID(%s) = %s\n", nodeName, externalId)

 	nonExistingVM := types.NodeName(rand.String(15))
 	externalId, err = i.ExternalID(nonExistingVM)
@@ -182,11 +183,11 @@ func TestInstances(t *testing.T) {
 		t.Fatalf("Instances.ExternalID did not fail as expected, err: %v", err)
 	}

-	instanceId, err := i.InstanceID(srvs[0])
+	instanceId, err := i.InstanceID(nodeName)
 	if err != nil {
-		t.Fatalf("Instances.InstanceID(%s) failed: %s", srvs[0], err)
+		t.Fatalf("Instances.InstanceID(%s) failed: %s", nodeName, err)
 	}
-	t.Logf("Found InstanceID(%s) = %s\n", srvs[0], instanceId)
+	t.Logf("Found InstanceID(%s) = %s\n", nodeName, instanceId)

 	instanceId, err = i.InstanceID(nonExistingVM)
 	if err == cloudprovider.InstanceNotFound {
@@ -197,11 +198,11 @@ func TestInstances(t *testing.T) {
 		t.Fatalf("Instances.InstanceID did not fail as expected, err: %v", err)
 	}

-	addrs, err := i.NodeAddresses(srvs[0])
+	addrs, err := i.NodeAddresses(nodeName)
 	if err != nil {
-		t.Fatalf("Instances.NodeAddresses(%s) failed: %s", srvs[0], err)
+		t.Fatalf("Instances.NodeAddresses(%s) failed: %s", nodeName, err)
 	}
-	t.Logf("Found NodeAddresses(%s) = %s\n", srvs[0], addrs)
+	t.Logf("Found NodeAddresses(%s) = %s\n", nodeName, addrs)
 }

 func TestVolumes(t *testing.T) {
@@ -215,12 +216,9 @@ func TestVolumes(t *testing.T) {
 		t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
 	}

-	srvs, err := vs.list("*")
+	nodeName, err := vs.CurrentNodeName("")
 	if err != nil {
-		t.Fatalf("list() failed: %s", err)
-	}
-	if len(srvs) == 0 {
-		t.Fatalf("list() returned zero servers")
+		t.Fatalf("CurrentNodeName() failed: %s", err)
 	}

 	volumeOptions := &VolumeOptions{
@@ -236,12 +234,12 @@ func TestVolumes(t *testing.T) {

 	_, _, err = vs.AttachDisk(volPath, "")
 	if err != nil {
-		t.Fatalf("Cannot attach volume(%s) to VM(%s): %v", volPath, srvs[0], err)
+		t.Fatalf("Cannot attach volume(%s) to VM(%s): %v", volPath, nodeName, err)
 	}

 	err = vs.DetachDisk(volPath, "")
 	if err != nil {
-		t.Fatalf("Cannot detach disk(%s) from VM(%s): %v", volPath, srvs[0], err)
+		t.Fatalf("Cannot detach disk(%s) from VM(%s): %v", volPath, nodeName, err)
 	}

 	// todo: Deleting a volume after detach currently not working through API or UI (vSphere)
@@ -48,6 +48,7 @@ func getVSphereConfig() *VSphereConfig {
 	cfg.Global.Datacenter = os.Getenv("VSPHERE_DATACENTER")
 	cfg.Global.Datastore = os.Getenv("VSPHERE_DATASTORE")
 	cfg.Global.WorkingDir = os.Getenv("VSPHERE_WORKING_DIR")
+	cfg.Global.VMName = os.Getenv("VSPHERE_VM_NAME")
 	cfg.Global.InsecureFlag = false
 	if strings.ToLower(os.Getenv("VSPHERE_INSECURE")) == "true" {
 		cfg.Global.InsecureFlag = true
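Both test helpers now read `VSPHERE_VM_NAME` alongside the existing variables, so the integration tests can exercise the credential-free path. A sketch of the environment one might set before running the tests; all values are placeholders, and the user/password/server variables still required for vCenter-backed calls are not shown in this diff:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Placeholder values; VSPHERE_VM_NAME is the variable added by this PR.
	env := map[string]string{
		"VSPHERE_DATACENTER":  "us-west",
		"VSPHERE_DATASTORE":   "datastore1",
		"VSPHERE_WORKING_DIR": "kubernetes/",
		"VSPHERE_VM_NAME":     "worker-node-1",
		"VSPHERE_INSECURE":    "true",
	}
	for k, v := range env {
		os.Setenv(k, v)
	}
	fmt.Println("vm name:", os.Getenv("VSPHERE_VM_NAME"))
}
```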