Adding vSphere Volume support for vSphere Cloud Provider

pull/6/head
Abitha Palaniappan 2016-04-13 17:36:05 -07:00
parent 346f965871
commit 95c009dbdb
31 changed files with 2672 additions and 493 deletions

Godeps/Godeps.json (generated)

@ -1835,78 +1835,78 @@
},
{
"ImportPath": "github.com/vmware/govmomi",
"Comment": "v0.5.0",
"Rev": "c1b29993f383c32fc3fadb90892909668699810a"
"Comment": "v0.6.2",
"Rev": "9051bd6b44125d9472e0c148b5965692ab283d4a"
},
{
"ImportPath": "github.com/vmware/govmomi/find",
"Comment": "v0.5.0",
"Rev": "c1b29993f383c32fc3fadb90892909668699810a"
"Comment": "v0.6.2",
"Rev": "9051bd6b44125d9472e0c148b5965692ab283d4a"
},
{
"ImportPath": "github.com/vmware/govmomi/list",
"Comment": "v0.5.0",
"Rev": "c1b29993f383c32fc3fadb90892909668699810a"
"Comment": "v0.6.2",
"Rev": "9051bd6b44125d9472e0c148b5965692ab283d4a"
},
{
"ImportPath": "github.com/vmware/govmomi/object",
"Comment": "v0.5.0",
"Rev": "c1b29993f383c32fc3fadb90892909668699810a"
"Comment": "v0.6.2",
"Rev": "9051bd6b44125d9472e0c148b5965692ab283d4a"
},
{
"ImportPath": "github.com/vmware/govmomi/property",
"Comment": "v0.5.0",
"Rev": "c1b29993f383c32fc3fadb90892909668699810a"
"Comment": "v0.6.2",
"Rev": "9051bd6b44125d9472e0c148b5965692ab283d4a"
},
{
"ImportPath": "github.com/vmware/govmomi/session",
"Comment": "v0.5.0",
"Rev": "c1b29993f383c32fc3fadb90892909668699810a"
"Comment": "v0.6.2",
"Rev": "9051bd6b44125d9472e0c148b5965692ab283d4a"
},
{
"ImportPath": "github.com/vmware/govmomi/task",
"Comment": "v0.5.0",
"Rev": "c1b29993f383c32fc3fadb90892909668699810a"
"Comment": "v0.6.2",
"Rev": "9051bd6b44125d9472e0c148b5965692ab283d4a"
},
{
"ImportPath": "github.com/vmware/govmomi/vim25",
"Comment": "v0.5.0",
"Rev": "c1b29993f383c32fc3fadb90892909668699810a"
"Comment": "v0.6.2",
"Rev": "9051bd6b44125d9472e0c148b5965692ab283d4a"
},
{
"ImportPath": "github.com/vmware/govmomi/vim25/debug",
"Comment": "v0.5.0",
"Rev": "c1b29993f383c32fc3fadb90892909668699810a"
"Comment": "v0.6.2",
"Rev": "9051bd6b44125d9472e0c148b5965692ab283d4a"
},
{
"ImportPath": "github.com/vmware/govmomi/vim25/methods",
"Comment": "v0.5.0",
"Rev": "c1b29993f383c32fc3fadb90892909668699810a"
"Comment": "v0.6.2",
"Rev": "9051bd6b44125d9472e0c148b5965692ab283d4a"
},
{
"ImportPath": "github.com/vmware/govmomi/vim25/mo",
"Comment": "v0.5.0",
"Rev": "c1b29993f383c32fc3fadb90892909668699810a"
"Comment": "v0.6.2",
"Rev": "9051bd6b44125d9472e0c148b5965692ab283d4a"
},
{
"ImportPath": "github.com/vmware/govmomi/vim25/progress",
"Comment": "v0.5.0",
"Rev": "c1b29993f383c32fc3fadb90892909668699810a"
"Comment": "v0.6.2",
"Rev": "9051bd6b44125d9472e0c148b5965692ab283d4a"
},
{
"ImportPath": "github.com/vmware/govmomi/vim25/soap",
"Comment": "v0.5.0",
"Rev": "c1b29993f383c32fc3fadb90892909668699810a"
"Comment": "v0.6.2",
"Rev": "9051bd6b44125d9472e0c148b5965692ab283d4a"
},
{
"ImportPath": "github.com/vmware/govmomi/vim25/types",
"Comment": "v0.5.0",
"Rev": "c1b29993f383c32fc3fadb90892909668699810a"
"Comment": "v0.6.2",
"Rev": "9051bd6b44125d9472e0c148b5965692ab283d4a"
},
{
"ImportPath": "github.com/vmware/govmomi/vim25/xml",
"Comment": "v0.5.0",
"Rev": "c1b29993f383c32fc3fadb90892909668699810a"
"Comment": "v0.6.2",
"Rev": "9051bd6b44125d9472e0c148b5965692ab283d4a"
},
{
"ImportPath": "github.com/xiang90/probing",


@ -32,6 +32,7 @@ import (
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/cloudprovider/providers/openstack"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/pkg/util/io"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/aws_ebs"
@ -39,6 +40,7 @@ import (
"k8s.io/kubernetes/pkg/volume/gce_pd"
"k8s.io/kubernetes/pkg/volume/host_path"
"k8s.io/kubernetes/pkg/volume/nfs"
"k8s.io/kubernetes/pkg/volume/vsphere_volume"
"github.com/golang/glog"
)
@ -79,6 +81,7 @@ func ProbeRecyclableVolumePlugins(config componentconfig.VolumeConfiguration) []
allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, vsphere_volume.ProbeVolumePlugins()...)
return allPlugins
}
@ -97,6 +100,8 @@ func NewVolumeProvisioner(cloud cloudprovider.Interface, config componentconfig.
return getProvisionablePluginFromVolumePlugins(gce_pd.ProbeVolumePlugins())
case cloud != nil && openstack.ProviderName == cloud.ProviderName():
return getProvisionablePluginFromVolumePlugins(cinder.ProbeVolumePlugins())
case cloud != nil && vsphere.ProviderName == cloud.ProviderName():
return getProvisionablePluginFromVolumePlugins(vsphere_volume.ProbeVolumePlugins())
}
return nil, nil
}


@ -46,6 +46,7 @@ import (
"k8s.io/kubernetes/pkg/volume/nfs"
"k8s.io/kubernetes/pkg/volume/rbd"
"k8s.io/kubernetes/pkg/volume/secret"
"k8s.io/kubernetes/pkg/volume/vsphere_volume"
// Cloud providers
_ "k8s.io/kubernetes/pkg/cloudprovider/providers"
)
@ -80,6 +81,7 @@ func ProbeVolumePlugins(pluginDir string) []volume.VolumePlugin {
allPlugins = append(allPlugins, flexvolume.ProbeVolumePlugins(pluginDir)...)
allPlugins = append(allPlugins, azure_file.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, configmap.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, vsphere_volume.ProbeVolumePlugins()...)
return allPlugins
}


@ -180,6 +180,7 @@ func init() {
DeepCopy_api_Volume,
DeepCopy_api_VolumeMount,
DeepCopy_api_VolumeSource,
DeepCopy_api_VsphereVirtualDiskVolumeSource,
DeepCopy_api_WeightedPodAffinityTerm,
); err != nil {
// if one of the deep copy functions is malformed, detect it immediately.
@ -1925,6 +1926,15 @@ func DeepCopy_api_PersistentVolumeSource(in PersistentVolumeSource, out *Persist
} else {
out.AzureFile = nil
}
if in.VsphereVolume != nil {
in, out := in.VsphereVolume, &out.VsphereVolume
*out = new(VsphereVirtualDiskVolumeSource)
if err := DeepCopy_api_VsphereVirtualDiskVolumeSource(*in, *out, c); err != nil {
return err
}
} else {
out.VsphereVolume = nil
}
return nil
}
@ -3183,6 +3193,21 @@ func DeepCopy_api_VolumeSource(in VolumeSource, out *VolumeSource, c *conversion
} else {
out.ConfigMap = nil
}
if in.VsphereVolume != nil {
in, out := in.VsphereVolume, &out.VsphereVolume
*out = new(VsphereVirtualDiskVolumeSource)
if err := DeepCopy_api_VsphereVirtualDiskVolumeSource(*in, *out, c); err != nil {
return err
}
} else {
out.VsphereVolume = nil
}
return nil
}
func DeepCopy_api_VsphereVirtualDiskVolumeSource(in VsphereVirtualDiskVolumeSource, out *VsphereVirtualDiskVolumeSource, c *conversion.Cloner) error {
out.VolumePath = in.VolumePath
out.FSType = in.FSType
return nil
}

File diff suppressed because it is too large


@ -229,6 +229,8 @@ type VolumeSource struct {
AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty"`
// ConfigMap represents a configMap that should populate this volume
ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty"`
// VsphereVolume represents a vSphere volume attached and mounted on the kubelet's host machine
VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty"`
}
// Similar to VolumeSource but meant for the administrator who creates PVs.
@ -267,6 +269,8 @@ type PersistentVolumeSource struct {
Flocker *FlockerVolumeSource `json:"flocker,omitempty"`
// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty"`
// VsphereVolume represents a vSphere volume attached and mounted on the kubelet's host machine
VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty"`
}
type PersistentVolumeClaimVolumeSource struct {
@ -716,6 +720,16 @@ type AzureFileVolumeSource struct {
ReadOnly bool `json:"readOnly,omitempty"`
}
// Represents a vSphere volume resource.
type VsphereVirtualDiskVolumeSource struct {
// Path that identifies the vSphere volume vmdk
VolumePath string `json:"volumePath"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
FSType string `json:"fsType,omitempty"`
}
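To show how the new source is consumed, here is a minimal sketch (not part of this commit; the volume name and datastore path are hypothetical) of a pod-level volume referencing an existing vmdk via the type above:

	// Hypothetical example: a pod volume backed by a pre-provisioned vmdk.
	vol := api.Volume{
		Name: "test-vol",
		VolumeSource: api.VolumeSource{
			VsphereVolume: &api.VsphereVirtualDiskVolumeSource{
				VolumePath: "[local] test-volume-name.vmdk", // "[<datastore>] <path>.vmdk"
				FSType:     "ext4",
			},
		},
	}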
// Adapts a ConfigMap into a volume.
//
// The contents of the target ConfigMap's Data field will be presented in a


@ -41,6 +41,11 @@ import (
const ProviderName = "vsphere"
const ActivePowerState = "poweredOn"
const DefaultDiskController = "scsi"
const DefaultSCSIControllerType = "lsilogic"
var ErrNoDiskUUIDFound = errors.New("no disk UUID found")
var ErrNoDevicesFound = errors.New("no devices found")
// VSphere is an implementation of cloud provider Interface for VSphere.
type VSphere struct {
@ -63,6 +68,10 @@ type VSphereConfig struct {
Network struct {
PublicNetwork string `gcfg:"public-network"`
}
Disk struct {
DiskController string `gcfg:"diskcontroller"`
SCSIControllerType string `gcfg:"scsicontrollertype"`
}
}
func readConfig(config io.Reader) (VSphereConfig, error) {
@ -136,6 +145,12 @@ func newVSphere(cfg VSphereConfig) (*VSphere, error) {
return nil, err
}
if cfg.Disk.DiskController == "" {
cfg.Disk.DiskController = DefaultDiskController
}
if cfg.Disk.SCSIControllerType == "" {
cfg.Disk.SCSIControllerType = DefaultSCSIControllerType
}
vs := VSphere{
cfg: &cfg,
localInstanceID: id,
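For context, the new [disk] settings are read from the vSphere cloud config via gcfg. A hypothetical vsphere.conf excerpt matching the struct tags above (both keys are optional and fall back to the defaults applied above):

	[disk]
	diskcontroller = scsi
	scsicontrollertype = lsilogic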
@ -429,3 +444,292 @@ func (vs *VSphere) Routes() (cloudprovider.Routes, bool) {
func (vs *VSphere) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
return nameservers, searches
}
func getVirtualMachineDevices(cfg *VSphereConfig, ctx context.Context, c *govmomi.Client, name string) (*object.VirtualMachine, object.VirtualDeviceList, *object.Datastore, error) {
// Create a new finder
f := find.NewFinder(c.Client, true)
// Fetch and set data center
dc, err := f.Datacenter(ctx, cfg.Global.Datacenter)
if err != nil {
return nil, nil, nil, err
}
f.SetDatacenter(dc)
// Find datastores
ds, err := f.Datastore(ctx, cfg.Global.Datastore)
if err != nil {
return nil, nil, nil, err
}
vm, err := f.VirtualMachine(ctx, name)
if err != nil {
return nil, nil, nil, err
}
// Get devices from VM
vmDevices, err := vm.Device(ctx)
if err != nil {
return nil, nil, nil, err
}
return vm, vmDevices, ds, nil
}
// cleanUpController removes the given SCSI controller from the VM
func cleanUpController(newSCSIController types.BaseVirtualDevice, vmDevices object.VirtualDeviceList, vm *object.VirtualMachine, ctx context.Context) error {
ctls := vmDevices.SelectByType(newSCSIController)
if len(ctls) < 1 {
return ErrNoDevicesFound
}
newScsi := ctls[len(ctls)-1]
err := vm.RemoveDevice(ctx, true, newScsi)
if err != nil {
return err
}
return nil
}
// AttachDisk attaches the given virtual disk to the VM running the kubelet.
func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName string) (diskID string, diskUUID string, err error) {
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Create vSphere client
c, err := vsphereLogin(vs.cfg, ctx)
if err != nil {
return "", "", err
}
defer c.Logout(ctx)
// Find virtual machine to attach disk to
var vSphereInstance string
if nodeName == "" {
vSphereInstance = vs.localInstanceID
} else {
vSphereInstance = nodeName
}
// Get VM device list
vm, vmDevices, ds, err := getVirtualMachineDevices(vs.cfg, ctx, c, vSphereInstance)
if err != nil {
return "", "", err
}
// find SCSI controller to attach the disk
newSCSICreated := false
var newSCSIController types.BaseVirtualDevice
diskController, err := vmDevices.FindDiskController(vs.cfg.Disk.DiskController)
if err != nil {
// create a scsi controller if there is not one
// assign to the outer newSCSIController so the cleanup paths below can see it
newSCSIController, err = vmDevices.CreateSCSIController(vs.cfg.Disk.SCSIControllerType)
if err != nil {
glog.V(3).Infof("Cannot create new SCSI controller - %v", err)
return "", "", err
}
configNewSCSIController := newSCSIController.(types.BaseVirtualSCSIController).GetVirtualSCSIController()
hotAndRemove := true
configNewSCSIController.HotAddRemove = &hotAndRemove
configNewSCSIController.SharedBus = types.VirtualSCSISharing(types.VirtualSCSISharingNoSharing)
// add the scsi controller to virtual machine
err = vm.AddDevice(ctx, newSCSIController)
if err != nil {
glog.V(3).Infof("Cannot add SCSI controller to vm - %v", err)
// attempt clean up of scsi controller
if vmDevices, err := vm.Device(ctx); err == nil {
cleanUpController(newSCSIController, vmDevices, vm, ctx)
}
return "", "", err
}
// verify scsi controller in virtual machine
vmDevices, err = vm.Device(ctx)
if err != nil {
// cannot clean up if there is no device list
return "", "", err
}
if diskController, err = vmDevices.FindDiskController(vs.cfg.Disk.DiskController); err != nil {
glog.V(3).Infof("Cannot find disk controller - %v", err)
// attempt clean up of scsi controller
cleanUpController(newSCSIController, vmDevices, vm, ctx)
return "", "", err
}
newSCSICreated = true
}
disk := vmDevices.CreateDisk(diskController, ds.Reference(), vmDiskPath)
backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
backing.DiskMode = string(types.VirtualDiskModeIndependent_persistent)
disk = vmDevices.ChildDisk(disk)
// Attach disk to the VM
err = vm.AddDevice(ctx, disk)
if err != nil {
glog.V(3).Infof("Cannot add disk to the vm - %v", err)
if newSCSICreated {
cleanUpController(newSCSIController, vmDevices, vm, ctx)
}
return "", "", err
}
vmDevices, err = vm.Device(ctx)
if err != nil {
if newSCSICreated {
cleanUpController(newSCSIController, vmDevices, vm, ctx)
}
return "", "", err
}
devices := vmDevices.SelectByType(disk)
if len(devices) < 1 {
if newSCSICreated {
cleanUpController(newSCSIController, vmDevices, vm, ctx)
}
return "", "", ErrNoDevicesFound
}
// get new disk id
newDevice := devices[len(devices)-1]
deviceName := devices.Name(newDevice)
// get device uuid
diskUUID, err = getVirtualDiskUUID(newDevice)
if err != nil {
if newSCSICreated {
cleanUpController(newSCSIController, vmDevices, vm, ctx)
}
vs.DetachDisk(deviceName, vSphereInstance)
return "", "", err
}
return deviceName, diskUUID, nil
}
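To make the attach contract concrete, a minimal usage sketch (not from this commit; the vmdk path is hypothetical, and vs is a *VSphere from newVSphere): the returned device name is the handle for a later DetachDisk, while the UUID is matched against /dev/disk/by-id on the node; an empty nodeName resolves to the local instance, per the code above.

	diskID, diskUUID, err := vs.AttachDisk("[local] test-volume-name.vmdk", "")
	if err != nil {
		// handle attach failure
	}
	_ = diskUUID // matched against /dev/disk/by-id by the volume plugin
	if err := vs.DetachDisk(diskID, ""); err != nil {
		// handle detach failure
	}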
func getVirtualDiskUUID(newDevice types.BaseVirtualDevice) (string, error) {
vd := newDevice.GetVirtualDevice()
if b, ok := vd.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
uuidWithNoHyphens := strings.Replace(b.Uuid, "-", "", -1)
return uuidWithNoHyphens, nil
}
return "", ErrNoDiskUUIDFound
}
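For illustration, with a hypothetical backing UUID the hyphen stripping above yields the compact form that is later compared against /dev/disk/by-id entries:

	// "6000C298-0a1b-2c3d-4e5f-0123456789ab" -> "6000C2980a1b2c3d4e5f0123456789ab"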
// DetachDisk detaches the given virtual disk from the VM running the kubelet.
func (vs *VSphere) DetachDisk(diskID string, nodeName string) error {
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Create vSphere client
c, err := vsphereLogin(vs.cfg, ctx)
if err != nil {
return err
}
defer c.Logout(ctx)
// Find VM to detach disk from
var vSphereInstance string
if nodeName == "" {
vSphereInstance = vs.localInstanceID
} else {
vSphereInstance = nodeName
}
vm, vmDevices, _, err := getVirtualMachineDevices(vs.cfg, ctx, c, vSphereInstance)
if err != nil {
return err
}
// Remove disk from VM
device := vmDevices.Find(diskID)
if device == nil {
return fmt.Errorf("device '%s' not found", diskID)
}
err = vm.RemoveDevice(ctx, false, device)
if err != nil {
return err
}
return nil
}
// CreateVolume creates a volume of the given size (in KiB).
func (vs *VSphere) CreateVolume(name string, size int, tags *map[string]string) (volumePath string, err error) {
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Create vSphere client
c, err := vsphereLogin(vs.cfg, ctx)
if err != nil {
return "", err
}
defer c.Logout(ctx)
// Create a new finder
f := find.NewFinder(c.Client, true)
// Fetch and set data center
dc, err := f.Datacenter(ctx, vs.cfg.Global.Datacenter)
if err != nil {
return "", err
}
f.SetDatacenter(dc)
// Create a virtual disk manager
vmDiskPath := "[" + vs.cfg.Global.Datastore + "] " + name + ".vmdk"
virtualDiskManager := object.NewVirtualDiskManager(c.Client)
// Create specification for new virtual disk
vmDiskSpec := &types.FileBackedVirtualDiskSpec{
VirtualDiskSpec: types.VirtualDiskSpec{
AdapterType: (*tags)["adapterType"],
DiskType: (*tags)["diskType"],
},
CapacityKb: int64(size),
}
// Create virtual disk
task, err := virtualDiskManager.CreateVirtualDisk(ctx, vmDiskPath, dc, vmDiskSpec)
if err != nil {
return "", err
}
err = task.Wait(ctx)
if err != nil {
return "", err
}
return vmDiskPath, nil
}
// DeleteVolume deletes the volume with the given vmdk path.
func (vs *VSphere) DeleteVolume(vmDiskPath string) error {
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Create vSphere client
c, err := vsphereLogin(vs.cfg, ctx)
if err != nil {
return err
}
defer c.Logout(ctx)
// Create a new finder
f := find.NewFinder(c.Client, true)
// Fetch and set data center
dc, err := f.Datacenter(ctx, vs.cfg.Global.Datacenter)
if err != nil {
return err
}
f.SetDatacenter(dc)
// Create a virtual disk manager
virtualDiskManager := object.NewVirtualDiskManager(c.Client)
// Delete virtual disk
task, err := virtualDiskManager.DeleteVirtualDisk(ctx, vmDiskPath, dc)
if err != nil {
return err
}
return task.Wait(ctx)
}


@ -24,6 +24,7 @@ import (
"testing"
"golang.org/x/net/context"
"k8s.io/kubernetes/pkg/util/rand"
)
func configFromEnv() (cfg VSphereConfig, ok bool) {
@ -35,6 +36,7 @@ func configFromEnv() (cfg VSphereConfig, ok bool) {
cfg.Global.Password = os.Getenv("VSPHERE_PASSWORD")
cfg.Global.Datacenter = os.Getenv("VSPHERE_DATACENTER")
cfg.Network.PublicNetwork = os.Getenv("VSPHERE_PUBLIC_NETWORK")
cfg.Global.Datastore = os.Getenv("VSPHERE_DATASTORE")
if os.Getenv("VSPHERE_INSECURE") != "" {
InsecureFlag, err = strconv.ParseBool(os.Getenv("VSPHERE_INSECURE"))
} else {
@ -187,3 +189,54 @@ func TestInstances(t *testing.T) {
}
t.Logf("Found NodeAddresses(%s) = %s\n", srvs[0], addrs)
}
func TestVolumes(t *testing.T) {
cfg, ok := configFromEnv()
if !ok {
t.Skipf("No config found in environment")
}
vs, err := newVSphere(cfg)
if err != nil {
t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
}
i, ok := vs.Instances()
if !ok {
t.Fatalf("Instances() returned false")
}
srvs, err := i.List("*")
if err != nil {
t.Fatalf("Instances.List() failed: %s", err)
}
if len(srvs) == 0 {
t.Fatalf("Instances.List() returned zero servers")
}
tags := map[string]string{
"adapterType": "lsiLogic",
"diskType": "thin",
}
volPath, err := vs.CreateVolume("kubernetes-test-volume-"+rand.String(10), 1*1024*1024, &tags)
if err != nil {
t.Fatalf("Cannot create a new VMDK volume: %v", err)
}
diskID, _, err := vs.AttachDisk(volPath, "")
if err != nil {
t.Fatalf("Cannot attach volume(%s) to VM(%s): %v", volPath, srvs[0], err)
}
err = vs.DetachDisk(diskID, "")
if err != nil {
t.Fatalf("Cannot detach disk(%s) from VM(%s): %v", diskID, srvs[0], err)
}
// todo: Deleting a volume after detach currently not working through API or UI (vSphere)
// err = vs.DeleteVolume(volPath)
// if err != nil {
// t.Fatalf("Cannot delete VMDK volume %s: %v", volPath, err)
// }
}


@ -0,0 +1,419 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere_volume
import (
"errors"
"fmt"
"os"
"path"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
)
// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&vsphereVolumePlugin{}}
}
type vsphereVolumePlugin struct {
host volume.VolumeHost
}
var _ volume.VolumePlugin = &vsphereVolumePlugin{}
var _ volume.PersistentVolumePlugin = &vsphereVolumePlugin{}
var _ volume.DeletableVolumePlugin = &vsphereVolumePlugin{}
var _ volume.ProvisionableVolumePlugin = &vsphereVolumePlugin{}
const (
vsphereVolumePluginName = "kubernetes.io/vsphere-volume"
)
// vSphere Volume Plugin
func (plugin *vsphereVolumePlugin) Init(host volume.VolumeHost) error {
plugin.host = host
return nil
}
func (plugin *vsphereVolumePlugin) Name() string {
return vsphereVolumePluginName
}
func (plugin *vsphereVolumePlugin) CanSupport(spec *volume.Spec) bool {
return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.VsphereVolume != nil) ||
(spec.Volume != nil && spec.Volume.VsphereVolume != nil)
}
func (plugin *vsphereVolumePlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod.UID, &VsphereDiskUtil{}, plugin.host.GetMounter())
}
func (plugin *vsphereVolumePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(volName, podUID, &VsphereDiskUtil{}, plugin.host.GetMounter())
}
func (plugin *vsphereVolumePlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager vdManager, mounter mount.Interface) (volume.Mounter, error) {
var vvol *api.VsphereVirtualDiskVolumeSource
if spec.Volume != nil && spec.Volume.VsphereVolume != nil {
vvol = spec.Volume.VsphereVolume
} else {
vvol = spec.PersistentVolume.Spec.VsphereVolume
}
volPath := vvol.VolumePath
fsType := vvol.FSType
return &vsphereVolumeMounter{
vsphereVolume: &vsphereVolume{
podUID: podUID,
volName: spec.Name(),
volPath: volPath,
manager: manager,
mounter: mounter,
plugin: plugin,
},
fsType: fsType,
diskMounter: &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}}, nil
}
func (plugin *vsphereVolumePlugin) newUnmounterInternal(volName string, podUID types.UID, manager vdManager, mounter mount.Interface) (volume.Unmounter, error) {
return &vsphereVolumeUnmounter{
&vsphereVolume{
podUID: podUID,
volName: volName,
manager: manager,
mounter: mounter,
plugin: plugin,
}}, nil
}
func (plugin *vsphereVolumePlugin) getCloudProvider() (*vsphere.VSphere, error) {
cloud := plugin.host.GetCloudProvider()
if cloud == nil {
glog.Errorf("Cloud provider not initialized properly")
return nil, errors.New("Cloud provider not initialized properly")
}
vs, ok := cloud.(*vsphere.VSphere)
if !ok || vs == nil {
return nil, errors.New("Invalid cloud provider: expected vSphere")
}
return vs, nil
}
// Abstract interface to disk operations.
type vdManager interface {
// Attaches the disk to the kubelet's host machine.
AttachDisk(mounter *vsphereVolumeMounter, globalPDPath string) error
// Detaches the disk from the kubelet's host machine.
DetachDisk(unmounter *vsphereVolumeUnmounter) error
// Creates a volume
CreateVolume(provisioner *vsphereVolumeProvisioner) (vmDiskPath string, volumeSizeGB int, err error)
// Deletes a volume
DeleteVolume(deleter *vsphereVolumeDeleter) error
}
// vsphereVolume volumes are disk resources attached to the kubelet's host machine and exposed to the pod.
type vsphereVolume struct {
volName string
podUID types.UID
// Unique identifier of the volume, used to find the disk resource in the provider.
volPath string
// Filesystem type, optional.
fsType string
// diskID is used when detaching the disk
diskID string
// Utility interface that provides API calls to the provider to attach/detach disks.
manager vdManager
// Mounter interface that provides system calls to mount the global path to the pod local path.
mounter mount.Interface
// diskMounter provides the interface that is used to mount the actual block device.
diskMounter mount.Interface
plugin *vsphereVolumePlugin
volume.MetricsNil
}
func detachDiskLogError(vv *vsphereVolume) {
err := vv.manager.DetachDisk(&vsphereVolumeUnmounter{vv})
if err != nil {
glog.Warningf("Failed to detach disk: %v (%v)", vv, err)
}
}
var _ volume.Mounter = &vsphereVolumeMounter{}
type vsphereVolumeMounter struct {
*vsphereVolume
fsType string
diskMounter *mount.SafeFormatAndMount
}
func (b *vsphereVolumeMounter) GetAttributes() volume.Attributes {
return volume.Attributes{
SupportsSELinux: true,
}
}
// SetUp attaches the disk and bind mounts to the volume path.
func (b *vsphereVolumeMounter) SetUp(fsGroup *int64) error {
return b.SetUpAt(b.GetPath(), fsGroup)
}
// SetUpAt attaches the disk and bind mounts to the volume path.
func (b *vsphereVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
glog.V(5).Infof("vSphere volume setup %s to %s", b.volPath, dir)
// TODO: handle failed mounts here.
notmnt, err := b.mounter.IsLikelyNotMountPoint(dir)
if err != nil && !os.IsNotExist(err) {
glog.V(4).Infof("IsLikelyNotMountPoint failed: %v", err)
return err
}
if !notmnt {
glog.V(4).Infof("Something is already mounted to target %s", dir)
return nil
}
globalPDPath := makeGlobalPDPath(b.plugin.host, b.volPath)
if err := b.manager.AttachDisk(b, globalPDPath); err != nil {
glog.V(4).Infof("AttachDisk failed: %v", err)
return err
}
glog.V(3).Infof("vSphere volume %s attached", b.volPath)
options := []string{"bind"}
if err := os.MkdirAll(dir, 0750); err != nil {
// TODO: we should really eject the attach/detach out into its own control loop.
glog.V(4).Infof("Could not create directory %s: %v", dir, err)
detachDiskLogError(b.vsphereVolume)
return err
}
// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
err = b.mounter.Mount(globalPDPath, dir, "", options)
if err != nil {
notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notmnt {
if mntErr = b.mounter.Unmount(dir); mntErr != nil {
glog.Errorf("Failed to unmount: %v", mntErr)
return err
}
notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notmnt {
glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", b.GetPath())
return err
}
}
os.Remove(dir)
detachDiskLogError(b.vsphereVolume)
return err
}
glog.V(3).Infof("vSphere volume %s mounted to %s", b.volPath, dir)
return nil
}
var _ volume.Unmounter = &vsphereVolumeUnmounter{}
type vsphereVolumeUnmounter struct {
*vsphereVolume
}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (v *vsphereVolumeUnmounter) TearDown() error {
return v.TearDownAt(v.GetPath())
}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (v *vsphereVolumeUnmounter) TearDownAt(dir string) error {
glog.V(5).Infof("vSphere Volume TearDown of %s", dir)
notmnt, err := v.mounter.IsLikelyNotMountPoint(dir)
if err != nil {
glog.V(4).Infof("Error checking if mountpoint ", dir, ": ", err)
return err
}
if notmnt {
glog.V(4).Infof("Not mount point,deleting")
return os.Remove(dir)
}
// Find vSphere volumeID to lock the right volume
refs, err := mount.GetMountRefs(v.mounter, dir)
if err != nil {
glog.V(4).Infof("Error getting mountrefs for ", dir, ": ", err)
return err
}
if len(refs) == 0 {
glog.V(4).Infof("Directory %s is not mounted", dir)
return fmt.Errorf("directory %s is not mounted", dir)
}
v.volPath = path.Base(refs[0])
glog.V(4).Infof("Found volume %s mounted to %s", v.volPath, dir)
// Reload the list of references; a concurrent SetUpAt may have finished in the meantime
refs, err = mount.GetMountRefs(v.mounter, dir)
if err != nil {
glog.V(4).Infof("GetMountRefs failed: %v", err)
return err
}
if err := v.mounter.Unmount(dir); err != nil {
glog.V(4).Infof("Unmount failed: %v", err)
return err
}
glog.V(3).Infof("Successfully unmounted: %s\n", dir)
// If refCount is 1, then all bind mounts have been removed, and the
// remaining reference is the global mount. It is safe to detach.
if len(refs) == 1 {
if err := v.manager.DetachDisk(v); err != nil {
glog.V(4).Infof("DetachDisk failed: %v", err)
return err
}
glog.V(3).Infof("Volume %s detached", v.volPath)
}
notmnt, mntErr := v.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return mntErr
}
if notmnt {
if err := os.Remove(dir); err != nil {
glog.V(4).Infof("Failed to remove directory after unmount: %v", err)
return err
}
}
return nil
}
func makeGlobalPDPath(host volume.VolumeHost, devName string) string {
return path.Join(host.GetPluginDir(vsphereVolumePluginName), "mounts", devName)
}
func (vv *vsphereVolume) GetPath() string {
name := vsphereVolumePluginName
return vv.plugin.host.GetPodVolumeDir(vv.podUID, utilstrings.EscapeQualifiedNameForDisk(name), vv.volName)
}
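To make the two path helpers concrete (directories assumed from the default kubelet layout; pod UID and names hypothetical):

	global mount (makeGlobalPDPath): /var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/<volPath>
	per-pod bind mount (GetPath):    /var/lib/kubelet/pods/<podUID>/volumes/kubernetes.io~vsphere-volume/<volName>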
// vSphere Persistent Volume Plugin
func (plugin *vsphereVolumePlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
}
}
// vSphere Deletable Volume Plugin
type vsphereVolumeDeleter struct {
*vsphereVolume
}
var _ volume.Deleter = &vsphereVolumeDeleter{}
func (plugin *vsphereVolumePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
return plugin.newDeleterInternal(spec, &VsphereDiskUtil{})
}
func (plugin *vsphereVolumePlugin) newDeleterInternal(spec *volume.Spec, manager vdManager) (volume.Deleter, error) {
if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.VsphereVolume == nil {
return nil, fmt.Errorf("spec.PersistentVolumeSource.VsphereVolume is nil")
}
return &vsphereVolumeDeleter{
&vsphereVolume{
volName: spec.Name(),
volPath: spec.PersistentVolume.Spec.VsphereVolume.VolumePath,
manager: manager,
plugin: plugin,
}}, nil
}
func (r *vsphereVolumeDeleter) Delete() error {
return r.manager.DeleteVolume(r)
}
// vSphere Provisionable Volume Plugin
type vsphereVolumeProvisioner struct {
*vsphereVolume
options volume.VolumeOptions
}
var _ volume.Provisioner = &vsphereVolumeProvisioner{}
func (plugin *vsphereVolumePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
if len(options.AccessModes) == 0 {
options.AccessModes = plugin.GetAccessModes()
}
return plugin.newProvisionerInternal(options, &VsphereDiskUtil{})
}
func (plugin *vsphereVolumePlugin) newProvisionerInternal(options volume.VolumeOptions, manager vdManager) (volume.Provisioner, error) {
return &vsphereVolumeProvisioner{
vsphereVolume: &vsphereVolume{
manager: manager,
plugin: plugin,
},
options: options,
}, nil
}
func (v *vsphereVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
vmDiskPath, sizeKB, err := v.manager.CreateVolume(v)
if err != nil {
return nil, err
}
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
Name: v.options.PVName,
Labels: map[string]string{},
Annotations: map[string]string{
"kubernetes.io/createdby": "vsphere-volume-dynamic-provisioner",
},
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: v.options.PersistentVolumeReclaimPolicy,
AccessModes: v.options.AccessModes,
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dKi", sizeKB)),
},
PersistentVolumeSource: api.PersistentVolumeSource{
VsphereVolume: &api.VsphereVirtualDiskVolumeSource{
VolumePath: vmDiskPath,
FSType: "ext4",
},
},
},
}
return pv, nil
}
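For example (sizes hypothetical): if CreateVolume reports 102400 KiB, the capacity above becomes resource.MustParse("102400Ki"), i.e. a 100Mi PersistentVolume.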


@ -0,0 +1,219 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere_volume
import (
"fmt"
"os"
"path"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
utiltesting "k8s.io/kubernetes/pkg/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("vsphereVolumeTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/vsphere-volume")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.Name() != "kubernetes.io/vsphere-volume" {
t.Errorf("Wrong name: %s", plug.Name())
}
if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{VsphereVolume: &api.VsphereVirtualDiskVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{VsphereVolume: &api.VsphereVirtualDiskVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
type fakePDManager struct {
attachCalled bool
detachCalled bool
}
func getFakeDeviceName(host volume.VolumeHost, volPath string) string {
return path.Join(host.GetPluginDir(vsphereVolumePluginName), "device", volPath)
}
func (fake *fakePDManager) AttachDisk(b *vsphereVolumeMounter, globalPDPath string) error {
fakeDeviceName := getFakeDeviceName(b.plugin.host, b.volPath)
err := os.MkdirAll(fakeDeviceName, 0750)
if err != nil {
return err
}
fake.attachCalled = true
// Simulate the global mount so that the fakeMounter returns the
// expected number of mounts for the attached disk.
err = b.mounter.Mount(fakeDeviceName, globalPDPath, "", []string{"bind"})
if err != nil {
return err
}
return nil
}
func (fake *fakePDManager) DetachDisk(v *vsphereVolumeUnmounter) error {
globalPath := makeGlobalPDPath(v.plugin.host, v.volPath)
fakeDeviceName := getFakeDeviceName(v.plugin.host, v.volPath)
err := v.mounter.Unmount(globalPath)
if err != nil {
return err
}
// "Detach" the fake "device"
err = os.RemoveAll(fakeDeviceName)
if err != nil {
return err
}
fake.detachCalled = true
return nil
}
func (fake *fakePDManager) CreateVolume(v *vsphereVolumeProvisioner) (vmDiskPath string, volumeSizeKB int, err error) {
return "[local] test-volume-name.vmdk", 100, nil
}
func (fake *fakePDManager) DeleteVolume(vd *vsphereVolumeDeleter) error {
if vd.volPath != "[local] test-volume-name.vmdk" {
return fmt.Errorf("Deleter got unexpected volume path: %s", vd.volPath)
}
return nil
}
func TestPlugin(t *testing.T) {
// Initial setup to test volume plugin
tmpDir, err := utiltesting.MkTmpdir("vsphereVolumeTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/vsphere-volume")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &api.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
VsphereVolume: &api.VsphereVirtualDiskVolumeSource{
VolumePath: "[local] test-volume-name.vmdk",
FSType: "ext4",
},
},
}
// Test Mounter
fakeManager := &fakePDManager{}
fakeMounter := &mount.FakeMounter{}
mounter, err := plug.(*vsphereVolumePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Errorf("Got a nil Mounter")
}
mntPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~vsphere-volume/vol1")
path := mounter.GetPath()
if path != mntPath {
t.Errorf("Got unexpected path: %s", path)
}
if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if !fakeManager.attachCalled {
t.Errorf("Attach watch not called")
}
// Test Unmounter
fakeManager = &fakePDManager{}
unmounter, err := plug.(*vsphereVolumePlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Errorf("Got a nil Unmounter")
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", path)
} else if !os.IsNotExist(err) {
t.Errorf("SetUp() failed: %v", err)
}
if !fakeManager.detachCalled {
t.Errorf("Detach watch not called")
}
// Test Provisioner
cap := resource.MustParse("100Mi")
options := volume.VolumeOptions{
Capacity: cap,
AccessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
},
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
}
provisioner, err := plug.(*vsphereVolumePlugin).newProvisionerInternal(options, &fakePDManager{})
if err != nil {
t.Errorf("newProvisionerInternal() failed: %v", err)
}
persistentSpec, err := provisioner.Provision()
if err != nil {
t.Errorf("Provision() failed: %v", err)
}
if persistentSpec.Spec.PersistentVolumeSource.VsphereVolume.VolumePath != "[local] test-volume-name.vmdk" {
t.Errorf("Provision() returned unexpected path %s", persistentSpec.Spec.PersistentVolumeSource.VsphereVolume.VolumePath)
}
cap = persistentSpec.Spec.Capacity[api.ResourceStorage]
size := cap.Value()
if size != 100*1024 {
t.Errorf("Provision() returned unexpected volume size: %v", size)
}
// Test Deleter
volSpec := &volume.Spec{
PersistentVolume: persistentSpec,
}
deleter, err := plug.(*vsphereVolumePlugin).newDeleterInternal(volSpec, &fakePDManager{})
if err != nil {
t.Errorf("newDeleterInternal() failed: %v", err)
}
err = deleter.Delete()
if err != nil {
t.Errorf("Deleter() failed: %v", err)
}
}


@ -0,0 +1,199 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere_volume
import (
"errors"
"io/ioutil"
"os"
"path"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/keymutex"
"k8s.io/kubernetes/pkg/volume"
)
const (
maxRetries = 10
)
// Singleton key mutex for keeping attach/detach operations for the same PD atomic
var attachDetachMutex = keymutex.NewKeyMutex()
type VsphereDiskUtil struct{}
// Attaches a disk to the current kubelet.
// Mounts the disk to its global path.
func (util *VsphereDiskUtil) AttachDisk(vm *vsphereVolumeMounter, globalPDPath string) error {
options := []string{}
// Block execution until any pending attach/detach operations for this PD have completed
attachDetachMutex.LockKey(vm.volPath)
defer attachDetachMutex.UnlockKey(vm.volPath)
cloud, err := vm.plugin.getCloudProvider()
if err != nil {
return err
}
diskID, diskUUID, attachError := cloud.AttachDisk(vm.volPath, "")
if attachError != nil {
return attachError
} else if diskUUID == "" {
return errors.New("Disk UUID has no value")
}
// remember diskID for DetachDisk
vm.diskID = diskID
var devicePath string
numTries := 0
for {
devicePath = verifyDevicePath(diskUUID)
// probe the attached volume so that the symlink in /dev/disk/by-id is created
probeAttachedVolume()
_, err := os.Stat(devicePath)
if err == nil {
break
}
if err != nil && !os.IsNotExist(err) {
return err
}
numTries++
if numTries == maxRetries {
return errors.New("Could not attach disk: Timeout after 60s")
}
time.Sleep(time.Second * 60)
}
notMnt, err := vm.mounter.IsLikelyNotMountPoint(globalPDPath)
if err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(globalPDPath, 0750); err != nil {
return err
}
notMnt = true
} else {
return err
}
}
if notMnt {
err = vm.diskMounter.FormatAndMount(devicePath, globalPDPath, vm.fsType, options)
if err != nil {
os.Remove(globalPDPath)
return err
}
glog.V(2).Infof("Safe mount successful: %q\n", devicePath)
}
return nil
}
func verifyDevicePath(diskUUID string) string {
files, _ := ioutil.ReadDir("/dev/disk/by-id/")
for _, f := range files {
// TODO: should support other controllers
if strings.Contains(f.Name(), "scsi-") {
devID := f.Name()[len("scsi-"):]
if strings.Contains(diskUUID, devID) {
glog.V(4).Infof("Found disk attached as %q; full devicepath: %s\n", f.Name(), path.Join("/dev/disk/by-id/", f.Name()))
return path.Join("/dev/disk/by-id/", f.Name())
}
}
}
glog.Warningf("Failed to find device for the diskid: %q\n", diskUUID)
return ""
}
func probeAttachedVolume() error {
executor := exec.New()
args := []string{"trigger"}
cmd := executor.Command("/usr/bin/udevadm", args...)
_, err := cmd.CombinedOutput()
if err != nil {
glog.Errorf("error running udevadm trigger %v\n", err)
return err
}
glog.V(4).Infof("Successfully probed all attachments")
return nil
}
// Unmounts the device and detaches the disk from the kubelet's host machine.
func (util *VsphereDiskUtil) DetachDisk(vu *vsphereVolumeUnmounter) error {
// Block execution until any pending attach/detach operations for this PD have completed
attachDetachMutex.LockKey(vu.volPath)
defer attachDetachMutex.UnlockKey(vu.volPath)
globalPDPath := makeGlobalPDPath(vu.plugin.host, vu.volPath)
if err := vu.mounter.Unmount(globalPDPath); err != nil {
return err
}
if err := os.Remove(globalPDPath); err != nil {
return err
}
glog.V(2).Infof("Successfully unmounted main device: %s\n", globalPDPath)
cloud, err := vu.plugin.getCloudProvider()
if err != nil {
return err
}
if err = cloud.DetachDisk(vu.diskID, ""); err != nil {
return err
}
glog.V(2).Infof("Successfully detached vSphere volume %s", vu.volPath)
return nil
}
// CreateVolume creates a vSphere volume.
func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (vmDiskPath string, volumeSizeKB int, err error) {
cloud, err := v.plugin.getCloudProvider()
if err != nil {
return "", 0, err
}
volSizeBytes := v.options.Capacity.Value()
// vSphere expects the size in KiB; convert bytes to KiB, rounding up
volSizeKB := int(volume.RoundUpSize(volSizeBytes, 1024))
name := volume.GenerateVolumeName(v.options.ClusterName, v.options.PVName, 255)
vmDiskPath, err = cloud.CreateVolume(name, volSizeKB, v.options.CloudTags)
if err != nil {
glog.V(2).Infof("Error creating vsphere volume: %v", err)
return "", 0, err
}
glog.V(2).Infof("Successfully created vsphere volume %s", name)
return vmDiskPath, volSizeKB, nil
}
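A worked example of the conversion above, assuming volume.RoundUpSize is the usual round-up integer division by the allocation unit: a 100Mi claim is 104857600 bytes and becomes 102400 KiB. A sketch of the equivalent arithmetic:

	// roundUpKiB mirrors the RoundUpSize call above for a 1 KiB allocation unit.
	func roundUpKiB(sizeBytes int64) int64 {
		const kib = 1024
		return (sizeBytes + kib - 1) / kib // e.g. 104857600 -> 102400; 1000 -> 1
	}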
// DeleteVolume deletes a vSphere volume.
func (util *VsphereDiskUtil) DeleteVolume(vd *vsphereVolumeDeleter) error {
cloud, err := vd.plugin.getCloudProvider()
if err != nil {
return err
}
if err = cloud.DeleteVolume(vd.volPath); err != nil {
glog.V(2).Infof("Error deleting vsphere volume %s: %v", vd.volPath, err)
return err
}
glog.V(2).Infof("Successfully deleted vsphere volume %s", vd.volPath)
return nil
}


@ -9,15 +9,9 @@ build:
- GOVC_INSECURE=1
- VCA=1
commands:
- go get golang.org/x/tools/cmd/vet
- go get golang.org/x/tools/cmd/goimports
- go get github.com/davecgh/go-spew/spew
- go get
- make all
- make install
- make all install
- git clone https://github.com/sstephenson/bats.git /tmp/bats
- /tmp/bats/install.sh /usr/local
- apt-get -qq update && apt-get install -yqq uuid-runtime bsdmainutils jq
- govc/test/images/update.sh
- bats govc/test
- govc/test/clean.sh

vendor/github.com/vmware/govmomi/.gitignore (generated, vendored, new file)

@ -0,0 +1 @@
secrets.yml


@ -1,11 +1,12 @@
sudo: false
language: go
go: 1.4
go:
- 1.6
before_install:
- go get golang.org/x/tools/cmd/vet
- go get golang.org/x/tools/cmd/goimports
- go get github.com/davecgh/go-spew/spew
- make vendor
script:
- make check test


@ -1,5 +1,33 @@
# changelog
### 0.6.2 (2016-05-11)
* Get complete file details in Datastore.Stat
* SOAP decoding fixes
* Add VirtualMachine.RemoveAllSnapshot
### 0.6.1 (2016-04-30)
* Fix mo.Entity interface
### 0.6.0 (2016-04-29)
* Add Common.Rename method
* Add mo.Entity interface
* Add OptionManager
* Add Finder.FolderList method
* Add VirtualMachine.WaitForNetIP method
* Add VirtualMachine.RevertToSnapshot method
* Add Datastore.Download method
### 0.5.0 (2016-03-30)
* Generated fields using xsd type 'int' change to Go type 'int32'


@ -7,11 +7,13 @@ Alvaro Miranda <kikitux@gmail.com>
Amit Bathla <abathla@.vmware.com>
Andrew Chin <andrew@andrewtchin.com>
Arran Walker <arran.walker@zopa.com>
Austin Parker <aparker@apprenda.com>
Bob Killen <killen.bob@gmail.com>
Bruce Downs <bdowns@vmware.com>
Clint Greenwood <cgreenwood@vmware.com> <clint.greenwood@gmail.com>
Cédric Blomart <cblomart@gmail.com>
Danny Lockard <danny.lockard@banno.com>
Dave Tucker <dave@dtucker.co.uk>
Doug MacEachern <dougm@vmware.com>
Eloy Coto <eloy.coto@gmail.com>
Eric Yutao <eric.yutao@gmail.com>
@ -28,6 +30,7 @@ Mevan Samaratunga <mevansam@gmail.com>
Pieter Noordhuis <pnoordhuis@vmware.com> <pcnoordhuis@gmail.com>
runner.mei <runner.mei@gmail.com>
S.Çağlar Onur <conur@vmware.com>
Sergey Ignatov <sergey.ignatov@jetbrains.com>
Takaaki Furukawa <takaaki.frkw@gmail.com> <takaaki.furukawa@mail.rakuten.com>
Steve Purcell <steve@sanityinc.com>
Yang Yang <yangy@vmware.com>


@ -4,7 +4,12 @@ all: check test
check: goimports govet
goimports:
vendor:
go get golang.org/x/tools/cmd/goimports
go get github.com/davecgh/go-spew/spew
go get golang.org/x/net/context
goimports: vendor
@echo checking go imports...
@! goimports -d . 2>&1 | egrep -v '^$$'
@ -12,9 +17,8 @@ govet:
@echo checking go vet...
@go tool vet -structtags=false -methods=false .
test:
go get
test: vendor
go test -v $(TEST_OPTS) ./...
install:
install: vendor
go install github.com/vmware/govmomi/govc


@ -762,28 +762,42 @@ func (f *Finder) VirtualApp(ctx context.Context, path string) (*object.VirtualAp
return apps[0], nil
}
func (f *Finder) Folder(ctx context.Context, path string) (*object.Folder, error) {
mo, err := f.ManagedObjectList(ctx, path)
func (f *Finder) FolderList(ctx context.Context, path string) ([]*object.Folder, error) {
es, err := f.ManagedObjectList(ctx, path)
if err != nil {
return nil, err
}
if len(mo) == 0 {
var folders []*object.Folder
for _, e := range es {
switch o := e.Object.(type) {
case mo.Folder:
folder := object.NewFolder(f.client, o.Reference())
folder.InventoryPath = e.Path
folders = append(folders, folder)
case *object.Folder:
// RootFolder
folders = append(folders, o)
}
}
if len(folders) == 0 {
return nil, &NotFoundError{"folder", path}
}
if len(mo) > 1 {
return folders, nil
}
func (f *Finder) Folder(ctx context.Context, path string) (*object.Folder, error) {
folders, err := f.FolderList(ctx, path)
if err != nil {
return nil, err
}
if len(folders) > 1 {
return nil, &MultipleFoundError{"folder", path}
}
ref := mo[0].Object.Reference()
if ref.Type != "Folder" {
return nil, &NotFoundError{"folder", path}
}
folder := object.NewFolder(f.client, ref)
folder.InventoryPath = mo[0].Path
return folder, nil
return folders[0], nil
}


@ -69,3 +69,17 @@ func (c Common) Destroy(ctx context.Context) (*Task, error) {
return NewTask(c.c, res.Returnval), nil
}
func (c Common) Rename(ctx context.Context, name string) (*Task, error) {
req := types.Rename_Task{
This: c.Reference(),
NewName: name,
}
res, err := methods.Rename_Task(ctx, c.c, &req)
if err != nil {
return nil, err
}
return NewTask(c.c, res.Returnval), nil
}


@ -220,7 +220,16 @@ func (d Datastore) UploadFile(ctx context.Context, file string, path string, par
return d.Client().UploadFile(file, u, p)
}
// DownloadFile via soap.Upload with an http service ticket
// Download via soap.Download with an http service ticket
func (d Datastore) Download(ctx context.Context, path string, param *soap.Download) (io.ReadCloser, int64, error) {
u, p, err := d.downloadTicket(ctx, path, param)
if err != nil {
return nil, 0, err
}
return d.Client().Download(u, p)
}
// DownloadFile via soap.Download with an http service ticket
func (d Datastore) DownloadFile(ctx context.Context, path string, file string, param *soap.Download) error {
u, p, err := d.downloadTicket(ctx, path, param)
if err != nil {
@ -306,7 +315,9 @@ func (d Datastore) Stat(ctx context.Context, file string) (types.BaseFileInfo, e
spec := types.HostDatastoreBrowserSearchSpec{
Details: &types.FileQueryFlags{
FileType: true,
FileOwner: types.NewBool(true), // TODO: omitempty is generated, but seems to be required
FileSize: true,
Modification: true,
FileOwner: types.NewBool(true),
},
MatchPattern: []string{path.Base(file)},
}


@ -37,7 +37,9 @@ func NewFolder(c *vim25.Client, ref types.ManagedObjectReference) *Folder {
}
func NewRootFolder(c *vim25.Client) *Folder {
return NewFolder(c, c.ServiceContent.RootFolder)
f := NewFolder(c, c.ServiceContent.RootFolder)
f.InventoryPath = "/"
return f
}
func (f Folder) Children(ctx context.Context) ([]Reference, error) {
@ -196,3 +198,17 @@ func (f Folder) CreateDVS(ctx context.Context, spec types.DVSCreateSpec) (*Task,
return NewTask(f.c, res.Returnval), nil
}
func (f Folder) MoveInto(ctx context.Context, list []types.ManagedObjectReference) (*Task, error) {
req := types.MoveIntoFolder_Task{
This: f.Reference(),
List: list,
}
res, err := methods.MoveIntoFolder_Task(ctx, f.c, &req)
if err != nil {
return nil, err
}
return NewTask(f.c, res.Returnval), nil
}


@ -0,0 +1,64 @@
/*
Copyright (c) 2016 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package object
import (
"github.com/vmware/govmomi/vim25"
"github.com/vmware/govmomi/vim25/methods"
"github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context"
)
type HostAccountManager struct {
Common
}
func NewHostAccountManager(c *vim25.Client, ref types.ManagedObjectReference) *HostAccountManager {
return &HostAccountManager{
Common: NewCommon(c, ref),
}
}
func (m HostAccountManager) Create(ctx context.Context, user *types.HostAccountSpec) error {
req := types.CreateUser{
This: m.Reference(),
User: user,
}
_, err := methods.CreateUser(ctx, m.Client(), &req)
return err
}
func (m HostAccountManager) Update(ctx context.Context, user *types.HostAccountSpec) error {
req := types.UpdateUser{
This: m.Reference(),
User: user,
}
_, err := methods.UpdateUser(ctx, m.Client(), &req)
return err
}
func (m HostAccountManager) Remove(ctx context.Context, userName string) error {
req := types.RemoveUser{
This: m.Reference(),
UserName: userName,
}
_, err := methods.RemoveUser(ctx, m.Client(), &req)
return err
}


@ -98,3 +98,25 @@ func (m HostConfigManager) VsanSystem(ctx context.Context) (*HostVsanSystem, err
return NewHostVsanSystem(m.c, *h.ConfigManager.VsanSystem), nil
}
func (m HostConfigManager) AccountManager(ctx context.Context) (*HostAccountManager, error) {
var h mo.HostSystem
err := m.Properties(ctx, m.Reference(), []string{"configManager.accountManager"}, &h)
if err != nil {
return nil, err
}
return NewHostAccountManager(m.c, *h.ConfigManager.AccountManager), nil
}
func (m HostConfigManager) OptionManager(ctx context.Context) (*OptionManager, error) {
var h mo.HostSystem
err := m.Properties(ctx, m.Reference(), []string{"configManager.advancedOption"}, &h)
if err != nil {
return nil, err
}
return NewOptionManager(m.c, *h.ConfigManager.AdvancedOption), nil
}


@ -0,0 +1,58 @@
/*
Copyright (c) 2016 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package object
import (
"github.com/vmware/govmomi/vim25"
"github.com/vmware/govmomi/vim25/methods"
"github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context"
)
type OptionManager struct {
Common
}
func NewOptionManager(c *vim25.Client, ref types.ManagedObjectReference) *OptionManager {
return &OptionManager{
Common: NewCommon(c, ref),
}
}
func (m OptionManager) Query(ctx context.Context, name string) ([]types.BaseOptionValue, error) {
req := types.QueryOptions{
This: m.Reference(),
Name: name,
}
res, err := methods.QueryOptions(ctx, m.Client(), &req)
if err != nil {
return nil, err
}
return res.Returnval, nil
}
func (m OptionManager) Update(ctx context.Context, value []types.BaseOptionValue) error {
req := types.UpdateOptions{
This: m.Reference(),
ChangedValue: value,
}
_, err := methods.UpdateOptions(ctx, m.Client(), &req)
return err
}


@ -63,7 +63,6 @@ func EthernetCardTypes() VirtualDeviceList {
&types.VirtualVmxnet3{},
}).Select(func(device types.BaseVirtualDevice) bool {
c := device.(types.BaseVirtualEthernetCard).GetVirtualEthernetCard()
c.AddressType = string(types.VirtualEthernetCardMacTypeGenerated)
c.GetVirtualDevice().Key = -1
return true
})


@ -19,6 +19,7 @@ package object
import (
"errors"
"fmt"
"net"
"github.com/vmware/govmomi/property"
"github.com/vmware/govmomi/vim25"
@ -245,6 +246,77 @@ func (v VirtualMachine) WaitForIP(ctx context.Context) (string, error) {
return ip, nil
}
// WaitForNetIP waits for the VM guest.net property to report an IP address for all VM NICs.
// Only consider IPv4 addresses if the v4 param is true.
// Returns a map with MAC address as the key and IP address list as the value.
func (v VirtualMachine) WaitForNetIP(ctx context.Context, v4 bool) (map[string][]string, error) {
macs := make(map[string][]string)
p := property.DefaultCollector(v.c)
// Wait for all NICs to have a MacAddress, which may not be generated yet.
err := property.Wait(ctx, p, v.Reference(), []string{"config.hardware.device"}, func(pc []types.PropertyChange) bool {
for _, c := range pc {
if c.Op != types.PropertyChangeOpAssign {
continue
}
devices := c.Val.(types.ArrayOfVirtualDevice).VirtualDevice
for _, device := range devices {
if nic, ok := device.(types.BaseVirtualEthernetCard); ok {
mac := nic.GetVirtualEthernetCard().MacAddress
if mac == "" {
return false
}
macs[mac] = nil
}
}
}
return true
})
err = property.Wait(ctx, p, v.Reference(), []string{"guest.net"}, func(pc []types.PropertyChange) bool {
for _, c := range pc {
if c.Op != types.PropertyChangeOpAssign {
continue
}
nics := c.Val.(types.ArrayOfGuestNicInfo).GuestNicInfo
for _, nic := range nics {
mac := nic.MacAddress
if mac == "" || nic.IpConfig == nil {
continue
}
for _, ip := range nic.IpConfig.IpAddress {
if _, ok := macs[mac]; !ok {
continue // Ignore any that don't correspond to a VM device
}
if v4 && net.ParseIP(ip.IpAddress).To4() == nil {
continue // Ignore non IPv4 address
}
macs[mac] = append(macs[mac], ip.IpAddress)
}
}
}
for _, ips := range macs {
if len(ips) == 0 {
return false
}
}
return true
})
if err != nil {
return nil, err
}
return macs, nil
}
// Device returns the VirtualMachine's config.hardware.device property.
func (v VirtualMachine) Device(ctx context.Context) (VirtualDeviceList, error) {
var o mo.VirtualMachine
@ -336,8 +408,12 @@ func (v VirtualMachine) EditDevice(ctx context.Context, device ...types.BaseVirt
}
// RemoveDevice removes the given devices on the VirtualMachine
func (v VirtualMachine) RemoveDevice(ctx context.Context, device ...types.BaseVirtualDevice) error {
return v.configureDevice(ctx, types.VirtualDeviceConfigSpecOperationRemove, types.VirtualDeviceConfigSpecFileOperationDestroy, device...)
func (v VirtualMachine) RemoveDevice(ctx context.Context, keepFiles bool, device ...types.BaseVirtualDevice) error {
fop := types.VirtualDeviceConfigSpecFileOperationDestroy
if keepFiles {
fop = ""
}
return v.configureDevice(ctx, types.VirtualDeviceConfigSpecOperationRemove, fop, device...)
}
// BootOptions returns the VirtualMachine's config.bootOptions property.
@ -400,6 +476,76 @@ func (v VirtualMachine) CreateSnapshot(ctx context.Context, name string, descrip
return NewTask(v.c, res.Returnval), nil
}
// RemoveAllSnapshot removes all snapshots of a virtual machine
func (v VirtualMachine) RemoveAllSnapshot(ctx context.Context, consolidate *bool) (*Task, error) {
req := types.RemoveAllSnapshots_Task{
This: v.Reference(),
Consolidate: consolidate,
}
res, err := methods.RemoveAllSnapshots_Task(ctx, v.c, &req)
if err != nil {
return nil, err
}
return NewTask(v.c, res.Returnval), nil
}
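// Illustrative sketch (not part of the diff): remove every snapshot of a VM,
// ask vSphere to consolidate the disks afterwards, and wait for completion.
//
//	consolidate := true
//	task, err := vm.RemoveAllSnapshot(ctx, &consolidate)
//	if err != nil {
//		return err
//	}
//	return task.Wait(ctx)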
// RevertToSnapshot reverts to a named snapshot
func (v VirtualMachine) RevertToSnapshot(ctx context.Context, name string, suppressPowerOn bool) (*Task, error) {
var o mo.VirtualMachine
err := v.Properties(ctx, v.Reference(), []string{"snapshot"}, &o)
if err != nil {
return nil, err
}
if o.Snapshot == nil || len(o.Snapshot.RootSnapshotList) < 1 {
return nil, errors.New("no snapshots for this VM")
}
snapshotTree := o.Snapshot.RootSnapshotList
snapshot, err := traverseSnapshotInTree(snapshotTree, name)
if err != nil {
return nil, err
}
req := types.RevertToSnapshot_Task{
This: snapshot,
SuppressPowerOn: types.NewBool(suppressPowerOn),
}
res, err := methods.RevertToSnapshot_Task(ctx, v.c, &req)
if err != nil {
return nil, err
}
return NewTask(v.c, res.Returnval), nil
}
// traverseSnapshotInTree recursively searches a snapshot tree for a snapshot with the given name.
func traverseSnapshotInTree(tree []types.VirtualMachineSnapshotTree, name string) (types.ManagedObjectReference, error) {
var o types.ManagedObjectReference
for _, s := range tree {
if s.Name == name {
return s.Snapshot, nil
}
// Search this snapshot's children before its siblings; a failed child
// search must not abort the walk of the remaining tree.
if ref, err := traverseSnapshotInTree(s.ChildSnapshotList, name); err == nil {
return ref, nil
}
}
return o, errors.New("snapshot not found")
}
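// Illustrative sketch (not part of the diff): revert a VM to a named snapshot
// without powering it on, then block until the revert task completes.
//
//	task, err := vm.RevertToSnapshot(ctx, "pre-upgrade", true)
//	if err != nil {
//		return err
//	}
//	return task.Wait(ctx)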
// IsToolsRunning returns true if VMware Tools is currently running in the guest OS, and false otherwise.
func (v VirtualMachine) IsToolsRunning(ctx context.Context) (bool, error) {
var o mo.VirtualMachine

vendor/github.com/vmware/govmomi/vim25/mo/entity.go (new file, 24 lines)

@ -0,0 +1,24 @@
/*
Copyright (c) 2016 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mo
// Entity is the interface that is implemented by all managed objects
// that extend ManagedEntity.
type Entity interface {
Reference
Entity() *ManagedEntity
}
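// Illustrative note (not part of the diff): because the concrete managed object
// types below each gain an Entity() method, helpers can operate on any of them
// generically. For example, a hypothetical name accessor:
//
//	func entityName(e Entity) string {
//		return e.Entity().Name
//	}
//
// accepts *VirtualMachine, *Datastore, *Folder, and the rest uniformly.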


@ -130,6 +130,10 @@ type ComputeResource struct {
ConfigurationEx types.BaseComputeResourceConfigInfo `mo:"configurationEx"`
}
func (m *ComputeResource) Entity() *ManagedEntity {
return &m.ManagedEntity
}
func init() {
t["ComputeResource"] = reflect.TypeOf((*ComputeResource)(nil)).Elem()
}
@ -187,6 +191,10 @@ type Datacenter struct {
Configuration types.DatacenterConfigInfo `mo:"configuration"`
}
func (m *Datacenter) Entity() *ManagedEntity {
return &m.ManagedEntity
}
func init() {
t["Datacenter"] = reflect.TypeOf((*Datacenter)(nil)).Elem()
}
@ -203,6 +211,10 @@ type Datastore struct {
IormConfiguration *types.StorageIORMInfo `mo:"iormConfiguration"`
}
func (m *Datastore) Entity() *ManagedEntity {
return &m.ManagedEntity
}
func init() {
t["Datastore"] = reflect.TypeOf((*Datastore)(nil)).Elem()
}
@ -255,6 +267,10 @@ type DistributedVirtualSwitch struct {
Runtime *types.DVSRuntimeInfo `mo:"runtime"`
}
func (m *DistributedVirtualSwitch) Entity() *ManagedEntity {
return &m.ManagedEntity
}
func init() {
t["DistributedVirtualSwitch"] = reflect.TypeOf((*DistributedVirtualSwitch)(nil)).Elem()
}
@ -359,6 +375,10 @@ type Folder struct {
ChildEntity []types.ManagedObjectReference `mo:"childEntity"`
}
func (m *Folder) Entity() *ManagedEntity {
return &m.ManagedEntity
}
func init() {
t["Folder"] = reflect.TypeOf((*Folder)(nil)).Elem()
}
@ -878,6 +898,10 @@ type HostSystem struct {
SystemResources *types.HostSystemResourceInfo `mo:"systemResources"`
}
func (m *HostSystem) Entity() *ManagedEntity {
return &m.ManagedEntity
}
func init() {
t["HostSystem"] = reflect.TypeOf((*HostSystem)(nil)).Elem()
}
@ -1117,6 +1141,10 @@ type Network struct {
Vm []types.ManagedObjectReference `mo:"vm"`
}
func (m *Network) Entity() *ManagedEntity {
return &m.ManagedEntity
}
func init() {
t["Network"] = reflect.TypeOf((*Network)(nil)).Elem()
}
@ -1286,6 +1314,10 @@ type ResourcePool struct {
ChildConfiguration []types.ResourceConfigSpec `mo:"childConfiguration"`
}
func (m *ResourcePool) Entity() *ManagedEntity {
return &m.ManagedEntity
}
func init() {
t["ResourcePool"] = reflect.TypeOf((*ResourcePool)(nil)).Elem()
}
@ -1551,6 +1583,10 @@ type VirtualMachine struct {
GuestHeartbeatStatus types.ManagedEntityStatus `mo:"guestHeartbeatStatus"`
}
func (m *VirtualMachine) Entity() *ManagedEntity {
return &m.ManagedEntity
}
func init() {
t["VirtualMachine"] = reflect.TypeOf((*VirtualMachine)(nil)).Elem()
}


@ -189,12 +189,20 @@ func assignValue(val reflect.Value, fi []int, pv reflect.Value) {
npv := reflect.New(pt)
npv.Elem().Set(pv)
pv = npv
pt = pv.Type()
} else {
panic(fmt.Sprintf("type %s doesn't implement %s", pt.Name(), rt.Name()))
}
}
if pt.AssignableTo(rt) {
rv.Set(pv)
} else if rt.ConvertibleTo(pt) {
rv.Set(pv.Convert(rt))
} else {
panic(fmt.Sprintf("cannot assign %s (%s) to %s (%s)", rt.Name(), rt.Kind(), pt.Name(), pt.Kind()))
}
return
}
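// Illustrative note (not part of the diff): the new ConvertibleTo branch covers
// destination fields whose type is a named type over a basic kind. Roughly:
//
//	type Status string   // e.g. types.ManagedEntityStatus
//	var field Status     // destination field (rt)
//	prop := "green"      // decoded property value (pt = string)
//	// string is not assignable to Status, but it is convertible, so
//	// assignValue can now do the equivalent of:
//	field = Status(prop)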


@ -428,23 +428,12 @@ var DefaultDownload = Download{
Method: "GET",
}
// DownloadFile GETs the given URL to a local file
func (c *Client) DownloadFile(file string, u *url.URL, param *Download) error {
var err error
if param == nil {
param = &DefaultDownload
}
fh, err := os.Create(file)
if err != nil {
return err
}
defer fh.Close()
// Download GETs the remote file from the given URL
func (c *Client) Download(u *url.URL, param *Download) (io.ReadCloser, int64, error) {
req, err := http.NewRequest(param.Method, u.String(), nil)
if err != nil {
return err
return nil, 0, err
}
if param.Ticket != nil {
@ -453,11 +442,9 @@ func (c *Client) DownloadFile(file string, u *url.URL, param *Download) error {
res, err := c.Client.Do(req)
if err != nil {
return err
return nil, 0, err
}
defer res.Body.Close()
switch res.StatusCode {
case http.StatusOK:
default:
@ -465,12 +452,37 @@ func (c *Client) DownloadFile(file string, u *url.URL, param *Download) error {
}
if err != nil {
return err
return nil, 0, err
}
var r io.Reader = res.Body
var r io.ReadCloser = res.Body
return r, res.ContentLength, nil
}
// DownloadFile GETs the given URL to a local file
func (c *Client) DownloadFile(file string, u *url.URL, param *Download) error {
var err error
if param == nil {
param = &DefaultDownload
}
rc, contentLength, err := c.Download(u, param)
if err != nil {
return err
}
defer rc.Close()
var r io.Reader = rc
fh, err := os.Create(file)
if err != nil {
return err
}
defer fh.Close()
if param.Progress != nil {
pr := progress.NewReader(param.Progress, res.Body, res.ContentLength)
pr := progress.NewReader(param.Progress, r, contentLength)
r = pr
// Mark progress reader as done when returning from this function.
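// Illustrative sketch (not part of the diff): the extracted Download method
// lets callers stream a remote file instead of staging it on disk the way
// DownloadFile does. u is a *url.URL and dst an io.Writer supplied by the
// caller; outside this package the default parameters are soap.DefaultDownload.
//
//	rc, _, err := c.Download(u, &DefaultDownload)
//	if err != nil {
//		return err
//	}
//	defer rc.Close()
//	_, err = io.Copy(dst, rc)
//	return err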


@ -271,9 +271,19 @@ var (
// Find reflect.Type for an element's type attribute.
func (p *Decoder) typeForElement(val reflect.Value, start *StartElement) reflect.Type {
t := ""
for _, a := range start.Attr {
for i, a := range start.Attr {
if a.Name == xmlSchemaInstance {
t = a.Value
// HACK: ensure xsi:type is last in the list to avoid using that value for
// a "type" attribute, such as ManagedObjectReference.Type for example.
// Note that xsi:type is already the last attribute in VC/ESX responses.
// This is only an issue with govmomi simulator generated responses.
// Proper fix will require finding a few needles in this xml package haystack.
x := len(start.Attr) - 1
if i != x {
start.Attr[i] = start.Attr[x]
start.Attr[x] = a
}
break
}
}
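// Illustrative note (not part of the diff): the swap above targets simulator
// output such as
//
//	<obj xsi:type="ManagedObjectReference" type="VirtualMachine">vm-123</obj>
//
// where both attributes share the local name "type". Keeping xsi:type at the
// end of the list prevents its value from being mistaken for the plain "type"
// attribute (e.g. ManagedObjectReference.Type).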