mirror of https://github.com/k3s-io/k3s
Merge pull request #26658 from hpcloud/hpe/vsphere-vol-bugfixes
Automatic merge from submit-queue. Fixing vSphere Volume plugin bugs. This PR fixes #26646 and is targeted for 1.3.
commit 4fcbc0ada7
@@ -44,7 +44,8 @@ const ActivePowerState = "poweredOn"
const DefaultDiskController = "scsi"
const DefaultSCSIControllerType = "lsilogic"

var ErrNoDiskUUIDFound = errors.New("no disk UUID found")
var ErrNoDiskUUIDFound = errors.New("No disk UUID found")
var ErrNoDiskIDFound = errors.New("No vSphere disk ID found")
var ErrNoDevicesFound = errors.New("No devices found")

// VSphere is an implementation of cloud provider Interface for VSphere.
@@ -273,7 +274,7 @@ func (i *Instances) List(filter string) ([]string, error) {
return nil, err
}

glog.V(3).Infof("Found %s instances matching %s: %s",
glog.V(3).Infof("found %s instances matching %s: %s",
len(vmList), filter, vmList)

return vmList, nil
@@ -435,9 +436,9 @@ func (vs *VSphere) GetZone() (cloudprovider.Zone, error) {
return cloudprovider.Zone{Region: vs.cfg.Global.Datacenter}, nil
}

// Routes returns an implementation of Routes for vSphere.
// Routes returns a false since the interface is not supported for vSphere.
func (vs *VSphere) Routes() (cloudprovider.Routes, bool) {
return nil, true
return nil, false
}

// ScrubDNS filters DNS settings for pods.
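The second return value of the cloudprovider Routes() method reports whether the provider actually implements route support; returning true alongside a nil Routes object would make callers such as the route controller believe routes are available. A minimal, self-contained sketch of how a caller is expected to consume that flag; the type and names below are illustrative stand-ins, not the Kubernetes controller code:

package main

import "fmt"

// Routes is a stand-in for the cloudprovider.Routes sub-interface.
type Routes interface {
	ListRoutes(clusterName string) ([]string, error)
}

type vsphereStub struct{}

// Routes mirrors the fixed behaviour: vSphere has no route support, so it
// returns (nil, false) rather than (nil, true).
func (vsphereStub) Routes() (Routes, bool) { return nil, false }

func main() {
	routes, supported := vsphereStub{}.Routes()
	if !supported || routes == nil {
		fmt.Println("routes not supported by this cloud provider; skipping route controller")
		return
	}
}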
@@ -525,7 +526,7 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName string) (diskID string
// create a scsi controller if there is not one
newSCSIController, err := vmDevices.CreateSCSIController(vs.cfg.Disk.SCSIControllerType)
if err != nil {
glog.V(3).Infof("Cannot create new SCSI controller - %v", err)
glog.V(3).Infof("cannot create new SCSI controller - %v", err)
return "", "", err
}
configNewSCSIController := newSCSIController.(types.BaseVirtualSCSIController).GetVirtualSCSIController()
@@ -536,7 +537,7 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName string) (diskID string
// add the scsi controller to virtual machine
err = vm.AddDevice(context.TODO(), newSCSIController)
if err != nil {
glog.V(3).Infof("Cannot add SCSI controller to vm - %v", err)
glog.V(3).Infof("cannot add SCSI controller to vm - %v", err)
// attempt clean up of scsi controller
if vmDevices, err := vm.Device(ctx); err == nil {
cleanUpController(newSCSIController, vmDevices, vm, ctx)
@@ -551,7 +552,7 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName string) (diskID string
return "", "", err
}
if diskController, err = vmDevices.FindDiskController(vs.cfg.Disk.DiskController); err != nil {
glog.V(3).Infof("Cannot find disk controller - %v", err)
glog.V(3).Infof("cannot find disk controller - %v", err)
// attempt clean up of scsi controller
cleanUpController(newSCSIController, vmDevices, vm, ctx)
return "", "", err
@@ -562,12 +563,11 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName string) (diskID string
disk := vmDevices.CreateDisk(diskController, ds.Reference(), vmDiskPath)
backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
backing.DiskMode = string(types.VirtualDiskModeIndependent_persistent)
disk = vmDevices.ChildDisk(disk)

// Attach disk to the VM
err = vm.AddDevice(ctx, disk)
if err != nil {
glog.V(3).Infof("Cannot add disk to the vm - %v", err)
glog.V(3).Infof("cannot attach disk to the vm - %v", err)
if newSCSICreated {
cleanUpController(newSCSIController, vmDevices, vm, ctx)
}
@@ -611,13 +611,29 @@ func getVirtualDiskUUID(newDevice types.BaseVirtualDevice) (string, error) {

if b, ok := vd.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
uuidWithNoHypens := strings.Replace(b.Uuid, "-", "", -1)
return uuidWithNoHypens, nil
return strings.ToLower(uuidWithNoHypens), nil
}
return "", ErrNoDiskUUIDFound
}

func getVirtualDiskID(volPath string, vmDevices object.VirtualDeviceList) (string, error) {
// filter vm devices to retrieve disk ID for the given vmdk file
for _, device := range vmDevices {
if vmDevices.TypeName(device) == "VirtualDisk" {
d := device.GetVirtualDevice()
if b, ok := d.Backing.(types.BaseVirtualDeviceFileBackingInfo); ok {
fileName := b.GetVirtualDeviceFileBackingInfo().FileName
if fileName == volPath {
return vmDevices.Name(device), nil
}
}
}
}
return "", ErrNoDiskIDFound
}

// Detaches given virtual disk volume from the compute running kubelet.
func (vs *VSphere) DetachDisk(diskID string, nodeName string) error {
func (vs *VSphere) DetachDisk(volPath string, nodeName string) error {
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
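getVirtualDiskUUID now strips the hyphens and lower-cases the UUID reported in the disk's backing info, so it can later be compared against the device names kubelet sees under /dev/disk/by-id. A small sketch of that normalisation, using a made-up UUID value:

package main

import (
	"fmt"
	"strings"
)

// normalizeDiskUUID mirrors the transformation done in getVirtualDiskUUID:
// drop the hyphens and lower-case the hex digits.
func normalizeDiskUUID(uuid string) string {
	return strings.ToLower(strings.Replace(uuid, "-", "", -1))
}

func main() {
	// Hypothetical UUID as reported in a VirtualDiskFlatVer2BackingInfo.
	reported := "6000C294-8A52-4F3B-9D1E-0A1B2C3D4E5F"
	fmt.Println(normalizeDiskUUID(reported))
	// Prints 6000c2948a524f3b9d1e0a1b2c3d4e5f, the form verifyDevicePath
	// later looks for inside the /dev/disk/by-id/scsi-* names.
}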
@@ -642,13 +658,19 @@ func (vs *VSphere) DetachDisk(diskID string, nodeName string) error {
return err
}

diskID, err := getVirtualDiskID(volPath, vmDevices)
if err != nil {
glog.Warningf("disk ID not found for %v ", volPath)
return err
}

// Remove disk from VM
device := vmDevices.Find(diskID)
if device == nil {
return fmt.Errorf("device '%s' not found", diskID)
}

err = vm.RemoveDevice(ctx, false, device)
err = vm.RemoveDevice(ctx, true, device)
if err != nil {
return err
}
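DetachDisk now takes the vmdk path, which is the identifier the volume plugin reliably has, resolves the device ID from the VM's hardware list, and removes the device rather than the volume itself (the boolean passed to RemoveDevice changes from false to true, which, assuming the govmomi signature of that era, asks for the backing files to be kept). A simplified, self-contained sketch of the lookup step; the map below stands in for the govmomi-backed vmDevices list and is not the real API:

package main

import (
	"errors"
	"fmt"
)

var errNoDiskIDFound = errors.New("No vSphere disk ID found")

// lookupDiskID plays the role of getVirtualDiskID: find the device whose
// backing file matches the volume path handed to DetachDisk.
func lookupDiskID(volPath string, devices map[string]string) (string, error) {
	for id, backingFile := range devices {
		if backingFile == volPath {
			return id, nil
		}
	}
	return "", errNoDiskIDFound
}

func main() {
	devices := map[string]string{
		"disk-1000-0": "[datastore1] kubevols/vol1.vmdk", // hypothetical device ID -> backing file
	}
	id, err := lookupDiskID("[datastore1] kubevols/vol1.vmdk", devices)
	if err != nil {
		fmt.Println("detach failed:", err)
		return
	}
	fmt.Println("removing device", id, "from the VM while keeping the backing vmdk")
}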
@@ -224,19 +224,19 @@ func TestVolumes(t *testing.T) {
t.Fatalf("Cannot create a new VMDK volume: %v", err)
}

diskID, _, err := vs.AttachDisk(volPath, "")
_, _, err = vs.AttachDisk(volPath, "")
if err != nil {
t.Fatalf("Cannot attach volume(%s) to VM(%s): %v", volPath, srvs[0], err)
}

err = vs.DetachDisk(diskID, "")
err = vs.DetachDisk(volPath, "")
if err != nil {
t.Fatalf("Cannot detach disk(%s) from VM(%s): %v", diskID, srvs[0], err)
t.Fatalf("Cannot detach disk(%s) from VM(%s): %v", volPath, srvs[0], err)
}

// todo: Deleting a volume after detach currently not working through API or UI (vSphere)
// err = vs.DeleteVolume(volPath)
// if err != nil {
// t.Fatalf("Cannot delete VMDK volume %s: %v", volPath, err)
// t.Fatalf("Cannot delete VMDK volume %s: %v", volPath, err)
// }
}
@@ -21,6 +21,7 @@ import (
"fmt"
"os"
"path"
"strings"

"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
@@ -208,7 +209,7 @@ func (b *vsphereVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
}
globalPDPath := makeGlobalPDPath(b.plugin.host, b.volPath)
if err := b.manager.AttachDisk(b, globalPDPath); err != nil {
glog.V(4).Infof("AttachDisk failed: %v", err)
glog.V(3).Infof("AttachDisk failed: %v", err)
return err
}
glog.V(3).Infof("vSphere volume %s attached", b.volPath)
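Moving the message from V(4) to V(3) makes an attach failure visible at a lower verbosity setting. A small sketch of how the glog -v threshold gates these calls; the flag values here are chosen purely for illustration:

package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	flag.Set("logtostderr", "true")
	flag.Set("v", "3")
	flag.Parse()

	glog.V(3).Infof("AttachDisk failed: %v", "sample error") // emitted at -v=3
	glog.V(4).Infof("only visible when verbosity is raised to 4 or higher")
	glog.Flush()
}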
@ -291,7 +292,11 @@ func (v *vsphereVolumeUnmounter) TearDownAt(dir string) error {
|
|||
return fmt.Errorf("directory %s is not mounted", dir)
|
||||
}
|
||||
|
||||
v.volPath = path.Base(refs[0])
|
||||
// space between datastore and vmdk name in volumePath is encoded as '\040' when returned by GetMountRefs().
|
||||
// volumePath eg: "[local] xxx.vmdk" provided to attach/mount
|
||||
// replacing \040 with space to match the actual volumePath
|
||||
mountPath := strings.Replace(path.Base(refs[0]), "\\040", " ", -1)
|
||||
v.volPath = mountPath
|
||||
glog.V(4).Infof("Found volume %s mounted to %s", v.volPath, dir)
|
||||
|
||||
// Reload list of references, there might be SetUpAt finished in the meantime
|
||||
|
|
|
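The mount references come back from the mount table, where a space in the datastore-qualified path (for example "[datastore1] vol1.vmdk") is encoded as the octal escape \040; the fix decodes it so the string matches the volume path originally used for attach. A runnable sketch of that decoding, using a made-up mount reference path:

package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	// Hypothetical mount reference as read back from the mount table.
	ref := `/var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[datastore1]\040vol1.vmdk`

	volPath := strings.Replace(path.Base(ref), "\\040", " ", -1)
	fmt.Println(volPath) // "[datastore1] vol1.vmdk", matching the path given to AttachDisk
}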
@@ -25,7 +25,6 @@ import (
"time"

"github.com/golang/glog"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/keymutex"
"k8s.io/kubernetes/pkg/volume"
)
@@ -34,6 +33,8 @@ const (
maxRetries = 10
)

var ErrProbeVolume = errors.New("Error scanning attached volumes")

// Singleton key mutex for keeping attach/detach operations for the same PD atomic
var attachDetachMutex = keymutex.NewKeyMutex()
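attachDetachMutex is a key-based mutex: attach and detach for the same volume path serialise, while operations on different volumes proceed independently. A conceptual stand-in using only the standard library (the real code uses k8s.io/kubernetes/pkg/util/keymutex, whose API is not reproduced here):

package main

import (
	"fmt"
	"sync"
)

// keyedMutex is a toy per-key lock illustrating the guarantee the volume util relies on.
type keyedMutex struct {
	mu    sync.Mutex
	locks map[string]*sync.Mutex
}

func (k *keyedMutex) lockKey(id string) *sync.Mutex {
	k.mu.Lock()
	if k.locks == nil {
		k.locks = map[string]*sync.Mutex{}
	}
	m, ok := k.locks[id]
	if !ok {
		m = &sync.Mutex{}
		k.locks[id] = m
	}
	k.mu.Unlock()
	m.Lock()
	return m
}

func main() {
	var km keyedMutex
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			m := km.lockKey("[datastore1] vol1.vmdk") // same key: operations run one at a time
			defer m.Unlock()
			fmt.Println("operation", n, "holds the per-volume lock")
		}(i)
	}
	wg.Wait()
}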
@@ -55,7 +56,7 @@ func (util *VsphereDiskUtil) AttachDisk(vm *vsphereVolumeMounter, globalPDPath s

diskID, diskUUID, attachError := cloud.AttachDisk(vm.volPath, "")
if attachError != nil {
return err
return attachError
} else if diskUUID == "" {
return errors.New("Disk UUID has no value")
}
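Returning err here was a wrong-variable bug: the failure lives in attachError, while the enclosing err is typically nil by the time this branch runs, so the caller saw success. A self-contained illustration of the difference; the function names are made up:

package main

import (
	"errors"
	"fmt"
)

func attach() (string, error) { return "", errors.New("attach failed") }

func oldBehaviour() error {
	var err error // outer error, still nil at this point
	_, attachError := attach()
	if attachError != nil {
		return err // bug: the real failure is silently dropped
	}
	return err
}

func newBehaviour() error {
	_, attachError := attach()
	if attachError != nil {
		return attachError // fix: propagate the actual attach failure
	}
	return nil
}

func main() {
	fmt.Println("old:", oldBehaviour()) // old: <nil>
	fmt.Println("new:", newBehaviour()) // new: attach failed
}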
@@ -68,7 +69,7 @@ func (util *VsphereDiskUtil) AttachDisk(vm *vsphereVolumeMounter, globalPDPath s
for {
devicePath = verifyDevicePath(diskUUID)
// probe the attached vol so that symlink in /dev/disk/by-id is created
probeAttachedVolume()
scsiHostRescan()

_, err := os.Stat(devicePath)
if err == nil {
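The surrounding loop keeps probing and rescanning until the by-id symlink for the newly attached disk appears, bounded by maxRetries. A simplified sketch of that wait pattern, without the rescan and probe side effects; the path and retry budget are illustrative only:

package main

import (
	"fmt"
	"os"
	"time"
)

// waitForDevice polls for a device node, mirroring the stat-and-retry structure
// of the AttachDisk loop.
func waitForDevice(devicePath string, maxRetries int) (string, error) {
	for i := 0; i < maxRetries; i++ {
		if _, err := os.Stat(devicePath); err == nil {
			return devicePath, nil
		}
		time.Sleep(time.Second)
	}
	return "", fmt.Errorf("device %s did not appear after %d retries", devicePath, maxRetries)
}

func main() {
	if _, err := waitForDevice("/dev/disk/by-id/scsi-3deadbeef", 2); err != nil {
		fmt.Println(err)
	}
}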
@@ -106,13 +107,25 @@ func (util *VsphereDiskUtil) AttachDisk(vm *vsphereVolumeMounter, globalPDPath s
return nil
}

// rescan scsi bus
func scsiHostRescan() {
scsi_path := "/sys/class/scsi_host/"
if dirs, err := ioutil.ReadDir(scsi_path); err == nil {
for _, f := range dirs {
name := scsi_path + f.Name() + "/scan"
data := []byte("- - -")
ioutil.WriteFile(name, data, 0666)
}
}
}

func verifyDevicePath(diskUUID string) string {
files, _ := ioutil.ReadDir("/dev/disk/by-id/")
for _, f := range files {
// TODO: should support other controllers
if strings.Contains(f.Name(), "scsi-") {
devID := f.Name()[len("scsi-"):len(f.Name())]
if strings.Contains(diskUUID, devID) {
if strings.Contains(devID, diskUUID) {
glog.V(4).Infof("Found disk attached as %q; full devicepath: %s\n", f.Name(), path.Join("/dev/disk/by-id/", f.Name()))
return path.Join("/dev/disk/by-id/", f.Name())
}
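The swapped Contains arguments matter because the entry under /dev/disk/by-id is usually longer than the bare disk UUID (the part after the scsi- prefix typically carries an extra leading identifier digit), so the check has to ask whether the device name contains the UUID, not the reverse. A small demonstration with a made-up device name:

package main

import (
	"fmt"
	"strings"
)

func main() {
	diskUUID := "6000c2948a524f3b9d1e0a1b2c3d4e5f"      // normalised UUID from getVirtualDiskUUID
	devName := "scsi-36000c2948a524f3b9d1e0a1b2c3d4e5f" // hypothetical /dev/disk/by-id entry
	devID := devName[len("scsi-"):]

	fmt.Println(strings.Contains(diskUUID, devID)) // false: the old check could never match
	fmt.Println(strings.Contains(devID, diskUUID)) // true: the fixed check finds the device
}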
@@ -122,19 +135,6 @@ func verifyDevicePath(diskUUID string) string {
return ""
}

func probeAttachedVolume() error {
executor := exec.New()
args := []string{"trigger"}
cmd := executor.Command("/usr/bin/udevadm", args...)
_, err := cmd.CombinedOutput()
if err != nil {
glog.Errorf("error running udevadm trigger %v\n", err)
return err
}
glog.V(4).Infof("Successfully probed all attachments")
return nil
}

// Unmounts the device and detaches the disk from the kubelet's host machine.
func (util *VsphereDiskUtil) DetachDisk(vu *vsphereVolumeUnmounter) error {
@@ -156,7 +156,7 @@ func (util *VsphereDiskUtil) DetachDisk(vu *vsphereVolumeUnmounter) error {
return err
}

if err = cloud.DetachDisk(vu.diskID, ""); err != nil {
if err = cloud.DetachDisk(vu.volPath, ""); err != nil {
return err
}
glog.V(2).Infof("Successfully detached vSphere volume %s", vu.volPath)