mirror of https://github.com/k3s-io/k3s

Improve Azure disk operations for vmas and vmss

parent cc845246e4
commit a54cb15d7b
@@ -21,9 +21,11 @@ import (
 	"time"
 
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
 	"github.com/golang/glog"
 
 	"k8s.io/apimachinery/pkg/types"
 	kwait "k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/kubernetes/pkg/cloudprovider"
 )
 
 const (
@@ -110,65 +112,11 @@ func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName t
 	return ss.DetachDiskByName(diskName, diskURI, nodeName)
 }
 
-// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI.
-func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
-	// 1. vmType is standard, get with availabilitySet.GetDiskLun.
+// getNodeDataDisks invokes vmSet interfaces to get data disks for the node.
+func (c *controllerCommon) getNodeDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) {
+	// 1. vmType is standard, get data disks with availabilitySet.GetDataDisks.
 	if c.cloud.VMType == vmTypeStandard {
-		return c.cloud.vmSet.GetDiskLun(diskName, diskURI, nodeName)
-	}
-
-	// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
-	ss, ok := c.cloud.vmSet.(*scaleSet)
-	if !ok {
-		return -1, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
-	}
-
-	// 3. If the node is managed by availability set, then get with availabilitySet.GetDiskLun.
-	managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
-	if err != nil {
-		return -1, err
-	}
-	if managedByAS {
-		// vm is managed by availability set.
-		return ss.availabilitySet.GetDiskLun(diskName, diskURI, nodeName)
-	}
-
-	// 4. Node is managed by vmss, get with scaleSet.GetDiskLun.
-	return ss.GetDiskLun(diskName, diskURI, nodeName)
-}
-
-// GetNextDiskLun searches all vhd attachment on the host and find unused lun. Return -1 if all luns are used.
-func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
-	// 1. vmType is standard, get with availabilitySet.GetNextDiskLun.
-	if c.cloud.VMType == vmTypeStandard {
-		return c.cloud.vmSet.GetNextDiskLun(nodeName)
-	}
-
-	// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
-	ss, ok := c.cloud.vmSet.(*scaleSet)
-	if !ok {
-		return -1, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
-	}
-
-	// 3. If the node is managed by availability set, then get with availabilitySet.GetNextDiskLun.
-	managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
-	if err != nil {
-		return -1, err
-	}
-	if managedByAS {
-		// vm is managed by availability set.
-		return ss.availabilitySet.GetNextDiskLun(nodeName)
-	}
-
-	// 4. Node is managed by vmss, get with scaleSet.GetNextDiskLun.
-	return ss.GetNextDiskLun(nodeName)
-}
-
-// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName.
-func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
-	// 1. vmType is standard, check with availabilitySet.DisksAreAttached.
-	if c.cloud.VMType == vmTypeStandard {
-		return c.cloud.vmSet.DisksAreAttached(diskNames, nodeName)
+		return c.cloud.vmSet.GetDataDisks(nodeName)
 	}
 
 	// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
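The hunk above collapses three copies of the same dispatch logic into one helper: check the cheap vmType case first, type-assert vmSet to *scaleSet, and fall back to the availability-set implementation for nodes that vmss does not manage. A minimal, self-contained sketch of that pattern follows; diskLister, standardSet, and hypotheticalScaleSet are illustrative stand-ins, not the cloud provider's real types.

package main

import "fmt"

// diskLister is a stand-in for the provider's VMSet interface.
type diskLister interface {
	GetDataDisks(node string) ([]string, error)
}

type standardSet struct{}

func (standardSet) GetDataDisks(node string) ([]string, error) {
	return []string{"disk-a"}, nil
}

// hypotheticalScaleSet mirrors the scaleSet fallback: nodes that still
// live in an availability set are delegated back to that implementation.
type hypotheticalScaleSet struct {
	availabilitySet diskLister
	managedByAS     map[string]bool // which nodes are availability-set managed
}

func (s hypotheticalScaleSet) GetDataDisks(node string) ([]string, error) {
	if s.managedByAS[node] {
		return s.availabilitySet.GetDataDisks(node)
	}
	return []string{"disk-b"}, nil
}

func main() {
	var vmSet diskLister = hypotheticalScaleSet{
		availabilitySet: standardSet{},
		managedByAS:     map[string]bool{"vmas-node": true},
	}
	for _, node := range []string{"vmas-node", "vmss-node"} {
		disks, _ := vmSet.GetDataDisks(node)
		fmt.Println(node, "->", disks) // vmas-node -> [disk-a], vmss-node -> [disk-b]
	}
}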
@@ -177,16 +125,88 @@ func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.N
 		return nil, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
 	}
 
-	// 3. If the node is managed by availability set, then check with availabilitySet.DisksAreAttached.
+	// 3. If the node is managed by availability set, then get with availabilitySet.GetDataDisks.
 	managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
 	if err != nil {
 		return nil, err
 	}
 	if managedByAS {
 		// vm is managed by availability set.
-		return ss.availabilitySet.DisksAreAttached(diskNames, nodeName)
+		return ss.availabilitySet.GetDataDisks(nodeName)
 	}
 
-	// 4. Node is managed by vmss, check with scaleSet.DisksAreAttached.
-	return ss.DisksAreAttached(diskNames, nodeName)
+	// 4. Node is managed by vmss, get with scaleSet.GetDataDisks.
+	return ss.GetDataDisks(nodeName)
 }
+
+// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI.
+func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
+	disks, err := c.getNodeDataDisks(nodeName)
+	if err != nil {
+		glog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
+		return -1, err
+	}
+
+	for _, disk := range disks {
+		if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
+			(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
+			(disk.ManagedDisk != nil && *disk.ManagedDisk.ID == diskURI) {
+			// found the disk
+			glog.V(4).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
+			return *disk.Lun, nil
+		}
+	}
+	return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName)
+}
+
+// GetNextDiskLun searches all vhd attachment on the host and find unused lun. Return -1 if all luns are used.
+func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
+	disks, err := c.getNodeDataDisks(nodeName)
+	if err != nil {
+		glog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
+		return -1, err
+	}
+
+	used := make([]bool, maxLUN)
+	for _, disk := range disks {
+		if disk.Lun != nil {
+			used[*disk.Lun] = true
+		}
+	}
+	for k, v := range used {
+		if !v {
+			return int32(k), nil
+		}
+	}
+	return -1, fmt.Errorf("all luns are used")
+}
+
+// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName.
+func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
+	attached := make(map[string]bool)
+	for _, diskName := range diskNames {
+		attached[diskName] = false
+	}
+
+	disks, err := c.getNodeDataDisks(nodeName)
+	if err != nil {
+		if err == cloudprovider.InstanceNotFound {
+			// if host doesn't exist, no need to detach
+			glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
+				nodeName, diskNames)
+			return attached, nil
+		}
+
+		return attached, err
+	}
+
+	for _, disk := range disks {
+		for _, diskName := range diskNames {
+			if disk.Name != nil && diskName != "" && *disk.Name == diskName {
+				attached[diskName] = true
+			}
+		}
+	}
+
+	return attached, nil
+}
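GetNextDiskLun above allocates the first free LUN by marking every in-use LUN in a fixed-size bool slice and scanning for the first gap. A runnable sketch of the same scan, with plain int32 values standing in for compute.DataDisk entries and maxLUN assumed to be 64 purely for illustration (the real constant lives elsewhere in the package):

package main

import "fmt"

const maxLUN = 64 // assumed value for this example

// nextFreeLUN returns the lowest LUN not present in usedLUNs,
// or -1 and an error when every slot is taken.
func nextFreeLUN(usedLUNs []int32) (int32, error) {
	used := make([]bool, maxLUN)
	for _, lun := range usedLUNs {
		used[lun] = true
	}
	for k, v := range used {
		if !v {
			return int32(k), nil
		}
	}
	return -1, fmt.Errorf("all luns are used")
}

func main() {
	lun, err := nextFreeLUN([]int32{0, 1, 3}) // LUN 2 is the first gap
	fmt.Println(lun, err)                     // prints: 2 <nil>
}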
@@ -24,7 +24,6 @@ import (
 	"github.com/golang/glog"
 
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/kubernetes/pkg/cloudprovider"
 )
 
 // AttachDisk attaches a vhd to vm
@@ -156,71 +155,16 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t
 	return err
 }
 
-// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI
-func (as *availabilitySet) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
+// GetDataDisks gets a list of data disks attached to the node.
+func (as *availabilitySet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) {
 	vm, err := as.getVirtualMachine(nodeName)
 	if err != nil {
-		return -1, err
+		return nil, err
 	}
-	disks := *vm.StorageProfile.DataDisks
-	for _, disk := range disks {
-		if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
-			(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
-			(disk.ManagedDisk != nil && *disk.ManagedDisk.ID == diskURI) {
-			// found the disk
-			glog.V(4).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
-			return *disk.Lun, nil
-		}
+
+	if vm.StorageProfile.DataDisks == nil {
+		return nil, nil
 	}
-	return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName)
-}
-
-// GetNextDiskLun searches all vhd attachment on the host and find unused lun
-// return -1 if all luns are used
-func (as *availabilitySet) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
-	vm, err := as.getVirtualMachine(nodeName)
-	if err != nil {
-		return -1, err
-	}
-	used := make([]bool, maxLUN)
-	disks := *vm.StorageProfile.DataDisks
-	for _, disk := range disks {
-		if disk.Lun != nil {
-			used[*disk.Lun] = true
-		}
-	}
-	for k, v := range used {
-		if !v {
-			return int32(k), nil
-		}
-	}
-	return -1, fmt.Errorf("All Luns are used")
-}
-
-// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName
-func (as *availabilitySet) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
-	attached := make(map[string]bool)
-	for _, diskName := range diskNames {
-		attached[diskName] = false
-	}
-	vm, err := as.getVirtualMachine(nodeName)
-	if err == cloudprovider.InstanceNotFound {
-		// if host doesn't exist, no need to detach
-		glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
-			nodeName, diskNames)
-		return attached, nil
-	} else if err != nil {
-		return attached, err
-	}
-
-	disks := *vm.StorageProfile.DataDisks
-	for _, disk := range disks {
-		for _, diskName := range diskNames {
-			if disk.Name != nil && diskName != "" && *disk.Name == diskName {
-				attached[diskName] = true
-			}
-		}
-	}
-
-	return attached, nil
+
+	return *vm.StorageProfile.DataDisks, nil
 }
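The new GetDataDisks guards vm.StorageProfile.DataDisks before dereferencing it; in this SDK version DataDisks is a pointer to a slice, so a VM that reports no data disks would otherwise trigger a nil-pointer panic (the old code dereferenced it unconditionally). A tiny sketch of the same guard, with a stand-in struct in place of the SDK type:

package main

import "fmt"

// storageProfile is a stand-in; DataDisks mimics *[]compute.DataDisk.
type storageProfile struct {
	DataDisks *[]string
}

// dataDisks returns the disk list, treating a nil pointer as "no disks"
// instead of dereferencing it.
func dataDisks(p storageProfile) []string {
	if p.DataDisks == nil {
		return nil
	}
	return *p.DataDisks
}

func main() {
	fmt.Println(dataDisks(storageProfile{}))                          // []
	fmt.Println(dataDisks(storageProfile{DataDisks: &[]string{"d"}})) // [d]
}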
@@ -24,7 +24,6 @@ import (
 	"github.com/golang/glog"
 
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/kubernetes/pkg/cloudprovider"
 )
 
 // AttachDisk attaches a vhd to vm
@@ -138,76 +137,16 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No
 	return err
 }
 
-// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI
-func (ss *scaleSet) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
+// GetDataDisks gets a list of data disks attached to the node.
+func (ss *scaleSet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) {
 	_, _, vm, err := ss.getVmssVM(string(nodeName))
 	if err != nil {
-		return -1, err
+		return nil, err
 	}
 
-	disks := *vm.StorageProfile.DataDisks
-	for _, disk := range disks {
-		if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
-			(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
-			(disk.ManagedDisk != nil && *disk.ManagedDisk.ID == diskURI) {
-			// found the disk
-			glog.V(4).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
-			return *disk.Lun, nil
-		}
+	if vm.StorageProfile.DataDisks == nil {
+		return nil, nil
 	}
-	return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName)
-}
-
-// GetNextDiskLun searches all vhd attachment on the host and find unused lun
-// return -1 if all luns are used
-func (ss *scaleSet) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
-	_, _, vm, err := ss.getVmssVM(string(nodeName))
-	if err != nil {
-		return -1, err
-	}
-
-	used := make([]bool, maxLUN)
-	disks := *vm.StorageProfile.DataDisks
-	for _, disk := range disks {
-		if disk.Lun != nil {
-			used[*disk.Lun] = true
-		}
-	}
-	for k, v := range used {
-		if !v {
-			return int32(k), nil
-		}
-	}
-	return -1, fmt.Errorf("All Luns are used")
-}
-
-// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName
-func (ss *scaleSet) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
-	attached := make(map[string]bool)
-	for _, diskName := range diskNames {
-		attached[diskName] = false
-	}
-
-	_, _, vm, err := ss.getVmssVM(string(nodeName))
-	if err != nil {
-		if err == cloudprovider.InstanceNotFound {
-			// if host doesn't exist, no need to detach
-			glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
-				nodeName, diskNames)
-			return attached, nil
-		}
-
-		return attached, err
-	}
-
-	disks := *vm.StorageProfile.DataDisks
-	for _, disk := range disks {
-		for _, diskName := range diskNames {
-			if disk.Name != nil && diskName != "" && *disk.Name == diskName {
-				attached[diskName] = true
-			}
-		}
-	}
-
-	return attached, nil
+
+	return *vm.StorageProfile.DataDisks, nil
 }
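Both the removed per-vmset code and the new common DisksAreAttached treat cloudprovider.InstanceNotFound specially: a node that no longer exists is reported with every disk detached rather than as an error, so the attach/detach controller can still converge. A sketch of that convention; errInstanceNotFound and the getDisks callback are stand-ins, and the sketch uses errors.Is where the original compares with ==:

package main

import (
	"errors"
	"fmt"
)

var errInstanceNotFound = errors.New("instance not found")

// disksAreAttached marks each requested disk as attached only if the node's
// disk listing contains it; a missing node yields an all-false map, not an error.
func disksAreAttached(diskNames []string, getDisks func() ([]string, error)) (map[string]bool, error) {
	attached := make(map[string]bool, len(diskNames))
	for _, name := range diskNames {
		attached[name] = false
	}

	disks, err := getDisks()
	if err != nil {
		if errors.Is(err, errInstanceNotFound) {
			// Host is gone: nothing can be attached to it.
			return attached, nil
		}
		return attached, err
	}

	for _, disk := range disks {
		if _, ok := attached[disk]; ok {
			attached[disk] = true
		}
	}
	return attached, nil
}

func main() {
	gone := func() ([]string, error) { return nil, errInstanceNotFound }
	fmt.Println(disksAreAttached([]string{"d1"}, gone)) // map[d1:false] <nil>
}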
@@ -1180,14 +1180,6 @@ func (f *fakeVMSet) DetachDiskByName(diskName, diskURI string, nodeName types.No
 	return fmt.Errorf("unimplemented")
 }
 
-func (f *fakeVMSet) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
-	return -1, fmt.Errorf("unimplemented")
-}
-
-func (f *fakeVMSet) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
-	return -1, fmt.Errorf("unimplemented")
-}
-
-func (f *fakeVMSet) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
+func (f *fakeVMSet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) {
 	return nil, fmt.Errorf("unimplemented")
 }
@@ -62,10 +62,6 @@ type VMSet interface {
 	AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error
 	// DetachDiskByName detaches a vhd from host. The vhd can be identified by diskName or diskURI.
 	DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error
-	// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI.
-	GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error)
-	// GetNextDiskLun searches all vhd attachment on the host and find unused lun. Return -1 if all luns are used.
-	GetNextDiskLun(nodeName types.NodeName) (int32, error)
-	// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName.
-	DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error)
+	// GetDataDisks gets a list of data disks attached to the node.
+	GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error)
 }
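Shrinking VMSet to a single GetDataDisks method is what lets the common layer own the LUN-search and attachment-check logic above. When an interface changes like this, a compile-time assertion is a cheap way to catch any implementation that falls out of sync; a hypothetical example (VMSetLike and myVMSet are illustrative stand-ins, since the real implementations already satisfy the interface by construction):

package main

import "fmt"

// VMSetLike is a stand-in for the trimmed VMSet interface.
type VMSetLike interface {
	GetDataDisks(nodeName string) ([]string, error)
}

type myVMSet struct{}

func (myVMSet) GetDataDisks(nodeName string) ([]string, error) {
	return nil, fmt.Errorf("unimplemented")
}

// Fails to compile if myVMSet ever stops satisfying VMSetLike.
var _ VMSetLike = myVMSet{}

func main() {
	_, err := myVMSet{}.GetDataDisks("node-1")
	fmt.Println(err) // unimplemented
}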