Add vmType checking in Azure disk controller common

Pengfei Ni 2018-02-13 10:10:23 +08:00
parent 1d3cf76d75
commit fbc871be32
3 changed files with 117 additions and 40 deletions
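For context, the VMType checked throughout this commit comes from the Azure cloud provider's config file, where it selects between availability sets ("standard") and virtual machine scale sets ("vmss"). A minimal sketch of the relevant field, assuming the Config struct layout this provider used at the time (an excerpt, other fields elided):

// Config is an excerpt of the azure cloud provider configuration.
// VMType decides which VMSet implementation backs c.cloud.vmSet:
// "standard" selects availabilitySet, "vmss" selects scaleSet.
type Config struct {
	// ... other fields elided ...
	VMType string `json:"vmType" yaml:"vmType"`
}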

azure_controller_common.go

@@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,6 +17,7 @@ limitations under the License.
package azure
import (
"fmt"
"time"
"github.com/Azure/azure-sdk-for-go/arm/compute"
@@ -57,25 +58,135 @@ type controllerCommon struct {
// AttachDisk attaches a vhd to a vm. The vhd must exist; it can be identified by diskName, diskURI, and lun.
func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
return c.cloud.vmSet.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode)
// 1. vmType is standard, attach with availabilitySet.AttachDisk.
if c.cloud.VMType == vmTypeStandard {
return c.cloud.vmSet.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode)
}
// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
ss, ok := c.cloud.vmSet.(*scaleSet)
if !ok {
return fmt.Errorf("error converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
}
// 3. If the node is managed by availability set, then attach with availabilitySet.AttachDisk.
managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
if err != nil {
return err
}
if managedByAS {
// vm is managed by availability set.
return ss.availabilitySet.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode)
}
// 4. Node is managed by vmss, attach with scaleSet.AttachDisk.
return ss.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode)
}
// DetachDiskByName detaches a vhd from the host. The vhd can be identified by diskName or diskURI.
func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
return c.cloud.vmSet.DetachDiskByName(diskName, diskURI, nodeName)
// 1. vmType is standard, detach with availabilitySet.DetachDiskByName.
if c.cloud.VMType == vmTypeStandard {
return c.cloud.vmSet.DetachDiskByName(diskName, diskURI, nodeName)
}
// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
ss, ok := c.cloud.vmSet.(*scaleSet)
if !ok {
return fmt.Errorf("error converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
}
// 3. If the node is managed by availability set, then detach with availabilitySet.DetachDiskByName.
managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
if err != nil {
return err
}
if managedByAS {
// vm is managed by availability set.
return ss.availabilitySet.DetachDiskByName(diskName, diskURI, nodeName)
}
// 4. Node is managed by vmss, detach with scaleSet.DetachDiskByName.
return ss.DetachDiskByName(diskName, diskURI, nodeName)
}
// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI.
func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
return c.cloud.vmSet.GetDiskLun(diskName, diskURI, nodeName)
// 1. vmType is standard, get with availabilitySet.GetDiskLun.
if c.cloud.VMType == vmTypeStandard {
return c.cloud.vmSet.GetDiskLun(diskName, diskURI, nodeName)
}
// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
ss, ok := c.cloud.vmSet.(*scaleSet)
if !ok {
return -1, fmt.Errorf("error converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
}
// 3. If the node is managed by availability set, then get with availabilitySet.GetDiskLun.
managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
if err != nil {
return -1, err
}
if managedByAS {
// vm is managed by availability set.
return ss.availabilitySet.GetDiskLun(diskName, diskURI, nodeName)
}
// 4. Node is managed by vmss, get with scaleSet.GetDiskLun.
return ss.GetDiskLun(diskName, diskURI, nodeName)
}
// GetNextDiskLun searches all vhd attachments on the host and finds an unused lun. Returns -1 if all luns are used.
func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
return c.cloud.vmSet.GetNextDiskLun(nodeName)
// 1. vmType is standard, get with availabilitySet.GetNextDiskLun.
if c.cloud.VMType == vmTypeStandard {
return c.cloud.vmSet.GetNextDiskLun(nodeName)
}
// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
ss, ok := c.cloud.vmSet.(*scaleSet)
if !ok {
return -1, fmt.Errorf("error converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
}
// 3. If the node is managed by availability set, then get with availabilitySet.GetNextDiskLun.
managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
if err != nil {
return -1, err
}
if managedByAS {
// vm is managed by availability set.
return ss.availabilitySet.GetNextDiskLun(nodeName)
}
// 4. Node is managed by vmss, get with scaleSet.GetNextDiskLun.
return ss.GetNextDiskLun(nodeName)
}
// DisksAreAttached checks whether a list of volumes is attached to the node with the specified NodeName.
func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
return c.cloud.vmSet.DisksAreAttached(diskNames, nodeName)
// 1. vmType is standard, check with availabilitySet.DisksAreAttached.
if c.cloud.VMType == vmTypeStandard {
return c.cloud.vmSet.DisksAreAttached(diskNames, nodeName)
}
// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
ss, ok := c.cloud.vmSet.(*scaleSet)
if !ok {
return nil, fmt.Errorf("error converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
}
// 3. If the node is managed by availability set, then check with availabilitySet.DisksAreAttached.
managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
if err != nil {
return nil, err
}
if managedByAS {
// vm is managed by availability set.
return ss.availabilitySet.DisksAreAttached(diskNames, nodeName)
}
// 4. Node is managed by vmss, check with scaleSet.DisksAreAttached.
return ss.DisksAreAttached(diskNames, nodeName)
}
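All five methods above repeat the same four-step dispatch. A natural refactor, sketched below with a hypothetical getNodeVMSet helper (not part of this commit), would resolve the correct VMSet once and let each method delegate to it:

// getNodeVMSet is a hypothetical helper that resolves which VMSet
// implementation manages the given node, following the same four
// steps as the methods above.
func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName) (VMSet, error) {
	// 1. vmType is standard, nodes are always managed by availabilitySet.
	if c.cloud.VMType == vmTypeStandard {
		return c.cloud.vmSet, nil
	}

	// 2. Otherwise vmSet must be a scaleSet.
	ss, ok := c.cloud.vmSet.(*scaleSet)
	if !ok {
		return nil, fmt.Errorf("error converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
	}

	// 3. Nodes in availability sets are delegated to the wrapped availabilitySet.
	managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
	if err != nil {
		return nil, err
	}
	if managedByAS {
		return ss.availabilitySet, nil
	}

	// 4. Everything else is managed by the scale set itself.
	return ss, nil
}

With such a helper, AttachDisk would reduce to resolving the VMSet and calling vmset.AttachDisk(...), and the other four methods would shrink the same way.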

azure_controller_vmss.go

@@ -33,10 +33,6 @@ import (
func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
ssName, instanceID, vm, err := ss.getVmssVM(string(nodeName))
if err != nil {
if err == ErrorNotVmssInstance {
glog.V(3).Infof("AttachDisk: node %q is not managed by VMSS, retry with availabilitySet", nodeName)
return ss.availabilitySet.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode)
}
return err
}
@@ -98,11 +94,6 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
ssName, instanceID, vm, err := ss.getVmssVM(string(nodeName))
if err != nil {
if err == ErrorNotVmssInstance {
glog.V(3).Infof("DetachDiskByName: node %q is not managed by VMSS, retry with availabilitySet", nodeName)
return ss.availabilitySet.DetachDiskByName(diskName, diskURI, nodeName)
}
return err
}
@@ -152,11 +143,6 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No
func (ss *scaleSet) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
_, _, vm, err := ss.getVmssVM(string(nodeName))
if err != nil {
if err == ErrorNotVmssInstance {
glog.V(3).Infof("GetDiskLun: node %q is not managed by VMSS, retry with availabilitySet", nodeName)
return ss.availabilitySet.GetDiskLun(diskName, diskURI, nodeName)
}
return -1, err
}
@@ -178,11 +164,6 @@ func (ss *scaleSet) GetDiskLun(diskName, diskURI string, nodeName types.NodeName
func (ss *scaleSet) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
_, _, vm, err := ss.getVmssVM(string(nodeName))
if err != nil {
if err == ErrorNotVmssInstance {
glog.V(3).Infof("GetNextDiskLun: node %q is not managed by VMSS, retry with availabilitySet", nodeName)
return ss.availabilitySet.GetNextDiskLun(nodeName)
}
return -1, err
}
@@ -210,11 +191,6 @@ func (ss *scaleSet) DisksAreAttached(diskNames []string, nodeName types.NodeName
_, _, vm, err := ss.getVmssVM(string(nodeName))
if err != nil {
if err == ErrorNotVmssInstance {
glog.V(3).Infof("DisksAreAttached: node %q is not managed by VMSS, retry with availabilitySet", nodeName)
return ss.availabilitySet.DisksAreAttached(diskNames, nodeName)
}
if err == cloudprovider.InstanceNotFound {
// if host doesn't exist, no need to detach
glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
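The branches deleted in this file relied on the ErrorNotVmssInstance sentinel: getVmssVM reported it for availability-set nodes, and every disk operation retried against the availability set. Since the common controller now routes such nodes before calling into scaleSet, that fallback is dead code. A simplified, self-contained sketch of the retired pattern (names and stubs are illustrative, not the provider's API):

package main

import (
	"errors"
	"fmt"
)

// errNotVmssInstance mirrors the provider's ErrorNotVmssInstance sentinel.
var errNotVmssInstance = errors.New("not a vmss instance")

// vmssAttach is a stub standing in for scaleSet.AttachDisk.
func vmssAttach(node string) error {
	if node == "as-node" {
		return errNotVmssInstance
	}
	return nil
}

// availabilitySetAttach is a stub standing in for availabilitySet.AttachDisk.
func availabilitySetAttach(node string) error {
	fmt.Printf("attached via availability set: %s\n", node)
	return nil
}

// attach shows the old call-site pattern: compare against the sentinel
// with == and retry on the availability-set path.
func attach(node string) error {
	if err := vmssAttach(node); err != nil {
		if err == errNotVmssInstance {
			return availabilitySetAttach(node)
		}
		return err
	}
	return nil
}

func main() {
	_ = attach("as-node") // falls back to the availability set
}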

azure_vmss.go

@@ -90,16 +90,6 @@ func newScaleSet(az *Cloud) (VMSet, error) {
// getVmssVM gets virtualMachineScaleSetVM by nodeName from cache.
// It returns cloudprovider.InstanceNotFound if the node does not belong to any scale set.
func (ss *scaleSet) getVmssVM(nodeName string) (ssName, instanceID string, vm computepreview.VirtualMachineScaleSetVM, err error) {
// Known node not managed by scale sets.
managedByAS, err := ss.isNodeManagedByAvailabilitySet(nodeName)
if err != nil {
return "", "", vm, err
}
if managedByAS {
glog.V(8).Infof("Found node %q in availabilitySetNodesCache", nodeName)
return "", "", vm, ErrorNotVmssInstance
}
instanceID, err = getScaleSetVMInstanceID(nodeName)
if err != nil {
return ssName, instanceID, vm, err
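With the availability-set short-circuit removed, getVmssVM proceeds directly to getScaleSetVMInstanceID, which derives the instance ID from the node name: VMSS node names carry a six-character base-36 suffix encoding the instance number. A sketch of that parsing, close to the provider's helper at the time but shown here for illustration (it lives in the same package and needs the fmt and strconv imports plus the ErrorNotVmssInstance sentinel):

// getScaleSetVMInstanceID extracts the VMSS instance ID from a node name,
// e.g. "k8s-agentpool1-12345678-vmss000001" yields instance ID "1"
// ("000001" parsed as base 36).
func getScaleSetVMInstanceID(machineName string) (string, error) {
	nameLength := len(machineName)
	if nameLength < 6 {
		return "", ErrorNotVmssInstance
	}

	instanceID, err := strconv.ParseUint(machineName[nameLength-6:], 36, 64)
	if err != nil {
		return "", ErrorNotVmssInstance
	}

	return fmt.Sprintf("%d", instanceID), nil
}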