mirror of https://github.com/k3s-io/k3s
Merge pull request #59652 from feiskyer/vmss-cache
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Add generic cache for Azure VMSS

**What this PR does / why we need it**: This PR adds a generic cache for VMSS and removes the old list-based cache.

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*: Continuation of #58770.

**Special notes for your reviewer**: Depends on #59520.

**Release note**:

```release-note
Add generic cache for Azure VMSS
```
commit 69324f90e6
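The diff below builds on a `timedCache` type that landed separately in #59520, so its implementation does not appear in this PR. As a rough sketch only, assuming the semantics implied by the call sites below (`newTimedcache(ttl, getter)`, `Get(key)`, `Delete(key)`); the names `getFunc` and `timedCacheEntry` and the locking details are guesses, not the real code:

```go
package azure

import (
	"sync"
	"time"
)

// getFunc fetches a fresh value for key on a cache miss or expiry.
type getFunc func(key string) (interface{}, error)

type timedCacheEntry struct {
	data      interface{}
	createdOn time.Time
}

// timedCache is a getter-backed cache whose entries expire after a fixed TTL.
type timedCache struct {
	store  map[string]*timedCacheEntry
	lock   sync.Mutex
	getter getFunc
	ttl    time.Duration
}

func newTimedcache(ttl time.Duration, getter getFunc) (*timedCache, error) {
	return &timedCache{
		store:  make(map[string]*timedCacheEntry),
		getter: getter,
		ttl:    ttl,
	}, nil
}

// Get returns the cached value for key, invoking the getter on a miss or
// when the entry is older than the TTL.
func (c *timedCache) Get(key string) (interface{}, error) {
	c.lock.Lock()
	defer c.lock.Unlock()

	if entry, ok := c.store[key]; ok && time.Since(entry.createdOn) < c.ttl {
		return entry.data, nil
	}

	data, err := c.getter(key)
	if err != nil {
		return nil, err
	}
	c.store[key] = &timedCacheEntry{data: data, createdOn: time.Now()}
	return data, nil
}

// Delete evicts key so the next Get forces a refresh.
func (c *timedCache) Delete(key string) {
	c.lock.Lock()
	defer c.lock.Unlock()
	delete(c.store, key)
}
```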
`BUILD`:

```diff
@@ -28,6 +28,7 @@ go_library(
         "azure_storageaccount.go",
         "azure_vmsets.go",
         "azure_vmss.go",
+        "azure_vmss_cache.go",
         "azure_wrap.go",
         "azure_zones.go",
     ],
@@ -75,6 +76,7 @@ go_test(
         "azure_storage_test.go",
         "azure_storageaccount_test.go",
         "azure_test.go",
+        "azure_vmss_cache_test.go",
         "azure_vmss_test.go",
         "azure_wrap_test.go",
     ],
```
```diff
@@ -244,7 +244,10 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
 	}
 
 	if strings.EqualFold(vmTypeVMSS, az.Config.VMType) {
-		az.vmSet = newScaleSet(&az)
+		az.vmSet, err = newScaleSet(&az)
+		if err != nil {
+			return nil, err
+		}
 	} else {
 		az.vmSet = newAvailabilitySet(&az)
 	}
```
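Because the caches are now built eagerly inside `newScaleSet` (each `new*Cache` constructor returns the error from `newTimedcache`), `newScaleSet` gains an error return value and `NewCloud` propagates it instead of assuming construction always succeeds.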
```diff
@@ -674,7 +674,7 @@ func (fVMC *fakeVirtualMachineScaleSetVMsClient) Get(resourceGroupName string, V
 	fVMC.mutex.Lock()
 	defer fVMC.mutex.Unlock()
 
-	vmKey := fmt.Sprintf("%s-%s", VMScaleSetName, instanceID)
+	vmKey := fmt.Sprintf("%s_%s", VMScaleSetName, instanceID)
 	if scaleSetMap, ok := fVMC.FakeStore[resourceGroupName]; ok {
 		if entity, ok := scaleSetMap[vmKey]; ok {
 			return entity, nil
```
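The fake store's key format changes from `%s-%s` to `%s_%s` so the test fixtures line up with the real cache keys: `makeVmssVMName` in the new cache file (below) joins the scale set name and instance ID with `vmssNameSeparator = "_"`.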
`azure_vmss.go`:

```diff
@@ -23,8 +23,6 @@ import (
 	"sort"
 	"strconv"
 	"strings"
-	"sync"
-	"time"
 
 	"github.com/Azure/azure-sdk-for-go/arm/compute"
 	"github.com/Azure/azure-sdk-for-go/arm/network"
@@ -33,7 +31,6 @@ import (
 
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 )
@@ -45,26 +42,6 @@ var (
 	scaleSetNameRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines(?:.*)`)
 )
 
-// scaleSetVMInfo includes basic information of a virtual machine.
-type scaleSetVMInfo struct {
-	// The ID of the machine.
-	ID string
-	// Instance ID of the machine (only for scale sets vm).
-	InstanceID string
-	// Node name of the machine.
-	NodeName string
-	// Set name of the machine.
-	ScaleSetName string
-	// The type of the machine.
-	Type string
-	// The region of the machine.
-	Region string
-	// Primary interface ID of the machine.
-	PrimaryInterfaceID string
-	// Fault domain of the machine.
-	FaultDomain string
-}
-
 // scaleSet implements VMSet interface for Azure scale set.
 type scaleSet struct {
 	*Cloud
@@ -73,207 +50,123 @@ type scaleSet struct {
 	// (e.g. master nodes) may not belong to any scale sets.
 	availabilitySet VMSet
 
-	cacheMutex sync.Mutex
-	// A local cache of scale sets. The key is scale set name and the value is a
-	// list of virtual machines belonging to the scale set.
-	cache map[string][]scaleSetVMInfo
-	availabilitySetNodesCache sets.String
+	vmssCache                      *timedCache
+	vmssVMCache                    *timedCache
+	nodeNameToScaleSetMappingCache *timedCache
+	availabilitySetNodesCache      *timedCache
 }
 
 // newScaleSet creates a new scaleSet.
-func newScaleSet(az *Cloud) VMSet {
+func newScaleSet(az *Cloud) (VMSet, error) {
+	var err error
 	ss := &scaleSet{
 		Cloud:           az,
 		availabilitySet: newAvailabilitySet(az),
-		availabilitySetNodesCache: sets.NewString(),
-		cache: make(map[string][]scaleSetVMInfo),
 	}
 
-	go wait.Until(func() {
-		ss.cacheMutex.Lock()
-		defer ss.cacheMutex.Unlock()
-
-		if err := ss.updateCache(); err != nil {
-			glog.Errorf("updateCache failed: %v", err)
-		}
-	}, 5*time.Minute, wait.NeverStop)
-
-	return ss
+	ss.nodeNameToScaleSetMappingCache, err = ss.newNodeNameToScaleSetMappingCache()
+	if err != nil {
+		return nil, err
+	}
+
+	ss.availabilitySetNodesCache, err = ss.newAvailabilitySetNodesCache()
+	if err != nil {
+		return nil, err
+	}
+
+	ss.vmssCache, err = ss.newVmssCache()
+	if err != nil {
+		return nil, err
+	}
+
+	ss.vmssVMCache, err = ss.newVmssVMCache()
+	if err != nil {
+		return nil, err
+	}
+
+	return ss, nil
 }
 
-// updateCache updates scale sets cache. It should be called within a lock.
-func (ss *scaleSet) updateCache() error {
-	scaleSetNames, err := ss.listScaleSetsWithRetry()
-	if err != nil {
-		return err
-	}
-
-	localCache := make(map[string][]scaleSetVMInfo)
-	for _, scaleSetName := range scaleSetNames {
-		if _, ok := localCache[scaleSetName]; !ok {
-			localCache[scaleSetName] = make([]scaleSetVMInfo, 0)
-		}
-		vms, err := ss.listScaleSetVMsWithRetry(scaleSetName)
-		if err != nil {
-			return err
-		}
-
-		for _, vm := range vms {
-			nodeName := ""
-			if vm.OsProfile != nil && vm.OsProfile.ComputerName != nil {
-				nodeName = strings.ToLower(*vm.OsProfile.ComputerName)
-			}
-
-			vmSize := ""
-			if vm.Sku != nil && vm.Sku.Name != nil {
-				vmSize = *vm.Sku.Name
-			}
-
-			primaryInterfaceID, err := ss.getPrimaryInterfaceID(vm)
-			if err != nil {
-				glog.Errorf("getPrimaryInterfaceID for %s failed: %v", nodeName, err)
-				return err
-			}
-
-			faultDomain := ""
-			if vm.InstanceView != nil && vm.InstanceView.PlatformFaultDomain != nil {
-				faultDomain = strconv.Itoa(int(*vm.InstanceView.PlatformFaultDomain))
-			}
-
-			localCache[scaleSetName] = append(localCache[scaleSetName], scaleSetVMInfo{
-				ID:                 *vm.ID,
-				Type:               vmSize,
-				NodeName:           nodeName,
-				FaultDomain:        faultDomain,
-				ScaleSetName:       scaleSetName,
-				Region:             *vm.Location,
-				InstanceID:         *vm.InstanceID,
-				PrimaryInterfaceID: primaryInterfaceID,
-			})
-		}
-	}
-
-	// Only update cache after all steps are success.
-	ss.cache = localCache
-
-	return nil
-}
-
-// getCachedVirtualMachine gets virtualMachine by nodeName from cache.
+// getVmssVM gets virtualMachineScaleSetVM by nodeName from cache.
 // It returns cloudprovider.InstanceNotFound if node does not belong to any scale sets.
-func (ss *scaleSet) getCachedVirtualMachine(nodeName string) (scaleSetVMInfo, error) {
-	ss.cacheMutex.Lock()
-	defer ss.cacheMutex.Unlock()
-
-	getVMFromCache := func(nodeName string) (scaleSetVMInfo, bool) {
-		glog.V(8).Infof("Getting scaleSetVMInfo for %q from cache %v", nodeName, ss.cache)
-		for scaleSetName := range ss.cache {
-			for _, vm := range ss.cache[scaleSetName] {
-				if vm.NodeName == nodeName {
-					return vm, true
-				}
-			}
-		}
-
-		return scaleSetVMInfo{}, false
-	}
-
-	vm, found := getVMFromCache(nodeName)
-	if found {
-		return vm, nil
-	}
-
+func (ss *scaleSet) getVmssVM(nodeName string) (ssName, instanceID string, vm compute.VirtualMachineScaleSetVM, err error) {
 	// Known node not managed by scale sets.
-	if ss.availabilitySetNodesCache.Has(nodeName) {
-		glog.V(10).Infof("Found node %q in availabilitySetNodesCache", nodeName)
-		return scaleSetVMInfo{}, cloudprovider.InstanceNotFound
+	managedByAS, err := ss.isNodeManagedByAvailabilitySet(nodeName)
+	if err != nil {
+		return "", "", vm, err
+	}
+	if managedByAS {
+		glog.V(8).Infof("Found node %q in availabilitySetNodesCache", nodeName)
+		return "", "", vm, ErrorNotVmssInstance
 	}
 
-	// Update cache and try again.
-	glog.V(10).Infof("vmss cache before updateCache: %v", ss.cache)
-	if err := ss.updateCache(); err != nil {
-		glog.Errorf("updateCache failed with error: %v", err)
-		return scaleSetVMInfo{}, err
-	}
-	glog.V(10).Infof("vmss cache after updateCache: %v", ss.cache)
-	vm, found = getVMFromCache(nodeName)
-	if found {
-		return vm, nil
+	instanceID, err = getScaleSetVMInstanceID(nodeName)
+	if err != nil {
+		return ssName, instanceID, vm, err
 	}
 
-	// Node still not found, assuming it is not managed by scale sets.
-	glog.V(8).Infof("Node %q doesn't belong to any scale sets, adding it to availabilitySetNodesCache", nodeName)
-	ss.availabilitySetNodesCache.Insert(nodeName)
-	return scaleSetVMInfo{}, cloudprovider.InstanceNotFound
+	ssName, err = ss.getScaleSetNameByNodeName(nodeName)
+	if err != nil {
+		return ssName, instanceID, vm, err
+	}
+
+	if ssName == "" {
+		return "", "", vm, cloudprovider.InstanceNotFound
+	}
+
+	glog.V(4).Infof("getVmssVM gets scaleSetName (%q) and instanceID (%q) for node %q", ssName, instanceID, nodeName)
+	cachedVM, err := ss.vmssVMCache.Get(ss.makeVmssVMName(ssName, instanceID))
+	if err != nil {
+		return ssName, instanceID, vm, err
+	}
+
+	if cachedVM == nil {
+		glog.Errorf("Can't find node (%q) in any scale sets", nodeName)
+		return ssName, instanceID, vm, cloudprovider.InstanceNotFound
+	}
+
+	return ssName, instanceID, *(cachedVM.(*compute.VirtualMachineScaleSetVM)), nil
 }
 
 // getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache.
 // The node must belong to one of scale sets.
-func (ss *scaleSet) getCachedVirtualMachineByInstanceID(scaleSetName, instanceID string) (scaleSetVMInfo, error) {
-	ss.cacheMutex.Lock()
-	defer ss.cacheMutex.Unlock()
-
-	getVMByID := func(scaleSetName, instanceID string) (scaleSetVMInfo, bool) {
-		glog.V(8).Infof("Getting scaleSetVMInfo with scaleSetName: %q and instanceID %q from cache %v", scaleSetName, instanceID, ss.cache)
-		vms, ok := ss.cache[scaleSetName]
-		if !ok {
-			glog.V(4).Infof("scale set (%s) not found", scaleSetName)
-			return scaleSetVMInfo{}, false
-		}
-
-		for _, vm := range vms {
-			if vm.InstanceID == instanceID {
-				glog.V(4).Infof("getCachedVirtualMachineByInstanceID gets vm (%s) by instanceID (%s) within scale set (%s)", vm.NodeName, instanceID, scaleSetName)
-				return vm, true
-			}
-		}
-
-		glog.V(4).Infof("instanceID (%s) not found in scale set (%s)", instanceID, scaleSetName)
-		return scaleSetVMInfo{}, false
-	}
-
-	vm, found := getVMByID(scaleSetName, instanceID)
-	if found {
-		return vm, nil
-	}
-
-	// Update cache and try again.
-	if err := ss.updateCache(); err != nil {
-		glog.Errorf("updateCache failed with error: %v", err)
-		return scaleSetVMInfo{}, err
-	}
-	vm, found = getVMByID(scaleSetName, instanceID)
-	if found {
-		return vm, nil
-	}
-
-	return scaleSetVMInfo{}, cloudprovider.InstanceNotFound
+func (ss *scaleSet) getVmssVMByInstanceID(scaleSetName, instanceID string) (vm compute.VirtualMachineScaleSetVM, err error) {
+	vmName := ss.makeVmssVMName(scaleSetName, instanceID)
+	cachedVM, err := ss.vmssVMCache.Get(vmName)
+	if err != nil {
+		return vm, err
+	}
+
+	if cachedVM == nil {
+		glog.Errorf("cound't find vmss virtual machine by scaleSetName (%q) and instanceID (%q)", scaleSetName, instanceID)
+		return vm, cloudprovider.InstanceNotFound
+	}
+
+	return *(cachedVM.(*compute.VirtualMachineScaleSetVM)), nil
 }
 
 // GetInstanceIDByNodeName gets the cloud provider ID by node name.
 // It must return ("", cloudprovider.InstanceNotFound) if the instance does
 // not exist or is no longer running.
 func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) {
-	vm, err := ss.getCachedVirtualMachine(name)
+	_, _, vm, err := ss.getVmssVM(name)
 	if err != nil {
-		if err == cloudprovider.InstanceNotFound {
-			glog.V(4).Infof("GetInstanceIDByNodeName: node %q is not found in scale sets, assuming it is managed by availability set", name)
-
-			// Retry with standard type because master nodes may not belong to any vmss.
-			// TODO: find a better way to identify the type of VM.
+		if err == ErrorNotVmssInstance {
+			glog.V(4).Infof("GetInstanceIDByNodeName: node %q is managed by availability set", name)
+			// Retry with standard type because nodes are not managed by vmss.
 			return ss.availabilitySet.GetInstanceIDByNodeName(name)
 		}
 
		return "", err
	}
 
-	return vm.ID, nil
+	return *vm.ID, nil
 }
 
 // GetNodeNameByProviderID gets the node name by provider ID.
 func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
 	// NodeName is not part of providerID for vmss instances.
-	scaleSetName, err := extractScaleSetNameByVMID(providerID)
+	scaleSetName, err := extractScaleSetNameByExternalID(providerID)
 	if err != nil {
 		glog.V(4).Infof("Can not extract scale set name from providerID (%s), assuming it is mananaged by availability set: %v", providerID, err)
 		return ss.availabilitySet.GetNodeNameByProviderID(providerID)
```
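Where the old `getCachedVirtualMachine` scanned the whole list cache under a single mutex and re-listed everything on a miss, the new `getVmssVM` composes three cache lookups: `isNodeManagedByAvailabilitySet` short-circuits nodes that are not VMSS instances (returning `ErrorNotVmssInstance` so callers fall back to the availability-set path), `getScaleSetNameByNodeName` resolves the node's scale set, and `vmssVMCache.Get` fetches the VM itself under the `scaleSetName_instanceID` key.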
```diff
@@ -285,49 +178,60 @@ func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName,
 		return ss.availabilitySet.GetNodeNameByProviderID(providerID)
 	}
 
-	vm, err := ss.getCachedVirtualMachineByInstanceID(scaleSetName, instanceID)
+	vm, err := ss.getVmssVMByInstanceID(scaleSetName, instanceID)
 	if err != nil {
 		return "", err
 	}
 
-	return types.NodeName(vm.NodeName), nil
+	if vm.OsProfile != nil && vm.OsProfile.ComputerName != nil {
+		nodeName := strings.ToLower(*vm.OsProfile.ComputerName)
+		return types.NodeName(nodeName), nil
+	}
+
+	return "", nil
 }
 
 // GetInstanceTypeByNodeName gets the instance type by node name.
 func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
-	vm, err := ss.getCachedVirtualMachine(name)
+	_, _, vm, err := ss.getVmssVM(name)
 	if err != nil {
-		if err == cloudprovider.InstanceNotFound {
-			glog.V(4).Infof("GetInstanceTypeByNodeName: node %q is not found in scale sets, assuming it is managed by availability set", name)
-
-			// Retry with standard type because master nodes may not belong to any vmss.
-			// TODO: find a better way to identify the type of VM.
+		if err == ErrorNotVmssInstance {
+			glog.V(4).Infof("GetInstanceTypeByNodeName: node %q is managed by availability set", name)
+			// Retry with standard type because nodes are not managed by vmss.
 			return ss.availabilitySet.GetInstanceTypeByNodeName(name)
 		}
 
 		return "", err
 	}
 
-	return vm.Type, nil
+	if vm.Sku != nil && vm.Sku.Name != nil {
+		return *vm.Sku.Name, nil
+	}
+
+	return "", nil
 }
 
 // GetZoneByNodeName gets cloudprovider.Zone by node name.
 func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
-	vm, err := ss.getCachedVirtualMachine(name)
+	_, _, vm, err := ss.getVmssVM(name)
 	if err != nil {
-		if err == cloudprovider.InstanceNotFound {
-			glog.V(4).Infof("GetZoneByNodeName: node %q is not found in scale sets, assuming it is managed by availability set", name)
-			// Retry with standard type because master nodes may not belong to any vmss.
-			// TODO: find a better way to identify the type of VM.
+		if err == ErrorNotVmssInstance {
+			glog.V(4).Infof("GetZoneByNodeName: node %q is managed by availability set", name)
+			// Retry with standard type because nodes are not managed by vmss.
 			return ss.availabilitySet.GetZoneByNodeName(name)
 		}
 
 		return cloudprovider.Zone{}, err
 	}
 
+	if vm.InstanceView != nil && vm.InstanceView.PlatformFaultDomain != nil {
 		return cloudprovider.Zone{
-			FailureDomain: vm.FaultDomain,
-			Region:        vm.Region,
+			FailureDomain: strconv.Itoa(int(*vm.InstanceView.PlatformFaultDomain)),
+			Region:        *vm.Location,
 		}, nil
+	}
+
+	return cloudprovider.Zone{}, nil
 }
 
 // GetPrimaryVMSetName returns the VM set name depending on the configured vmType.
@@ -386,9 +290,9 @@ func getScaleSetVMInstanceID(machineName string) (string, error) {
 	return fmt.Sprintf("%d", instanceID), nil
 }
 
-// extractScaleSetNameByVMID extracts the scaleset name by scaleSetVirtualMachine's ID.
-func extractScaleSetNameByVMID(vmID string) (string, error) {
-	matches := scaleSetNameRE.FindStringSubmatch(vmID)
+// extractScaleSetNameByExternalID extracts the scaleset name by node's externalID.
+func extractScaleSetNameByExternalID(externalID string) (string, error) {
+	matches := scaleSetNameRE.FindStringSubmatch(externalID)
 	if len(matches) != 2 {
 		return "", ErrorNotVmssInstance
 	}
@@ -493,41 +397,20 @@ func (ss *scaleSet) listScaleSetVMsWithRetry(scaleSetName string) ([]compute.Vir
 // getAgentPoolScaleSets lists the virtual machines for for the resource group and then builds
 // a list of scale sets that match the nodes available to k8s.
 func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
-	ss.cacheMutex.Lock()
-	defer ss.cacheMutex.Unlock()
-
-	// Always update cache to get latest lists of scale sets and virtual machines.
-	if err := ss.updateCache(); err != nil {
-		return nil, err
-	}
-
-	vmNameToScaleSetName := make(map[string]string)
-	for scaleSetName := range ss.cache {
-		vms := ss.cache[scaleSetName]
-		for idx := range vms {
-			vm := vms[idx]
-			if vm.NodeName != "" {
-				vmNameToScaleSetName[vm.NodeName] = scaleSetName
-			}
-		}
-	}
-
 	agentPoolScaleSets := &[]string{}
-	availableScaleSetNames := sets.NewString()
 	for nx := range nodes {
 		if isMasterNode(nodes[nx]) {
 			continue
 		}
 
 		nodeName := nodes[nx].Name
-		ssName, ok := vmNameToScaleSetName[nodeName]
-		if !ok {
-			// TODO: support master nodes not managed by VMSS.
-			glog.Errorf("Node %q is not belonging to any known scale sets", nodeName)
-			return nil, fmt.Errorf("node %q is not belonging to any known scale sets", nodeName)
+		ssName, err := ss.getScaleSetNameByNodeName(nodeName)
+		if err != nil {
+			return nil, err
 		}
 
-		if availableScaleSetNames.Has(ssName) {
+		if ssName == "" {
+			glog.V(3).Infof("Node %q is not belonging to any known scale sets", nodeName)
 			continue
 		}
 
@@ -589,11 +472,11 @@ func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetN
 
 // GetPrimaryInterface gets machine primary network interface by node name and vmSet.
 func (ss *scaleSet) GetPrimaryInterface(nodeName, vmSetName string) (network.Interface, error) {
-	vm, err := ss.getCachedVirtualMachine(nodeName)
+	ssName, instanceID, vm, err := ss.getVmssVM(nodeName)
 	if err != nil {
-		if err == cloudprovider.InstanceNotFound {
-			// Retry with standard type because master nodes may not belong to any vmss.
-			// TODO: find a better way to identify the type of VM.
+		if err == ErrorNotVmssInstance {
+			glog.V(4).Infof("GetPrimaryInterface: node %q is managed by availability set", nodeName)
+			// Retry with standard type because nodes are not managed by vmss.
 			return ss.availabilitySet.GetPrimaryInterface(nodeName, "")
 		}
 
@@ -602,59 +485,55 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName, vmSetName string) (network.Int
 	}
 
 	// Check scale set name.
-	if vmSetName != "" && !strings.EqualFold(vm.ScaleSetName, vmSetName) {
+	if vmSetName != "" && !strings.EqualFold(ssName, vmSetName) {
 		return network.Interface{}, errNotInVMSet
 	}
 
-	nicName, err := getLastSegment(vm.PrimaryInterfaceID)
+	primaryInterfaceID, err := ss.getPrimaryInterfaceID(vm)
 	if err != nil {
-		glog.Errorf("error: ss.GetPrimaryInterface(%s), getLastSegment(%s), err=%v", nodeName, vm.PrimaryInterfaceID, err)
+		glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getPrimaryInterfaceID(), err=%v", nodeName, err)
 		return network.Interface{}, err
 	}
 
-	nic, err := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ss.ResourceGroup, vm.ScaleSetName, vm.InstanceID, nicName, "")
+	nicName, err := getLastSegment(primaryInterfaceID)
 	if err != nil {
-		glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, ss.ResourceGroup, vm.ScaleSetName, nicName, err)
+		glog.Errorf("error: ss.GetPrimaryInterface(%s), getLastSegment(%s), err=%v", nodeName, primaryInterfaceID, err)
+		return network.Interface{}, err
+	}
+
+	nic, err := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ss.ResourceGroup, ssName, instanceID, nicName, "")
+	if err != nil {
+		glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, ss.ResourceGroup, ssName, nicName, err)
 		return network.Interface{}, err
 	}
 
 	// Fix interface's location, which is required when updating the interface.
 	// TODO: is this a bug of azure SDK?
 	if nic.Location == nil || *nic.Location == "" {
-		nic.Location = &vm.Region
+		nic.Location = vm.Location
 	}
 
 	return nic, nil
 }
 
-// getScaleSet gets a scale set by name.
-func (ss *scaleSet) getScaleSet(name string) (compute.VirtualMachineScaleSet, bool, error) {
-	result, err := ss.VirtualMachineScaleSetsClient.Get(ss.ResourceGroup, name)
-	exists, realErr := checkResourceExistsFromError(err)
-	if realErr != nil {
-		return result, false, realErr
-	}
-
-	if !exists {
-		return result, false, nil
-	}
-
-	return result, exists, err
-}
-
 // getScaleSetWithRetry gets scale set with exponential backoff retry
 func (ss *scaleSet) getScaleSetWithRetry(name string) (compute.VirtualMachineScaleSet, bool, error) {
 	var result compute.VirtualMachineScaleSet
 	var exists bool
 
 	err := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) {
-		var retryErr error
-		result, exists, retryErr = ss.getScaleSet(name)
+		cached, retryErr := ss.vmssCache.Get(name)
 		if retryErr != nil {
-			glog.Errorf("backoff: failure, will retry,err=%v", retryErr)
+			glog.Errorf("backoff: failure for scale set %q, will retry,err=%v", name, retryErr)
 			return false, nil
 		}
-		glog.V(2).Info("backoff: success")
+		glog.V(4).Info("backoff: success for scale set %q", name)
+
+		if cached != nil {
+			exists = true
+			result = *(cached.(*compute.VirtualMachineScaleSet))
+		}
+
 		return true, nil
 	})
 
@@ -783,7 +662,7 @@ func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, back
 	// Construct instanceIDs from nodes.
 	instanceIDs := []string{}
 	for _, curNode := range nodes {
-		curScaleSetName, err := extractScaleSetNameByVMID(curNode.Spec.ExternalID)
+		curScaleSetName, err := extractScaleSetNameByExternalID(curNode.Spec.ExternalID)
 		if err != nil {
 			glog.V(4).Infof("Node %q is not belonging to any scale sets, omitting it", curNode.Name)
 			continue
```
New file `azure_vmss_cache.go` (per the BUILD changes above), added in full:

```go
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
	"fmt"
	"strings"
	"time"

	"github.com/golang/glog"

	"k8s.io/apimachinery/pkg/util/sets"
)

var (
	vmssNameSeparator = "_"

	nodeNameToScaleSetMappingKey = "k8sNodeNameToScaleSetMappingKey"
	availabilitySetNodesKey      = "k8sAvailabilitySetNodesKey"

	vmssCacheTTL                      = time.Minute
	vmssVMCacheTTL                    = time.Minute
	availabilitySetNodesCacheTTL      = 15 * time.Minute
	nodeNameToScaleSetMappingCacheTTL = 15 * time.Minute
)

// nodeNameToScaleSetMapping maps nodeName to scaleSet name.
// The map is required because vmss nodeName is not equal to its vmName.
type nodeNameToScaleSetMapping map[string]string

func (ss *scaleSet) makeVmssVMName(scaleSetName, instanceID string) string {
	return fmt.Sprintf("%s%s%s", scaleSetName, vmssNameSeparator, instanceID)
}

func (ss *scaleSet) extractVmssVMName(name string) (string, string, error) {
	ret := strings.Split(name, vmssNameSeparator)
	if len(ret) != 2 {
		glog.Errorf("Failed to extract vmssVMName %q", name)
		return "", "", ErrorNotVmssInstance
	}

	return ret[0], ret[1], nil
}
```
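`makeVmssVMName` and `extractVmssVMName` are inverses as long as the scale set name itself contains no underscore (a name like `my_ss` would split into three parts and trip the `len(ret) != 2` check). A small illustrative round trip, assuming it sits in the same `azure` package as the code above with `fmt` imported:

```go
func ExampleVmssVMNameRoundTrip() {
	ss := &scaleSet{}

	key := ss.makeVmssVMName("ss", "3") // "ss_3"
	ssName, instanceID, _ := ss.extractVmssVMName(key)
	fmt.Println(ssName, instanceID) // prints: ss 3

	// A name without the "_" separator is rejected.
	_, _, err := ss.extractVmssVMName("vm-1234")
	fmt.Println(err == ErrorNotVmssInstance) // prints: true
}
```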
The file continues with the cache constructors and lookup helpers:

```go
func (ss *scaleSet) newVmssCache() (*timedCache, error) {
	getter := func(key string) (interface{}, error) {
		result, err := ss.VirtualMachineScaleSetsClient.Get(ss.ResourceGroup, key)
		exists, realErr := checkResourceExistsFromError(err)
		if realErr != nil {
			return nil, realErr
		}

		if !exists {
			return nil, nil
		}

		return &result, nil
	}

	return newTimedcache(vmssCacheTTL, getter)
}

func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) {
	getter := func(key string) (interface{}, error) {
		scaleSetNames, err := ss.listScaleSetsWithRetry()
		if err != nil {
			return nil, err
		}

		localCache := make(nodeNameToScaleSetMapping)
		for _, ssName := range scaleSetNames {
			vms, err := ss.listScaleSetVMsWithRetry(ssName)
			if err != nil {
				return nil, err
			}

			for _, vm := range vms {
				if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil {
					glog.Warningf("failed to get computerName for vmssVM (%q)", vm.Name)
					continue
				}

				computerName := strings.ToLower(*vm.OsProfile.ComputerName)
				localCache[computerName] = ssName
			}
		}

		return localCache, nil
	}

	return newTimedcache(nodeNameToScaleSetMappingCacheTTL, getter)
}

func (ss *scaleSet) newAvailabilitySetNodesCache() (*timedCache, error) {
	getter := func(key string) (interface{}, error) {
		vmList, err := ss.Cloud.VirtualMachineClientListWithRetry()
		if err != nil {
			return nil, err
		}

		localCache := sets.NewString()
		for _, vm := range vmList {
			localCache.Insert(*vm.Name)
		}

		return localCache, nil
	}

	return newTimedcache(availabilitySetNodesCacheTTL, getter)
}

func (ss *scaleSet) newVmssVMCache() (*timedCache, error) {
	getter := func(key string) (interface{}, error) {
		// vmssVM name's format is 'scaleSetName_instanceID'
		ssName, instanceID, err := ss.extractVmssVMName(key)
		if err != nil {
			return nil, err
		}

		// Not found, the VM doesn't belong to any known scale sets.
		if ssName == "" {
			return nil, nil
		}

		result, err := ss.VirtualMachineScaleSetVMsClient.Get(ss.ResourceGroup, ssName, instanceID)
		exists, realErr := checkResourceExistsFromError(err)
		if realErr != nil {
			return nil, realErr
		}

		if !exists {
			return nil, nil
		}

		return &result, nil
	}

	return newTimedcache(vmssVMCacheTTL, getter)
}

func (ss *scaleSet) getScaleSetNameByNodeName(nodeName string) (string, error) {
	getScaleSetName := func(nodeName string) (string, error) {
		nodeNameMapping, err := ss.nodeNameToScaleSetMappingCache.Get(nodeNameToScaleSetMappingKey)
		if err != nil {
			return "", err
		}

		realMapping := nodeNameMapping.(nodeNameToScaleSetMapping)
		if ssName, ok := realMapping[nodeName]; ok {
			return ssName, nil
		}

		return "", nil
	}

	ssName, err := getScaleSetName(nodeName)
	if err != nil {
		return "", err
	}

	if ssName != "" {
		return ssName, nil
	}

	// ssName is still not found, it is likely that new Nodes are created.
	// Force refresh the cache and try again.
	ss.nodeNameToScaleSetMappingCache.Delete(nodeNameToScaleSetMappingKey)
	return getScaleSetName(nodeName)
}

func (ss *scaleSet) isNodeManagedByAvailabilitySet(nodeName string) (bool, error) {
	cached, err := ss.availabilitySetNodesCache.Get(availabilitySetNodesKey)
	if err != nil {
		return false, err
	}

	availabilitySetNodes := cached.(sets.String)
	return availabilitySetNodes.Has(nodeName), nil
}
```
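Note that the two list-backed caches are effectively singletons: every read goes through a fixed key (`nodeNameToScaleSetMappingKey` or `availabilitySetNodesKey`), so the `timedCache` machinery is used purely for TTL-based expiry of one snapshot. The `Delete` call in `getScaleSetNameByNodeName` doubles as a force-refresh: when a node is missing from the current snapshot (likely because it was just created), evicting the key makes the retry rebuild the mapping immediately instead of waiting out the 15-minute TTL.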
New file `azure_vmss_cache_test.go`:

```go
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestExtractVmssVMName(t *testing.T) {
	ss := &scaleSet{}
	cases := []struct {
		description        string
		vmName             string
		expectError        bool
		expectedScaleSet   string
		expectedInstanceID string
	}{
		{
			description: "wrong vmss VM name should report error",
			vmName:      "vm1234",
			expectError: true,
		},
		{
			description: "wrong VM name separator should report error",
			vmName:      "vm-1234",
			expectError: true,
		},
		{
			description:        "correct vmss VM name should return correct scaleSet and instanceID",
			vmName:             "vm_1234",
			expectedScaleSet:   "vm",
			expectedInstanceID: "1234",
		},
	}

	for _, c := range cases {
		ssName, instanceID, err := ss.extractVmssVMName(c.vmName)
		if c.expectError {
			assert.Error(t, err, c.description)
			continue
		}

		assert.Equal(t, c.expectedScaleSet, ssName, c.description)
		assert.Equal(t, c.expectedInstanceID, instanceID, c.description)
	}
}
```
`azure_vmss_test.go`:

```diff
@@ -24,12 +24,15 @@ import (
 	"github.com/stretchr/testify/assert"
 )
 
-func newTestScaleSet(scaleSetName string, vmList []string) *scaleSet {
+func newTestScaleSet(scaleSetName string, vmList []string) (*scaleSet, error) {
 	cloud := getTestCloud()
 	setTestVirtualMachineCloud(cloud, scaleSetName, vmList)
-	ss := newScaleSet(cloud)
+	ss, err := newScaleSet(cloud)
+	if err != nil {
+		return nil, err
+	}
 
-	return ss.(*scaleSet)
+	return ss.(*scaleSet), nil
 }
 
 func setTestVirtualMachineCloud(ss *Cloud, scaleSetName string, vmList []string) {
@@ -46,16 +49,16 @@ func setTestVirtualMachineCloud(ss *Cloud, scaleSetName string, vmList []string)
 	ssVMs := make(map[string]map[string]compute.VirtualMachineScaleSetVM)
 	ssVMs["rg"] = make(map[string]compute.VirtualMachineScaleSetVM)
 	for i := range vmList {
-		ID := fmt.Sprintf("azure:///subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d", scaleSetName, i)
+		ID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d", scaleSetName, i)
 		nodeName := vmList[i]
 		instanceID := fmt.Sprintf("%d", i)
-		vmKey := fmt.Sprintf("%s-%s", scaleSetName, nodeName)
+		vmName := fmt.Sprintf("%s_%s", scaleSetName, instanceID)
 		networkInterfaces := []compute.NetworkInterfaceReference{
 			{
 				ID: &nodeName,
 			},
 		}
-		ssVMs["rg"][vmKey] = compute.VirtualMachineScaleSetVM{
+		ssVMs["rg"][vmName] = compute.VirtualMachineScaleSetVM{
 			VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
 				OsProfile: &compute.OSProfile{
 					ComputerName: &nodeName,
@@ -66,6 +69,7 @@ func setTestVirtualMachineCloud(ss *Cloud, scaleSetName string, vmList []string)
 			},
 			ID:         &ID,
 			InstanceID: &instanceID,
+			Name:       &vmName,
 			Location:   &ss.Location,
 		}
 	}
@@ -116,28 +120,29 @@ func TestGetInstanceIDByNodeName(t *testing.T) {
 		{
 			description: "scaleSet should get instance by node name",
 			scaleSet:    "ss",
-			vmList:      []string{"vm1", "vm2"},
-			nodeName:    "vm1",
-			expected:    "azure:///subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/ss/virtualMachines/0",
+			vmList:      []string{"vmssee6c2000000", "vmssee6c2000001"},
+			nodeName:    "vmssee6c2000001",
+			expected:    "/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/ss/virtualMachines/1",
 		},
 		{
 			description: "scaleSet should get instance by node name with upper cases hostname",
 			scaleSet:    "ss",
-			vmList:      []string{"VM1", "vm2"},
-			nodeName:    "vm1",
-			expected:    "azure:///subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/ss/virtualMachines/0",
+			vmList:      []string{"VMSSEE6C2000000", "VMSSEE6C2000001"},
+			nodeName:    "vmssee6c2000000",
+			expected:    "/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/ss/virtualMachines/0",
 		},
 		{
 			description: "scaleSet should not get instance for non-exist nodes",
 			scaleSet:    "ss",
-			vmList:      []string{"VM1", "vm2"},
-			nodeName:    "vm3",
+			vmList:      []string{"vmssee6c2000000", "vmssee6c2000001"},
+			nodeName:    "agente6c2000005",
 			expectError: true,
 		},
 	}
 
 	for _, test := range testCases {
-		ss := newTestScaleSet(test.scaleSet, test.vmList)
+		ss, err := newTestScaleSet(test.scaleSet, test.vmList)
+		assert.NoError(t, err, test.description)
 
 		real, err := ss.GetInstanceIDByNodeName(test.nodeName)
 		if test.expectError {
```