Merge pull request #57695 from feiskyer/vmss

Automatic merge from submit-queue (batch tested with PRs 49856, 56257, 57027, 57695, 57432). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Fix vmss listing for Azure cloud provider

**What this PR does / why we need it**:

Fix a bug in vmss listing: if there is only one instance, `listScaleSetsWithRetry` and `listScaleSetVMsWithRetry` will return an empty list, because results were appended only when a result page held more than one value (`len(*result.Value) > 1` instead of `> 0`).

This PR also adds more verbose logs.
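
A minimal sketch of the paging-loop shape and the fix (the `fetchPage`/`listAll` names and the in-memory pages below are illustrative stand-ins for the Azure SDK paging calls, not the actual azure_vmss.go code):

```go
package main

import "fmt"

// fetchPage stands in for the paged Azure SDK list call; it returns one page
// of names and whether another page follows. Purely illustrative.
func fetchPage(pages [][]string, i int) (values []string, hasNext bool) {
	if i >= len(pages) {
		return nil, false
	}
	return pages[i], i+1 < len(pages)
}

// listAll mirrors the shape of the listScaleSetsWithRetry paging loop.
// The bug: with the old condition `len(values) > 1`, a page holding exactly
// one item was never appended, so a single-instance scale set yielded an
// empty result.
func listAll(pages [][]string) []string {
	var all []string
	i := 0
	values, hasNext := fetchPage(pages, i)
	appendResults := values != nil && len(values) > 0 // fixed; was: > 1
	for appendResults {
		all = append(all, values...)
		appendResults = false
		if hasNext {
			i++
			values, hasNext = fetchPage(pages, i)
			appendResults = values != nil && len(values) > 0 // fixed; was: > 1
		}
	}
	return all
}

func main() {
	fmt.Println(listAll([][]string{{"vmss-0"}})) // prints [vmss-0]; the old condition printed []
}
```

With the old `> 1` condition the example returns an empty slice; with `> 0` the single instance is returned.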

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Related to #43287.

**Special notes for your reviewer**:

**Release note**:

```release-note
NONE
```
Kubernetes Submit Queue 2018-01-02 13:15:49 -08:00 committed by GitHub
commit ff9b21192f
1 changed file with 30 additions and 21 deletions

@@ -160,79 +160,88 @@ func (ss *scaleSet) updateCache() error {
}
// getCachedVirtualMachine gets virtualMachine by nodeName from cache.
// It returns cloudprovider.InstanceNotFound if node does not belong to any scale sets.
func (ss *scaleSet) getCachedVirtualMachine(nodeName string) (scaleSetVMInfo, error) {
ss.cacheMutex.Lock()
defer ss.cacheMutex.Unlock()
-getVMFromCache := func(nodeName string) (scaleSetVMInfo, error) {
+getVMFromCache := func(nodeName string) (scaleSetVMInfo, bool) {
glog.V(8).Infof("Getting scaleSetVMInfo for %q from cache %v", nodeName, ss.cache)
for scaleSetName := range ss.cache {
for _, vm := range ss.cache[scaleSetName] {
if vm.NodeName == nodeName {
-return vm, nil
+return vm, true
}
}
}
-return scaleSetVMInfo{}, cloudprovider.InstanceNotFound
+return scaleSetVMInfo{}, false
}
-vm, err := getVMFromCache(nodeName)
-if err == nil {
+vm, found := getVMFromCache(nodeName)
+if found {
return vm, nil
}
// Known node not managed by scale sets.
if ss.availabilitySetNodesCache.Has(nodeName) {
glog.V(10).Infof("Found node %q in availabilitySetNodesCache", nodeName)
return scaleSetVMInfo{}, cloudprovider.InstanceNotFound
}
// Update cache and try again.
-if err = ss.updateCache(); err != nil {
+if err := ss.updateCache(); err != nil {
glog.Errorf("updateCache failed with error: %v", err)
return scaleSetVMInfo{}, err
}
-vm, err = getVMFromCache(nodeName)
-if err == nil {
+vm, found = getVMFromCache(nodeName)
+if found {
return vm, nil
}
// Node still not found, assuming it is not managed by scale sets.
glog.V(8).Infof("Node %q doesn't belong to any scale sets, adding it to availabilitySetNodesCache", nodeName)
ss.availabilitySetNodesCache.Insert(nodeName)
return scaleSetVMInfo{}, cloudprovider.InstanceNotFound
}
// getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache.
// The node must belong to one of scale sets.
func (ss *scaleSet) getCachedVirtualMachineByInstanceID(scaleSetName, instanceID string) (scaleSetVMInfo, error) {
ss.cacheMutex.Lock()
defer ss.cacheMutex.Unlock()
-getVMByID := func(scaleSetName, instanceID string) (scaleSetVMInfo, error) {
+getVMByID := func(scaleSetName, instanceID string) (scaleSetVMInfo, bool) {
glog.V(8).Infof("Getting scaleSetVMInfo with scaleSetName: %q and instanceID %q from cache %v", scaleSetName, instanceID, ss.cache)
vms, ok := ss.cache[scaleSetName]
if !ok {
glog.V(4).Infof("scale set (%s) not found", scaleSetName)
-return scaleSetVMInfo{}, cloudprovider.InstanceNotFound
+return scaleSetVMInfo{}, false
}
for _, vm := range vms {
if vm.InstanceID == instanceID {
glog.V(4).Infof("getCachedVirtualMachineByInstanceID gets vm (%s) by instanceID (%s) within scale set (%s)", vm.NodeName, instanceID, scaleSetName)
-return vm, nil
+return vm, true
}
}
glog.V(4).Infof("instanceID (%s) not found in scale set (%s)", instanceID, scaleSetName)
-return scaleSetVMInfo{}, cloudprovider.InstanceNotFound
+return scaleSetVMInfo{}, false
}
-vm, err := getVMByID(scaleSetName, instanceID)
-if err == nil {
+vm, found := getVMByID(scaleSetName, instanceID)
+if found {
return vm, nil
}
// Update cache and try again.
-if err = ss.updateCache(); err != nil {
+if err := ss.updateCache(); err != nil {
glog.Errorf("updateCache failed with error: %v", err)
return scaleSetVMInfo{}, err
}
-vm, err = getVMByID(scaleSetName, instanceID)
-if err == nil {
+vm, found = getVMByID(scaleSetName, instanceID)
+if found {
return vm, nil
}
@@ -407,7 +416,7 @@ func (ss *scaleSet) listScaleSetsWithRetry() ([]string, error) {
return nil, backoffError
}
-appendResults := (result.Value != nil && len(*result.Value) > 1)
+appendResults := (result.Value != nil && len(*result.Value) > 0)
for appendResults {
for _, scaleSet := range *result.Value {
allScaleSets = append(allScaleSets, *scaleSet.Name)
@@ -431,7 +440,7 @@ func (ss *scaleSet) listScaleSetsWithRetry() ([]string, error) {
return nil, backoffError
}
-appendResults = (result.Value != nil && len(*result.Value) > 1)
+appendResults = (result.Value != nil && len(*result.Value) > 0)
}
}
@@ -461,7 +470,7 @@ func (ss *scaleSet) listScaleSetVMsWithRetry(scaleSetName string) ([]compute.Vir
return nil, backoffError
}
-appendResults := (result.Value != nil && len(*result.Value) > 1)
+appendResults := (result.Value != nil && len(*result.Value) > 0)
for appendResults {
allVMs = append(allVMs, *result.Value...)
appendResults = false
@@ -483,7 +492,7 @@ func (ss *scaleSet) listScaleSetVMsWithRetry(scaleSetName string) ([]compute.Vir
return nil, backoffError
}
-appendResults = (result.Value != nil && len(*result.Value) > 1)
+appendResults = (result.Value != nil && len(*result.Value) > 0)
}
}
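
For reference, the cache-lookup changes above switch the inner helpers from returning `cloudprovider.InstanceNotFound` to a comma-ok boolean, so the error is produced only after a cache refresh still fails to find the node. A standalone sketch of that flow, using simplified hypothetical types rather than the real `scaleSet` implementation:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// errInstanceNotFound stands in for cloudprovider.InstanceNotFound.
var errInstanceNotFound = errors.New("instance not found")

type vmInfo struct{ NodeName string }

type vmCache struct {
	mu   sync.Mutex
	byVM map[string]vmInfo
}

// refresh stands in for updateCache; here it simply reloads a fixed entry.
func (c *vmCache) refresh() error {
	c.byVM = map[string]vmInfo{"node-1": {NodeName: "node-1"}}
	return nil
}

// get mirrors the shape of getCachedVirtualMachine after this change:
// a comma-ok inner lookup, a single cache refresh on miss, and only then
// a definitive not-found error.
func (c *vmCache) get(nodeName string) (vmInfo, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	lookup := func() (vmInfo, bool) {
		vm, found := c.byVM[nodeName]
		return vm, found
	}

	if vm, found := lookup(); found {
		return vm, nil
	}
	if err := c.refresh(); err != nil {
		return vmInfo{}, err
	}
	if vm, found := lookup(); found {
		return vm, nil
	}
	return vmInfo{}, errInstanceNotFound
}

func main() {
	c := &vmCache{}
	vm, err := c.get("node-1") // first lookup misses, refresh fills the cache, second lookup hits
	fmt.Println(vm, err)       // {node-1} <nil>
}
```

Using a boolean for the inner lookup keeps an ordinary cache miss distinct from a real error, so the only errors surfaced are a failed cache refresh or the final not-found result.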