Merge pull request #59086 from feiskyer/typo

Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Fix typos and comments

**What this PR does / why we need it**:

Fix typos across the azure package.

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #

**Special notes for your reviewer**:

**Release note**:

```release-note
NONE
```
Kubernetes Submit Queue 2018-01-30 22:51:42 -08:00 committed by GitHub
commit 494664a736
9 changed files with 17 additions and 17 deletions


@@ -76,7 +76,7 @@ type Config struct {
 // the cloudprovider will try to add all nodes to a single backend pool which is forbidden.
 // In other words, if you use multiple agent pools (availability sets), you MUST set this field.
 PrimaryAvailabilitySetName string `json:"primaryAvailabilitySetName" yaml:"primaryAvailabilitySetName"`
-// The type of azure nodes. Candidate valudes are: vmss and standard.
+// The type of azure nodes. Candidate values are: vmss and standard.
 // If not set, it will be default to standard.
 VMType string `json:"vmType" yaml:"vmType"`
 // The name of the scale set that should be used as the load balancer backend.
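For context, the two fields touched by this hunk are plain cloud-config settings. Below is a minimal, hypothetical sketch of how such a config fragment maps onto them through the JSON tags shown above; the field subset and the availability set name are made up for illustration, and the real Config struct has many more fields.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// config mirrors only the two fields from the hunk above.
type config struct {
	PrimaryAvailabilitySetName string `json:"primaryAvailabilitySetName"`
	VMType                     string `json:"vmType"`
}

func main() {
	// Hypothetical cloud-config fragment for a cluster using availability sets.
	raw := []byte(`{"primaryAvailabilitySetName": "agentpool1-availabilitySet", "vmType": "standard"}`)

	var c config
	if err := json.Unmarshal(raw, &c); err != nil {
		panic(err)
	}
	if c.VMType == "" {
		c.VMType = "standard" // the default described in the comment above
	}
	fmt.Println(c.PrimaryAvailabilitySetName, c.VMType)
}
```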


@@ -292,7 +292,7 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error {
 c.accounts[storageAccountName].diskCount = int32(diskCount)
 } else {
 glog.Warningf("azureDisk - failed to get disk count for %s however the delete disk operation was ok", storageAccountName)
-return nil // we have failed to aquire a new count. not an error condition
+return nil // we have failed to acquire a new count. not an error condition
 }
 }
 atomic.AddInt32(&c.accounts[storageAccountName].diskCount, -1)
@@ -602,7 +602,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
 // avg are not create and we should create more accounts if we can
 if aboveAvg && countAccounts < maxStorageAccounts {
-glog.V(2).Infof("azureDisk - shared storageAccounts utilzation(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing)
+glog.V(2).Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing)
 SAName = getAccountNameForNum(c.getNextAccountNum())
 err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
 if err != nil {
@@ -613,7 +613,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
 // averages are not ok and we are at capacity (max storage accounts allowed)
 if aboveAvg && countAccounts == maxStorageAccounts {
-glog.Infof("azureDisk - shared storageAccounts utilzation(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts",
+glog.Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts",
 avgUtilization, storageAccountUtilizationBeforeGrowing, maxStorageAccounts)
 }
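The two log lines changed above belong to the storage-account growth check in findSANameForDisk. A rough sketch of the decision they describe follows; the function name, parameters, and return strings are illustrative only, and the real code also creates the account and handles errors.

```go
package main

import "fmt"

// growthDecision sketches the grow-at-avg-utilization logic: above the
// threshold with head room, create a new shared storage account; above the
// threshold at the account cap, keep reusing accounts and accept exceeding
// the target utilization.
func growthDecision(avgUtilization, growAt float64, countAccounts, maxStorageAccounts int) string {
	aboveAvg := avgUtilization > growAt
	switch {
	case aboveAvg && countAccounts < maxStorageAccounts:
		return "create a new shared storage account"
	case aboveAvg && countAccounts == maxStorageAccounts:
		return "at the account cap: reuse an existing account and exceed the target"
	default:
		return "reuse an existing account"
	}
}

func main() {
	fmt.Println(growthDecision(0.7, 0.5, 3, 100))   // above threshold, below cap
	fmt.Println(growthDecision(0.7, 0.5, 100, 100)) // above threshold, at cap
}
```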


@@ -66,7 +66,7 @@ func (fLBC *fakeAzureLBClient) CreateOrUpdate(resourceGroupName string, loadBala
 for idx, config := range *parameters.FrontendIPConfigurations {
 if config.PrivateIPAllocationMethod == network.Dynamic {
 // Here we randomly assign an ip as private ip
-// It dosen't smart enough to know whether it is in the subnet's range
+// It doesn't smart enough to know whether it is in the subnet's range
 (*parameters.FrontendIPConfigurations)[idx].PrivateIPAddress = getRandomIPPtr()
 }
 }


@@ -231,10 +231,10 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
 return defaultLB, nil, false, nil
 }
-// select load balancer for the service in the cluster
-// the selection algorithm selectes the the load balancer with currently has
-// the minimum lb rules, there there are multiple LB's with same number of rules
-// it selects the first one (sorted based on name)
+// selectLoadBalancer selects load balancer for the service in the cluster.
+// The selection algorithm selects the the load balancer with currently has
+// the minimum lb rules. If there are multiple LBs with same number of rules,
+// then selects the first one (sorted based on name).
 func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, existingLBs *[]network.LoadBalancer, nodes []*v1.Node) (selectedLB *network.LoadBalancer, existsLb bool, err error) {
 isInternal := requiresInternalLoadBalancer(service)
 serviceName := getServiceName(service)
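The rewritten comment above describes the selection rule: pick the load balancer that currently has the fewest rules, breaking ties by name. Here is a toy, self-contained sketch of that rule; the lb type and selectLB helper are stand-ins, not the real network.LoadBalancer handling.

```go
package main

import (
	"fmt"
	"sort"
)

// lb is a toy stand-in carrying just the properties the selection rule needs.
type lb struct {
	name      string
	ruleCount int
}

// selectLB returns the load balancer with the fewest rules; among equals,
// the first one in name order wins.
func selectLB(lbs []lb) *lb {
	if len(lbs) == 0 {
		return nil
	}
	// Sorting by name first makes the minimum scan below pick the
	// alphabetically first LB among those with the same rule count.
	sort.Slice(lbs, func(i, j int) bool { return lbs[i].name < lbs[j].name })
	selected := &lbs[0]
	for i := range lbs {
		if lbs[i].ruleCount < selected.ruleCount {
			selected = &lbs[i]
		}
	}
	return selected
}

func main() {
	lbs := []lb{{"lb-b", 2}, {"lb-a", 2}, {"lb-c", 5}}
	fmt.Println(selectLB(lbs).name) // lb-a: fewest rules, earliest name among ties
}
```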


@@ -73,7 +73,7 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun
 diskID := ""
 err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) {
-provisonState, id, err := c.getDisk(diskName)
+provisionState, id, err := c.getDisk(diskName)
 diskID = id
 // We are waiting for provisioningState==Succeeded
 // We don't want to hand-off managed disks to k8s while they are
@@ -81,7 +81,7 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun
 if err != nil {
 return false, err
 }
-if strings.ToLower(provisonState) == "succeeded" {
+if strings.ToLower(provisionState) == "succeeded" {
 return true, nil
 }
 return false, nil
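These hunks sit inside the polling loop that waits for the managed disk to reach provisioningState == Succeeded before handing it to Kubernetes. Below is a minimal sketch of the same pattern using the apimachinery wait package; getProvisioningState is a made-up stand-in for c.getDisk, and the backoff values are illustrative.

```go
package main

import (
	"fmt"
	"strings"
	"time"

	kwait "k8s.io/apimachinery/pkg/util/wait"
)

// getProvisioningState is a hypothetical stand-in for c.getDisk; here it
// simply pretends the disk is already provisioned.
func getProvisioningState(diskName string) (string, string, error) {
	return "Succeeded", "fake-disk-id-" + diskName, nil
}

func main() {
	backoff := kwait.Backoff{Duration: time.Second, Factor: 2.0, Steps: 5}

	diskID := ""
	err := kwait.ExponentialBackoff(backoff, func() (bool, error) {
		provisionState, id, err := getProvisioningState("example-disk")
		if err != nil {
			return false, err // a hard error aborts the retries
		}
		diskID = id
		// Keep polling (with growing delays) until the state is Succeeded.
		return strings.ToLower(provisionState) == "succeeded", nil
	})
	fmt.Println(diskID, err)
}
```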


@@ -42,10 +42,10 @@ type metricContext struct {
 attributes []string
 }
-func newMetricContext(prefix, request, resouceGroup, subscriptionID string) *metricContext {
+func newMetricContext(prefix, request, resourceGroup, subscriptionID string) *metricContext {
 return &metricContext{
 start: time.Now(),
-attributes: []string{prefix + "_" + request, resouceGroup, subscriptionID},
+attributes: []string{prefix + "_" + request, resourceGroup, subscriptionID},
 }
 }


@@ -34,7 +34,7 @@ func TestIsMasterNode(t *testing.T) {
 },
 },
 }) {
-t.Errorf("Node labelled 'workerk' should not be master!")
+t.Errorf("Node labelled 'worker' should not be master!")
 }
 if !isMasterNode(&v1.Node{
 ObjectMeta: meta.ObjectMeta{


@@ -24,7 +24,7 @@ import (
 "k8s.io/kubernetes/pkg/cloudprovider"
 )
-// VMSet defines functions all vmsets (including scale set and availabitlity
+// VMSet defines functions all vmsets (including scale set and availability
 // set) should be implemented.
 type VMSet interface {
 // GetInstanceIDByNodeName gets the cloud provider ID by node name.
@@ -48,7 +48,7 @@ type VMSet interface {
 GetPrimaryVMSetName() string
 // GetVMSetNames selects all possible availability sets or scale sets
 // (depending vmType configured) for service load balancer, if the service has
-// no loadbalancer mode annotaion returns the primary VMSet. If service annotation
+// no loadbalancer mode annotation returns the primary VMSet. If service annotation
 // for loadbalancer exists then return the eligible VMSet.
 GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error)
 // EnsureHostsInPool ensures the given Node's primary IP configurations are


@@ -539,7 +539,7 @@ func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
 // GetVMSetNames selects all possible availability sets or scale sets
 // (depending vmType configured) for service load balancer. If the service has
-// no loadbalancer mode annotaion returns the primary VMSet. If service annotation
+// no loadbalancer mode annotation returns the primary VMSet. If service annotation
 // for loadbalancer exists then return the eligible VMSet.
 func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetNames *[]string, err error) {
 hasMode, isAuto, serviceVMSetNames := getServiceLoadBalancerMode(service)
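Both GetVMSetNames hunks above change the same doc comment. A rough sketch of the annotation-driven behaviour it describes follows; the annotation key, the "__auto__" sentinel, and the helper name are assumptions for illustration and should be checked against the azure package constants.

```go
package main

import (
	"fmt"
	"strings"
)

// Assumed annotation key; the azure package defines the real constant.
const lbModeAnnotation = "service.beta.kubernetes.io/azure-load-balancer-mode"

// vmSetNamesForService: no annotation -> only the primary VMSet; "__auto__"
// -> every availability set / scale set is eligible; otherwise the annotation
// lists the eligible VMSets.
func vmSetNamesForService(annotations map[string]string, primary string, all []string) []string {
	mode, hasMode := annotations[lbModeAnnotation]
	if !hasMode {
		return []string{primary}
	}
	if mode == "__auto__" {
		return all
	}
	return strings.Split(mode, ",")
}

func main() {
	all := []string{"agentpool1", "agentpool2"}
	fmt.Println(vmSetNamesForService(nil, "agentpool1", all))
	fmt.Println(vmSetNamesForService(map[string]string{lbModeAnnotation: "__auto__"}, "agentpool1", all))
	fmt.Println(vmSetNamesForService(map[string]string{lbModeAnnotation: "agentpool2"}, "agentpool1", all))
}
```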