Merge pull request #65649 from rsc/fix-printf

Automatic merge from submit-queue (batch tested with PRs 66076, 65792, 65649). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

kubernetes: fix printf format errors

These are all flagged by Go 1.11's
more accurate printf checking in go vet,
which runs as part of go test.

```release-note
NONE
```
Kubernetes Submit Queue 2018-07-11 14:09:08 -07:00 committed by GitHub
commit 0972ce1acc
26 changed files with 38 additions and 38 deletions
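
For context, the fixes below fall into a few recurring classes that the stricter printf checker flags: calling a non-formatting variant (`glog.Info`, `glog.Warning`) with a format string, passing too few or too many arguments for the verbs in the format, and using a verb that does not match the argument's type. The following is a minimal sketch of those classes using the standard `log` package rather than glog (glog's `Info`/`Infof` split mirrors `log.Print`/`log.Printf`; the values are made up) — running `go vet` on it reports a diagnostic for each call:

```go
package main

import "log"

func main() {
	name := "node-1"

	// vet: call has possible formatting directive %q
	// (the non-f variants don't format; %q would be printed literally)
	log.Print("setting hostname-override to %q", name)

	// vet: call needs 1 arg but has 2 args
	log.Printf("hostname is %q", name, name)

	// vet: format %d has arg of wrong type string
	log.Printf("found %d defaults", name)
}
```
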

@@ -97,7 +97,7 @@ func buildKubeletArgMap(opts kubeletFlagsOpts) map[string]string {
 // Make sure the node name we're passed will work with Kubelet
 if opts.nodeRegOpts.Name != "" && opts.nodeRegOpts.Name != opts.defaultHostname {
-glog.V(1).Info("setting kubelet hostname-override to %q", opts.nodeRegOpts.Name)
+glog.V(1).Infof("setting kubelet hostname-override to %q", opts.nodeRegOpts.Name)
 kubeletFlags["hostname-override"] = opts.nodeRegOpts.Name
 }

@@ -2358,7 +2358,7 @@ func (c *Cloud) GetVolumeLabels(volumeName KubernetesVolumeID) (map[string]strin
 labels := make(map[string]string)
 az := aws.StringValue(info.AvailabilityZone)
 if az == "" {
-return nil, fmt.Errorf("volume did not have AZ information: %q", info.VolumeId)
+return nil, fmt.Errorf("volume did not have AZ information: %q", aws.StringValue(info.VolumeId))
 }
 labels[kubeletapis.LabelZoneFailureDomain] = az

@@ -119,7 +119,7 @@ func (c *Cloud) checkIfAttachedToNode(diskName KubernetesVolumeID, nodeName type
 info, err := disk.describeVolume()
 if err != nil {
-glog.Warning("Error describing volume %s with %v", diskName, err)
+glog.Warningf("Error describing volume %s with %v", diskName, err)
 awsDiskInfo.volumeState = "unknown"
 return awsDiskInfo, false, err
 }

@@ -845,9 +845,9 @@ func (az *azVirtualMachineScaleSetsClient) List(ctx context.Context, resourceGro
 return
 }
-glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q,%q): start", resourceGroupName)
+glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): start", resourceGroupName)
 defer func() {
-glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q,%q): end", resourceGroupName)
+glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): end", resourceGroupName)
 }()
 mc := newMetricContext("vmss", "list", resourceGroupName, az.client.SubscriptionID)
@@ -876,9 +876,9 @@ func (az *azVirtualMachineScaleSetsClient) UpdateInstances(ctx context.Context,
 return
 }
-glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%q): start", resourceGroupName, VMScaleSetName, VMInstanceIDs)
+glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%v): start", resourceGroupName, VMScaleSetName, VMInstanceIDs)
 defer func() {
-glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%q): end", resourceGroupName, VMScaleSetName, VMInstanceIDs)
+glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%v): end", resourceGroupName, VMScaleSetName, VMInstanceIDs)
 }()
 mc := newMetricContext("vmss", "update_instances", resourceGroupName, az.client.SubscriptionID)

@@ -262,7 +262,7 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
 func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, existingLBs *[]network.LoadBalancer, nodes []*v1.Node) (selectedLB *network.LoadBalancer, existsLb bool, err error) {
 isInternal := requiresInternalLoadBalancer(service)
 serviceName := getServiceName(service)
-glog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%s) - start", serviceName, isInternal)
+glog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%v) - start", serviceName, isInternal)
 vmSetNames, err := az.vmSet.GetVMSetNames(service, nodes)
 if err != nil {
 glog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err)
@@ -842,7 +842,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 // This entails adding required, missing SecurityRules and removing stale rules.
 func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, lbIP *string, wantLb bool) (*network.SecurityGroup, error) {
 serviceName := getServiceName(service)
-glog.V(5).Infof("reconcileSecurityGroup(%s): START clusterName=%q lbName=%q", serviceName, clusterName)
+glog.V(5).Infof("reconcileSecurityGroup(%s): START clusterName=%q", serviceName, clusterName)
 ports := service.Spec.Ports
 if ports == nil {

@@ -475,7 +475,7 @@ func (ss *scaleSet) getScaleSetWithRetry(name string) (compute.VirtualMachineSca
 glog.Errorf("backoff: failure for scale set %q, will retry,err=%v", name, retryErr)
 return false, nil
 }
-glog.V(4).Info("backoff: success for scale set %q", name)
+glog.V(4).Infof("backoff: success for scale set %q", name)
 if cached != nil {
 exists = true
@@ -845,7 +845,7 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAd
 ssName, err := extractScaleSetNameByProviderID(*ipConfigurations.ID)
 if err != nil {
-glog.V(4).Infof("backend IP configuration %q is not belonging to any vmss, omit it")
+glog.V(4).Infof("backend IP configuration %q is not belonging to any vmss, omit it", *ipConfigurations.ID)
 continue
 }

@@ -99,7 +99,7 @@ func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) {
 for _, vm := range vms {
 if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil {
-glog.Warningf("failed to get computerName for vmssVM (%q)", vm.Name)
+glog.Warningf("failed to get computerName for vmssVM (%q)", ssName)
 continue
 }

@@ -169,7 +169,7 @@ func (am *addressManager) ensureAddressReservation() (string, error) {
 if am.isManagedAddress(addr) {
 // The address with this name is checked at the beginning of 'HoldAddress()', but for some reason
 // it was re-created by this point. May be possible that two controllers are running.
-glog.Warning("%v: address %q unexpectedly existed with IP %q.", am.logPrefix, addr.Name, am.targetIP)
+glog.Warningf("%v: address %q unexpectedly existed with IP %q.", am.logPrefix, addr.Name, am.targetIP)
 } else {
 // If the retrieved address is not named with the loadbalancer name, then the controller does not own it, but will allow use of it.
 glog.V(4).Infof("%v: address %q was already reserved with name: %q, description: %q", am.logPrefix, am.targetIP, addr.Name, addr.Description)

@@ -176,13 +176,13 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error {
 defer cancel()
 vm, err := res.datacenter.GetVMByUUID(ctx, nodeUUID)
 if err != nil {
-glog.V(4).Infof("Error %q while looking for vm=%+v in vc=%s and datacenter=%s",
-err, node.Name, vm, res.vc, res.datacenter.Name())
+glog.V(4).Infof("Error while looking for vm=%+v in vc=%s and datacenter=%s: %v",
+vm, res.vc, res.datacenter.Name(), err)
 if err != vclib.ErrNoVMFound {
 setGlobalErr(err)
 } else {
 glog.V(4).Infof("Did not find node %s in vc=%s and datacenter=%s",
-node.Name, res.vc, res.datacenter.Name(), err)
+node.Name, res.vc, res.datacenter.Name())
 }
 continue
 }
@@ -309,7 +309,7 @@ func (nm *NodeManager) GetNodeDetails() ([]NodeDetails, error) {
 if err != nil {
 return nil, err
 }
-glog.V(4).Infof("Updated NodeInfo %q for node %q.", nodeInfo, nodeName)
+glog.V(4).Infof("Updated NodeInfo %v for node %q.", nodeInfo, nodeName)
 nodeDetails = append(nodeDetails, NodeDetails{nodeName, nodeInfo.vm, nodeInfo.vmUUID})
 }
 return nodeDetails, nil

@@ -102,7 +102,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto
 dummyVM, err = datastore.Datacenter.GetVMByPath(ctx, vmdisk.vmOptions.VMFolder.InventoryPath+"/"+dummyVMFullName)
 if err != nil {
 // Create a dummy VM
-glog.V(1).Info("Creating Dummy VM: %q", dummyVMFullName)
+glog.V(1).Infof("Creating Dummy VM: %q", dummyVMFullName)
 dummyVM, err = vmdisk.createDummyVM(ctx, datastore.Datacenter, dummyVMFullName)
 if err != nil {
 glog.Errorf("Failed to create Dummy VM. err: %v", err)
@@ -132,7 +132,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto
 fileAlreadyExist = isAlreadyExists(vmdisk.diskPath, err)
 if fileAlreadyExist {
 //Skip error and continue to detach the disk as the disk was already created on the datastore.
-glog.V(vclib.LogLevel).Info("File: %v already exists", vmdisk.diskPath)
+glog.V(vclib.LogLevel).Infof("File: %v already exists", vmdisk.diskPath)
 } else {
 glog.Errorf("Failed to attach the disk to VM: %q with err: %+v", dummyVMFullName, err)
 return "", err
@@ -143,7 +143,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto
 if err != nil {
 if vclib.DiskNotFoundErrMsg == err.Error() && fileAlreadyExist {
 // Skip error if disk was already detached from the dummy VM but still present on the datastore.
-glog.V(vclib.LogLevel).Info("File: %v is already detached", vmdisk.diskPath)
+glog.V(vclib.LogLevel).Infof("File: %v is already detached", vmdisk.diskPath)
 } else {
 glog.Errorf("Failed to detach the disk: %q from VM: %q with err: %+v", vmdisk.diskPath, dummyVMFullName, err)
 return "", err

@@ -846,7 +846,7 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyName string, nodeN
 if err == nil {
 glog.V(4).Infof("AttachDisk: Found node %q", convertToString(nodeName))
 diskUUID, err = attachDiskInternal(vmDiskPath, storagePolicyName, nodeName)
-glog.V(4).Infof("AttachDisk: Retry: diskUUID %s, err +%v", convertToString(nodeName), diskUUID, err)
+glog.V(4).Infof("AttachDisk: Retry: diskUUID %s, err +%v", diskUUID, err)
 }
 }
 }
@@ -963,7 +963,7 @@ func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (b
 volPath,
 vSphereInstance)
 }
-glog.V(4).Infof("DiskIsAttached result: %q and error: %q, for volume: %q", attached, err, volPath)
+glog.V(4).Infof("DiskIsAttached result: %v and error: %q, for volume: %q", attached, err, volPath)
 return attached, err
 }
 requestTime := time.Now()
@@ -1054,7 +1054,7 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string)
 return nodesToRetry, nil
 }
-glog.V(4).Info("Starting DisksAreAttached API for vSphere with nodeVolumes: %+v", nodeVolumes)
+glog.V(4).Infof("Starting DisksAreAttached API for vSphere with nodeVolumes: %+v", nodeVolumes)
 // Create context
 ctx, cancel := context.WithCancel(context.Background())
 defer cancel()

@@ -479,7 +479,7 @@ func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeN
 return nodesToRetry, err
 }
 nodeUUID = strings.ToLower(nodeUUID)
-glog.V(9).Infof("Verifying volume for node %s with nodeuuid %q: %s", nodeName, nodeUUID, vmMoMap)
+glog.V(9).Infof("Verifying volume for node %s with nodeuuid %q: %v", nodeName, nodeUUID, vmMoMap)
 vclib.VerifyVolumePathsForVM(vmMoMap[nodeUUID], nodeVolumes[nodeName], convertToString(nodeName), attached)
 }
 return nodesToRetry, nil

@@ -460,7 +460,7 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
 switch {
 case len(solid) != 0:
-glog.V(2).Infof("object %s has at least one existing owner: %#v, will not garbage collect", solid, item.identity)
+glog.V(2).Infof("object %#v has at least one existing owner: %#v, will not garbage collect", solid, item.identity)
 if len(dangling) == 0 && len(waitingForDependentsDeletion) == 0 {
 return nil
 }

@@ -504,7 +504,7 @@ func (adc *attachDetachController) processVolumesInUse(
 err := adc.actualStateOfWorld.SetVolumeMountedByNode(attachedVolume.VolumeName, nodeName, mounted)
 if err != nil {
 glog.Warningf(
-"SetVolumeMountedByNode(%q, %q, %q) returned an error: %v",
+"SetVolumeMountedByNode(%q, %q, %v) returned an error: %v",
 attachedVolume.VolumeName, nodeName, mounted, err)
 }
 }

@@ -68,7 +68,7 @@ func CreateVolumeSpec(podVolume v1.Volume, podNamespace string, pvcLister coreli
 glog.V(10).Infof(
 "Extracted volumeSpec (%v) from bound PV (pvName %q) and PVC (ClaimName %q/%q pvcUID %v)",
-volumeSpec.Name,
+volumeSpec.Name(),
 pvName,
 podNamespace,
 pvcSource.ClaimName,

@@ -163,7 +163,7 @@ func (c *Controller) addFinalizer(pv *v1.PersistentVolume) error {
 pvClone.ObjectMeta.Finalizers = append(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer)
 _, err := c.client.CoreV1().PersistentVolumes().Update(pvClone)
 if err != nil {
-glog.V(3).Infof("Error adding protection finalizer to PV %s: %v", pv.Name)
+glog.V(3).Infof("Error adding protection finalizer to PV %s: %v", pv.Name, err)
 return err
 }
 glog.V(3).Infof("Added protection finalizer to PV %s", pv.Name)

@@ -277,7 +277,7 @@ func translateMountPropagation(mountMode *v1.MountPropagationMode) (runtimeapi.M
 case *mountMode == v1.MountPropagationNone:
 return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil
 default:
-return 0, fmt.Errorf("invalid MountPropagation mode: %q", mountMode)
+return 0, fmt.Errorf("invalid MountPropagation mode: %q", *mountMode)
 }
 }
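
Several of the fixes above (the AWS `info.VolumeId`, the vmss `vm.Name`, and this `mountMode` hunk) share one cause: the argument is a pointer, so the verb formats the pointer rather than the value it points at, and vet flags the verb/type mismatch. A minimal sketch of the failure mode — the type here merely stands in for `v1.MountPropagationMode`, and the values are hypothetical:

```go
package main

import "fmt"

// MountPropagationMode stands in for the k8s API type of the same name.
type MountPropagationMode string

func main() {
	mode := MountPropagationMode("None")
	p := &mode

	// %q cannot format a pointer: prints %!q(*main.MountPropagationMode=0x...)
	fmt.Printf("invalid MountPropagation mode: %q\n", p)

	// Dereference first (or use a helper such as aws.StringValue for *string):
	// prints: invalid MountPropagation mode: "None"
	fmt.Printf("invalid MountPropagation mode: %q\n", *p)
}
```
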

@@ -513,7 +513,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
 glog.V(5).Infof(
 "Extracted volumeSpec (%v) from bound PV (pvName %q) and PVC (ClaimName %q/%q pvcUID %v)",
-volumeSpec.Name,
+volumeSpec.Name(),
 pvName,
 podNamespace,
 pvcSource.ClaimName,

@@ -581,7 +581,7 @@ func CleanupLeftovers(ipvs utilipvs.Interface, ipt utiliptables.Interface, ipset
 err = ipset.DestroySet(set.name)
 if err != nil {
 if !utilipset.IsNotFoundError(err) {
-glog.Errorf("Error removing ipset %s, error: %v", set, err)
+glog.Errorf("Error removing ipset %s, error: %v", set.name, err)
 encounteredError = true
 }
 }

@@ -552,7 +552,7 @@ func (mounter *SafeFormatAndMount) GetDiskFormat(disk string) (string, error) {
 }
 if len(pttype) > 0 {
-glog.V(4).Infof("Disk %s detected partition table type: %s", pttype)
+glog.V(4).Infof("Disk %s detected partition table type: %s", disk, pttype)
 // Returns a special non-empty string as filesystem type, then kubelet
 // will not format it.
 return "unknown data, probably partitions", nil

@@ -610,7 +610,7 @@ func (testcase *testcase) DiskIsAttachedByName(nodeName types.NodeName, volumeID
 return false, instanceID, errors.New("unexpected DiskIsAttachedByName call: wrong instanceID")
 }
-glog.V(4).Infof("DiskIsAttachedByName call: %s, %s, returning %v, %v", volumeID, nodeName, expected.isAttached, expected.instanceID, expected.ret)
+glog.V(4).Infof("DiskIsAttachedByName call: %s, %s, returning %v, %v, %v", volumeID, nodeName, expected.isAttached, expected.instanceID, expected.ret)
 return expected.isAttached, expected.instanceID, expected.ret
 }

@@ -365,7 +365,7 @@ func (util *FCUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath stri
 if err.Error() != volumepathhandler.ErrDeviceNotFound {
 return fmt.Errorf("fc: failed to get loopback for destination path: %v, err: %v", dstPath, err)
 }
-glog.Warning("fc: loopback for destination path: %s not found", dstPath)
+glog.Warningf("fc: loopback for destination path: %s not found", dstPath)
 }
 // Detach volume from kubelet node

@@ -516,7 +516,7 @@ func (util *ISCSIUtil) DetachBlockISCSIDisk(c iscsiDiskUnmapper, mapPath string)
 if err.Error() != volumepathhandler.ErrDeviceNotFound {
 return fmt.Errorf("failed to get loopback for device: %v, err: %v", devicePath, err)
 }
-glog.Warning("iscsi: loopback for device: %s not found", device)
+glog.Warningf("iscsi: loopback for device: %s not found", device)
 }
 // Detach a volume from kubelet node
 err = util.detachISCSIDisk(c.exec, portals, iqn, iface, volName, initiatorName, found)

@@ -211,7 +211,7 @@ func (util *PortworxVolumeUtil) ResizeVolume(spec *volume.Spec, newSize resource
 newSizeInBytes := uint64(volutil.RoundUpToGiB(newSize) * volutil.GIB)
 if vol.Spec.Size >= newSizeInBytes {
 glog.Infof("Portworx volume: %s already at size: %d greater than or equal to new "+
-"requested size: %d. Skipping resize.", vol.Spec.Size, newSizeInBytes)
+"requested size: %d. Skipping resize.", spec.Name(), vol.Spec.Size, newSizeInBytes)
 return nil
 }

@@ -140,7 +140,7 @@ func getDefaultClass(lister storagelisters.StorageClassLister) (*storage.Storage
 return nil, nil
 }
 if len(defaultClasses) > 1 {
-glog.V(4).Infof("getDefaultClass %s defaults found", len(defaultClasses))
+glog.V(4).Infof("getDefaultClass %d defaults found", len(defaultClasses))
 return nil, errors.NewInternalError(fmt.Errorf("%d default StorageClasses were found", len(defaultClasses)))
 }
 return defaultClasses[0], nil

@@ -166,5 +166,5 @@ func NewInternalError(reason string) InternalError {
 }
 func NewInternalErrorf(format string, a ...interface{}) InternalError {
-return InternalError{fmt.Sprintf(format, a)}
+return InternalError{fmt.Sprintf(format, a...)}
 }
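
This final hunk is the subtlest class: `a` is already a `[]interface{}`, so `fmt.Sprintf(format, a)` passes the slice as a single operand (the first verb gets the whole slice and the remaining verbs go unmatched), while `a...` spreads its elements across the verbs. Newer vet versions also report the missing `...` when it occurs inside a printf-style wrapper like `NewInternalErrorf`. A minimal sketch with hypothetical values:

```go
package main

import "fmt"

func main() {
	a := []interface{}{"pv-1", 3}

	// Slice passed as one operand:
	// prints: object [pv-1 3] has %!d(MISSING) owners
	fmt.Println(fmt.Sprintf("object %s has %d owners", a))

	// Elements spread across the verbs:
	// prints: object pv-1 has 3 owners
	fmt.Println(fmt.Sprintf("object %s has %d owners", a...))
}
```
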