From 59ca5986dd8cba39c4933160c11896c401e58a4f Mon Sep 17 00:00:00 2001 From: Michal Rostecki Date: Tue, 14 Jun 2016 14:04:38 +0200 Subject: [PATCH] Print/log pointers of structs with %#v instead of %+v There are many places in k8s where %+v is used to format a pointer to struct, which isn't working as expected. Fixes #26591 --- pkg/api/rest/resttest/resttest.go | 2 +- pkg/controller/controller_utils.go | 4 +-- pkg/controller/controller_utils_test.go | 4 +-- pkg/controller/daemon/daemoncontroller.go | 28 +++++++++---------- .../deployment/deployment_controller.go | 18 ++++++------ .../deployment/util/deployment_util_test.go | 6 ++-- .../endpoint/endpoints_controller.go | 2 +- pkg/controller/petset/pet_set_utils.go | 4 +-- pkg/controller/replicaset/replica_set.go | 6 ++-- pkg/controller/replicaset/replica_set_test.go | 18 ++++++------ .../replication/replication_controller.go | 4 +-- .../replication_controller_test.go | 18 ++++++------ pkg/controller/service/servicecontroller.go | 2 +- .../volume/persistentvolume/controller.go | 12 ++++---- .../persistentvolume/controller_base.go | 18 ++++++------ .../persistentvolume/controller_test.go | 2 +- pkg/dns/dns_test.go | 2 +- pkg/kubectl/rolling_updater_test.go | 2 +- pkg/kubelet/config/common.go | 2 +- pkg/kubelet/dockertools/docker_manager.go | 6 ++-- .../dockertools/docker_manager_test.go | 8 +++--- pkg/kubelet/kubelet_cadvisor_test.go | 2 +- pkg/kubelet/kubelet_test.go | 2 +- pkg/kubelet/pleg/generic.go | 2 +- pkg/kubelet/rkt/rkt.go | 2 +- pkg/kubelet/status/status_manager_test.go | 2 +- pkg/labels/selector.go | 4 +-- pkg/labels/selector_test.go | 4 +-- pkg/proxy/userspace/port_allocator_test.go | 2 +- pkg/proxy/userspace/proxier.go | 2 +- pkg/proxy/userspace/roundrobin.go | 4 +-- pkg/util/pod/pod.go | 2 +- pkg/util/replicaset/replicaset.go | 2 +- pkg/volume/plugins.go | 2 +- pkg/volume/plugins_test.go | 4 +-- 35 files changed, 102 insertions(+), 102 deletions(-) diff --git a/pkg/api/rest/resttest/resttest.go 
b/pkg/api/rest/resttest/resttest.go index a6f50a804b..e4c1bbb392 100644 --- a/pkg/api/rest/resttest/resttest.go +++ b/pkg/api/rest/resttest/resttest.go @@ -960,7 +960,7 @@ func (t *Tester) testDeleteGracefulShorten(obj runtime.Object, createFn CreateFu objectMeta = t.getObjectMetaOrFail(object) if objectMeta.DeletionTimestamp == nil || objectMeta.DeletionGracePeriodSeconds == nil || *objectMeta.DeletionGracePeriodSeconds != expectedGrace || !objectMeta.DeletionTimestamp.Before(deletionTimestamp) { - t.Errorf("unexpected deleted meta: %+v", objectMeta) + t.Errorf("unexpected deleted meta: %#v", objectMeta) } } diff --git a/pkg/controller/controller_utils.go b/pkg/controller/controller_utils.go index 8c2eb5edf9..38cb99feb3 100644 --- a/pkg/controller/controller_utils.go +++ b/pkg/controller/controller_utils.go @@ -190,7 +190,7 @@ func (r *ControllerExpectations) LowerExpectations(controllerKey string, add, de if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists { exp.Add(int64(-add), int64(-del)) // The expectations might've been modified since the update on the previous line. - glog.V(4).Infof("Lowered expectations %+v", exp) + glog.V(4).Infof("Lowered expectations %#v", exp) } } @@ -199,7 +199,7 @@ func (r *ControllerExpectations) RaiseExpectations(controllerKey string, add, de if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists { exp.Add(int64(add), int64(del)) // The expectations might've been modified since the update on the previous line. 
- glog.V(4).Infof("Raised expectations %+v", exp) + glog.V(4).Infof("Raised expectations %#v", exp) } } diff --git a/pkg/controller/controller_utils_test.go b/pkg/controller/controller_utils_test.go index 29dd072593..ca97704266 100644 --- a/pkg/controller/controller_utils_test.go +++ b/pkg/controller/controller_utils_test.go @@ -125,7 +125,7 @@ func TestControllerExpectations(t *testing.T) { // RC fires off adds and deletes at apiserver, then sets expectations rcKey, err := KeyFunc(rc) if err != nil { - t.Errorf("Couldn't get key for object %+v: %v", rc, err) + t.Errorf("Couldn't get key for object %#v: %v", rc, err) } e.SetExpectations(rcKey, adds, dels) var wg sync.WaitGroup @@ -202,7 +202,7 @@ func TestUIDExpectations(t *testing.T) { podList := newPodList(nil, 5, api.PodRunning, rc) rcKey, err := KeyFunc(rc) if err != nil { - t.Fatalf("Couldn't get key for object %+v: %v", rc, err) + t.Fatalf("Couldn't get key for object %#v: %v", rc, err) } rcKeys = append(rcKeys, rcKey) rcPodNames := []string{} diff --git a/pkg/controller/daemon/daemoncontroller.go b/pkg/controller/daemon/daemoncontroller.go index fc0d102ae7..c713f4a72a 100644 --- a/pkg/controller/daemon/daemoncontroller.go +++ b/pkg/controller/daemon/daemoncontroller.go @@ -217,12 +217,12 @@ func (dsc *DaemonSetsController) deleteDaemonset(obj interface{}) { if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.Errorf("Couldn't get object from tombstone %+v", obj) + glog.Errorf("Couldn't get object from tombstone %#v", obj) return } ds, ok = tombstone.Obj.(*extensions.DaemonSet) if !ok { - glog.Errorf("Tombstone contained object that is not a DaemonSet %+v", obj) + glog.Errorf("Tombstone contained object that is not a DaemonSet %#v", obj) return } } @@ -267,7 +267,7 @@ func (dsc *DaemonSetsController) runWorker() { func (dsc *DaemonSetsController) enqueueDaemonSet(ds *extensions.DaemonSet) { key, err := controller.KeyFunc(ds) if err != nil { - glog.Errorf("Couldn't get key for object 
%+v: %v", ds, err) + glog.Errorf("Couldn't get key for object %#v: %v", ds, err) return } @@ -342,7 +342,7 @@ func (dsc *DaemonSetsController) addPod(obj interface{}) { if ds := dsc.getPodDaemonSet(pod); ds != nil { dsKey, err := controller.KeyFunc(ds) if err != nil { - glog.Errorf("Couldn't get key for object %+v: %v", ds, err) + glog.Errorf("Couldn't get key for object %#v: %v", ds, err) return } dsc.expectations.CreationObserved(dsKey) @@ -386,12 +386,12 @@ func (dsc *DaemonSetsController) deletePod(obj interface{}) { if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.Errorf("Couldn't get object from tombstone %+v", obj) + glog.Errorf("Couldn't get object from tombstone %#v", obj) return } pod, ok = tombstone.Obj.(*api.Pod) if !ok { - glog.Errorf("Tombstone contained object that is not a pod %+v", obj) + glog.Errorf("Tombstone contained object that is not a pod %#v", obj) return } } @@ -399,7 +399,7 @@ func (dsc *DaemonSetsController) deletePod(obj interface{}) { if ds := dsc.getPodDaemonSet(pod); ds != nil { dsKey, err := controller.KeyFunc(ds) if err != nil { - glog.Errorf("Couldn't get key for object %+v: %v", ds, err) + glog.Errorf("Couldn't get key for object %#v: %v", ds, err) return } dsc.expectations.DeletionObserved(dsKey) @@ -468,14 +468,14 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) { // Find out which nodes are running the daemon pods selected by ds. nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds) if err != nil { - glog.Errorf("Error getting node to daemon pod mapping for daemon set %+v: %v", ds, err) + glog.Errorf("Error getting node to daemon pod mapping for daemon set %#v: %v", ds, err) } // For each node, if the node is running the daemon pod but isn't supposed to, kill the daemon // pod. If the node is supposed to run the daemon pod, but isn't, create the daemon pod on the node. 
nodeList, err := dsc.nodeStore.List() if err != nil { - glog.Errorf("Couldn't get list of nodes when syncing daemon set %+v: %v", ds, err) + glog.Errorf("Couldn't get list of nodes when syncing daemon set %#v: %v", ds, err) } var nodesNeedingDaemonPods, podsToDelete []string for _, node := range nodeList.Items { @@ -505,7 +505,7 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) { // We need to set expectations before creating/deleting pods to avoid race conditions. dsKey, err := controller.KeyFunc(ds) if err != nil { - glog.Errorf("Couldn't get key for object %+v: %v", ds, err) + glog.Errorf("Couldn't get key for object %#v: %v", ds, err) return } @@ -583,13 +583,13 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet) glog.V(4).Infof("Updating daemon set status") nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds) if err != nil { - glog.Errorf("Error getting node to daemon pod mapping for daemon set %+v: %v", ds, err) + glog.Errorf("Error getting node to daemon pod mapping for daemon set %#v: %v", ds, err) return } nodeList, err := dsc.nodeStore.List() if err != nil { - glog.Errorf("Couldn't get list of nodes when updating daemon set %+v: %v", ds, err) + glog.Errorf("Couldn't get list of nodes when updating daemon set %#v: %v", ds, err) return } @@ -613,7 +613,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet) err = storeDaemonSetStatus(dsc.kubeClient.Extensions().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled) if err != nil { - glog.Errorf("Error storing status for daemon set %+v: %v", ds, err) + glog.Errorf("Error storing status for daemon set %#v: %v", ds, err) } } @@ -655,7 +655,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error { // then we do not want to call manage on foo until the daemon pods have been created. 
dsKey, err := controller.KeyFunc(ds) if err != nil { - glog.Errorf("Couldn't get key for object %+v: %v", ds, err) + glog.Errorf("Couldn't get key for object %#v: %v", ds, err) return err } dsNeedsSync := dsc.expectations.SatisfiedExpectations(dsKey) diff --git a/pkg/controller/deployment/deployment_controller.go b/pkg/controller/deployment/deployment_controller.go index 8bbc62618b..c20e23c404 100644 --- a/pkg/controller/deployment/deployment_controller.go +++ b/pkg/controller/deployment/deployment_controller.go @@ -229,12 +229,12 @@ func (dc *DeploymentController) deleteDeploymentNotification(obj interface{}) { if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.Errorf("Couldn't get object from tombstone %+v", obj) + glog.Errorf("Couldn't get object from tombstone %#v", obj) return } d, ok = tombstone.Obj.(*extensions.Deployment) if !ok { - glog.Errorf("Tombstone contained object that is not a Deployment %+v", obj) + glog.Errorf("Tombstone contained object that is not a Deployment %#v", obj) return } } @@ -305,12 +305,12 @@ func (dc *DeploymentController) deleteReplicaSet(obj interface{}) { if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.Errorf("Couldn't get object from tombstone %+v, could take up to %v before a deployment recreates/updates replicasets", obj, FullDeploymentResyncPeriod) + glog.Errorf("Couldn't get object from tombstone %#v, could take up to %v before a deployment recreates/updates replicasets", obj, FullDeploymentResyncPeriod) return } rs, ok = tombstone.Obj.(*extensions.ReplicaSet) if !ok { - glog.Errorf("Tombstone contained object that is not a ReplicaSet %+v, could take up to %v before a deployment recreates/updates replicasets", obj, FullDeploymentResyncPeriod) + glog.Errorf("Tombstone contained object that is not a ReplicaSet %#v, could take up to %v before a deployment recreates/updates replicasets", obj, FullDeploymentResyncPeriod) return } } @@ -344,7 +344,7 @@ func (dc 
*DeploymentController) addPod(obj interface{}) { if !ok { return } - glog.V(4).Infof("Pod %s created: %+v.", pod.Name, pod) + glog.V(4).Infof("Pod %s created: %#v.", pod.Name, pod) if d := dc.getDeploymentForPod(pod); d != nil { dc.enqueueDeployment(d) } @@ -382,16 +382,16 @@ func (dc *DeploymentController) deletePod(obj interface{}) { if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.Errorf("Couldn't get object from tombstone %+v", obj) + glog.Errorf("Couldn't get object from tombstone %#v", obj) return } pod, ok = tombstone.Obj.(*api.Pod) if !ok { - glog.Errorf("Tombstone contained object that is not a pod %+v", obj) + glog.Errorf("Tombstone contained object that is not a pod %#v", obj) return } } - glog.V(4).Infof("Pod %s deleted: %+v.", pod.Name, pod) + glog.V(4).Infof("Pod %s deleted: %#v.", pod.Name, pod) if d := dc.getDeploymentForPod(pod); d != nil { dc.enqueueDeployment(d) } @@ -400,7 +400,7 @@ func (dc *DeploymentController) deletePod(obj interface{}) { func (dc *DeploymentController) enqueueDeployment(deployment *extensions.Deployment) { key, err := controller.KeyFunc(deployment) if err != nil { - glog.Errorf("Couldn't get key for object %+v: %v", deployment, err) + glog.Errorf("Couldn't get key for object %#v: %v", deployment, err) return } diff --git a/pkg/controller/deployment/util/deployment_util_test.go b/pkg/controller/deployment/util/deployment_util_test.go index 262631da8a..7dca90f42e 100644 --- a/pkg/controller/deployment/util/deployment_util_test.go +++ b/pkg/controller/deployment/util/deployment_util_test.go @@ -286,7 +286,7 @@ func TestGetNewRC(t *testing.T) { t.Errorf("In test case %s, got unexpected error %v", test.test, err) } if !api.Semantic.DeepEqual(rs, test.expected) { - t.Errorf("In test case %s, expected %+v, got %+v", test.test, test.expected, rs) + t.Errorf("In test case %s, expected %#v, got %#v", test.test, test.expected, rs) } } } @@ -381,11 +381,11 @@ func TestGetOldRCs(t *testing.T) { if 
!equal(rss, test.expected) { t.Errorf("In test case %q, expected:", test.test) for _, rs := range test.expected { - t.Errorf("rs = %+v", rs) + t.Errorf("rs = %#v", rs) } t.Errorf("In test case %q, got:", test.test) for _, rs := range rss { - t.Errorf("rs = %+v", rs) + t.Errorf("rs = %#v", rs) } } } diff --git a/pkg/controller/endpoint/endpoints_controller.go b/pkg/controller/endpoint/endpoints_controller.go index d9ed3a89b5..ce557bda81 100644 --- a/pkg/controller/endpoint/endpoints_controller.go +++ b/pkg/controller/endpoint/endpoints_controller.go @@ -272,7 +272,7 @@ func (e *EndpointController) deletePod(obj interface{}) { } podKey, err := keyFunc(obj) if err != nil { - glog.Errorf("Couldn't get key for object %+v: %v", obj, err) + glog.Errorf("Couldn't get key for object %#v: %v", obj, err) return } glog.Infof("Pod %q was deleted but we don't have a record of its final state, so it will take up to %v before it will be removed from all endpoint records.", podKey, FullServiceResyncPeriod) diff --git a/pkg/controller/petset/pet_set_utils.go b/pkg/controller/petset/pet_set_utils.go index 21b13c1660..dac6c378c9 100644 --- a/pkg/controller/petset/pet_set_utils.go +++ b/pkg/controller/petset/pet_set_utils.go @@ -159,10 +159,10 @@ func pcbKeyFunc(obj interface{}) (string, error) { } p, ok := obj.(*pcb) if !ok { - return "", fmt.Errorf("not a valid pet control block %+v", p) + return "", fmt.Errorf("not a valid pet control block %#v", p) } if p.parent == nil { - return "", fmt.Errorf("cannot compute pet control block key without parent pointer %+v", p) + return "", fmt.Errorf("cannot compute pet control block key without parent pointer %#v", p) } return controller.KeyFunc(p.parent) } diff --git a/pkg/controller/replicaset/replica_set.go b/pkg/controller/replicaset/replica_set.go index 89c50d8c2d..4ffa0b4837 100644 --- a/pkg/controller/replicaset/replica_set.go +++ b/pkg/controller/replicaset/replica_set.go @@ -327,7 +327,7 @@ func isReplicaSetMatch(pod *api.Pod, rs 
*extensions.ReplicaSet) bool { // When a pod is created, enqueue the replica set that manages it and update it's expectations. func (rsc *ReplicaSetController) addPod(obj interface{}) { pod := obj.(*api.Pod) - glog.V(4).Infof("Pod %s created: %+v.", pod.Name, pod) + glog.V(4).Infof("Pod %s created: %#v.", pod.Name, pod) rs := rsc.getPodReplicaSet(pod) if rs == nil { @@ -405,11 +405,11 @@ func (rsc *ReplicaSetController) deletePod(obj interface{}) { } pod, ok = tombstone.Obj.(*api.Pod) if !ok { - glog.Errorf("Tombstone contained object that is not a pod %+v", obj) + glog.Errorf("Tombstone contained object that is not a pod %#v", obj) return } } - glog.V(4).Infof("Pod %s/%s deleted through %v, timestamp %+v: %+v.", pod.Namespace, pod.Name, utilruntime.GetCaller(), pod.DeletionTimestamp, pod) + glog.V(4).Infof("Pod %s/%s deleted through %v, timestamp %+v: %#v.", pod.Namespace, pod.Name, utilruntime.GetCaller(), pod.DeletionTimestamp, pod) if rs := rsc.getPodReplicaSet(pod); rs != nil { rsKey, err := controller.KeyFunc(rs) if err != nil { diff --git a/pkg/controller/replicaset/replica_set_test.go b/pkg/controller/replicaset/replica_set_test.go index 95b2d6e68e..833d6c7b0f 100644 --- a/pkg/controller/replicaset/replica_set_test.go +++ b/pkg/controller/replicaset/replica_set_test.go @@ -353,7 +353,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) { // Get the key for the controller rsKey, err := controller.KeyFunc(rsSpec) if err != nil { - t.Errorf("Couldn't get key for object %+v: %v", rsSpec, err) + t.Errorf("Couldn't get key for object %#v: %v", rsSpec, err) } // Lowering expectations should lead to a sync that creates a replica, however the @@ -704,7 +704,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) rsKey, err := controller.KeyFunc(rsSpec) if err != nil { - t.Errorf("Couldn't get key for object %+v: %v", rsSpec, err) + t.Errorf("Couldn't get key for object %#v: %v", rsSpec, err) } // Size up the controller, then size it down, 
and confirm the expected create/delete pattern @@ -885,7 +885,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) { // Get the ReplicaSet key rsKey, err := controller.KeyFunc(rs) if err != nil { - t.Errorf("Couldn't get key for object %+v: %v", rs, err) + t.Errorf("Couldn't get key for object %#v: %v", rs, err) } // This is to simulate a concurrent addPod, that has a handle on the expectations @@ -987,7 +987,7 @@ func TestDeletionTimestamp(t *testing.T) { manager.rsStore.Store.Add(rs) rsKey, err := controller.KeyFunc(rs) if err != nil { - t.Errorf("Couldn't get key for object %+v: %v", rs, err) + t.Errorf("Couldn't get key for object %#v: %v", rs, err) } pod := newPodList(nil, 1, api.PodPending, labelMap, rs, "pod").Items[0] pod.DeletionTimestamp = &unversioned.Time{Time: time.Now()} @@ -1004,7 +1004,7 @@ func TestDeletionTimestamp(t *testing.T) { podExp, exists, err := manager.expectations.GetExpectations(rsKey) if !exists || err != nil || !podExp.Fulfilled() { - t.Fatalf("Wrong expectations %+v", podExp) + t.Fatalf("Wrong expectations %#v", podExp) } // An update from no deletion timestamp to having one should be treated @@ -1021,7 +1021,7 @@ func TestDeletionTimestamp(t *testing.T) { podExp, exists, err = manager.expectations.GetExpectations(rsKey) if !exists || err != nil || !podExp.Fulfilled() { - t.Fatalf("Wrong expectations %+v", podExp) + t.Fatalf("Wrong expectations %#v", podExp) } // An update to the pod (including an update to the deletion timestamp) @@ -1039,7 +1039,7 @@ func TestDeletionTimestamp(t *testing.T) { podExp, exists, err = manager.expectations.GetExpectations(rsKey) if !exists || err != nil || podExp.Fulfilled() { - t.Fatalf("Wrong expectations %+v", podExp) + t.Fatalf("Wrong expectations %#v", podExp) } // A pod with a non-nil deletion timestamp should also be ignored by the @@ -1047,7 +1047,7 @@ func TestDeletionTimestamp(t *testing.T) { manager.deletePod(&pod) podExp, exists, err = manager.expectations.GetExpectations(rsKey) if 
!exists || err != nil || podExp.Fulfilled() { - t.Fatalf("Wrong expectations %+v", podExp) + t.Fatalf("Wrong expectations %#v", podExp) } // Deleting the second pod should clear expectations. @@ -1061,7 +1061,7 @@ func TestDeletionTimestamp(t *testing.T) { podExp, exists, err = manager.expectations.GetExpectations(rsKey) if !exists || err != nil || !podExp.Fulfilled() { - t.Fatalf("Wrong expectations %+v", podExp) + t.Fatalf("Wrong expectations %#v", podExp) } } diff --git a/pkg/controller/replication/replication_controller.go b/pkg/controller/replication/replication_controller.go index bedb2295d5..e5ae8d1c1f 100644 --- a/pkg/controller/replication/replication_controller.go +++ b/pkg/controller/replication/replication_controller.go @@ -416,12 +416,12 @@ func (rm *ReplicationManager) deletePod(obj interface{}) { if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.Errorf("Couldn't get object from tombstone %+v", obj) + glog.Errorf("Couldn't get object from tombstone %#v", obj) return } pod, ok = tombstone.Obj.(*api.Pod) if !ok { - glog.Errorf("Tombstone contained object that is not a pod %+v", obj) + glog.Errorf("Tombstone contained object that is not a pod %#v", obj) return } } diff --git a/pkg/controller/replication/replication_controller_test.go b/pkg/controller/replication/replication_controller_test.go index ccaa892e6d..f1c9a067ba 100644 --- a/pkg/controller/replication/replication_controller_test.go +++ b/pkg/controller/replication/replication_controller_test.go @@ -343,7 +343,7 @@ func TestSyncReplicationControllerDormancy(t *testing.T) { // Get the key for the controller rcKey, err := controller.KeyFunc(controllerSpec) if err != nil { - t.Errorf("Couldn't get key for object %+v: %v", controllerSpec, err) + t.Errorf("Couldn't get key for object %#v: %v", controllerSpec, err) } // Lowering expectations should lead to a sync that creates a replica, however the @@ -686,7 +686,7 @@ func doTestControllerBurstReplicas(t *testing.T, 
burstReplicas, numReplicas int) rcKey, err := controller.KeyFunc(controllerSpec) if err != nil { - t.Errorf("Couldn't get key for object %+v: %v", controllerSpec, err) + t.Errorf("Couldn't get key for object %#v: %v", controllerSpec, err) } // Size up the controller, then size it down, and confirm the expected create/delete pattern @@ -865,7 +865,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) { // Get the RC key rcKey, err := controller.KeyFunc(rc) if err != nil { - t.Errorf("Couldn't get key for object %+v: %v", rc, err) + t.Errorf("Couldn't get key for object %#v: %v", rc, err) } // This is to simulate a concurrent addPod, that has a handle on the expectations @@ -965,7 +965,7 @@ func TestDeletionTimestamp(t *testing.T) { manager.rcStore.Indexer.Add(controllerSpec) rcKey, err := controller.KeyFunc(controllerSpec) if err != nil { - t.Errorf("Couldn't get key for object %+v: %v", controllerSpec, err) + t.Errorf("Couldn't get key for object %#v: %v", controllerSpec, err) } pod := newPodList(nil, 1, api.PodPending, controllerSpec, "pod").Items[0] pod.DeletionTimestamp = &unversioned.Time{Time: time.Now()} @@ -982,7 +982,7 @@ func TestDeletionTimestamp(t *testing.T) { podExp, exists, err := manager.expectations.GetExpectations(rcKey) if !exists || err != nil || !podExp.Fulfilled() { - t.Fatalf("Wrong expectations %+v", podExp) + t.Fatalf("Wrong expectations %#v", podExp) } // An update from no deletion timestamp to having one should be treated @@ -999,7 +999,7 @@ func TestDeletionTimestamp(t *testing.T) { podExp, exists, err = manager.expectations.GetExpectations(rcKey) if !exists || err != nil || !podExp.Fulfilled() { - t.Fatalf("Wrong expectations %+v", podExp) + t.Fatalf("Wrong expectations %#v", podExp) } // An update to the pod (including an update to the deletion timestamp) @@ -1017,7 +1017,7 @@ func TestDeletionTimestamp(t *testing.T) { podExp, exists, err = manager.expectations.GetExpectations(rcKey) if !exists || err != nil || podExp.Fulfilled() 
{ - t.Fatalf("Wrong expectations %+v", podExp) + t.Fatalf("Wrong expectations %#v", podExp) } // A pod with a non-nil deletion timestamp should also be ignored by the @@ -1025,7 +1025,7 @@ func TestDeletionTimestamp(t *testing.T) { manager.deletePod(&pod) podExp, exists, err = manager.expectations.GetExpectations(rcKey) if !exists || err != nil || podExp.Fulfilled() { - t.Fatalf("Wrong expectations %+v", podExp) + t.Fatalf("Wrong expectations %#v", podExp) } // Deleting the second pod should clear expectations. @@ -1039,7 +1039,7 @@ func TestDeletionTimestamp(t *testing.T) { podExp, exists, err = manager.expectations.GetExpectations(rcKey) if !exists || err != nil || !podExp.Fulfilled() { - t.Fatalf("Wrong expectations %+v", podExp) + t.Fatalf("Wrong expectations %#v", podExp) } } diff --git a/pkg/controller/service/servicecontroller.go b/pkg/controller/service/servicecontroller.go index 5e09c421d1..5c70320950 100644 --- a/pkg/controller/service/servicecontroller.go +++ b/pkg/controller/service/servicecontroller.go @@ -233,7 +233,7 @@ func (s *ServiceController) processDelta(delta *cache.Delta) (error, time.Durati // cache for deleting. 
key, ok := delta.Object.(cache.DeletedFinalStateUnknown) if !ok { - return fmt.Errorf("delta contained object that wasn't a service or a deleted key: %+v", delta), doNotRetry + return fmt.Errorf("delta contained object that wasn't a service or a deleted key: %#v", delta), doNotRetry } cachedService, ok = s.cache.get(key.Key) if !ok { diff --git a/pkg/controller/volume/persistentvolume/controller.go b/pkg/controller/volume/persistentvolume/controller.go index 07847f6d6d..66cf6e3d71 100644 --- a/pkg/controller/volume/persistentvolume/controller.go +++ b/pkg/controller/volume/persistentvolume/controller.go @@ -324,7 +324,7 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *api.PersistentVolu } else { volume, ok := obj.(*api.PersistentVolume) if !ok { - return fmt.Errorf("Cannot convert object from volume cache to volume %q!?: %+v", claim.Spec.VolumeName, obj) + return fmt.Errorf("Cannot convert object from volume cache to volume %q!?: %#v", claim.Spec.VolumeName, obj) } glog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume %q found: %s", claimToClaimKey(claim), claim.Spec.VolumeName, getVolumeStatusForLogging(volume)) @@ -407,7 +407,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *api.PersistentVolume) var ok bool claim, ok = obj.(*api.PersistentVolumeClaim) if !ok { - return fmt.Errorf("Cannot convert object from volume cache to volume %q!?: %+v", claim.Spec.VolumeName, obj) + return fmt.Errorf("Cannot convert object from volume cache to volume %q!?: %#v", claim.Spec.VolumeName, obj) } glog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s found: %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef), getClaimStatusForLogging(claim)) } @@ -891,7 +891,7 @@ func (ctrl *PersistentVolumeController) reclaimVolume(volume *api.PersistentVolu func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{}) { volume, ok := arg.(*api.PersistentVolume) if !ok { - glog.Errorf("Cannot convert 
recycleVolumeOperation argument to volume, got %+v", arg) + glog.Errorf("Cannot convert recycleVolumeOperation argument to volume, got %#v", arg) return } glog.V(4).Infof("recycleVolumeOperation [%s] started", volume.Name) @@ -979,7 +979,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{}) func (ctrl *PersistentVolumeController) deleteVolumeOperation(arg interface{}) { volume, ok := arg.(*api.PersistentVolume) if !ok { - glog.Errorf("Cannot convert deleteVolumeOperation argument to volume, got %+v", arg) + glog.Errorf("Cannot convert deleteVolumeOperation argument to volume, got %#v", arg) return } glog.V(4).Infof("deleteVolumeOperation [%s] started", volume.Name) @@ -1057,7 +1057,7 @@ func (ctrl *PersistentVolumeController) isVolumeReleased(volume *api.PersistentV var ok bool claim, ok = obj.(*api.PersistentVolumeClaim) if !ok { - return false, fmt.Errorf("Cannot convert object from claim cache to claim!?: %+v", obj) + return false, fmt.Errorf("Cannot convert object from claim cache to claim!?: %#v", obj) } } if claim != nil && claim.UID == volume.Spec.ClaimRef.UID { @@ -1117,7 +1117,7 @@ func (ctrl *PersistentVolumeController) provisionClaim(claim *api.PersistentVolu func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interface{}) { claim, ok := claimObj.(*api.PersistentVolumeClaim) if !ok { - glog.Errorf("Cannot convert provisionClaimOperation argument to claim, got %+v", claimObj) + glog.Errorf("Cannot convert provisionClaimOperation argument to claim, got %#v", claimObj) return } glog.V(4).Infof("provisionClaimOperation [%s] started", claimToClaimKey(claim)) diff --git a/pkg/controller/volume/persistentvolume/controller_base.go b/pkg/controller/volume/persistentvolume/controller_base.go index 88b5b139ac..4a5898c094 100644 --- a/pkg/controller/volume/persistentvolume/controller_base.go +++ b/pkg/controller/volume/persistentvolume/controller_base.go @@ -142,7 +142,7 @@ func (ctrl *PersistentVolumeController) 
initializeCaches(volumeSource, claimSour } volumeList, ok := volumeListObj.(*api.PersistentVolumeList) if !ok { - glog.Errorf("PersistentVolumeController can't initialize caches, expected list of volumes, got: %+v", volumeListObj) + glog.Errorf("PersistentVolumeController can't initialize caches, expected list of volumes, got: %#v", volumeListObj) return } for _, volume := range volumeList.Items { @@ -166,7 +166,7 @@ func (ctrl *PersistentVolumeController) initializeCaches(volumeSource, claimSour } claimList, ok := claimListObj.(*api.PersistentVolumeClaimList) if !ok { - glog.Errorf("PersistentVolumeController can't initialize caches, expected list of claims, got: %+v", claimListObj) + glog.Errorf("PersistentVolumeController can't initialize caches, expected list of claims, got: %#v", claimListObj) return } for _, claim := range claimList.Items { @@ -194,7 +194,7 @@ func (ctrl *PersistentVolumeController) storeClaimUpdate(claim *api.PersistentVo func (ctrl *PersistentVolumeController) addVolume(obj interface{}) { pv, ok := obj.(*api.PersistentVolume) if !ok { - glog.Errorf("expected PersistentVolume but handler received %+v", obj) + glog.Errorf("expected PersistentVolume but handler received %#v", obj) return } @@ -229,7 +229,7 @@ func (ctrl *PersistentVolumeController) addVolume(obj interface{}) { func (ctrl *PersistentVolumeController) updateVolume(oldObj, newObj interface{}) { newVolume, ok := newObj.(*api.PersistentVolume) if !ok { - glog.Errorf("Expected PersistentVolume but handler received %+v", newObj) + glog.Errorf("Expected PersistentVolume but handler received %#v", newObj) return } @@ -271,7 +271,7 @@ func (ctrl *PersistentVolumeController) deleteVolume(obj interface{}) { if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil { volume, ok = unknown.Obj.(*api.PersistentVolume) if !ok { - glog.Errorf("Expected PersistentVolume but deleteVolume received %+v", unknown.Obj) + glog.Errorf("Expected PersistentVolume but deleteVolume 
received %#v", unknown.Obj) return } } else { @@ -302,7 +302,7 @@ func (ctrl *PersistentVolumeController) deleteVolume(obj interface{}) { } } } else { - glog.Errorf("Cannot convert object from claim cache to claim %q!?: %+v", claimrefToClaimKey(volume.Spec.ClaimRef), claimObj) + glog.Errorf("Cannot convert object from claim cache to claim %q!?: %#v", claimrefToClaimKey(volume.Spec.ClaimRef), claimObj) } } } @@ -381,11 +381,11 @@ func (ctrl *PersistentVolumeController) deleteClaim(obj interface{}) { if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil { claim, ok = unknown.Obj.(*api.PersistentVolumeClaim) if !ok { - glog.Errorf("Expected PersistentVolumeClaim but deleteClaim received %+v", unknown.Obj) + glog.Errorf("Expected PersistentVolumeClaim but deleteClaim received %#v", unknown.Obj) return } } else { - glog.Errorf("Expected PersistentVolumeClaim but deleteClaim received %+v", obj) + glog.Errorf("Expected PersistentVolumeClaim but deleteClaim received %#v", obj) return } } @@ -413,7 +413,7 @@ func (ctrl *PersistentVolumeController) deleteClaim(obj interface{}) { } } } else { - glog.Errorf("Cannot convert object from volume cache to volume %q!?: %+v", claim.Spec.VolumeName, pvObj) + glog.Errorf("Cannot convert object from volume cache to volume %q!?: %#v", claim.Spec.VolumeName, pvObj) } } } diff --git a/pkg/controller/volume/persistentvolume/controller_test.go b/pkg/controller/volume/persistentvolume/controller_test.go index 638fae112f..abf2981fe2 100644 --- a/pkg/controller/volume/persistentvolume/controller_test.go +++ b/pkg/controller/volume/persistentvolume/controller_test.go @@ -234,7 +234,7 @@ func storeVersion(t *testing.T, prefix string, c cache.Store, version string, ex } pv, ok := pvObj.(*api.PersistentVolume) if !ok { - t.Errorf("expected volume in the cache, got different object instead: %+v", pvObj) + t.Errorf("expected volume in the cache, got different object instead: %#v", pvObj) } if ret { diff --git 
a/pkg/dns/dns_test.go b/pkg/dns/dns_test.go index 505f6c3911..c7e9dce23f 100644 --- a/pkg/dns/dns_test.go +++ b/pkg/dns/dns_test.go @@ -125,7 +125,7 @@ func assertARecordsMatchIPs(t *testing.T, records []dns.RR, ips ...string) { gotEndpoints := sets.NewString() for _, r := range records { if a, ok := r.(*dns.A); !ok { - t.Errorf("Expected A record, got %+v", a) + t.Errorf("Expected A record, got %#v", r) } else { gotEndpoints.Insert(a.A.String()) } diff --git a/pkg/kubectl/rolling_updater_test.go b/pkg/kubectl/rolling_updater_test.go index ef5f969497..9ee9550989 100644 --- a/pkg/kubectl/rolling_updater_test.go +++ b/pkg/kubectl/rolling_updater_test.go @@ -817,7 +817,7 @@ Scaling foo-v2 up to 2 oldReady := next(&oldReady) newReady := next(&newReady) if oldReady == -1 || newReady == -1 { - t.Fatalf("unexpected getReadyPods call for:\noldRc: %+v\nnewRc: %+v", oldRc, newRc) + t.Fatalf("unexpected getReadyPods call for:\noldRc: %#v\nnewRc: %#v", oldRc, newRc) } return int32(oldReady), int32(newReady), nil } diff --git a/pkg/kubelet/config/common.go b/pkg/kubelet/config/common.go index ec11b227a3..4379bb4dd3 100644 --- a/pkg/kubelet/config/common.go +++ b/pkg/kubelet/config/common.go @@ -100,7 +100,7 @@ func tryDecodeSinglePod(data []byte, defaultFn defaultFunc) (parsed bool, pod *a } // Check whether the object could be converted to single pod. 
if _, ok := obj.(*api.Pod); !ok { - err = fmt.Errorf("invalid pod: %+v", obj) + err = fmt.Errorf("invalid pod: %#v", obj) return false, pod, err } newPod := obj.(*api.Pod) diff --git a/pkg/kubelet/dockertools/docker_manager.go b/pkg/kubelet/dockertools/docker_manager.go index d7b0dddd1a..f490204993 100644 --- a/pkg/kubelet/dockertools/docker_manager.go +++ b/pkg/kubelet/dockertools/docker_manager.go @@ -1775,7 +1775,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub defer func() { metrics.ContainerManagerLatency.WithLabelValues("computePodContainerChanges").Observe(metrics.SinceInMicroseconds(start)) }() - glog.V(5).Infof("Syncing Pod %q: %+v", format.Pod(pod), pod) + glog.V(5).Infof("Syncing Pod %q: %#v", format.Pod(pod), pod) containersToStart := make(map[int]string) containersToKeep := make(map[kubecontainer.DockerID]int) @@ -2062,10 +2062,10 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec initContainerResult.Fail(kubecontainer.ErrRunInitContainer, fmt.Sprintf("init container %q exited with %d", status.Name, status.ExitCode)) result.AddSyncResult(initContainerResult) if pod.Spec.RestartPolicy == api.RestartPolicyNever { - utilruntime.HandleError(fmt.Errorf("error running pod %q init container %q, restart=Never: %+v", format.Pod(pod), status.Name, status)) + utilruntime.HandleError(fmt.Errorf("error running pod %q init container %q, restart=Never: %#v", format.Pod(pod), status.Name, status)) return } - utilruntime.HandleError(fmt.Errorf("Error running pod %q init container %q, restarting: %+v", format.Pod(pod), status.Name, status)) + utilruntime.HandleError(fmt.Errorf("Error running pod %q init container %q, restarting: %#v", format.Pod(pod), status.Name, status)) } } diff --git a/pkg/kubelet/dockertools/docker_manager_test.go b/pkg/kubelet/dockertools/docker_manager_test.go index a14edbd54c..22ba975eb1 100644 --- a/pkg/kubelet/dockertools/docker_manager_test.go +++ 
b/pkg/kubelet/dockertools/docker_manager_test.go @@ -1716,7 +1716,7 @@ func verifySyncResults(t *testing.T, expectedResults []*kubecontainer.SyncResult if len(expectedResults) != len(realResult.SyncResults) { t.Errorf("expected sync result number %d, got %d", len(expectedResults), len(realResult.SyncResults)) for _, r := range expectedResults { - t.Errorf("expected result: %+v", r) + t.Errorf("expected result: %#v", r) } for _, r := range realResult.SyncResults { t.Errorf("real result: %+v", r) @@ -1733,16 +1733,16 @@ func verifySyncResults(t *testing.T, expectedResults []*kubecontainer.SyncResult // We use Contains() here because the message format may be changed, but at least we should // make sure that the expected message is contained. if realR.Error != expectR.Error || !strings.Contains(realR.Message, expectR.Message) { - t.Errorf("expected sync result %+v, got %+v", expectR, realR) + t.Errorf("expected sync result %#v, got %#v", expectR, realR) } found++ } } if found == 0 { - t.Errorf("not found expected result %+v", expectR) + t.Errorf("not found expected result %#v", expectR) } if found > 1 { - t.Errorf("got %d duplicate expected result %+v", found, expectR) + t.Errorf("got %d duplicate expected result %#v", found, expectR) } } } diff --git a/pkg/kubelet/kubelet_cadvisor_test.go b/pkg/kubelet/kubelet_cadvisor_test.go index a39bad872c..6c21547d5f 100644 --- a/pkg/kubelet/kubelet_cadvisor_test.go +++ b/pkg/kubelet/kubelet_cadvisor_test.go @@ -109,7 +109,7 @@ func TestGetRawContainerInfoSubcontainers(t *testing.T) { t.Errorf("unexpected error: %v", err) } if len(result) != 2 { - t.Errorf("Expected 2 elements, received: %+v", result) + t.Errorf("Expected 2 elements, received: %#v", result) } mockCadvisor.AssertExpectations(t) } diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index 28e74bba7c..03969a8874 100644 --- a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -3221,7 +3221,7 @@ func TestGetPodsToSync(t *testing.T) { } 
} if !found { - t.Errorf("expected pod not found: %+v", expect) + t.Errorf("expected pod not found: %#v", expect) } } } else { diff --git a/pkg/kubelet/pleg/generic.go b/pkg/kubelet/pleg/generic.go index 1babc45bc7..27007b0c10 100644 --- a/pkg/kubelet/pleg/generic.go +++ b/pkg/kubelet/pleg/generic.go @@ -324,7 +324,7 @@ func (g *GenericPLEG) updateCache(pod *kubecontainer.Pod, pid types.UID) error { // GetPodStatus(pod *kubecontainer.Pod) so that Docker can avoid listing // all containers again. status, err := g.runtime.GetPodStatus(pod.ID, pod.Name, pod.Namespace) - glog.V(4).Infof("PLEG: Write status for %s/%s: %+v (err: %v)", pod.Name, pod.Namespace, status, err) + glog.V(4).Infof("PLEG: Write status for %s/%s: %#v (err: %v)", pod.Name, pod.Namespace, status, err) g.cache.Set(pod.ID, status, err, timestamp) return err } diff --git a/pkg/kubelet/rkt/rkt.go b/pkg/kubelet/rkt/rkt.go index b71eb2c9b2..5467552ddd 100644 --- a/pkg/kubelet/rkt/rkt.go +++ b/pkg/kubelet/rkt/rkt.go @@ -306,7 +306,7 @@ func (r *Runtime) RunCommand(config *Config, args ...string) ([]string, error) { if config == nil { config = r.config } - glog.V(4).Infof("rkt: Run command: %q with config: %+v", args, config) + glog.V(4).Infof("rkt: Run command: %q with config: %#v", args, config) var stdout, stderr bytes.Buffer diff --git a/pkg/kubelet/status/status_manager_test.go b/pkg/kubelet/status/status_manager_test.go index c18a53ae0a..705c3b0039 100644 --- a/pkg/kubelet/status/status_manager_test.go +++ b/pkg/kubelet/status/status_manager_test.go @@ -673,7 +673,7 @@ func TestReconcilePodStatus(t *testing.T) { podStatus, ok := syncer.GetPodStatus(testPod.UID) if !ok { - t.Fatalf("Should find pod status for pod: %+v", testPod) + t.Fatalf("Should find pod status for pod: %#v", testPod) } testPod.Status = podStatus diff --git a/pkg/labels/selector.go b/pkg/labels/selector.go index 861b6eab89..64dd420e48 100644 --- a/pkg/labels/selector.go +++ b/pkg/labels/selector.go @@ -190,7 +190,7 @@ func (r 
*Requirement) Matches(ls Labels) bool { // There should be only one strValue in r.strValues, and can be converted to a integer. if len(r.strValues) != 1 { - glog.V(10).Infof("Invalid values count %+v of requirement %+v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) + glog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) return false } @@ -198,7 +198,7 @@ func (r *Requirement) Matches(ls Labels) bool { for strValue := range r.strValues { rValue, err = strconv.ParseInt(strValue, 10, 64) if err != nil { - glog.V(10).Infof("ParseInt failed for value %+v in requirement %+v, for 'Gt', 'Lt' operators, the value must be an integer", strValue, r) + glog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", strValue, r) return false } } diff --git a/pkg/labels/selector_test.go b/pkg/labels/selector_test.go index a659a339fd..5de69dacc1 100644 --- a/pkg/labels/selector_test.go +++ b/pkg/labels/selector_test.go @@ -368,9 +368,9 @@ func TestToString(t *testing.T) { } for _, ts := range toStringTests { if out := ts.In.String(); out == "" && ts.Valid { - t.Errorf("%+v.String() => '%v' expected no error", ts.In, out) + t.Errorf("%#v.String() => '%v' expected no error", ts.In, out) } else if out != ts.Out { - t.Errorf("%+v.String() => '%v' want '%v'", ts.In, out, ts.Out) + t.Errorf("%#v.String() => '%v' want '%v'", ts.In, out, ts.Out) } } } diff --git a/pkg/proxy/userspace/port_allocator_test.go b/pkg/proxy/userspace/port_allocator_test.go index 509c0e6136..0b6151a941 100644 --- a/pkg/proxy/userspace/port_allocator_test.go +++ b/pkg/proxy/userspace/port_allocator_test.go @@ -28,7 +28,7 @@ func TestRangeAllocatorEmpty(t *testing.T) { r.Set("0-0") defer func() { if rv := recover(); rv == nil { - t.Fatalf("expected panic because of empty port range: %+v", r) + t.Fatalf("expected panic because of empty 
port range: %#v", r) } }() _ = newPortRangeAllocator(*r) diff --git a/pkg/proxy/userspace/proxier.go b/pkg/proxy/userspace/proxier.go index d4a13df910..c768a4f297 100644 --- a/pkg/proxy/userspace/proxier.go +++ b/pkg/proxy/userspace/proxier.go @@ -425,7 +425,7 @@ func (proxier *Proxier) OnServiceUpdate(services []api.Service) { info.loadBalancerStatus = *api.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer) info.nodePort = int(servicePort.NodePort) info.sessionAffinityType = service.Spec.SessionAffinity - glog.V(4).Infof("info: %+v", info) + glog.V(4).Infof("info: %#v", info) err = proxier.openPortal(serviceName, info) if err != nil { diff --git a/pkg/proxy/userspace/roundrobin.go b/pkg/proxy/userspace/roundrobin.go index bb553f3a77..12aa44615a 100644 --- a/pkg/proxy/userspace/roundrobin.go +++ b/pkg/proxy/userspace/roundrobin.go @@ -153,7 +153,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne // Affinity wins. endpoint := sessionAffinity.endpoint sessionAffinity.lastUsed = time.Now() - glog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %+v: %s", svcPort, ipaddr, sessionAffinity, endpoint) + glog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint) return endpoint, nil } } @@ -172,7 +172,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne affinity.lastUsed = time.Now() affinity.endpoint = endpoint affinity.clientIP = ipaddr - glog.V(4).Infof("Updated affinity key %s: %+v", ipaddr, state.affinity.affinityMap[ipaddr]) + glog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr]) } return endpoint, nil diff --git a/pkg/util/pod/pod.go b/pkg/util/pod/pod.go index e2286c7a02..06e48c5112 100644 --- a/pkg/util/pod/pod.go +++ b/pkg/util/pod/pod.go @@ -70,7 +70,7 @@ func UpdatePodWithRetries(podClient unversionedcore.PodInterface, pod *api.Pod, // Handle returned error 
from wait poll if err == wait.ErrWaitTimeout { - err = fmt.Errorf("timed out trying to update pod: %+v", oldPod) + err = fmt.Errorf("timed out trying to update pod: %#v", oldPod) } // Ignore the pod not found error, but the pod isn't updated. if errors.IsNotFound(err) { diff --git a/pkg/util/replicaset/replicaset.go b/pkg/util/replicaset/replicaset.go index 0ac51203a5..388197276a 100644 --- a/pkg/util/replicaset/replicaset.go +++ b/pkg/util/replicaset/replicaset.go @@ -66,7 +66,7 @@ func UpdateRSWithRetries(rsClient unversionedextensions.ReplicaSetInterface, rs // Handle returned error from wait poll if err == wait.ErrWaitTimeout { - err = fmt.Errorf("timed out trying to update RS: %+v", oldRs) + err = fmt.Errorf("timed out trying to update RS: %#v", oldRs) } // Ignore the RS not found error, but the RS isn't updated. if errors.IsNotFound(err) { diff --git a/pkg/volume/plugins.go b/pkg/volume/plugins.go index 669aff661b..0ab0a73d98 100644 --- a/pkg/volume/plugins.go +++ b/pkg/volume/plugins.go @@ -385,7 +385,7 @@ func (pm *VolumePluginMgr) FindPluginByName(name string) (VolumePlugin, error) { func (pm *VolumePluginMgr) FindPersistentPluginBySpec(spec *Spec) (PersistentVolumePlugin, error) { volumePlugin, err := pm.FindPluginBySpec(spec) if err != nil { - return nil, fmt.Errorf("Could not find volume plugin for spec: %+v", spec) + return nil, fmt.Errorf("Could not find volume plugin for spec: %#v", spec) } if persistentVolumePlugin, ok := volumePlugin.(PersistentVolumePlugin); ok { return persistentVolumePlugin, nil diff --git a/pkg/volume/plugins_test.go b/pkg/volume/plugins_test.go index 578e0e2785..863087b464 100644 --- a/pkg/volume/plugins_test.go +++ b/pkg/volume/plugins_test.go @@ -30,7 +30,7 @@ func TestSpecSourceConverters(t *testing.T) { converted := NewSpecFromVolume(v) if converted.Volume.EmptyDir == nil { - t.Errorf("Unexpected nil EmptyDir: %+v", converted) + t.Errorf("Unexpected nil EmptyDir: %#v", converted) } if v.Name != converted.Name() { 
t.Errorf("Expected %v but got %v", v.Name, converted.Name()) @@ -45,7 +45,7 @@ func TestSpecSourceConverters(t *testing.T) { converted = NewSpecFromPersistentVolume(pv, false) if converted.PersistentVolume.Spec.AWSElasticBlockStore == nil { - t.Errorf("Unexpected nil AWSElasticBlockStore: %+v", converted) + t.Errorf("Unexpected nil AWSElasticBlockStore: %#v", converted) } if pv.Name != converted.Name() { t.Errorf("Expected %v but got %v", pv.Name, converted.Name())