Print/log pointers of structs with %#v instead of %+v
There are many places in k8s where %+v is used to format a pointer to a struct, which doesn't work as expected. Fixes #26591
parent ed763b8034
commit 59ca5986dd
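For context, the difference between the two verbs can be seen in a minimal standalone Go sketch (not part of this commit; the ObjectMeta type below is only a stand-in): %+v on a pointer to a struct prints just the field names and values, while %#v prints a Go-syntax representation that includes the concrete type name, which is more useful when logging an object received as interface{}.

package main

import "fmt"

// ObjectMeta is a stand-in type used only for this illustration.
type ObjectMeta struct {
	Name      string
	Namespace string
}

func main() {
	m := &ObjectMeta{Name: "foo", Namespace: "default"}

	// %+v dereferences the pointer and prints field names and values,
	// but gives no hint about the concrete type behind an interface{}.
	fmt.Printf("%+v\n", m) // &{Name:foo Namespace:default}

	// %#v prints a Go-syntax representation including the type name.
	fmt.Printf("%#v\n", m) // &main.ObjectMeta{Name:"foo", Namespace:"default"}
}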
@@ -960,7 +960,7 @@ func (t *Tester) testDeleteGracefulShorten(obj runtime.Object, createFn CreateFu
 objectMeta = t.getObjectMetaOrFail(object)
 if objectMeta.DeletionTimestamp == nil || objectMeta.DeletionGracePeriodSeconds == nil ||
 *objectMeta.DeletionGracePeriodSeconds != expectedGrace || !objectMeta.DeletionTimestamp.Before(deletionTimestamp) {
-t.Errorf("unexpected deleted meta: %+v", objectMeta)
+t.Errorf("unexpected deleted meta: %#v", objectMeta)
 }
 }
@@ -190,7 +190,7 @@ func (r *ControllerExpectations) LowerExpectations(controllerKey string, add, de
 if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists {
 exp.Add(int64(-add), int64(-del))
 // The expectations might've been modified since the update on the previous line.
-glog.V(4).Infof("Lowered expectations %+v", exp)
+glog.V(4).Infof("Lowered expectations %#v", exp)
 }
 }
@@ -199,7 +199,7 @@ func (r *ControllerExpectations) RaiseExpectations(controllerKey string, add, de
 if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists {
 exp.Add(int64(add), int64(del))
 // The expectations might've been modified since the update on the previous line.
-glog.V(4).Infof("Raised expectations %+v", exp)
+glog.V(4).Infof("Raised expectations %#v", exp)
 }
 }
@@ -125,7 +125,7 @@ func TestControllerExpectations(t *testing.T) {
 // RC fires off adds and deletes at apiserver, then sets expectations
 rcKey, err := KeyFunc(rc)
 if err != nil {
-t.Errorf("Couldn't get key for object %+v: %v", rc, err)
+t.Errorf("Couldn't get key for object %#v: %v", rc, err)
 }
 e.SetExpectations(rcKey, adds, dels)
 var wg sync.WaitGroup
@@ -202,7 +202,7 @@ func TestUIDExpectations(t *testing.T) {
 podList := newPodList(nil, 5, api.PodRunning, rc)
 rcKey, err := KeyFunc(rc)
 if err != nil {
-t.Fatalf("Couldn't get key for object %+v: %v", rc, err)
+t.Fatalf("Couldn't get key for object %#v: %v", rc, err)
 }
 rcKeys = append(rcKeys, rcKey)
 rcPodNames := []string{}
@@ -217,12 +217,12 @@ func (dsc *DaemonSetsController) deleteDaemonset(obj interface{}) {
 if !ok {
 tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
 if !ok {
-glog.Errorf("Couldn't get object from tombstone %+v", obj)
+glog.Errorf("Couldn't get object from tombstone %#v", obj)
 return
 }
 ds, ok = tombstone.Obj.(*extensions.DaemonSet)
 if !ok {
-glog.Errorf("Tombstone contained object that is not a DaemonSet %+v", obj)
+glog.Errorf("Tombstone contained object that is not a DaemonSet %#v", obj)
 return
 }
 }
@@ -267,7 +267,7 @@ func (dsc *DaemonSetsController) runWorker() {
 func (dsc *DaemonSetsController) enqueueDaemonSet(ds *extensions.DaemonSet) {
 key, err := controller.KeyFunc(ds)
 if err != nil {
-glog.Errorf("Couldn't get key for object %+v: %v", ds, err)
+glog.Errorf("Couldn't get key for object %#v: %v", ds, err)
 return
 }
@@ -342,7 +342,7 @@ func (dsc *DaemonSetsController) addPod(obj interface{}) {
 if ds := dsc.getPodDaemonSet(pod); ds != nil {
 dsKey, err := controller.KeyFunc(ds)
 if err != nil {
-glog.Errorf("Couldn't get key for object %+v: %v", ds, err)
+glog.Errorf("Couldn't get key for object %#v: %v", ds, err)
 return
 }
 dsc.expectations.CreationObserved(dsKey)
@@ -386,12 +386,12 @@ func (dsc *DaemonSetsController) deletePod(obj interface{}) {
 if !ok {
 tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
 if !ok {
-glog.Errorf("Couldn't get object from tombstone %+v", obj)
+glog.Errorf("Couldn't get object from tombstone %#v", obj)
 return
 }
 pod, ok = tombstone.Obj.(*api.Pod)
 if !ok {
-glog.Errorf("Tombstone contained object that is not a pod %+v", obj)
+glog.Errorf("Tombstone contained object that is not a pod %#v", obj)
 return
 }
 }
@@ -399,7 +399,7 @@ func (dsc *DaemonSetsController) deletePod(obj interface{}) {
 if ds := dsc.getPodDaemonSet(pod); ds != nil {
 dsKey, err := controller.KeyFunc(ds)
 if err != nil {
-glog.Errorf("Couldn't get key for object %+v: %v", ds, err)
+glog.Errorf("Couldn't get key for object %#v: %v", ds, err)
 return
 }
 dsc.expectations.DeletionObserved(dsKey)
@@ -468,14 +468,14 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) {
 // Find out which nodes are running the daemon pods selected by ds.
 nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
 if err != nil {
-glog.Errorf("Error getting node to daemon pod mapping for daemon set %+v: %v", ds, err)
+glog.Errorf("Error getting node to daemon pod mapping for daemon set %#v: %v", ds, err)
 }

 // For each node, if the node is running the daemon pod but isn't supposed to, kill the daemon
 // pod. If the node is supposed to run the daemon pod, but isn't, create the daemon pod on the node.
 nodeList, err := dsc.nodeStore.List()
 if err != nil {
-glog.Errorf("Couldn't get list of nodes when syncing daemon set %+v: %v", ds, err)
+glog.Errorf("Couldn't get list of nodes when syncing daemon set %#v: %v", ds, err)
 }
 var nodesNeedingDaemonPods, podsToDelete []string
 for _, node := range nodeList.Items {
@@ -505,7 +505,7 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) {
 // We need to set expectations before creating/deleting pods to avoid race conditions.
 dsKey, err := controller.KeyFunc(ds)
 if err != nil {
-glog.Errorf("Couldn't get key for object %+v: %v", ds, err)
+glog.Errorf("Couldn't get key for object %#v: %v", ds, err)
 return
 }
@@ -583,13 +583,13 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet)
 glog.V(4).Infof("Updating daemon set status")
 nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
 if err != nil {
-glog.Errorf("Error getting node to daemon pod mapping for daemon set %+v: %v", ds, err)
+glog.Errorf("Error getting node to daemon pod mapping for daemon set %#v: %v", ds, err)
 return
 }

 nodeList, err := dsc.nodeStore.List()
 if err != nil {
-glog.Errorf("Couldn't get list of nodes when updating daemon set %+v: %v", ds, err)
+glog.Errorf("Couldn't get list of nodes when updating daemon set %#v: %v", ds, err)
 return
 }
@@ -613,7 +613,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet)
 err = storeDaemonSetStatus(dsc.kubeClient.Extensions().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled)
 if err != nil {
-glog.Errorf("Error storing status for daemon set %+v: %v", ds, err)
+glog.Errorf("Error storing status for daemon set %#v: %v", ds, err)
 }
 }
@@ -655,7 +655,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
 // then we do not want to call manage on foo until the daemon pods have been created.
 dsKey, err := controller.KeyFunc(ds)
 if err != nil {
-glog.Errorf("Couldn't get key for object %+v: %v", ds, err)
+glog.Errorf("Couldn't get key for object %#v: %v", ds, err)
 return err
 }
 dsNeedsSync := dsc.expectations.SatisfiedExpectations(dsKey)
@@ -229,12 +229,12 @@ func (dc *DeploymentController) deleteDeploymentNotification(obj interface{}) {
 if !ok {
 tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
 if !ok {
-glog.Errorf("Couldn't get object from tombstone %+v", obj)
+glog.Errorf("Couldn't get object from tombstone %#v", obj)
 return
 }
 d, ok = tombstone.Obj.(*extensions.Deployment)
 if !ok {
-glog.Errorf("Tombstone contained object that is not a Deployment %+v", obj)
+glog.Errorf("Tombstone contained object that is not a Deployment %#v", obj)
 return
 }
 }
@@ -305,12 +305,12 @@ func (dc *DeploymentController) deleteReplicaSet(obj interface{}) {
 if !ok {
 tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
 if !ok {
-glog.Errorf("Couldn't get object from tombstone %+v, could take up to %v before a deployment recreates/updates replicasets", obj, FullDeploymentResyncPeriod)
+glog.Errorf("Couldn't get object from tombstone %#v, could take up to %v before a deployment recreates/updates replicasets", obj, FullDeploymentResyncPeriod)
 return
 }
 rs, ok = tombstone.Obj.(*extensions.ReplicaSet)
 if !ok {
-glog.Errorf("Tombstone contained object that is not a ReplicaSet %+v, could take up to %v before a deployment recreates/updates replicasets", obj, FullDeploymentResyncPeriod)
+glog.Errorf("Tombstone contained object that is not a ReplicaSet %#v, could take up to %v before a deployment recreates/updates replicasets", obj, FullDeploymentResyncPeriod)
 return
 }
 }
@@ -344,7 +344,7 @@ func (dc *DeploymentController) addPod(obj interface{}) {
 if !ok {
 return
 }
-glog.V(4).Infof("Pod %s created: %+v.", pod.Name, pod)
+glog.V(4).Infof("Pod %s created: %#v.", pod.Name, pod)
 if d := dc.getDeploymentForPod(pod); d != nil {
 dc.enqueueDeployment(d)
 }
@@ -382,16 +382,16 @@ func (dc *DeploymentController) deletePod(obj interface{}) {
 if !ok {
 tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
 if !ok {
-glog.Errorf("Couldn't get object from tombstone %+v", obj)
+glog.Errorf("Couldn't get object from tombstone %#v", obj)
 return
 }
 pod, ok = tombstone.Obj.(*api.Pod)
 if !ok {
-glog.Errorf("Tombstone contained object that is not a pod %+v", obj)
+glog.Errorf("Tombstone contained object that is not a pod %#v", obj)
 return
 }
 }
-glog.V(4).Infof("Pod %s deleted: %+v.", pod.Name, pod)
+glog.V(4).Infof("Pod %s deleted: %#v.", pod.Name, pod)
 if d := dc.getDeploymentForPod(pod); d != nil {
 dc.enqueueDeployment(d)
 }
@@ -400,7 +400,7 @@ func (dc *DeploymentController) deletePod(obj interface{}) {
 func (dc *DeploymentController) enqueueDeployment(deployment *extensions.Deployment) {
 key, err := controller.KeyFunc(deployment)
 if err != nil {
-glog.Errorf("Couldn't get key for object %+v: %v", deployment, err)
+glog.Errorf("Couldn't get key for object %#v: %v", deployment, err)
 return
 }
@@ -286,7 +286,7 @@ func TestGetNewRC(t *testing.T) {
 t.Errorf("In test case %s, got unexpected error %v", test.test, err)
 }
 if !api.Semantic.DeepEqual(rs, test.expected) {
-t.Errorf("In test case %s, expected %+v, got %+v", test.test, test.expected, rs)
+t.Errorf("In test case %s, expected %#v, got %#v", test.test, test.expected, rs)
 }
 }
 }
@@ -381,11 +381,11 @@ func TestGetOldRCs(t *testing.T) {
 if !equal(rss, test.expected) {
 t.Errorf("In test case %q, expected:", test.test)
 for _, rs := range test.expected {
-t.Errorf("rs = %+v", rs)
+t.Errorf("rs = %#v", rs)
 }
 t.Errorf("In test case %q, got:", test.test)
 for _, rs := range rss {
-t.Errorf("rs = %+v", rs)
+t.Errorf("rs = %#v", rs)
 }
 }
 }
@@ -272,7 +272,7 @@ func (e *EndpointController) deletePod(obj interface{}) {
 }
 podKey, err := keyFunc(obj)
 if err != nil {
-glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
+glog.Errorf("Couldn't get key for object %#v: %v", obj, err)
 return
 }
 glog.Infof("Pod %q was deleted but we don't have a record of its final state, so it will take up to %v before it will be removed from all endpoint records.", podKey, FullServiceResyncPeriod)
@@ -159,10 +159,10 @@ func pcbKeyFunc(obj interface{}) (string, error) {
 }
 p, ok := obj.(*pcb)
 if !ok {
-return "", fmt.Errorf("not a valid pet control block %+v", p)
+return "", fmt.Errorf("not a valid pet control block %#v", p)
 }
 if p.parent == nil {
-return "", fmt.Errorf("cannot compute pet control block key without parent pointer %+v", p)
+return "", fmt.Errorf("cannot compute pet control block key without parent pointer %#v", p)
 }
 return controller.KeyFunc(p.parent)
 }
@@ -327,7 +327,7 @@ func isReplicaSetMatch(pod *api.Pod, rs *extensions.ReplicaSet) bool {
 // When a pod is created, enqueue the replica set that manages it and update it's expectations.
 func (rsc *ReplicaSetController) addPod(obj interface{}) {
 pod := obj.(*api.Pod)
-glog.V(4).Infof("Pod %s created: %+v.", pod.Name, pod)
+glog.V(4).Infof("Pod %s created: %#v.", pod.Name, pod)

 rs := rsc.getPodReplicaSet(pod)
 if rs == nil {
@@ -405,11 +405,11 @@ func (rsc *ReplicaSetController) deletePod(obj interface{}) {
 }
 pod, ok = tombstone.Obj.(*api.Pod)
 if !ok {
-glog.Errorf("Tombstone contained object that is not a pod %+v", obj)
+glog.Errorf("Tombstone contained object that is not a pod %#v", obj)
 return
 }
 }
-glog.V(4).Infof("Pod %s/%s deleted through %v, timestamp %+v: %+v.", pod.Namespace, pod.Name, utilruntime.GetCaller(), pod.DeletionTimestamp, pod)
+glog.V(4).Infof("Pod %s/%s deleted through %v, timestamp %+v: %#v.", pod.Namespace, pod.Name, utilruntime.GetCaller(), pod.DeletionTimestamp, pod)
 if rs := rsc.getPodReplicaSet(pod); rs != nil {
 rsKey, err := controller.KeyFunc(rs)
 if err != nil {
@@ -353,7 +353,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
 // Get the key for the controller
 rsKey, err := controller.KeyFunc(rsSpec)
 if err != nil {
-t.Errorf("Couldn't get key for object %+v: %v", rsSpec, err)
+t.Errorf("Couldn't get key for object %#v: %v", rsSpec, err)
 }

 // Lowering expectations should lead to a sync that creates a replica, however the
@@ -704,7 +704,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 rsKey, err := controller.KeyFunc(rsSpec)
 if err != nil {
-t.Errorf("Couldn't get key for object %+v: %v", rsSpec, err)
+t.Errorf("Couldn't get key for object %#v: %v", rsSpec, err)
 }

 // Size up the controller, then size it down, and confirm the expected create/delete pattern
@@ -885,7 +885,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
 // Get the ReplicaSet key
 rsKey, err := controller.KeyFunc(rs)
 if err != nil {
-t.Errorf("Couldn't get key for object %+v: %v", rs, err)
+t.Errorf("Couldn't get key for object %#v: %v", rs, err)
 }

 // This is to simulate a concurrent addPod, that has a handle on the expectations
@@ -987,7 +987,7 @@ func TestDeletionTimestamp(t *testing.T) {
 manager.rsStore.Store.Add(rs)
 rsKey, err := controller.KeyFunc(rs)
 if err != nil {
-t.Errorf("Couldn't get key for object %+v: %v", rs, err)
+t.Errorf("Couldn't get key for object %#v: %v", rs, err)
 }
 pod := newPodList(nil, 1, api.PodPending, labelMap, rs, "pod").Items[0]
 pod.DeletionTimestamp = &unversioned.Time{Time: time.Now()}
@@ -1004,7 +1004,7 @@ func TestDeletionTimestamp(t *testing.T) {
 podExp, exists, err := manager.expectations.GetExpectations(rsKey)
 if !exists || err != nil || !podExp.Fulfilled() {
-t.Fatalf("Wrong expectations %+v", podExp)
+t.Fatalf("Wrong expectations %#v", podExp)
 }

 // An update from no deletion timestamp to having one should be treated
@@ -1021,7 +1021,7 @@ func TestDeletionTimestamp(t *testing.T) {
 podExp, exists, err = manager.expectations.GetExpectations(rsKey)
 if !exists || err != nil || !podExp.Fulfilled() {
-t.Fatalf("Wrong expectations %+v", podExp)
+t.Fatalf("Wrong expectations %#v", podExp)
 }

 // An update to the pod (including an update to the deletion timestamp)
@@ -1039,7 +1039,7 @@ func TestDeletionTimestamp(t *testing.T) {
 podExp, exists, err = manager.expectations.GetExpectations(rsKey)
 if !exists || err != nil || podExp.Fulfilled() {
-t.Fatalf("Wrong expectations %+v", podExp)
+t.Fatalf("Wrong expectations %#v", podExp)
 }

 // A pod with a non-nil deletion timestamp should also be ignored by the
@@ -1047,7 +1047,7 @@ func TestDeletionTimestamp(t *testing.T) {
 manager.deletePod(&pod)
 podExp, exists, err = manager.expectations.GetExpectations(rsKey)
 if !exists || err != nil || podExp.Fulfilled() {
-t.Fatalf("Wrong expectations %+v", podExp)
+t.Fatalf("Wrong expectations %#v", podExp)
 }

 // Deleting the second pod should clear expectations.
@@ -1061,7 +1061,7 @@ func TestDeletionTimestamp(t *testing.T) {
 podExp, exists, err = manager.expectations.GetExpectations(rsKey)
 if !exists || err != nil || !podExp.Fulfilled() {
-t.Fatalf("Wrong expectations %+v", podExp)
+t.Fatalf("Wrong expectations %#v", podExp)
 }
 }
@@ -416,12 +416,12 @@ func (rm *ReplicationManager) deletePod(obj interface{}) {
 if !ok {
 tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
 if !ok {
-glog.Errorf("Couldn't get object from tombstone %+v", obj)
+glog.Errorf("Couldn't get object from tombstone %#v", obj)
 return
 }
 pod, ok = tombstone.Obj.(*api.Pod)
 if !ok {
-glog.Errorf("Tombstone contained object that is not a pod %+v", obj)
+glog.Errorf("Tombstone contained object that is not a pod %#v", obj)
 return
 }
 }
@@ -343,7 +343,7 @@ func TestSyncReplicationControllerDormancy(t *testing.T) {
 // Get the key for the controller
 rcKey, err := controller.KeyFunc(controllerSpec)
 if err != nil {
-t.Errorf("Couldn't get key for object %+v: %v", controllerSpec, err)
+t.Errorf("Couldn't get key for object %#v: %v", controllerSpec, err)
 }

 // Lowering expectations should lead to a sync that creates a replica, however the
@@ -686,7 +686,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 rcKey, err := controller.KeyFunc(controllerSpec)
 if err != nil {
-t.Errorf("Couldn't get key for object %+v: %v", controllerSpec, err)
+t.Errorf("Couldn't get key for object %#v: %v", controllerSpec, err)
 }

 // Size up the controller, then size it down, and confirm the expected create/delete pattern
@@ -865,7 +865,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
 // Get the RC key
 rcKey, err := controller.KeyFunc(rc)
 if err != nil {
-t.Errorf("Couldn't get key for object %+v: %v", rc, err)
+t.Errorf("Couldn't get key for object %#v: %v", rc, err)
 }

 // This is to simulate a concurrent addPod, that has a handle on the expectations
@@ -965,7 +965,7 @@ func TestDeletionTimestamp(t *testing.T) {
 manager.rcStore.Indexer.Add(controllerSpec)
 rcKey, err := controller.KeyFunc(controllerSpec)
 if err != nil {
-t.Errorf("Couldn't get key for object %+v: %v", controllerSpec, err)
+t.Errorf("Couldn't get key for object %#v: %v", controllerSpec, err)
 }
 pod := newPodList(nil, 1, api.PodPending, controllerSpec, "pod").Items[0]
 pod.DeletionTimestamp = &unversioned.Time{Time: time.Now()}
@@ -982,7 +982,7 @@ func TestDeletionTimestamp(t *testing.T) {
 podExp, exists, err := manager.expectations.GetExpectations(rcKey)
 if !exists || err != nil || !podExp.Fulfilled() {
-t.Fatalf("Wrong expectations %+v", podExp)
+t.Fatalf("Wrong expectations %#v", podExp)
 }

 // An update from no deletion timestamp to having one should be treated
@@ -999,7 +999,7 @@ func TestDeletionTimestamp(t *testing.T) {
 podExp, exists, err = manager.expectations.GetExpectations(rcKey)
 if !exists || err != nil || !podExp.Fulfilled() {
-t.Fatalf("Wrong expectations %+v", podExp)
+t.Fatalf("Wrong expectations %#v", podExp)
 }

 // An update to the pod (including an update to the deletion timestamp)
@@ -1017,7 +1017,7 @@ func TestDeletionTimestamp(t *testing.T) {
 podExp, exists, err = manager.expectations.GetExpectations(rcKey)
 if !exists || err != nil || podExp.Fulfilled() {
-t.Fatalf("Wrong expectations %+v", podExp)
+t.Fatalf("Wrong expectations %#v", podExp)
 }

 // A pod with a non-nil deletion timestamp should also be ignored by the
@@ -1025,7 +1025,7 @@ func TestDeletionTimestamp(t *testing.T) {
 manager.deletePod(&pod)
 podExp, exists, err = manager.expectations.GetExpectations(rcKey)
 if !exists || err != nil || podExp.Fulfilled() {
-t.Fatalf("Wrong expectations %+v", podExp)
+t.Fatalf("Wrong expectations %#v", podExp)
 }

 // Deleting the second pod should clear expectations.
@@ -1039,7 +1039,7 @@ func TestDeletionTimestamp(t *testing.T) {
 podExp, exists, err = manager.expectations.GetExpectations(rcKey)
 if !exists || err != nil || !podExp.Fulfilled() {
-t.Fatalf("Wrong expectations %+v", podExp)
+t.Fatalf("Wrong expectations %#v", podExp)
 }
 }
@@ -233,7 +233,7 @@ func (s *ServiceController) processDelta(delta *cache.Delta) (error, time.Durati
 // cache for deleting.
 key, ok := delta.Object.(cache.DeletedFinalStateUnknown)
 if !ok {
-return fmt.Errorf("delta contained object that wasn't a service or a deleted key: %+v", delta), doNotRetry
+return fmt.Errorf("delta contained object that wasn't a service or a deleted key: %#v", delta), doNotRetry
 }
 cachedService, ok = s.cache.get(key.Key)
 if !ok {
@@ -324,7 +324,7 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *api.PersistentVolu
 } else {
 volume, ok := obj.(*api.PersistentVolume)
 if !ok {
-return fmt.Errorf("Cannot convert object from volume cache to volume %q!?: %+v", claim.Spec.VolumeName, obj)
+return fmt.Errorf("Cannot convert object from volume cache to volume %q!?: %#v", claim.Spec.VolumeName, obj)
 }

 glog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume %q found: %s", claimToClaimKey(claim), claim.Spec.VolumeName, getVolumeStatusForLogging(volume))
@@ -407,7 +407,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *api.PersistentVolume)
 var ok bool
 claim, ok = obj.(*api.PersistentVolumeClaim)
 if !ok {
-return fmt.Errorf("Cannot convert object from volume cache to volume %q!?: %+v", claim.Spec.VolumeName, obj)
+return fmt.Errorf("Cannot convert object from volume cache to volume %q!?: %#v", claim.Spec.VolumeName, obj)
 }
 glog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s found: %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef), getClaimStatusForLogging(claim))
 }
@@ -891,7 +891,7 @@ func (ctrl *PersistentVolumeController) reclaimVolume(volume *api.PersistentVolu
 func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{}) {
 volume, ok := arg.(*api.PersistentVolume)
 if !ok {
-glog.Errorf("Cannot convert recycleVolumeOperation argument to volume, got %+v", arg)
+glog.Errorf("Cannot convert recycleVolumeOperation argument to volume, got %#v", arg)
 return
 }
 glog.V(4).Infof("recycleVolumeOperation [%s] started", volume.Name)
@@ -979,7 +979,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{})
 func (ctrl *PersistentVolumeController) deleteVolumeOperation(arg interface{}) {
 volume, ok := arg.(*api.PersistentVolume)
 if !ok {
-glog.Errorf("Cannot convert deleteVolumeOperation argument to volume, got %+v", arg)
+glog.Errorf("Cannot convert deleteVolumeOperation argument to volume, got %#v", arg)
 return
 }
 glog.V(4).Infof("deleteVolumeOperation [%s] started", volume.Name)
@@ -1057,7 +1057,7 @@ func (ctrl *PersistentVolumeController) isVolumeReleased(volume *api.PersistentV
 var ok bool
 claim, ok = obj.(*api.PersistentVolumeClaim)
 if !ok {
-return false, fmt.Errorf("Cannot convert object from claim cache to claim!?: %+v", obj)
+return false, fmt.Errorf("Cannot convert object from claim cache to claim!?: %#v", obj)
 }
 }
 if claim != nil && claim.UID == volume.Spec.ClaimRef.UID {
@@ -1117,7 +1117,7 @@ func (ctrl *PersistentVolumeController) provisionClaim(claim *api.PersistentVolu
 func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interface{}) {
 claim, ok := claimObj.(*api.PersistentVolumeClaim)
 if !ok {
-glog.Errorf("Cannot convert provisionClaimOperation argument to claim, got %+v", claimObj)
+glog.Errorf("Cannot convert provisionClaimOperation argument to claim, got %#v", claimObj)
 return
 }
 glog.V(4).Infof("provisionClaimOperation [%s] started", claimToClaimKey(claim))
@@ -142,7 +142,7 @@ func (ctrl *PersistentVolumeController) initializeCaches(volumeSource, claimSour
 }
 volumeList, ok := volumeListObj.(*api.PersistentVolumeList)
 if !ok {
-glog.Errorf("PersistentVolumeController can't initialize caches, expected list of volumes, got: %+v", volumeListObj)
+glog.Errorf("PersistentVolumeController can't initialize caches, expected list of volumes, got: %#v", volumeListObj)
 return
 }
 for _, volume := range volumeList.Items {
@@ -166,7 +166,7 @@ func (ctrl *PersistentVolumeController) initializeCaches(volumeSource, claimSour
 }
 claimList, ok := claimListObj.(*api.PersistentVolumeClaimList)
 if !ok {
-glog.Errorf("PersistentVolumeController can't initialize caches, expected list of claims, got: %+v", claimListObj)
+glog.Errorf("PersistentVolumeController can't initialize caches, expected list of claims, got: %#v", claimListObj)
 return
 }
 for _, claim := range claimList.Items {
@@ -194,7 +194,7 @@ func (ctrl *PersistentVolumeController) storeClaimUpdate(claim *api.PersistentVo
 func (ctrl *PersistentVolumeController) addVolume(obj interface{}) {
 pv, ok := obj.(*api.PersistentVolume)
 if !ok {
-glog.Errorf("expected PersistentVolume but handler received %+v", obj)
+glog.Errorf("expected PersistentVolume but handler received %#v", obj)
 return
 }
@@ -229,7 +229,7 @@ func (ctrl *PersistentVolumeController) addVolume(obj interface{}) {
 func (ctrl *PersistentVolumeController) updateVolume(oldObj, newObj interface{}) {
 newVolume, ok := newObj.(*api.PersistentVolume)
 if !ok {
-glog.Errorf("Expected PersistentVolume but handler received %+v", newObj)
+glog.Errorf("Expected PersistentVolume but handler received %#v", newObj)
 return
 }
@@ -271,7 +271,7 @@ func (ctrl *PersistentVolumeController) deleteVolume(obj interface{}) {
 if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
 volume, ok = unknown.Obj.(*api.PersistentVolume)
 if !ok {
-glog.Errorf("Expected PersistentVolume but deleteVolume received %+v", unknown.Obj)
+glog.Errorf("Expected PersistentVolume but deleteVolume received %#v", unknown.Obj)
 return
 }
 } else {
@@ -302,7 +302,7 @@ func (ctrl *PersistentVolumeController) deleteVolume(obj interface{}) {
 }
 }
 } else {
-glog.Errorf("Cannot convert object from claim cache to claim %q!?: %+v", claimrefToClaimKey(volume.Spec.ClaimRef), claimObj)
+glog.Errorf("Cannot convert object from claim cache to claim %q!?: %#v", claimrefToClaimKey(volume.Spec.ClaimRef), claimObj)
 }
 }
 }
@@ -381,11 +381,11 @@ func (ctrl *PersistentVolumeController) deleteClaim(obj interface{}) {
 if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
 claim, ok = unknown.Obj.(*api.PersistentVolumeClaim)
 if !ok {
-glog.Errorf("Expected PersistentVolumeClaim but deleteClaim received %+v", unknown.Obj)
+glog.Errorf("Expected PersistentVolumeClaim but deleteClaim received %#v", unknown.Obj)
 return
 }
 } else {
-glog.Errorf("Expected PersistentVolumeClaim but deleteClaim received %+v", obj)
+glog.Errorf("Expected PersistentVolumeClaim but deleteClaim received %#v", obj)
 return
 }
 }
@@ -413,7 +413,7 @@ func (ctrl *PersistentVolumeController) deleteClaim(obj interface{}) {
 }
 }
 } else {
-glog.Errorf("Cannot convert object from volume cache to volume %q!?: %+v", claim.Spec.VolumeName, pvObj)
+glog.Errorf("Cannot convert object from volume cache to volume %q!?: %#v", claim.Spec.VolumeName, pvObj)
 }
 }
 }
@@ -234,7 +234,7 @@ func storeVersion(t *testing.T, prefix string, c cache.Store, version string, ex
 }
 pv, ok := pvObj.(*api.PersistentVolume)
 if !ok {
-t.Errorf("expected volume in the cache, got different object instead: %+v", pvObj)
+t.Errorf("expected volume in the cache, got different object instead: %#v", pvObj)
 }

 if ret {
@@ -125,7 +125,7 @@ func assertARecordsMatchIPs(t *testing.T, records []dns.RR, ips ...string) {
 gotEndpoints := sets.NewString()
 for _, r := range records {
 if a, ok := r.(*dns.A); !ok {
-t.Errorf("Expected A record, got %+v", a)
+t.Errorf("Expected A record, got %#v", a)
 } else {
 gotEndpoints.Insert(a.A.String())
 }
@@ -817,7 +817,7 @@ Scaling foo-v2 up to 2
 oldReady := next(&oldReady)
 newReady := next(&newReady)
 if oldReady == -1 || newReady == -1 {
-t.Fatalf("unexpected getReadyPods call for:\noldRc: %+v\nnewRc: %+v", oldRc, newRc)
+t.Fatalf("unexpected getReadyPods call for:\noldRc: %#v\nnewRc: %#v", oldRc, newRc)
 }
 return int32(oldReady), int32(newReady), nil
 }
@@ -100,7 +100,7 @@ func tryDecodeSinglePod(data []byte, defaultFn defaultFunc) (parsed bool, pod *a
 }
 // Check whether the object could be converted to single pod.
 if _, ok := obj.(*api.Pod); !ok {
-err = fmt.Errorf("invalid pod: %+v", obj)
+err = fmt.Errorf("invalid pod: %#v", obj)
 return false, pod, err
 }
 newPod := obj.(*api.Pod)
@@ -1775,7 +1775,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub
 defer func() {
 metrics.ContainerManagerLatency.WithLabelValues("computePodContainerChanges").Observe(metrics.SinceInMicroseconds(start))
 }()
-glog.V(5).Infof("Syncing Pod %q: %+v", format.Pod(pod), pod)
+glog.V(5).Infof("Syncing Pod %q: %#v", format.Pod(pod), pod)

 containersToStart := make(map[int]string)
 containersToKeep := make(map[kubecontainer.DockerID]int)
@@ -2062,10 +2062,10 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec
 initContainerResult.Fail(kubecontainer.ErrRunInitContainer, fmt.Sprintf("init container %q exited with %d", status.Name, status.ExitCode))
 result.AddSyncResult(initContainerResult)
 if pod.Spec.RestartPolicy == api.RestartPolicyNever {
-utilruntime.HandleError(fmt.Errorf("error running pod %q init container %q, restart=Never: %+v", format.Pod(pod), status.Name, status))
+utilruntime.HandleError(fmt.Errorf("error running pod %q init container %q, restart=Never: %#v", format.Pod(pod), status.Name, status))
 return
 }
-utilruntime.HandleError(fmt.Errorf("Error running pod %q init container %q, restarting: %+v", format.Pod(pod), status.Name, status))
+utilruntime.HandleError(fmt.Errorf("Error running pod %q init container %q, restarting: %#v", format.Pod(pod), status.Name, status))
 }
 }
@@ -1716,7 +1716,7 @@ func verifySyncResults(t *testing.T, expectedResults []*kubecontainer.SyncResult
 if len(expectedResults) != len(realResult.SyncResults) {
 t.Errorf("expected sync result number %d, got %d", len(expectedResults), len(realResult.SyncResults))
 for _, r := range expectedResults {
-t.Errorf("expected result: %+v", r)
+t.Errorf("expected result: %#v", r)
 }
 for _, r := range realResult.SyncResults {
 t.Errorf("real result: %+v", r)
@@ -1733,16 +1733,16 @@ func verifySyncResults(t *testing.T, expectedResults []*kubecontainer.SyncResult
 // We use Contains() here because the message format may be changed, but at least we should
 // make sure that the expected message is contained.
 if realR.Error != expectR.Error || !strings.Contains(realR.Message, expectR.Message) {
-t.Errorf("expected sync result %+v, got %+v", expectR, realR)
+t.Errorf("expected sync result %#v, got %+v", expectR, realR)
 }
 found++
 }
 }
 if found == 0 {
-t.Errorf("not found expected result %+v", expectR)
+t.Errorf("not found expected result %#v", expectR)
 }
 if found > 1 {
-t.Errorf("got %d duplicate expected result %+v", found, expectR)
+t.Errorf("got %d duplicate expected result %#v", found, expectR)
 }
 }
 }
@@ -109,7 +109,7 @@ func TestGetRawContainerInfoSubcontainers(t *testing.T) {
 t.Errorf("unexpected error: %v", err)
 }
 if len(result) != 2 {
-t.Errorf("Expected 2 elements, received: %+v", result)
+t.Errorf("Expected 2 elements, received: %#v", result)
 }
 mockCadvisor.AssertExpectations(t)
 }
@@ -3221,7 +3221,7 @@ func TestGetPodsToSync(t *testing.T) {
 }
 }
 if !found {
-t.Errorf("expected pod not found: %+v", expect)
+t.Errorf("expected pod not found: %#v", expect)
 }
 }
 } else {
@@ -324,7 +324,7 @@ func (g *GenericPLEG) updateCache(pod *kubecontainer.Pod, pid types.UID) error {
 // GetPodStatus(pod *kubecontainer.Pod) so that Docker can avoid listing
 // all containers again.
 status, err := g.runtime.GetPodStatus(pod.ID, pod.Name, pod.Namespace)
-glog.V(4).Infof("PLEG: Write status for %s/%s: %+v (err: %v)", pod.Name, pod.Namespace, status, err)
+glog.V(4).Infof("PLEG: Write status for %s/%s: %#v (err: %v)", pod.Name, pod.Namespace, status, err)
 g.cache.Set(pod.ID, status, err, timestamp)
 return err
 }
@@ -306,7 +306,7 @@ func (r *Runtime) RunCommand(config *Config, args ...string) ([]string, error) {
 if config == nil {
 config = r.config
 }
-glog.V(4).Infof("rkt: Run command: %q with config: %+v", args, config)
+glog.V(4).Infof("rkt: Run command: %q with config: %#v", args, config)

 var stdout, stderr bytes.Buffer
@@ -673,7 +673,7 @@ func TestReconcilePodStatus(t *testing.T) {
 podStatus, ok := syncer.GetPodStatus(testPod.UID)
 if !ok {
-t.Fatalf("Should find pod status for pod: %+v", testPod)
+t.Fatalf("Should find pod status for pod: %#v", testPod)
 }
 testPod.Status = podStatus
@@ -190,7 +190,7 @@ func (r *Requirement) Matches(ls Labels) bool {
 // There should be only one strValue in r.strValues, and can be converted to a integer.
 if len(r.strValues) != 1 {
-glog.V(10).Infof("Invalid values count %+v of requirement %+v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r)
+glog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r)
 return false
 }
@@ -198,7 +198,7 @@ func (r *Requirement) Matches(ls Labels) bool {
 for strValue := range r.strValues {
 rValue, err = strconv.ParseInt(strValue, 10, 64)
 if err != nil {
-glog.V(10).Infof("ParseInt failed for value %+v in requirement %+v, for 'Gt', 'Lt' operators, the value must be an integer", strValue, r)
+glog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", strValue, r)
 return false
 }
 }
@@ -368,9 +368,9 @@ func TestToString(t *testing.T) {
 }
 for _, ts := range toStringTests {
 if out := ts.In.String(); out == "" && ts.Valid {
-t.Errorf("%+v.String() => '%v' expected no error", ts.In, out)
+t.Errorf("%#v.String() => '%v' expected no error", ts.In, out)
 } else if out != ts.Out {
-t.Errorf("%+v.String() => '%v' want '%v'", ts.In, out, ts.Out)
+t.Errorf("%#v.String() => '%v' want '%v'", ts.In, out, ts.Out)
 }
 }
 }
@@ -28,7 +28,7 @@ func TestRangeAllocatorEmpty(t *testing.T) {
 r.Set("0-0")
 defer func() {
 if rv := recover(); rv == nil {
-t.Fatalf("expected panic because of empty port range: %+v", r)
+t.Fatalf("expected panic because of empty port range: %#v", r)
 }
 }()
 _ = newPortRangeAllocator(*r)
@@ -425,7 +425,7 @@ func (proxier *Proxier) OnServiceUpdate(services []api.Service) {
 info.loadBalancerStatus = *api.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer)
 info.nodePort = int(servicePort.NodePort)
 info.sessionAffinityType = service.Spec.SessionAffinity
-glog.V(4).Infof("info: %+v", info)
+glog.V(4).Infof("info: %#v", info)

 err = proxier.openPortal(serviceName, info)
 if err != nil {
@@ -153,7 +153,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne
 // Affinity wins.
 endpoint := sessionAffinity.endpoint
 sessionAffinity.lastUsed = time.Now()
-glog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %+v: %s", svcPort, ipaddr, sessionAffinity, endpoint)
+glog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint)
 return endpoint, nil
 }
 }
@@ -172,7 +172,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne
 affinity.lastUsed = time.Now()
 affinity.endpoint = endpoint
 affinity.clientIP = ipaddr
-glog.V(4).Infof("Updated affinity key %s: %+v", ipaddr, state.affinity.affinityMap[ipaddr])
+glog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr])
 }

 return endpoint, nil
@@ -70,7 +70,7 @@ func UpdatePodWithRetries(podClient unversionedcore.PodInterface, pod *api.Pod,
 // Handle returned error from wait poll
 if err == wait.ErrWaitTimeout {
-err = fmt.Errorf("timed out trying to update pod: %+v", oldPod)
+err = fmt.Errorf("timed out trying to update pod: %#v", oldPod)
 }
 // Ignore the pod not found error, but the pod isn't updated.
 if errors.IsNotFound(err) {
@@ -66,7 +66,7 @@ func UpdateRSWithRetries(rsClient unversionedextensions.ReplicaSetInterface, rs
 // Handle returned error from wait poll
 if err == wait.ErrWaitTimeout {
-err = fmt.Errorf("timed out trying to update RS: %+v", oldRs)
+err = fmt.Errorf("timed out trying to update RS: %#v", oldRs)
 }
 // Ignore the RS not found error, but the RS isn't updated.
 if errors.IsNotFound(err) {
@@ -385,7 +385,7 @@ func (pm *VolumePluginMgr) FindPluginByName(name string) (VolumePlugin, error) {
 func (pm *VolumePluginMgr) FindPersistentPluginBySpec(spec *Spec) (PersistentVolumePlugin, error) {
 volumePlugin, err := pm.FindPluginBySpec(spec)
 if err != nil {
-return nil, fmt.Errorf("Could not find volume plugin for spec: %+v", spec)
+return nil, fmt.Errorf("Could not find volume plugin for spec: %#v", spec)
 }
 if persistentVolumePlugin, ok := volumePlugin.(PersistentVolumePlugin); ok {
 return persistentVolumePlugin, nil
@@ -30,7 +30,7 @@ func TestSpecSourceConverters(t *testing.T) {
 converted := NewSpecFromVolume(v)
 if converted.Volume.EmptyDir == nil {
-t.Errorf("Unexpected nil EmptyDir: %+v", converted)
+t.Errorf("Unexpected nil EmptyDir: %#v", converted)
 }
 if v.Name != converted.Name() {
 t.Errorf("Expected %v but got %v", v.Name, converted.Name())
@@ -45,7 +45,7 @@ func TestSpecSourceConverters(t *testing.T) {
 converted = NewSpecFromPersistentVolume(pv, false)
 if converted.PersistentVolume.Spec.AWSElasticBlockStore == nil {
-t.Errorf("Unexpected nil AWSElasticBlockStore: %+v", converted)
+t.Errorf("Unexpected nil AWSElasticBlockStore: %#v", converted)
 }
 if pv.Name != converted.Name() {
 t.Errorf("Expected %v but got %v", pv.Name, converted.Name())