From 89669283fe84619f0c15af8495ccb28a752bb1e8 Mon Sep 17 00:00:00 2001
From: Wang Guoliang
Date: Sun, 11 Feb 2018 21:17:00 +0800
Subject: [PATCH] should use time.Since instead of time.Now().Sub

---
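Note (not part of the commit; kept here in the notes area so `git am` ignores it):
time.Since(t) is documented shorthand for time.Now().Sub(t), so every hunk below
is a purely stylistic, behavior-preserving substitution. A minimal, hypothetical
sketch of the equivalence — illustrative only, not code from this repository:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        start := time.Now()
        time.Sleep(10 * time.Millisecond)

        // Both expressions report the elapsed time since start.
        fmt.Println(time.Now().Sub(start)) // verbose form being replaced
        fmt.Println(time.Since(start))     // equivalent, preferred form
    }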
 pkg/cloudprovider/providers/gce/gce_op.go           |  4 ++--
 pkg/controller/bootstrap/tokencleaner.go            |  2 +-
 .../certificates/certificate_controller.go          |  2 +-
 pkg/controller/daemon/daemon_controller.go          |  2 +-
 pkg/controller/disruption/disruption.go             |  2 +-
 pkg/controller/endpoint/endpoints_controller.go     |  2 +-
 pkg/controller/job/job_controller.go                |  2 +-
 pkg/controller/namespace/namespace_controller.go    |  2 +-
 .../resourcequota/resource_quota_controller.go      |  2 +-
 pkg/controller/route/route_controller.go            |  8 ++++----
 .../serviceaccount/serviceaccounts_controller.go    |  2 +-
 pkg/controller/statefulset/stateful_set.go          |  2 +-
 .../pvcprotection/pvc_protection_controller.go      |  2 +-
 .../volume/pvprotection/pv_protection_controller.go |  2 +-
 .../dockershim/libdocker/kube_docker_client.go      |  2 +-
 pkg/printers/internalversion/printers.go            |  3 ++-
 pkg/proxy/userspace/roundrobin.go                   |  4 ++--
 pkg/proxy/winuserspace/roundrobin.go                |  4 ++--
 pkg/ssh/ssh.go                                      |  2 +-
 .../apiserver/pkg/endpoints/metrics/metrics.go      |  4 ++--
 .../plugin/pkg/client/auth/openstack/openstack.go   |  2 +-
 test/e2e/framework/util.go                          | 12 ++++++------
 test/e2e/scalability/density.go                     |  2 +-
 test/images/liveness/server.go                      |  4 ++--
 .../resource-consumer/consume-cpu/consume_cpu.go    |  2 +-
 25 files changed, 39 insertions(+), 38 deletions(-)

diff --git a/pkg/cloudprovider/providers/gce/gce_op.go b/pkg/cloudprovider/providers/gce/gce_op.go
index b354d79cea..7c8c3a815b 100644
--- a/pkg/cloudprovider/providers/gce/gce_op.go
+++ b/pkg/cloudprovider/providers/gce/gce_op.go
@@ -45,7 +45,7 @@ func (gce *GCECloud) waitForOp(op *computev1.Operation, getOperation func(operat
 	return wait.Poll(operationPollInterval, operationPollTimeoutDuration, func() (bool, error) {
 		start := time.Now()
 		gce.operationPollRateLimiter.Accept()
-		duration := time.Now().Sub(start)
+		duration := time.Since(start)
 		if duration > 5*time.Second {
 			glog.V(2).Infof("pollOperation: throttled %v for %v", duration, opName)
 		}
@@ -57,7 +57,7 @@ func (gce *GCECloud) waitForOp(op *computev1.Operation, getOperation func(operat
 
 		done := opIsDone(pollOp)
 		if done {
-			duration := time.Now().Sub(opStart)
+			duration := time.Since(opStart)
 			if duration > 1*time.Minute {
 				// Log the JSON. It's cleaner than the %v structure.
 				enc, err := pollOp.MarshalJSON()
diff --git a/pkg/controller/bootstrap/tokencleaner.go b/pkg/controller/bootstrap/tokencleaner.go
index d39582b2fb..c9fb2c1483 100644
--- a/pkg/controller/bootstrap/tokencleaner.go
+++ b/pkg/controller/bootstrap/tokencleaner.go
@@ -161,7 +161,7 @@ func (tc *TokenCleaner) processNextWorkItem() bool {
 func (tc *TokenCleaner) syncFunc(key string) error {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing secret %q (%v)", key, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished syncing secret %q (%v)", key, time.Since(startTime))
 	}()
 
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
diff --git a/pkg/controller/certificates/certificate_controller.go b/pkg/controller/certificates/certificate_controller.go
index e112363e2a..088faf50e2 100644
--- a/pkg/controller/certificates/certificate_controller.go
+++ b/pkg/controller/certificates/certificate_controller.go
@@ -169,7 +169,7 @@ func (cc *CertificateController) enqueueCertificateRequest(obj interface{}) {
 func (cc *CertificateController) syncFunc(key string) error {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing certificate request %q (%v)", key, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished syncing certificate request %q (%v)", key, time.Since(startTime))
 	}()
 	csr, err := cc.csrLister.Get(key)
 	if errors.IsNotFound(err) {
diff --git a/pkg/controller/daemon/daemon_controller.go b/pkg/controller/daemon/daemon_controller.go
index e278072d14..3e34294de8 100644
--- a/pkg/controller/daemon/daemon_controller.go
+++ b/pkg/controller/daemon/daemon_controller.go
@@ -1110,7 +1110,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, hash
 func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Since(startTime))
 	}()
 
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
diff --git a/pkg/controller/disruption/disruption.go b/pkg/controller/disruption/disruption.go
index 57ca546892..b6ae484a8c 100644
--- a/pkg/controller/disruption/disruption.go
+++ b/pkg/controller/disruption/disruption.go
@@ -471,7 +471,7 @@ func (dc *DisruptionController) processNextRecheckWorkItem() bool {
 func (dc *DisruptionController) sync(key string) error {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing PodDisruptionBudget %q (%v)", key, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished syncing PodDisruptionBudget %q (%v)", key, time.Since(startTime))
 	}()
 
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
diff --git a/pkg/controller/endpoint/endpoints_controller.go b/pkg/controller/endpoint/endpoints_controller.go
index a0ae0e4e89..fc3a16ac5b 100644
--- a/pkg/controller/endpoint/endpoints_controller.go
+++ b/pkg/controller/endpoint/endpoints_controller.go
@@ -385,7 +385,7 @@ func (e *EndpointController) handleErr(err error, key interface{}) {
 func (e *EndpointController) syncService(key string) error {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Since(startTime))
 	}()
 
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
diff --git a/pkg/controller/job/job_controller.go b/pkg/controller/job/job_controller.go
index 552fdfd8fe..360b496051 100644
--- a/pkg/controller/job/job_controller.go
+++ b/pkg/controller/job/job_controller.go
@@ -434,7 +434,7 @@ func (jm *JobController) getPodsForJob(j *batch.Job) ([]*v1.Pod, error) {
 func (jm *JobController) syncJob(key string) (bool, error) {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing job %q (%v)", key, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished syncing job %q (%v)", key, time.Since(startTime))
 	}()
 
 	ns, name, err := cache.SplitMetaNamespaceKey(key)
diff --git a/pkg/controller/namespace/namespace_controller.go b/pkg/controller/namespace/namespace_controller.go
index 32a0bb33fd..653c207e4c 100644
--- a/pkg/controller/namespace/namespace_controller.go
+++ b/pkg/controller/namespace/namespace_controller.go
@@ -163,7 +163,7 @@ func (nm *NamespaceController) worker() {
 func (nm *NamespaceController) syncNamespaceFromKey(key string) (err error) {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
 	}()
 
 	namespace, err := nm.lister.Get(key)
diff --git a/pkg/controller/resourcequota/resource_quota_controller.go b/pkg/controller/resourcequota/resource_quota_controller.go
index 71cf8310e2..75878d9d0e 100644
--- a/pkg/controller/resourcequota/resource_quota_controller.go
+++ b/pkg/controller/resourcequota/resource_quota_controller.go
@@ -297,7 +297,7 @@ func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) {
 func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err error) {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing resource quota %q (%v)", key, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished syncing resource quota %q (%v)", key, time.Since(startTime))
 	}()
 
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
diff --git a/pkg/controller/route/route_controller.go b/pkg/controller/route/route_controller.go
index 3b92cbe1ed..08fb9145e9 100644
--- a/pkg/controller/route/route_controller.go
+++ b/pkg/controller/route/route_controller.go
@@ -173,13 +173,13 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R
 				// Ensure that we don't have more than maxConcurrentRouteCreations
 				// CreateRoute calls in flight.
 				rateLimiter <- struct{}{}
-				glog.Infof("Creating route for node %s %s with hint %s, throttled %v", nodeName, route.DestinationCIDR, nameHint, time.Now().Sub(startTime))
+				glog.Infof("Creating route for node %s %s with hint %s, throttled %v", nodeName, route.DestinationCIDR, nameHint, time.Since(startTime))
 				err := rc.routes.CreateRoute(context.TODO(), rc.clusterName, nameHint, route)
 				<-rateLimiter
 
 				rc.updateNetworkingCondition(nodeName, err == nil)
 				if err != nil {
-					msg := fmt.Sprintf("Could not create route %s %s for node %s after %v: %v", nameHint, route.DestinationCIDR, nodeName, time.Now().Sub(startTime), err)
+					msg := fmt.Sprintf("Could not create route %s %s for node %s after %v: %v", nameHint, route.DestinationCIDR, nodeName, time.Since(startTime), err)
 					if rc.recorder != nil {
 						rc.recorder.Eventf(
 							&v1.ObjectReference{
@@ -218,9 +218,9 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R
 			defer wg.Done()
 			glog.Infof("Deleting route %s %s", route.Name, route.DestinationCIDR)
 			if err := rc.routes.DeleteRoute(context.TODO(), rc.clusterName, route); err != nil {
-				glog.Errorf("Could not delete route %s %s after %v: %v", route.Name, route.DestinationCIDR, time.Now().Sub(startTime), err)
+				glog.Errorf("Could not delete route %s %s after %v: %v", route.Name, route.DestinationCIDR, time.Since(startTime), err)
 			} else {
-				glog.Infof("Deleted route %s %s after %v", route.Name, route.DestinationCIDR, time.Now().Sub(startTime))
+				glog.Infof("Deleted route %s %s after %v", route.Name, route.DestinationCIDR, time.Since(startTime))
 			}
 		}(route, time.Now())
 	}
diff --git a/pkg/controller/serviceaccount/serviceaccounts_controller.go b/pkg/controller/serviceaccount/serviceaccounts_controller.go
index e0c41b82f8..4d0f53bb0b 100644
--- a/pkg/controller/serviceaccount/serviceaccounts_controller.go
+++ b/pkg/controller/serviceaccount/serviceaccounts_controller.go
@@ -183,7 +183,7 @@ func (c *ServiceAccountsController) processNextWorkItem() bool {
 func (c *ServiceAccountsController) syncNamespace(key string) error {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
 	}()
 
 	ns, err := c.nsLister.Get(key)
diff --git a/pkg/controller/statefulset/stateful_set.go b/pkg/controller/statefulset/stateful_set.go
index c82b82276e..2e197c9da6 100644
--- a/pkg/controller/statefulset/stateful_set.go
+++ b/pkg/controller/statefulset/stateful_set.go
@@ -415,7 +415,7 @@ func (ssc *StatefulSetController) worker() {
 func (ssc *StatefulSetController) sync(key string) error {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing statefulset %q (%v)", key, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished syncing statefulset %q (%v)", key, time.Since(startTime))
 	}()
 
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
diff --git a/pkg/controller/volume/pvcprotection/pvc_protection_controller.go b/pkg/controller/volume/pvcprotection/pvc_protection_controller.go
index 7c1ae1f562..0519db78eb 100644
--- a/pkg/controller/volume/pvcprotection/pvc_protection_controller.go
+++ b/pkg/controller/volume/pvcprotection/pvc_protection_controller.go
@@ -141,7 +141,7 @@ func (c *Controller) processPVC(pvcNamespace, pvcName string) error {
 	glog.V(4).Infof("Processing PVC %s/%s", pvcNamespace, pvcName)
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished processing PVC %s/%s (%v)", pvcNamespace, pvcName, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished processing PVC %s/%s (%v)", pvcNamespace, pvcName, time.Since(startTime))
 	}()
 
 	pvc, err := c.pvcLister.PersistentVolumeClaims(pvcNamespace).Get(pvcName)
diff --git a/pkg/controller/volume/pvprotection/pv_protection_controller.go b/pkg/controller/volume/pvprotection/pv_protection_controller.go
index cd4b384141..4d29f39f31 100644
--- a/pkg/controller/volume/pvprotection/pv_protection_controller.go
+++ b/pkg/controller/volume/pvprotection/pv_protection_controller.go
@@ -119,7 +119,7 @@ func (c *Controller) processPV(pvName string) error {
 	glog.V(4).Infof("Processing PV %s", pvName)
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished processing PV %s (%v)", pvName, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished processing PV %s (%v)", pvName, time.Since(startTime))
 	}()
 
 	pv, err := c.pvLister.Get(pvName)
diff --git a/pkg/kubelet/dockershim/libdocker/kube_docker_client.go b/pkg/kubelet/dockershim/libdocker/kube_docker_client.go
index 86fabe163b..02ef40bc79 100644
--- a/pkg/kubelet/dockershim/libdocker/kube_docker_client.go
+++ b/pkg/kubelet/dockershim/libdocker/kube_docker_client.go
@@ -337,7 +337,7 @@ func (p *progressReporter) start() {
 			case <-ticker.C:
 				progress, timestamp := p.progress.get()
 				// If there is no progress for p.imagePullProgressDeadline, cancel the operation.
-				if time.Now().Sub(timestamp) > p.imagePullProgressDeadline {
+				if time.Since(timestamp) > p.imagePullProgressDeadline {
 					glog.Errorf("Cancel pulling image %q because of no progress for %v, latest progress: %q", p.image, p.imagePullProgressDeadline, progress)
 					p.cancel()
 					return
diff --git a/pkg/printers/internalversion/printers.go b/pkg/printers/internalversion/printers.go
index 08475be5c0..1f7eef4016 100644
--- a/pkg/printers/internalversion/printers.go
+++ b/pkg/printers/internalversion/printers.go
@@ -503,7 +503,8 @@ func translateTimestamp(timestamp metav1.Time) string {
 	if timestamp.IsZero() {
 		return ""
 	}
-	return duration.ShortHumanDuration(time.Now().Sub(timestamp.Time))
+
+	return duration.ShortHumanDuration(time.Since(timestamp.Time))
 }
 
 var (
diff --git a/pkg/proxy/userspace/roundrobin.go b/pkg/proxy/userspace/roundrobin.go
index e2aca29913..3ca546b5e0 100644
--- a/pkg/proxy/userspace/roundrobin.go
+++ b/pkg/proxy/userspace/roundrobin.go
@@ -159,7 +159,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne
 	}
 	if !sessionAffinityReset {
 		sessionAffinity, exists := state.affinity.affinityMap[ipaddr]
-		if exists && int(time.Now().Sub(sessionAffinity.lastUsed).Seconds()) < state.affinity.ttlSeconds {
+		if exists && int(time.Since(sessionAffinity.lastUsed).Seconds()) < state.affinity.ttlSeconds {
 			// Affinity wins.
 			endpoint := sessionAffinity.endpoint
 			sessionAffinity.lastUsed = time.Now()
@@ -378,7 +378,7 @@ func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortNa
 		return
 	}
 	for ip, affinity := range state.affinity.affinityMap {
-		if int(time.Now().Sub(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
+		if int(time.Since(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
 			glog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort)
 			delete(state.affinity.affinityMap, ip)
 		}
diff --git a/pkg/proxy/winuserspace/roundrobin.go b/pkg/proxy/winuserspace/roundrobin.go
index 27b8355599..161d616ab5 100644
--- a/pkg/proxy/winuserspace/roundrobin.go
+++ b/pkg/proxy/winuserspace/roundrobin.go
@@ -149,7 +149,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne
 	}
 	if !sessionAffinityReset {
 		sessionAffinity, exists := state.affinity.affinityMap[ipaddr]
-		if exists && int(time.Now().Sub(sessionAffinity.lastUsed).Seconds()) < state.affinity.ttlSeconds {
+		if exists && int(time.Since(sessionAffinity.lastUsed).Seconds()) < state.affinity.ttlSeconds {
 			// Affinity wins.
 			endpoint := sessionAffinity.endpoint
 			sessionAffinity.lastUsed = time.Now()
@@ -366,7 +366,7 @@ func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortNa
 		return
 	}
 	for ip, affinity := range state.affinity.affinityMap {
-		if int(time.Now().Sub(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
+		if int(time.Since(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
 			glog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort)
 			delete(state.affinity.affinityMap, ip)
 		}
diff --git a/pkg/ssh/ssh.go b/pkg/ssh/ssh.go
index c138acb32d..637d0564db 100644
--- a/pkg/ssh/ssh.go
+++ b/pkg/ssh/ssh.go
@@ -399,7 +399,7 @@ func (l *SSHTunnelList) Dial(net, addr string) (net.Conn, error) {
 	id := mathrand.Int63() // So you can match begins/ends in the log.
 	glog.Infof("[%x: %v] Dialing...", id, addr)
 	defer func() {
-		glog.Infof("[%x: %v] Dialed in %v.", id, addr, time.Now().Sub(start))
+		glog.Infof("[%x: %v] Dialed in %v.", id, addr, time.Since(start))
 	}()
 	tunnel, err := l.pickTunnel(strings.Split(addr, ":")[0])
 	if err != nil {
diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go
index d3f155c11f..3c2aaddbe3 100644
--- a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go
+++ b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go
@@ -198,7 +198,7 @@ func InstrumentRouteFunc(verb, resource, subresource, scope string, routeFunc re
 
 		routeFunc(request, response)
 
-		MonitorRequest(request.Request, verb, resource, subresource, scope, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Now().Sub(now))
+		MonitorRequest(request.Request, verb, resource, subresource, scope, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Since(now))
 	})
 }
 
@@ -220,7 +220,7 @@ func InstrumentHandlerFunc(verb, resource, subresource, scope string, handler ht
 
 		handler(w, req)
 
-		MonitorRequest(req, verb, resource, subresource, scope, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Now().Sub(now))
+		MonitorRequest(req, verb, resource, subresource, scope, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Since(now))
 	}
 }
 
diff --git a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack.go b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack.go
index 490eff1281..52ec5cb0b9 100644
--- a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack.go
+++ b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack.go
@@ -85,7 +85,7 @@ func (c *cachedGetter) Token() (string, error) {
 
 	var err error
 	// no token or exceeds the TTL
-	if c.token == "" || time.Now().Sub(c.born) > c.ttl {
+	if c.token == "" || time.Since(c.born) > c.ttl {
 		c.token, err = c.tokenGetter.Token()
 		if err != nil {
 			return "", fmt.Errorf("failed to get token: %s", err)
diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
index 7c32febdee..87869ce1da 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -1116,7 +1116,7 @@ func deleteNS(c clientset.Interface, clientPool dynamic.ClientPool, namespace st
 		// no remaining content, but namespace was not deleted (namespace controller is probably wedged)
 		return fmt.Errorf("namespace %v was not deleted with limit: %v, namespace is empty but is not yet removed", namespace, err)
 	}
-	Logf("namespace %v deletion completed in %s", namespace, time.Now().Sub(startTime))
+	Logf("namespace %v deletion completed in %s", namespace, time.Since(startTime))
 
 	return nil
 }
@@ -3028,13 +3028,13 @@ func DeleteResourceAndPods(clientset clientset.Interface, internalClientset inte
 	if err := testutils.DeleteResourceUsingReaperWithRetries(internalClientset, kind, ns, name, nil, scaleClient); err != nil {
 		return fmt.Errorf("error while stopping %v: %s: %v", kind, name, err)
 	}
-	deleteTime := time.Now().Sub(startTime)
+	deleteTime := time.Since(startTime)
 	Logf("Deleting %v %s took: %v", kind, name, deleteTime)
 	err = waitForPodsInactive(ps, 100*time.Millisecond, 10*time.Minute)
 	if err != nil {
 		return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
 	}
-	terminatePodTime := time.Now().Sub(startTime) - deleteTime
+	terminatePodTime := time.Since(startTime) - deleteTime
 	Logf("Terminating %v %s pods took: %v", kind, name, terminatePodTime)
 	// this is to relieve namespace controller's pressure when deleting the
 	// namespace after a test.
@@ -3042,7 +3042,7 @@ func DeleteResourceAndPods(clientset clientset.Interface, internalClientset inte
 	if err != nil {
 		return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
 	}
-	gcPodTime := time.Now().Sub(startTime) - terminatePodTime
+	gcPodTime := time.Since(startTime) - terminatePodTime
 	Logf("Garbage collecting %v %s pods took: %v", kind, name, gcPodTime)
 	return nil
 }
@@ -3080,7 +3080,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
 	if err := testutils.DeleteResourceWithRetries(c, kind, ns, name, deleteOption); err != nil {
 		return err
 	}
-	deleteTime := time.Now().Sub(startTime)
+	deleteTime := time.Since(startTime)
 	Logf("Deleting %v %s took: %v", kind, name, deleteTime)
 
 	var interval, timeout time.Duration
@@ -3104,7 +3104,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
 	if err != nil {
 		return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
 	}
-	terminatePodTime := time.Now().Sub(startTime) - deleteTime
+	terminatePodTime := time.Since(startTime) - deleteTime
 	Logf("Terminating %v %s pods took: %v", kind, name, terminatePodTime)
 
 	err = waitForPodsGone(ps, interval, 10*time.Minute)
diff --git a/test/e2e/scalability/density.go b/test/e2e/scalability/density.go
index e0cdadafdd..19d78b31f0 100644
--- a/test/e2e/scalability/density.go
+++ b/test/e2e/scalability/density.go
@@ -274,7 +274,7 @@ func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTi
 	logStopCh := make(chan struct{})
 	go logPodStartupStatus(dtc.ClientSets[0], dtc.PodCount, map[string]string{"type": "densityPod"}, dtc.PollInterval, logStopCh)
 	wg.Wait()
-	startupTime := time.Now().Sub(startTime)
+	startupTime := time.Since(startTime)
 	close(logStopCh)
 	framework.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
 	framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))
diff --git a/test/images/liveness/server.go b/test/images/liveness/server.go
index 889d277656..a32ac92676 100644
--- a/test/images/liveness/server.go
+++ b/test/images/liveness/server.go
@@ -29,11 +29,11 @@ func main() {
 	started := time.Now()
 	http.HandleFunc("/started", func(w http.ResponseWriter, r *http.Request) {
 		w.WriteHeader(200)
-		data := (time.Now().Sub(started)).String()
+		data := (time.Since(started)).String()
 		w.Write([]byte(data))
 	})
 	http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
-		duration := time.Now().Sub(started)
+		duration := time.Since(started)
 		if duration.Seconds() > 10 {
 			w.WriteHeader(500)
 			w.Write([]byte(fmt.Sprintf("error: %v", duration.Seconds())))
diff --git a/test/images/resource-consumer/consume-cpu/consume_cpu.go b/test/images/resource-consumer/consume-cpu/consume_cpu.go
index b7baf6e71b..08a6dc9866 100644
--- a/test/images/resource-consumer/consume-cpu/consume_cpu.go
+++ b/test/images/resource-consumer/consume-cpu/consume_cpu.go
@@ -45,7 +45,7 @@ func main() {
 	duration := time.Duration(*durationSec) * time.Second
 	start := time.Now()
 	first := systemstat.GetProcCPUSample()
-	for time.Now().Sub(start) < duration {
+	for time.Since(start) < duration {
 		cpu := systemstat.GetProcCPUAverage(first, systemstat.GetProcCPUSample(), systemstat.GetUptime().Uptime)
 		if cpu.TotalPct < millicoresPct {
 			doSomething()