should use time.Since instead of time.Now().Sub

pull/8/head
Wang Guoliang 2018-02-11 21:17:00 +08:00 committed by Guoliang Wang
parent a5c3c8d16c
commit 89669283fe
25 changed files with 39 additions and 38 deletions
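
time.Since(t) is documented in the Go standard library as shorthand for time.Now().Sub(t), so this change is purely a readability cleanup: every call site computes the same elapsed duration with the shorter, more idiomatic form. A minimal standalone sketch of the before/after pattern (the timed work here is illustrative, not taken from the commit):

package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	time.Sleep(10 * time.Millisecond) // stand-in for the work being timed

	// The two expressions below are equivalent.
	verbose := time.Now().Sub(start) // old form used throughout this diff
	concise := time.Since(start)     // form this commit switches to
	fmt.Println(verbose, concise)
}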


@@ -45,7 +45,7 @@ func (gce *GCECloud) waitForOp(op *computev1.Operation, getOperation func(operat
return wait.Poll(operationPollInterval, operationPollTimeoutDuration, func() (bool, error) {
start := time.Now()
gce.operationPollRateLimiter.Accept()
- duration := time.Now().Sub(start)
+ duration := time.Since(start)
if duration > 5*time.Second {
glog.V(2).Infof("pollOperation: throttled %v for %v", duration, opName)
}
@@ -57,7 +57,7 @@ func (gce *GCECloud) waitForOp(op *computev1.Operation, getOperation func(operat
done := opIsDone(pollOp)
if done {
- duration := time.Now().Sub(opStart)
+ duration := time.Since(opStart)
if duration > 1*time.Minute {
// Log the JSON. It's cleaner than the %v structure.
enc, err := pollOp.MarshalJSON()


@@ -161,7 +161,7 @@ func (tc *TokenCleaner) processNextWorkItem() bool {
func (tc *TokenCleaner) syncFunc(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing secret %q (%v)", key, time.Now().Sub(startTime))
glog.V(4).Infof("Finished syncing secret %q (%v)", key, time.Since(startTime))
}()
namespace, name, err := cache.SplitMetaNamespaceKey(key)


@@ -169,7 +169,7 @@ func (cc *CertificateController) enqueueCertificateRequest(obj interface{}) {
func (cc *CertificateController) syncFunc(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing certificate request %q (%v)", key, time.Now().Sub(startTime))
glog.V(4).Infof("Finished syncing certificate request %q (%v)", key, time.Since(startTime))
}()
csr, err := cc.csrLister.Get(key)
if errors.IsNotFound(err) {


@@ -1110,7 +1110,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, hash
func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Now().Sub(startTime))
glog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Since(startTime))
}()
namespace, name, err := cache.SplitMetaNamespaceKey(key)


@@ -471,7 +471,7 @@ func (dc *DisruptionController) processNextRecheckWorkItem() bool {
func (dc *DisruptionController) sync(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing PodDisruptionBudget %q (%v)", key, time.Now().Sub(startTime))
glog.V(4).Infof("Finished syncing PodDisruptionBudget %q (%v)", key, time.Since(startTime))
}()
namespace, name, err := cache.SplitMetaNamespaceKey(key)


@@ -385,7 +385,7 @@ func (e *EndpointController) handleErr(err error, key interface{}) {
func (e *EndpointController) syncService(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Now().Sub(startTime))
glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Since(startTime))
}()
namespace, name, err := cache.SplitMetaNamespaceKey(key)


@@ -434,7 +434,7 @@ func (jm *JobController) getPodsForJob(j *batch.Job) ([]*v1.Pod, error) {
func (jm *JobController) syncJob(key string) (bool, error) {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing job %q (%v)", key, time.Now().Sub(startTime))
glog.V(4).Infof("Finished syncing job %q (%v)", key, time.Since(startTime))
}()
ns, name, err := cache.SplitMetaNamespaceKey(key)


@@ -163,7 +163,7 @@ func (nm *NamespaceController) worker() {
func (nm *NamespaceController) syncNamespaceFromKey(key string) (err error) {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Now().Sub(startTime))
glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
}()
namespace, err := nm.lister.Get(key)


@@ -297,7 +297,7 @@ func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) {
func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err error) {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing resource quota %q (%v)", key, time.Now().Sub(startTime))
glog.V(4).Infof("Finished syncing resource quota %q (%v)", key, time.Since(startTime))
}()
namespace, name, err := cache.SplitMetaNamespaceKey(key)


@@ -173,13 +173,13 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R
// Ensure that we don't have more than maxConcurrentRouteCreations
// CreateRoute calls in flight.
rateLimiter <- struct{}{}
glog.Infof("Creating route for node %s %s with hint %s, throttled %v", nodeName, route.DestinationCIDR, nameHint, time.Now().Sub(startTime))
glog.Infof("Creating route for node %s %s with hint %s, throttled %v", nodeName, route.DestinationCIDR, nameHint, time.Since(startTime))
err := rc.routes.CreateRoute(context.TODO(), rc.clusterName, nameHint, route)
<-rateLimiter
rc.updateNetworkingCondition(nodeName, err == nil)
if err != nil {
msg := fmt.Sprintf("Could not create route %s %s for node %s after %v: %v", nameHint, route.DestinationCIDR, nodeName, time.Now().Sub(startTime), err)
msg := fmt.Sprintf("Could not create route %s %s for node %s after %v: %v", nameHint, route.DestinationCIDR, nodeName, time.Since(startTime), err)
if rc.recorder != nil {
rc.recorder.Eventf(
&v1.ObjectReference{
@@ -218,9 +218,9 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R
defer wg.Done()
glog.Infof("Deleting route %s %s", route.Name, route.DestinationCIDR)
if err := rc.routes.DeleteRoute(context.TODO(), rc.clusterName, route); err != nil {
glog.Errorf("Could not delete route %s %s after %v: %v", route.Name, route.DestinationCIDR, time.Now().Sub(startTime), err)
glog.Errorf("Could not delete route %s %s after %v: %v", route.Name, route.DestinationCIDR, time.Since(startTime), err)
} else {
glog.Infof("Deleted route %s %s after %v", route.Name, route.DestinationCIDR, time.Now().Sub(startTime))
glog.Infof("Deleted route %s %s after %v", route.Name, route.DestinationCIDR, time.Since(startTime))
}
}(route, time.Now())
}


@@ -183,7 +183,7 @@ func (c *ServiceAccountsController) processNextWorkItem() bool {
func (c *ServiceAccountsController) syncNamespace(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Now().Sub(startTime))
glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
}()
ns, err := c.nsLister.Get(key)


@@ -415,7 +415,7 @@ func (ssc *StatefulSetController) worker() {
func (ssc *StatefulSetController) sync(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing statefulset %q (%v)", key, time.Now().Sub(startTime))
glog.V(4).Infof("Finished syncing statefulset %q (%v)", key, time.Since(startTime))
}()
namespace, name, err := cache.SplitMetaNamespaceKey(key)


@@ -141,7 +141,7 @@ func (c *Controller) processPVC(pvcNamespace, pvcName string) error {
glog.V(4).Infof("Processing PVC %s/%s", pvcNamespace, pvcName)
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished processing PVC %s/%s (%v)", pvcNamespace, pvcName, time.Now().Sub(startTime))
glog.V(4).Infof("Finished processing PVC %s/%s (%v)", pvcNamespace, pvcName, time.Since(startTime))
}()
pvc, err := c.pvcLister.PersistentVolumeClaims(pvcNamespace).Get(pvcName)


@@ -119,7 +119,7 @@ func (c *Controller) processPV(pvName string) error {
glog.V(4).Infof("Processing PV %s", pvName)
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished processing PV %s (%v)", pvName, time.Now().Sub(startTime))
glog.V(4).Infof("Finished processing PV %s (%v)", pvName, time.Since(startTime))
}()
pv, err := c.pvLister.Get(pvName)


@@ -337,7 +337,7 @@ func (p *progressReporter) start() {
case <-ticker.C:
progress, timestamp := p.progress.get()
// If there is no progress for p.imagePullProgressDeadline, cancel the operation.
- if time.Now().Sub(timestamp) > p.imagePullProgressDeadline {
+ if time.Since(timestamp) > p.imagePullProgressDeadline {
glog.Errorf("Cancel pulling image %q because of no progress for %v, latest progress: %q", p.image, p.imagePullProgressDeadline, progress)
p.cancel()
return


@@ -503,7 +503,8 @@ func translateTimestamp(timestamp metav1.Time) string {
if timestamp.IsZero() {
return "<unknown>"
}
- return duration.ShortHumanDuration(time.Now().Sub(timestamp.Time))
+ return duration.ShortHumanDuration(time.Since(timestamp.Time))
}
var (


@@ -159,7 +159,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne
}
if !sessionAffinityReset {
sessionAffinity, exists := state.affinity.affinityMap[ipaddr]
- if exists && int(time.Now().Sub(sessionAffinity.lastUsed).Seconds()) < state.affinity.ttlSeconds {
+ if exists && int(time.Since(sessionAffinity.lastUsed).Seconds()) < state.affinity.ttlSeconds {
// Affinity wins.
endpoint := sessionAffinity.endpoint
sessionAffinity.lastUsed = time.Now()
@@ -378,7 +378,7 @@ func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortNa
return
}
for ip, affinity := range state.affinity.affinityMap {
- if int(time.Now().Sub(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
+ if int(time.Since(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
glog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort)
delete(state.affinity.affinityMap, ip)
}


@@ -149,7 +149,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne
}
if !sessionAffinityReset {
sessionAffinity, exists := state.affinity.affinityMap[ipaddr]
- if exists && int(time.Now().Sub(sessionAffinity.lastUsed).Seconds()) < state.affinity.ttlSeconds {
+ if exists && int(time.Since(sessionAffinity.lastUsed).Seconds()) < state.affinity.ttlSeconds {
// Affinity wins.
endpoint := sessionAffinity.endpoint
sessionAffinity.lastUsed = time.Now()
@@ -366,7 +366,7 @@ func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortNa
return
}
for ip, affinity := range state.affinity.affinityMap {
- if int(time.Now().Sub(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
+ if int(time.Since(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
glog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort)
delete(state.affinity.affinityMap, ip)
}


@@ -399,7 +399,7 @@ func (l *SSHTunnelList) Dial(net, addr string) (net.Conn, error) {
id := mathrand.Int63() // So you can match begins/ends in the log.
glog.Infof("[%x: %v] Dialing...", id, addr)
defer func() {
glog.Infof("[%x: %v] Dialed in %v.", id, addr, time.Now().Sub(start))
glog.Infof("[%x: %v] Dialed in %v.", id, addr, time.Since(start))
}()
tunnel, err := l.pickTunnel(strings.Split(addr, ":")[0])
if err != nil {


@@ -198,7 +198,7 @@ func InstrumentRouteFunc(verb, resource, subresource, scope string, routeFunc re
routeFunc(request, response)
- MonitorRequest(request.Request, verb, resource, subresource, scope, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Now().Sub(now))
+ MonitorRequest(request.Request, verb, resource, subresource, scope, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Since(now))
})
}
@@ -220,7 +220,7 @@ func InstrumentHandlerFunc(verb, resource, subresource, scope string, handler ht
handler(w, req)
- MonitorRequest(req, verb, resource, subresource, scope, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Now().Sub(now))
+ MonitorRequest(req, verb, resource, subresource, scope, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Since(now))
}
}


@@ -85,7 +85,7 @@ func (c *cachedGetter) Token() (string, error) {
var err error
// no token or exceeds the TTL
if c.token == "" || time.Now().Sub(c.born) > c.ttl {
if c.token == "" || time.Since(c.born) > c.ttl {
c.token, err = c.tokenGetter.Token()
if err != nil {
return "", fmt.Errorf("failed to get token: %s", err)


@@ -1116,7 +1116,7 @@ func deleteNS(c clientset.Interface, clientPool dynamic.ClientPool, namespace st
// no remaining content, but namespace was not deleted (namespace controller is probably wedged)
return fmt.Errorf("namespace %v was not deleted with limit: %v, namespace is empty but is not yet removed", namespace, err)
}
Logf("namespace %v deletion completed in %s", namespace, time.Now().Sub(startTime))
Logf("namespace %v deletion completed in %s", namespace, time.Since(startTime))
return nil
}
@@ -3028,13 +3028,13 @@ func DeleteResourceAndPods(clientset clientset.Interface, internalClientset inte
if err := testutils.DeleteResourceUsingReaperWithRetries(internalClientset, kind, ns, name, nil, scaleClient); err != nil {
return fmt.Errorf("error while stopping %v: %s: %v", kind, name, err)
}
- deleteTime := time.Now().Sub(startTime)
+ deleteTime := time.Since(startTime)
Logf("Deleting %v %s took: %v", kind, name, deleteTime)
err = waitForPodsInactive(ps, 100*time.Millisecond, 10*time.Minute)
if err != nil {
return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
}
- terminatePodTime := time.Now().Sub(startTime) - deleteTime
+ terminatePodTime := time.Since(startTime) - deleteTime
Logf("Terminating %v %s pods took: %v", kind, name, terminatePodTime)
// this is to relieve namespace controller's pressure when deleting the
// namespace after a test.
@@ -3042,7 +3042,7 @@ func DeleteResourceAndPods(clientset clientset.Interface, internalClientset inte
if err != nil {
return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
}
- gcPodTime := time.Now().Sub(startTime) - terminatePodTime
+ gcPodTime := time.Since(startTime) - terminatePodTime
Logf("Garbage collecting %v %s pods took: %v", kind, name, gcPodTime)
return nil
}
@@ -3080,7 +3080,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
if err := testutils.DeleteResourceWithRetries(c, kind, ns, name, deleteOption); err != nil {
return err
}
- deleteTime := time.Now().Sub(startTime)
+ deleteTime := time.Since(startTime)
Logf("Deleting %v %s took: %v", kind, name, deleteTime)
var interval, timeout time.Duration
@@ -3104,7 +3104,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
if err != nil {
return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
}
- terminatePodTime := time.Now().Sub(startTime) - deleteTime
+ terminatePodTime := time.Since(startTime) - deleteTime
Logf("Terminating %v %s pods took: %v", kind, name, terminatePodTime)
err = waitForPodsGone(ps, interval, 10*time.Minute)


@@ -274,7 +274,7 @@ func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTi
logStopCh := make(chan struct{})
go logPodStartupStatus(dtc.ClientSets[0], dtc.PodCount, map[string]string{"type": "densityPod"}, dtc.PollInterval, logStopCh)
wg.Wait()
- startupTime := time.Now().Sub(startTime)
+ startupTime := time.Since(startTime)
close(logStopCh)
framework.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))


@@ -29,11 +29,11 @@ func main() {
started := time.Now()
http.HandleFunc("/started", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
- data := (time.Now().Sub(started)).String()
+ data := (time.Since(started)).String()
w.Write([]byte(data))
})
http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
- duration := time.Now().Sub(started)
+ duration := time.Since(started)
if duration.Seconds() > 10 {
w.WriteHeader(500)
w.Write([]byte(fmt.Sprintf("error: %v", duration.Seconds())))


@@ -45,7 +45,7 @@ func main() {
duration := time.Duration(*durationSec) * time.Second
start := time.Now()
first := systemstat.GetProcCPUSample()
- for time.Now().Sub(start) < duration {
+ for time.Since(start) < duration {
cpu := systemstat.GetProcCPUAverage(first, systemstat.GetProcCPUSample(), systemstat.GetUptime().Uptime)
if cpu.TotalPct < millicoresPct {
doSomething()