Merge pull request #75445 from shinytang6/enhance/fmt

Replace all time.Now().Sub with time.Since
k3s-v1.15.3
Kubernetes Prow Robot 2019-03-26 13:55:17 -07:00 committed by GitHub
commit 531dbd409f
14 changed files with 20 additions and 20 deletions
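
For context: in the Go standard library, time.Since(t) is documented as shorthand for time.Now().Sub(t), so every hunk below is a pure readability cleanup with no behavior change; the longer form is also what linters such as gosimple flag. A minimal standalone illustration (not code from this PR):

package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	time.Sleep(10 * time.Millisecond)

	// Equivalent by definition: time.Since(t) == time.Now().Sub(t).
	fmt.Println(time.Now().Sub(start)) // the long form this PR replaces
	fmt.Println(time.Since(start))     // the preferred shorthand
}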

@@ -276,7 +276,7 @@ func shouldBackupAPIServerCertAndKey(certAndKeyDir string) (bool, error) {
 		return false, errors.New("no certificate data found")
 	}
-	if time.Now().Sub(certs[0].NotBefore) > expiry {
+	if time.Since(certs[0].NotBefore) > expiry {
 		return true, nil
 	}

@@ -158,7 +158,7 @@ func (g *Cloud) waitForTPUOp(ctx context.Context, op *tpuapi.Operation) (*tpuapi
 		start := time.Now()
 		g.operationPollRateLimiter.Accept()
-		duration := time.Now().Sub(start)
+		duration := time.Since(start)
 		if duration > 5*time.Second {
 			klog.V(2).Infof("Getting operation %q throttled for %v", op.Name, duration)
 		}

@@ -192,7 +192,7 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R
 				klog.V(4).Infof(msg)
 				return err
 			}
-			klog.Infof("Created route for node %s %s with hint %s after %v", nodeName, route.DestinationCIDR, nameHint, time.Now().Sub(startTime))
+			klog.Infof("Created route for node %s %s with hint %s after %v", nodeName, route.DestinationCIDR, nameHint, time.Since(startTime))
 			return nil
 		})
 		if err != nil {

@@ -854,7 +854,7 @@ func (m *kubeGenericRuntimeManager) cleanupErrorTimeouts() {
 	m.errorMapLock.Lock()
 	defer m.errorMapLock.Unlock()
 	for name, timeout := range m.errorPrinted {
-		if time.Now().Sub(timeout) >= identicalErrorDelay {
+		if time.Since(timeout) >= identicalErrorDelay {
 			delete(m.errorPrinted, name)
 			delete(m.lastError, name)
 		}
@@ -913,7 +913,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp
 	defer m.errorMapLock.Unlock()
 	if err != nil {
 		lastMsg, ok := m.lastError[podFullName]
-		if !ok || err.Error() != lastMsg || time.Now().Sub(m.errorPrinted[podFullName]) >= identicalErrorDelay {
+		if !ok || err.Error() != lastMsg || time.Since(m.errorPrinted[podFullName]) >= identicalErrorDelay {
 			klog.Errorf("getPodContainerStatuses for pod %q failed: %v", podFullName, err)
 			m.errorPrinted[podFullName] = time.Now()
 			m.lastError[podFullName] = err.Error()

@@ -296,7 +296,7 @@ func (r *RemoteRuntimeService) cleanupErrorTimeouts() {
 	r.errorMapLock.Lock()
 	defer r.errorMapLock.Unlock()
 	for ID, timeout := range r.errorPrinted {
-		if time.Now().Sub(timeout) >= identicalErrorDelay {
+		if time.Since(timeout) >= identicalErrorDelay {
 			delete(r.lastError, ID)
 			delete(r.errorPrinted, ID)
 		}
@@ -317,7 +317,7 @@ func (r *RemoteRuntimeService) ContainerStatus(containerID string) (*runtimeapi.
 	if err != nil {
 		// Don't spam the log with endless messages about the same failure.
 		lastMsg, ok := r.lastError[containerID]
-		if !ok || err.Error() != lastMsg || time.Now().Sub(r.errorPrinted[containerID]) >= identicalErrorDelay {
+		if !ok || err.Error() != lastMsg || time.Since(r.errorPrinted[containerID]) >= identicalErrorDelay {
 			klog.Errorf("ContainerStatus %q from runtime service failed: %v", containerID, err)
 			r.errorPrinted[containerID] = time.Now()
 			r.lastError[containerID] = err.Error()
@@ -505,7 +505,7 @@ func (r *RemoteRuntimeService) ContainerStats(containerID string) (*runtimeapi.C
 	defer r.errorMapLock.Unlock()
 	if err != nil {
 		lastMsg, ok := r.lastError[containerID]
-		if !ok || err.Error() != lastMsg || time.Now().Sub(r.errorPrinted[containerID]) >= identicalErrorDelay {
+		if !ok || err.Error() != lastMsg || time.Since(r.errorPrinted[containerID]) >= identicalErrorDelay {
 			klog.Errorf("ContainerStatus %q from runtime service failed: %v", containerID, err)
 			r.errorPrinted[containerID] = time.Now()
 			r.lastError[containerID] = err.Error()
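
The kuberuntime and remote runtime hunks above share one log-deduplication idea: remember the last error string and when it was printed, and log again only when the error changes or identicalErrorDelay has elapsed; time.Since reads more naturally in that check. A minimal sketch of the pattern, with illustrative names rather than the actual kubelet types:

package dedup

import (
	"sync"
	"time"
)

const identicalErrorDelay = 1 * time.Minute

type errorDeduper struct {
	mu           sync.Mutex
	lastError    map[string]string    // key -> last error message seen
	errorPrinted map[string]time.Time // key -> when it was last logged
}

// shouldLog reports whether err for key is new, different from the last
// message seen, or stale enough to be worth logging again.
func (d *errorDeduper) shouldLog(key string, err error) bool {
	d.mu.Lock()
	defer d.mu.Unlock()
	lastMsg, ok := d.lastError[key]
	if !ok || err.Error() != lastMsg || time.Since(d.errorPrinted[key]) >= identicalErrorDelay {
		d.errorPrinted[key] = time.Now()
		d.lastError[key] = err.Error()
		return true
	}
	return false
}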

@@ -856,7 +856,7 @@ func printJob(obj *batch.Job, options printers.PrintOptions) ([]metav1beta1.Tabl
 	switch {
 	case obj.Status.StartTime == nil:
 	case obj.Status.CompletionTime == nil:
-		jobDuration = duration.HumanDuration(time.Now().Sub(obj.Status.StartTime.Time))
+		jobDuration = duration.HumanDuration(time.Since(obj.Status.StartTime.Time))
 	default:
 		jobDuration = duration.HumanDuration(obj.Status.CompletionTime.Sub(obj.Status.StartTime.Time))
 	}
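
Note that only the in-progress branch changes here: a finished Job's duration is the fixed interval between two recorded timestamps, so the default branch keeps CompletionTime.Sub(StartTime.Time); time.Since applies only when the interval ends at "now".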

@@ -828,7 +828,7 @@ func BenchmarkAuthorization(b *testing.B) {
 				},
 			},
 		})
-		diff := time.Now().Sub(start)
+		diff := time.Since(start)
 		atomic.AddInt64(&writes, 1)
 		switch {
 		case diff < time.Millisecond:

@@ -67,5 +67,5 @@ func ConvertToHumanReadableDateType(timestamp metav1.Time) string {
 	if timestamp.IsZero() {
 		return "<unknown>"
 	}
-	return duration.HumanDuration(time.Now().Sub(timestamp.Time))
+	return duration.HumanDuration(time.Since(timestamp.Time))
 }

@@ -44,8 +44,8 @@ func TestFakeClock(t *testing.T) {
 	tt := tc.Now()
 	tc.SetTime(tt.Add(time.Hour))
-	if tc.Now().Sub(tt) != time.Hour {
-		t.Errorf("input: %s now=%s gap=%s expected=%s", tt, tc.Now(), tc.Now().Sub(tt), time.Hour)
+	if tc.Since(tt) != time.Hour {
+		t.Errorf("input: %s now=%s gap=%s expected=%s", tt, tc.Now(), tc.Since(tt), time.Hour)
 	}
 }
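
This works because Since is part of the clock abstraction itself, not just the standard library (the reflector and backoff hunks below lean on the same method), which keeps elapsed-time logic deterministic under test. A small sketch, assuming the k8s.io/apimachinery/pkg/util/clock package as it existed at the time:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
)

func main() {
	fc := clock.NewFakeClock(time.Unix(0, 0))
	var c clock.Clock = fc // code under test depends only on the interface

	start := c.Now()
	fc.Step(time.Hour)          // advance fake time; no real waiting
	fmt.Println(c.Since(start)) // prints 1h0m0s, fully deterministic
}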

@@ -531,7 +531,7 @@ func TestWaitForWithEarlyClosingWaitFunc(t *testing.T) {
 	}, func() (bool, error) {
 		return false, nil
 	}, stopCh)
-	duration := time.Now().Sub(start)
+	duration := time.Since(start)
 	// The WaitFor should return immediately, so the duration is close to 0s.
 	if duration >= ForeverTestTimeout/2 {
@@ -555,7 +555,7 @@ func TestWaitForWithClosedChannel(t *testing.T) {
 	}, func() (bool, error) {
 		return false, nil
 	}, stopCh)
-	duration := time.Now().Sub(start)
+	duration := time.Since(start)
 	// The WaitFor should return immediately, so the duration is close to 0s.
 	if duration >= ForeverTestTimeout/2 {
 		t.Errorf("expected short timeout duration")

@@ -363,7 +363,7 @@ loop:
 		}
 	}
 
-	watchDuration := r.clock.Now().Sub(start)
+	watchDuration := r.clock.Since(start)
 	if watchDuration < 1*time.Second && eventCount == 0 {
 		return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name)
 	}

@@ -99,7 +99,7 @@ func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool {
 	if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
 		return false
 	}
-	return p.Clock.Now().Sub(eventTime) < entry.backoff
+	return p.Clock.Since(eventTime) < entry.backoff
 }
 
 // Returns True if time since lastupdate is less than the current backoff window.
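
For orientation, this is client-go's flowcontrol.Backoff: Next grows an item's backoff window, and IsInBackOffSince asks whether the window measured from a given event time is still open. A rough usage sketch under that assumption (not code from this PR; the polling loop is purely illustrative):

package retry

import (
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func retryWithBackoff(work func() error) {
	backoff := flowcontrol.NewBackOff(time.Second, 10*time.Second)
	const id = "my-item" // hypothetical per-item key

	for {
		if err := work(); err == nil {
			return
		}
		failedAt := backoff.Clock.Now()
		backoff.Next(id, failedAt) // double this item's backoff window
		// Stay out of the way until the window, measured from the
		// failure event, has closed.
		for backoff.IsInBackOffSince(id, failedAt) {
			time.Sleep(100 * time.Millisecond)
		}
	}
}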

@@ -114,14 +114,14 @@ func TestBackoffGC(t *testing.T) {
 	b.GC()
 	_, found := b.perItemBackoff[id]
 	if !found {
-		t.Errorf("expected GC to skip entry, elapsed time=%s maxDuration=%s", tc.Now().Sub(lastUpdate), maxDuration)
+		t.Errorf("expected GC to skip entry, elapsed time=%s maxDuration=%s", tc.Since(lastUpdate), maxDuration)
 	}
 	tc.Step(maxDuration + step)
 	b.GC()
 	r, found := b.perItemBackoff[id]
 	if found {
-		t.Errorf("expected GC of entry after %s got entry %v", tc.Now().Sub(lastUpdate), r)
+		t.Errorf("expected GC of entry after %s got entry %v", tc.Since(lastUpdate), r)
 	}
 }

@@ -96,7 +96,7 @@ func TestWatchBasedManager(t *testing.T) {
 			if err != nil {
 				t.Fatalf("failed on %s: %v", name, err)
 			}
-			if d := time.Now().Sub(start); d > time.Second {
+			if d := time.Since(start); d > time.Second {
 				t.Logf("%s took %v", name, d)
 			}
 		}