mirror of https://github.com/k3s-io/k3s
Merge pull request #26332 from resouer/util-debt
Automatic merge from submit-queue. Refactor util clock into its own pkg. Continue my work, ref #15634.
commit eae90a3631
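The change is mechanical but wide: the Clock interface and its implementations (RealClock, FakeClock, IntervalClock) move out of the catch-all k8s.io/kubernetes/pkg/util package into a dedicated k8s.io/kubernetes/pkg/util/clock package, and every caller swaps util.Clock for clock.Clock. A minimal sketch of the caller-side pattern, using only names that appear in the hunks below:

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util/clock"
)

func main() {
	// Production code keeps injecting the real wall clock...
	var c clock.Clock = clock.RealClock{}
	fmt.Println(c.Now())

	// ...while tests inject a fake clock they can advance deterministically.
	fake := clock.NewFakeClock(time.Now())
	before := fake.Now()
	fake.Step(2 * time.Second)      // move fake time forward without sleeping
	fmt.Println(fake.Since(before)) // 2s
}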
@@ -21,7 +21,7 @@ import (
 	"time"

 	"github.com/golang/glog"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 )

 // ExpirationCache implements the store interface
@@ -38,7 +38,7 @@ import (
 type ExpirationCache struct {
 	cacheStorage ThreadSafeStore
 	keyFunc      KeyFunc
-	clock        util.Clock
+	clock        clock.Clock
 	expirationPolicy ExpirationPolicy
 	// expirationLock is a write lock used to guarantee that we don't clobber
 	// newly inserted objects because of a stale expiration timestamp comparison
@@ -58,7 +58,7 @@ type TTLPolicy struct {
 	Ttl time.Duration

 	// Clock used to calculate ttl expiration
-	Clock util.Clock
+	Clock clock.Clock
 }

 // IsExpired returns true if the given object is older than the ttl, or it can't
@@ -202,7 +202,7 @@ func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store {
 	return &ExpirationCache{
 		cacheStorage:     NewThreadSafeStore(Indexers{}, Indices{}),
 		keyFunc:          keyFunc,
-		clock:            util.RealClock{},
-		expirationPolicy: &TTLPolicy{ttl, util.RealClock{}},
+		clock:            clock.RealClock{},
+		expirationPolicy: &TTLPolicy{ttl, clock.RealClock{}},
 	}
 }

@@ -17,7 +17,7 @@ limitations under the License.
 package cache

 import (
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 	"k8s.io/kubernetes/pkg/util/sets"
 )

@@ -43,7 +43,7 @@ func (p *FakeExpirationPolicy) IsExpired(obj *timestampedEntry) bool {
 	return !p.NeverExpire.Has(key)
 }

-func NewFakeExpirationStore(keyFunc KeyFunc, deletedKeys chan<- string, expirationPolicy ExpirationPolicy, cacheClock util.Clock) Store {
+func NewFakeExpirationStore(keyFunc KeyFunc, deletedKeys chan<- string, expirationPolicy ExpirationPolicy, cacheClock clock.Clock) Store {
 	cacheStorage := NewThreadSafeStore(Indexers{}, Indices{})
 	return &ExpirationCache{
 		cacheStorage: &fakeThreadSafeMap{cacheStorage, deletedKeys},

@@ -21,7 +21,7 @@ import (
 	"testing"
 	"time"

-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 	"k8s.io/kubernetes/pkg/util/sets"
 	"k8s.io/kubernetes/pkg/util/wait"
 )
@@ -37,7 +37,7 @@ func TestTTLExpirationBasic(t *testing.T) {
 			return obj.(*timestampedEntry).obj.(testStoreObject).id, nil
 		},
 	},
-		util.RealClock{},
+		clock.RealClock{},
 	)
 	err := ttlStore.Add(testObj)
 	if err != nil {
@@ -71,7 +71,7 @@ func TestReAddExpiredItem(t *testing.T) {
 		},
 	}
 	ttlStore := NewFakeExpirationStore(
-		testStoreKeyFunc, deleteChan, exp, util.RealClock{})
+		testStoreKeyFunc, deleteChan, exp, clock.RealClock{})
 	testKey := "foo"
 	testObj := testStoreObject{id: testKey, val: "bar"}
 	err := ttlStore.Add(testObj)
@@ -133,7 +133,7 @@ func TestTTLList(t *testing.T) {
 			return obj.(*timestampedEntry).obj.(testStoreObject).id, nil
 		},
 	},
-		util.RealClock{},
+		clock.RealClock{},
 	)
 	for _, obj := range testObjs {
 		err := ttlStore.Add(obj)
@@ -167,7 +167,7 @@ func TestTTLPolicy(t *testing.T) {
 	exactlyOnTTL := fakeTime.Add(-ttl)
 	expiredTime := fakeTime.Add(-(ttl + 1))

-	policy := TTLPolicy{ttl, util.NewFakeClock(fakeTime)}
+	policy := TTLPolicy{ttl, clock.NewFakeClock(fakeTime)}
 	fakeTimestampedEntry := &timestampedEntry{obj: struct{}{}, timestamp: exactlyOnTTL}
 	if policy.IsExpired(fakeTimestampedEntry) {
 		t.Errorf("TTL cache should not expire entries exactly on ttl")

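These tests are the payoff of keeping Clock as a constructor parameter: a FakeClock pins down "now", so TTL assertions never race the wall clock. A short sketch, assuming only the exported TTLPolicy fields visible in this diff (they also appear in the controller test later on):

import (
	"time"

	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/util/clock"
)

func fakeTTLPolicy() *cache.TTLPolicy {
	// Freeze "now": entries stamped more than 30s before fakeTime read as expired.
	fakeTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
	return &cache.TTLPolicy{
		Ttl:   30 * time.Second,
		Clock: clock.NewFakeClock(fakeTime),
	}
}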
@@ -26,7 +26,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/runtime"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
 	"k8s.io/kubernetes/pkg/watch"

@@ -113,7 +113,7 @@ func (eventBroadcaster *eventBroadcasterImpl) StartRecordingToSink(sink EventSin
 	// The default math/rand package functions aren't thread safe, so create a
 	// new Rand object for each StartRecording call.
 	randGen := rand.New(rand.NewSource(time.Now().UnixNano()))
-	eventCorrelator := NewEventCorrelator(util.RealClock{})
+	eventCorrelator := NewEventCorrelator(clock.RealClock{})
 	return eventBroadcaster.StartEventWatcher(
 		func(event *api.Event) {
 			recordToSink(sink, event, eventCorrelator, randGen, eventBroadcaster.sleepDuration)
@@ -242,13 +242,13 @@ func (eventBroadcaster *eventBroadcasterImpl) StartEventWatcher(eventHandler fun

 // NewRecorder returns an EventRecorder that records events with the given event source.
 func (eventBroadcaster *eventBroadcasterImpl) NewRecorder(source api.EventSource) EventRecorder {
-	return &recorderImpl{source, eventBroadcaster.Broadcaster, util.RealClock{}}
+	return &recorderImpl{source, eventBroadcaster.Broadcaster, clock.RealClock{}}
 }

 type recorderImpl struct {
 	source api.EventSource
 	*watch.Broadcaster
-	clock util.Clock
+	clock clock.Clock
 }

 func (recorder *recorderImpl) generateEvent(object runtime.Object, timestamp unversioned.Time, eventtype, reason, message string) {

@@ -31,7 +31,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	k8sruntime "k8s.io/kubernetes/pkg/runtime"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 	"k8s.io/kubernetes/pkg/util/strategicpatch"
 )

@@ -346,7 +346,7 @@ func TestEventf(t *testing.T) {
 	eventBroadcaster := NewBroadcasterForTests(0)
 	sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents)

-	clock := util.NewFakeClock(time.Now())
+	clock := clock.NewFakeClock(time.Now())
 	recorder := recorderWithFakeClock(api.EventSource{Component: "eventTest"}, eventBroadcaster, clock)
 	for index, item := range table {
 		clock.Step(1 * time.Second)
@@ -375,7 +375,7 @@ func TestEventf(t *testing.T) {
 	sinkWatcher.Stop()
 }

-func recorderWithFakeClock(eventSource api.EventSource, eventBroadcaster EventBroadcaster, clock util.Clock) EventRecorder {
+func recorderWithFakeClock(eventSource api.EventSource, eventBroadcaster EventBroadcaster, clock clock.Clock) EventRecorder {
 	return &recorderImpl{eventSource, eventBroadcaster.(*eventBroadcasterImpl).Broadcaster, clock}
 }

@@ -413,7 +413,7 @@ func TestWriteEventError(t *testing.T) {
 		},
 	}

-	eventCorrelator := NewEventCorrelator(util.RealClock{})
+	eventCorrelator := NewEventCorrelator(clock.RealClock{})
 	randGen := rand.New(rand.NewSource(time.Now().UnixNano()))

 	for caseName, ent := range table {
@@ -436,7 +436,7 @@ func TestWriteEventError(t *testing.T) {
 }

 func TestUpdateExpiredEvent(t *testing.T) {
-	eventCorrelator := NewEventCorrelator(util.RealClock{})
+	eventCorrelator := NewEventCorrelator(clock.RealClock{})
 	randGen := rand.New(rand.NewSource(time.Now().UnixNano()))

 	var createdEvent *api.Event
@@ -592,7 +592,7 @@ func TestEventfNoNamespace(t *testing.T) {
 	eventBroadcaster := NewBroadcasterForTests(0)
 	sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents)

-	clock := util.NewFakeClock(time.Now())
+	clock := clock.NewFakeClock(time.Now())
 	recorder := recorderWithFakeClock(api.EventSource{Component: "eventTest"}, eventBroadcaster, clock)

 	for index, item := range table {
@@ -879,7 +879,7 @@ func TestMultiSinkCache(t *testing.T) {
 	}

 	eventBroadcaster := NewBroadcasterForTests(0)
-	clock := util.NewFakeClock(time.Now())
+	clock := clock.NewFakeClock(time.Now())
 	recorder := recorderWithFakeClock(api.EventSource{Component: "eventTest"}, eventBroadcaster, clock)

 	sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents)

@@ -27,7 +27,7 @@ import (

 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/unversioned"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 	"k8s.io/kubernetes/pkg/util/sets"
 	"k8s.io/kubernetes/pkg/util/strategicpatch"
 )
@@ -116,12 +116,12 @@ type EventAggregator struct {
 	maxIntervalInSeconds int

 	// clock is used to allow for testing over a time interval
-	clock util.Clock
+	clock clock.Clock
 }

 // NewEventAggregator returns a new instance of an EventAggregator
 func NewEventAggregator(lruCacheSize int, keyFunc EventAggregatorKeyFunc, messageFunc EventAggregatorMessageFunc,
-	maxEvents int, maxIntervalInSeconds int, clock util.Clock) *EventAggregator {
+	maxEvents int, maxIntervalInSeconds int, clock clock.Clock) *EventAggregator {
 	return &EventAggregator{
 		cache:   lru.New(lruCacheSize),
 		keyFunc: keyFunc,
@@ -207,11 +207,11 @@ type eventLog struct {
 type eventLogger struct {
 	sync.RWMutex
 	cache *lru.Cache
-	clock util.Clock
+	clock clock.Clock
 }

 // newEventLogger observes events and counts their frequencies
-func newEventLogger(lruCacheEntries int, clock util.Clock) *eventLogger {
+func newEventLogger(lruCacheEntries int, clock clock.Clock) *eventLogger {
 	return &eventLogger{cache: lru.New(lruCacheEntries), clock: clock}
 }

@@ -326,7 +326,7 @@ type EventCorrelateResult struct {
 // the same reason.
 // * Events are incrementally counted if the exact same event is encountered multiple
 // times.
-func NewEventCorrelator(clock util.Clock) *EventCorrelator {
+func NewEventCorrelator(clock clock.Clock) *EventCorrelator {
 	cacheSize := maxLruCacheEntries
 	return &EventCorrelator{
 		filterFunc: DefaultEventFilterFunc,

@@ -24,7 +24,7 @@ import (

 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/unversioned"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 	"k8s.io/kubernetes/pkg/util/diff"
 )

@@ -223,7 +223,7 @@ func TestEventCorrelator(t *testing.T) {

 	for testScenario, testInput := range scenario {
 		eventInterval := time.Duration(testInput.intervalSeconds) * time.Second
-		clock := util.IntervalClock{Time: time.Now(), Duration: eventInterval}
+		clock := clock.IntervalClock{Time: time.Now(), Duration: eventInterval}
 		correlator := NewEventCorrelator(&clock)
 		for i := range testInput.previousEvents {
 			event := testInput.previousEvents[i]

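The correlator test swaps in an IntervalClock, whose behavior (each Now() call advances the stored Time by Duration, as I read the clock package) makes successive events appear exactly intervalSeconds apart without any sleeping. A small illustrative sketch:

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util/clock"
)

func intervalClockExample() {
	c := clock.IntervalClock{Time: time.Now(), Duration: 5 * time.Second}
	first := c.Now()
	second := c.Now()
	fmt.Println(second.Sub(first)) // 5s: every Now() steps the clock by Duration
}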
@@ -39,7 +39,7 @@ import (
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/runtime/serializer/streaming"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 	"k8s.io/kubernetes/pkg/util/flowcontrol"
 	"k8s.io/kubernetes/pkg/util/httpstream"
 	"k8s.io/kubernetes/pkg/util/intstr"
@@ -972,7 +972,7 @@ func TestBackoffLifecycle(t *testing.T) {
 	// which are used in the server implementation returning StatusOK above.
 	seconds := []int{0, 1, 2, 4, 8, 0, 1, 2, 4, 0}
 	request := c.Verb("POST").Prefix("backofftest").Suffix("abc")
-	clock := util.FakeClock{}
+	clock := clock.FakeClock{}
 	request.backoffMgr = &URLBackoff{
 		// Use a fake backoff here to avoid flakes and speed the test up.
 		Backoff: flowcontrol.NewFakeBackOff(

@@ -34,7 +34,7 @@ import (
 	"k8s.io/kubernetes/pkg/controller/framework"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/runtime"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 	"k8s.io/kubernetes/pkg/util/integer"
 	"k8s.io/kubernetes/pkg/util/sets"
 )
@@ -167,12 +167,12 @@ func (r *ControllerExpectations) SatisfiedExpectations(controllerKey string) boo
 // TODO: Make this possible to disable in tests.
 // TODO: Support injection of clock.
 func (exp *ControlleeExpectations) isExpired() bool {
-	return util.RealClock{}.Since(exp.timestamp) > ExpectationsTimeout
+	return clock.RealClock{}.Since(exp.timestamp) > ExpectationsTimeout
 }

 // SetExpectations registers new expectations for the given controller. Forgets existing expectations.
 func (r *ControllerExpectations) SetExpectations(controllerKey string, add, del int) error {
 	exp := &ControlleeExpectations{add: int64(add), del: int64(del), key: controllerKey, timestamp: clock.RealClock{}.Now()}
 	glog.V(4).Infof("Setting expectations %#v", exp)
 	return r.Add(exp)
 }

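Note that isExpired and SetExpectations construct clock.RealClock{} inline: RealClock is a stateless struct, so a literal value satisfies the Clock interface anywhere one is needed. A tiny sketch (not part of the commit) of that property:

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util/clock"
)

func realClockExample() {
	var c clock.Clock = clock.RealClock{} // zero-value literal implements Clock
	start := c.Now()
	time.Sleep(10 * time.Millisecond)
	fmt.Println(c.Since(start) >= 10*time.Millisecond) // true: real time elapsed
}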
@@ -36,14 +36,15 @@ import (
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/securitycontext"
 	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 	"k8s.io/kubernetes/pkg/util/sets"
 	utiltesting "k8s.io/kubernetes/pkg/util/testing"
 )

 // NewFakeControllerExpectationsLookup creates a fake store for PodExpectations.
-func NewFakeControllerExpectationsLookup(ttl time.Duration) (*ControllerExpectations, *util.FakeClock) {
+func NewFakeControllerExpectationsLookup(ttl time.Duration) (*ControllerExpectations, *clock.FakeClock) {
 	fakeTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
-	fakeClock := util.NewFakeClock(fakeTime)
+	fakeClock := clock.NewFakeClock(fakeTime)
 	ttlPolicy := &cache.TTLPolicy{Ttl: ttl, Clock: fakeClock}
 	ttlStore := cache.NewFakeExpirationStore(
 		ExpKeyFunc, nil, ttlPolicy, fakeClock)

@@ -26,6 +26,7 @@ import (

 	"k8s.io/kubernetes/pkg/ssh"
 	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 	"k8s.io/kubernetes/pkg/util/wait"

 	"github.com/golang/glog"
@@ -54,7 +55,7 @@ type SSHTunneler struct {
 	lastSync       int64 // Seconds since Epoch
 	lastSSHKeySync int64 // Seconds since Epoch
 	lastSyncMetric prometheus.GaugeFunc
-	clock          util.Clock
+	clock          clock.Clock

 	getAddresses AddressFunc
 	stopChan     chan struct{}
@@ -66,7 +67,7 @@ func NewSSHTunneler(sshUser, sshKeyfile string, healthCheckURL *url.URL, install
 		SSHKeyfile:     sshKeyfile,
 		InstallSSHKey:  installSSHKey,
 		HealthCheckURL: healthCheckURL,
-		clock:          util.RealClock{},
+		clock:          clock.RealClock{},
 	}
 }

@@ -23,7 +23,7 @@ import (
 	"testing"
 	"time"

-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"

 	"github.com/stretchr/testify/assert"
 )
@@ -37,32 +37,32 @@ func TestSecondsSinceSync(t *testing.T) {
 	tunneler.lastSync = time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC).Unix()

 	// Nano Second. No difference.
-	tunneler.clock = util.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 1, 2, time.UTC))
+	tunneler.clock = clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 1, 2, time.UTC))
 	assert.Equal(int64(0), tunneler.SecondsSinceSync())

 	// Second
-	tunneler.clock = util.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 2, 1, time.UTC))
+	tunneler.clock = clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 2, 1, time.UTC))
 	assert.Equal(int64(1), tunneler.SecondsSinceSync())

 	// Minute
-	tunneler.clock = util.NewFakeClock(time.Date(2015, time.January, 1, 1, 2, 1, 1, time.UTC))
+	tunneler.clock = clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 2, 1, 1, time.UTC))
 	assert.Equal(int64(60), tunneler.SecondsSinceSync())

 	// Hour
-	tunneler.clock = util.NewFakeClock(time.Date(2015, time.January, 1, 2, 1, 1, 1, time.UTC))
+	tunneler.clock = clock.NewFakeClock(time.Date(2015, time.January, 1, 2, 1, 1, 1, time.UTC))
 	assert.Equal(int64(3600), tunneler.SecondsSinceSync())

 	// Day
-	tunneler.clock = util.NewFakeClock(time.Date(2015, time.January, 2, 1, 1, 1, 1, time.UTC))
+	tunneler.clock = clock.NewFakeClock(time.Date(2015, time.January, 2, 1, 1, 1, 1, time.UTC))
 	assert.Equal(int64(86400), tunneler.SecondsSinceSync())

 	// Month
-	tunneler.clock = util.NewFakeClock(time.Date(2015, time.February, 1, 1, 1, 1, 1, time.UTC))
+	tunneler.clock = clock.NewFakeClock(time.Date(2015, time.February, 1, 1, 1, 1, 1, time.UTC))
 	assert.Equal(int64(2678400), tunneler.SecondsSinceSync())

 	// Future Month. Should be -Month.
 	tunneler.lastSync = time.Date(2015, time.February, 1, 1, 1, 1, 1, time.UTC).Unix()
-	tunneler.clock = util.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC))
+	tunneler.clock = clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC))
 	assert.Equal(int64(-2678400), tunneler.SecondsSinceSync())
 }

@@ -24,7 +24,7 @@ import (
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	"k8s.io/kubernetes/pkg/kubelet/status"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 )

 const (
@@ -35,7 +35,7 @@ const (
 // activeDeadlineHandler knows how to enforce active deadlines on pods.
 type activeDeadlineHandler struct {
 	// the clock to use for deadline enforcement
-	clock util.Clock
+	clock clock.Clock
 	// the provider of pod status
 	podStatusProvider status.PodStatusProvider
 	// the recorder to dispatch events when we identify a pod has exceeded active deadline
@@ -46,7 +46,7 @@ type activeDeadlineHandler struct {
 func newActiveDeadlineHandler(
 	podStatusProvider status.PodStatusProvider,
 	recorder record.EventRecorder,
-	clock util.Clock,
+	clock clock.Clock,
 ) (*activeDeadlineHandler, error) {

 	// check for all required fields

@@ -24,7 +24,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/types"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 )

 // mockPodStatusProvider returns the status on the specified pod
@@ -45,7 +45,7 @@ func (m *mockPodStatusProvider) GetPodStatus(uid types.UID) (api.PodStatus, bool
 // TestActiveDeadlineHandler verifies the active deadline handler functions as expected.
 func TestActiveDeadlineHandler(t *testing.T) {
 	pods := newTestPods(4)
-	fakeClock := util.NewFakeClock(time.Now())
+	fakeClock := clock.NewFakeClock(time.Now())
 	podStatusProvider := &mockPodStatusProvider{pods: pods}
 	fakeRecorder := &record.FakeRecorder{}
 	handler, err := newActiveDeadlineHandler(podStatusProvider, fakeRecorder, fakeClock)

@@ -52,7 +52,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/runtime"
 	kubetypes "k8s.io/kubernetes/pkg/types"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 	uexec "k8s.io/kubernetes/pkg/util/exec"
 	"k8s.io/kubernetes/pkg/util/flowcontrol"
 	"k8s.io/kubernetes/pkg/util/intstr"
@@ -998,7 +998,7 @@ func TestSyncPodWithRestartPolicy(t *testing.T) {
 }

 func TestSyncPodBackoff(t *testing.T) {
-	var fakeClock = util.NewFakeClock(time.Now())
+	var fakeClock = clock.NewFakeClock(time.Now())
 	startTime := fakeClock.Now()

 	dm, fakeDocker := newTestDockerManager()

@@ -28,14 +28,14 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/qos"
 	"k8s.io/kubernetes/pkg/kubelet/server/stats"
 	"k8s.io/kubernetes/pkg/kubelet/util/format"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 	"k8s.io/kubernetes/pkg/util/wait"
 )

 // managerImpl implements NodeStabilityManager
 type managerImpl struct {
 	// used to track time
-	clock util.Clock
+	clock clock.Clock
 	// config is how the manager is configured
 	config Config
 	// the function to invoke to kill a pod
@@ -66,7 +66,7 @@ func NewManager(
 	killPodFunc KillPodFunc,
 	recorder record.EventRecorder,
 	nodeRef *api.ObjectReference,
-	clock util.Clock) (Manager, lifecycle.PodAdmitHandler, error) {
+	clock clock.Clock) (Manager, lifecycle.PodAdmitHandler, error) {
 	manager := &managerImpl{
 		clock:       clock,
 		killPodFunc: killPodFunc,

@@ -26,7 +26,7 @@ import (
 	statsapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	"k8s.io/kubernetes/pkg/types"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 )

 // mockPodKiller is used to testing which pod is killed
@@ -93,7 +93,7 @@ func TestMemoryPressure(t *testing.T) {
 		return pods
 	}

-	fakeClock := util.NewFakeClock(time.Now())
+	fakeClock := clock.NewFakeClock(time.Now())
 	podKiller := &mockPodKiller{}
 	nodeRef := &api.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}

@@ -28,7 +28,7 @@ import (
 	cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
 	"k8s.io/kubernetes/pkg/kubelet/container"
 	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 )

 var zero time.Time
@@ -445,7 +445,7 @@ func TestGarbageCollectImageNotOldEnough(t *testing.T) {
 		}},
 	}

-	fakeClock := util.NewFakeClock(time.Now())
+	fakeClock := clock.NewFakeClock(time.Now())
 	t.Log(fakeClock.Now())
 	require.NoError(t, manager.detectImages(fakeClock.Now()))
 	require.Equal(t, manager.imageRecordsLen(), 2)

@@ -26,7 +26,7 @@ import (
 	"k8s.io/kubernetes/pkg/client/record"
 	. "k8s.io/kubernetes/pkg/kubelet/container"
 	ctest "k8s.io/kubernetes/pkg/kubelet/container/testing"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 	"k8s.io/kubernetes/pkg/util/flowcontrol"
 )

@@ -101,7 +101,7 @@ func TestPuller(t *testing.T) {
 	}

 	backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
-	fakeClock := util.NewFakeClock(time.Now())
+	fakeClock := clock.NewFakeClock(time.Now())
 	backOff.Clock = fakeClock

 	fakeRuntime := &ctest.FakeRuntime{}

@@ -26,7 +26,7 @@ import (
 	"k8s.io/kubernetes/pkg/client/record"
 	. "k8s.io/kubernetes/pkg/kubelet/container"
 	ctest "k8s.io/kubernetes/pkg/kubelet/container/testing"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 	"k8s.io/kubernetes/pkg/util/flowcontrol"
 )

@@ -101,7 +101,7 @@ func TestSerializedPuller(t *testing.T) {
 	}

 	backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
-	fakeClock := util.NewFakeClock(time.Now())
+	fakeClock := clock.NewFakeClock(time.Now())
 	backOff.Clock = fakeClock

 	fakeRuntime := &ctest.FakeRuntime{}

@@ -73,8 +73,8 @@ import (
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/securitycontext"
 	"k8s.io/kubernetes/pkg/types"
-	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/pkg/util/bandwidth"
+	"k8s.io/kubernetes/pkg/util/clock"
 	utilerrors "k8s.io/kubernetes/pkg/util/errors"
 	utilexec "k8s.io/kubernetes/pkg/util/exec"
 	"k8s.io/kubernetes/pkg/util/flowcontrol"
@@ -354,7 +354,7 @@ func NewMainKubelet(
 		flannelExperimentalOverlay: flannelExperimentalOverlay,
 		flannelHelper:              nil,
 		nodeIP:                     nodeIP,
-		clock:                      util.RealClock{},
+		clock:                      clock.RealClock{},
 		outOfDiskTransitionFrequency: outOfDiskTransitionFrequency,
 		reservation:                  reservation,
 		enableCustomMetrics:          enableCustomMetrics,
@@ -466,7 +466,7 @@ func NewMainKubelet(
 	// TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency
 	klet.resourceAnalyzer = stats.NewResourceAnalyzer(klet, volumeStatsAggPeriod, klet.containerRuntime)

-	klet.pleg = pleg.NewGenericPLEG(klet.containerRuntime, plegChannelCapacity, plegRelistPeriod, klet.podCache, util.RealClock{})
+	klet.pleg = pleg.NewGenericPLEG(klet.containerRuntime, plegChannelCapacity, plegRelistPeriod, klet.podCache, clock.RealClock{})
 	klet.runtimeState = newRuntimeState(maxWaitForContainerRuntime)
 	klet.updatePodCIDR(podCIDR)

@@ -779,7 +779,7 @@ type Kubelet struct {

 	// clock is an interface that provides time related functionality in a way that makes it
 	// easy to test the code.
-	clock util.Clock
+	clock clock.Clock

 	// outOfDiskTransitionFrequency specifies the amount of time the kubelet has to be actually
 	// not out of disk before it can transition the node condition status from out-of-disk to

@@ -61,7 +61,7 @@ import (
 	kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/types"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 	"k8s.io/kubernetes/pkg/util/diff"
 	"k8s.io/kubernetes/pkg/util/flowcontrol"
 	"k8s.io/kubernetes/pkg/util/mount"
@@ -99,7 +99,7 @@ type TestKubelet struct {
 	fakeCadvisor     *cadvisortest.Mock
 	fakeKubeClient   *fake.Clientset
 	fakeMirrorClient *podtest.FakeMirrorClient
-	fakeClock        *util.FakeClock
+	fakeClock        *clock.FakeClock
 	mounter          mount.Interface
 	volumePlugin     *volumetest.FakeVolumePlugin
 }
@@ -198,7 +198,7 @@ func newTestKubeletWithImageList(
 		LowThresholdPercent: 80,
 	}
 	kubelet.imageManager, err = newImageManager(fakeRuntime, mockCadvisor, fakeRecorder, fakeNodeRef, fakeImageGCPolicy)
-	fakeClock := util.NewFakeClock(time.Now())
+	fakeClock := clock.NewFakeClock(time.Now())
 	kubelet.backOff = flowcontrol.NewBackOff(time.Second, time.Minute)
 	kubelet.backOff.Clock = fakeClock
 	kubelet.podKillingCh = make(chan *kubecontainer.PodPair, 20)
@@ -211,7 +211,7 @@ func newTestKubeletWithImageList(
 	}
 	kubelet.workQueue = queue.NewBasicWorkQueue(fakeClock)
 	// Relist period does not affect the tests.
-	kubelet.pleg = pleg.NewGenericPLEG(fakeRuntime, 100, time.Hour, nil, util.RealClock{})
+	kubelet.pleg = pleg.NewGenericPLEG(fakeRuntime, 100, time.Hour, nil, clock.RealClock{})
 	kubelet.clock = fakeClock
 	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()

@@ -25,7 +25,7 @@ import (
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/metrics"
 	"k8s.io/kubernetes/pkg/types"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 	"k8s.io/kubernetes/pkg/util/sets"
 	"k8s.io/kubernetes/pkg/util/wait"
 )
@@ -59,7 +59,7 @@ type GenericPLEG struct {
 	// Cache for storing the runtime states required for syncing pods.
 	cache kubecontainer.Cache
 	// For testability.
-	clock util.Clock
+	clock clock.Clock
 	// Pods that failed to have their status retrieved during a relist. These pods will be
 	// retried during the next relisting.
 	podsToReinspect map[types.UID]*kubecontainer.Pod
@@ -98,7 +98,7 @@ type podRecord struct {
 type podRecords map[types.UID]*podRecord

 func NewGenericPLEG(runtime kubecontainer.Runtime, channelCapacity int,
-	relistPeriod time.Duration, cache kubecontainer.Cache, clock util.Clock) PodLifecycleEventGenerator {
+	relistPeriod time.Duration, cache kubecontainer.Cache, clock clock.Clock) PodLifecycleEventGenerator {
 	return &GenericPLEG{
 		relistPeriod: relistPeriod,
 		runtime:      runtime,

@@ -28,7 +28,7 @@ import (
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
 	"k8s.io/kubernetes/pkg/types"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 	"k8s.io/kubernetes/pkg/util/diff"
 )

@@ -39,12 +39,12 @@ const (
 type TestGenericPLEG struct {
 	pleg    *GenericPLEG
 	runtime *containertest.FakeRuntime
-	clock   *util.FakeClock
+	clock   *clock.FakeClock
 }

 func newTestGenericPLEG() *TestGenericPLEG {
 	fakeRuntime := &containertest.FakeRuntime{}
-	clock := util.NewFakeClock(time.Time{})
+	clock := clock.NewFakeClock(time.Time{})
 	// The channel capacity should be large enough to hold all events in a
 	// single test.
 	pleg := &GenericPLEG{
@@ -246,7 +246,7 @@ func newTestGenericPLEGWithRuntimeMock() (*GenericPLEG, *containertest.Mock) {
 		eventChannel: make(chan *PodLifecycleEvent, 100),
 		podRecords:   make(podRecords),
 		cache:        kubecontainer.NewCache(),
-		clock:        util.RealClock{},
+		clock:        clock.RealClock{},
 	}
 	return pleg, runtimeMock
 }

@@ -29,7 +29,7 @@ import (
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/kubelet/util/queue"
 	"k8s.io/kubernetes/pkg/types"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 )

 // fakePodWorkers runs sync pod function in serial, so we can have
@@ -99,7 +99,7 @@ func createPodWorkers() (*podWorkers, map[types.UID][]syncPodRecord) {
 			return nil
 		},
 		fakeRecorder,
-		queue.NewBasicWorkQueue(&util.RealClock{}),
+		queue.NewBasicWorkQueue(&clock.RealClock{}),
 		time.Second,
 		time.Second,
 		fakeCache,
@@ -279,7 +279,7 @@ func TestFakePodWorkers(t *testing.T) {
 	kubeletForRealWorkers := &simpleFakeKubelet{}
 	kubeletForFakeWorkers := &simpleFakeKubelet{}

-	realPodWorkers := newPodWorkers(kubeletForRealWorkers.syncPodWithWaitGroup, fakeRecorder, queue.NewBasicWorkQueue(&util.RealClock{}), time.Second, time.Second, fakeCache)
+	realPodWorkers := newPodWorkers(kubeletForRealWorkers.syncPodWithWaitGroup, fakeRecorder, queue.NewBasicWorkQueue(&clock.RealClock{}), time.Second, time.Second, fakeCache)
 	fakePodWorkers := &fakePodWorkers{kubeletForFakeWorkers.syncPod, fakeCache, t}

 	tests := []struct {

@@ -40,7 +40,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/status"
 	"k8s.io/kubernetes/pkg/kubelet/volumemanager"
 	"k8s.io/kubernetes/pkg/types"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 	utiltesting "k8s.io/kubernetes/pkg/util/testing"
 	"k8s.io/kubernetes/pkg/volume"
 	volumetest "k8s.io/kubernetes/pkg/volume/testing"
@@ -79,7 +79,7 @@ func TestRunOnce(t *testing.T) {
 		diskSpaceManager: diskSpaceManager,
 		containerRuntime: fakeRuntime,
 		reasonCache:      NewReasonCache(),
-		clock:            util.RealClock{},
+		clock:            clock.RealClock{},
 		kubeClient:       &fake.Clientset{},
 		hostname:         testKubeletHostname,
 		nodeName:         testKubeletHostname,

@@ -22,7 +22,7 @@ import (
 	"time"

 	expirationCache "k8s.io/kubernetes/pkg/client/cache"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 )

 type testObject struct {
@@ -31,7 +31,7 @@ type testObject struct {
 }

 // A fake objectCache for unit test.
-func NewFakeObjectCache(f func() (interface{}, error), ttl time.Duration, clock util.Clock) *ObjectCache {
+func NewFakeObjectCache(f func() (interface{}, error), ttl time.Duration, clock clock.Clock) *ObjectCache {
 	ttlPolicy := &expirationCache.TTLPolicy{Ttl: ttl, Clock: clock}
 	deleteChan := make(chan string, 1)
 	return &ObjectCache{
@@ -47,7 +47,7 @@ func TestAddAndGet(t *testing.T) {
 	}
 	objectCache := NewFakeObjectCache(func() (interface{}, error) {
 		return nil, fmt.Errorf("Unexpected Error: updater should never be called in this test!")
-	}, 1*time.Hour, util.NewFakeClock(time.Now()))
+	}, 1*time.Hour, clock.NewFakeClock(time.Now()))

 	err := objectCache.Add(testObj.key, testObj.val)
 	if err != nil {
@@ -72,7 +72,7 @@ func TestExpirationBasic(t *testing.T) {
 		val: unexpectedVal,
 	}

-	fakeClock := util.NewFakeClock(time.Now())
+	fakeClock := clock.NewFakeClock(time.Now())

 	objectCache := NewFakeObjectCache(func() (interface{}, error) {
 		return expectedVal, nil

@@ -21,7 +21,7 @@ import (
 	"time"

 	"k8s.io/kubernetes/pkg/types"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 )

 // WorkQueue allows queuing items with a timestamp. An item is
@@ -34,14 +34,14 @@ type WorkQueue interface {
 }

 type basicWorkQueue struct {
-	clock util.Clock
+	clock clock.Clock
 	lock  sync.Mutex
 	queue map[types.UID]time.Time
 }

 var _ WorkQueue = &basicWorkQueue{}

-func NewBasicWorkQueue(clock util.Clock) WorkQueue {
+func NewBasicWorkQueue(clock clock.Clock) WorkQueue {
 	queue := make(map[types.UID]time.Time)
 	return &basicWorkQueue{queue: queue, clock: clock}
 }

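NewBasicWorkQueue takes the clock as a parameter, which is the seam the kubelet tests earlier in this diff exploit: production callers pass RealClock, tests pass a *FakeClock and step it. A sketch of both call sites (the queue package path is taken from the import hunks above):

import (
	"time"

	"k8s.io/kubernetes/pkg/kubelet/util/queue"
	"k8s.io/kubernetes/pkg/util/clock"
)

func workQueueExample() (production, testable queue.WorkQueue) {
	// Production: timestamps come from the real wall clock.
	production = queue.NewBasicWorkQueue(clock.RealClock{})

	// Test: the caller owns time and can advance it at will.
	fakeClock := clock.NewFakeClock(time.Now())
	testable = queue.NewBasicWorkQueue(fakeClock)
	fakeClock.Step(time.Minute) // previously enqueued work now reads as due
	return production, testable
}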
@@ -21,12 +21,12 @@ import (
 	"time"

 	"k8s.io/kubernetes/pkg/types"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 	"k8s.io/kubernetes/pkg/util/sets"
 )

-func newTestBasicWorkQueue() (*basicWorkQueue, *util.FakeClock) {
-	fakeClock := util.NewFakeClock(time.Now())
+func newTestBasicWorkQueue() (*basicWorkQueue, *clock.FakeClock) {
+	fakeClock := clock.NewFakeClock(time.Now())
 	wq := &basicWorkQueue{
 		clock: fakeClock,
 		queue: make(map[types.UID]time.Time),

@@ -27,7 +27,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/meta"
 	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/runtime"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 	"k8s.io/kubernetes/pkg/watch"
 )

@@ -96,7 +96,7 @@ type watchCache struct {
 	onEvent func(watchCacheEvent)

 	// for testing timeouts.
-	clock util.Clock
+	clock clock.Clock
 }

 func newWatchCache(capacity int) *watchCache {
@@ -107,7 +107,7 @@ func newWatchCache(capacity int) *watchCache {
 		endIndex:        0,
 		store:           cache.NewStore(cache.MetaNamespaceKeyFunc),
 		resourceVersion: 0,
-		clock:           util.RealClock{},
+		clock:           clock.RealClock{},
 	}
 	wc.cond = sync.NewCond(wc.RLocker())
 	return wc

@@ -26,7 +26,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/runtime"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 	"k8s.io/kubernetes/pkg/util/sets"
 	"k8s.io/kubernetes/pkg/util/wait"
 	"k8s.io/kubernetes/pkg/watch"
@@ -45,7 +45,7 @@ func makeTestPod(name string, resourceVersion uint64) *api.Pod {
 // newTestWatchCache just adds a fake clock.
 func newTestWatchCache(capacity int) *watchCache {
 	wc := newWatchCache(capacity)
-	wc.clock = util.NewFakeClock(time.Now())
+	wc.clock = clock.NewFakeClock(time.Now())
 	return wc
 }

@@ -262,7 +262,7 @@ func TestWaitUntilFreshAndList(t *testing.T) {

 func TestWaitUntilFreshAndListTimeout(t *testing.T) {
 	store := newTestWatchCache(3)
-	fc := store.clock.(*util.FakeClock)
+	fc := store.clock.(*clock.FakeClock)

 	// In background, step clock after the below call starts the timer.
 	go func() {

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package util
+package clock

 import (
 	"sync"

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package util
+package clock

 import (
 	"testing"

@@ -20,7 +20,7 @@ import (
 	"sync"
 	"time"

-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 	"k8s.io/kubernetes/pkg/util/integer"
 )

@@ -31,13 +31,13 @@ type backoffEntry struct {

 type Backoff struct {
 	sync.Mutex
-	Clock           util.Clock
+	Clock           clock.Clock
 	defaultDuration time.Duration
 	maxDuration     time.Duration
 	perItemBackoff  map[string]*backoffEntry
 }

-func NewFakeBackOff(initial, max time.Duration, tc *util.FakeClock) *Backoff {
+func NewFakeBackOff(initial, max time.Duration, tc *clock.FakeClock) *Backoff {
 	return &Backoff{
 		perItemBackoff: map[string]*backoffEntry{},
 		Clock:          tc,
@@ -49,7 +49,7 @@ func NewFakeBackOff(initial, max time.Duration, tc *util.FakeClock) *Backoff {
 func NewBackOff(initial, max time.Duration) *Backoff {
 	return &Backoff{
 		perItemBackoff:  map[string]*backoffEntry{},
-		Clock:           util.RealClock{},
+		Clock:           clock.RealClock{},
 		defaultDuration: initial,
 		maxDuration:     max,
 	}

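NewFakeBackOff is what lets the backoff tests below run without sleeping: the test owns the *FakeClock and advances it explicitly. A sketch under that assumption (Next and Get are the flowcontrol API as I recall it; they do not appear in this diff):

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util/clock"
	"k8s.io/kubernetes/pkg/util/flowcontrol"
)

func fakeBackoffExample() {
	tc := clock.NewFakeClock(time.Now())
	b := flowcontrol.NewFakeBackOff(time.Second, 50*time.Second, tc)

	b.Next("some-id", tc.Now())   // record a failure for this key
	tc.Step(time.Second)          // advance fake time instead of sleeping
	fmt.Println(b.Get("some-id")) // current backoff delay for the key
}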
@@ -17,14 +17,14 @@ limitations under the License.
 package flowcontrol

 import (
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 	"testing"
 	"time"
 )

 func TestSlowBackoff(t *testing.T) {
 	id := "_idSlow"
-	tc := util.NewFakeClock(time.Now())
+	tc := clock.NewFakeClock(time.Now())
 	step := time.Second
 	maxDuration := 50 * step

@@ -50,7 +50,7 @@ func TestSlowBackoff(t *testing.T) {

 func TestBackoffReset(t *testing.T) {
 	id := "_idReset"
-	tc := util.NewFakeClock(time.Now())
+	tc := clock.NewFakeClock(time.Now())
 	step := time.Second
 	maxDuration := step * 5
 	b := NewFakeBackOff(step, maxDuration, tc)
@@ -76,7 +76,7 @@ func TestBackoffReset(t *testing.T) {

 func TestBackoffHightWaterMark(t *testing.T) {
 	id := "_idHiWaterMark"
-	tc := util.NewFakeClock(time.Now())
+	tc := clock.NewFakeClock(time.Now())
 	step := time.Second
 	maxDuration := 5 * step
 	b := NewFakeBackOff(step, maxDuration, tc)
@@ -98,7 +98,7 @@ func TestBackoffHightWaterMark(t *testing.T) {

 func TestBackoffGC(t *testing.T) {
 	id := "_idGC"
-	tc := util.NewFakeClock(time.Now())
+	tc := clock.NewFakeClock(time.Now())
 	step := time.Second
 	maxDuration := 5 * step

@@ -126,7 +126,7 @@ func TestBackoffGC(t *testing.T) {

 func TestIsInBackOffSinceUpdate(t *testing.T) {
 	id := "_idIsInBackOffSinceUpdate"
-	tc := util.NewFakeClock(time.Now())
+	tc := clock.NewFakeClock(time.Now())
 	step := time.Second
 	maxDuration := 10 * step
 	b := NewFakeBackOff(step, maxDuration, tc)

@@ -20,7 +20,7 @@ import (
 	"sort"
 	"time"

-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
 )

@@ -34,10 +34,10 @@ type DelayingInterface interface {

 // NewDelayingQueue constructs a new workqueue with delayed queuing ability
 func NewDelayingQueue() DelayingInterface {
-	return newDelayingQueue(util.RealClock{})
+	return newDelayingQueue(clock.RealClock{})
 }

-func newDelayingQueue(clock util.Clock) DelayingInterface {
+func newDelayingQueue(clock clock.Clock) DelayingInterface {
 	ret := &delayingType{
 		Interface: New(),
 		clock:     clock,
@@ -57,7 +57,7 @@ type delayingType struct {
 	Interface

 	// clock tracks time for delayed firing
-	clock util.Clock
+	clock clock.Clock

 	// stopCh lets us signal a shutdown to the waiting loop
 	stopCh chan struct{}

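Only the exported NewDelayingQueue (wired to RealClock) is public; tests inside the package reach the unexported newDelayingQueue to inject a FakeClock, as the hunks below show. A usage sketch of the exported surface (AddAfter/Get/Done/ShutDown are the workqueue API as I understand it; only NewDelayingQueue itself appears in this diff):

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util/workqueue"
)

func delayingQueueExample() {
	q := workqueue.NewDelayingQueue()
	q.AddAfter("foo", 50*time.Millisecond) // released only after the delay elapses

	item, shutdown := q.Get() // blocks until the clock releases "foo"
	fmt.Println(item, shutdown)
	q.Done(item)
	q.ShutDown()
}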
@@ -22,12 +22,12 @@ import (
 	"testing"
 	"time"

-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 	"k8s.io/kubernetes/pkg/util/wait"
 )

 func TestSimpleQueue(t *testing.T) {
-	fakeClock := util.NewFakeClock(time.Now())
+	fakeClock := clock.NewFakeClock(time.Now())
 	q := newDelayingQueue(fakeClock)

 	first := "foo"
@@ -69,7 +69,7 @@ func TestSimpleQueue(t *testing.T) {
 }

 func TestDeduping(t *testing.T) {
-	fakeClock := util.NewFakeClock(time.Now())
+	fakeClock := clock.NewFakeClock(time.Now())
 	q := newDelayingQueue(fakeClock)

 	first := "foo"
@@ -128,7 +128,7 @@ func TestDeduping(t *testing.T) {
 }

 func TestAddTwoFireEarly(t *testing.T) {
-	fakeClock := util.NewFakeClock(time.Now())
+	fakeClock := clock.NewFakeClock(time.Now())
 	q := newDelayingQueue(fakeClock)

 	first := "foo"
@@ -178,7 +178,7 @@ func TestAddTwoFireEarly(t *testing.T) {
 }

 func TestCopyShifting(t *testing.T) {
-	fakeClock := util.NewFakeClock(time.Now())
+	fakeClock := clock.NewFakeClock(time.Now())
 	q := newDelayingQueue(fakeClock)

 	first := "foo"

@@ -20,13 +20,13 @@ import (
 	"testing"
 	"time"

-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/clock"
 )

 func TestRateLimitingQueue(t *testing.T) {
 	limiter := NewItemExponentialFailureRateLimiter(1*time.Millisecond, 1*time.Second)
 	queue := NewRateLimitingQueue(limiter).(*rateLimitingType)
-	fakeClock := util.NewFakeClock(time.Now())
+	fakeClock := clock.NewFakeClock(time.Now())
 	delayingQueue := &delayingType{
 		Interface: New(),
 		clock:     fakeClock,