mirror of https://github.com/k3s-io/k3s
Merge pull request #76702 from wojtek-t/reduce_watchcache_contention
Reduce contention in watchcache by not calling event handler under lock
Tag: k3s-v1.15.3
Commit: 9a1572b70a
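The change, shown hunk by hunk below, moves the call to the watch cache's event handler out of the critical section: bookkeeping still happens under the watchCache lock, but the handler that fans events out to watchers runs only after the lock is released. A minimal standalone sketch of that pattern in Go follows; the cache, Event and newCache names are hypothetical stand-ins, not the Kubernetes types.

package main

import (
	"fmt"
	"sync"
)

// Event is a hypothetical payload, standing in for *watchCacheEvent.
type Event struct{ Key string }

// cache is a hypothetical stand-in for watchCache: mutex-protected state
// plus an event handler supplied at construction time.
type cache struct {
	sync.Mutex
	items        map[string]Event
	eventHandler func(Event)
}

func newCache(handler func(Event)) *cache {
	return &cache{items: map[string]Event{}, eventHandler: handler}
}

// processEvent updates the state under the lock, but calls the handler only
// after the anonymous function (and therefore the deferred Unlock) returns.
// As in the patch, this is safe as long as at most one call to processEvent
// is in flight at any point in time.
func (c *cache) processEvent(e Event) error {
	if err := func() error {
		c.Lock()
		defer c.Unlock()
		c.items[e.Key] = e
		return nil
	}(); err != nil {
		return err
	}

	// The handler may be slow (fanning events out to watchers, channel sends);
	// running it here keeps that work off the critical section.
	if c.eventHandler != nil {
		c.eventHandler(e)
	}
	return nil
}

func main() {
	c := newCache(func(e Event) { fmt.Println("handled", e.Key) })
	if err := c.processEvent(Event{Key: "pods/foo"}); err != nil {
		fmt.Println("error:", err)
	}
}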
File: cacher.go

@@ -291,11 +291,6 @@ type Cacher struct {
 // given configuration.
 func NewCacherFromConfig(config Config) *Cacher {
 	stopCh := make(chan struct{})
-
-	watchCache := newWatchCache(config.CacheCapacity, config.KeyFunc, config.GetAttrsFunc, config.Versioner)
-	listerWatcher := NewCacherListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc)
-	reflectorName := "storage/cacher.go:" + config.ResourcePrefix
-
 	obj := config.NewFunc()
 	// Give this error when it is constructed rather than when you get the
 	// first watch item, because it's much easier to track down that way.
@@ -303,18 +298,11 @@ func NewCacherFromConfig(config Config) *Cacher {
 		panic("storage codec doesn't seem to match given type: " + err.Error())
 	}
 
-	reflector := cache.NewNamedReflector(reflectorName, listerWatcher, obj, watchCache, 0)
-	// Configure reflector's pager to for an appropriate pagination chunk size for fetching data from
-	// storage. The pager falls back to full list if paginated list calls fail due to an "Expired" error.
-	reflector.WatchListPageSize = storageWatchListPageSize
-
 	clock := clock.RealClock{}
 	cacher := &Cacher{
 		ready:                newReady(),
 		storage:              config.Storage,
 		objectType:           reflect.TypeOf(obj),
-		watchCache:           watchCache,
-		reflector:            reflector,
 		versioner:            config.Versioner,
 		newFunc:              config.NewFunc,
 		triggerFunc:          config.TriggerPublisherFunc,
@@ -337,7 +325,27 @@ func NewCacherFromConfig(config Config) *Cacher {
 		bookmarkWatchers:     newTimeBucketWatchers(clock),
 		watchBookmarkEnabled: utilfeature.DefaultFeatureGate.Enabled(features.WatchBookmark),
 	}
-	watchCache.SetOnEvent(cacher.processEvent)
+
+	// Ensure that timer is stopped.
+	if !cacher.timer.Stop() {
+		// Consume triggered (but not yet received) timer event
+		// so that future reuse does not get a spurious timeout.
+		<-cacher.timer.C
+	}
+
+	watchCache := newWatchCache(
+		config.CacheCapacity, config.KeyFunc, cacher.processEvent, config.GetAttrsFunc, config.Versioner)
+	listerWatcher := NewCacherListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc)
+	reflectorName := "storage/cacher.go:" + config.ResourcePrefix
+
+	reflector := cache.NewNamedReflector(reflectorName, listerWatcher, obj, watchCache, 0)
+	// Configure reflector's pager to for an appropriate pagination chunk size for fetching data from
+	// storage. The pager falls back to full list if paginated list calls fail due to an "Expired" error.
+	reflector.WatchListPageSize = storageWatchListPageSize
+
+	cacher.watchCache = watchCache
+	cacher.reflector = reflector
+
 	go cacher.dispatchEvents()
 
 	cacher.stopWg.Add(1)
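Two things move in the hunk above. The watchCache and reflector are now built after the Cacher literal, because newWatchCache takes cacher.processEvent as an argument, so the Cacher value has to exist first; they are then wired in through cacher.watchCache and cacher.reflector. The timer-stop block that also moves here is the standard time.Timer idiom: Stop reports whether it cancelled the timer, and when it did not (the timer already fired), the pending value has to be drained from the channel so a later Reset is not followed by a spurious timeout. A small hedged illustration of that idiom on its own:

package main

import (
	"fmt"
	"time"
)

func main() {
	// A zero-duration timer fires almost immediately; this sketch assumes the
	// cacher's dispatch timer is created the same way before first use.
	t := time.NewTimer(0)

	// Ensure that the timer is stopped before it is reused.
	if !t.Stop() {
		// Stop returned false: the timer already fired, so consume the
		// triggered (but not yet received) event; otherwise a later Reset
		// would be followed by a stale, spurious timeout.
		<-t.C
	}

	// The timer can now be reset and reused safely.
	t.Reset(10 * time.Millisecond)
	<-t.C
	fmt.Println("timer fired after reset")
}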
@@ -352,13 +360,6 @@ func NewCacherFromConfig(config Config) *Cacher {
 		)
 	}()
 
-	// Ensure that timer is stopped.
-	if !cacher.timer.Stop() {
-		// Consume triggered (but not yet received) timer event
-		// so that future reuse does not get a spurious timeout.
-		<-cacher.timer.C
-	}
-
 	return cacher
 }
File: watch_cache.go

@@ -125,7 +125,7 @@ type watchCache struct {
 
 	// This handler is run at the end of every Add/Update/Delete method
 	// and additionally gets the previous value of the object.
-	onEvent func(*watchCacheEvent)
+	eventHandler func(*watchCacheEvent)
 
 	// for testing timeouts.
 	clock clock.Clock
@@ -137,6 +137,7 @@ type watchCache struct {
 func newWatchCache(
 	capacity int,
 	keyFunc func(runtime.Object) (string, error),
+	eventHandler func(*watchCacheEvent),
 	getAttrsFunc func(runtime.Object) (labels.Set, fields.Set, error),
 	versioner storage.Versioner) *watchCache {
 	wc := &watchCache{
@@ -149,6 +150,7 @@ func newWatchCache(
 		store:               cache.NewStore(storeElementKey),
 		resourceVersion:     0,
 		listResourceVersion: 0,
+		eventHandler:        eventHandler,
 		clock:               clock.RealClock{},
 		versioner:           versioner,
 	}
@@ -204,6 +206,8 @@ func (w *watchCache) objectToVersionedRuntimeObject(obj interface{}) (runtime.Ob
 	return object, resourceVersion, nil
 }
 
+// processEvent is safe as long as there is at most one call to it in flight
+// at any point in time.
 func (w *watchCache) processEvent(event watch.Event, resourceVersion uint64, updateFunc func(*storeElement) error) error {
 	key, err := w.keyFunc(event.Object)
 	if err != nil {
@@ -224,30 +228,41 @@ func (w *watchCache) processEvent(event watch.Event, resourceVersion uint64, upd
 		ResourceVersion: resourceVersion,
 	}
 
-	// TODO: We should consider moving this lock below after the watchCacheEvent
-	// is created. In such situation, the only problematic scenario is Replace(
-	// happening after getting object from store and before acquiring a lock.
-	// Maybe introduce another lock for this purpose.
-	w.Lock()
-	defer w.Unlock()
-
-	previous, exists, err := w.store.Get(elem)
-	if err != nil {
-		return err
-	}
-	if exists {
-		previousElem := previous.(*storeElement)
-		watchCacheEvent.PrevObject = previousElem.Object
-		watchCacheEvent.PrevObjLabels = previousElem.Labels
-		watchCacheEvent.PrevObjFields = previousElem.Fields
-	}
-
-	w.updateCache(watchCacheEvent)
-	w.resourceVersion = resourceVersion
-
-	if w.onEvent != nil {
-		w.onEvent(watchCacheEvent)
-	}
-	w.cond.Broadcast()
-	return updateFunc(elem)
+	if err := func() error {
+		// TODO: We should consider moving this lock below after the watchCacheEvent
+		// is created. In such situation, the only problematic scenario is Replace(
+		// happening after getting object from store and before acquiring a lock.
+		// Maybe introduce another lock for this purpose.
+		w.Lock()
+		defer w.Unlock()
+
+		previous, exists, err := w.store.Get(elem)
+		if err != nil {
+			return err
+		}
+		if exists {
+			previousElem := previous.(*storeElement)
+			watchCacheEvent.PrevObject = previousElem.Object
+			watchCacheEvent.PrevObjLabels = previousElem.Labels
+			watchCacheEvent.PrevObjFields = previousElem.Fields
+		}
+
+		w.updateCache(watchCacheEvent)
+		w.resourceVersion = resourceVersion
+		defer w.cond.Broadcast()
+
+		return updateFunc(elem)
+	}(); err != nil {
+		return err
+	}
+
+	// Avoid calling event handler under lock.
+	// This is safe as long as there is at most one call to processEvent in flight
+	// at any point in time.
+	if w.eventHandler != nil {
+		w.eventHandler(watchCacheEvent)
+	}
+	return nil
 }
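The restructured processEvent above is the heart of the change: the anonymous function scopes w.Lock/defer w.Unlock (and the deferred cond.Broadcast) to the store update, and w.eventHandler is invoked only after it returns. As the new comment notes, this is only correct because at most one processEvent call is in flight at a time; in the cacher that follows from the reflector feeding the store serially. The sketch below shows one general way to uphold that invariant by funnelling events through a single worker goroutine; it is an illustration of the invariant, not how the reflector pipeline is actually wired, and all names are hypothetical.

package main

import (
	"fmt"
	"sync"
)

// event is a hypothetical stand-in for watch.Event.
type event struct{ key string }

// serialProcessor funnels every event through one worker goroutine, so that
// process (and the handler it calls outside the lock) never has two calls
// in flight at once.
type serialProcessor struct {
	mu      sync.Mutex
	state   map[string]event
	handler func(event)
	in      chan event
	done    chan struct{}
}

func newSerialProcessor(handler func(event)) *serialProcessor {
	p := &serialProcessor{
		state:   map[string]event{},
		handler: handler,
		in:      make(chan event),
		done:    make(chan struct{}),
	}
	go p.run()
	return p
}

func (p *serialProcessor) run() {
	defer close(p.done)
	for e := range p.in {
		p.process(e)
	}
}

func (p *serialProcessor) process(e event) {
	func() {
		p.mu.Lock()
		defer p.mu.Unlock()
		p.state[e.key] = e // bookkeeping stays under the lock
	}()
	p.handler(e) // the handler runs outside the lock, but still strictly in order
}

// Enqueue may be called from several goroutines; events from any single
// producer are handled in the order they were sent.
func (p *serialProcessor) Enqueue(e event) { p.in <- e }

func main() {
	p := newSerialProcessor(func(e event) { fmt.Println("handled", e.key) })
	for i := 0; i < 3; i++ {
		p.Enqueue(event{key: fmt.Sprintf("obj-%d", i)})
	}
	close(p.in)
	<-p.done
}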
@@ -397,12 +412,6 @@ func (w *watchCache) SetOnReplace(onReplace func()) {
 	w.onReplace = onReplace
 }
 
-func (w *watchCache) SetOnEvent(onEvent func(*watchCacheEvent)) {
-	w.Lock()
-	defer w.Unlock()
-	w.onEvent = onEvent
-}
-
 func (w *watchCache) GetAllEventsSinceThreadUnsafe(resourceVersion uint64) ([]*watchCacheEvent, error) {
 	size := w.endIndex - w.startIndex
 	var oldest uint64
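With the handler injected through newWatchCache, the eventHandler field is written exactly once, before the cache is shared with the reflector, so the SetOnEvent setter deleted above (and the locking it needed) is no longer required. A tiny hedged sketch of that constructor-injection shape, with hypothetical store and cacheEvent names:

package main

import "fmt"

// cacheEvent is a hypothetical stand-in for *watchCacheEvent.
type cacheEvent struct{ name string }

// store receives its handler at construction time. Because the field is never
// reassigned after newStore returns, callers can invoke it without taking a
// lock; in the patch the equivalent guarantee comes from newWatchCache setting
// eventHandler before the cache is handed to the reflector.
type store struct {
	eventHandler func(cacheEvent)
}

func newStore(handler func(cacheEvent)) *store {
	return &store{eventHandler: handler}
}

func (s *store) add(e cacheEvent) {
	// ... bookkeeping would go here ...
	if s.eventHandler != nil {
		s.eventHandler(e)
	}
}

func main() {
	s := newStore(func(e cacheEvent) { fmt.Println("event:", e.name) })
	s.add(cacheEvent{name: "pods/bar"})
}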
File: watch_cache_test.go

@@ -76,7 +76,8 @@ func newTestWatchCache(capacity int) *watchCache {
 		return labels.Set(pod.Labels), fields.Set{"spec.nodeName": pod.Spec.NodeName}, nil
 	}
 	versioner := etcd.APIObjectVersioner{}
-	wc := newWatchCache(capacity, keyFunc, getAttrsFunc, versioner)
+	mockHandler := func(*watchCacheEvent) {}
+	wc := newWatchCache(capacity, keyFunc, mockHandler, getAttrsFunc, versioner)
 	wc.clock = clock.NewFakeClock(time.Now())
 	return wc
 }
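Because newWatchCache now requires a handler, the test helper above passes a no-op function. A test that wants to observe deliveries can pass a counting closure instead; the sketch below is a hypothetical illustration, not part of the patch:

package main

import (
	"fmt"
	"sync/atomic"
)

// event stands in for *watchCacheEvent in this sketch.
type event struct{ key string }

// cacheUnderTest is a placeholder for a component that, like the patched
// newWatchCache, takes its event handler as a constructor argument.
type cacheUnderTest struct{ handler func(event) }

func newCacheUnderTest(h func(event)) *cacheUnderTest { return &cacheUnderTest{handler: h} }

func (c *cacheUnderTest) add(e event) { c.handler(e) }

func main() {
	var calls int64
	// A counting handler lets a test assert how many events were delivered;
	// the no-op mockHandler in the diff is the degenerate case of this.
	handler := func(event) { atomic.AddInt64(&calls, 1) }

	c := newCacheUnderTest(handler)
	c.add(event{key: "a"})
	c.add(event{key: "b"})

	fmt.Println("handler invoked", atomic.LoadInt64(&calls), "times") // prints 2
}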