/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import (
	"fmt"
	"reflect"
	"strconv"
	"strings"
	"sync"
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/meta"
	"k8s.io/kubernetes/pkg/api/rest"
	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/conversion"
	"k8s.io/kubernetes/pkg/runtime"
	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/pkg/watch"

	"github.com/golang/glog"
	"golang.org/x/net/context"
)

// CacherConfig contains the configuration for a given Cacher.
type CacherConfig struct {
	// Maximum size of the history cached in memory.
	CacheCapacity int

	// An underlying storage.Interface.
	Storage Interface

	// An underlying storage.Versioner.
	Versioner Versioner

	// The Cacher will cache objects of a given Type and assumes that they
	// are all stored under the ResourcePrefix directory in the underlying
	// database.
	Type           interface{}
	ResourcePrefix string

	// KeyFunc is used to get a key in the underlying storage for a given object.
	KeyFunc func(runtime.Object) (string, error)

	// NewListFunc is a function that creates a new, empty object storing a
	// list of objects of type Type.
	NewListFunc func() runtime.Object
}

// Cacher is responsible for serving WATCH and LIST requests for a given
// resource from its internal cache and updating its cache in the background
// based on the underlying storage contents.
// Cacher implements storage.Interface (although most of the calls are just
// delegated to the underlying storage).
type Cacher struct {
	sync.RWMutex

	// Each user-facing method that is not simply redirected to the underlying
	// storage has to read-lock on this mutex before starting any processing.
	// This is necessary to prevent users from accessing structures that are
	// uninitialized or are being repopulated right now.
	// NOTE: We cannot easily reuse the main mutex for it due to multi-threaded
	// interactions of Cacher with the underlying WatchCache. Since Cacher is
	// calling WatchCache directly and WatchCache is calling Cacher methods
	// via its OnEvent and OnReplace hooks, we explicitly assume that if mutexes
	// of both structures are held, the one from WatchCache is acquired first
	// to avoid deadlocks. Unfortunately, forcing this rule in startCaching
	// would be very difficult and introducing one more mutex seems to be much
	// easier.
	usable sync.RWMutex

	// Underlying storage.Interface.
	storage Interface

	// "Sliding window" of recent changes to objects and the current state.
	watchCache *watchCache
	reflector  *cache.Reflector

	// Registered watchers.
	watcherIdx int
	watchers   map[int]*cacheWatcher

	// versioner is used to handle resource versions.
	versioner Versioner

	// keyFunc is used to get a key in the underlying storage for a given object.
	keyFunc func(runtime.Object) (string, error)

	// Handling graceful termination.
	stopLock sync.RWMutex
	stopped  bool
	stopCh   chan struct{}
	stopWg   sync.WaitGroup
}

// NewCacher creates a new Cacher responsible for serving WATCH and LIST
// requests from its internal cache and updating its cache in the background
// based on the given configuration.
func NewCacher(
	storage Interface,
	capacity int,
	versioner Versioner,
	objectType runtime.Object,
	resourcePrefix string,
	scopeStrategy rest.NamespaceScopedStrategy,
	newListFunc func() runtime.Object) Interface {
	config := CacherConfig{
		CacheCapacity:  capacity,
		Storage:        storage,
		Versioner:      versioner,
		Type:           objectType,
		ResourcePrefix: resourcePrefix,
		NewListFunc:    newListFunc,
	}
	if scopeStrategy.NamespaceScoped() {
		config.KeyFunc = func(obj runtime.Object) (string, error) {
			return NamespaceKeyFunc(resourcePrefix, obj)
		}
	} else {
		config.KeyFunc = func(obj runtime.Object) (string, error) {
			return NoNamespaceKeyFunc(resourcePrefix, obj)
		}
	}
	return NewCacherFromConfig(config)
}
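
// Illustrative wiring (a sketch, not code from this package): a registry for
// a namespaced resource might construct its cached storage roughly as below.
// The identifiers podStorage, podVersioner, and podStrategy are hypothetical
// stand-ins for an etcd-backed storage.Interface, its Versioner, and a
// rest.NamespaceScopedStrategy.
//
//	cached := NewCacher(
//		podStorage,   // underlying storage.Interface
//		1000,         // capacity of the in-memory history window
//		podVersioner, // resource-version handling
//		&api.Pod{},   // cached object type
//		"pods",       // resource prefix in the underlying database
//		podStrategy,  // decides namespaced vs. cluster-scoped keys
//		func() runtime.Object { return &api.PodList{} },
//	)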

// NewCacherFromConfig creates a new Cacher responsible for serving WATCH and
// LIST requests from its internal cache and updating its cache in the
// background based on the given configuration.
func NewCacherFromConfig(config CacherConfig) *Cacher {
	watchCache := newWatchCache(config.CacheCapacity)
	listerWatcher := newCacherListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc)

	// Give this error when it is constructed rather than when you get the
	// first watch item, because it's much easier to track down that way.
	if obj, ok := config.Type.(runtime.Object); ok {
		if err := runtime.CheckCodec(config.Storage.Codec(), obj); err != nil {
			panic("storage codec doesn't seem to match given type: " + err.Error())
		}
	}

	cacher := &Cacher{
		usable:     sync.RWMutex{},
		storage:    config.Storage,
		watchCache: watchCache,
		reflector:  cache.NewReflector(listerWatcher, config.Type, watchCache, 0),
		watcherIdx: 0,
		watchers:   make(map[int]*cacheWatcher),
		versioner:  config.Versioner,
		keyFunc:    config.KeyFunc,
		stopped:    false,
		// We need to (potentially) stop both:
		// - the wait.Until go-routine
		// - reflector.ListAndWatch
		// and there are no guarantees on the order in which they will stop.
		// So we simply close the channel and synchronize on the WaitGroup.
		stopCh: make(chan struct{}),
		stopWg: sync.WaitGroup{},
	}
	// See the startCaching method for an explanation and for where this is unlocked.
	cacher.usable.Lock()
	watchCache.SetOnEvent(cacher.processEvent)

	stopCh := cacher.stopCh
	cacher.stopWg.Add(1)
	go func() {
		defer cacher.stopWg.Done()
		wait.Until(
			func() {
				if !cacher.isStopped() {
					cacher.startCaching(stopCh)
				}
			}, time.Second, stopCh,
		)
	}()
	return cacher
}

// startCaching runs a single list-and-watch cycle against the underlying
// storage, blocking until the watch disconnects or stopChannel is closed.
func (c *Cacher) startCaching(stopChannel <-chan struct{}) {
	// The 'usable' lock is always 'RLock'able when it is safe to use the cache.
	// It is safe to use the cache after a successful list until a disconnection.
	// We start with usable (write) locked. The below OnReplace function will
	// unlock it after a successful list. The below defer will then re-lock
	// it when this function exits (always due to disconnection), but only if
	// we actually got a successful list. This cycle will repeat as needed.
	successfulList := false
	c.watchCache.SetOnReplace(func() {
		successfulList = true
		c.usable.Unlock()
	})
	defer func() {
		if successfulList {
			c.usable.Lock()
		}
	}()

	c.terminateAllWatchers()
	// Note that since OnReplace may not be called due to errors, we explicitly
	// need to retry it on errors under the lock.
	// Also note that startCaching is called in a loop, so there's no need
	// to have another loop here.
	if err := c.reflector.ListAndWatch(stopChannel); err != nil {
		glog.Errorf("unexpected ListAndWatch error: %v", err)
	}
}
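
// Lifecycle sketch of the 'usable' lock (an illustrative summary of the code
// above, not additional behavior):
//
//	NewCacherFromConfig    usable.Lock()    // cache not yet populated
//	OnReplace callback     usable.Unlock()  // first successful list; cache usable
//	startCaching returns   usable.Lock()    // watch disconnected; repopulating
//
// Readers (Watch, List, LastSyncResourceVersion) take usable.RLock(), so they
// block only while the cache is unpopulated or being repopulated.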

// Implements storage.Interface.
func (c *Cacher) Backends(ctx context.Context) []string {
	return c.storage.Backends(ctx)
}

// Implements storage.Interface.
func (c *Cacher) Versioner() Versioner {
	return c.storage.Versioner()
}

// Implements storage.Interface.
func (c *Cacher) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error {
	return c.storage.Create(ctx, key, obj, out, ttl)
}

// Implements storage.Interface.
func (c *Cacher) Delete(ctx context.Context, key string, out runtime.Object, preconditions *Preconditions) error {
	return c.storage.Delete(ctx, key, out, preconditions)
}

// Implements storage.Interface.
func (c *Cacher) Watch(ctx context.Context, key string, resourceVersion string, filter FilterFunc) (watch.Interface, error) {
	watchRV, err := ParseWatchResourceVersion(resourceVersion)
	if err != nil {
		return nil, err
	}

	// Do NOT allow Watch to start when the underlying structures are not yet propagated.
	c.usable.RLock()
	defer c.usable.RUnlock()

	// We explicitly use the thread-unsafe version and do the locking ourselves
	// to ensure that no new events will be processed in the meantime. The
	// watchCache will be unlocked on return from this function.
	// Note that we cannot do it under the Cacher lock, to avoid a deadlock,
	// since the underlying watchCache is calling processEvent under its lock.
	c.watchCache.RLock()
	defer c.watchCache.RUnlock()
	initEvents, err := c.watchCache.GetAllEventsSinceThreadUnsafe(watchRV)
	if err != nil {
		return nil, err
	}

	c.Lock()
	defer c.Unlock()
	watcher := newCacheWatcher(initEvents, filterFunction(key, c.keyFunc, filter), forgetWatcher(c, c.watcherIdx))
	c.watchers[c.watcherIdx] = watcher
	c.watcherIdx++
	return watcher, nil
}
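
// Consumption sketch (hypothetical caller): events are delivered on the
// returned watch.Interface until the caller stops it or the watcher is
// terminated as too slow (see cacheWatcher.add below).
//
//	w, err := cached.Watch(ctx, "/pods/ns1", "1234", Everything)
//	if err != nil {
//		return err
//	}
//	defer w.Stop()
//	for event := range w.ResultChan() {
//		// event.Type is watch.Added, watch.Modified, or watch.Deleted.
//	}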

// Implements storage.Interface.
func (c *Cacher) WatchList(ctx context.Context, key string, resourceVersion string, filter FilterFunc) (watch.Interface, error) {
	return c.Watch(ctx, key, resourceVersion, filter)
}

// Implements storage.Interface.
func (c *Cacher) Get(ctx context.Context, key string, objPtr runtime.Object, ignoreNotFound bool) error {
	return c.storage.Get(ctx, key, objPtr, ignoreNotFound)
}

// Implements storage.Interface.
func (c *Cacher) GetToList(ctx context.Context, key string, filter FilterFunc, listObj runtime.Object) error {
	return c.storage.GetToList(ctx, key, filter, listObj)
}

// Implements storage.Interface.
func (c *Cacher) List(ctx context.Context, key string, resourceVersion string, filter FilterFunc, listObj runtime.Object) error {
	if resourceVersion == "" {
		// If resourceVersion is not specified, serve it from the underlying
		// storage (for backward compatibility).
		return c.storage.List(ctx, key, resourceVersion, filter, listObj)
	}

	// If resourceVersion is specified, serve it from the cache.
	// It's guaranteed that the returned value is at least as fresh as the
	// given resourceVersion.

	listRV, err := ParseListResourceVersion(resourceVersion)
	if err != nil {
		return err
	}

	// To avoid a situation where List is processed before the underlying
	// watchCache is propagated for the first time, we acquire and immediately
	// release the 'usable' lock.
	// We don't need to hold it all the time, because watchCache is thread-safe
	// and holding it would complicate the already very difficult locking pattern.
	c.usable.RLock()
	c.usable.RUnlock()

	// List elements from the cache, with a resource version of at least 'listRV'.
	listPtr, err := meta.GetItemsPtr(listObj)
	if err != nil {
		return err
	}
	listVal, err := conversion.EnforcePtr(listPtr)
	if err != nil || listVal.Kind() != reflect.Slice {
		return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind())
	}
	filterFunc := filterFunction(key, c.keyFunc, filter)

	objs, readResourceVersion, err := c.watchCache.WaitUntilFreshAndList(listRV)
	if err != nil {
		return fmt.Errorf("failed to wait for fresh list: %v", err)
	}
	for _, obj := range objs {
		object, ok := obj.(runtime.Object)
		if !ok {
			return fmt.Errorf("non runtime.Object returned from storage: %v", obj)
		}
		if filterFunc(object) {
			listVal.Set(reflect.Append(listVal, reflect.ValueOf(object).Elem()))
		}
	}
	if c.versioner != nil {
		if err := c.versioner.UpdateList(listObj, readResourceVersion); err != nil {
			return err
		}
	}
	return nil
}
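
// Call sketch (hypothetical caller): listing everything under a prefix into a
// typed list object, served from the cache because a resourceVersion is given.
//
//	podList := &api.PodList{}
//	err := cached.List(ctx, "/pods/ns1", "1234", Everything, podList)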

// Implements storage.Interface.
func (c *Cacher) GuaranteedUpdate(ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool, preconditions *Preconditions, tryUpdate UpdateFunc) error {
	return c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate)
}

// Implements storage.Interface.
func (c *Cacher) Codec() runtime.Codec {
	return c.storage.Codec()
}

// processEvent is called by the watchCache (via its OnEvent hook) and fans the
// event out to all registered watchers.
func (c *Cacher) processEvent(event watchCacheEvent) {
	c.Lock()
	defer c.Unlock()
	for _, watcher := range c.watchers {
		watcher.add(event)
	}
}

// terminateAllWatchers stops and unregisters all currently registered watchers.
func (c *Cacher) terminateAllWatchers() {
	c.Lock()
	defer c.Unlock()
	for key, watcher := range c.watchers {
		delete(c.watchers, key)
		watcher.stop()
	}
}

// isStopped reports whether Stop() has already been called.
func (c *Cacher) isStopped() bool {
	c.stopLock.RLock()
	defer c.stopLock.RUnlock()
	return c.stopped
}

// Stop terminates the background caching goroutine and waits until it exits.
func (c *Cacher) Stop() {
	c.stopLock.Lock()
	c.stopped = true
	c.stopLock.Unlock()
	close(c.stopCh)
	c.stopWg.Wait()
}
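
// Shutdown sketch: a Cacher owner is expected to call Stop exactly once, e.g.
//
//	cached := NewCacherFromConfig(config)
//	defer cached.Stop() // closes stopCh and waits for the wait.Until goroutine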

// forgetWatcher returns a function that unregisters the watcher with the given
// index; 'lock' says whether the Cacher lock still needs to be acquired.
func forgetWatcher(c *Cacher, index int) func(bool) {
	return func(lock bool) {
		if lock {
			c.Lock()
			defer c.Unlock()
		}
		// It's possible that the watcher is already not in the map (e.g. in the
		// case of simultaneous Stop() and terminateAllWatchers()), but that
		// doesn't break anything.
		delete(c.watchers, index)
	}
}

// filterFunction wraps the given filter so that it also rejects objects whose
// storage key does not start with the given key prefix.
func filterFunction(key string, keyFunc func(runtime.Object) (string, error), filter FilterFunc) FilterFunc {
	return func(obj runtime.Object) bool {
		objKey, err := keyFunc(obj)
		if err != nil {
			glog.Errorf("invalid object for filter: %v", obj)
			return false
		}
		if !strings.HasPrefix(objKey, key) {
			return false
		}
		return filter(obj)
	}
}
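
// Behavior sketch (podInNs1 and podInNs2 are hypothetical objects): with key
// "/pods/ns1", an object whose computed key is "/pods/ns1/foo" is passed to
// the inner filter, while one under "/pods/ns2/" is rejected outright.
//
//	f := filterFunction("/pods/ns1", c.keyFunc, Everything)
//	f(podInNs1) // true: prefix matches and Everything passes
//	f(podInNs2) // false: prefix does not match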

// LastSyncResourceVersion returns the resource version to which the underlying
// cache is synced.
func (c *Cacher) LastSyncResourceVersion() (uint64, error) {
	// To avoid a situation where LastSyncResourceVersion is processed before
	// the underlying watchCache is propagated, we acquire the 'usable' lock.
	c.usable.RLock()
	defer c.usable.RUnlock()

	c.RLock()
	defer c.RUnlock()

	resourceVersion := c.reflector.LastSyncResourceVersion()
	if resourceVersion == "" {
		return 0, nil
	}
	return strconv.ParseUint(resourceVersion, 10, 64)
}

// cacherListerWatcher wraps a storage.Interface to expose it as a
// cache.ListerWatcher.
type cacherListerWatcher struct {
	storage        Interface
	resourcePrefix string
	newListFunc    func() runtime.Object
}

// newCacherListerWatcher returns a cache.ListerWatcher backed by the given storage.
func newCacherListerWatcher(storage Interface, resourcePrefix string, newListFunc func() runtime.Object) cache.ListerWatcher {
	return &cacherListerWatcher{
		storage:        storage,
		resourcePrefix: resourcePrefix,
		newListFunc:    newListFunc,
	}
}

// Implements cache.ListerWatcher interface.
func (lw *cacherListerWatcher) List(options api.ListOptions) (runtime.Object, error) {
	list := lw.newListFunc()
	if err := lw.storage.List(context.TODO(), lw.resourcePrefix, "", Everything, list); err != nil {
		return nil, err
	}
	return list, nil
}

// Implements cache.ListerWatcher interface.
func (lw *cacherListerWatcher) Watch(options api.ListOptions) (watch.Interface, error) {
	return lw.storage.WatchList(context.TODO(), lw.resourcePrefix, options.ResourceVersion, Everything)
}

// cacheWatcher implements watch.Interface.
type cacheWatcher struct {
	sync.Mutex
	input   chan watchCacheEvent
	result  chan watch.Event
	filter  FilterFunc
	stopped bool
	forget  func(bool)
}

// newCacheWatcher creates a watcher, replays initEvents to it, and starts its
// background processing goroutine.
func newCacheWatcher(initEvents []watchCacheEvent, filter FilterFunc, forget func(bool)) *cacheWatcher {
	watcher := &cacheWatcher{
		input:   make(chan watchCacheEvent, 10),
		result:  make(chan watch.Event, 10),
		filter:  filter,
		stopped: false,
		forget:  forget,
	}
	go watcher.process(initEvents)
	return watcher
}

// Implements watch.Interface.
func (c *cacheWatcher) ResultChan() <-chan watch.Event {
	return c.result
}

// Implements watch.Interface.
func (c *cacheWatcher) Stop() {
	c.forget(true)
	c.stop()
}

// stop marks the watcher as stopped and closes its input channel (at most once).
func (c *cacheWatcher) stop() {
	c.Lock()
	defer c.Unlock()
	if !c.stopped {
		c.stopped = true
		close(c.input)
	}
}

// add delivers an event to the watcher, terminating the watcher if it fails
// to accept the event within 5 seconds.
func (c *cacheWatcher) add(event watchCacheEvent) {
	t := time.NewTimer(5 * time.Second)
	defer t.Stop()
	select {
	case c.input <- event:
	case <-t.C:
		// This means that we couldn't send the event to that watcher.
		// Since we don't want to block on it infinitely,
		// we simply terminate it.
		c.forget(false)
		c.stop()
	}
}

// sendWatchCacheEvent translates a watchCacheEvent into a watch.Event based on
// whether the old and new versions of the object pass the watcher's filter.
func (c *cacheWatcher) sendWatchCacheEvent(event watchCacheEvent) {
	curObjPasses := event.Type != watch.Deleted && c.filter(event.Object)
	oldObjPasses := false
	if event.PrevObject != nil {
		oldObjPasses = c.filter(event.PrevObject)
	}
	if !curObjPasses && !oldObjPasses {
		// Watcher is not interested in that object.
		return
	}

	object, err := api.Scheme.Copy(event.Object)
	if err != nil {
		glog.Errorf("unexpected copy error: %v", err)
		return
	}
	switch {
	case curObjPasses && !oldObjPasses:
		c.result <- watch.Event{Type: watch.Added, Object: object}
	case curObjPasses && oldObjPasses:
		c.result <- watch.Event{Type: watch.Modified, Object: object}
	case !curObjPasses && oldObjPasses:
		c.result <- watch.Event{Type: watch.Deleted, Object: object}
	}
}
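
// Filter-transition table implemented by the switch above (a deleted object
// never counts as passing, since curObjPasses requires event.Type != Deleted):
//
//	old passes | new passes | emitted event
//	-----------+------------+--------------
//	    no     |    yes     | Added
//	    yes    |    yes     | Modified
//	    yes    |    no      | Deleted
//	    no     |    no      | (nothing)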

// process replays initEvents and then pumps events from the input channel to
// the result channel until the input channel is closed.
func (c *cacheWatcher) process(initEvents []watchCacheEvent) {
	defer utilruntime.HandleCrash()

	for _, event := range initEvents {
		c.sendWatchCacheEvent(event)
	}
	defer close(c.result)
	defer c.Stop()
	for {
		event, ok := <-c.input
		if !ok {
			return
		}
		c.sendWatchCacheEvent(event)
	}
}