Merge pull request #66862 from resouer/sync-map

Automatic merge from submit-queue (batch tested with PRs 66862, 67618). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Use sync.Map to scale equiv class cache better

**What this PR does / why we need it**:

Change the current lock in the first-level ecache into `sync.Map`, which is known to scale better than `sync.Mutex` on machines with more than 8 CPUs.

ref: https://golang.org/pkg/sync/#Map
 
The code is also much cleaner this way.
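For illustration, here is a minimal standalone sketch contrasting the two patterns. The type and method names (`mutexCache`, `syncMapCache`, `nodeCache`, `get`) are simplified placeholders, not the actual scheduler types:

```go
package main

import (
	"fmt"
	"sync"
)

// nodeCache stands in for the per-node predicate cache.
type nodeCache struct{}

// mutexCache is the pattern being replaced: every lookup takes the lock.
type mutexCache struct {
	mu    sync.RWMutex
	nodes map[string]*nodeCache
}

func (c *mutexCache) get(name string) (*nodeCache, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	n, ok := c.nodes[name]
	if !ok {
		n = &nodeCache{}
		c.nodes[name] = n
	}
	return n, ok
}

// syncMapCache is the new pattern: the embedded sync.Map handles its own
// synchronization, which avoids lock contention on read-mostly workloads.
type syncMapCache struct {
	sync.Map // keys are node names, values are *nodeCache
}

func (c *syncMapCache) get(name string) (*nodeCache, bool) {
	v, loaded := c.LoadOrStore(name, &nodeCache{})
	return v.(*nodeCache), loaded
}

func main() {
	old := &mutexCache{nodes: make(map[string]*nodeCache)}
	_, existed := old.get("node-1")
	fmt.Println(existed) // false: entry created under the lock

	c := &syncMapCache{}
	_, loaded := c.get("node-1")
	fmt.Println(loaded) // false: entry created on first access
	_, loaded = c.get("node-1")
	fmt.Println(loaded) // true: existing entry reused afterwards
}
```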

5k Nodes, 10k Pods benchmark with ecache enabled on a 64-core VM:

```bash
// before
BenchmarkScheduling/5000Nodes/0Pods-64             10000          17550089 ns/op

// after
BenchmarkScheduling/5000Nodes/0Pods-64             10000          16975098 ns/op
```
Compared to the current implementation, the improvement from this change is noticeable, and the result is stable on 8-, 16-, and 64-core VMs.
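The numbers above come from the scheduler's own benchmark suite. As a rough, standalone illustration of the underlying contention difference (not the scheduler benchmark itself; the package and benchmark names below are made up), a micro-benchmark like this can be dropped into a `_test.go` file:

```go
package cache

import (
	"sync"
	"testing"
)

// BenchmarkRWMutexMapRead reads a single key through an RWMutex-guarded map.
func BenchmarkRWMutexMapRead(b *testing.B) {
	var mu sync.RWMutex
	m := map[string]int{"node-1": 1}
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			mu.RLock()
			_ = m["node-1"]
			mu.RUnlock()
		}
	})
}

// BenchmarkSyncMapRead reads the same key through a sync.Map.
func BenchmarkSyncMapRead(b *testing.B) {
	var m sync.Map
	m.Store("node-1", 1)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			m.Load("node-1")
		}
	})
}
```

Running it with something like `go test -bench=. -cpu=1,8,64` shows how the gap tends to widen as GOMAXPROCS grows; the exact deltas depend on the hardware.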

**Special notes for your reviewer**:

**Release note**:

```release-note
Use sync.Map to scale ecache better
```
Kubernetes Submit Queue 2018-08-21 00:24:01 -07:00 committed by GitHub
commit 4cca6a89a0
2 changed files with 14 additions and 27 deletions


@@ -35,9 +35,6 @@ import (
"github.com/golang/glog"
)
// nodeMap stores a *Cache for each node.
type nodeMap map[string]*NodeCache
// Cache is a thread safe map saves and reuses the output of predicate functions,
// it uses node name as key to access those cached results.
//
@@ -45,17 +42,13 @@ type nodeMap map[string]*NodeCache
// class". (Equivalence class is defined in the `Class` type.) Saved results
// will be reused until an appropriate invalidation function is called.
type Cache struct {
// NOTE(harry): Theoretically sync.Map has better performance in machine with 8+ CPUs, while
// the reality is lock contention in first level cache is rare.
mu sync.RWMutex
nodeToCache nodeMap
// i.e. map[string]*NodeCache
sync.Map
}
// NewCache create an empty equiv class cache.
func NewCache() *Cache {
return &Cache{
nodeToCache: make(nodeMap),
}
return new(Cache)
}
// NodeCache saves and reuses the output of predicate functions. Use RunPredicate to
@@ -83,12 +76,8 @@ func newNodeCache() *NodeCache {
// it creates the NodeCache and returns it.
// The boolean flag is true if the value was loaded, false if created.
func (c *Cache) GetNodeCache(name string) (nodeCache *NodeCache, exists bool) {
c.mu.Lock()
defer c.mu.Unlock()
if nodeCache, exists = c.nodeToCache[name]; !exists {
nodeCache = newNodeCache()
c.nodeToCache[name] = nodeCache
}
v, exists := c.LoadOrStore(name, newNodeCache())
nodeCache = v.(*NodeCache)
return
}
@@ -97,12 +86,13 @@ func (c *Cache) InvalidatePredicates(predicateKeys sets.String) {
if len(predicateKeys) == 0 {
return
}
c.mu.RLock()
defer c.mu.RUnlock()
for _, n := range c.nodeToCache {
c.Range(func(k, v interface{}) bool {
n := v.(*NodeCache)
n.invalidatePreds(predicateKeys)
}
return true
})
glog.V(5).Infof("Cache invalidation: node=*,predicates=%v", predicateKeys)
}
// InvalidatePredicatesOnNode clears cached results for the given predicates on one node.
@@ -110,9 +100,8 @@ func (c *Cache) InvalidatePredicatesOnNode(nodeName string, predicateKeys sets.S
if len(predicateKeys) == 0 {
return
}
c.mu.RLock()
defer c.mu.RUnlock()
if n, ok := c.nodeToCache[nodeName]; ok {
if v, ok := c.Load(nodeName); ok {
n := v.(*NodeCache)
n.invalidatePreds(predicateKeys)
}
glog.V(5).Infof("Cache invalidation: node=%s,predicates=%v", nodeName, predicateKeys)
@@ -120,9 +109,7 @@ func (c *Cache) InvalidatePredicatesOnNode(nodeName string, predicateKeys sets.S
// InvalidateAllPredicatesOnNode clears all cached results for one node.
func (c *Cache) InvalidateAllPredicatesOnNode(nodeName string) {
c.mu.Lock()
defer c.mu.Unlock()
delete(c.nodeToCache, nodeName)
c.Delete(nodeName)
glog.V(5).Infof("Cache invalidation: node=%s,predicates=*", nodeName)
}
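For readers unfamiliar with `sync.Map`, the following self-contained sketch walks through the four operations the new cache relies on (`LoadOrStore`, `Load`, `Range`, and `Delete`). The types and predicate key are simplified placeholders, not the actual scheduler code:

```go
package main

import (
	"fmt"
	"sync"
)

// nodeCache stands in for the per-node predicate cache; the real NodeCache
// carries its own internal locking.
type nodeCache struct {
	preds map[string]bool
}

// cache mirrors the new first-level cache: an embedded sync.Map keyed by node name.
type cache struct {
	sync.Map // map[node name]*nodeCache
}

func main() {
	c := &cache{}

	// LoadOrStore: fetch the per-node cache, creating it on first use
	// (the pattern GetNodeCache now follows).
	v, loaded := c.LoadOrStore("node-1", &nodeCache{preds: map[string]bool{"GeneralPredicates": true}})
	fmt.Println("reused existing entry:", loaded, "predicates:", v.(*nodeCache).preds)

	// Load: invalidate predicates on a single node, if it is cached
	// (as in InvalidatePredicatesOnNode).
	if v, ok := c.Load("node-1"); ok {
		delete(v.(*nodeCache).preds, "GeneralPredicates")
	}

	// Range: walk every node to invalidate predicates cluster-wide
	// (as in InvalidatePredicates).
	c.Range(func(key, value interface{}) bool {
		fmt.Println("node:", key, "remaining predicates:", value.(*nodeCache).preds)
		return true // returning true keeps the iteration going
	})

	// Delete: drop a node's cache entirely (as in InvalidateAllPredicatesOnNode).
	c.Delete("node-1")
}
```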


@@ -731,7 +731,7 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) {
// there should be no cached predicate any more
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if nodeCache, exist := ecache.nodeToCache[test.nodeName]; exist {
if nodeCache, exist := ecache.GetNodeCache(test.nodeName); exist {
if _, exist := nodeCache.cache[testPredicate]; exist {
t.Errorf("Failed: cached item for predicate key: %v on node: %v should be invalidated",
testPredicate, test.nodeName)