Merge pull request #74907 from bsalamat/num_cpu

Revert "Use runtime.NumCPU() instead of a fixed value for parallel scheduler threads"
commit b033f0c1d1
Kubernetes Prow Robot, 2019-03-05 09:50:36 -08:00 (committed by GitHub)
3 changed files with 6 additions and 9 deletions
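
For context, the change being reverted had swapped the scheduler's fixed parallelism of 16 for runtime.NumCPU() in its workqueue.ParallelizeUntil calls; this commit restores the constant. Below is a minimal, hedged sketch of the pattern the diffs touch: workqueue.ParallelizeUntil(ctx, workers, pieces, doWorkPiece) from k8s.io/client-go runs doWorkPiece over the piece indices with the given number of workers. The node slice and the body of processNode here are illustrative placeholders, not the scheduler's real data structures.

package main

import (
	"context"
	"fmt"
	"sync/atomic"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// Illustrative stand-in for the scheduler's list of candidate node names.
	allNodeNames := []string{"node-1", "node-2", "node-3", "node-4"}

	var processed int64
	processNode := func(i int) {
		// The scheduler's processNode/checkNode closures look up node info and
		// run predicate or priority logic; here we only record the piece.
		_ = allNodeNames[i]
		atomic.AddInt64(&processed, 1)
	}

	// After this revert the worker count is the fixed value 16 again, rather
	// than runtime.NumCPU(), so the number of parallel workers no longer
	// depends on how many cores the scheduler's host has.
	workqueue.ParallelizeUntil(context.TODO(), 16, len(allNodeNames), processNode)

	fmt.Printf("processed %d of %d nodes\n", processed, len(allNodeNames))
}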

@@ -19,7 +19,6 @@ package predicates
 import (
 	"context"
 	"fmt"
-	"runtime"
 	"sync"
 
 	"k8s.io/klog"
@@ -416,7 +415,7 @@ func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*s
 			appendTopologyPairsMaps(existingPodTopologyMaps)
 		}
 	}
-	workqueue.ParallelizeUntil(context.TODO(), runtime.NumCPU(), len(allNodeNames), processNode)
+	workqueue.ParallelizeUntil(context.TODO(), 16, len(allNodeNames), processNode)
 	return topologyMaps, firstError
 }
@@ -504,7 +503,7 @@ func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, nodeInfoMap map[s
 			appendResult(node.Name, nodeTopologyPairsAffinityPodsMaps, nodeTopologyPairsAntiAffinityPodsMaps)
 		}
 	}
-	workqueue.ParallelizeUntil(context.TODO(), runtime.NumCPU(), len(allNodeNames), processNode)
+	workqueue.ParallelizeUntil(context.TODO(), 16, len(allNodeNames), processNode)
 	return topologyPairsAffinityPodsMaps, topologyPairsAntiAffinityPodsMaps, firstError
 }

@@ -18,7 +18,6 @@ package priorities
 import (
 	"context"
-	"runtime"
 	"sync"
 
 	"k8s.io/api/core/v1"
@@ -212,7 +211,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
 			}
 		}
 	}
-	workqueue.ParallelizeUntil(context.TODO(), runtime.NumCPU(), len(allNodeNames), processNode)
+	workqueue.ParallelizeUntil(context.TODO(), 16, len(allNodeNames), processNode)
 	if pm.firstError != nil {
 		return nil, pm.firstError
 	}

@@ -20,7 +20,6 @@ import (
 	"context"
 	"fmt"
 	"math"
-	"runtime"
 	"sort"
 	"strings"
 	"sync"
@@ -490,7 +489,7 @@ func (g *genericScheduler) findNodesThatFit(pod *v1.Pod, nodes []*v1.Node) ([]*v
 		// Stops searching for more nodes once the configured number of feasible nodes
 		// are found.
-		workqueue.ParallelizeUntil(ctx, runtime.NumCPU(), int(allNodes), checkNode)
+		workqueue.ParallelizeUntil(ctx, 16, int(allNodes), checkNode)
 
 		filtered = filtered[:filteredLen]
 		if len(errs) > 0 {
@@ -696,7 +695,7 @@ func PrioritizeNodes(
 		}
 	}
-	workqueue.ParallelizeUntil(context.TODO(), runtime.NumCPU(), len(nodes), func(index int) {
+	workqueue.ParallelizeUntil(context.TODO(), 16, len(nodes), func(index int) {
 		nodeInfo := nodeNameToInfo[nodes[index].Name]
 		for i := range priorityConfigs {
 			if priorityConfigs[i].Function != nil {
@@ -944,7 +943,7 @@ func selectNodesForPreemption(pod *v1.Pod,
 			resultLock.Unlock()
 		}
 	}
-	workqueue.ParallelizeUntil(context.TODO(), runtime.NumCPU(), len(potentialNodes), checkNode)
+	workqueue.ParallelizeUntil(context.TODO(), 16, len(potentialNodes), checkNode)
 	return nodeToVictims, nil
 }