mirror of https://github.com/k3s-io/k3s
Merge pull request #74907 from bsalamat/num_cpu

Revert "Use runtime.NumCPU() instead of a fixed value for parallel scheduler threads"

pull/564/head
commit b033f0c1d1
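This revert pins the scheduler's parallel loops back to a fixed pool of 16 workers instead of sizing the pool with runtime.NumCPU(). As orientation for the hunks below, here is a minimal, self-contained sketch of what a fixed-width parallelizer in the style of workqueue.ParallelizeUntil does; it is an approximation written for this page, not client-go's implementation, and the names parallelizeUntil and fixedParallelism are invented.

package main

import (
	"context"
	"fmt"
	"sync"
)

// parallelizeUntil is a simplified stand-in for workqueue.ParallelizeUntil:
// it fans `pieces` indices out to a fixed number of workers and stops handing
// out new pieces if the context is cancelled. Illustration only.
func parallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece func(int)) {
	toProcess := make(chan int, pieces)
	for i := 0; i < pieces; i++ {
		toProcess <- i
	}
	close(toProcess)

	var wg sync.WaitGroup
	wg.Add(workers)
	for w := 0; w < workers; w++ {
		go func() {
			defer wg.Done()
			for piece := range toProcess {
				select {
				case <-ctx.Done():
					return // stop picking up new pieces once cancelled
				default:
					doWorkPiece(piece)
				}
			}
		}()
	}
	wg.Wait()
}

func main() {
	const fixedParallelism = 16 // the value this revert restores
	nodes := make([]string, 100)
	for i := range nodes {
		nodes[i] = fmt.Sprintf("node-%d", i)
	}
	var mu sync.Mutex
	processed := 0
	parallelizeUntil(context.TODO(), fixedParallelism, len(nodes), func(i int) {
		mu.Lock()
		processed++ // placeholder for per-node predicate/priority work
		mu.Unlock()
	})
	fmt.Println("processed", processed, "of", len(nodes), "nodes")
}

Each hunk below simply swaps runtime.NumCPU() back to the literal 16 at a ParallelizeUntil call site and drops the now-unused "runtime" import.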
@@ -19,7 +19,6 @@ package predicates
 import (
 	"context"
 	"fmt"
-	"runtime"
 	"sync"
 
 	"k8s.io/klog"
@@ -416,7 +415,7 @@ func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*s
 			appendTopologyPairsMaps(existingPodTopologyMaps)
 		}
 	}
-	workqueue.ParallelizeUntil(context.TODO(), runtime.NumCPU(), len(allNodeNames), processNode)
+	workqueue.ParallelizeUntil(context.TODO(), 16, len(allNodeNames), processNode)
 	return topologyMaps, firstError
 }
 
@@ -504,7 +503,7 @@ func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, nodeInfoMap map[s
 			appendResult(node.Name, nodeTopologyPairsAffinityPodsMaps, nodeTopologyPairsAntiAffinityPodsMaps)
 		}
 	}
-	workqueue.ParallelizeUntil(context.TODO(), runtime.NumCPU(), len(allNodeNames), processNode)
+	workqueue.ParallelizeUntil(context.TODO(), 16, len(allNodeNames), processNode)
 	return topologyPairsAffinityPodsMaps, topologyPairsAntiAffinityPodsMaps, firstError
 }
 
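The predicates hunks above all restore the same call-site shape: a processNode closure that handles one node index, handed to workqueue.ParallelizeUntil with 16 workers. A stand-alone reconstruction of that pattern is sketched below; it assumes the k8s.io/client-go dependency for util/workqueue, and the node list, per-node work, and catchError helper are invented for the example rather than taken from the scheduler.

package main

import (
	"context"
	"fmt"
	"sync"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	allNodeNames := []string{"node-a", "node-b", "node-c", "node-d"}

	var lock sync.Mutex
	var firstError error
	catchError := func(err error) {
		lock.Lock()
		defer lock.Unlock()
		if firstError == nil {
			firstError = err
		}
	}

	// processNode mirrors the shape of the scheduler's closures: it handles
	// exactly one index and records any failure under the shared lock.
	processNode := func(i int) {
		name := allNodeNames[i]
		if name == "" {
			catchError(fmt.Errorf("node %d has no name", i))
			return
		}
		// ... per-node predicate work would go here ...
	}

	// 16 is the fixed parallelism this revert restores at every call site.
	workqueue.ParallelizeUntil(context.TODO(), 16, len(allNodeNames), processNode)

	if firstError != nil {
		fmt.Println("error:", firstError)
		return
	}
	fmt.Println("checked", len(allNodeNames), "nodes")
}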
@@ -18,7 +18,6 @@ package priorities
 
 import (
 	"context"
-	"runtime"
 	"sync"
 
 	"k8s.io/api/core/v1"
@@ -212,7 +211,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
 			}
 		}
 	}
-	workqueue.ParallelizeUntil(context.TODO(), runtime.NumCPU(), len(allNodeNames), processNode)
+	workqueue.ParallelizeUntil(context.TODO(), 16, len(allNodeNames), processNode)
 	if pm.firstError != nil {
 		return nil, pm.firstError
 	}
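For scale, runtime.NumCPU() (the value these hunks remove) returns the number of logical CPUs usable by the process, so on a large host the reverted behaviour would have run one worker per logical CPU instead of the fixed 16. A trivial, purely illustrative comparison:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// runtime.NumCPU reports the number of logical CPUs usable by the process.
	// On a 64-core scheduler host the reverted change would have meant 64
	// workers per ParallelizeUntil call; this revert pins that back to 16.
	fmt.Println("runtime.NumCPU():", runtime.NumCPU())
	fmt.Println("fixed parallelism restored by this revert:", 16)
}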
@@ -20,7 +20,6 @@ import (
 	"context"
 	"fmt"
 	"math"
-	"runtime"
 	"sort"
 	"strings"
 	"sync"
@@ -490,7 +489,7 @@ func (g *genericScheduler) findNodesThatFit(pod *v1.Pod, nodes []*v1.Node) ([]*v
 
 		// Stops searching for more nodes once the configured number of feasible nodes
 		// are found.
-		workqueue.ParallelizeUntil(ctx, runtime.NumCPU(), int(allNodes), checkNode)
+		workqueue.ParallelizeUntil(ctx, 16, int(allNodes), checkNode)
 
 		filtered = filtered[:filteredLen]
 		if len(errs) > 0 {
@@ -696,7 +695,7 @@ func PrioritizeNodes(
 		}
 	}
 
-	workqueue.ParallelizeUntil(context.TODO(), runtime.NumCPU(), len(nodes), func(index int) {
+	workqueue.ParallelizeUntil(context.TODO(), 16, len(nodes), func(index int) {
 		nodeInfo := nodeNameToInfo[nodes[index].Name]
 		for i := range priorityConfigs {
 			if priorityConfigs[i].Function != nil {
@@ -944,7 +943,7 @@ func selectNodesForPreemption(pod *v1.Pod,
 			resultLock.Unlock()
 		}
 	}
-	workqueue.ParallelizeUntil(context.TODO(), runtime.NumCPU(), len(potentialNodes), checkNode)
+	workqueue.ParallelizeUntil(context.TODO(), 16, len(potentialNodes), checkNode)
 	return nodeToVictims, nil
 }
 
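Unlike the other call sites, findNodesThatFit passes a live ctx rather than context.TODO(), so the search can stop once enough feasible nodes have been found (per the in-code comment). The sketch below illustrates that cancellation pattern with a fixed pool of 16 workers; it again assumes the k8s.io/client-go dependency, and the threshold, node count, and counter are invented for the illustration, not taken from the scheduler.

package main

import (
	"context"
	"fmt"
	"sync/atomic"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	const numFeasibleNodesToFind = 50 // illustrative threshold, not the scheduler's real policy
	allNodes := 1000

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	var filteredLen int32
	checkNode := func(i int) {
		// Pretend every node fits; count it and cancel the context once
		// enough feasible nodes have been found, mirroring the comment in
		// the hunk above.
		if atomic.AddInt32(&filteredLen, 1) >= numFeasibleNodesToFind {
			cancel()
		}
	}

	// 16 workers pull node indices until the context is cancelled or the
	// pieces run out; cancellation leaves the remaining pieces unprocessed.
	workqueue.ParallelizeUntil(ctx, 16, allNodes, checkNode)

	fmt.Printf("stopped after finding at least %d of %d nodes\n", filteredLen, allNodes)
}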