mirror of https://github.com/k3s-io/k3s
Merge pull request #71824 from bsalamat/automated-cherry-pick-of-#71722-upstream-release-1.13
Automated cherry pick of #71722 upstream release 1.13

commit 198e0f7ea8
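All four hunks below make the same style of change inside PrioritizeNodes: loops that previously ranged over values (for _, extender := range extenders) or copied an element into a local (priorityConfig := priorityConfigs[i]) now range over indices, and each goroutine receives the index as an argument and reads the element from the slice itself (priorityConfigs[index], extenders[extIndex]). This ties every goroutine's view of the element to the shared slice rather than to a captured or copied loop variable.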
@@ -655,24 +655,26 @@ func PrioritizeNodes(
 	// DEPRECATED: we can remove this when all priorityConfigs implement the
 	// Map-Reduce pattern.
-	workqueue.ParallelizeUntil(context.TODO(), 16, len(priorityConfigs), func(i int) {
-		priorityConfig := priorityConfigs[i]
-		if priorityConfig.Function == nil {
+	for i := range priorityConfigs {
+		if priorityConfigs[i].Function != nil {
+			wg.Add(1)
+			go func(index int) {
+				defer wg.Done()
+				var err error
+				results[index], err = priorityConfigs[index].Function(pod, nodeNameToInfo, nodes)
+				if err != nil {
+					appendError(err)
+				}
+			}(i)
+		} else {
 			results[i] = make(schedulerapi.HostPriorityList, len(nodes))
-			return
 		}
-
-		var err error
-		results[i], err = priorityConfig.Function(pod, nodeNameToInfo, nodes)
-		if err != nil {
-			appendError(err)
-		}
-	})
+	}
 
 	workqueue.ParallelizeUntil(context.TODO(), 16, len(nodes), func(index int) {
 		nodeInfo := nodeNameToInfo[nodes[index].Name]
-		for i, priorityConfig := range priorityConfigs {
-			if priorityConfig.Function != nil {
+		for i := range priorityConfigs {
+			if priorityConfigs[i].Function != nil {
 				continue
 			}
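The first hunk replaces a closure that captured a per-iteration copy with a goroutine that takes the loop index. Under the Go versions this branch built with (before Go 1.22 gave each loop iteration its own variable), all goroutines started in a loop share one range variable, so they can all observe its last value. A minimal, self-contained sketch of that pitfall and the index-passing fix, using hypothetical names rather than the scheduler's types:

package main

import (
	"fmt"
	"sync"
)

func main() {
	configs := []string{"balanced", "spread", "affinity"}
	var wg sync.WaitGroup

	// Racy (pre-Go 1.22): every goroutine reads the same `cfg` variable,
	// which the loop keeps overwriting; `go run -race` flags this.
	for _, cfg := range configs {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("racy:", cfg) // may print "affinity" three times
		}()
	}
	wg.Wait()

	// Fixed, mirroring the diff: pass the index in as an argument and
	// read the element from the slice inside the goroutine.
	for i := range configs {
		wg.Add(1)
		go func(index int) {
			defer wg.Done()
			fmt.Println("fixed:", configs[index])
		}(i)
	}
	wg.Wait()
}

With the race detector enabled, the first loop is reported; the second always prints each element exactly once.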
@@ -685,22 +687,22 @@ func PrioritizeNodes(
 		}
 	})
 
-	for i, priorityConfig := range priorityConfigs {
-		if priorityConfig.Reduce == nil {
+	for i := range priorityConfigs {
+		if priorityConfigs[i].Reduce == nil {
 			continue
 		}
 		wg.Add(1)
-		go func(index int, config algorithm.PriorityConfig) {
+		go func(index int) {
 			defer wg.Done()
-			if err := config.Reduce(pod, meta, nodeNameToInfo, results[index]); err != nil {
+			if err := priorityConfigs[index].Reduce(pod, meta, nodeNameToInfo, results[index]); err != nil {
 				appendError(err)
 			}
 			if klog.V(10) {
 				for _, hostPriority := range results[index] {
-					klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), hostPriority.Host, config.Name, hostPriority.Score)
+					klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), hostPriority.Host, priorityConfigs[index].Name, hostPriority.Score)
 				}
 			}
-		}(i, priorityConfig)
+		}(i)
 	}
 	// Wait for all computations to be finished.
 	wg.Wait()
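The Map phase above still fans out through workqueue.ParallelizeUntil (from the client-go workqueue utilities), which hands each worker goroutine a piece index, the same index-based discipline the rest of the diff adopts. Roughly, and only as an illustrative sketch rather than the real client-go implementation, such a helper looks like:

package main

import (
	"context"
	"fmt"
	"sync"
)

// parallelizeUntil is a hypothetical sketch of a ParallelizeUntil-style
// helper: `workers` goroutines drain piece indices from a channel, stopping
// early if the context is cancelled.
func parallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece func(int)) {
	toProcess := make(chan int, pieces)
	for i := 0; i < pieces; i++ {
		toProcess <- i
	}
	close(toProcess)

	var wg sync.WaitGroup
	wg.Add(workers)
	for w := 0; w < workers; w++ {
		go func() {
			defer wg.Done()
			for piece := range toProcess {
				select {
				case <-ctx.Done():
					return // cancelled: stop picking up new pieces
				default:
					doWorkPiece(piece)
				}
			}
		}()
	}
	wg.Wait()
}

func main() {
	results := make([]int, 8)
	parallelizeUntil(context.TODO(), 3, len(results), func(i int) {
		results[i] = i * i // each piece writes only its own slot: no race
	})
	fmt.Println(results)
}

Because the callback receives the piece index, each worker writes only its own slot of the results slice, which is why the remaining ParallelizeUntil call needs no extra locking.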
@@ -720,14 +722,14 @@ func PrioritizeNodes(
 
 	if len(extenders) != 0 && nodes != nil {
 		combinedScores := make(map[string]int, len(nodeNameToInfo))
-		for _, extender := range extenders {
-			if !extender.IsInterested(pod) {
+		for i := range extenders {
+			if !extenders[i].IsInterested(pod) {
 				continue
 			}
 			wg.Add(1)
-			go func(ext algorithm.SchedulerExtender) {
+			go func(extIndex int) {
 				defer wg.Done()
-				prioritizedList, weight, err := ext.Prioritize(pod, nodes)
+				prioritizedList, weight, err := extenders[extIndex].Prioritize(pod, nodes)
 				if err != nil {
 					// Prioritization errors from extender can be ignored, let k8s/other extenders determine the priorities
 					return
@@ -736,12 +738,12 @@ func PrioritizeNodes(
 				for i := range *prioritizedList {
 					host, score := (*prioritizedList)[i].Host, (*prioritizedList)[i].Score
 					if klog.V(10) {
-						klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), host, ext.Name(), score)
+						klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), host, extenders[extIndex].Name(), score)
 					}
 					combinedScores[host] += score * weight
 				}
 				mu.Unlock()
-			}(extender)
+			}(i)
 		}
 		// wait for all go routines to finish
 		wg.Wait()
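The last two hunks apply the identical treatment to the extender fan-out: the goroutine now takes extIndex and calls extenders[extIndex].Prioritize and .Name instead of using a captured extender value. Races of this kind are exactly what the Go race detector catches, so a plausible way to check a change like this is to run the affected package's tests under it from the repository root:

	go test -race ./pkg/scheduler/core/...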