mirror of https://github.com/k3s-io/k3s
fix a scheduler panic due to internal cache inconsistency
parent d0c3cd182c
commit a86ba8b3c4
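The panic this commit fixes comes from the preemption path dereferencing a cache entry that may not exist yet: when a node joins the cluster, the node list can include it before the cached nodeNameToInfo map does, so the map lookup yields a nil *NodeInfo and the subsequent nodeInfo.Clone() call dereferences nil. Below is a minimal, self-contained Go sketch of that failure mode and of the guard the commit adds; the simplified nodeInfo type and names here are illustrative stand-ins, not the scheduler's real schedulercache.NodeInfo.

package main

import "fmt"

type nodeInfo struct {
	pods []string
}

func (n *nodeInfo) Clone() *nodeInfo {
	// Dereferences the receiver; calling Clone on a nil *nodeInfo panics,
	// mirroring nodeInfo.Clone() in selectVictimsOnNode before the fix.
	return &nodeInfo{pods: append([]string(nil), n.pods...)}
}

func main() {
	// Cached snapshot of node state, keyed by node name.
	nodeNameToInfo := map[string]*nodeInfo{
		"node1": {pods: []string{"p1"}},
	}

	// "newnode" has just joined the cluster: it is in the node list, but the
	// cache has not caught up, so the lookup returns the zero value, nil.
	info := nodeNameToInfo["newnode"]

	// The commit's guard: skip the node instead of dereferencing nil.
	if info == nil {
		fmt.Println("newnode not in cache yet; not a preemption candidate")
		return
	}
	_ = info.Clone() // without the guard this line would panic
}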
@@ -895,7 +895,6 @@ func selectNodesForPreemption(pod *v1.Pod,
 	queue internalqueue.SchedulingQueue,
 	pdbs []*policy.PodDisruptionBudget,
 ) (map[*v1.Node]*schedulerapi.Victims, error) {
-
 	nodeToVictims := map[*v1.Node]*schedulerapi.Victims{}
 	var resultLock sync.Mutex
 
@@ -984,6 +983,9 @@ func selectVictimsOnNode(
 	queue internalqueue.SchedulingQueue,
 	pdbs []*policy.PodDisruptionBudget,
 ) ([]*v1.Pod, int, bool) {
+	if nodeInfo == nil {
+		return nil, 0, false
+	}
 	potentialVictims := util.SortableList{CompFunc: util.HigherPriorityPod}
 	nodeInfoCopy := nodeInfo.Clone()
 
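Returning (nil, 0, false) for a nil nodeInfo reports the node as unfit for preemption in the same way as any node where evicting victims still would not make the pod schedulable; if memory serves, the caller in selectNodesForPreemption only records victims when the fits result is true, so the stale node is simply skipped rather than failing the whole preemption attempt.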
@@ -960,6 +960,11 @@ func TestSelectNodesForPreemption(t *testing.T) {
 			test.predicates[algorithmpredicates.MatchInterPodAffinityPred] = algorithmpredicates.NewPodAffinityPredicate(FakeNodeInfo(*nodes[0]), schedulertesting.FakePodLister(test.pods))
 		}
 		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, nodes)
+		// newnode simulates a case where a new node is added to the cluster,
+		// but nodeNameToInfo doesn't have it yet.
+		newnode := makeNode("newnode", 1000*5, priorityutil.DefaultMemoryRequest*5)
+		newnode.ObjectMeta.Labels = map[string]string{"hostname": "newnode"}
+		nodes = append(nodes, newnode)
 		nodeToPods, err := selectNodesForPreemption(test.pod, nodeNameToInfo, nodes, test.predicates, PredicateMetadata, nil, nil)
 		if err != nil {
 			t.Error(err)
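The test change mirrors the bug's trigger: newnode is appended to nodes but deliberately left out of nodeNameToInfo, so every test case now drives selectNodesForPreemption through the nil-cache-entry path. Without the guard added above, this lookup would end in a nil pointer dereference instead of the node simply being passed over.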