diff --git a/pkg/scheduler/core/generic_scheduler.go b/pkg/scheduler/core/generic_scheduler.go
index 40effb6fd3..95ea73c298 100644
--- a/pkg/scheduler/core/generic_scheduler.go
+++ b/pkg/scheduler/core/generic_scheduler.go
@@ -114,7 +114,7 @@ type genericScheduler struct {
 	percentageOfNodesToScore int32
 }
 
-// snapshot snapshots equivalane cache and node infos for all fit and priority
+// snapshot snapshots equivalence cache and node infos for all fit and priority
 // functions.
 func (g *genericScheduler) snapshot() error {
 	// IMPORTANT NOTE: We must snapshot equivalence cache before snapshotting
@@ -123,7 +123,7 @@ func (g *genericScheduler) snapshot() error {
 	// 1. snapshot cache
 	// 2. event arrives, updating cache and invalidating predicates or whole node cache
 	// 3. snapshot ecache
-	// 4. evaludate predicates
+	// 4. evaluate predicates
 	// 5. stale result will be written to ecache
 	if g.equivalenceCache != nil {
 		g.equivalenceCache.Snapshot()
@@ -289,7 +289,7 @@ func (g *genericScheduler) Preempt(pod *v1.Pod, nodeLister algorithm.NodeLister,
 
 	candidateNode := pickOneNodeForPreemption(nodeToVictims)
 	if candidateNode == nil {
-		return nil, nil, nil, err
+		return nil, nil, nil, nil
 	}
 
 	// Lower priority pods nominated to run on this node, may no longer fit on
@@ -298,7 +298,7 @@ func (g *genericScheduler) Preempt(pod *v1.Pod, nodeLister algorithm.NodeLister,
 	// lets scheduler find another place for them.
 	nominatedPods := g.getLowerPriorityNominatedPods(pod, candidateNode.Name)
 	if nodeInfo, ok := g.cachedNodeInfoMap[candidateNode.Name]; ok {
-		return nodeInfo.Node(), nodeToVictims[candidateNode].Pods, nominatedPods, err
+		return nodeInfo.Node(), nodeToVictims[candidateNode].Pods, nominatedPods, nil
 	}
 
 	return nil, nil, nil, fmt.Errorf(
diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go
index b9b94d3970..1e8e451f8c 100644
--- a/pkg/scheduler/scheduler.go
+++ b/pkg/scheduler/scheduler.go
@@ -294,11 +294,6 @@ func (sched *Scheduler) schedule(pod *v1.Pod) (string, error) {
 // If it succeeds, it adds the name of the node where preemption has happened to the pod annotations.
 // It returns the node name and an error if any.
 func (sched *Scheduler) preempt(preemptor *v1.Pod, scheduleErr error) (string, error) {
-	if !util.PodPriorityEnabled() || sched.config.DisablePreemption {
-		klog.V(3).Infof("Pod priority feature is not enabled or preemption is disabled by scheduler configuration." +
-			" No preemption is performed.")
-		return "", nil
-	}
 	preemptor, err := sched.config.PodPreemptor.GetUpdatedPod(preemptor)
 	if err != nil {
 		klog.Errorf("Error getting the updated preemptor pod object: %v", err)
@@ -306,7 +301,6 @@ func (sched *Scheduler) preempt(preemptor *v1.Pod, scheduleErr error) (string, e
 	}
 
 	node, victims, nominatedPodsToClear, err := sched.config.Algorithm.Preempt(preemptor, sched.config.NodeLister, scheduleErr)
-	metrics.PreemptionVictims.Set(float64(len(victims)))
 	if err != nil {
 		klog.Errorf("Error preempting victims to make room for %v/%v.", preemptor.Namespace, preemptor.Name)
 		return "", err
@@ -326,6 +320,7 @@ func (sched *Scheduler) preempt(preemptor *v1.Pod, scheduleErr error) (string, e
 			}
 			sched.config.Recorder.Eventf(victim, v1.EventTypeNormal, "Preempted", "by %v/%v on node %v", preemptor.Namespace, preemptor.Name, nodeName)
 		}
+		metrics.PreemptionVictims.Set(float64(len(victims)))
 	}
 
 	// Clearing nominated pods should happen outside of "if node != nil". Node could
 	// be nil when a pod with nominated node name is eligible to preempt again,
@@ -498,11 +493,16 @@ func (sched *Scheduler) scheduleOne() {
 		// will fit due to the preemption. It is also possible that a different pod will schedule
 		// into the resources that were preempted, but this is harmless.
 		if fitError, ok := err.(*core.FitError); ok {
-			preemptionStartTime := time.Now()
-			sched.preempt(pod, fitError)
-			metrics.PreemptionAttempts.Inc()
-			metrics.SchedulingAlgorithmPremptionEvaluationDuration.Observe(metrics.SinceInMicroseconds(preemptionStartTime))
-			metrics.SchedulingLatency.WithLabelValues(metrics.PreemptionEvaluation).Observe(metrics.SinceInSeconds(preemptionStartTime))
+			if !util.PodPriorityEnabled() || sched.config.DisablePreemption {
+				klog.V(3).Infof("Pod priority feature is not enabled or preemption is disabled by scheduler configuration." +
+					" No preemption is performed.")
+			} else {
+				preemptionStartTime := time.Now()
+				sched.preempt(pod, fitError)
+				metrics.PreemptionAttempts.Inc()
+				metrics.SchedulingAlgorithmPremptionEvaluationDuration.Observe(metrics.SinceInMicroseconds(preemptionStartTime))
+				metrics.SchedulingLatency.WithLabelValues(metrics.PreemptionEvaluation).Observe(metrics.SinceInSeconds(preemptionStartTime))
+			}
 			// Pod did not fit anywhere, so it is counted as a failure. If preemption
 			// succeeds, the pod should get counted as a success the next time we try to
 			// schedule it. (hopefully)
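For reviewers, here is a minimal, self-contained Go sketch of the control flow this patch produces in scheduleOne: the priority/preemption gate is checked before any preemption work, so PreemptionAttempts and the evaluation-duration metrics are recorded only when preemption actually runs (and, per the scheduler.go hunks, PreemptionVictims is set only inside the `node != nil` branch after a successful Preempt). The `fakeScheduler` type, its fields, and `handleFitError` are simplified stand-ins for illustration, not the real scheduler API.

```go
// Sketch of the post-patch gating in scheduleOne, using hypothetical
// simplified types. Gate first, then attempt preemption and touch
// preemption metrics only in the else arm.
package main

import (
	"errors"
	"fmt"
	"time"
)

type fakeScheduler struct {
	podPriorityEnabled bool // stand-in for util.PodPriorityEnabled()
	disablePreemption  bool // stand-in for sched.config.DisablePreemption
	preemptionAttempts int  // stand-in for metrics.PreemptionAttempts
}

// preempt stands in for sched.preempt; after the patch it no longer
// checks the feature gate itself, because its caller already has.
func (s *fakeScheduler) preempt(pod string, fitErr error) (string, error) {
	fmt.Printf("attempting preemption for %s after %v\n", pod, fitErr)
	return "node-1", nil
}

// handleFitError mirrors the patched branch in scheduleOne.
func (s *fakeScheduler) handleFitError(pod string, fitErr error) {
	if !s.podPriorityEnabled || s.disablePreemption {
		fmt.Println("pod priority disabled or preemption disabled; skipping preemption")
		return
	}
	start := time.Now()
	s.preempt(pod, fitErr) // return values ignored, as in the original code
	s.preemptionAttempts++ // only counted when preemption actually ran
	fmt.Printf("preemption evaluation took %v, attempts=%d\n", time.Since(start), s.preemptionAttempts)
}

func main() {
	fitErr := errors.New("0/3 nodes are available")

	disabled := &fakeScheduler{podPriorityEnabled: false}
	disabled.handleFitError("default/web-0", fitErr) // no attempt counted

	enabled := &fakeScheduler{podPriorityEnabled: true}
	enabled.handleFitError("default/web-0", fitErr) // attempt counted
}
```

Before the patch, a cluster with preemption disabled still incremented PreemptionAttempts and observed a (near-zero) evaluation duration on every fit failure, and PreemptionVictims could be set from a victims list even when Preempt returned an error; hoisting the gate into the caller and moving the victims gauge inside the success branch keeps both metrics honest.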