/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scheduler

import (
	"fmt"
	"io/ioutil"
	"os"
	"time"

	"k8s.io/klog"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	appsinformers "k8s.io/client-go/informers/apps/v1"
	coreinformers "k8s.io/client-go/informers/core/v1"
	policyinformers "k8s.io/client-go/informers/policy/v1beta1"
	storageinformersv1 "k8s.io/client-go/informers/storage/v1"
	storageinformersv1beta1 "k8s.io/client-go/informers/storage/v1beta1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/events"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
	latestschedulerapi "k8s.io/kubernetes/pkg/scheduler/api/latest"
	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
	"k8s.io/kubernetes/pkg/scheduler/core"
	"k8s.io/kubernetes/pkg/scheduler/factory"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
	internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
	"k8s.io/kubernetes/pkg/scheduler/metrics"
	"k8s.io/kubernetes/pkg/scheduler/volumebinder"
)

const (
	// BindTimeoutSeconds defines the default bind timeout
	BindTimeoutSeconds = 100
	// SchedulerError is the reason recorded for events when an error occurs during scheduling a pod.
	SchedulerError = "SchedulerError"
)

// Scheduler watches for new unscheduled pods. It attempts to find
// nodes that they fit on and writes bindings back to the api server.
type Scheduler struct {
	// It is expected that changes made via SchedulerCache will be observed
	// by NodeLister and Algorithm.
	SchedulerCache internalcache.Cache

	Algorithm core.ScheduleAlgorithm

	GetBinder func(pod *v1.Pod) factory.Binder

	// PodConditionUpdater is used only in case of scheduling errors. If we succeed
	// with scheduling, PodScheduled condition will be updated in apiserver in /bind
	// handler so that binding and setting PodCondition is atomic.
	PodConditionUpdater factory.PodConditionUpdater

	// PodPreemptor is used to evict pods and update the 'NominatedNode' field of
	// the preemptor pod.
	PodPreemptor factory.PodPreemptor

	// Framework runs scheduler plugins at configured extension points.
	Framework framework.Framework

	// NextPod should be a function that blocks until the next pod
	// is available. We don't use a channel for this, because scheduling
	// a pod may take some amount of time and we don't want pods to get
	// stale while they sit in a channel.
	NextPod func() *v1.Pod

	// WaitForCacheSync waits for scheduler cache to populate.
	// It returns true if it was successful, false if the controller should shut down.
	WaitForCacheSync func() bool

	// Error is called if there is an error. It is passed the pod in
	// question, and the error.
	Error func(*v1.Pod, error)

	// Recorder is the EventRecorder to use.
	Recorder events.EventRecorder

	// Close this to shut down the scheduler.
	StopEverything <-chan struct{}

	// VolumeBinder handles PVC/PV binding for the pod.
	VolumeBinder *volumebinder.VolumeBinder

	// DisablePreemption indicates whether pod preemption is disabled.
	DisablePreemption bool

	// SchedulingQueue holds pods to be scheduled.
	SchedulingQueue internalqueue.SchedulingQueue
}

// Cache returns the cache in scheduler for test to check the data in scheduler.
func (sched *Scheduler) Cache() internalcache.Cache {
	return sched.SchedulerCache
}

type schedulerOptions struct {
	schedulerName                  string
	hardPodAffinitySymmetricWeight int32
	disablePreemption              bool
	percentageOfNodesToScore       int32
	bindTimeoutSeconds             int64
}

// Option configures a Scheduler
type Option func(*schedulerOptions)

// WithName sets schedulerName for Scheduler, the default schedulerName is default-scheduler
func WithName(schedulerName string) Option {
	return func(o *schedulerOptions) {
		o.schedulerName = schedulerName
	}
}

// WithHardPodAffinitySymmetricWeight sets hardPodAffinitySymmetricWeight for Scheduler, the default value is 1
func WithHardPodAffinitySymmetricWeight(hardPodAffinitySymmetricWeight int32) Option {
	return func(o *schedulerOptions) {
		o.hardPodAffinitySymmetricWeight = hardPodAffinitySymmetricWeight
	}
}

// WithPreemptionDisabled sets disablePreemption for Scheduler, the default value is false
func WithPreemptionDisabled(disablePreemption bool) Option {
	return func(o *schedulerOptions) {
		o.disablePreemption = disablePreemption
	}
}

// WithPercentageOfNodesToScore sets percentageOfNodesToScore for Scheduler, the default value is 50
func WithPercentageOfNodesToScore(percentageOfNodesToScore int32) Option {
	return func(o *schedulerOptions) {
		o.percentageOfNodesToScore = percentageOfNodesToScore
	}
}

// WithBindTimeoutSeconds sets bindTimeoutSeconds for Scheduler, the default value is 100
func WithBindTimeoutSeconds(bindTimeoutSeconds int64) Option {
	return func(o *schedulerOptions) {
		o.bindTimeoutSeconds = bindTimeoutSeconds
	}
}

var defaultSchedulerOptions = schedulerOptions{
	schedulerName:                  v1.DefaultSchedulerName,
	hardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
	disablePreemption:              false,
	percentageOfNodesToScore:       schedulerapi.DefaultPercentageOfNodesToScore,
	bindTimeoutSeconds:             BindTimeoutSeconds,
}
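
// A minimal usage sketch of the functional options above, combined with New below.
// The informer factory, pod informer, recorder, algorithm source and framework
// arguments are assumed to be constructed by the caller (as the kube-scheduler
// command does); the option values shown are illustrative only:
//
//	sched, err := New(client,
//		informerFactory.Core().V1().Nodes(),
//		podInformer,
//		informerFactory.Core().V1().PersistentVolumes(),
//		informerFactory.Core().V1().PersistentVolumeClaims(),
//		informerFactory.Core().V1().ReplicationControllers(),
//		informerFactory.Apps().V1().ReplicaSets(),
//		informerFactory.Apps().V1().StatefulSets(),
//		informerFactory.Core().V1().Services(),
//		informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
//		informerFactory.Storage().V1().StorageClasses(),
//		informerFactory.Storage().V1beta1().CSINodes(),
//		recorder, algorithmSource, stopCh,
//		registry, plugins, pluginConfig,
//		WithName("my-scheduler"),
//		WithPercentageOfNodesToScore(70),
//		WithBindTimeoutSeconds(600),
//	)
//	if err != nil {
//		klog.Fatalf("couldn't create scheduler: %v", err)
//	}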

// New returns a Scheduler
func New(client clientset.Interface,
	nodeInformer coreinformers.NodeInformer,
	podInformer coreinformers.PodInformer,
	pvInformer coreinformers.PersistentVolumeInformer,
	pvcInformer coreinformers.PersistentVolumeClaimInformer,
	replicationControllerInformer coreinformers.ReplicationControllerInformer,
	replicaSetInformer appsinformers.ReplicaSetInformer,
	statefulSetInformer appsinformers.StatefulSetInformer,
	serviceInformer coreinformers.ServiceInformer,
	pdbInformer policyinformers.PodDisruptionBudgetInformer,
	storageClassInformer storageinformersv1.StorageClassInformer,
	csiNodeInformer storageinformersv1beta1.CSINodeInformer,
	recorder events.EventRecorder,
	schedulerAlgorithmSource kubeschedulerconfig.SchedulerAlgorithmSource,
	stopCh <-chan struct{},
	registry framework.Registry,
	plugins *kubeschedulerconfig.Plugins,
	pluginConfig []kubeschedulerconfig.PluginConfig,
	opts ...func(o *schedulerOptions)) (*Scheduler, error) {

	options := defaultSchedulerOptions
	for _, opt := range opts {
		opt(&options)
	}

	// Set up the configurator which can create schedulers from configs.
	configurator := factory.NewConfigFactory(&factory.ConfigFactoryArgs{
		Client:                         client,
		NodeInformer:                   nodeInformer,
		PodInformer:                    podInformer,
		PvInformer:                     pvInformer,
		PvcInformer:                    pvcInformer,
		ReplicationControllerInformer:  replicationControllerInformer,
		ReplicaSetInformer:             replicaSetInformer,
		StatefulSetInformer:            statefulSetInformer,
		ServiceInformer:                serviceInformer,
		PdbInformer:                    pdbInformer,
		StorageClassInformer:           storageClassInformer,
		CSINodeInformer:                csiNodeInformer,
		HardPodAffinitySymmetricWeight: options.hardPodAffinitySymmetricWeight,
		DisablePreemption:              options.disablePreemption,
		PercentageOfNodesToScore:       options.percentageOfNodesToScore,
		BindTimeoutSeconds:             options.bindTimeoutSeconds,
		Registry:                       registry,
		Plugins:                        plugins,
		PluginConfig:                   pluginConfig,
	})

	var config *factory.Config
	source := schedulerAlgorithmSource
	switch {
	case source.Provider != nil:
		// Create the config from a named algorithm provider.
		sc, err := configurator.CreateFromProvider(*source.Provider)
		if err != nil {
			return nil, fmt.Errorf("couldn't create scheduler using provider %q: %v", *source.Provider, err)
		}
		config = sc
	case source.Policy != nil:
		// Create the config from a user specified policy source.
		policy := &schedulerapi.Policy{}
		switch {
		case source.Policy.File != nil:
			if err := initPolicyFromFile(source.Policy.File.Path, policy); err != nil {
				return nil, err
			}
		case source.Policy.ConfigMap != nil:
			if err := initPolicyFromConfigMap(client, source.Policy.ConfigMap, policy); err != nil {
				return nil, err
			}
		}
		sc, err := configurator.CreateFromConfig(*policy)
		if err != nil {
			return nil, fmt.Errorf("couldn't create scheduler from policy: %v", err)
		}
		config = sc
	default:
		return nil, fmt.Errorf("unsupported algorithm source: %v", source)
	}

	// Additional tweaks to the config produced by the configurator.
	config.Recorder = recorder
	config.DisablePreemption = options.disablePreemption
	config.StopEverything = stopCh

	// Create the scheduler.
	sched := NewFromConfig(config)

	AddAllEventHandlers(sched, options.schedulerName, nodeInformer, podInformer, pvInformer, pvcInformer, serviceInformer, storageClassInformer, csiNodeInformer)
	return sched, nil
}

// initPolicyFromFile initializes policy from a file.
func initPolicyFromFile(policyFile string, policy *schedulerapi.Policy) error {
	// Use a policy serialized in a file.
	_, err := os.Stat(policyFile)
	if err != nil {
		return fmt.Errorf("missing policy config file %s", policyFile)
	}
	data, err := ioutil.ReadFile(policyFile)
	if err != nil {
		return fmt.Errorf("couldn't read policy config: %v", err)
	}
	err = runtime.DecodeInto(latestschedulerapi.Codec, []byte(data), policy)
	if err != nil {
		return fmt.Errorf("invalid policy: %v", err)
	}
	return nil
}

// initPolicyFromConfigMap initializes policy from a ConfigMap.
func initPolicyFromConfigMap(client clientset.Interface, policyRef *kubeschedulerconfig.SchedulerPolicyConfigMapSource, policy *schedulerapi.Policy) error {
	// Use a policy serialized in a config map value.
	policyConfigMap, err := client.CoreV1().ConfigMaps(policyRef.Namespace).Get(policyRef.Name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("couldn't get policy config map %s/%s: %v", policyRef.Namespace, policyRef.Name, err)
	}
	data, found := policyConfigMap.Data[kubeschedulerconfig.SchedulerPolicyConfigMapKey]
	if !found {
		return fmt.Errorf("missing policy config map value at key %q", kubeschedulerconfig.SchedulerPolicyConfigMapKey)
	}
	err = runtime.DecodeInto(latestschedulerapi.Codec, []byte(data), policy)
	if err != nil {
		return fmt.Errorf("invalid policy: %v", err)
	}
	return nil
}

// NewFromConfig returns a new scheduler using the provided Config.
func NewFromConfig(config *factory.Config) *Scheduler {
	metrics.Register()
	return &Scheduler{
		SchedulerCache:      config.SchedulerCache,
		Algorithm:           config.Algorithm,
		GetBinder:           config.GetBinder,
		PodConditionUpdater: config.PodConditionUpdater,
		PodPreemptor:        config.PodPreemptor,
		Framework:           config.Framework,
		NextPod:             config.NextPod,
		WaitForCacheSync:    config.WaitForCacheSync,
		Error:               config.Error,
		Recorder:            config.Recorder,
		StopEverything:      config.StopEverything,
		VolumeBinder:        config.VolumeBinder,
		DisablePreemption:   config.DisablePreemption,
		SchedulingQueue:     config.SchedulingQueue,
	}
}

// Run begins watching and scheduling. It waits for cache to be synced, then starts a goroutine and returns immediately.
func (sched *Scheduler) Run() {
	if !sched.WaitForCacheSync() {
		return
	}

	go wait.Until(sched.scheduleOne, 0, sched.StopEverything)
}
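
// A sketch of how Run is typically driven (assumed wiring; the real
// kube-scheduler command additionally starts informers, leader election and
// the healthz/metrics endpoints before reaching this point):
//
//	sched, err := New(/* see the example above */)
//	if err != nil {
//		klog.Fatalf("couldn't create scheduler: %v", err)
//	}
//	sched.Run() // waits for the cache to sync, then schedules pods in a background goroutine
//	<-stopCh    // Run returns immediately, so block here until shutdown is requested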

// recordSchedulingFailure records an event for the pod that indicates the
// pod has failed to schedule.
// NOTE: This function modifies "pod". "pod" should be copied before being passed.
func (sched *Scheduler) recordSchedulingFailure(pod *v1.Pod, err error, reason string, message string) {
	sched.Error(pod, err)
	sched.Recorder.Eventf(pod, nil, v1.EventTypeWarning, "FailedScheduling", "Scheduling", message)
	if err := sched.PodConditionUpdater.Update(pod, &v1.PodCondition{
		Type:    v1.PodScheduled,
		Status:  v1.ConditionFalse,
		Reason:  reason,
		Message: err.Error(),
	}); err != nil {
		klog.Errorf("Error updating the condition of the pod %s/%s: %v", pod.Namespace, pod.Name, err)
	}
}

// schedule implements the scheduling algorithm and returns the suggested result
// (host, number of evaluated nodes, number of feasible nodes).
func (sched *Scheduler) schedule(pod *v1.Pod, pluginContext *framework.PluginContext) (core.ScheduleResult, error) {
	result, err := sched.Algorithm.Schedule(pod, pluginContext)
	if err != nil {
		pod = pod.DeepCopy()
		sched.recordSchedulingFailure(pod, err, v1.PodReasonUnschedulable, err.Error())
		return core.ScheduleResult{}, err
	}
	return result, err
}

// preempt tries to create room for a pod that has failed to schedule, by preempting lower priority pods if possible.
// If it succeeds, it adds the name of the node where preemption has happened to the pod spec.
// It returns the node name and an error if any.
func (sched *Scheduler) preempt(pluginContext *framework.PluginContext, fwk framework.Framework, preemptor *v1.Pod, scheduleErr error) (string, error) {
	preemptor, err := sched.PodPreemptor.GetUpdatedPod(preemptor)
	if err != nil {
		klog.Errorf("Error getting the updated preemptor pod object: %v", err)
		return "", err
	}

	node, victims, nominatedPodsToClear, err := sched.Algorithm.Preempt(pluginContext, preemptor, scheduleErr)
	if err != nil {
		klog.Errorf("Error preempting victims to make room for %v/%v: %v", preemptor.Namespace, preemptor.Name, err)
		return "", err
	}
	var nodeName = ""
	if node != nil {
		nodeName = node.Name
		// Update the scheduling queue with the nominated pod information. Without
		// this, there would be a race condition between the next scheduling cycle
		// and the time the scheduler receives a Pod Update for the nominated pod.
		sched.SchedulingQueue.UpdateNominatedPodForNode(preemptor, nodeName)

		// Make a call to update nominated node name of the pod on the API server.
		err = sched.PodPreemptor.SetNominatedNodeName(preemptor, nodeName)
		if err != nil {
			klog.Errorf("Error in preemption process. Cannot set 'NominatedPod' on pod %v/%v: %v", preemptor.Namespace, preemptor.Name, err)
			sched.SchedulingQueue.DeleteNominatedPodIfExists(preemptor)
			return "", err
		}

		for _, victim := range victims {
			if err := sched.PodPreemptor.DeletePod(victim); err != nil {
				klog.Errorf("Error preempting pod %v/%v: %v", victim.Namespace, victim.Name, err)
				return "", err
			}
			// If the victim is a WaitingPod, send a reject message to the PermitPlugin
			if waitingPod := fwk.GetWaitingPod(victim.UID); waitingPod != nil {
				waitingPod.Reject("preempted")
			}
			sched.Recorder.Eventf(victim, preemptor, v1.EventTypeNormal, "Preempted", "Preempting", "Preempted by %v/%v on node %v", preemptor.Namespace, preemptor.Name, nodeName)
		}
		metrics.PreemptionVictims.Set(float64(len(victims)))
	}
	// Clearing nominated pods should happen outside of "if node != nil". Node could
	// be nil when a pod with nominated node name is eligible to preempt again,
	// but preemption logic does not find any node for it. In that case Preempt()
	// function of generic_scheduler.go returns the pod itself for removal of
	// the 'NominatedPod' field.
	for _, p := range nominatedPodsToClear {
		rErr := sched.PodPreemptor.RemoveNominatedNodeName(p)
		if rErr != nil {
			klog.Errorf("Cannot remove 'NominatedPod' field of pod: %v", rErr)
			// We do not return as this error is not critical.
		}
	}
	return nodeName, err
}

// assumeVolumes will update the volume cache with the chosen bindings.
//
// This function modifies assumed if volume binding is required.
func (sched *Scheduler) assumeVolumes(assumed *v1.Pod, host string) (allBound bool, err error) {
	allBound, err = sched.VolumeBinder.Binder.AssumePodVolumes(assumed, host)
	if err != nil {
		sched.recordSchedulingFailure(assumed, err, SchedulerError,
			fmt.Sprintf("AssumePodVolumes failed: %v", err))
	}
	return
}

// bindVolumes will make the API update with the assumed bindings and wait until
// the PV controller has completely finished the binding operation.
//
// If binding errors, times out or gets undone, then an error will be returned to
// retry scheduling.
func (sched *Scheduler) bindVolumes(assumed *v1.Pod) error {
	klog.V(5).Infof("Trying to bind volumes for pod \"%v/%v\"", assumed.Namespace, assumed.Name)
	err := sched.VolumeBinder.Binder.BindPodVolumes(assumed)
	if err != nil {
		klog.V(1).Infof("Failed to bind volumes for pod \"%v/%v\": %v", assumed.Namespace, assumed.Name, err)

		// Unassume the Pod and retry scheduling
		if forgetErr := sched.SchedulerCache.ForgetPod(assumed); forgetErr != nil {
			klog.Errorf("scheduler cache ForgetPod failed: %v", forgetErr)
		}

		sched.recordSchedulingFailure(assumed, err, "VolumeBindingFailed", err.Error())
		return err
	}

	klog.V(5).Infof("Success binding volumes for pod \"%v/%v\"", assumed.Namespace, assumed.Name)
	return nil
}

// assume signals to the cache that a pod is already in the cache, so that binding can be asynchronous.
// assume modifies `assumed`.
func (sched *Scheduler) assume(assumed *v1.Pod, host string) error {
	// Optimistically assume that the binding will succeed and send it to apiserver
	// in the background.
	// If the binding fails, scheduler will release resources allocated to assumed pod
	// immediately.
	assumed.Spec.NodeName = host

	if err := sched.SchedulerCache.AssumePod(assumed); err != nil {
		klog.Errorf("scheduler cache AssumePod failed: %v", err)

		// This is most probably result of a BUG in retrying logic.
		// We report an error here so that pod scheduling can be retried.
		// This relies on the fact that Error will check if the pod has been bound
		// to a node and if so will not add it back to the unscheduled pods queue
		// (otherwise this would cause an infinite loop).
		sched.recordSchedulingFailure(assumed, err, SchedulerError,
			fmt.Sprintf("AssumePod failed: %v", err))
		return err
	}
	// if "assumed" is a nominated pod, we should remove it from internal cache
	if sched.SchedulingQueue != nil {
		sched.SchedulingQueue.DeleteNominatedPodIfExists(assumed)
	}

	return nil
}

// bind binds a pod to a given node defined in a binding object. We expect this to run asynchronously, so we
// handle binding metrics internally.
func (sched *Scheduler) bind(assumed *v1.Pod, targetNode string, pluginContext *framework.PluginContext) error {
	bindingStart := time.Now()
	bindStatus := sched.Framework.RunBindPlugins(pluginContext, assumed, targetNode)
	var err error
	if !bindStatus.IsSuccess() {
		if bindStatus.Code() == framework.Skip {
			// All bind plugins chose to skip binding of this pod, call original binding function.
			// If binding succeeds then PodScheduled condition will be updated in apiserver so that
			// it's atomic with setting host.
			err = sched.GetBinder(assumed).Bind(&v1.Binding{
				ObjectMeta: metav1.ObjectMeta{Namespace: assumed.Namespace, Name: assumed.Name, UID: assumed.UID},
				Target: v1.ObjectReference{
					Kind: "Node",
					Name: targetNode,
				},
			})
		} else {
			err = fmt.Errorf("Bind failure, code: %d: %v", bindStatus.Code(), bindStatus.Message())
		}
	}
	if finErr := sched.SchedulerCache.FinishBinding(assumed); finErr != nil {
		klog.Errorf("scheduler cache FinishBinding failed: %v", finErr)
	}
	if err != nil {
		klog.V(1).Infof("Failed to bind pod: %v/%v", assumed.Namespace, assumed.Name)
		if err := sched.SchedulerCache.ForgetPod(assumed); err != nil {
			klog.Errorf("scheduler cache ForgetPod failed: %v", err)
		}
		return err
	}

	metrics.BindingLatency.Observe(metrics.SinceInSeconds(bindingStart))
	metrics.DeprecatedBindingLatency.Observe(metrics.SinceInMicroseconds(bindingStart))
	metrics.SchedulingLatency.WithLabelValues(metrics.Binding).Observe(metrics.SinceInSeconds(bindingStart))
	metrics.DeprecatedSchedulingLatency.WithLabelValues(metrics.Binding).Observe(metrics.SinceInSeconds(bindingStart))
	sched.Recorder.Eventf(assumed, nil, v1.EventTypeNormal, "Scheduled", "Binding", "Successfully assigned %v/%v to %v", assumed.Namespace, assumed.Name, targetNode)
	return nil
}
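
// For reference, the Binding object handed to the default binder above results
// in a POST to the pod's "binding" subresource. Expressed as an API payload it
// looks roughly like this (names and values are illustrative only):
//
//	{
//	  "apiVersion": "v1",
//	  "kind": "Binding",
//	  "metadata": {"name": "my-pod", "namespace": "default"},
//	  "target": {"kind": "Node", "name": "node-1"}
//	}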

// scheduleOne does the entire scheduling workflow for a single pod. It is serialized on the scheduling algorithm's host fitting.
func (sched *Scheduler) scheduleOne() {
	fwk := sched.Framework

	pod := sched.NextPod()
	// pod could be nil when schedulerQueue is closed
	if pod == nil {
		return
	}
	if pod.DeletionTimestamp != nil {
		sched.Recorder.Eventf(pod, nil, v1.EventTypeWarning, "FailedScheduling", "Scheduling", "skip schedule deleting pod: %v/%v", pod.Namespace, pod.Name)
		klog.V(3).Infof("Skip schedule deleting pod: %v/%v", pod.Namespace, pod.Name)
		return
	}

	klog.V(3).Infof("Attempting to schedule pod: %v/%v", pod.Namespace, pod.Name)

	// Synchronously attempt to find a fit for the pod.
	start := time.Now()
	pluginContext := framework.NewPluginContext()
	scheduleResult, err := sched.schedule(pod, pluginContext)
	if err != nil {
		// schedule() may have failed because the pod would not fit on any host, so we try to
		// preempt, with the expectation that the next time the pod is tried for scheduling it
		// will fit due to the preemption. It is also possible that a different pod will schedule
		// into the resources that were preempted, but this is harmless.
		if fitError, ok := err.(*core.FitError); ok {
			if sched.DisablePreemption {
				klog.V(3).Infof("Pod priority feature is not enabled or preemption is disabled by scheduler configuration." +
					" No preemption is performed.")
			} else {
				preemptionStartTime := time.Now()
				sched.preempt(pluginContext, fwk, pod, fitError)
				metrics.PreemptionAttempts.Inc()
				metrics.SchedulingAlgorithmPremptionEvaluationDuration.Observe(metrics.SinceInSeconds(preemptionStartTime))
				metrics.DeprecatedSchedulingAlgorithmPremptionEvaluationDuration.Observe(metrics.SinceInMicroseconds(preemptionStartTime))
				metrics.SchedulingLatency.WithLabelValues(metrics.PreemptionEvaluation).Observe(metrics.SinceInSeconds(preemptionStartTime))
				metrics.DeprecatedSchedulingLatency.WithLabelValues(metrics.PreemptionEvaluation).Observe(metrics.SinceInSeconds(preemptionStartTime))
			}
			// Pod did not fit anywhere, so it is counted as a failure. If preemption
			// succeeds, the pod should get counted as a success the next time we try to
			// schedule it. (hopefully)
			metrics.PodScheduleFailures.Inc()
		} else {
			klog.Errorf("error selecting node for pod: %v", err)
			metrics.PodScheduleErrors.Inc()
		}
		return
	}
	metrics.SchedulingAlgorithmLatency.Observe(metrics.SinceInSeconds(start))
	metrics.DeprecatedSchedulingAlgorithmLatency.Observe(metrics.SinceInMicroseconds(start))

	// Tell the cache to assume that a pod now is running on a given node, even though it hasn't been bound yet.
	// This allows us to keep scheduling without waiting on binding to occur.
	assumedPod := pod.DeepCopy()

	// Assume volumes first before assuming the pod.
	//
	// If all volumes are completely bound, then allBound is true and binding will be skipped.
	//
	// Otherwise, binding of volumes is started after the pod is assumed, but before pod binding.
	//
	// This function modifies 'assumedPod' if volume binding is required.
	allBound, err := sched.assumeVolumes(assumedPod, scheduleResult.SuggestedHost)
	if err != nil {
		klog.Errorf("error assuming volumes: %v", err)
		metrics.PodScheduleErrors.Inc()
		return
	}

	// Run "reserve" plugins.
	if sts := fwk.RunReservePlugins(pluginContext, assumedPod, scheduleResult.SuggestedHost); !sts.IsSuccess() {
		sched.recordSchedulingFailure(assumedPod, sts.AsError(), SchedulerError, sts.Message())
		metrics.PodScheduleErrors.Inc()
		return
	}

	// assume modifies `assumedPod` by setting NodeName=scheduleResult.SuggestedHost
	err = sched.assume(assumedPod, scheduleResult.SuggestedHost)
	if err != nil {
		klog.Errorf("error assuming pod: %v", err)
		metrics.PodScheduleErrors.Inc()
		// trigger un-reserve plugins to clean up state associated with the reserved Pod
		fwk.RunUnreservePlugins(pluginContext, assumedPod, scheduleResult.SuggestedHost)
		return
	}
	// bind the pod to its host asynchronously (we can do this b/c of the assumption step above).
	go func() {
		// Bind volumes first before Pod
		if !allBound {
			err := sched.bindVolumes(assumedPod)
			if err != nil {
				klog.Errorf("error binding volumes: %v", err)
				metrics.PodScheduleErrors.Inc()
				// trigger un-reserve plugins to clean up state associated with the reserved Pod
				fwk.RunUnreservePlugins(pluginContext, assumedPod, scheduleResult.SuggestedHost)
				return
			}
		}

		// Run "permit" plugins.
		permitStatus := fwk.RunPermitPlugins(pluginContext, assumedPod, scheduleResult.SuggestedHost)
		if !permitStatus.IsSuccess() {
			var reason string
			if permitStatus.IsUnschedulable() {
				metrics.PodScheduleFailures.Inc()
				reason = v1.PodReasonUnschedulable
			} else {
				metrics.PodScheduleErrors.Inc()
				reason = SchedulerError
			}
			if forgetErr := sched.Cache().ForgetPod(assumedPod); forgetErr != nil {
				klog.Errorf("scheduler cache ForgetPod failed: %v", forgetErr)
			}
			// trigger un-reserve plugins to clean up state associated with the reserved Pod
			fwk.RunUnreservePlugins(pluginContext, assumedPod, scheduleResult.SuggestedHost)
			sched.recordSchedulingFailure(assumedPod, permitStatus.AsError(), reason, permitStatus.Message())
			return
		}

		// Run "prebind" plugins.
		preBindStatus := fwk.RunPreBindPlugins(pluginContext, assumedPod, scheduleResult.SuggestedHost)
		if !preBindStatus.IsSuccess() {
			var reason string
			if preBindStatus.IsUnschedulable() {
				metrics.PodScheduleFailures.Inc()
				reason = v1.PodReasonUnschedulable
			} else {
				metrics.PodScheduleErrors.Inc()
				reason = SchedulerError
			}
			if forgetErr := sched.Cache().ForgetPod(assumedPod); forgetErr != nil {
				klog.Errorf("scheduler cache ForgetPod failed: %v", forgetErr)
			}
			// trigger un-reserve plugins to clean up state associated with the reserved Pod
			fwk.RunUnreservePlugins(pluginContext, assumedPod, scheduleResult.SuggestedHost)
			sched.recordSchedulingFailure(assumedPod, preBindStatus.AsError(), reason, preBindStatus.Message())
			return
		}

		err := sched.bind(assumedPod, scheduleResult.SuggestedHost, pluginContext)
		metrics.E2eSchedulingLatency.Observe(metrics.SinceInSeconds(start))
		metrics.DeprecatedE2eSchedulingLatency.Observe(metrics.SinceInMicroseconds(start))
		if err != nil {
			klog.Errorf("error binding pod: %v", err)
			metrics.PodScheduleErrors.Inc()
			// trigger un-reserve plugins to clean up state associated with the reserved Pod
			fwk.RunUnreservePlugins(pluginContext, assumedPod, scheduleResult.SuggestedHost)
			sched.recordSchedulingFailure(assumedPod, err, SchedulerError, fmt.Sprintf("Binding rejected: %v", err))
		} else {
			// Calculating nodeResourceString can be heavy. Avoid it if klog verbosity is below 2.
			if klog.V(2) {
				node, _ := sched.Cache().GetNodeInfo(scheduleResult.SuggestedHost)
				klog.Infof("pod %v/%v is bound successfully on node %q, %d nodes evaluated, %d nodes were found feasible. Bound node resource: %q.", assumedPod.Namespace, assumedPod.Name, scheduleResult.SuggestedHost, scheduleResult.EvaluatedNodes, scheduleResult.FeasibleNodes, nodeResourceString(node))
			}

			metrics.PodScheduleSuccesses.Inc()

			// Run "postbind" plugins.
			fwk.RunPostBindPlugins(pluginContext, assumedPod, scheduleResult.SuggestedHost)
		}
	}()
}

// nodeResourceString returns a string representation of node resources.
func nodeResourceString(n *v1.Node) string {
	if n == nil {
		return "N/A"
	}
	return fmt.Sprintf("Capacity: %s; Allocatable: %s.", resourceString(&n.Status.Capacity), resourceString(&n.Status.Allocatable))
}

func resourceString(r *v1.ResourceList) string {
	return fmt.Sprintf("CPU<%s>|Memory<%s>|Pods<%s>|StorageEphemeral<%s>", r.Cpu().String(), r.Memory().String(), r.Pods().String(), r.StorageEphemeral().String())
}