Merge pull request #71978 from denkensk/move-predicate-types

Move predicate types from algorithm to predicates
Kubernetes Prow Robot 2018-12-21 19:05:29 -08:00 committed by GitHub
commit 37dc6789d7
29 changed files with 380 additions and 387 deletions
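In short, the commit lifts the shared predicate types — PredicateFailureReason, PredicateMetadata, PredicateMetadataProducer, and FitPredicate — out of pkg/scheduler/algorithm and into pkg/scheduler/algorithm/predicates. For code outside the predicates package the change is mechanical: swap the import and the package qualifier. A minimal caller-side sketch (illustrative only; myCheck is a hypothetical wrapper, not code from this commit):

package main

import (
    "k8s.io/api/core/v1"
    "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" // was: .../pkg/scheduler/algorithm
    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// myCheck wraps a real predicate; the only change callers see is the
// qualifier on the moved types (predicates.* instead of algorithm.*).
func myCheck(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
    return predicates.PodFitsHost(pod, meta, nodeInfo)
}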

View File

@@ -20,7 +20,6 @@ go_library(
         "//pkg/controller/daemon/util:go_default_library",
         "//pkg/features:go_default_library",
         "//pkg/kubelet/types:go_default_library",
-        "//pkg/scheduler/algorithm:go_default_library",
         "//pkg/scheduler/algorithm/predicates:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
         "//pkg/util/labels:go_default_library",

View File

@@ -54,7 +54,6 @@ import (
     "k8s.io/kubernetes/pkg/controller/daemon/util"
     "k8s.io/kubernetes/pkg/features"
     kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
-    "k8s.io/kubernetes/pkg/scheduler/algorithm"
     "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
     "k8s.io/kubernetes/pkg/util/metrics"
@@ -1287,7 +1286,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
     return dsc.updateDaemonSetStatus(ds, hash, true)
 }

-func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *apps.DaemonSet) ([]algorithm.PredicateFailureReason, *schedulernodeinfo.NodeInfo, error) {
+func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *apps.DaemonSet) ([]predicates.PredicateFailureReason, *schedulernodeinfo.NodeInfo, error) {
     objects, err := dsc.podNodeIndex.ByIndex("nodeName", node.Name)
     if err != nil {
         return nil, nil, err
@@ -1428,8 +1427,8 @@ func NewPod(ds *apps.DaemonSet, nodeName string) *v1.Pod {
 // - PodFitsHost: checks pod's NodeName against node
 // - PodMatchNodeSelector: checks pod's NodeSelector and NodeAffinity against node
 // - PodToleratesNodeTaints: exclude tainted node unless pod has specific toleration
-func checkNodeFitness(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
-    var predicateFails []algorithm.PredicateFailureReason
+func checkNodeFitness(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
+    var predicateFails []predicates.PredicateFailureReason
     fit, reasons, err := predicates.PodFitsHost(pod, meta, nodeInfo)
     if err != nil {
         return false, predicateFails, err
@@ -1458,8 +1457,8 @@ func checkNodeFitness(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s
 // Predicates checks if a DaemonSet's pod can be scheduled on a node using GeneralPredicates
 // and PodToleratesNodeTaints predicate
-func Predicates(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
-    var predicateFails []algorithm.PredicateFailureReason
+func Predicates(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
+    var predicateFails []predicates.PredicateFailureReason
     // If ScheduleDaemonSetPods is enabled, only check nodeSelector, nodeAffinity and toleration/taint match.
     if utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) {
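A sketch of how a caller might consume the reasons that Predicates now returns (illustrative only, not code from this commit; explainFit is hypothetical, and GetReason is the single method of predicates.PredicateFailureReason, shown in the error.go hunk further down):

package main

import (
    "fmt"

    "k8s.io/api/core/v1"
    "k8s.io/kubernetes/pkg/controller/daemon"
    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// explainFit prints why a DaemonSet pod does not fit on a node.
func explainFit(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) error {
    fit, reasons, err := daemon.Predicates(pod, nodeInfo)
    if err != nil {
        return err
    }
    if !fit {
        for _, r := range reasons { // each r is a predicates.PredicateFailureReason
            fmt.Printf("pod %s does not fit: %s\n", pod.Name, r.GetReason())
        }
    }
    return nil
}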

View File

@@ -21,7 +21,6 @@ go_library(
         "//pkg/kubelet/container:go_default_library",
         "//pkg/kubelet/types:go_default_library",
         "//pkg/kubelet/util/format:go_default_library",
-        "//pkg/scheduler/algorithm:go_default_library",
         "//pkg/scheduler/algorithm/predicates:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
         "//pkg/security/apparmor:go_default_library",

View File

@@ -18,7 +18,7 @@ package lifecycle

 import (
     "k8s.io/api/core/v1"
-    "k8s.io/kubernetes/pkg/scheduler/algorithm"
+    "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 )

 // AdmissionFailureHandlerStub is an AdmissionFailureHandler that does not perform any handling of admission failure.
@@ -31,6 +31,6 @@ func NewAdmissionFailureHandlerStub() *AdmissionFailureHandlerStub {
     return &AdmissionFailureHandlerStub{}
 }

-func (n *AdmissionFailureHandlerStub) HandleAdmissionFailure(admitPod *v1.Pod, failureReasons []algorithm.PredicateFailureReason) (bool, []algorithm.PredicateFailureReason, error) {
+func (n *AdmissionFailureHandlerStub) HandleAdmissionFailure(admitPod *v1.Pod, failureReasons []predicates.PredicateFailureReason) (bool, []predicates.PredicateFailureReason, error) {
     return false, failureReasons, nil
 }

View File

@@ -24,7 +24,6 @@ import (
     "k8s.io/api/core/v1"
     v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
     "k8s.io/kubernetes/pkg/kubelet/util/format"
-    "k8s.io/kubernetes/pkg/scheduler/algorithm"
     "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
@@ -36,7 +35,7 @@ type pluginResourceUpdateFuncType func(*schedulernodeinfo.NodeInfo, *PodAdmitAtt
 // AdmissionFailureHandler is an interface which defines how to deal with a failure to admit a pod.
 // This allows for the graceful handling of pod admission failure.
 type AdmissionFailureHandler interface {
-    HandleAdmissionFailure(admitPod *v1.Pod, failureReasons []algorithm.PredicateFailureReason) (bool, []algorithm.PredicateFailureReason, error)
+    HandleAdmissionFailure(admitPod *v1.Pod, failureReasons []predicates.PredicateFailureReason) (bool, []predicates.PredicateFailureReason, error)
 }

 type predicateAdmitHandler struct {

View File

@@ -18,7 +18,6 @@ go_library(
         "//pkg/kubelet/lifecycle:go_default_library",
         "//pkg/kubelet/types:go_default_library",
         "//pkg/kubelet/util/format:go_default_library",
-        "//pkg/scheduler/algorithm:go_default_library",
         "//pkg/scheduler/algorithm/predicates:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/tools/record:go_default_library",

View File

@@ -30,7 +30,6 @@ import (
     "k8s.io/kubernetes/pkg/kubelet/lifecycle"
     kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
     "k8s.io/kubernetes/pkg/kubelet/util/format"
-    "k8s.io/kubernetes/pkg/scheduler/algorithm"
     "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 )
@@ -61,13 +60,13 @@ func NewCriticalPodAdmissionHandler(getPodsFunc eviction.ActivePodsFunc, killPod
 // HandleAdmissionFailure gracefully handles admission rejection and, in some
 // cases, allows admission of the pod despite its previous failure.
-func (c *CriticalPodAdmissionHandler) HandleAdmissionFailure(admitPod *v1.Pod, failureReasons []algorithm.PredicateFailureReason) (bool, []algorithm.PredicateFailureReason, error) {
+func (c *CriticalPodAdmissionHandler) HandleAdmissionFailure(admitPod *v1.Pod, failureReasons []predicates.PredicateFailureReason) (bool, []predicates.PredicateFailureReason, error) {
     if !kubetypes.IsCriticalPod(admitPod) {
         return false, failureReasons, nil
     }
     // InsufficientResourceError is not a reason to reject a critical pod.
     // Instead of rejecting, we free up resources to admit it, if no other reasons for rejection exist.
-    nonResourceReasons := []algorithm.PredicateFailureReason{}
+    nonResourceReasons := []predicates.PredicateFailureReason{}
     resourceReasons := []*admissionRequirement{}
     for _, reason := range failureReasons {
         if r, ok := reason.(*predicates.InsufficientResourceError); ok {
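The loop above partitions failure reasons by a type assertion on *predicates.InsufficientResourceError. A condensed sketch of that pattern (splitReasons is a hypothetical helper, not part of this commit):

package preemption

import "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"

// splitReasons separates resource shortages (recoverable for critical pods,
// e.g. by evicting other pods) from all other predicate failures.
func splitReasons(failureReasons []predicates.PredicateFailureReason) (resource []*predicates.InsufficientResourceError, other []predicates.PredicateFailureReason) {
    for _, reason := range failureReasons {
        if r, ok := reason.(*predicates.InsufficientResourceError); ok {
            resource = append(resource, r)
        } else {
            other = append(other, reason)
        }
    }
    return resource, other
}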

View File

@@ -11,6 +11,7 @@ go_library(
     deps = [
         "//pkg/features:go_default_library",
         "//pkg/scheduler/algorithm:go_default_library",
+        "//pkg/scheduler/algorithm/predicates:go_default_library",
         "//pkg/scheduler/api:go_default_library",
         "//pkg/scheduler/api/latest:go_default_library",
         "//pkg/scheduler/apis/config:go_default_library",

View File

@@ -59,7 +59,6 @@ go_test(
         "//pkg/apis/core/v1/helper:go_default_library",
         "//pkg/features:go_default_library",
         "//pkg/kubelet/apis:go_default_library",
-        "//pkg/scheduler/algorithm:go_default_library",
         "//pkg/scheduler/api:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
         "//pkg/scheduler/testing:go_default_library",

View File

@@ -23,7 +23,6 @@ import (
     utilfeature "k8s.io/apiserver/pkg/util/feature"
     "k8s.io/klog"
     "k8s.io/kubernetes/pkg/features"
-    "k8s.io/kubernetes/pkg/scheduler/algorithm"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
     volumeutil "k8s.io/kubernetes/pkg/volume/util"
 )
@@ -36,7 +35,7 @@ type CSIMaxVolumeLimitChecker struct {
 // NewCSIMaxVolumeLimitPredicate returns a predicate for counting CSI volumes
 func NewCSIMaxVolumeLimitPredicate(
-    pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) algorithm.FitPredicate {
+    pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) FitPredicate {
     c := &CSIMaxVolumeLimitChecker{
         pvInfo:  pvInfo,
         pvcInfo: pvcInfo,
@@ -45,7 +44,7 @@ func NewCSIMaxVolumeLimitPredicate(
 }

 func (c *CSIMaxVolumeLimitChecker) attachableLimitPredicate(
-    pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+    pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {

     // if the feature gate is disabled we return
     if !utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
@@ -101,7 +100,7 @@ func (c *CSIMaxVolumeLimitChecker) attachableLimitPredicate(
         if ok {
             currentVolumeCount := attachedVolumeCount[volumeLimitKey]
             if currentVolumeCount+count > int(maxVolumeLimit) {
-                return false, []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}, nil
+                return false, []PredicateFailureReason{ErrMaxVolumeCountExceeded}, nil
             }
         }
     }

View File

@@ -25,7 +25,6 @@ import (
     utilfeature "k8s.io/apiserver/pkg/util/feature"
     utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
     "k8s.io/kubernetes/pkg/features"
-    "k8s.io/kubernetes/pkg/scheduler/algorithm"
 )

 func TestCSIVolumeCountPredicate(t *testing.T) {
@@ -105,12 +104,12 @@ func TestCSIVolumeCountPredicate(t *testing.T) {
     }
     defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()

-    expectedFailureReasons := []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}
+    expectedFailureReasons := []PredicateFailureReason{ErrMaxVolumeCountExceeded}
     // running attachable predicate tests with feature gate and limit present on nodes
     for _, test := range tests {
         node := getNodeWithPodAndVolumeLimits(test.existingPods, int64(test.maxVols), test.filterName)
         pred := NewCSIMaxVolumeLimitPredicate(getFakeCSIPVInfo("csi-ebs", "csi-ebs"), getFakeCSIPVCInfo("csi-ebs"))
-        fits, reasons, err := pred(test.newPod, PredicateMetadata(test.newPod, nil), node)
+        fits, reasons, err := pred(test.newPod, GetPredicateMetadata(test.newPod, nil), node)
         if err != nil {
             t.Errorf("Using allocatable [%s]%s: unexpected error: %v", test.filterName, test.test, err)
         }

View File

@@ -134,6 +134,11 @@ func (e *PredicateFailureError) GetReason() string {
     return e.PredicateDesc
 }

+// PredicateFailureReason interface represents the failure reason of a predicate.
+type PredicateFailureReason interface {
+    GetReason() string
+}
+
 // FailureReason describes a failure reason.
 type FailureReason struct {
     reason string
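Since the interface now lives in the predicates package and has a single method, any type with a GetReason() string method satisfies it. A minimal sketch (maintenanceWindow is a hypothetical reason type, not part of this commit):

package predicates

// maintenanceWindow is a hypothetical custom failure reason.
type maintenanceWindow struct {
    node string
}

func (m *maintenanceWindow) GetReason() string {
    return "node " + m.node + " is inside a maintenance window"
}

// Compile-time check, mirroring the var _ pattern used elsewhere in this package.
var _ PredicateFailureReason = &maintenanceWindow{}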

View File

@@ -30,7 +30,6 @@ import (
     utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
     "k8s.io/kubernetes/pkg/features"
     kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
-    "k8s.io/kubernetes/pkg/scheduler/algorithm"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
     volumeutil "k8s.io/kubernetes/pkg/volume/util"
 )
@@ -742,13 +741,13 @@ func TestVolumeCountConflicts(t *testing.T) {
         },
     }

-    expectedFailureReasons := []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}
+    expectedFailureReasons := []PredicateFailureReason{ErrMaxVolumeCountExceeded}
     // running attachable predicate tests without feature gate and no limit present on nodes
     for _, test := range tests {
         os.Setenv(KubeMaxPDVols, strconv.Itoa(test.maxVols))
         pred := NewMaxPDVolumeCountPredicate(test.filterName, getFakePVInfo(test.filterName), getFakePVCInfo(test.filterName))
-        fits, reasons, err := pred(test.newPod, PredicateMetadata(test.newPod, nil), schedulernodeinfo.NewNodeInfo(test.existingPods...))
+        fits, reasons, err := pred(test.newPod, GetPredicateMetadata(test.newPod, nil), schedulernodeinfo.NewNodeInfo(test.existingPods...))
         if err != nil {
             t.Errorf("[%s]%s: unexpected error: %v", test.filterName, test.test, err)
         }
@@ -766,7 +765,7 @@ func TestVolumeCountConflicts(t *testing.T) {
     for _, test := range tests {
         node := getNodeWithPodAndVolumeLimits(test.existingPods, int64(test.maxVols), test.filterName)
         pred := NewMaxPDVolumeCountPredicate(test.filterName, getFakePVInfo(test.filterName), getFakePVCInfo(test.filterName))
-        fits, reasons, err := pred(test.newPod, PredicateMetadata(test.newPod, nil), node)
+        fits, reasons, err := pred(test.newPod, GetPredicateMetadata(test.newPod, nil), node)
         if err != nil {
             t.Errorf("Using allocatable [%s]%s: unexpected error: %v", test.filterName, test.test, err)
         }

View File

@@ -34,6 +34,16 @@ import (
     schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 )

+// PredicateMetadata interface represents anything that can access a predicate metadata.
+type PredicateMetadata interface {
+    ShallowCopy() PredicateMetadata
+    AddPod(addedPod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) error
+    RemovePod(deletedPod *v1.Pod) error
+}
+
+// PredicateMetadataProducer is a function that computes predicate metadata for a given pod.
+type PredicateMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata
+
 // PredicateMetadataFactory defines a factory of predicate metadata.
 type PredicateMetadataFactory struct {
     podLister algorithm.PodLister
@@ -91,21 +101,27 @@ type predicateMetadata struct {
 }

 // Ensure that predicateMetadata implements algorithm.PredicateMetadata.
-var _ algorithm.PredicateMetadata = &predicateMetadata{}
+var _ PredicateMetadata = &predicateMetadata{}

-// PredicateMetadataProducer function produces predicate metadata.
-type PredicateMetadataProducer func(pm *predicateMetadata)
+// predicateMetadataProducer produces predicate metadata. It is stored in a global variable below
+// and used to modify the return values of PredicateMetadataProducer.
+type predicateMetadataProducer func(pm *predicateMetadata)

 var predicateMetaProducerRegisterLock sync.Mutex
-var predicateMetadataProducers = make(map[string]PredicateMetadataProducer)
+var predicateMetadataProducers = make(map[string]predicateMetadataProducer)

 // RegisterPredicateMetadataProducer registers a PredicateMetadataProducer.
-func RegisterPredicateMetadataProducer(predicateName string, precomp PredicateMetadataProducer) {
+func RegisterPredicateMetadataProducer(predicateName string, precomp predicateMetadataProducer) {
     predicateMetaProducerRegisterLock.Lock()
     defer predicateMetaProducerRegisterLock.Unlock()
     predicateMetadataProducers[predicateName] = precomp
 }

+// EmptyPredicateMetadataProducer is a no-op PredicateMetadataProducer; it always returns nil metadata.
+func EmptyPredicateMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata {
+    return nil
+}
+
 // RegisterPredicateMetadataProducerWithExtendedResourceOptions registers a
 // PredicateMetadataProducer that creates predicate metadata with the provided
 // options for extended resources.
@@ -118,7 +134,7 @@ func RegisterPredicateMetadataProducerWithExtendedResourceOptions(ignoredExtende
 }

 // NewPredicateMetadataFactory creates a PredicateMetadataFactory.
-func NewPredicateMetadataFactory(podLister algorithm.PodLister) algorithm.PredicateMetadataProducer {
+func NewPredicateMetadataFactory(podLister algorithm.PodLister) PredicateMetadataProducer {
     factory := &PredicateMetadataFactory{
         podLister,
     }
@@ -126,7 +142,7 @@ func NewPredicateMetadataFactory(podLister algorithm.PodLister) algorithm.Predic
 }

 // GetMetadata returns the predicateMetadata which will be used by various predicates.
-func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInfoMap map[string]*schedulernodeinfo.NodeInfo) algorithm.PredicateMetadata {
+func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInfoMap map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata {
     // If we cannot compute metadata, just return nil
     if pod == nil {
         return nil
@@ -285,7 +301,7 @@ func (meta *predicateMetadata) AddPod(addedPod *v1.Pod, nodeInfo *schedulernodei
 // ShallowCopy copies a metadata struct into a new struct and creates a copy of
 // its maps and slices, but it does not copy the contents of pointer values.
-func (meta *predicateMetadata) ShallowCopy() algorithm.PredicateMetadata {
+func (meta *predicateMetadata) ShallowCopy() PredicateMetadata {
     newPredMeta := &predicateMetadata{
         pod:           meta.pod,
         podBestEffort: meta.podBestEffort,
@@ -304,7 +320,7 @@ func (meta *predicateMetadata) ShallowCopy() algorithm.PredicateMetadata {
         meta.serviceAffinityMatchingPodServices...)
     newPredMeta.serviceAffinityMatchingPodList = append([]*v1.Pod(nil),
         meta.serviceAffinityMatchingPodList...)
-    return (algorithm.PredicateMetadata)(newPredMeta)
+    return (PredicateMetadata)(newPredMeta)
 }

 type affinityTermProperties struct {
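Taken together, the new exported surface works like this: a PredicateMetadataProducer is invoked once per pod against the node map, and the resulting PredicateMetadata (possibly nil, per GetMetadata above and EmptyPredicateMetadataProducer) is handed to each predicate. A usage sketch under those assumptions (checkOneNode and its parameters are hypothetical, not code from this commit):

package main

import (
    "k8s.io/api/core/v1"
    "k8s.io/kubernetes/pkg/scheduler/algorithm"
    "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

func checkOneNode(podLister algorithm.PodLister, pod *v1.Pod,
    nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodeName string) (bool, []predicates.PredicateFailureReason, error) {
    producer := predicates.NewPredicateMetadataFactory(podLister)
    meta := producer(pod, nodeNameToInfo) // nil if metadata cannot be computed
    return predicates.PodFitsResources(pod, meta, nodeNameToInfo[nodeName])
}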

View File

@@ -137,6 +137,10 @@ var (
         CheckNodeMemoryPressurePred, CheckNodePIDPressurePred, CheckNodeDiskPressurePred, MatchInterPodAffinityPred}
 )

+// FitPredicate is a function that indicates if a pod fits into an existing node.
+// The failure information is given by the error.
+type FitPredicate func(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error)
+
 // NodeInfo interface represents anything that can get node object from node ID.
 type NodeInfo interface {
     GetNodeInfo(nodeID string) (*v1.Node, error)
@@ -274,11 +278,11 @@ func isVolumeConflict(volume v1.Volume, pod *v1.Pod) bool {
 // - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image.
 // - ISCSI forbids if any two pods share at least same IQN, LUN and Target
 // TODO: migrate this into some per-volume specific code?
-func NoDiskConflict(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func NoDiskConflict(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     for _, v := range pod.Spec.Volumes {
         for _, ev := range nodeInfo.Pods() {
             if isVolumeConflict(v, ev) {
-                return false, []algorithm.PredicateFailureReason{ErrDiskConflict}, nil
+                return false, []PredicateFailureReason{ErrDiskConflict}, nil
             }
         }
     }
@@ -313,7 +317,7 @@ type VolumeFilter struct {
 // types, counts the number of unique volumes, and rejects the new pod if it would place the total count over
 // the maximum.
 func NewMaxPDVolumeCountPredicate(
-    filterName string, pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) algorithm.FitPredicate {
+    filterName string, pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) FitPredicate {
     var filter VolumeFilter
     var volumeLimitKey v1.ResourceName
@@ -447,7 +451,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s
     return nil
 }

-func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     // If a pod doesn't have any volume attached to it, the predicate will always be true.
     // Thus we make a fast path for it, to avoid unnecessary computations in this case.
     if len(pod.Spec.Volumes) == 0 {
@@ -492,7 +496,7 @@ func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta algorithm.Predicat
     if numExistingVolumes+numNewVolumes > maxAttachLimit {
         // violates MaxEBSVolumeCount or MaxGCEPDVolumeCount
-        return false, []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}, nil
+        return false, []PredicateFailureReason{ErrMaxVolumeCountExceeded}, nil
     }
     if nodeInfo != nil && nodeInfo.TransientInfo != nil && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) {
         nodeInfo.TransientInfo.TransientLock.Lock()
@@ -575,7 +579,7 @@ type VolumeZoneChecker struct {
 // determining the zone of a volume during scheduling, and that is likely to
 // require calling out to the cloud provider. It seems that we are moving away
 // from inline volume declarations anyway.
-func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo, classInfo StorageClassInfo) algorithm.FitPredicate {
+func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo, classInfo StorageClassInfo) FitPredicate {
     c := &VolumeZoneChecker{
         pvInfo:    pvInfo,
         pvcInfo:   pvcInfo,
@@ -584,7 +588,7 @@ func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolum
     return c.predicate
 }

-func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     // If a pod doesn't have any volume attached to it, the predicate will always be true.
     // Thus we make a fast path for it, to avoid unnecessary computations in this case.
     if len(pod.Spec.Volumes) == 0 {
@@ -671,7 +675,7 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetad
     if !volumeVSet.Has(nodeV) {
         klog.V(10).Infof("Won't schedule pod %q onto node %q due to volume %q (mismatch on %q)", pod.Name, node.Name, pvName, k)
-        return false, []algorithm.PredicateFailureReason{ErrVolumeZoneConflict}, nil
+        return false, []PredicateFailureReason{ErrVolumeZoneConflict}, nil
     }
         }
     }
@@ -725,13 +729,13 @@ func podName(pod *v1.Pod) string {
 // PodFitsResources checks if a node has sufficient resources, such as cpu, memory, gpu, opaque int resources etc to run a pod.
 // First return value indicates whether a node has sufficient resources to run a pod while the second return value indicates the
 // predicate failure reasons if the node has insufficient resources to run the pod.
-func PodFitsResources(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodFitsResources(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     node := nodeInfo.Node()
     if node == nil {
         return false, nil, fmt.Errorf("node not found")
     }

-    var predicateFails []algorithm.PredicateFailureReason
+    var predicateFails []PredicateFailureReason
     allowedPodNumber := nodeInfo.AllowedPodNumber()
     if len(nodeInfo.Pods())+1 > allowedPodNumber {
         predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourcePods, 1, int64(len(nodeInfo.Pods())), int64(allowedPodNumber)))
@@ -850,7 +854,7 @@ func podMatchesNodeSelectorAndAffinityTerms(pod *v1.Pod, node *v1.Node) bool {
 }

 // PodMatchNodeSelector checks if a pod node selector matches the node label.
-func PodMatchNodeSelector(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodMatchNodeSelector(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     node := nodeInfo.Node()
     if node == nil {
         return false, nil, fmt.Errorf("node not found")
@@ -858,11 +862,11 @@ func PodMatchNodeSelector(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInf
     if podMatchesNodeSelectorAndAffinityTerms(pod, node) {
         return true, nil, nil
     }
-    return false, []algorithm.PredicateFailureReason{ErrNodeSelectorNotMatch}, nil
+    return false, []PredicateFailureReason{ErrNodeSelectorNotMatch}, nil
 }

 // PodFitsHost checks if a pod spec node name matches the current node.
-func PodFitsHost(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodFitsHost(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     if len(pod.Spec.NodeName) == 0 {
         return true, nil, nil
     }
@@ -873,7 +877,7 @@ func PodFitsHost(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedu
     if pod.Spec.NodeName == node.Name {
         return true, nil, nil
     }
-    return false, []algorithm.PredicateFailureReason{ErrPodNotMatchHostName}, nil
+    return false, []PredicateFailureReason{ErrPodNotMatchHostName}, nil
 }

 // NodeLabelChecker contains information to check node labels for a predicate.
@@ -884,7 +888,7 @@ type NodeLabelChecker struct {
 // NewNodeLabelPredicate creates a predicate which evaluates whether a pod can fit based on the
 // node labels which match a filter that it requests.
-func NewNodeLabelPredicate(labels []string, presence bool) algorithm.FitPredicate {
+func NewNodeLabelPredicate(labels []string, presence bool) FitPredicate {
     labelChecker := &NodeLabelChecker{
         labels:   labels,
         presence: presence,
@@ -904,7 +908,7 @@ func NewNodeLabelPredicate(labels []string, presence bool) algorithm.FitPredicat
 // Alternately, eliminating nodes that have a certain label, regardless of value, is also useful
 // A node may have a label with "retiring" as key and the date as the value
 // and it may be desirable to avoid scheduling new pods on this node
-func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     node := nodeInfo.Node()
     if node == nil {
         return false, nil, fmt.Errorf("node not found")
@@ -915,7 +919,7 @@ func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta algorithm.Pr
     for _, label := range n.labels {
         exists = nodeLabels.Has(label)
         if (exists && !n.presence) || (!exists && n.presence) {
-            return false, []algorithm.PredicateFailureReason{ErrNodeLabelPresenceViolated}, nil
+            return false, []PredicateFailureReason{ErrNodeLabelPresenceViolated}, nil
         }
     }
     return true, nil, nil
@@ -952,7 +956,7 @@ func (s *ServiceAffinity) serviceAffinityMetadataProducer(pm *predicateMetadata)
 }

 // NewServiceAffinityPredicate creates a ServiceAffinity.
-func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister algorithm.ServiceLister, nodeInfo NodeInfo, labels []string) (algorithm.FitPredicate, PredicateMetadataProducer) {
+func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister algorithm.ServiceLister, nodeInfo NodeInfo, labels []string) (FitPredicate, predicateMetadataProducer) {
     affinity := &ServiceAffinity{
         podLister:     podLister,
         serviceLister: serviceLister,
@@ -989,7 +993,7 @@ func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister al
 //
 // WARNING: This Predicate is NOT guaranteed to work if some of the predicateMetadata data isn't precomputed...
 // For that reason it is not exported, i.e. it is highly coupled to the implementation of the FitPredicate construction.
-func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     var services []*v1.Service
     var pods []*v1.Pod
     if pm, ok := meta.(*predicateMetadata); ok && (pm.serviceAffinityMatchingPodList != nil || pm.serviceAffinityMatchingPodServices != nil) {
@@ -1024,11 +1028,11 @@ func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta algorithm.Predi
     if CreateSelectorFromLabels(affinityLabels).Matches(labels.Set(node.Labels)) {
         return true, nil, nil
     }
-    return false, []algorithm.PredicateFailureReason{ErrServiceAffinityViolated}, nil
+    return false, []PredicateFailureReason{ErrServiceAffinityViolated}, nil
 }

 // PodFitsHostPorts checks if a node has free ports for the requested pod ports.
-func PodFitsHostPorts(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodFitsHostPorts(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     var wantPorts []*v1.ContainerPort
     if predicateMeta, ok := meta.(*predicateMetadata); ok {
         wantPorts = predicateMeta.podPorts
@@ -1044,7 +1048,7 @@ func PodFitsHostPorts(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s
     // try to see whether existingPorts and wantPorts will conflict or not
     if portsConflict(existingPorts, wantPorts) {
-        return false, []algorithm.PredicateFailureReason{ErrPodNotFitsHostPorts}, nil
+        return false, []PredicateFailureReason{ErrPodNotFitsHostPorts}, nil
     }

     return true, nil, nil
@@ -1068,8 +1072,8 @@ func haveOverlap(a1, a2 []string) bool {
 // GeneralPredicates checks whether noncriticalPredicates and EssentialPredicates pass. noncriticalPredicates are the predicates
 // that only non-critical pods need and EssentialPredicates are the predicates that all pods, including critical pods, need
-func GeneralPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
-    var predicateFails []algorithm.PredicateFailureReason
+func GeneralPredicates(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+    var predicateFails []PredicateFailureReason
     fit, reasons, err := noncriticalPredicates(pod, meta, nodeInfo)
     if err != nil {
         return false, predicateFails, err
@@ -1090,8 +1094,8 @@ func GeneralPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *
 }

 // noncriticalPredicates are the predicates that only non-critical pods need
-func noncriticalPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
-    var predicateFails []algorithm.PredicateFailureReason
+func noncriticalPredicates(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+    var predicateFails []PredicateFailureReason
     fit, reasons, err := PodFitsResources(pod, meta, nodeInfo)
     if err != nil {
         return false, predicateFails, err
@@ -1104,8 +1108,8 @@ func noncriticalPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeIn
 }

 // EssentialPredicates are the predicates that all pods, including critical pods, need
-func EssentialPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
-    var predicateFails []algorithm.PredicateFailureReason
+func EssentialPredicates(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+    var predicateFails []PredicateFailureReason
     fit, reasons, err := PodFitsHost(pod, meta, nodeInfo)
     if err != nil {
         return false, predicateFails, err
@@ -1141,7 +1145,7 @@ type PodAffinityChecker struct {
 }

 // NewPodAffinityPredicate creates a PodAffinityChecker.
-func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister) algorithm.FitPredicate {
+func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister) FitPredicate {
     checker := &PodAffinityChecker{
         info:      info,
         podLister: podLister,
@@ -1152,13 +1156,13 @@ func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister) algor
 // InterPodAffinityMatches checks if a pod can be scheduled on the specified node with pod affinity/anti-affinity configuration.
 // First return value indicates whether a pod can be scheduled on the specified node while the second return value indicates the
 // predicate failure reasons if the pod cannot be scheduled on the specified node.
-func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     node := nodeInfo.Node()
     if node == nil {
         return false, nil, fmt.Errorf("node not found")
     }
     if failedPredicates, error := c.satisfiesExistingPodsAntiAffinity(pod, meta, nodeInfo); failedPredicates != nil {
-        failedPredicates := append([]algorithm.PredicateFailureReason{ErrPodAffinityNotMatch}, failedPredicates)
+        failedPredicates := append([]PredicateFailureReason{ErrPodAffinityNotMatch}, failedPredicates)
         return false, failedPredicates, error
     }
@@ -1168,7 +1172,7 @@ func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm
         return true, nil, nil
     }
     if failedPredicates, error := c.satisfiesPodsAffinityAntiAffinity(pod, meta, nodeInfo, affinity); failedPredicates != nil {
-        failedPredicates := append([]algorithm.PredicateFailureReason{ErrPodAffinityNotMatch}, failedPredicates)
+        failedPredicates := append([]PredicateFailureReason{ErrPodAffinityNotMatch}, failedPredicates)
         return false, failedPredicates, error
     }
@@ -1290,7 +1294,7 @@ func (c *PodAffinityChecker) getMatchingAntiAffinityTopologyPairsOfPods(pod *v1.
 // Checks if scheduling the pod onto this node would break any anti-affinity
 // terms indicated by the existing pods.
-func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (algorithm.PredicateFailureReason, error) {
+func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (PredicateFailureReason, error) {
     node := nodeInfo.Node()
     if node == nil {
         return ErrExistingPodsAntiAffinityRulesNotMatch, fmt.Errorf("Node is nil")
@@ -1365,8 +1369,8 @@ func (c *PodAffinityChecker) nodeMatchesAnyTopologyTerm(pod *v1.Pod, topologyPai
 // Checks if scheduling the pod onto this node would break any term of this pod.
 func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
-    meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo,
-    affinity *v1.Affinity) (algorithm.PredicateFailureReason, error) {
+    meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo,
+    affinity *v1.Affinity) (PredicateFailureReason, error) {
     node := nodeInfo.Node()
     if node == nil {
         return ErrPodAffinityRulesNotMatch, fmt.Errorf("Node is nil")
@@ -1466,9 +1470,9 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
 }

 // CheckNodeUnschedulablePredicate checks if a pod can be scheduled on a node with Unschedulable spec.
-func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     if nodeInfo == nil || nodeInfo.Node() == nil {
-        return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil
+        return false, []PredicateFailureReason{ErrNodeUnknownCondition}, nil
     }

     // If the pod tolerates the unschedulable taint, it also tolerates `node.Spec.Unschedulable`.
@@ -1479,16 +1483,16 @@ func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta algorithm.PredicateMetada
     // TODO (k82cn): deprecates `node.Spec.Unschedulable` in 1.13.
     if nodeInfo.Node().Spec.Unschedulable && !podToleratesUnschedulable {
-        return false, []algorithm.PredicateFailureReason{ErrNodeUnschedulable}, nil
+        return false, []PredicateFailureReason{ErrNodeUnschedulable}, nil
     }

     return true, nil, nil
 }

 // PodToleratesNodeTaints checks if a pod's tolerations can tolerate the node's taints.
-func PodToleratesNodeTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodToleratesNodeTaints(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     if nodeInfo == nil || nodeInfo.Node() == nil {
-        return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil
+        return false, []PredicateFailureReason{ErrNodeUnknownCondition}, nil
     }

     return podToleratesNodeTaints(pod, nodeInfo, func(t *v1.Taint) bool {
@@ -1498,13 +1502,13 @@ func PodToleratesNodeTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeI
 }

 // PodToleratesNodeNoExecuteTaints checks if a pod's tolerations can tolerate the node's NoExecute taints.
-func PodToleratesNodeNoExecuteTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodToleratesNodeNoExecuteTaints(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
     return podToleratesNodeTaints(pod, nodeInfo, func(t *v1.Taint) bool {
         return t.Effect == v1.TaintEffectNoExecute
     })
 }

-func podToleratesNodeTaints(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo, filter func(t *v1.Taint) bool) (bool, []algorithm.PredicateFailureReason, error) {
+func podToleratesNodeTaints(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo, filter func(t *v1.Taint) bool) (bool, []PredicateFailureReason, error) {
     taints, err := nodeInfo.Taints()
     if err != nil {
         return false, nil, err
@@ -1513,7 +1517,7 @@ func podToleratesNodeTaints(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo, f
     if v1helper.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, taints, filter) {
         return true, nil, nil
     }
-    return false, []algorithm.PredicateFailureReason{ErrTaintsTolerationsNotMatch}, nil
+    return false, []PredicateFailureReason{ErrTaintsTolerationsNotMatch}, nil
 }

 // isPodBestEffort checks if pod is scheduled with best-effort QoS
@ -1523,7 +1527,7 @@ func isPodBestEffort(pod *v1.Pod) bool {
// CheckNodeMemoryPressurePredicate checks if a pod can be scheduled on a node // CheckNodeMemoryPressurePredicate checks if a pod can be scheduled on a node
// reporting memory pressure condition. // reporting memory pressure condition.
func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
var podBestEffort bool var podBestEffort bool
if predicateMeta, ok := meta.(*predicateMetadata); ok { if predicateMeta, ok := meta.(*predicateMetadata); ok {
podBestEffort = predicateMeta.podBestEffort podBestEffort = predicateMeta.podBestEffort
@ -1538,38 +1542,37 @@ func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetad
// check if node is under memory pressure // check if node is under memory pressure
if nodeInfo.MemoryPressureCondition() == v1.ConditionTrue { if nodeInfo.MemoryPressureCondition() == v1.ConditionTrue {
return false, []algorithm.PredicateFailureReason{ErrNodeUnderMemoryPressure}, nil return false, []PredicateFailureReason{ErrNodeUnderMemoryPressure}, nil
} }
return true, nil, nil return true, nil, nil
} }
// CheckNodeDiskPressurePredicate checks if a pod can be scheduled on a node // CheckNodeDiskPressurePredicate checks if a pod can be scheduled on a node
// reporting disk pressure condition. // reporting disk pressure condition.
func CheckNodeDiskPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func CheckNodeDiskPressurePredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
// check if node is under disk pressure // check if node is under disk pressure
if nodeInfo.DiskPressureCondition() == v1.ConditionTrue { if nodeInfo.DiskPressureCondition() == v1.ConditionTrue {
return false, []algorithm.PredicateFailureReason{ErrNodeUnderDiskPressure}, nil return false, []PredicateFailureReason{ErrNodeUnderDiskPressure}, nil
} }
return true, nil, nil return true, nil, nil
} }
// CheckNodePIDPressurePredicate checks if a pod can be scheduled on a node // CheckNodePIDPressurePredicate checks if a pod can be scheduled on a node
// reporting pid pressure condition. // reporting pid pressure condition.
func CheckNodePIDPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func CheckNodePIDPressurePredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
// check if node is under pid pressure // check if node is under pid pressure
if nodeInfo.PIDPressureCondition() == v1.ConditionTrue { if nodeInfo.PIDPressureCondition() == v1.ConditionTrue {
return false, []algorithm.PredicateFailureReason{ErrNodeUnderPIDPressure}, nil return false, []PredicateFailureReason{ErrNodeUnderPIDPressure}, nil
} }
return true, nil, nil return true, nil, nil
} }
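The three pressure predicates above share one shape: read a condition cached on `NodeInfo`, and on pressure return a package-local failure reason instead of the old `algorithm`-qualified one. A minimal sketch of calling one of them after this move — assuming the post-move import path; the `main` harness and the nil metadata argument are illustrative, not from the PR (the memory predicate only rejects best-effort pods, so the sketch builds one):

```go
// Sketch, not from the PR: exercising a pressure predicate and its
// now package-local failure reasons.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

func main() {
	node := &v1.Node{
		Status: v1.NodeStatus{
			Conditions: []v1.NodeCondition{
				// SetNode caches this; the predicate reads it back via
				// nodeInfo.MemoryPressureCondition().
				{Type: v1.NodeMemoryPressure, Status: v1.ConditionTrue},
			},
		},
	}
	nodeInfo := schedulernodeinfo.NewNodeInfo()
	nodeInfo.SetNode(node)

	// A pod with no requests or limits is best-effort QoS, the only
	// class this predicate rejects under memory pressure.
	pod := &v1.Pod{}

	fit, reasons, err := predicates.CheckNodeMemoryPressurePredicate(pod, nil, nodeInfo)
	if err != nil {
		panic(err)
	}
	if !fit {
		for _, r := range reasons {
			fmt.Println(r.GetReason()) // human-readable failure reason
		}
	}
}
```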
// CheckNodeConditionPredicate checks if a pod can be scheduled on a node reporting // CheckNodeConditionPredicate checks if a pod can be scheduled on a node reporting
// network unavailable and not ready condition. Only node conditions are accounted in this predicate. // network unavailable and not ready condition. Only node conditions are accounted in this predicate.
func CheckNodeConditionPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func CheckNodeConditionPredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
reasons := []algorithm.PredicateFailureReason{} reasons := []PredicateFailureReason{}
if nodeInfo == nil || nodeInfo.Node() == nil { if nodeInfo == nil || nodeInfo.Node() == nil {
return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil return false, []PredicateFailureReason{ErrNodeUnknownCondition}, nil
} }
node := nodeInfo.Node() node := nodeInfo.Node()
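Unlike the pressure predicates, which fail fast with a single reason, `CheckNodeConditionPredicate` starts from an empty `reasons` slice and accumulates. A hedged sketch of that accumulate-and-report shape applied to a list of predicates — `runAll` is an invented helper, not part of this package:

```go
// Sketch only: pooling reasons across predicates the way
// CheckNodeConditionPredicate pools node conditions.
package sketch

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

func runAll(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo, preds []predicates.FitPredicate) (bool, []predicates.PredicateFailureReason, error) {
	var failures []predicates.PredicateFailureReason
	for _, p := range preds {
		fit, reasons, err := p(pod, meta, nodeInfo)
		if err != nil {
			return false, failures, err
		}
		if !fit {
			// Keep collecting instead of failing fast, so the caller
			// can report every reason at once.
			failures = append(failures, reasons...)
		}
	}
	return len(failures) == 0, failures, nil
}
```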
@ -1607,14 +1610,14 @@ type VolumeBindingChecker struct {
// //
// The predicate returns true if all bound PVCs have compatible PVs with the node, and if all unbound // The predicate returns true if all bound PVCs have compatible PVs with the node, and if all unbound
// PVCs can be matched with an available and node-compatible PV. // PVCs can be matched with an available and node-compatible PV.
func NewVolumeBindingPredicate(binder *volumebinder.VolumeBinder) algorithm.FitPredicate { func NewVolumeBindingPredicate(binder *volumebinder.VolumeBinder) FitPredicate {
c := &VolumeBindingChecker{ c := &VolumeBindingChecker{
binder: binder, binder: binder,
} }
return c.predicate return c.predicate
} }
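`NewVolumeBindingPredicate` is the closure-factory idiom this package uses: bind the dependency into a checker struct once, hand back a plain `FitPredicate`. The same idiom with an invented dependency — the checker type, constructor, and label check below are illustrative only, and an exported reason is reused purely for shape:

```go
// Sketch of the checker-struct + closure-factory idiom; nothing here
// is from the PR itself.
package sketch

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// requiredLabelChecker carries the predicate's only dependency, the way
// VolumeBindingChecker carries its binder.
type requiredLabelChecker struct {
	key string
}

// NewRequiredLabelPredicate binds the key once and returns a plain
// predicates.FitPredicate.
func NewRequiredLabelPredicate(key string) predicates.FitPredicate {
	c := &requiredLabelChecker{key: key}
	return c.predicate
}

func (c *requiredLabelChecker) predicate(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
	node := nodeInfo.Node()
	if node == nil {
		return false, nil, fmt.Errorf("node not found")
	}
	if _, ok := node.Labels[c.key]; !ok {
		// A real predicate would define its own reason; this one is
		// borrowed purely to show the return shape.
		return false, []predicates.PredicateFailureReason{predicates.ErrNodeLabelPresenceViolated}, nil
	}
	return true, nil, nil
}
```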
func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
return true, nil, nil return true, nil, nil
} }
@ -1629,7 +1632,7 @@ func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMe
return false, nil, err return false, nil, err
} }
failReasons := []algorithm.PredicateFailureReason{} failReasons := []PredicateFailureReason{}
if !boundSatisfied { if !boundSatisfied {
klog.V(5).Infof("Bound PVs not satisfied for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name) klog.V(5).Infof("Bound PVs not satisfied for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
failReasons = append(failReasons, ErrVolumeNodeConflict) failReasons = append(failReasons, ErrVolumeNodeConflict)


@ -34,7 +34,6 @@ import (
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
@ -91,7 +90,7 @@ func newResourceInitPod(pod *v1.Pod, usage ...schedulernodeinfo.Resource) *v1.Po
return pod return pod
} }
func PredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulernodeinfo.NodeInfo) algorithm.PredicateMetadata { func GetPredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata {
pm := PredicateMetadataFactory{schedulertesting.FakePodLister{p}} pm := PredicateMetadataFactory{schedulertesting.FakePodLister{p}}
return pm.GetMetadata(p, nodeInfo) return pm.GetMetadata(p, nodeInfo)
} }
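Renaming the test helper to `GetPredicateMetadata` frees the `PredicateMetadata` identifier for the interface now defined in this package. A minimal in-package test using it, with the assertion shape the tables below rely on — the test name and values are made up for illustration:

```go
// Illustrative test, not from the PR; shows the renamed helper and a
// single table-free case.
package predicates

import (
	"testing"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

func TestPodFitsHostSketch(t *testing.T) {
	pod := &v1.Pod{Spec: v1.PodSpec{NodeName: "machine-1"}}
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine-2"}}

	nodeInfo := schedulernodeinfo.NewNodeInfo()
	nodeInfo.SetNode(node)

	// nil node map: the metadata producer precomputes nothing extra.
	fits, reasons, err := PodFitsHost(pod, GetPredicateMetadata(pod, nil), nodeInfo)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if fits {
		t.Errorf("expected host mismatch, got fit; reasons: %v", reasons)
	}
}
```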
@ -102,7 +101,7 @@ func TestPodFitsResources(t *testing.T) {
nodeInfo *schedulernodeinfo.NodeInfo nodeInfo *schedulernodeinfo.NodeInfo
fits bool fits bool
name string name string
reasons []algorithm.PredicateFailureReason reasons []PredicateFailureReason
ignoredExtendedResources sets.String ignoredExtendedResources sets.String
}{ }{
{ {
@ -118,7 +117,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
fits: false, fits: false,
name: "too many resources fails", name: "too many resources fails",
reasons: []algorithm.PredicateFailureReason{ reasons: []PredicateFailureReason{
NewInsufficientResourceError(v1.ResourceCPU, 1, 10, 10), NewInsufficientResourceError(v1.ResourceCPU, 1, 10, 10),
NewInsufficientResourceError(v1.ResourceMemory, 1, 20, 20), NewInsufficientResourceError(v1.ResourceMemory, 1, 20, 20),
}, },
@ -129,7 +128,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 19})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 19})),
fits: false, fits: false,
name: "too many resources fails due to init container cpu", name: "too many resources fails due to init container cpu",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10)}, reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10)},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 3, Memory: 1}, schedulernodeinfo.Resource{MilliCPU: 2, Memory: 1}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 3, Memory: 1}, schedulernodeinfo.Resource{MilliCPU: 2, Memory: 1}),
@ -137,7 +136,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 19})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 19})),
fits: false, fits: false,
name: "too many resources fails due to highest init container cpu", name: "too many resources fails due to highest init container cpu",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10)}, reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10)},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 3}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 3}),
@ -145,7 +144,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
fits: false, fits: false,
name: "too many resources fails due to init container memory", name: "too many resources fails due to init container memory",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20)}, reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20)},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 3}, schedulernodeinfo.Resource{MilliCPU: 1, Memory: 2}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 3}, schedulernodeinfo.Resource{MilliCPU: 1, Memory: 2}),
@ -153,7 +152,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
fits: false, fits: false,
name: "too many resources fails due to highest init container memory", name: "too many resources fails due to highest init container memory",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20)}, reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20)},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
@ -182,7 +181,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 5})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 5})),
fits: false, fits: false,
name: "one resource memory fits", name: "one resource memory fits",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 2, 9, 10)}, reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 2, 9, 10)},
}, },
{ {
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 2}), pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 2}),
@ -190,7 +189,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
fits: false, fits: false,
name: "one resource cpu fits", name: "one resource cpu fits",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 2, 19, 20)}, reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 2, 19, 20)},
}, },
{ {
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}), pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
@ -225,7 +224,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
fits: false, fits: false,
name: "extended resource capacity enforced", name: "extended resource capacity enforced",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 10, 0, 5)}, reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 10, 0, 5)},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
@ -234,7 +233,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
fits: false, fits: false,
name: "extended resource capacity enforced for init container", name: "extended resource capacity enforced for init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 10, 0, 5)}, reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 10, 0, 5)},
}, },
{ {
pod: newResourcePod( pod: newResourcePod(
@ -243,7 +242,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
fits: false, fits: false,
name: "extended resource allocatable enforced", name: "extended resource allocatable enforced",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 1, 5, 5)}, reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 1, 5, 5)},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
@ -252,7 +251,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
fits: false, fits: false,
name: "extended resource allocatable enforced for init container", name: "extended resource allocatable enforced for init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 1, 5, 5)}, reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 1, 5, 5)},
}, },
{ {
pod: newResourcePod( pod: newResourcePod(
@ -262,7 +261,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
fits: false, fits: false,
name: "extended resource allocatable enforced for multiple containers", name: "extended resource allocatable enforced for multiple containers",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 6, 2, 5)}, reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 6, 2, 5)},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
@ -281,7 +280,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
fits: false, fits: false,
name: "extended resource allocatable enforced for multiple init containers", name: "extended resource allocatable enforced for multiple init containers",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 6, 2, 5)}, reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 6, 2, 5)},
}, },
{ {
pod: newResourcePod( pod: newResourcePod(
@ -290,7 +289,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
fits: false, fits: false,
name: "extended resource allocatable enforced for unknown resource", name: "extended resource allocatable enforced for unknown resource",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceB, 1, 0, 0)}, reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceB, 1, 0, 0)},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
@ -299,7 +298,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
fits: false, fits: false,
name: "extended resource allocatable enforced for unknown resource for init container", name: "extended resource allocatable enforced for unknown resource for init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceB, 1, 0, 0)}, reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceB, 1, 0, 0)},
}, },
{ {
pod: newResourcePod( pod: newResourcePod(
@ -308,7 +307,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
fits: false, fits: false,
name: "kubernetes.io resource capacity enforced", name: "kubernetes.io resource capacity enforced",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(kubernetesIOResourceA, 10, 0, 0)}, reasons: []PredicateFailureReason{NewInsufficientResourceError(kubernetesIOResourceA, 10, 0, 0)},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
@ -317,7 +316,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
fits: false, fits: false,
name: "kubernetes.io resource capacity enforced for init container", name: "kubernetes.io resource capacity enforced for init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(kubernetesIOResourceB, 10, 0, 0)}, reasons: []PredicateFailureReason{NewInsufficientResourceError(kubernetesIOResourceB, 10, 0, 0)},
}, },
{ {
pod: newResourcePod( pod: newResourcePod(
@ -326,7 +325,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
fits: false, fits: false,
name: "hugepages resource capacity enforced", name: "hugepages resource capacity enforced",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)}, reasons: []PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
@ -335,7 +334,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
fits: false, fits: false,
name: "hugepages resource capacity enforced for init container", name: "hugepages resource capacity enforced for init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)}, reasons: []PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)},
}, },
{ {
pod: newResourcePod( pod: newResourcePod(
@ -345,7 +344,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})),
fits: false, fits: false,
name: "hugepages resource allocatable enforced for multiple containers", name: "hugepages resource allocatable enforced for multiple containers",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 6, 2, 5)}, reasons: []PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 6, 2, 5)},
}, },
{ {
pod: newResourcePod( pod: newResourcePod(
@ -363,7 +362,7 @@ func TestPodFitsResources(t *testing.T) {
node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}} node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
test.nodeInfo.SetNode(&node) test.nodeInfo.SetNode(&node)
RegisterPredicateMetadataProducerWithExtendedResourceOptions(test.ignoredExtendedResources) RegisterPredicateMetadataProducerWithExtendedResourceOptions(test.ignoredExtendedResources)
meta := PredicateMetadata(test.pod, nil) meta := GetPredicateMetadata(test.pod, nil)
fits, reasons, err := PodFitsResources(test.pod, meta, test.nodeInfo) fits, reasons, err := PodFitsResources(test.pod, meta, test.nodeInfo)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
@ -382,7 +381,7 @@ func TestPodFitsResources(t *testing.T) {
nodeInfo *schedulernodeinfo.NodeInfo nodeInfo *schedulernodeinfo.NodeInfo
fits bool fits bool
name string name string
reasons []algorithm.PredicateFailureReason reasons []PredicateFailureReason
}{ }{
{ {
pod: &v1.Pod{}, pod: &v1.Pod{},
@ -390,7 +389,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
fits: false, fits: false,
name: "even without specified resources predicate fails when there's no space for additional pod", name: "even without specified resources predicate fails when there's no space for additional pod",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)}, reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
}, },
{ {
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
@ -398,7 +397,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
fits: false, fits: false,
name: "even if both resources fit predicate fails when there's no space for additional pod", name: "even if both resources fit predicate fails when there's no space for additional pod",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)}, reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
}, },
{ {
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}), pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
@ -406,7 +405,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
fits: false, fits: false,
name: "even for equal edge case predicate fails when there's no space for additional pod", name: "even for equal edge case predicate fails when there's no space for additional pod",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)}, reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
@ -414,14 +413,14 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
fits: false, fits: false,
name: "even for equal edge case predicate fails when there's no space for additional pod due to init container", name: "even for equal edge case predicate fails when there's no space for additional pod due to init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)}, reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
}, },
} }
for _, test := range notEnoughPodsTests { for _, test := range notEnoughPodsTests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 1, 0, 0, 0)}} node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 1, 0, 0, 0)}}
test.nodeInfo.SetNode(&node) test.nodeInfo.SetNode(&node)
fits, reasons, err := PodFitsResources(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo) fits, reasons, err := PodFitsResources(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }
@ -439,7 +438,7 @@ func TestPodFitsResources(t *testing.T) {
nodeInfo *schedulernodeinfo.NodeInfo nodeInfo *schedulernodeinfo.NodeInfo
fits bool fits bool
name string name string
reasons []algorithm.PredicateFailureReason reasons []PredicateFailureReason
}{ }{
{ {
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
@ -447,7 +446,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 10})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 10})),
fits: false, fits: false,
name: "due to container scratch disk", name: "due to container scratch disk",
reasons: []algorithm.PredicateFailureReason{ reasons: []PredicateFailureReason{
NewInsufficientResourceError(v1.ResourceCPU, 1, 10, 10), NewInsufficientResourceError(v1.ResourceCPU, 1, 10, 10),
}, },
}, },
@ -464,7 +463,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 2})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 2})),
fits: false, fits: false,
name: "storage ephemeral local storage request exceeds allocatable", name: "storage ephemeral local storage request exceeds allocatable",
reasons: []algorithm.PredicateFailureReason{ reasons: []PredicateFailureReason{
NewInsufficientResourceError(v1.ResourceEphemeralStorage, 25, 0, 20), NewInsufficientResourceError(v1.ResourceEphemeralStorage, 25, 0, 20),
}, },
}, },
@ -481,7 +480,7 @@ func TestPodFitsResources(t *testing.T) {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}} node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
test.nodeInfo.SetNode(&node) test.nodeInfo.SetNode(&node)
fits, reasons, err := PodFitsResources(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo) fits, reasons, err := PodFitsResources(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }
@ -538,13 +537,13 @@ func TestPodFitsHost(t *testing.T) {
name: "host doesn't match", name: "host doesn't match",
}, },
} }
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrPodNotMatchHostName} expectedFailureReasons := []PredicateFailureReason{ErrPodNotMatchHostName}
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
nodeInfo := schedulernodeinfo.NewNodeInfo() nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(test.node) nodeInfo.SetNode(test.node)
fits, reasons, err := PodFitsHost(test.pod, PredicateMetadata(test.pod, nil), nodeInfo) fits, reasons, err := PodFitsHost(test.pod, GetPredicateMetadata(test.pod, nil), nodeInfo)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }
@ -680,11 +679,11 @@ func TestPodFitsHostPorts(t *testing.T) {
name: "UDP hostPort conflict due to 0.0.0.0 hostIP", name: "UDP hostPort conflict due to 0.0.0.0 hostIP",
}, },
} }
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrPodNotFitsHostPorts} expectedFailureReasons := []PredicateFailureReason{ErrPodNotFitsHostPorts}
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
fits, reasons, err := PodFitsHostPorts(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo) fits, reasons, err := PodFitsHostPorts(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }
@ -732,11 +731,11 @@ func TestGCEDiskConflicts(t *testing.T) {
{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"}, {&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"},
{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"}, {&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"},
} }
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict} expectedFailureReasons := []PredicateFailureReason{ErrDiskConflict}
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
ok, reasons, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo) ok, reasons, err := NoDiskConflict(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }
@ -787,11 +786,11 @@ func TestAWSDiskConflicts(t *testing.T) {
{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"}, {&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"},
{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"}, {&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"},
} }
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict} expectedFailureReasons := []PredicateFailureReason{ErrDiskConflict}
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
ok, reasons, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo) ok, reasons, err := NoDiskConflict(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }
@ -848,11 +847,11 @@ func TestRBDDiskConflicts(t *testing.T) {
{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"}, {&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"},
{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"}, {&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"},
} }
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict} expectedFailureReasons := []PredicateFailureReason{ErrDiskConflict}
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
ok, reasons, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo) ok, reasons, err := NoDiskConflict(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }
@ -909,11 +908,11 @@ func TestISCSIDiskConflicts(t *testing.T) {
{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"}, {&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"},
{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"}, {&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"},
} }
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict} expectedFailureReasons := []PredicateFailureReason{ErrDiskConflict}
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
ok, reasons, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo) ok, reasons, err := NoDiskConflict(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }
@ -1603,7 +1602,7 @@ func TestPodFitsSelector(t *testing.T) {
name: "Pod with two terms: both matchFields and matchExpressions do not match", name: "Pod with two terms: both matchFields and matchExpressions do not match",
}, },
} }
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeSelectorNotMatch} expectedFailureReasons := []PredicateFailureReason{ErrNodeSelectorNotMatch}
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
@ -1614,7 +1613,7 @@ func TestPodFitsSelector(t *testing.T) {
nodeInfo := schedulernodeinfo.NewNodeInfo() nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(&node) nodeInfo.SetNode(&node)
fits, reasons, err := PodMatchNodeSelector(test.pod, PredicateMetadata(test.pod, nil), nodeInfo) fits, reasons, err := PodMatchNodeSelector(test.pod, GetPredicateMetadata(test.pod, nil), nodeInfo)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }
@ -1674,7 +1673,7 @@ func TestNodeLabelPresence(t *testing.T) {
name: "all labels match, presence false", name: "all labels match, presence false",
}, },
} }
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeLabelPresenceViolated} expectedFailureReasons := []PredicateFailureReason{ErrNodeLabelPresenceViolated}
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
@ -1683,7 +1682,7 @@ func TestNodeLabelPresence(t *testing.T) {
nodeInfo.SetNode(&node) nodeInfo.SetNode(&node)
labelChecker := NodeLabelChecker{test.labels, test.presence} labelChecker := NodeLabelChecker{test.labels, test.presence}
fits, reasons, err := labelChecker.CheckNodeLabelPresence(test.pod, PredicateMetadata(test.pod, nil), nodeInfo) fits, reasons, err := labelChecker.CheckNodeLabelPresence(test.pod, GetPredicateMetadata(test.pod, nil), nodeInfo)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }
@ -1823,7 +1822,7 @@ func TestServiceAffinity(t *testing.T) {
name: "service pod on different node, multiple labels, all match", name: "service pod on different node, multiple labels, all match",
}, },
} }
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrServiceAffinityViolated} expectedFailureReasons := []PredicateFailureReason{ErrServiceAffinityViolated}
for _, test := range tests { for _, test := range tests {
testIt := func(skipPrecompute bool) { testIt := func(skipPrecompute bool) {
t.Run(fmt.Sprintf("%v/skipPrecompute/%v", test.name, skipPrecompute), func(t *testing.T) { t.Run(fmt.Sprintf("%v/skipPrecompute/%v", test.name, skipPrecompute), func(t *testing.T) {
@ -1839,7 +1838,7 @@ func TestServiceAffinity(t *testing.T) {
precompute(pm) precompute(pm)
} }
}) })
if pmeta, ok := (PredicateMetadata(test.pod, nodeInfoMap)).(*predicateMetadata); ok { if pmeta, ok := (GetPredicateMetadata(test.pod, nodeInfoMap)).(*predicateMetadata); ok {
fits, reasons, err := predicate(test.pod, pmeta, nodeInfo) fits, reasons, err := predicate(test.pod, pmeta, nodeInfo)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
@ -1885,7 +1884,7 @@ func TestRunGeneralPredicates(t *testing.T) {
fits bool fits bool
name string name string
wErr error wErr error
reasons []algorithm.PredicateFailureReason reasons []PredicateFailureReason
}{ }{
{ {
pod: &v1.Pod{}, pod: &v1.Pod{},
@ -1909,7 +1908,7 @@ func TestRunGeneralPredicates(t *testing.T) {
}, },
fits: false, fits: false,
wErr: nil, wErr: nil,
reasons: []algorithm.PredicateFailureReason{ reasons: []PredicateFailureReason{
NewInsufficientResourceError(v1.ResourceCPU, 8, 5, 10), NewInsufficientResourceError(v1.ResourceCPU, 8, 5, 10),
NewInsufficientResourceError(v1.ResourceMemory, 10, 19, 20), NewInsufficientResourceError(v1.ResourceMemory, 10, 19, 20),
}, },
@ -1928,7 +1927,7 @@ func TestRunGeneralPredicates(t *testing.T) {
}, },
fits: false, fits: false,
wErr: nil, wErr: nil,
reasons: []algorithm.PredicateFailureReason{ErrPodNotMatchHostName}, reasons: []PredicateFailureReason{ErrPodNotMatchHostName},
name: "host not match", name: "host not match",
}, },
{ {
@ -1940,14 +1939,14 @@ func TestRunGeneralPredicates(t *testing.T) {
}, },
fits: false, fits: false,
wErr: nil, wErr: nil,
reasons: []algorithm.PredicateFailureReason{ErrPodNotFitsHostPorts}, reasons: []PredicateFailureReason{ErrPodNotFitsHostPorts},
name: "hostport conflict", name: "hostport conflict",
}, },
} }
for _, test := range resourceTests { for _, test := range resourceTests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
test.nodeInfo.SetNode(test.node) test.nodeInfo.SetNode(test.node)
fits, reasons, err := GeneralPredicates(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo) fits, reasons, err := GeneralPredicates(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }
@ -1976,7 +1975,7 @@ func TestInterPodAffinity(t *testing.T) {
node *v1.Node node *v1.Node
fits bool fits bool
name string name string
expectFailureReasons []algorithm.PredicateFailureReason expectFailureReasons []PredicateFailureReason
}{ }{
{ {
pod: new(v1.Pod), pod: new(v1.Pod),
@ -2076,7 +2075,7 @@ func TestInterPodAffinity(t *testing.T) {
node: &node1, node: &node1,
fits: false, fits: false,
name: "Does not satisfy the PodAffinity with labelSelector because of diff Namespace", name: "Does not satisfy the PodAffinity with labelSelector because of diff Namespace",
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch}, expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
}, },
{ {
pod: &v1.Pod{ pod: &v1.Pod{
@ -2107,7 +2106,7 @@ func TestInterPodAffinity(t *testing.T) {
node: &node1, node: &node1,
fits: false, fits: false,
name: "Doesn't satisfy the PodAffinity because of unmatching labelSelector with the existing pod", name: "Doesn't satisfy the PodAffinity because of unmatching labelSelector with the existing pod",
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch}, expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
}, },
{ {
pod: &v1.Pod{ pod: &v1.Pod{
@ -2204,7 +2203,7 @@ func TestInterPodAffinity(t *testing.T) {
node: &node1, node: &node1,
fits: false, fits: false,
name: "The labelSelector requirements(items of matchExpressions) are ANDed, the pod cannot schedule onto the node because one of the matchExpression item don't match.", name: "The labelSelector requirements(items of matchExpressions) are ANDed, the pod cannot schedule onto the node because one of the matchExpression item don't match.",
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch}, expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
}, },
{ {
pod: &v1.Pod{ pod: &v1.Pod{
@ -2371,7 +2370,7 @@ func TestInterPodAffinity(t *testing.T) {
node: &node1, node: &node1,
fits: false, fits: false,
name: "satisfies the PodAffinity but doesn't satisfy the PodAntiAffinity with the existing pod", name: "satisfies the PodAffinity but doesn't satisfy the PodAntiAffinity with the existing pod",
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
}, },
{ {
pod: &v1.Pod{ pod: &v1.Pod{
@ -2444,7 +2443,7 @@ func TestInterPodAffinity(t *testing.T) {
node: &node1, node: &node1,
fits: false, fits: false,
name: "satisfies the PodAffinity and PodAntiAffinity but doesn't satisfy PodAntiAffinity symmetry with the existing pod", name: "satisfies the PodAffinity and PodAntiAffinity but doesn't satisfy PodAntiAffinity symmetry with the existing pod",
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch}, expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
}, },
{ {
pod: &v1.Pod{ pod: &v1.Pod{
@ -2476,7 +2475,7 @@ func TestInterPodAffinity(t *testing.T) {
node: &node1, node: &node1,
fits: false, fits: false,
name: "pod matches its own Label in PodAffinity and that matches the existing pod Labels", name: "pod matches its own Label in PodAffinity and that matches the existing pod Labels",
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch}, expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
}, },
{ {
pod: &v1.Pod{ pod: &v1.Pod{
@ -2512,7 +2511,7 @@ func TestInterPodAffinity(t *testing.T) {
node: &node1, node: &node1,
fits: false, fits: false,
name: "verify that PodAntiAffinity from existing pod is respected when pod has no AntiAffinity constraints. doesn't satisfy PodAntiAffinity symmetry with the existing pod", name: "verify that PodAntiAffinity from existing pod is respected when pod has no AntiAffinity constraints. doesn't satisfy PodAntiAffinity symmetry with the existing pod",
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch}, expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
}, },
{ {
pod: &v1.Pod{ pod: &v1.Pod{
@ -2612,7 +2611,7 @@ func TestInterPodAffinity(t *testing.T) {
node: &node1, node: &node1,
fits: false, fits: false,
name: "satisfies the PodAntiAffinity with existing pod but doesn't satisfy PodAntiAffinity symmetry with incoming pod", name: "satisfies the PodAntiAffinity with existing pod but doesn't satisfy PodAntiAffinity symmetry with incoming pod",
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
}, },
{ {
pod: &v1.Pod{ pod: &v1.Pod{
@ -2675,7 +2674,7 @@ func TestInterPodAffinity(t *testing.T) {
}, },
node: &node1, node: &node1,
fits: false, fits: false,
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
name: "PodAntiAffinity symmetry check a1: incoming pod and existing pod partially match each other on AffinityTerms", name: "PodAntiAffinity symmetry check a1: incoming pod and existing pod partially match each other on AffinityTerms",
}, },
{ {
@ -2739,7 +2738,7 @@ func TestInterPodAffinity(t *testing.T) {
}, },
node: &node1, node: &node1,
fits: false, fits: false,
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch}, expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
name: "PodAntiAffinity symmetry check a2: incoming pod and existing pod partially match each other on AffinityTerms", name: "PodAntiAffinity symmetry check a2: incoming pod and existing pod partially match each other on AffinityTerms",
}, },
{ {
@ -2814,7 +2813,7 @@ func TestInterPodAffinity(t *testing.T) {
}, },
node: &node1, node: &node1,
fits: false, fits: false,
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch}, expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
name: "PodAntiAffinity symmetry check b1: incoming pod and existing pod partially match each other on AffinityTerms", name: "PodAntiAffinity symmetry check b1: incoming pod and existing pod partially match each other on AffinityTerms",
}, },
{ {
@ -2889,7 +2888,7 @@ func TestInterPodAffinity(t *testing.T) {
}, },
node: &node1, node: &node1,
fits: false, fits: false,
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch}, expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
name: "PodAntiAffinity symmetry check b2: incoming pod and existing pod partially match each other on AffinityTerms", name: "PodAntiAffinity symmetry check b2: incoming pod and existing pod partially match each other on AffinityTerms",
}, },
} }
@ -2911,7 +2910,7 @@ func TestInterPodAffinity(t *testing.T) {
nodeInfo := schedulernodeinfo.NewNodeInfo(podsOnNode...) nodeInfo := schedulernodeinfo.NewNodeInfo(podsOnNode...)
nodeInfo.SetNode(test.node) nodeInfo.SetNode(test.node)
nodeInfoMap := map[string]*schedulernodeinfo.NodeInfo{test.node.Name: nodeInfo} nodeInfoMap := map[string]*schedulernodeinfo.NodeInfo{test.node.Name: nodeInfo}
fits, reasons, _ := fit.InterPodAffinityMatches(test.pod, PredicateMetadata(test.pod, nodeInfoMap), nodeInfo) fits, reasons, _ := fit.InterPodAffinityMatches(test.pod, GetPredicateMetadata(test.pod, nodeInfoMap), nodeInfo)
if !fits && !reflect.DeepEqual(reasons, test.expectFailureReasons) { if !fits && !reflect.DeepEqual(reasons, test.expectFailureReasons) {
t.Errorf("unexpected failure reasons: %v, want: %v", reasons, test.expectFailureReasons) t.Errorf("unexpected failure reasons: %v, want: %v", reasons, test.expectFailureReasons)
} }
@ -2944,7 +2943,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
pod *v1.Pod pod *v1.Pod
pods []*v1.Pod pods []*v1.Pod
nodes []v1.Node nodes []v1.Node
nodesExpectAffinityFailureReasons [][]algorithm.PredicateFailureReason nodesExpectAffinityFailureReasons [][]PredicateFailureReason
fits map[string]bool fits map[string]bool
name string name string
nometa bool nometa bool
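`nodesExpectAffinityFailureReasons` is a parallel table: entry `i` holds the reasons expected on `nodes[i]`, with `nil` meaning that node fits. A tiny standalone sketch of consuming such a table — strings stand in for the real `PredicateFailureReason` values, and the loop is invented:

```go
// Shape sketch only: parallel per-node expectations, nil => fits.
package main

import "fmt"

func main() {
	nodes := []string{"nodeA", "nodeB", "nodeC"}
	expect := [][]string{
		{"PodAffinityNotMatch", "PodAntiAffinityRulesNotMatch"},
		{"PodAffinityNotMatch", "PodAntiAffinityRulesNotMatch"},
		nil, // nodeC fits
	}
	for i, n := range nodes {
		if expect[i] == nil {
			fmt.Printf("%s: fits\n", n)
			continue
		}
		fmt.Printf("%s: fails with %v\n", n, expect[i])
	}
}
```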
@ -2985,7 +2984,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
"machine2": true, "machine2": true,
"machine3": false, "machine3": false,
}, },
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{nil, nil, {ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch}}, nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{nil, nil, {ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch}},
name: "A pod can be scheduled onto all the nodes that have the same topology key & label value with one of them has an existing pod that matches the affinity rules", name: "A pod can be scheduled onto all the nodes that have the same topology key & label value with one of them has an existing pod that matches the affinity rules",
}, },
{ {
@ -3034,7 +3033,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "hostname": "h1"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "hostname": "h1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "hostname": "h2"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "hostname": "h2"}}},
}, },
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{nil, nil}, nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{nil, nil},
fits: map[string]bool{ fits: map[string]bool{
"nodeA": false, "nodeA": false,
"nodeB": true, "nodeB": true,
@ -3087,7 +3086,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"zone": "az1", "hostname": "h1"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"zone": "az1", "hostname": "h1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"zone": "az2", "hostname": "h2"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"zone": "az2", "hostname": "h2"}}},
}, },
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{nil, nil}, nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{nil, nil},
fits: map[string]bool{ fits: map[string]bool{
"nodeA": true, "nodeA": true,
"nodeB": true, "nodeB": true,
@ -3125,7 +3124,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "hostname": "nodeA"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "hostname": "nodeB"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "hostname": "nodeB"}}},
}, },
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}}, nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}},
fits: map[string]bool{ fits: map[string]bool{
"nodeA": false, "nodeA": false,
"nodeB": false, "nodeB": false,
@ -3174,7 +3173,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
}, },
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{ nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
}, },
@ -3215,7 +3214,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: labelRgChinaAzAz1}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: labelRgChinaAzAz1}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}},
}, },
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, nil}, nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, nil},
fits: map[string]bool{ fits: map[string]bool{
"nodeA": false, "nodeA": false,
"nodeB": false, "nodeB": false,
@ -3279,7 +3278,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeD", Labels: labelRgUS}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeD", Labels: labelRgUS}},
}, },
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{ nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
@ -3359,7 +3358,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: labelRgChinaAzAz1}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: labelRgChinaAzAz1}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}},
}, },
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{ nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
nil, nil,
@ -3403,7 +3402,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeB"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeB"}}},
}, },
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{}, nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{},
fits: map[string]bool{ fits: map[string]bool{
"nodeA": true, "nodeA": true,
"nodeB": true, "nodeB": true,
@ -3444,7 +3443,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeB"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeB"}}},
}, },
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{}, nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{},
fits: map[string]bool{ fits: map[string]bool{
"nodeA": true, "nodeA": true,
"nodeB": true, "nodeB": true,
@ -3507,7 +3506,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
}, },
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{ nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
}, },
@ -3568,7 +3567,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
}, },
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{ nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
}, },
@ -3621,7 +3620,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
}, },
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{ nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
}, },
fits: map[string]bool{ fits: map[string]bool{
@ -3675,7 +3674,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
}, },
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{ nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
}, },
fits: map[string]bool{ fits: map[string]bool{
@ -3727,7 +3726,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
}, },
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{ nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
}, },
@ -3782,7 +3781,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
}, },
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{ nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
}, },
@ -3869,7 +3868,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: map[string]string{"region": "r1", "zone": "z3", "hostname": "nodeC"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: map[string]string{"region": "r1", "zone": "z3", "hostname": "nodeC"}}},
}, },
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{ nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
}, },
@ -3925,7 +3924,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeB"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeB"}}},
}, },
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{ nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{}, {},
{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
}, },
@ -3986,7 +3985,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
}, },
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{ nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
}, },
@ -3998,7 +3997,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
}, },
} }
selectorExpectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeSelectorNotMatch} selectorExpectedFailureReasons := []PredicateFailureReason{ErrNodeSelectorNotMatch}
for indexTest, test := range tests { for indexTest, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
@ -4023,9 +4022,9 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
podLister: schedulertesting.FakePodLister(test.pods), podLister: schedulertesting.FakePodLister(test.pods),
} }
var meta algorithm.PredicateMetadata var meta PredicateMetadata
if !test.nometa { if !test.nometa {
meta = PredicateMetadata(test.pod, nodeInfoMap) meta = GetPredicateMetadata(test.pod, nodeInfoMap)
} }
fits, reasons, _ := testFit.InterPodAffinityMatches(test.pod, meta, nodeInfoMap[node.Name]) fits, reasons, _ := testFit.InterPodAffinityMatches(test.pod, meta, nodeInfoMap[node.Name])
@ -4037,7 +4036,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
nodeInfo := schedulernodeinfo.NewNodeInfo() nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(&node) nodeInfo.SetNode(&node)
nodeInfoMap := map[string]*schedulernodeinfo.NodeInfo{node.Name: nodeInfo} nodeInfoMap := map[string]*schedulernodeinfo.NodeInfo{node.Name: nodeInfo}
fits2, reasons, err := PodMatchNodeSelector(test.pod, PredicateMetadata(test.pod, nodeInfoMap), nodeInfo) fits2, reasons, err := PodMatchNodeSelector(test.pod, GetPredicateMetadata(test.pod, nodeInfoMap), nodeInfo)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }
@ -4238,13 +4237,13 @@ func TestPodToleratesTaints(t *testing.T) {
"but the effect of taint on node is PreferNochedule. Pod can be scheduled onto the node", "but the effect of taint on node is PreferNochedule. Pod can be scheduled onto the node",
}, },
} }
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrTaintsTolerationsNotMatch} expectedFailureReasons := []PredicateFailureReason{ErrTaintsTolerationsNotMatch}
for _, test := range podTolerateTaintsTests { for _, test := range podTolerateTaintsTests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
nodeInfo := schedulernodeinfo.NewNodeInfo() nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(&test.node) nodeInfo.SetNode(&test.node)
fits, reasons, err := PodToleratesNodeTaints(test.pod, PredicateMetadata(test.pod, nil), nodeInfo) fits, reasons, err := PodToleratesNodeTaints(test.pod, GetPredicateMetadata(test.pod, nil), nodeInfo)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }
@ -4352,11 +4351,11 @@ func TestPodSchedulesOnNodeWithMemoryPressureCondition(t *testing.T) {
name: "non best-effort pod schedulable on node without memory pressure condition on", name: "non best-effort pod schedulable on node without memory pressure condition on",
}, },
} }
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeUnderMemoryPressure} expectedFailureReasons := []PredicateFailureReason{ErrNodeUnderMemoryPressure}
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
fits, reasons, err := CheckNodeMemoryPressurePredicate(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo) fits, reasons, err := CheckNodeMemoryPressurePredicate(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }
@ -4426,11 +4425,11 @@ func TestPodSchedulesOnNodeWithDiskPressureCondition(t *testing.T) {
name: "pod not schedulable on node with pressure condition on", name: "pod not schedulable on node with pressure condition on",
}, },
} }
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeUnderDiskPressure} expectedFailureReasons := []PredicateFailureReason{ErrNodeUnderDiskPressure}
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
fits, reasons, err := CheckNodeDiskPressurePredicate(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo) fits, reasons, err := CheckNodeDiskPressurePredicate(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }
@ -4486,11 +4485,11 @@ func TestPodSchedulesOnNodeWithPIDPressureCondition(t *testing.T) {
name: "pod not schedulable on node with pressure condition on", name: "pod not schedulable on node with pressure condition on",
}, },
} }
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeUnderPIDPressure} expectedFailureReasons := []PredicateFailureReason{ErrNodeUnderPIDPressure}
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
fits, reasons, err := CheckNodePIDPressurePredicate(&v1.Pod{}, PredicateMetadata(&v1.Pod{}, nil), test.nodeInfo) fits, reasons, err := CheckNodePIDPressurePredicate(&v1.Pod{}, GetPredicateMetadata(&v1.Pod{}, nil), test.nodeInfo)
if err != nil { if err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
@ -4673,7 +4672,7 @@ func TestVolumeZonePredicate(t *testing.T) {
}, },
} }
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrVolumeZoneConflict} expectedFailureReasons := []PredicateFailureReason{ErrVolumeZoneConflict}
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
@ -4767,7 +4766,7 @@ func TestVolumeZonePredicateMultiZone(t *testing.T) {
}, },
} }
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrVolumeZoneConflict} expectedFailureReasons := []PredicateFailureReason{ErrVolumeZoneConflict}
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {

View File

@ -31,10 +31,6 @@ var NodeFieldSelectorKeys = map[string]func(*v1.Node) string{
schedulerapi.NodeFieldSelectorKeyNodeName: func(n *v1.Node) string { return n.Name }, schedulerapi.NodeFieldSelectorKeyNodeName: func(n *v1.Node) string { return n.Name },
} }
// FitPredicate is a function that indicates if a pod fits into an existing node.
// The failure information is given by the error.
type FitPredicate func(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error)
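// Editor's sketch, not part of this diff: a custom predicate written against the
// relocated type, from a caller's point of view. The predicate name, body, and
// "zone" label key are hypothetical; only the signature and package paths come
// from this commit. Assumes imports: "fmt", v1 "k8s.io/api/core/v1",
// "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates",
// schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo".
func hasZoneLabel(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
	node := nodeInfo.Node()
	if node == nil {
		return false, nil, fmt.Errorf("node not found")
	}
	// A node carrying the (hypothetical) "zone" label fits; otherwise report
	// a failure reason. ErrFakePredicate is the test-only reason this repo exports.
	if _, ok := node.Labels["zone"]; ok {
		return true, nil, nil
	}
	return false, []predicates.PredicateFailureReason{predicates.ErrFakePredicate}, nil
}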
// PriorityMapFunction is a function that computes per-node results for a given node. // PriorityMapFunction is a function that computes per-node results for a given node.
// TODO: Figure out the exact API of this method. // TODO: Figure out the exact API of this method.
// TODO: Change interface{} to a specific type. // TODO: Change interface{} to a specific type.
@ -46,9 +42,6 @@ type PriorityMapFunction func(pod *v1.Pod, meta interface{}, nodeInfo *scheduler
// TODO: Change interface{} to a specific type. // TODO: Change interface{} to a specific type.
type PriorityReduceFunction func(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error type PriorityReduceFunction func(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error
// PredicateMetadataProducer is a function that computes predicate metadata for a given pod.
type PredicateMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata
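// Editor's sketch, not part of this diff: the producer shape after the move.
// Returning nil metadata is legal; that is exactly what the relocated
// EmptyPredicateMetadataProducer (removed from this file below) does, and
// predicates fall back to recomputing whatever they need.
var noopProducer predicates.PredicateMetadataProducer = func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) predicates.PredicateMetadata {
	return nil
}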
// PriorityMetadataProducer is a function that computes metadata for a given pod. This // PriorityMetadataProducer is a function that computes metadata for a given pod. This
// is now used only for priority functions. For predicates please use PredicateMetadataProducer. // is now used only for priority functions. For predicates please use PredicateMetadataProducer.
type PriorityMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) interface{} type PriorityMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) interface{}
@ -69,21 +62,11 @@ type PriorityConfig struct {
Weight int Weight int
} }
// EmptyPredicateMetadataProducer returns a no-op MetadataProducer type.
func EmptyPredicateMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata {
return nil
}
// EmptyPriorityMetadataProducer returns a no-op PriorityMetadataProducer type. // EmptyPriorityMetadataProducer returns a no-op PriorityMetadataProducer type.
func EmptyPriorityMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) interface{} { func EmptyPriorityMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) interface{} {
return nil return nil
} }
// PredicateFailureReason interface represents the failure reason of a predicate.
type PredicateFailureReason interface {
GetReason() string
}
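// Editor's sketch, not part of this diff: the interface is a single method, so
// any descriptive type can serve as a failure reason. The type below is
// hypothetical; the compile-time check pins it to the relocated interface.
type errNoGPU struct{}

func (errNoGPU) GetReason() string { return "node has no allocatable GPUs" }

var _ predicates.PredicateFailureReason = errNoGPU{}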
// NodeLister interface represents anything that can list nodes for a scheduler. // NodeLister interface represents anything that can list nodes for a scheduler.
type NodeLister interface { type NodeLister interface {
// We explicitly return []*v1.Node, instead of v1.NodeList, to avoid // We explicitly return []*v1.Node, instead of v1.NodeList, to avoid
@ -172,10 +155,3 @@ type EmptyStatefulSetLister struct{}
func (f EmptyStatefulSetLister) GetPodStatefulSets(pod *v1.Pod) (sss []*apps.StatefulSet, err error) { func (f EmptyStatefulSetLister) GetPodStatefulSets(pod *v1.Pod) (sss []*apps.StatefulSet, err error) {
return nil, nil return nil, nil
} }
// PredicateMetadata interface represents anything that can access a predicate metadata.
type PredicateMetadata interface {
ShallowCopy() PredicateMetadata
AddPod(addedPod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) error
RemovePod(deletedPod *v1.Pod) error
}
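// Editor's sketch, not part of this diff: callers of the relocated interface
// copy before mutating so shared metadata is not polluted, the same pattern
// addNominatedPods in generic_scheduler.go relies on. withExtraPod is a
// hypothetical helper.
func withExtraPod(meta predicates.PredicateMetadata, extra *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) (predicates.PredicateMetadata, error) {
	if meta == nil {
		// nil metadata is permitted; predicates recompute what they need.
		return nil, nil
	}
	metaCopy := meta.ShallowCopy()
	if err := metaCopy.AddPod(extra, nodeInfo); err != nil {
		return nil, err
	}
	return metaCopy, nil
}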

View File

@ -38,7 +38,7 @@ const (
func init() { func init() {
// Register functions that extract metadata used by predicates and priorities computations. // Register functions that extract metadata used by predicates and priorities computations.
factory.RegisterPredicateMetadataProducerFactory( factory.RegisterPredicateMetadataProducerFactory(
func(args factory.PluginFactoryArgs) algorithm.PredicateMetadataProducer { func(args factory.PluginFactoryArgs) predicates.PredicateMetadataProducer {
return predicates.NewPredicateMetadataFactory(args.PodLister) return predicates.NewPredicateMetadataFactory(args.PodLister)
}) })
factory.RegisterPriorityMetadataProducerFactory( factory.RegisterPriorityMetadataProducerFactory(
@ -101,41 +101,41 @@ func defaultPredicates() sets.String {
// Fit is determined by volume zone requirements. // Fit is determined by volume zone requirements.
factory.RegisterFitPredicateFactory( factory.RegisterFitPredicateFactory(
predicates.NoVolumeZoneConflictPred, predicates.NoVolumeZoneConflictPred,
func(args factory.PluginFactoryArgs) algorithm.FitPredicate { func(args factory.PluginFactoryArgs) predicates.FitPredicate {
return predicates.NewVolumeZonePredicate(args.PVInfo, args.PVCInfo, args.StorageClassInfo) return predicates.NewVolumeZonePredicate(args.PVInfo, args.PVCInfo, args.StorageClassInfo)
}, },
), ),
// Fit is determined by whether or not there would be too many AWS EBS volumes attached to the node // Fit is determined by whether or not there would be too many AWS EBS volumes attached to the node
factory.RegisterFitPredicateFactory( factory.RegisterFitPredicateFactory(
predicates.MaxEBSVolumeCountPred, predicates.MaxEBSVolumeCountPred,
func(args factory.PluginFactoryArgs) algorithm.FitPredicate { func(args factory.PluginFactoryArgs) predicates.FitPredicate {
return predicates.NewMaxPDVolumeCountPredicate(predicates.EBSVolumeFilterType, args.PVInfo, args.PVCInfo) return predicates.NewMaxPDVolumeCountPredicate(predicates.EBSVolumeFilterType, args.PVInfo, args.PVCInfo)
}, },
), ),
// Fit is determined by whether or not there would be too many GCE PD volumes attached to the node // Fit is determined by whether or not there would be too many GCE PD volumes attached to the node
factory.RegisterFitPredicateFactory( factory.RegisterFitPredicateFactory(
predicates.MaxGCEPDVolumeCountPred, predicates.MaxGCEPDVolumeCountPred,
func(args factory.PluginFactoryArgs) algorithm.FitPredicate { func(args factory.PluginFactoryArgs) predicates.FitPredicate {
return predicates.NewMaxPDVolumeCountPredicate(predicates.GCEPDVolumeFilterType, args.PVInfo, args.PVCInfo) return predicates.NewMaxPDVolumeCountPredicate(predicates.GCEPDVolumeFilterType, args.PVInfo, args.PVCInfo)
}, },
), ),
// Fit is determined by whether or not there would be too many Azure Disk volumes attached to the node // Fit is determined by whether or not there would be too many Azure Disk volumes attached to the node
factory.RegisterFitPredicateFactory( factory.RegisterFitPredicateFactory(
predicates.MaxAzureDiskVolumeCountPred, predicates.MaxAzureDiskVolumeCountPred,
func(args factory.PluginFactoryArgs) algorithm.FitPredicate { func(args factory.PluginFactoryArgs) predicates.FitPredicate {
return predicates.NewMaxPDVolumeCountPredicate(predicates.AzureDiskVolumeFilterType, args.PVInfo, args.PVCInfo) return predicates.NewMaxPDVolumeCountPredicate(predicates.AzureDiskVolumeFilterType, args.PVInfo, args.PVCInfo)
}, },
), ),
factory.RegisterFitPredicateFactory( factory.RegisterFitPredicateFactory(
predicates.MaxCSIVolumeCountPred, predicates.MaxCSIVolumeCountPred,
func(args factory.PluginFactoryArgs) algorithm.FitPredicate { func(args factory.PluginFactoryArgs) predicates.FitPredicate {
return predicates.NewCSIMaxVolumeLimitPredicate(args.PVInfo, args.PVCInfo) return predicates.NewCSIMaxVolumeLimitPredicate(args.PVInfo, args.PVCInfo)
}, },
), ),
// Fit is determined by inter-pod affinity. // Fit is determined by inter-pod affinity.
factory.RegisterFitPredicateFactory( factory.RegisterFitPredicateFactory(
predicates.MatchInterPodAffinityPred, predicates.MatchInterPodAffinityPred,
func(args factory.PluginFactoryArgs) algorithm.FitPredicate { func(args factory.PluginFactoryArgs) predicates.FitPredicate {
return predicates.NewPodAffinityPredicate(args.NodeInfo, args.PodLister) return predicates.NewPodAffinityPredicate(args.NodeInfo, args.PodLister)
}, },
), ),
@ -165,7 +165,7 @@ func defaultPredicates() sets.String {
// Fit is determined by volume topology requirements. // Fit is determined by volume topology requirements.
factory.RegisterFitPredicateFactory( factory.RegisterFitPredicateFactory(
predicates.CheckVolumeBindingPred, predicates.CheckVolumeBindingPred,
func(args factory.PluginFactoryArgs) algorithm.FitPredicate { func(args factory.PluginFactoryArgs) predicates.FitPredicate {
return predicates.NewVolumeBindingPredicate(args.VolumeBinder) return predicates.NewVolumeBindingPredicate(args.VolumeBinder)
}, },
), ),
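// Editor's sketch, not part of this diff: registering a custom predicate changes
// only in the factory's return type, which now comes from the predicates package.
// "HasZoneLabel" and hasZoneLabel are hypothetical (see the sketch accompanying
// the FitPredicate type above).
func init() {
	factory.RegisterFitPredicateFactory(
		"HasZoneLabel",
		func(args factory.PluginFactoryArgs) predicates.FitPredicate {
			return hasZoneLabel
		},
	)
}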

View File

@ -25,6 +25,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
@ -337,7 +338,7 @@ var _ algorithm.SchedulerExtender = &FakeExtender{}
func TestGenericSchedulerWithExtenders(t *testing.T) { func TestGenericSchedulerWithExtenders(t *testing.T) {
tests := []struct { tests := []struct {
name string name string
predicates map[string]algorithm.FitPredicate predicates map[string]predicates.FitPredicate
prioritizers []algorithm.PriorityConfig prioritizers []algorithm.PriorityConfig
extenders []FakeExtender extenders []FakeExtender
nodes []string nodes []string
@ -345,7 +346,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
expectsErr bool expectsErr bool
}{ }{
{ {
predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, predicates: map[string]predicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}}, prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
extenders: []FakeExtender{ extenders: []FakeExtender{
{ {
@ -360,7 +361,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
name: "test 1", name: "test 1",
}, },
{ {
predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, predicates: map[string]predicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}}, prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
extenders: []FakeExtender{ extenders: []FakeExtender{
{ {
@ -375,7 +376,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
name: "test 2", name: "test 2",
}, },
{ {
predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, predicates: map[string]predicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}}, prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
extenders: []FakeExtender{ extenders: []FakeExtender{
{ {
@ -390,7 +391,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
name: "test 3", name: "test 3",
}, },
{ {
predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, predicates: map[string]predicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}}, prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
extenders: []FakeExtender{ extenders: []FakeExtender{
{ {
@ -405,7 +406,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
name: "test 4", name: "test 4",
}, },
{ {
predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, predicates: map[string]predicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}}, prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
extenders: []FakeExtender{ extenders: []FakeExtender{
{ {
@ -419,7 +420,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
name: "test 5", name: "test 5",
}, },
{ {
predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, predicates: map[string]predicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}}, prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
extenders: []FakeExtender{ extenders: []FakeExtender{
{ {
@ -438,7 +439,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
name: "test 6", name: "test 6",
}, },
{ {
predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, predicates: map[string]predicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Function: machine2Prioritizer, Weight: 20}}, prioritizers: []algorithm.PriorityConfig{{Function: machine2Prioritizer, Weight: 20}},
extenders: []FakeExtender{ extenders: []FakeExtender{
{ {
@ -459,7 +460,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
// If the scheduler sends the pod by mistake, the test would fail // If the scheduler sends the pod by mistake, the test would fail
// because of the errors from errorPredicateExtender and/or // because of the errors from errorPredicateExtender and/or
// errorPrioritizerExtender. // errorPrioritizerExtender.
predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, predicates: map[string]predicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Function: machine2Prioritizer, Weight: 1}}, prioritizers: []algorithm.PriorityConfig{{Function: machine2Prioritizer, Weight: 1}},
extenders: []FakeExtender{ extenders: []FakeExtender{
{ {
@ -479,7 +480,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
// //
// If the scheduler did not ignore the extender, the test would fail // If the scheduler did not ignore the extender, the test would fail
// because of the errors from errorPredicateExtender. // because of the errors from errorPredicateExtender.
predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, predicates: map[string]predicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}}, prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
extenders: []FakeExtender{ extenders: []FakeExtender{
{ {
@ -512,7 +513,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
cache, cache,
queue, queue,
test.predicates, test.predicates,
algorithm.EmptyPredicateMetadataProducer, predicates.EmptyPredicateMetadataProducer,
test.prioritizers, test.prioritizers,
algorithm.EmptyPriorityMetadataProducer, algorithm.EmptyPriorityMetadataProducer,
emptyPluginSet, emptyPluginSet,

View File

@ -62,7 +62,7 @@ const (
) )
// FailedPredicateMap declares a map[string][]algorithm.PredicateFailureReason type. // FailedPredicateMap declares a map[string][]predicates.PredicateFailureReason type.
type FailedPredicateMap map[string][]algorithm.PredicateFailureReason type FailedPredicateMap map[string][]predicates.PredicateFailureReason
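// Editor's sketch, not part of this diff: constructing the map with the relocated
// failure-reason type. Node names are hypothetical; pod is assumed to be in scope.
failed := FailedPredicateMap{
	"node-1": []predicates.PredicateFailureReason{predicates.ErrNodeUnderMemoryPressure},
	"node-2": []predicates.PredicateFailureReason{predicates.ErrNodeUnderDiskPressure},
}
fitErr := &FitError{Pod: pod, NumAllNodes: 2, FailedPredicates: failed}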
// FitError describes a fit error of a pod. // FitError describes a fit error of a pod.
type FitError struct { type FitError struct {
@ -112,7 +112,7 @@ type ScheduleAlgorithm interface {
Preempt(*v1.Pod, algorithm.NodeLister, error) (selectedNode *v1.Node, preemptedPods []*v1.Pod, cleanupNominatedPods []*v1.Pod, err error) Preempt(*v1.Pod, algorithm.NodeLister, error) (selectedNode *v1.Node, preemptedPods []*v1.Pod, cleanupNominatedPods []*v1.Pod, err error)
// Predicates() returns a map of predicate functions. This is // Predicates() returns a map of predicate functions. This is
// exposed for testing. // exposed for testing.
Predicates() map[string]algorithm.FitPredicate Predicates() map[string]predicates.FitPredicate
// Prioritizers returns a slice of priority config. This is exposed for // Prioritizers returns a slice of priority config. This is exposed for
// testing. // testing.
Prioritizers() []algorithm.PriorityConfig Prioritizers() []algorithm.PriorityConfig
@ -121,9 +121,9 @@ type ScheduleAlgorithm interface {
type genericScheduler struct { type genericScheduler struct {
cache schedulerinternalcache.Cache cache schedulerinternalcache.Cache
schedulingQueue internalqueue.SchedulingQueue schedulingQueue internalqueue.SchedulingQueue
predicates map[string]algorithm.FitPredicate predicates map[string]predicates.FitPredicate
priorityMetaProducer algorithm.PriorityMetadataProducer priorityMetaProducer algorithm.PriorityMetadataProducer
predicateMetaProducer algorithm.PredicateMetadataProducer predicateMetaProducer predicates.PredicateMetadataProducer
prioritizers []algorithm.PriorityConfig prioritizers []algorithm.PriorityConfig
pluginSet pluginsv1alpha1.PluginSet pluginSet pluginsv1alpha1.PluginSet
extenders []algorithm.SchedulerExtender extenders []algorithm.SchedulerExtender
@ -213,7 +213,7 @@ func (g *genericScheduler) Prioritizers() []algorithm.PriorityConfig {
// Predicates returns a map containing all the scheduler's predicate // Predicates returns a map containing all the scheduler's predicate
// functions. It is exposed for testing only. // functions. It is exposed for testing only.
func (g *genericScheduler) Predicates() map[string]algorithm.FitPredicate { func (g *genericScheduler) Predicates() map[string]predicates.FitPredicate {
return g.predicates return g.predicates
} }
@ -486,7 +486,7 @@ func (g *genericScheduler) findNodesThatFit(pod *v1.Pod, nodes []*v1.Node) ([]*v
for failedNodeName, failedMsg := range failedMap { for failedNodeName, failedMsg := range failedMap {
if _, found := failedPredicateMap[failedNodeName]; !found { if _, found := failedPredicateMap[failedNodeName]; !found {
failedPredicateMap[failedNodeName] = []algorithm.PredicateFailureReason{} failedPredicateMap[failedNodeName] = []predicates.PredicateFailureReason{}
} }
failedPredicateMap[failedNodeName] = append(failedPredicateMap[failedNodeName], predicates.NewFailureReason(failedMsg)) failedPredicateMap[failedNodeName] = append(failedPredicateMap[failedNodeName], predicates.NewFailureReason(failedMsg))
} }
@ -502,8 +502,8 @@ func (g *genericScheduler) findNodesThatFit(pod *v1.Pod, nodes []*v1.Node) ([]*v
// addNominatedPods adds pods with equal or greater priority which are nominated // addNominatedPods adds pods with equal or greater priority which are nominated
// to run on the node given in nodeInfo to meta and nodeInfo. It returns 1) whether // to run on the node given in nodeInfo to meta and nodeInfo. It returns 1) whether
// any pod was found, 2) augmented metadata, 3) augmented nodeInfo. // any pod was found, 2) augmented metadata, 3) augmented nodeInfo.
func addNominatedPods(pod *v1.Pod, meta algorithm.PredicateMetadata, func addNominatedPods(pod *v1.Pod, meta predicates.PredicateMetadata,
nodeInfo *schedulernodeinfo.NodeInfo, queue internalqueue.SchedulingQueue) (bool, algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo, queue internalqueue.SchedulingQueue) (bool, predicates.PredicateMetadata,
*schedulernodeinfo.NodeInfo) { *schedulernodeinfo.NodeInfo) {
if queue == nil || nodeInfo == nil || nodeInfo.Node() == nil { if queue == nil || nodeInfo == nil || nodeInfo.Node() == nil {
// This may happen only in tests. // This may happen only in tests.
@ -513,7 +513,7 @@ func addNominatedPods(pod *v1.Pod, meta algorithm.PredicateMetadata,
if nominatedPods == nil || len(nominatedPods) == 0 { if nominatedPods == nil || len(nominatedPods) == 0 {
return false, meta, nodeInfo return false, meta, nodeInfo
} }
var metaOut algorithm.PredicateMetadata var metaOut predicates.PredicateMetadata
if meta != nil { if meta != nil {
metaOut = meta.ShallowCopy() metaOut = meta.ShallowCopy()
} }
@ -541,13 +541,13 @@ func addNominatedPods(pod *v1.Pod, meta algorithm.PredicateMetadata,
// It removes victims from meta and NodeInfo before calling this function. // It removes victims from meta and NodeInfo before calling this function.
func podFitsOnNode( func podFitsOnNode(
pod *v1.Pod, pod *v1.Pod,
meta algorithm.PredicateMetadata, meta predicates.PredicateMetadata,
info *schedulernodeinfo.NodeInfo, info *schedulernodeinfo.NodeInfo,
predicateFuncs map[string]algorithm.FitPredicate, predicateFuncs map[string]predicates.FitPredicate,
queue internalqueue.SchedulingQueue, queue internalqueue.SchedulingQueue,
alwaysCheckAllPredicates bool, alwaysCheckAllPredicates bool,
) (bool, []algorithm.PredicateFailureReason, error) { ) (bool, []predicates.PredicateFailureReason, error) {
var failedPredicates []algorithm.PredicateFailureReason var failedPredicates []predicates.PredicateFailureReason
podsAdded := false podsAdded := false
// We run predicates twice in some cases. If the node has greater or equal priority // We run predicates twice in some cases. If the node has greater or equal priority
@ -579,14 +579,14 @@ func podFitsOnNode(
for _, predicateKey := range predicates.Ordering() { for _, predicateKey := range predicates.Ordering() {
var ( var (
fit bool fit bool
reasons []algorithm.PredicateFailureReason reasons []predicates.PredicateFailureReason
err error err error
) )
//TODO (yastij) : compute average predicate restrictiveness to export it as Prometheus metric //TODO (yastij) : compute average predicate restrictiveness to export it as Prometheus metric
if predicate, exist := predicateFuncs[predicateKey]; exist { if predicate, exist := predicateFuncs[predicateKey]; exist {
fit, reasons, err = predicate(pod, metaToUse, nodeInfoToUse) fit, reasons, err = predicate(pod, metaToUse, nodeInfoToUse)
if err != nil { if err != nil {
return false, []algorithm.PredicateFailureReason{}, err return false, []predicates.PredicateFailureReason{}, err
} }
if !fit { if !fit {
@ -887,8 +887,8 @@ func pickOneNodeForPreemption(nodesToVictims map[*v1.Node]*schedulerapi.Victims)
func selectNodesForPreemption(pod *v1.Pod, func selectNodesForPreemption(pod *v1.Pod,
nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
potentialNodes []*v1.Node, potentialNodes []*v1.Node,
predicates map[string]algorithm.FitPredicate, fitPredicates map[string]predicates.FitPredicate,
metadataProducer algorithm.PredicateMetadataProducer, metadataProducer predicates.PredicateMetadataProducer,
queue internalqueue.SchedulingQueue, queue internalqueue.SchedulingQueue,
pdbs []*policy.PodDisruptionBudget, pdbs []*policy.PodDisruptionBudget,
) (map[*v1.Node]*schedulerapi.Victims, error) { ) (map[*v1.Node]*schedulerapi.Victims, error) {
@ -899,11 +899,11 @@ func selectNodesForPreemption(pod *v1.Pod,
meta := metadataProducer(pod, nodeNameToInfo) meta := metadataProducer(pod, nodeNameToInfo)
checkNode := func(i int) { checkNode := func(i int) {
nodeName := potentialNodes[i].Name nodeName := potentialNodes[i].Name
var metaCopy algorithm.PredicateMetadata var metaCopy predicates.PredicateMetadata
if meta != nil { if meta != nil {
metaCopy = meta.ShallowCopy() metaCopy = meta.ShallowCopy()
} }
pods, numPDBViolations, fits := selectVictimsOnNode(pod, metaCopy, nodeNameToInfo[nodeName], predicates, queue, pdbs) pods, numPDBViolations, fits := selectVictimsOnNode(pod, metaCopy, nodeNameToInfo[nodeName], fitPredicates, queue, pdbs)
if fits { if fits {
resultLock.Lock() resultLock.Lock()
victims := schedulerapi.Victims{ victims := schedulerapi.Victims{
@ -974,9 +974,9 @@ func filterPodsWithPDBViolation(pods []interface{}, pdbs []*policy.PodDisruption
// these predicates can be satisfied by removing more pods from the node. // these predicates can be satisfied by removing more pods from the node.
func selectVictimsOnNode( func selectVictimsOnNode(
pod *v1.Pod, pod *v1.Pod,
meta algorithm.PredicateMetadata, meta predicates.PredicateMetadata,
nodeInfo *schedulernodeinfo.NodeInfo, nodeInfo *schedulernodeinfo.NodeInfo,
fitPredicates map[string]algorithm.FitPredicate, fitPredicates map[string]predicates.FitPredicate,
queue internalqueue.SchedulingQueue, queue internalqueue.SchedulingQueue,
pdbs []*policy.PodDisruptionBudget, pdbs []*policy.PodDisruptionBudget,
) ([]*v1.Pod, int, bool) { ) ([]*v1.Pod, int, bool) {
@ -1143,8 +1143,8 @@ func podPassesBasicChecks(pod *v1.Pod, pvcLister corelisters.PersistentVolumeCla
func NewGenericScheduler( func NewGenericScheduler(
cache schedulerinternalcache.Cache, cache schedulerinternalcache.Cache,
podQueue internalqueue.SchedulingQueue, podQueue internalqueue.SchedulingQueue,
predicates map[string]algorithm.FitPredicate, predicates map[string]predicates.FitPredicate,
predicateMetaProducer algorithm.PredicateMetadataProducer, predicateMetaProducer predicates.PredicateMetadataProducer,
prioritizers []algorithm.PriorityConfig, prioritizers []algorithm.PriorityConfig,
priorityMetaProducer algorithm.PriorityMetadataProducer, priorityMetaProducer algorithm.PriorityMetadataProducer,
pluginSet pluginsv1alpha1.PluginSet, pluginSet pluginsv1alpha1.PluginSet,

View File

@ -50,15 +50,15 @@ var (
order = []string{"false", "true", "matches", "nopods", algorithmpredicates.MatchInterPodAffinityPred} order = []string{"false", "true", "matches", "nopods", algorithmpredicates.MatchInterPodAffinityPred}
) )
func falsePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func falsePredicate(pod *v1.Pod, meta algorithmpredicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithmpredicates.PredicateFailureReason, error) {
return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil return false, []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
} }
func truePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func truePredicate(pod *v1.Pod, meta algorithmpredicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithmpredicates.PredicateFailureReason, error) {
return true, nil, nil return true, nil, nil
} }
func matchesPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func matchesPredicate(pod *v1.Pod, meta algorithmpredicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithmpredicates.PredicateFailureReason, error) {
node := nodeInfo.Node() node := nodeInfo.Node()
if node == nil { if node == nil {
return false, nil, fmt.Errorf("node not found") return false, nil, fmt.Errorf("node not found")
@ -66,14 +66,14 @@ func matchesPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s
if pod.Name == node.Name { if pod.Name == node.Name {
return true, nil, nil return true, nil, nil
} }
return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil return false, []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
} }
func hasNoPodsPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func hasNoPodsPredicate(pod *v1.Pod, meta algorithmpredicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithmpredicates.PredicateFailureReason, error) {
if len(nodeInfo.Pods()) == 0 { if len(nodeInfo.Pods()) == 0 {
return true, nil, nil return true, nil, nil
} }
return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil return false, []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
} }
func numericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { func numericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
@ -238,7 +238,7 @@ func TestGenericScheduler(t *testing.T) {
algorithmpredicates.SetPredicatesOrdering(order) algorithmpredicates.SetPredicatesOrdering(order)
tests := []struct { tests := []struct {
name string name string
predicates map[string]algorithm.FitPredicate predicates map[string]algorithmpredicates.FitPredicate
prioritizers []algorithm.PriorityConfig prioritizers []algorithm.PriorityConfig
alwaysCheckAllPredicates bool alwaysCheckAllPredicates bool
nodes []string nodes []string
@ -250,7 +250,7 @@ func TestGenericScheduler(t *testing.T) {
wErr error wErr error
}{ }{
{ {
predicates: map[string]algorithm.FitPredicate{"false": falsePredicate}, predicates: map[string]algorithmpredicates.FitPredicate{"false": falsePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}}, prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
nodes: []string{"machine1", "machine2"}, nodes: []string{"machine1", "machine2"},
expectsErr: true, expectsErr: true,
@ -260,12 +260,12 @@ func TestGenericScheduler(t *testing.T) {
Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}}, Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
NumAllNodes: 2, NumAllNodes: 2,
FailedPredicates: FailedPredicateMap{ FailedPredicates: FailedPredicateMap{
"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, "machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, "machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
}}, }},
}, },
{ {
predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}}, prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
nodes: []string{"machine1", "machine2"}, nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore")}}, pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore")}},
@ -275,7 +275,7 @@ func TestGenericScheduler(t *testing.T) {
}, },
{ {
// Fits on a machine where the pod ID matches the machine name // Fits on a machine where the pod ID matches the machine name
predicates: map[string]algorithm.FitPredicate{"matches": matchesPredicate}, predicates: map[string]algorithmpredicates.FitPredicate{"matches": matchesPredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}}, prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
nodes: []string{"machine1", "machine2"}, nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine2", UID: types.UID("machine2")}}, pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine2", UID: types.UID("machine2")}},
@ -284,7 +284,7 @@ func TestGenericScheduler(t *testing.T) {
wErr: nil, wErr: nil,
}, },
{ {
predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}}, prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
nodes: []string{"3", "2", "1"}, nodes: []string{"3", "2", "1"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore")}}, pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore")}},
@ -293,7 +293,7 @@ func TestGenericScheduler(t *testing.T) {
wErr: nil, wErr: nil,
}, },
{ {
predicates: map[string]algorithm.FitPredicate{"matches": matchesPredicate}, predicates: map[string]algorithmpredicates.FitPredicate{"matches": matchesPredicate},
prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}}, prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
nodes: []string{"3", "2", "1"}, nodes: []string{"3", "2", "1"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}}, pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
@ -302,7 +302,7 @@ func TestGenericScheduler(t *testing.T) {
wErr: nil, wErr: nil,
}, },
{ {
predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}, {Function: reverseNumericPriority, Weight: 2}}, prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}, {Function: reverseNumericPriority, Weight: 2}},
nodes: []string{"3", "2", "1"}, nodes: []string{"3", "2", "1"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}}, pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
@ -311,7 +311,7 @@ func TestGenericScheduler(t *testing.T) {
wErr: nil, wErr: nil,
}, },
{ {
predicates: map[string]algorithm.FitPredicate{"true": truePredicate, "false": falsePredicate}, predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate, "false": falsePredicate},
prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}}, prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
nodes: []string{"3", "2", "1"}, nodes: []string{"3", "2", "1"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}}, pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
@ -321,14 +321,14 @@ func TestGenericScheduler(t *testing.T) {
Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}}, Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
NumAllNodes: 3, NumAllNodes: 3,
FailedPredicates: FailedPredicateMap{ FailedPredicates: FailedPredicateMap{
"3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, "3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
"2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, "2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
"1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, "1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
}, },
}, },
}, },
{ {
predicates: map[string]algorithm.FitPredicate{ predicates: map[string]algorithmpredicates.FitPredicate{
"nopods": hasNoPodsPredicate, "nopods": hasNoPodsPredicate,
"matches": matchesPredicate, "matches": matchesPredicate,
}, },
@ -352,14 +352,14 @@ func TestGenericScheduler(t *testing.T) {
Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}}, Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
NumAllNodes: 2, NumAllNodes: 2,
FailedPredicates: FailedPredicateMap{ FailedPredicates: FailedPredicateMap{
"1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, "1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
"2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, "2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
}, },
}, },
}, },
{ {
// Pod with existing PVC // Pod with existing PVC
predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}}, prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
nodes: []string{"machine1", "machine2"}, nodes: []string{"machine1", "machine2"},
pvcs: []*v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC"}}}, pvcs: []*v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC"}}},
@ -383,7 +383,7 @@ func TestGenericScheduler(t *testing.T) {
}, },
{ {
// Pod with non existing PVC // Pod with non existing PVC
predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}}, prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
nodes: []string{"machine1", "machine2"}, nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ pod: &v1.Pod{
@ -406,7 +406,7 @@ func TestGenericScheduler(t *testing.T) {
}, },
{ {
// Pod with deleting PVC // Pod with deleting PVC
predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}}, prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
nodes: []string{"machine1", "machine2"}, nodes: []string{"machine1", "machine2"},
pvcs: []*v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", DeletionTimestamp: &metav1.Time{}}}}, pvcs: []*v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", DeletionTimestamp: &metav1.Time{}}}},
@ -430,7 +430,7 @@ func TestGenericScheduler(t *testing.T) {
}, },
{ {
// alwaysCheckAllPredicates is true // alwaysCheckAllPredicates is true
predicates: map[string]algorithm.FitPredicate{"true": truePredicate, "matches": matchesPredicate, "false": falsePredicate}, predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate, "matches": matchesPredicate, "false": falsePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}}, prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
alwaysCheckAllPredicates: true, alwaysCheckAllPredicates: true,
nodes: []string{"1"}, nodes: []string{"1"},
@ -440,12 +440,12 @@ func TestGenericScheduler(t *testing.T) {
Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}}, Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
NumAllNodes: 1, NumAllNodes: 1,
FailedPredicates: FailedPredicateMap{ FailedPredicates: FailedPredicateMap{
"1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate, algorithmpredicates.ErrFakePredicate}, "1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate, algorithmpredicates.ErrFakePredicate},
}, },
}, },
}, },
{ {
predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: falseMapPriority, Weight: 1}, {Map: trueMapPriority, Reduce: getNodeReducePriority, Weight: 2}}, prioritizers: []algorithm.PriorityConfig{{Map: falseMapPriority, Weight: 1}, {Map: trueMapPriority, Reduce: getNodeReducePriority, Weight: 2}},
nodes: []string{"2", "1"}, nodes: []string{"2", "1"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}}, pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
@ -471,7 +471,7 @@ func TestGenericScheduler(t *testing.T) {
cache, cache,
internalqueue.NewSchedulingQueue(nil), internalqueue.NewSchedulingQueue(nil),
test.predicates, test.predicates,
algorithm.EmptyPredicateMetadataProducer, algorithmpredicates.EmptyPredicateMetadataProducer,
test.prioritizers, test.prioritizers,
algorithm.EmptyPriorityMetadataProducer, algorithm.EmptyPriorityMetadataProducer,
emptyPluginSet, emptyPluginSet,
@ -495,7 +495,7 @@ func TestGenericScheduler(t *testing.T) {
} }
// makeScheduler makes a simple genericScheduler for testing. // makeScheduler makes a simple genericScheduler for testing.
func makeScheduler(predicates map[string]algorithm.FitPredicate, nodes []*v1.Node) *genericScheduler { func makeScheduler(predicates map[string]algorithmpredicates.FitPredicate, nodes []*v1.Node) *genericScheduler {
algorithmpredicates.SetPredicatesOrdering(order) algorithmpredicates.SetPredicatesOrdering(order)
cache := schedulerinternalcache.New(time.Duration(0), wait.NeverStop) cache := schedulerinternalcache.New(time.Duration(0), wait.NeverStop)
for _, n := range nodes { for _, n := range nodes {
@ -507,7 +507,7 @@ func makeScheduler(predicates map[string]algorithm.FitPredicate, nodes []*v1.Nod
cache, cache,
internalqueue.NewSchedulingQueue(nil), internalqueue.NewSchedulingQueue(nil),
predicates, predicates,
algorithm.EmptyPredicateMetadataProducer, algorithmpredicates.EmptyPredicateMetadataProducer,
prioritizers, prioritizers,
algorithm.EmptyPriorityMetadataProducer, algorithm.EmptyPriorityMetadataProducer,
emptyPluginSet, emptyPluginSet,
@ -519,7 +519,7 @@ func makeScheduler(predicates map[string]algorithm.FitPredicate, nodes []*v1.Nod
} }
func TestFindFitAllError(t *testing.T) { func TestFindFitAllError(t *testing.T) {
predicates := map[string]algorithm.FitPredicate{"true": truePredicate, "matches": matchesPredicate} predicates := map[string]algorithmpredicates.FitPredicate{"true": truePredicate, "matches": matchesPredicate}
nodes := makeNodeList([]string{"3", "2", "1"}) nodes := makeNodeList([]string{"3", "2", "1"})
scheduler := makeScheduler(predicates, nodes) scheduler := makeScheduler(predicates, nodes)
@ -547,7 +547,7 @@ func TestFindFitAllError(t *testing.T) {
} }
func TestFindFitSomeError(t *testing.T) { func TestFindFitSomeError(t *testing.T) {
predicates := map[string]algorithm.FitPredicate{"true": truePredicate, "matches": matchesPredicate} predicates := map[string]algorithmpredicates.FitPredicate{"true": truePredicate, "matches": matchesPredicate}
nodes := makeNodeList([]string{"3", "2", "1"}) nodes := makeNodeList([]string{"3", "2", "1"})
scheduler := makeScheduler(predicates, nodes) scheduler := makeScheduler(predicates, nodes)
@ -602,9 +602,9 @@ func TestHumanReadableFitError(t *testing.T) {
Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}}, Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
NumAllNodes: 3, NumAllNodes: 3,
FailedPredicates: FailedPredicateMap{ FailedPredicates: FailedPredicateMap{
"1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnderMemoryPressure}, "1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderMemoryPressure},
"2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure}, "2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure},
"3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure}, "3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure},
}, },
} }
if strings.Contains(err.Error(), "0/3 nodes are available") { if strings.Contains(err.Error(), "0/3 nodes are available") {
@ -796,7 +796,7 @@ func (n FakeNodeInfo) GetNodeInfo(nodeName string) (*v1.Node, error) {
return &node, nil return &node, nil
} }
func PredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulernodeinfo.NodeInfo) algorithm.PredicateMetadata { func PredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulernodeinfo.NodeInfo) algorithmpredicates.PredicateMetadata {
return algorithmpredicates.NewPredicateMetadataFactory(schedulertesting.FakePodLister{p})(p, nodeInfo) return algorithmpredicates.NewPredicateMetadataFactory(schedulertesting.FakePodLister{p})(p, nodeInfo)
} }
@ -856,7 +856,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
algorithmpredicates.SetPredicatesOrdering(order) algorithmpredicates.SetPredicatesOrdering(order)
tests := []struct { tests := []struct {
name string name string
predicates map[string]algorithm.FitPredicate predicates map[string]algorithmpredicates.FitPredicate
nodes []string nodes []string
pod *v1.Pod pod *v1.Pod
pods []*v1.Pod pods []*v1.Pod
@ -865,7 +865,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
}{ }{
{ {
name: "a pod that does not fit on any machine", name: "a pod that does not fit on any machine",
predicates: map[string]algorithm.FitPredicate{"matches": falsePredicate}, predicates: map[string]algorithmpredicates.FitPredicate{"matches": falsePredicate},
nodes: []string{"machine1", "machine2"}, nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "new", UID: types.UID("new")}, Spec: v1.PodSpec{Priority: &highPriority}}, pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "new", UID: types.UID("new")}, Spec: v1.PodSpec{Priority: &highPriority}},
pods: []*v1.Pod{ pods: []*v1.Pod{
@ -875,7 +875,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
}, },
{ {
name: "a pod that fits with no preemption", name: "a pod that fits with no preemption",
predicates: map[string]algorithm.FitPredicate{"matches": truePredicate}, predicates: map[string]algorithmpredicates.FitPredicate{"matches": truePredicate},
nodes: []string{"machine1", "machine2"}, nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "new", UID: types.UID("new")}, Spec: v1.PodSpec{Priority: &highPriority}}, pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "new", UID: types.UID("new")}, Spec: v1.PodSpec{Priority: &highPriority}},
pods: []*v1.Pod{ pods: []*v1.Pod{
@ -885,7 +885,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
}, },
{ {
name: "a pod that fits on one machine with no preemption", name: "a pod that fits on one machine with no preemption",
predicates: map[string]algorithm.FitPredicate{"matches": matchesPredicate}, predicates: map[string]algorithmpredicates.FitPredicate{"matches": matchesPredicate},
nodes: []string{"machine1", "machine2"}, nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Priority: &highPriority}}, pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Priority: &highPriority}},
pods: []*v1.Pod{ pods: []*v1.Pod{
@ -895,7 +895,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
}, },
{ {
name: "a pod that fits on both machines when lower priority pods are preempted", name: "a pod that fits on both machines when lower priority pods are preempted",
predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1", "machine2"}, nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}}, pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
pods: []*v1.Pod{ pods: []*v1.Pod{
@ -905,7 +905,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
}, },
{ {
name: "a pod that would fit on the machines, but other pods running are higher priority", name: "a pod that would fit on the machines, but other pods running are higher priority",
predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1", "machine2"}, nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &lowPriority}}, pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &lowPriority}},
pods: []*v1.Pod{ pods: []*v1.Pod{
@ -915,7 +915,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
}, },
{ {
name: "medium priority pod is preempted, but lower priority one stays as it is small", name: "medium priority pod is preempted, but lower priority one stays as it is small",
predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1", "machine2"}, nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}}, pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
pods: []*v1.Pod{ pods: []*v1.Pod{
@ -926,7 +926,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
}, },
{ {
name: "mixed priority pods are preempted", name: "mixed priority pods are preempted",
predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1", "machine2"}, nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}}, pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
pods: []*v1.Pod{ pods: []*v1.Pod{
@ -939,7 +939,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
}, },
{ {
name: "pod with anti-affinity is preempted", name: "pod with anti-affinity is preempted",
predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1", "machine2"}, nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{ pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{
Name: "machine1", Name: "machine1",
@ -1002,7 +1002,7 @@ func TestPickOneNodeForPreemption(t *testing.T) {
algorithmpredicates.SetPredicatesOrdering(order) algorithmpredicates.SetPredicatesOrdering(order)
tests := []struct { tests := []struct {
name string name string
predicates map[string]algorithm.FitPredicate predicates map[string]algorithmpredicates.FitPredicate
nodes []string nodes []string
pod *v1.Pod pod *v1.Pod
pods []*v1.Pod pods []*v1.Pod
@ -1010,7 +1010,7 @@ func TestPickOneNodeForPreemption(t *testing.T) {
}{ }{
{ {
name: "No node needs preemption", name: "No node needs preemption",
predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1"}, nodes: []string{"machine1"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}}, pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
pods: []*v1.Pod{ pods: []*v1.Pod{
@ -1019,7 +1019,7 @@ func TestPickOneNodeForPreemption(t *testing.T) {
}, },
{ {
name: "a pod that fits on both machines when lower priority pods are preempted", name: "a pod that fits on both machines when lower priority pods are preempted",
predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1", "machine2"}, nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}}, pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
pods: []*v1.Pod{ pods: []*v1.Pod{
@ -1030,7 +1030,7 @@ func TestPickOneNodeForPreemption(t *testing.T) {
}, },
{ {
name: "a pod that fits on a machine with no preemption", name: "a pod that fits on a machine with no preemption",
predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1", "machine2", "machine3"}, nodes: []string{"machine1", "machine2", "machine3"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}}, pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
pods: []*v1.Pod{ pods: []*v1.Pod{
@ -1041,7 +1041,7 @@ func TestPickOneNodeForPreemption(t *testing.T) {
}, },
{ {
name: "machine with min highest priority pod is picked", name: "machine with min highest priority pod is picked",
predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1", "machine2", "machine3"}, nodes: []string{"machine1", "machine2", "machine3"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &highPriority}}, pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &highPriority}},
pods: []*v1.Pod{ pods: []*v1.Pod{
@ -1058,7 +1058,7 @@ func TestPickOneNodeForPreemption(t *testing.T) {
}, },
{ {
name: "when highest priorities are the same, minimum sum of priorities is picked", name: "when highest priorities are the same, minimum sum of priorities is picked",
predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1", "machine2", "machine3"}, nodes: []string{"machine1", "machine2", "machine3"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &highPriority}}, pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &highPriority}},
pods: []*v1.Pod{ pods: []*v1.Pod{
@ -1075,7 +1075,7 @@ func TestPickOneNodeForPreemption(t *testing.T) {
}, },
{ {
name: "when highest priority and sum are the same, minimum number of pods is picked", name: "when highest priority and sum are the same, minimum number of pods is picked",
predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1", "machine2", "machine3"}, nodes: []string{"machine1", "machine2", "machine3"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &highPriority}}, pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &highPriority}},
pods: []*v1.Pod{ pods: []*v1.Pod{
@ -1097,7 +1097,7 @@ func TestPickOneNodeForPreemption(t *testing.T) {
// pickOneNodeForPreemption adjusts pod priorities when finding the sum of the victims. This // pickOneNodeForPreemption adjusts pod priorities when finding the sum of the victims. This
// test ensures that the logic works correctly. // test ensures that the logic works correctly.
name: "sum of adjusted priorities is considered", name: "sum of adjusted priorities is considered",
predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1", "machine2", "machine3"}, nodes: []string{"machine1", "machine2", "machine3"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &highPriority}}, pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &highPriority}},
pods: []*v1.Pod{ pods: []*v1.Pod{
@ -1116,7 +1116,7 @@ func TestPickOneNodeForPreemption(t *testing.T) {
}, },
{ {
name: "non-overlapping lowest high priority, sum priorities, and number of pods", name: "non-overlapping lowest high priority, sum priorities, and number of pods",
predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
nodes: []string{"machine1", "machine2", "machine3", "machine4"}, nodes: []string{"machine1", "machine2", "machine3", "machine4"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &veryHighPriority}}, pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &veryHighPriority}},
pods: []*v1.Pod{ pods: []*v1.Pod{
@ -1177,72 +1177,72 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
{ {
name: "No node should be attempted", name: "No node should be attempted",
failedPredMap: FailedPredicateMap{ failedPredMap: FailedPredicateMap{
"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeSelectorNotMatch}, "machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeSelectorNotMatch},
"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName}, "machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
"machine3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrTaintsTolerationsNotMatch}, "machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrTaintsTolerationsNotMatch},
"machine4": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeLabelPresenceViolated}, "machine4": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeLabelPresenceViolated},
}, },
expected: map[string]bool{}, expected: map[string]bool{},
}, },
{ {
name: "ErrPodAffinityNotMatch should be tried as it indicates that the pod is unschedulable due to inter-pod affinity or anti-affinity", name: "ErrPodAffinityNotMatch should be tried as it indicates that the pod is unschedulable due to inter-pod affinity or anti-affinity",
failedPredMap: FailedPredicateMap{ failedPredMap: FailedPredicateMap{
"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch}, "machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch},
"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName}, "machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
"machine3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnschedulable}, "machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnschedulable},
}, },
expected: map[string]bool{"machine1": true, "machine4": true}, expected: map[string]bool{"machine1": true, "machine4": true},
}, },
{ {
name: "pod with both pod affinity and anti-affinity should be tried", name: "pod with both pod affinity and anti-affinity should be tried",
failedPredMap: FailedPredicateMap{ failedPredMap: FailedPredicateMap{
"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch}, "machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch},
"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName}, "machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
}, },
expected: map[string]bool{"machine1": true, "machine3": true, "machine4": true}, expected: map[string]bool{"machine1": true, "machine3": true, "machine4": true},
}, },
{ {
name: "ErrPodAffinityRulesNotMatch should not be tried as it indicates that the pod is unschedulable due to inter-pod affinity, but ErrPodAffinityNotMatch should be tried as it indicates that the pod is unschedulable due to inter-pod affinity or anti-affinity", name: "ErrPodAffinityRulesNotMatch should not be tried as it indicates that the pod is unschedulable due to inter-pod affinity, but ErrPodAffinityNotMatch should be tried as it indicates that the pod is unschedulable due to inter-pod affinity or anti-affinity",
failedPredMap: FailedPredicateMap{ failedPredMap: FailedPredicateMap{
"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrPodAffinityRulesNotMatch}, "machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodAffinityRulesNotMatch},
"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch}, "machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch},
}, },
expected: map[string]bool{"machine2": true, "machine3": true, "machine4": true}, expected: map[string]bool{"machine2": true, "machine3": true, "machine4": true},
}, },
{ {
name: "Mix of failed predicates works fine", name: "Mix of failed predicates works fine",
failedPredMap: FailedPredicateMap{ failedPredMap: FailedPredicateMap{
"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeSelectorNotMatch, algorithmpredicates.ErrNodeUnderDiskPressure, algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 500, 300)}, "machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeSelectorNotMatch, algorithmpredicates.ErrNodeUnderDiskPressure, algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 500, 300)},
"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName, algorithmpredicates.ErrDiskConflict}, "machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName, algorithmpredicates.ErrDiskConflict},
"machine3": []algorithm.PredicateFailureReason{algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400)}, "machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400)},
"machine4": []algorithm.PredicateFailureReason{}, "machine4": []algorithmpredicates.PredicateFailureReason{},
}, },
expected: map[string]bool{"machine3": true, "machine4": true}, expected: map[string]bool{"machine3": true, "machine4": true},
}, },
{ {
name: "Node condition errors should be considered unresolvable", name: "Node condition errors should be considered unresolvable",
failedPredMap: FailedPredicateMap{ failedPredMap: FailedPredicateMap{
"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure}, "machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure},
"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnderPIDPressure}, "machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderPIDPressure},
"machine3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnderMemoryPressure}, "machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderMemoryPressure},
}, },
expected: map[string]bool{"machine4": true}, expected: map[string]bool{"machine4": true},
}, },
{ {
name: "Node condition errors and ErrNodeUnknownCondition should be considered unresolvable", name: "Node condition errors and ErrNodeUnknownCondition should be considered unresolvable",
failedPredMap: FailedPredicateMap{ failedPredMap: FailedPredicateMap{
"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeNotReady}, "machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeNotReady},
"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeNetworkUnavailable}, "machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeNetworkUnavailable},
"machine3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnknownCondition}, "machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnknownCondition},
}, },
expected: map[string]bool{"machine4": true}, expected: map[string]bool{"machine4": true},
}, },
{ {
name: "ErrVolume... errors should not be tried as it indicates that the pod is unschedulable due to no matching volumes for pod on node", name: "ErrVolume... errors should not be tried as it indicates that the pod is unschedulable due to no matching volumes for pod on node",
failedPredMap: FailedPredicateMap{ failedPredMap: FailedPredicateMap{
"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrVolumeZoneConflict}, "machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrVolumeZoneConflict},
"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrVolumeNodeConflict}, "machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrVolumeNodeConflict},
"machine3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrVolumeBindConflict}, "machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrVolumeBindConflict},
}, },
expected: map[string]bool{"machine4": true}, expected: map[string]bool{"machine4": true},
}, },
@ -1265,9 +1265,9 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
func TestPreempt(t *testing.T) { func TestPreempt(t *testing.T) {
failedPredMap := FailedPredicateMap{ failedPredMap := FailedPredicateMap{
"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 500, 300)}, "machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 500, 300)},
"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrDiskConflict}, "machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrDiskConflict},
"machine3": []algorithm.PredicateFailureReason{algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400)}, "machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400)},
} }
// Prepare 3 node names. // Prepare 3 node names.
nodeNames := []string{} nodeNames := []string{}
@ -1431,8 +1431,8 @@ func TestPreempt(t *testing.T) {
scheduler := NewGenericScheduler( scheduler := NewGenericScheduler(
cache, cache,
internalqueue.NewSchedulingQueue(nil), internalqueue.NewSchedulingQueue(nil),
map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
algorithm.EmptyPredicateMetadataProducer, algorithmpredicates.EmptyPredicateMetadataProducer,
[]algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}}, []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
algorithm.EmptyPriorityMetadataProducer, algorithm.EmptyPriorityMetadataProducer,
emptyPluginSet, emptyPluginSet,
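
With the types moved, test fixtures are declared against the predicates package rather than algorithm. A minimal sketch of a no-op fixture and the map type now handed to NewGenericScheduler, assuming the algorithmpredicates and schedulernodeinfo import aliases used in this file (alwaysTruePredicate and the package name are hypothetical):

package example

import (
	"k8s.io/api/core/v1"
	algorithmpredicates "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// alwaysTruePredicate is a hypothetical fixture that admits every pod; its
// signature references the relocated predicates types instead of algorithm.
func alwaysTruePredicate(pod *v1.Pod, meta algorithmpredicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithmpredicates.PredicateFailureReason, error) {
	return true, nil, nil
}

// fixturePredicates shows the map shape NewGenericScheduler now expects.
var fixturePredicates = map[string]algorithmpredicates.FitPredicate{"true": alwaysTruePredicate}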

View File

@ -64,6 +64,7 @@ go_test(
deps = [ deps = [
"//pkg/api/testing:go_default_library", "//pkg/api/testing:go_default_library",
"//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/algorithm/priorities:go_default_library", "//pkg/scheduler/algorithm/priorities:go_default_library",
"//pkg/scheduler/api:go_default_library", "//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/api/latest:go_default_library", "//pkg/scheduler/api/latest:go_default_library",

View File

@ -159,8 +159,8 @@ type Configurator interface {
MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue internalqueue.SchedulingQueue) func(pod *v1.Pod, err error) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue internalqueue.SchedulingQueue) func(pod *v1.Pod, err error)
// Predicate related accessors to be exposed for use by k8s.io/autoscaler/cluster-autoscaler // Predicate related accessors to be exposed for use by k8s.io/autoscaler/cluster-autoscaler
GetPredicateMetadataProducer() (algorithm.PredicateMetadataProducer, error) GetPredicateMetadataProducer() (predicates.PredicateMetadataProducer, error)
GetPredicates(predicateKeys sets.String) (map[string]algorithm.FitPredicate, error) GetPredicates(predicateKeys sets.String) (map[string]predicates.FitPredicate, error)
// Needs to be exposed for things like integration tests where we want to make fake nodes. // Needs to be exposed for things like integration tests where we want to make fake nodes.
GetNodeLister() corelisters.NodeLister GetNodeLister() corelisters.NodeLister
@ -956,7 +956,7 @@ func (c *configFactory) GetPriorityMetadataProducer() (algorithm.PriorityMetadataProducer, error) {
return getPriorityMetadataProducer(*pluginArgs) return getPriorityMetadataProducer(*pluginArgs)
} }
func (c *configFactory) GetPredicateMetadataProducer() (algorithm.PredicateMetadataProducer, error) { func (c *configFactory) GetPredicateMetadataProducer() (predicates.PredicateMetadataProducer, error) {
pluginArgs, err := c.getPluginArgs() pluginArgs, err := c.getPluginArgs()
if err != nil { if err != nil {
return nil, err return nil, err
@ -964,7 +964,7 @@ func (c *configFactory) GetPredicateMetadataProducer() (algorithm.PredicateMetadataProducer, error) {
return getPredicateMetadataProducer(*pluginArgs) return getPredicateMetadataProducer(*pluginArgs)
} }
func (c *configFactory) GetPredicates(predicateKeys sets.String) (map[string]algorithm.FitPredicate, error) { func (c *configFactory) GetPredicates(predicateKeys sets.String) (map[string]predicates.FitPredicate, error) {
pluginArgs, err := c.getPluginArgs() pluginArgs, err := c.getPluginArgs()
if err != nil { if err != nil {
return nil, err return nil, err
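
These accessors are the surface consumed by k8s.io/autoscaler/cluster-autoscaler, so callers now receive the predicates types. A minimal caller sketch under that assumption; dumpPredicates and the chosen predicate key are hypothetical:

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	"k8s.io/kubernetes/pkg/scheduler/factory"
)

// dumpPredicates resolves predicate keys through a Configurator; the result
// is now a map[string]predicates.FitPredicate rather than the algorithm type.
func dumpPredicates(c factory.Configurator) error {
	fits, err := c.GetPredicates(sets.NewString("PodFitsResources"))
	if err != nil {
		return err
	}
	var _ map[string]predicates.FitPredicate = fits // compile-time check of the new element type
	for name := range fits {
		fmt.Println(name)
	}
	return nil
}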

View File

@ -36,6 +36,7 @@ import (
"k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/cache"
apitesting "k8s.io/kubernetes/pkg/api/testing" apitesting "k8s.io/kubernetes/pkg/api/testing"
"k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
latestschedulerapi "k8s.io/kubernetes/pkg/scheduler/api/latest" latestschedulerapi "k8s.io/kubernetes/pkg/scheduler/api/latest"
fakecache "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake" fakecache "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake"
@ -229,11 +230,11 @@ func TestCreateFromConfigWithEmptyPredicatesOrPriorities(t *testing.T) {
} }
} }
func PredicateOne(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func PredicateOne(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
return true, nil, nil return true, nil, nil
} }
func PredicateTwo(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func PredicateTwo(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
return true, nil, nil return true, nil, nil
} }

View File

@ -54,10 +54,10 @@ type PluginFactoryArgs struct {
type PriorityMetadataProducerFactory func(PluginFactoryArgs) algorithm.PriorityMetadataProducer type PriorityMetadataProducerFactory func(PluginFactoryArgs) algorithm.PriorityMetadataProducer
// PredicateMetadataProducerFactory produces PredicateMetadataProducer from the given args. // PredicateMetadataProducerFactory produces PredicateMetadataProducer from the given args.
type PredicateMetadataProducerFactory func(PluginFactoryArgs) algorithm.PredicateMetadataProducer type PredicateMetadataProducerFactory func(PluginFactoryArgs) predicates.PredicateMetadataProducer
// FitPredicateFactory produces a FitPredicate from the given args. // FitPredicateFactory produces a FitPredicate from the given args.
type FitPredicateFactory func(PluginFactoryArgs) algorithm.FitPredicate type FitPredicateFactory func(PluginFactoryArgs) predicates.FitPredicate
// PriorityFunctionFactory produces a PriorityConfig from the given args. // PriorityFunctionFactory produces a PriorityConfig from the given args.
// DEPRECATED // DEPRECATED
@ -103,8 +103,8 @@ type AlgorithmProviderConfig struct {
// RegisterFitPredicate registers a fit predicate with the algorithm // RegisterFitPredicate registers a fit predicate with the algorithm
// registry. Returns the name with which the predicate was registered. // registry. Returns the name with which the predicate was registered.
func RegisterFitPredicate(name string, predicate algorithm.FitPredicate) string { func RegisterFitPredicate(name string, predicate predicates.FitPredicate) string {
return RegisterFitPredicateFactory(name, func(PluginFactoryArgs) algorithm.FitPredicate { return predicate }) return RegisterFitPredicateFactory(name, func(PluginFactoryArgs) predicates.FitPredicate { return predicate })
} }
// RemoveFitPredicate removes a fit predicate from factory. // RemoveFitPredicate removes a fit predicate from factory.
@ -181,11 +181,11 @@ func InsertPriorityKeyToAlgorithmProviderMap(key string) {
// RegisterMandatoryFitPredicate registers a fit predicate with the algorithm registry, the predicate is used by // RegisterMandatoryFitPredicate registers a fit predicate with the algorithm registry, the predicate is used by
// kubelet, DaemonSet; it is always included in configuration. Returns the name with which the predicate was // kubelet, DaemonSet; it is always included in configuration. Returns the name with which the predicate was
// registered. // registered.
func RegisterMandatoryFitPredicate(name string, predicate algorithm.FitPredicate) string { func RegisterMandatoryFitPredicate(name string, predicate predicates.FitPredicate) string {
schedulerFactoryMutex.Lock() schedulerFactoryMutex.Lock()
defer schedulerFactoryMutex.Unlock() defer schedulerFactoryMutex.Unlock()
validateAlgorithmNameOrDie(name) validateAlgorithmNameOrDie(name)
fitPredicateMap[name] = func(PluginFactoryArgs) algorithm.FitPredicate { return predicate } fitPredicateMap[name] = func(PluginFactoryArgs) predicates.FitPredicate { return predicate }
mandatoryFitPredicates.Insert(name) mandatoryFitPredicates.Insert(name)
return name return name
} }
@ -211,7 +211,7 @@ func RegisterCustomFitPredicate(policy schedulerapi.PredicatePolicy) string {
// generate the predicate function, if a custom type is requested // generate the predicate function, if a custom type is requested
if policy.Argument != nil { if policy.Argument != nil {
if policy.Argument.ServiceAffinity != nil { if policy.Argument.ServiceAffinity != nil {
predicateFactory = func(args PluginFactoryArgs) algorithm.FitPredicate { predicateFactory = func(args PluginFactoryArgs) predicates.FitPredicate {
predicate, precomputationFunction := predicates.NewServiceAffinityPredicate( predicate, precomputationFunction := predicates.NewServiceAffinityPredicate(
args.PodLister, args.PodLister,
args.ServiceLister, args.ServiceLister,
@ -224,7 +224,7 @@ func RegisterCustomFitPredicate(policy schedulerapi.PredicatePolicy) string {
return predicate return predicate
} }
} else if policy.Argument.LabelsPresence != nil { } else if policy.Argument.LabelsPresence != nil {
predicateFactory = func(args PluginFactoryArgs) algorithm.FitPredicate { predicateFactory = func(args PluginFactoryArgs) predicates.FitPredicate {
return predicates.NewNodeLabelPredicate( return predicates.NewNodeLabelPredicate(
policy.Argument.LabelsPresence.Labels, policy.Argument.LabelsPresence.Labels,
policy.Argument.LabelsPresence.Presence, policy.Argument.LabelsPresence.Presence,
@ -408,11 +408,11 @@ func GetAlgorithmProvider(name string) (*AlgorithmProviderConfig, error) {
return &provider, nil return &provider, nil
} }
func getFitPredicateFunctions(names sets.String, args PluginFactoryArgs) (map[string]algorithm.FitPredicate, error) { func getFitPredicateFunctions(names sets.String, args PluginFactoryArgs) (map[string]predicates.FitPredicate, error) {
schedulerFactoryMutex.Lock() schedulerFactoryMutex.Lock()
defer schedulerFactoryMutex.Unlock() defer schedulerFactoryMutex.Unlock()
predicates := map[string]algorithm.FitPredicate{} predicates := map[string]predicates.FitPredicate{}
for _, name := range names.List() { for _, name := range names.List() {
factory, ok := fitPredicateMap[name] factory, ok := fitPredicateMap[name]
if !ok { if !ok {
@ -441,12 +441,12 @@ func getPriorityMetadataProducer(args PluginFactoryArgs) (algorithm.PriorityMetadataProducer, error) {
return priorityMetadataProducer(args), nil return priorityMetadataProducer(args), nil
} }
func getPredicateMetadataProducer(args PluginFactoryArgs) (algorithm.PredicateMetadataProducer, error) { func getPredicateMetadataProducer(args PluginFactoryArgs) (predicates.PredicateMetadataProducer, error) {
schedulerFactoryMutex.Lock() schedulerFactoryMutex.Lock()
defer schedulerFactoryMutex.Unlock() defer schedulerFactoryMutex.Unlock()
if predicateMetadataProducer == nil { if predicateMetadataProducer == nil {
return algorithm.EmptyPredicateMetadataProducer, nil return predicates.EmptyPredicateMetadataProducer, nil
} }
return predicateMetadataProducer(args), nil return predicateMetadataProducer(args), nil
} }
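
Registration traffics in the same relocated type: the registry maps names to factories producing predicates.FitPredicate. A minimal registration sketch; alwaysFits is a hypothetical no-op predicate mirroring PredicateOne in the tests:

package example

import (
	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	"k8s.io/kubernetes/pkg/scheduler/factory"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// alwaysFits admits every pod and satisfies predicates.FitPredicate.
func alwaysFits(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
	return true, nil, nil
}

func init() {
	// The registry now stores a factory returning predicates.FitPredicate.
	factory.RegisterFitPredicate("AlwaysFits", alwaysFits)
}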

View File

@ -136,7 +136,7 @@ func podWithResources(id, desiredHost string, limits v1.ResourceList, requests v1.ResourceList) *v1.Pod {
return pod return pod
} }
func PredicateOne(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func PredicateOne(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
return true, nil, nil return true, nil, nil
} }
@ -153,7 +153,7 @@ func (es mockScheduler) Schedule(pod *v1.Pod, ml algorithm.NodeLister) (string, error) {
return es.machine, es.err return es.machine, es.err
} }
func (es mockScheduler) Predicates() map[string]algorithm.FitPredicate { func (es mockScheduler) Predicates() map[string]predicates.FitPredicate {
return nil return nil
} }
func (es mockScheduler) Prioritizers() []algorithm.PriorityConfig { func (es mockScheduler) Prioritizers() []algorithm.PriorityConfig {
@ -341,7 +341,7 @@ func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
scache.AddNode(&node) scache.AddNode(&node)
client := clientsetfake.NewSimpleClientset(&node) client := clientsetfake.NewSimpleClientset(&node)
informerFactory := informers.NewSharedInformerFactory(client, 0) informerFactory := informers.NewSharedInformerFactory(client, 0)
predicateMap := map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts} predicateMap := map[string]predicates.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts}
scheduler, bindingChan, _ := setupTestSchedulerWithOnePodOnNode(t, queuedPodStore, scache, informerFactory, stop, predicateMap, pod, &node) scheduler, bindingChan, _ := setupTestSchedulerWithOnePodOnNode(t, queuedPodStore, scache, informerFactory, stop, predicateMap, pod, &node)
waitPodExpireChan := make(chan struct{}) waitPodExpireChan := make(chan struct{})
@ -400,7 +400,7 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
scache.AddNode(&node) scache.AddNode(&node)
client := clientsetfake.NewSimpleClientset(&node) client := clientsetfake.NewSimpleClientset(&node)
informerFactory := informers.NewSharedInformerFactory(client, 0) informerFactory := informers.NewSharedInformerFactory(client, 0)
predicateMap := map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts} predicateMap := map[string]predicates.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts}
scheduler, bindingChan, errChan := setupTestSchedulerWithOnePodOnNode(t, queuedPodStore, scache, informerFactory, stop, predicateMap, firstPod, &node) scheduler, bindingChan, errChan := setupTestSchedulerWithOnePodOnNode(t, queuedPodStore, scache, informerFactory, stop, predicateMap, firstPod, &node)
// We use conflicted pod ports to incur fit predicate failure. // We use conflicted pod ports to incur fit predicate failure.
@ -415,7 +415,7 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
expectErr := &core.FitError{ expectErr := &core.FitError{
Pod: secondPod, Pod: secondPod,
NumAllNodes: 1, NumAllNodes: 1,
FailedPredicates: core.FailedPredicateMap{node.Name: []algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts}}, FailedPredicates: core.FailedPredicateMap{node.Name: []predicates.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts}},
} }
if !reflect.DeepEqual(expectErr, err) { if !reflect.DeepEqual(expectErr, err) {
t.Errorf("err want=%v, get=%v", expectErr, err) t.Errorf("err want=%v, get=%v", expectErr, err)
@ -489,7 +489,7 @@ func TestSchedulerErrorWithLongBinding(t *testing.T) {
client := clientsetfake.NewSimpleClientset(&node) client := clientsetfake.NewSimpleClientset(&node)
informerFactory := informers.NewSharedInformerFactory(client, 0) informerFactory := informers.NewSharedInformerFactory(client, 0)
predicateMap := map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts} predicateMap := map[string]predicates.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts}
scheduler, bindingChan := setupTestSchedulerLongBindingWithRetry( scheduler, bindingChan := setupTestSchedulerLongBindingWithRetry(
queuedPodStore, scache, informerFactory, predicateMap, stop, test.BindingDuration) queuedPodStore, scache, informerFactory, predicateMap, stop, test.BindingDuration)
@ -524,7 +524,7 @@ func TestSchedulerErrorWithLongBinding(t *testing.T) {
// queuedPodStore: pods queued before processing. // queuedPodStore: pods queued before processing.
// cache: scheduler cache that might contain assumed pods. // cache: scheduler cache that might contain assumed pods.
func setupTestSchedulerWithOnePodOnNode(t *testing.T, queuedPodStore *clientcache.FIFO, scache schedulerinternalcache.Cache, func setupTestSchedulerWithOnePodOnNode(t *testing.T, queuedPodStore *clientcache.FIFO, scache schedulerinternalcache.Cache,
informerFactory informers.SharedInformerFactory, stop chan struct{}, predicateMap map[string]algorithm.FitPredicate, pod *v1.Pod, node *v1.Node) (*Scheduler, chan *v1.Binding, chan error) { informerFactory informers.SharedInformerFactory, stop chan struct{}, predicateMap map[string]predicates.FitPredicate, pod *v1.Pod, node *v1.Node) (*Scheduler, chan *v1.Binding, chan error) {
scheduler, bindingChan, errChan := setupTestScheduler(queuedPodStore, scache, informerFactory, predicateMap, nil) scheduler, bindingChan, errChan := setupTestScheduler(queuedPodStore, scache, informerFactory, predicateMap, nil)
@ -596,14 +596,14 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
} }
client := clientsetfake.NewSimpleClientset(objects...) client := clientsetfake.NewSimpleClientset(objects...)
informerFactory := informers.NewSharedInformerFactory(client, 0) informerFactory := informers.NewSharedInformerFactory(client, 0)
predicateMap := map[string]algorithm.FitPredicate{ predicateMap := map[string]predicates.FitPredicate{
"PodFitsResources": predicates.PodFitsResources, "PodFitsResources": predicates.PodFitsResources,
} }
// Create expected failure reasons for all the nodes. Hopefully they will get rolled up into a non-spammy summary. // Create expected failure reasons for all the nodes. Hopefully they will get rolled up into a non-spammy summary.
failedPredicatesMap := core.FailedPredicateMap{} failedPredicatesMap := core.FailedPredicateMap{}
for _, node := range nodes { for _, node := range nodes {
failedPredicatesMap[node.Name] = []algorithm.PredicateFailureReason{ failedPredicatesMap[node.Name] = []predicates.PredicateFailureReason{
predicates.NewInsufficientResourceError(v1.ResourceCPU, 4000, 0, 2000), predicates.NewInsufficientResourceError(v1.ResourceCPU, 4000, 0, 2000),
predicates.NewInsufficientResourceError(v1.ResourceMemory, 500, 0, 100), predicates.NewInsufficientResourceError(v1.ResourceMemory, 500, 0, 100),
} }
@ -635,12 +635,12 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
// queuedPodStore: pods queued before processing. // queuedPodStore: pods queued before processing.
// scache: scheduler cache that might contain assumed pods. // scache: scheduler cache that might contain assumed pods.
func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache schedulerinternalcache.Cache, informerFactory informers.SharedInformerFactory, predicateMap map[string]algorithm.FitPredicate, recorder record.EventRecorder) (*Scheduler, chan *v1.Binding, chan error) { func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache schedulerinternalcache.Cache, informerFactory informers.SharedInformerFactory, predicateMap map[string]predicates.FitPredicate, recorder record.EventRecorder) (*Scheduler, chan *v1.Binding, chan error) {
algo := core.NewGenericScheduler( algo := core.NewGenericScheduler(
scache, scache,
nil, nil,
predicateMap, predicateMap,
algorithm.EmptyPredicateMetadataProducer, predicates.EmptyPredicateMetadataProducer,
[]algorithm.PriorityConfig{}, []algorithm.PriorityConfig{},
algorithm.EmptyPriorityMetadataProducer, algorithm.EmptyPriorityMetadataProducer,
&EmptyPluginSet{}, &EmptyPluginSet{},
@ -687,12 +687,12 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache schedulerinternalcache.Cache, informerFactory informers.SharedInformerFactory, predicateMap map[string]algorithm.FitPredicate, recorder record.EventRecorder) (*Scheduler, chan *v1.Binding, chan error) {
return sched, bindingChan, errChan return sched, bindingChan, errChan
} }
func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, scache schedulerinternalcache.Cache, informerFactory informers.SharedInformerFactory, predicateMap map[string]algorithm.FitPredicate, stop chan struct{}, bindingTime time.Duration) (*Scheduler, chan *v1.Binding) { func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, scache schedulerinternalcache.Cache, informerFactory informers.SharedInformerFactory, predicateMap map[string]predicates.FitPredicate, stop chan struct{}, bindingTime time.Duration) (*Scheduler, chan *v1.Binding) {
algo := core.NewGenericScheduler( algo := core.NewGenericScheduler(
scache, scache,
nil, nil,
predicateMap, predicateMap,
algorithm.EmptyPredicateMetadataProducer, predicates.EmptyPredicateMetadataProducer,
[]algorithm.PriorityConfig{}, []algorithm.PriorityConfig{},
algorithm.EmptyPriorityMetadataProducer, algorithm.EmptyPriorityMetadataProducer,
&EmptyPluginSet{}, &EmptyPluginSet{},
@ -748,7 +748,7 @@ func setupTestSchedulerWithVolumeBinding(fakeVolumeBinder *volumebinder.VolumeBinder,
client := clientsetfake.NewSimpleClientset(&testNode) client := clientsetfake.NewSimpleClientset(&testNode)
informerFactory := informers.NewSharedInformerFactory(client, 0) informerFactory := informers.NewSharedInformerFactory(client, 0)
predicateMap := map[string]algorithm.FitPredicate{ predicateMap := map[string]predicates.FitPredicate{
predicates.CheckVolumeBindingPred: predicates.NewVolumeBindingPredicate(fakeVolumeBinder), predicates.CheckVolumeBindingPred: predicates.NewVolumeBindingPredicate(fakeVolumeBinder),
} }
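
Expected failures follow the same pattern: FitError's FailedPredicates field now carries []predicates.PredicateFailureReason per node. A minimal sketch using the core and predicates packages as imported in this file; fitErrorFor is hypothetical:

package example

import (
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	"k8s.io/kubernetes/pkg/scheduler/core"
)

// fitErrorFor mirrors the expectations built in the tests above; the
// per-node failure-reason slice is typed against the predicates package.
func fitErrorFor(nodeName string) error {
	return &core.FitError{
		NumAllNodes: 1,
		FailedPredicates: core.FailedPredicateMap{
			nodeName: []predicates.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts},
		},
	}
}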

View File

@ -24,6 +24,7 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1" corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/pkg/scheduler/factory" "k8s.io/kubernetes/pkg/scheduler/factory"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
@ -37,12 +38,12 @@ type FakeConfigurator struct {
} }
// GetPredicateMetadataProducer is not implemented yet. // GetPredicateMetadataProducer is not implemented yet.
func (fc *FakeConfigurator) GetPredicateMetadataProducer() (algorithm.PredicateMetadataProducer, error) { func (fc *FakeConfigurator) GetPredicateMetadataProducer() (predicates.PredicateMetadataProducer, error) {
return nil, fmt.Errorf("not implemented") return nil, fmt.Errorf("not implemented")
} }
// GetPredicates is not implemented yet. // GetPredicates is not implemented yet.
func (fc *FakeConfigurator) GetPredicates(predicateKeys sets.String) (map[string]algorithm.FitPredicate, error) { func (fc *FakeConfigurator) GetPredicates(predicateKeys sets.String) (map[string]predicates.FitPredicate, error) {
return nil, fmt.Errorf("not implemented") return nil, fmt.Errorf("not implemented")
} }

View File

@ -31,7 +31,6 @@ go_test(
"//pkg/controller/volume/persistentvolume/options:go_default_library", "//pkg/controller/volume/persistentvolume/options:go_default_library",
"//pkg/features:go_default_library", "//pkg/features:go_default_library",
"//pkg/scheduler:go_default_library", "//pkg/scheduler:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/algorithmprovider:go_default_library", "//pkg/scheduler/algorithmprovider:go_default_library",
"//pkg/scheduler/api:go_default_library", "//pkg/scheduler/api:go_default_library",

View File

@ -40,7 +40,7 @@ import (
schedulerappconfig "k8s.io/kubernetes/cmd/kube-scheduler/app/config" schedulerappconfig "k8s.io/kubernetes/cmd/kube-scheduler/app/config"
"k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/scheduler" "k8s.io/kubernetes/pkg/scheduler"
"k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider" _ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
@ -56,11 +56,11 @@ type nodeStateManager struct {
makeUnSchedulable nodeMutationFunc makeUnSchedulable nodeMutationFunc
} }
func PredicateOne(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func PredicateOne(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
return true, nil, nil return true, nil, nil
} }
func PredicateTwo(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func PredicateTwo(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
return true, nil, nil return true, nil, nil
} }
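
The producer type moved with the rest: a PredicateMetadataProducer maps a pod and a node-info map to PredicateMetadata, which is what predicates.EmptyPredicateMetadataProducer supplies throughout this diff. A minimal sketch; emptyProducer is hypothetical:

package example

import (
	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// emptyProducer returns nil metadata, mirroring what the tests rely on when
// they pass predicates.EmptyPredicateMetadataProducer.
func emptyProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) predicates.PredicateMetadata {
	return nil
}

// producer asserts the signature matches the relocated producer type.
var producer predicates.PredicateMetadataProducer = emptyProducer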