mirror of https://github.com/k3s-io/k3s

Merge pull request #71978 from denkensk/move-predicate-types

Move predicate types from algorithm to predicates

commit 37dc6789d7
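This commit relocates the scheduler's predicate plumbing types (FitPredicate, PredicateMetadata, PredicateMetadataProducer, PredicateFailureReason) from pkg/scheduler/algorithm into pkg/scheduler/algorithm/predicates and updates every caller, as the hunks below show. As a rough illustration of what caller-side code looks like after the move, here is a minimal, hypothetical Go sketch of a predicate written against the relocated types; the function name, the "maintenance" label, and the reuse of ErrNodeLabelPresenceViolated are invented for the example.

package example

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// noMaintenanceNodes is a toy predicate that rejects nodes carrying a
// hypothetical "maintenance" label. Every predicate-related type it mentions
// now comes from the predicates package instead of algorithm.
func noMaintenanceNodes(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
	node := nodeInfo.Node()
	if node == nil {
		return false, nil, fmt.Errorf("node not found")
	}
	if _, found := node.Labels["maintenance"]; found {
		// Reuse an exported failure reason from the predicates package.
		return false, []predicates.PredicateFailureReason{predicates.ErrNodeLabelPresenceViolated}, nil
	}
	return true, nil, nil
}

// The signature above is exactly predicates.FitPredicate after this change.
var _ predicates.FitPredicate = noMaintenanceNodes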
@@ -20,7 +20,6 @@ go_library(
 "//pkg/controller/daemon/util:go_default_library",
 "//pkg/features:go_default_library",
 "//pkg/kubelet/types:go_default_library",
-"//pkg/scheduler/algorithm:go_default_library",
 "//pkg/scheduler/algorithm/predicates:go_default_library",
 "//pkg/scheduler/nodeinfo:go_default_library",
 "//pkg/util/labels:go_default_library",

@@ -54,7 +54,6 @@ import (
 "k8s.io/kubernetes/pkg/controller/daemon/util"
 "k8s.io/kubernetes/pkg/features"
 kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
 "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 "k8s.io/kubernetes/pkg/util/metrics"

@@ -1287,7 +1286,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
 return dsc.updateDaemonSetStatus(ds, hash, true)
 }

-func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *apps.DaemonSet) ([]algorithm.PredicateFailureReason, *schedulernodeinfo.NodeInfo, error) {
+func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *apps.DaemonSet) ([]predicates.PredicateFailureReason, *schedulernodeinfo.NodeInfo, error) {
 objects, err := dsc.podNodeIndex.ByIndex("nodeName", node.Name)
 if err != nil {
 return nil, nil, err

@@ -1428,8 +1427,8 @@ func NewPod(ds *apps.DaemonSet, nodeName string) *v1.Pod {
 // - PodFitsHost: checks pod's NodeName against node
 // - PodMatchNodeSelector: checks pod's NodeSelector and NodeAffinity against node
 // - PodToleratesNodeTaints: exclude tainted node unless pod has specific toleration
-func checkNodeFitness(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
-var predicateFails []algorithm.PredicateFailureReason
+func checkNodeFitness(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
+var predicateFails []predicates.PredicateFailureReason
 fit, reasons, err := predicates.PodFitsHost(pod, meta, nodeInfo)
 if err != nil {
 return false, predicateFails, err

@@ -1458,8 +1457,8 @@ func checkNodeFitness(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s

 // Predicates checks if a DaemonSet's pod can be scheduled on a node using GeneralPredicates
 // and PodToleratesNodeTaints predicate
-func Predicates(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
-var predicateFails []algorithm.PredicateFailureReason
+func Predicates(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
+var predicateFails []predicates.PredicateFailureReason

 // If ScheduleDaemonSetPods is enabled, only check nodeSelector, nodeAffinity and toleration/taint match.
 if utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) {
@@ -21,7 +21,6 @@ go_library(
 "//pkg/kubelet/container:go_default_library",
 "//pkg/kubelet/types:go_default_library",
 "//pkg/kubelet/util/format:go_default_library",
-"//pkg/scheduler/algorithm:go_default_library",
 "//pkg/scheduler/algorithm/predicates:go_default_library",
 "//pkg/scheduler/nodeinfo:go_default_library",
 "//pkg/security/apparmor:go_default_library",

@@ -18,7 +18,7 @@ package lifecycle

 import (
 "k8s.io/api/core/v1"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
+"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 )

 // AdmissionFailureHandlerStub is an AdmissionFailureHandler that does not perform any handling of admission failure.

@@ -31,6 +31,6 @@ func NewAdmissionFailureHandlerStub() *AdmissionFailureHandlerStub {
 return &AdmissionFailureHandlerStub{}
 }

-func (n *AdmissionFailureHandlerStub) HandleAdmissionFailure(admitPod *v1.Pod, failureReasons []algorithm.PredicateFailureReason) (bool, []algorithm.PredicateFailureReason, error) {
+func (n *AdmissionFailureHandlerStub) HandleAdmissionFailure(admitPod *v1.Pod, failureReasons []predicates.PredicateFailureReason) (bool, []predicates.PredicateFailureReason, error) {
 return false, failureReasons, nil
 }
@@ -24,7 +24,6 @@ import (
 "k8s.io/api/core/v1"
 v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 "k8s.io/kubernetes/pkg/kubelet/util/format"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
 "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )

@@ -36,7 +35,7 @@ type pluginResourceUpdateFuncType func(*schedulernodeinfo.NodeInfo, *PodAdmitAtt
 // AdmissionFailureHandler is an interface which defines how to deal with a failure to admit a pod.
 // This allows for the graceful handling of pod admission failure.
 type AdmissionFailureHandler interface {
-HandleAdmissionFailure(admitPod *v1.Pod, failureReasons []algorithm.PredicateFailureReason) (bool, []algorithm.PredicateFailureReason, error)
+HandleAdmissionFailure(admitPod *v1.Pod, failureReasons []predicates.PredicateFailureReason) (bool, []predicates.PredicateFailureReason, error)
 }

 type predicateAdmitHandler struct {
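Because kubelet admission is defined against this interface, an out-of-tree handler now only needs the predicates package for the failure-reason type. A minimal sketch of a custom AdmissionFailureHandler written against the relocated type; the logOnlyHandler name and the log message are invented, and GetReason() is the single method the interface exposes (see error.go further down).

package example

import (
	"k8s.io/api/core/v1"
	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
)

// logOnlyHandler rejects the pod but logs each failure reason first.
type logOnlyHandler struct{}

func (h *logOnlyHandler) HandleAdmissionFailure(admitPod *v1.Pod, failureReasons []predicates.PredicateFailureReason) (bool, []predicates.PredicateFailureReason, error) {
	for _, reason := range failureReasons {
		klog.Infof("pod %s/%s was not admitted: %s", admitPod.Namespace, admitPod.Name, reason.GetReason())
	}
	// false means the failure was not resolved; the reasons are passed back unchanged.
	return false, failureReasons, nil
}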
@@ -18,7 +18,6 @@ go_library(
 "//pkg/kubelet/lifecycle:go_default_library",
 "//pkg/kubelet/types:go_default_library",
 "//pkg/kubelet/util/format:go_default_library",
-"//pkg/scheduler/algorithm:go_default_library",
 "//pkg/scheduler/algorithm/predicates:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/client-go/tools/record:go_default_library",

@@ -30,7 +30,6 @@ import (
 "k8s.io/kubernetes/pkg/kubelet/lifecycle"
 kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 "k8s.io/kubernetes/pkg/kubelet/util/format"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
 "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 )

@@ -61,13 +60,13 @@ func NewCriticalPodAdmissionHandler(getPodsFunc eviction.ActivePodsFunc, killPod

 // HandleAdmissionFailure gracefully handles admission rejection, and, in some cases,
 // to allow admission of the pod despite its previous failure.
-func (c *CriticalPodAdmissionHandler) HandleAdmissionFailure(admitPod *v1.Pod, failureReasons []algorithm.PredicateFailureReason) (bool, []algorithm.PredicateFailureReason, error) {
+func (c *CriticalPodAdmissionHandler) HandleAdmissionFailure(admitPod *v1.Pod, failureReasons []predicates.PredicateFailureReason) (bool, []predicates.PredicateFailureReason, error) {
 if !kubetypes.IsCriticalPod(admitPod) {
 return false, failureReasons, nil
 }
 // InsufficientResourceError is not a reason to reject a critical pod.
 // Instead of rejecting, we free up resources to admit it, if no other reasons for rejection exist.
-nonResourceReasons := []algorithm.PredicateFailureReason{}
+nonResourceReasons := []predicates.PredicateFailureReason{}
 resourceReasons := []*admissionRequirement{}
 for _, reason := range failureReasons {
 if r, ok := reason.(*predicates.InsufficientResourceError); ok {
@@ -11,6 +11,7 @@ go_library(
 deps = [
 "//pkg/features:go_default_library",
 "//pkg/scheduler/algorithm:go_default_library",
+"//pkg/scheduler/algorithm/predicates:go_default_library",
 "//pkg/scheduler/api:go_default_library",
 "//pkg/scheduler/api/latest:go_default_library",
 "//pkg/scheduler/apis/config:go_default_library",

@@ -59,7 +59,6 @@ go_test(
 "//pkg/apis/core/v1/helper:go_default_library",
 "//pkg/features:go_default_library",
 "//pkg/kubelet/apis:go_default_library",
-"//pkg/scheduler/algorithm:go_default_library",
 "//pkg/scheduler/api:go_default_library",
 "//pkg/scheduler/nodeinfo:go_default_library",
 "//pkg/scheduler/testing:go_default_library",
@@ -23,7 +23,6 @@ import (
 utilfeature "k8s.io/apiserver/pkg/util/feature"
 "k8s.io/klog"
 "k8s.io/kubernetes/pkg/features"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
 schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 volumeutil "k8s.io/kubernetes/pkg/volume/util"
 )

@@ -36,7 +35,7 @@ type CSIMaxVolumeLimitChecker struct {

 // NewCSIMaxVolumeLimitPredicate returns a predicate for counting CSI volumes
 func NewCSIMaxVolumeLimitPredicate(
-pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) algorithm.FitPredicate {
+pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) FitPredicate {
 c := &CSIMaxVolumeLimitChecker{
 pvInfo: pvInfo,
 pvcInfo: pvcInfo,

@@ -45,7 +44,7 @@ func NewCSIMaxVolumeLimitPredicate(
 }

 func (c *CSIMaxVolumeLimitChecker) attachableLimitPredicate(
-pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {

 // if feature gate is disable we return
 if !utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {

@@ -101,7 +100,7 @@ func (c *CSIMaxVolumeLimitChecker) attachableLimitPredicate(
 if ok {
 currentVolumeCount := attachedVolumeCount[volumeLimitKey]
 if currentVolumeCount+count > int(maxVolumeLimit) {
-return false, []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}, nil
+return false, []PredicateFailureReason{ErrMaxVolumeCountExceeded}, nil
 }
 }
 }
@@ -25,7 +25,6 @@ import (
 utilfeature "k8s.io/apiserver/pkg/util/feature"
 utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 "k8s.io/kubernetes/pkg/features"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
 )

 func TestCSIVolumeCountPredicate(t *testing.T) {

@@ -105,12 +104,12 @@ func TestCSIVolumeCountPredicate(t *testing.T) {
 }

 defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
-expectedFailureReasons := []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}
+expectedFailureReasons := []PredicateFailureReason{ErrMaxVolumeCountExceeded}
 // running attachable predicate tests with feature gate and limit present on nodes
 for _, test := range tests {
 node := getNodeWithPodAndVolumeLimits(test.existingPods, int64(test.maxVols), test.filterName)
 pred := NewCSIMaxVolumeLimitPredicate(getFakeCSIPVInfo("csi-ebs", "csi-ebs"), getFakeCSIPVCInfo("csi-ebs"))
-fits, reasons, err := pred(test.newPod, PredicateMetadata(test.newPod, nil), node)
+fits, reasons, err := pred(test.newPod, GetPredicateMetadata(test.newPod, nil), node)
 if err != nil {
 t.Errorf("Using allocatable [%s]%s: unexpected error: %v", test.filterName, test.test, err)
 }
@@ -134,6 +134,11 @@ func (e *PredicateFailureError) GetReason() string {
 return e.PredicateDesc
 }

+// PredicateFailureReason interface represents the failure reason of a predicate.
+type PredicateFailureReason interface {
+GetReason() string
+}
+
 // FailureReason describes a failure reason.
 type FailureReason struct {
 reason string
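PredicateFailureReason is the one-method interface that every predicate in this change returns, so any type with a GetReason() string satisfies it. A tiny hypothetical example (the type name and message are invented):

package example

import "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"

// zoneDrainedReason is a hypothetical failure reason for a custom predicate.
type zoneDrainedReason struct {
	zone string
}

// GetReason is the single method required by predicates.PredicateFailureReason.
func (r zoneDrainedReason) GetReason() string {
	return "zone " + r.zone + " is being drained"
}

var _ predicates.PredicateFailureReason = zoneDrainedReason{}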
@@ -30,7 +30,6 @@ import (
 utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 "k8s.io/kubernetes/pkg/features"
 kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
 schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 volumeutil "k8s.io/kubernetes/pkg/volume/util"
 )

@@ -742,13 +741,13 @@ func TestVolumeCountConflicts(t *testing.T) {
 },
 }

-expectedFailureReasons := []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}
+expectedFailureReasons := []PredicateFailureReason{ErrMaxVolumeCountExceeded}

 // running attachable predicate tests without feature gate and no limit present on nodes
 for _, test := range tests {
 os.Setenv(KubeMaxPDVols, strconv.Itoa(test.maxVols))
 pred := NewMaxPDVolumeCountPredicate(test.filterName, getFakePVInfo(test.filterName), getFakePVCInfo(test.filterName))
-fits, reasons, err := pred(test.newPod, PredicateMetadata(test.newPod, nil), schedulernodeinfo.NewNodeInfo(test.existingPods...))
+fits, reasons, err := pred(test.newPod, GetPredicateMetadata(test.newPod, nil), schedulernodeinfo.NewNodeInfo(test.existingPods...))
 if err != nil {
 t.Errorf("[%s]%s: unexpected error: %v", test.filterName, test.test, err)
 }

@@ -766,7 +765,7 @@ func TestVolumeCountConflicts(t *testing.T) {
 for _, test := range tests {
 node := getNodeWithPodAndVolumeLimits(test.existingPods, int64(test.maxVols), test.filterName)
 pred := NewMaxPDVolumeCountPredicate(test.filterName, getFakePVInfo(test.filterName), getFakePVCInfo(test.filterName))
-fits, reasons, err := pred(test.newPod, PredicateMetadata(test.newPod, nil), node)
+fits, reasons, err := pred(test.newPod, GetPredicateMetadata(test.newPod, nil), node)
 if err != nil {
 t.Errorf("Using allocatable [%s]%s: unexpected error: %v", test.filterName, test.test, err)
 }
@@ -34,6 +34,16 @@ import (
 schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 )

+// PredicateMetadata interface represents anything that can access a predicate metadata.
+type PredicateMetadata interface {
+ShallowCopy() PredicateMetadata
+AddPod(addedPod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) error
+RemovePod(deletedPod *v1.Pod) error
+}
+
+// PredicateMetadataProducer is a function that computes predicate metadata for a given pod.
+type PredicateMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata
+
 // PredicateMetadataFactory defines a factory of predicate metadata.
 type PredicateMetadataFactory struct {
 podLister algorithm.PodLister
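With PredicateMetadata and PredicateMetadataProducer now defined here, the calling pattern stays the same: build a producer from a pod lister, compute metadata once per pod, then hand it to each predicate. A small sketch using only signatures visible in this diff; checkOneNode and its arguments are placeholders.

package example

import (
	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/algorithm"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// checkOneNode builds a metadata producer once, computes metadata for the pod,
// then evaluates GeneralPredicates against a single node.
func checkOneNode(podLister algorithm.PodLister, pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodeName string) (bool, []predicates.PredicateFailureReason, error) {
	producer := predicates.NewPredicateMetadataFactory(podLister) // a predicates.PredicateMetadataProducer
	meta := producer(pod, nodeNameToInfo)
	return predicates.GeneralPredicates(pod, meta, nodeNameToInfo[nodeName])
}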
@@ -91,21 +101,27 @@ type predicateMetadata struct {
 }

 // Ensure that predicateMetadata implements algorithm.PredicateMetadata.
-var _ algorithm.PredicateMetadata = &predicateMetadata{}
+var _ PredicateMetadata = &predicateMetadata{}

-// PredicateMetadataProducer function produces predicate metadata.
-type PredicateMetadataProducer func(pm *predicateMetadata)
+// predicateMetadataProducer function produces predicate metadata. It is stored in a global variable below
+// and used to modify the return values of PredicateMetadataProducer
+type predicateMetadataProducer func(pm *predicateMetadata)

 var predicateMetaProducerRegisterLock sync.Mutex
-var predicateMetadataProducers = make(map[string]PredicateMetadataProducer)
+var predicateMetadataProducers = make(map[string]predicateMetadataProducer)

 // RegisterPredicateMetadataProducer registers a PredicateMetadataProducer.
-func RegisterPredicateMetadataProducer(predicateName string, precomp PredicateMetadataProducer) {
+func RegisterPredicateMetadataProducer(predicateName string, precomp predicateMetadataProducer) {
 predicateMetaProducerRegisterLock.Lock()
 defer predicateMetaProducerRegisterLock.Unlock()
 predicateMetadataProducers[predicateName] = precomp
 }

+// EmptyPredicateMetadataProducer returns a no-op MetadataProducer type.
+func EmptyPredicateMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata {
+return nil
+}
+
 // RegisterPredicateMetadataProducerWithExtendedResourceOptions registers a
 // PredicateMetadataProducer that creates predicate metadata with the provided
 // options for extended resources.

@@ -118,7 +134,7 @@ func RegisterPredicateMetadataProducerWithExtendedResourceOptions(ignoredExtende
 }

 // NewPredicateMetadataFactory creates a PredicateMetadataFactory.
-func NewPredicateMetadataFactory(podLister algorithm.PodLister) algorithm.PredicateMetadataProducer {
+func NewPredicateMetadataFactory(podLister algorithm.PodLister) PredicateMetadataProducer {
 factory := &PredicateMetadataFactory{
 podLister,
 }

@@ -126,7 +142,7 @@ func NewPredicateMetadataFactory(podLister algorithm.PodLister) algorithm.Predic
 }

 // GetMetadata returns the predicateMetadata used which will be used by various predicates.
-func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInfoMap map[string]*schedulernodeinfo.NodeInfo) algorithm.PredicateMetadata {
+func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInfoMap map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata {
 // If we cannot compute metadata, just return nil
 if pod == nil {
 return nil

@@ -285,7 +301,7 @@ func (meta *predicateMetadata) AddPod(addedPod *v1.Pod, nodeInfo *schedulernodei

 // ShallowCopy copies a metadata struct into a new struct and creates a copy of
 // its maps and slices, but it does not copy the contents of pointer values.
-func (meta *predicateMetadata) ShallowCopy() algorithm.PredicateMetadata {
+func (meta *predicateMetadata) ShallowCopy() PredicateMetadata {
 newPredMeta := &predicateMetadata{
 pod: meta.pod,
 podBestEffort: meta.podBestEffort,

@@ -304,7 +320,7 @@ func (meta *predicateMetadata) ShallowCopy() algorithm.PredicateMetadata {
 meta.serviceAffinityMatchingPodServices...)
 newPredMeta.serviceAffinityMatchingPodList = append([]*v1.Pod(nil),
 meta.serviceAffinityMatchingPodList...)
-return (algorithm.PredicateMetadata)(newPredMeta)
+return (PredicateMetadata)(newPredMeta)
 }

 type affinityTermProperties struct {
@@ -137,6 +137,10 @@ var (
 CheckNodeMemoryPressurePred, CheckNodePIDPressurePred, CheckNodeDiskPressurePred, MatchInterPodAffinityPred}
 )

+// FitPredicate is a function that indicates if a pod fits into an existing node.
+// The failure information is given by the error.
+type FitPredicate func(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error)
+
 // NodeInfo interface represents anything that can get node object from node ID.
 type NodeInfo interface {
 GetNodeInfo(nodeID string) (*v1.Node, error)
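FitPredicate moving into this package means composed checks, like GeneralPredicates further down, are just a sequence of calls that accumulate PredicateFailureReasons. A condensed sketch of that pattern, using only exported predicates shown in this diff; runBasicChecks is an invented name and the error handling is trimmed.

package example

import (
	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// runBasicChecks chains a few built-in predicates and collects their failure reasons.
func runBasicChecks(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
	var failures []predicates.PredicateFailureReason
	checks := []predicates.FitPredicate{
		predicates.PodFitsHost,
		predicates.PodFitsHostPorts,
		predicates.PodMatchNodeSelector,
	}
	for _, check := range checks {
		fit, reasons, err := check(pod, meta, nodeInfo)
		if err != nil {
			return false, failures, err
		}
		if !fit {
			failures = append(failures, reasons...)
		}
	}
	return len(failures) == 0, failures, nil
}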
@@ -274,11 +278,11 @@ func isVolumeConflict(volume v1.Volume, pod *v1.Pod) bool {
 // - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image.
 // - ISCSI forbids if any two pods share at least same IQN, LUN and Target
 // TODO: migrate this into some per-volume specific code?
-func NoDiskConflict(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func NoDiskConflict(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 for _, v := range pod.Spec.Volumes {
 for _, ev := range nodeInfo.Pods() {
 if isVolumeConflict(v, ev) {
-return false, []algorithm.PredicateFailureReason{ErrDiskConflict}, nil
+return false, []PredicateFailureReason{ErrDiskConflict}, nil
 }
 }
 }

@@ -313,7 +317,7 @@ type VolumeFilter struct {
 // types, counts the number of unique volumes, and rejects the new pod if it would place the total count over
 // the maximum.
 func NewMaxPDVolumeCountPredicate(
-filterName string, pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) algorithm.FitPredicate {
+filterName string, pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) FitPredicate {
 var filter VolumeFilter
 var volumeLimitKey v1.ResourceName

@@ -447,7 +451,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s
 return nil
 }

-func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 // If a pod doesn't have any volume attached to it, the predicate will always be true.
 // Thus we make a fast path for it, to avoid unnecessary computations in this case.
 if len(pod.Spec.Volumes) == 0 {

@@ -492,7 +496,7 @@ func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta algorithm.Predicat

 if numExistingVolumes+numNewVolumes > maxAttachLimit {
 // violates MaxEBSVolumeCount or MaxGCEPDVolumeCount
-return false, []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}, nil
+return false, []PredicateFailureReason{ErrMaxVolumeCountExceeded}, nil
 }
 if nodeInfo != nil && nodeInfo.TransientInfo != nil && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) {
 nodeInfo.TransientInfo.TransientLock.Lock()

@@ -575,7 +579,7 @@ type VolumeZoneChecker struct {
 // determining the zone of a volume during scheduling, and that is likely to
 // require calling out to the cloud provider. It seems that we are moving away
 // from inline volume declarations anyway.
-func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo, classInfo StorageClassInfo) algorithm.FitPredicate {
+func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo, classInfo StorageClassInfo) FitPredicate {
 c := &VolumeZoneChecker{
 pvInfo: pvInfo,
 pvcInfo: pvcInfo,

@@ -584,7 +588,7 @@ func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolum
 return c.predicate
 }

-func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 // If a pod doesn't have any volume attached to it, the predicate will always be true.
 // Thus we make a fast path for it, to avoid unnecessary computations in this case.
 if len(pod.Spec.Volumes) == 0 {

@@ -671,7 +675,7 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetad

 if !volumeVSet.Has(nodeV) {
 klog.V(10).Infof("Won't schedule pod %q onto node %q due to volume %q (mismatch on %q)", pod.Name, node.Name, pvName, k)
-return false, []algorithm.PredicateFailureReason{ErrVolumeZoneConflict}, nil
+return false, []PredicateFailureReason{ErrVolumeZoneConflict}, nil
 }
 }
 }

@@ -725,13 +729,13 @@ func podName(pod *v1.Pod) string {
 // PodFitsResources checks if a node has sufficient resources, such as cpu, memory, gpu, opaque int resources etc to run a pod.
 // First return value indicates whether a node has sufficient resources to run a pod while the second return value indicates the
 // predicate failure reasons if the node has insufficient resources to run the pod.
-func PodFitsResources(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodFitsResources(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 node := nodeInfo.Node()
 if node == nil {
 return false, nil, fmt.Errorf("node not found")
 }

-var predicateFails []algorithm.PredicateFailureReason
+var predicateFails []PredicateFailureReason
 allowedPodNumber := nodeInfo.AllowedPodNumber()
 if len(nodeInfo.Pods())+1 > allowedPodNumber {
 predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourcePods, 1, int64(len(nodeInfo.Pods())), int64(allowedPodNumber)))
@@ -850,7 +854,7 @@ func podMatchesNodeSelectorAndAffinityTerms(pod *v1.Pod, node *v1.Node) bool {
 }

 // PodMatchNodeSelector checks if a pod node selector matches the node label.
-func PodMatchNodeSelector(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodMatchNodeSelector(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 node := nodeInfo.Node()
 if node == nil {
 return false, nil, fmt.Errorf("node not found")

@@ -858,11 +862,11 @@ func PodMatchNodeSelector(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInf
 if podMatchesNodeSelectorAndAffinityTerms(pod, node) {
 return true, nil, nil
 }
-return false, []algorithm.PredicateFailureReason{ErrNodeSelectorNotMatch}, nil
+return false, []PredicateFailureReason{ErrNodeSelectorNotMatch}, nil
 }

 // PodFitsHost checks if a pod spec node name matches the current node.
-func PodFitsHost(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodFitsHost(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 if len(pod.Spec.NodeName) == 0 {
 return true, nil, nil
 }

@@ -873,7 +877,7 @@ func PodFitsHost(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedu
 if pod.Spec.NodeName == node.Name {
 return true, nil, nil
 }
-return false, []algorithm.PredicateFailureReason{ErrPodNotMatchHostName}, nil
+return false, []PredicateFailureReason{ErrPodNotMatchHostName}, nil
 }

 // NodeLabelChecker contains information to check node labels for a predicate.

@@ -884,7 +888,7 @@ type NodeLabelChecker struct {

 // NewNodeLabelPredicate creates a predicate which evaluates whether a pod can fit based on the
 // node labels which match a filter that it requests.
-func NewNodeLabelPredicate(labels []string, presence bool) algorithm.FitPredicate {
+func NewNodeLabelPredicate(labels []string, presence bool) FitPredicate {
 labelChecker := &NodeLabelChecker{
 labels: labels,
 presence: presence,

@@ -904,7 +908,7 @@ func NewNodeLabelPredicate(labels []string, presence bool) algorithm.FitPredicat
 // Alternately, eliminating nodes that have a certain label, regardless of value, is also useful
 // A node may have a label with "retiring" as key and the date as the value
 // and it may be desirable to avoid scheduling new pods on this node
-func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 node := nodeInfo.Node()
 if node == nil {
 return false, nil, fmt.Errorf("node not found")

@@ -915,7 +919,7 @@ func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta algorithm.Pr
 for _, label := range n.labels {
 exists = nodeLabels.Has(label)
 if (exists && !n.presence) || (!exists && n.presence) {
-return false, []algorithm.PredicateFailureReason{ErrNodeLabelPresenceViolated}, nil
+return false, []PredicateFailureReason{ErrNodeLabelPresenceViolated}, nil
 }
 }
 return true, nil, nil

@@ -952,7 +956,7 @@ func (s *ServiceAffinity) serviceAffinityMetadataProducer(pm *predicateMetadata)
 }

 // NewServiceAffinityPredicate creates a ServiceAffinity.
-func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister algorithm.ServiceLister, nodeInfo NodeInfo, labels []string) (algorithm.FitPredicate, PredicateMetadataProducer) {
+func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister algorithm.ServiceLister, nodeInfo NodeInfo, labels []string) (FitPredicate, predicateMetadataProducer) {
 affinity := &ServiceAffinity{
 podLister: podLister,
 serviceLister: serviceLister,

@@ -989,7 +993,7 @@ func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister al
 //
 // WARNING: This Predicate is NOT guaranteed to work if some of the predicateMetadata data isn't precomputed...
 // For that reason it is not exported, i.e. it is highly coupled to the implementation of the FitPredicate construction.
-func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 var services []*v1.Service
 var pods []*v1.Pod
 if pm, ok := meta.(*predicateMetadata); ok && (pm.serviceAffinityMatchingPodList != nil || pm.serviceAffinityMatchingPodServices != nil) {

@@ -1024,11 +1028,11 @@ func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta algorithm.Predi
 if CreateSelectorFromLabels(affinityLabels).Matches(labels.Set(node.Labels)) {
 return true, nil, nil
 }
-return false, []algorithm.PredicateFailureReason{ErrServiceAffinityViolated}, nil
+return false, []PredicateFailureReason{ErrServiceAffinityViolated}, nil
 }

 // PodFitsHostPorts checks if a node has free ports for the requested pod ports.
-func PodFitsHostPorts(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodFitsHostPorts(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 var wantPorts []*v1.ContainerPort
 if predicateMeta, ok := meta.(*predicateMetadata); ok {
 wantPorts = predicateMeta.podPorts

@@ -1044,7 +1048,7 @@ func PodFitsHostPorts(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s

 // try to see whether existingPorts and wantPorts will conflict or not
 if portsConflict(existingPorts, wantPorts) {
-return false, []algorithm.PredicateFailureReason{ErrPodNotFitsHostPorts}, nil
+return false, []PredicateFailureReason{ErrPodNotFitsHostPorts}, nil
 }

 return true, nil, nil
@@ -1068,8 +1072,8 @@ func haveOverlap(a1, a2 []string) bool {

 // GeneralPredicates checks whether noncriticalPredicates and EssentialPredicates pass. noncriticalPredicates are the predicates
 // that only non-critical pods need and EssentialPredicates are the predicates that all pods, including critical pods, need
-func GeneralPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
-var predicateFails []algorithm.PredicateFailureReason
+func GeneralPredicates(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+var predicateFails []PredicateFailureReason
 fit, reasons, err := noncriticalPredicates(pod, meta, nodeInfo)
 if err != nil {
 return false, predicateFails, err

@@ -1090,8 +1094,8 @@ func GeneralPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *
 }

 // noncriticalPredicates are the predicates that only non-critical pods need
-func noncriticalPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
-var predicateFails []algorithm.PredicateFailureReason
+func noncriticalPredicates(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+var predicateFails []PredicateFailureReason
 fit, reasons, err := PodFitsResources(pod, meta, nodeInfo)
 if err != nil {
 return false, predicateFails, err

@@ -1104,8 +1108,8 @@ func noncriticalPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeIn
 }

 // EssentialPredicates are the predicates that all pods, including critical pods, need
-func EssentialPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
-var predicateFails []algorithm.PredicateFailureReason
+func EssentialPredicates(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+var predicateFails []PredicateFailureReason
 fit, reasons, err := PodFitsHost(pod, meta, nodeInfo)
 if err != nil {
 return false, predicateFails, err

@@ -1141,7 +1145,7 @@ type PodAffinityChecker struct {
 }

 // NewPodAffinityPredicate creates a PodAffinityChecker.
-func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister) algorithm.FitPredicate {
+func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister) FitPredicate {
 checker := &PodAffinityChecker{
 info: info,
 podLister: podLister,

@@ -1152,13 +1156,13 @@ func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister) algor
 // InterPodAffinityMatches checks if a pod can be scheduled on the specified node with pod affinity/anti-affinity configuration.
 // First return value indicates whether a pod can be scheduled on the specified node while the second return value indicates the
 // predicate failure reasons if the pod cannot be scheduled on the specified node.
-func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 node := nodeInfo.Node()
 if node == nil {
 return false, nil, fmt.Errorf("node not found")
 }
 if failedPredicates, error := c.satisfiesExistingPodsAntiAffinity(pod, meta, nodeInfo); failedPredicates != nil {
-failedPredicates := append([]algorithm.PredicateFailureReason{ErrPodAffinityNotMatch}, failedPredicates)
+failedPredicates := append([]PredicateFailureReason{ErrPodAffinityNotMatch}, failedPredicates)
 return false, failedPredicates, error
 }

@@ -1168,7 +1172,7 @@ func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm
 return true, nil, nil
 }
 if failedPredicates, error := c.satisfiesPodsAffinityAntiAffinity(pod, meta, nodeInfo, affinity); failedPredicates != nil {
-failedPredicates := append([]algorithm.PredicateFailureReason{ErrPodAffinityNotMatch}, failedPredicates)
+failedPredicates := append([]PredicateFailureReason{ErrPodAffinityNotMatch}, failedPredicates)
 return false, failedPredicates, error
 }

@@ -1290,7 +1294,7 @@ func (c *PodAffinityChecker) getMatchingAntiAffinityTopologyPairsOfPods(pod *v1.

 // Checks if scheduling the pod onto this node would break any anti-affinity
 // terms indicated by the existing pods.
-func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (algorithm.PredicateFailureReason, error) {
+func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (PredicateFailureReason, error) {
 node := nodeInfo.Node()
 if node == nil {
 return ErrExistingPodsAntiAffinityRulesNotMatch, fmt.Errorf("Node is nil")

@@ -1365,8 +1369,8 @@ func (c *PodAffinityChecker) nodeMatchesAnyTopologyTerm(pod *v1.Pod, topologyPai

 // Checks if scheduling the pod onto this node would break any term of this pod.
 func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
-meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo,
-affinity *v1.Affinity) (algorithm.PredicateFailureReason, error) {
+meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo,
+affinity *v1.Affinity) (PredicateFailureReason, error) {
 node := nodeInfo.Node()
 if node == nil {
 return ErrPodAffinityRulesNotMatch, fmt.Errorf("Node is nil")

@@ -1466,9 +1470,9 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
 }

 // CheckNodeUnschedulablePredicate checks if a pod can be scheduled on a node with Unschedulable spec.
-func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 if nodeInfo == nil || nodeInfo.Node() == nil {
-return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil
+return false, []PredicateFailureReason{ErrNodeUnknownCondition}, nil
 }

 // If pod tolerate unschedulable taint, it's also tolerate `node.Spec.Unschedulable`.
@@ -1479,16 +1483,16 @@ func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta algorithm.PredicateMetada

 // TODO (k82cn): deprecates `node.Spec.Unschedulable` in 1.13.
 if nodeInfo.Node().Spec.Unschedulable && !podToleratesUnschedulable {
-return false, []algorithm.PredicateFailureReason{ErrNodeUnschedulable}, nil
+return false, []PredicateFailureReason{ErrNodeUnschedulable}, nil
 }

 return true, nil, nil
 }

 // PodToleratesNodeTaints checks if a pod tolerations can tolerate the node taints
-func PodToleratesNodeTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodToleratesNodeTaints(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 if nodeInfo == nil || nodeInfo.Node() == nil {
-return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil
+return false, []PredicateFailureReason{ErrNodeUnknownCondition}, nil
 }

 return podToleratesNodeTaints(pod, nodeInfo, func(t *v1.Taint) bool {

@@ -1498,13 +1502,13 @@ func PodToleratesNodeTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeI
 }

 // PodToleratesNodeNoExecuteTaints checks if a pod tolerations can tolerate the node's NoExecute taints
-func PodToleratesNodeNoExecuteTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodToleratesNodeNoExecuteTaints(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 return podToleratesNodeTaints(pod, nodeInfo, func(t *v1.Taint) bool {
 return t.Effect == v1.TaintEffectNoExecute
 })
 }

-func podToleratesNodeTaints(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo, filter func(t *v1.Taint) bool) (bool, []algorithm.PredicateFailureReason, error) {
+func podToleratesNodeTaints(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo, filter func(t *v1.Taint) bool) (bool, []PredicateFailureReason, error) {
 taints, err := nodeInfo.Taints()
 if err != nil {
 return false, nil, err

@@ -1513,7 +1517,7 @@ func podToleratesNodeTaints(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo, f
 if v1helper.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, taints, filter) {
 return true, nil, nil
 }
-return false, []algorithm.PredicateFailureReason{ErrTaintsTolerationsNotMatch}, nil
+return false, []PredicateFailureReason{ErrTaintsTolerationsNotMatch}, nil
 }

 // isPodBestEffort checks if pod is scheduled with best-effort QoS

@@ -1523,7 +1527,7 @@ func isPodBestEffort(pod *v1.Pod) bool {

 // CheckNodeMemoryPressurePredicate checks if a pod can be scheduled on a node
 // reporting memory pressure condition.
-func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 var podBestEffort bool
 if predicateMeta, ok := meta.(*predicateMetadata); ok {
 podBestEffort = predicateMeta.podBestEffort

@@ -1538,38 +1542,37 @@ func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetad

 // check if node is under memory pressure
 if nodeInfo.MemoryPressureCondition() == v1.ConditionTrue {
-return false, []algorithm.PredicateFailureReason{ErrNodeUnderMemoryPressure}, nil
+return false, []PredicateFailureReason{ErrNodeUnderMemoryPressure}, nil
 }
 return true, nil, nil
 }

 // CheckNodeDiskPressurePredicate checks if a pod can be scheduled on a node
 // reporting disk pressure condition.
-func CheckNodeDiskPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func CheckNodeDiskPressurePredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 // check if node is under disk pressure
 if nodeInfo.DiskPressureCondition() == v1.ConditionTrue {
-return false, []algorithm.PredicateFailureReason{ErrNodeUnderDiskPressure}, nil
+return false, []PredicateFailureReason{ErrNodeUnderDiskPressure}, nil
 }
 return true, nil, nil
 }

 // CheckNodePIDPressurePredicate checks if a pod can be scheduled on a node
 // reporting pid pressure condition.
-func CheckNodePIDPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func CheckNodePIDPressurePredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 // check if node is under pid pressure
 if nodeInfo.PIDPressureCondition() == v1.ConditionTrue {
-return false, []algorithm.PredicateFailureReason{ErrNodeUnderPIDPressure}, nil
+return false, []PredicateFailureReason{ErrNodeUnderPIDPressure}, nil
 }
 return true, nil, nil
 }

 // CheckNodeConditionPredicate checks if a pod can be scheduled on a node reporting
 // network unavailable and not ready condition. Only node conditions are accounted in this predicate.
-func CheckNodeConditionPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
-reasons := []algorithm.PredicateFailureReason{}
-
+func CheckNodeConditionPredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
+reasons := []PredicateFailureReason{}
 if nodeInfo == nil || nodeInfo.Node() == nil {
-return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil
+return false, []PredicateFailureReason{ErrNodeUnknownCondition}, nil
 }

 node := nodeInfo.Node()

@@ -1607,14 +1610,14 @@ type VolumeBindingChecker struct {
 //
 // The predicate returns true if all bound PVCs have compatible PVs with the node, and if all unbound
 // PVCs can be matched with an available and node-compatible PV.
-func NewVolumeBindingPredicate(binder *volumebinder.VolumeBinder) algorithm.FitPredicate {
+func NewVolumeBindingPredicate(binder *volumebinder.VolumeBinder) FitPredicate {
 c := &VolumeBindingChecker{
 binder: binder,
 }
 return c.predicate
 }

-func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
 return true, nil, nil
 }

@@ -1629,7 +1632,7 @@ func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMe
 return false, nil, err
 }

-failReasons := []algorithm.PredicateFailureReason{}
+failReasons := []PredicateFailureReason{}
 if !boundSatisfied {
 klog.V(5).Infof("Bound PVs not satisfied for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
 failReasons = append(failReasons, ErrVolumeNodeConflict)
@ -34,7 +34,6 @@ import (
|
|||
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
|
||||
|
@ -91,7 +90,7 @@ func newResourceInitPod(pod *v1.Pod, usage ...schedulernodeinfo.Resource) *v1.Po
|
|||
return pod
|
||||
}
|
||||
|
||||
func PredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulernodeinfo.NodeInfo) algorithm.PredicateMetadata {
|
||||
func GetPredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata {
|
||||
pm := PredicateMetadataFactory{schedulertesting.FakePodLister{p}}
|
||||
return pm.GetMetadata(p, nodeInfo)
|
||||
}
|
||||
|
@ -102,7 +101,7 @@ func TestPodFitsResources(t *testing.T) {
|
|||
nodeInfo *schedulernodeinfo.NodeInfo
|
||||
fits bool
|
||||
name string
|
||||
reasons []algorithm.PredicateFailureReason
|
||||
reasons []PredicateFailureReason
|
||||
ignoredExtendedResources sets.String
|
||||
}{
|
||||
{
|
||||
|
@ -118,7 +117,7 @@ func TestPodFitsResources(t *testing.T) {
|
|||
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
|
||||
fits: false,
|
||||
name: "too many resources fails",
|
||||
reasons: []algorithm.PredicateFailureReason{
|
||||
reasons: []PredicateFailureReason{
|
||||
NewInsufficientResourceError(v1.ResourceCPU, 1, 10, 10),
|
||||
NewInsufficientResourceError(v1.ResourceMemory, 1, 20, 20),
|
||||
},
|
||||
|
@ -129,7 +128,7 @@ func TestPodFitsResources(t *testing.T) {
|
|||
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 19})),
|
||||
fits: false,
|
||||
name: "too many resources fails due to init container cpu",
|
||||
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10)},
|
||||
reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10)},
|
||||
},
|
||||
{
|
||||
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 3, Memory: 1}, schedulernodeinfo.Resource{MilliCPU: 2, Memory: 1}),
|
||||
|
@ -137,7 +136,7 @@ func TestPodFitsResources(t *testing.T) {
|
|||
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 19})),
|
||||
fits: false,
|
||||
name: "too many resources fails due to highest init container cpu",
|
||||
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10)},
|
||||
reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10)},
|
||||
},
|
||||
{
|
||||
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 3}),
|
||||
|
@ -145,7 +144,7 @@ func TestPodFitsResources(t *testing.T) {
|
|||
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
|
||||
fits: false,
|
||||
name: "too many resources fails due to init container memory",
|
||||
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20)},
|
||||
reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20)},
|
||||
},
|
||||
{
|
||||
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 3}, schedulernodeinfo.Resource{MilliCPU: 1, Memory: 2}),
|
||||
|
@ -153,7 +152,7 @@ func TestPodFitsResources(t *testing.T) {
|
|||
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
|
||||
fits: false,
|
||||
name: "too many resources fails due to highest init container memory",
|
||||
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20)},
|
||||
reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20)},
|
||||
},
|
||||
{
|
||||
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
|
||||
|
@ -182,7 +181,7 @@ func TestPodFitsResources(t *testing.T) {
|
|||
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 5})),
|
||||
fits: false,
|
||||
name: "one resource memory fits",
|
||||
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 2, 9, 10)},
|
||||
reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 2, 9, 10)},
|
||||
},
|
||||
{
|
||||
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 2}),
|
||||
|
@ -190,7 +189,7 @@ func TestPodFitsResources(t *testing.T) {
|
|||
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
|
||||
fits: false,
|
||||
name: "one resource cpu fits",
|
||||
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 2, 19, 20)},
|
||||
reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 2, 19, 20)},
|
||||
},
|
||||
{
|
||||
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
|
||||
|
@ -225,7 +224,7 @@ func TestPodFitsResources(t *testing.T) {
|
|||
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
|
||||
fits: false,
|
||||
name: "extended resource capacity enforced",
|
||||
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 10, 0, 5)},
|
||||
reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 10, 0, 5)},
|
||||
},
|
||||
{
|
||||
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
|
||||
|
@ -234,7 +233,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
fits: false,
name: "extended resource capacity enforced for init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 10, 0, 5)},
reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 10, 0, 5)},
},
{
pod: newResourcePod(
@ -243,7 +242,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
fits: false,
name: "extended resource allocatable enforced",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 1, 5, 5)},
reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 1, 5, 5)},
},
{
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
@ -252,7 +251,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
fits: false,
name: "extended resource allocatable enforced for init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 1, 5, 5)},
reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 1, 5, 5)},
},
{
pod: newResourcePod(
@ -262,7 +261,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
fits: false,
name: "extended resource allocatable enforced for multiple containers",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 6, 2, 5)},
reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 6, 2, 5)},
},
{
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
@ -281,7 +280,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
fits: false,
name: "extended resource allocatable enforced for multiple init containers",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 6, 2, 5)},
reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 6, 2, 5)},
},
{
pod: newResourcePod(
@ -290,7 +289,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
fits: false,
name: "extended resource allocatable enforced for unknown resource",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceB, 1, 0, 0)},
reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceB, 1, 0, 0)},
},
{
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
@ -299,7 +298,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
fits: false,
name: "extended resource allocatable enforced for unknown resource for init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceB, 1, 0, 0)},
reasons: []PredicateFailureReason{NewInsufficientResourceError(extendedResourceB, 1, 0, 0)},
},
{
pod: newResourcePod(
@ -308,7 +307,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
fits: false,
name: "kubernetes.io resource capacity enforced",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(kubernetesIOResourceA, 10, 0, 0)},
reasons: []PredicateFailureReason{NewInsufficientResourceError(kubernetesIOResourceA, 10, 0, 0)},
},
{
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
@ -317,7 +316,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
fits: false,
name: "kubernetes.io resource capacity enforced for init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(kubernetesIOResourceB, 10, 0, 0)},
reasons: []PredicateFailureReason{NewInsufficientResourceError(kubernetesIOResourceB, 10, 0, 0)},
},
{
pod: newResourcePod(
@ -326,7 +325,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
fits: false,
name: "hugepages resource capacity enforced",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)},
reasons: []PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)},
},
{
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
@ -335,7 +334,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
fits: false,
name: "hugepages resource capacity enforced for init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)},
reasons: []PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)},
},
{
pod: newResourcePod(
@ -345,7 +344,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})),
fits: false,
name: "hugepages resource allocatable enforced for multiple containers",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 6, 2, 5)},
reasons: []PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 6, 2, 5)},
},
{
pod: newResourcePod(
@ -363,7 +362,7 @@ func TestPodFitsResources(t *testing.T) {
node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
test.nodeInfo.SetNode(&node)
RegisterPredicateMetadataProducerWithExtendedResourceOptions(test.ignoredExtendedResources)
meta := PredicateMetadata(test.pod, nil)
meta := GetPredicateMetadata(test.pod, nil)
fits, reasons, err := PodFitsResources(test.pod, meta, test.nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
@ -382,7 +381,7 @@ func TestPodFitsResources(t *testing.T) {
nodeInfo *schedulernodeinfo.NodeInfo
fits bool
name string
reasons []algorithm.PredicateFailureReason
reasons []PredicateFailureReason
}{
{
pod: &v1.Pod{},
@ -390,7 +389,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
fits: false,
name: "even without specified resources predicate fails when there's no space for additional pod",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
},
{
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
@ -398,7 +397,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
fits: false,
name: "even if both resources fit predicate fails when there's no space for additional pod",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
},
{
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
@ -406,7 +405,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
fits: false,
name: "even for equal edge case predicate fails when there's no space for additional pod",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
},
{
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
@ -414,14 +413,14 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
fits: false,
name: "even for equal edge case predicate fails when there's no space for additional pod due to init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
reasons: []PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
},
}
for _, test := range notEnoughPodsTests {
t.Run(test.name, func(t *testing.T) {
node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 1, 0, 0, 0)}}
test.nodeInfo.SetNode(&node)
fits, reasons, err := PodFitsResources(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
fits, reasons, err := PodFitsResources(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -439,7 +438,7 @@ func TestPodFitsResources(t *testing.T) {
nodeInfo *schedulernodeinfo.NodeInfo
fits bool
name string
reasons []algorithm.PredicateFailureReason
reasons []PredicateFailureReason
}{
{
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
@ -447,7 +446,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 10})),
fits: false,
name: "due to container scratch disk",
reasons: []algorithm.PredicateFailureReason{
reasons: []PredicateFailureReason{
NewInsufficientResourceError(v1.ResourceCPU, 1, 10, 10),
},
},
@ -464,7 +463,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 2})),
fits: false,
name: "storage ephemeral local storage request exceeds allocatable",
reasons: []algorithm.PredicateFailureReason{
reasons: []PredicateFailureReason{
NewInsufficientResourceError(v1.ResourceEphemeralStorage, 25, 0, 20),
},
},
@ -481,7 +480,7 @@ func TestPodFitsResources(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
test.nodeInfo.SetNode(&node)
fits, reasons, err := PodFitsResources(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
fits, reasons, err := PodFitsResources(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -538,13 +537,13 @@ func TestPodFitsHost(t *testing.T) {
name: "host doesn't match",
},
}
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrPodNotMatchHostName}
expectedFailureReasons := []PredicateFailureReason{ErrPodNotMatchHostName}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(test.node)
fits, reasons, err := PodFitsHost(test.pod, PredicateMetadata(test.pod, nil), nodeInfo)
fits, reasons, err := PodFitsHost(test.pod, GetPredicateMetadata(test.pod, nil), nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -680,11 +679,11 @@ func TestPodFitsHostPorts(t *testing.T) {
name: "UDP hostPort conflict due to 0.0.0.0 hostIP",
},
}
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrPodNotFitsHostPorts}
expectedFailureReasons := []PredicateFailureReason{ErrPodNotFitsHostPorts}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fits, reasons, err := PodFitsHostPorts(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
fits, reasons, err := PodFitsHostPorts(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -732,11 +731,11 @@ func TestGCEDiskConflicts(t *testing.T) {
{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"},
{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"},
}
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict}
expectedFailureReasons := []PredicateFailureReason{ErrDiskConflict}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ok, reasons, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
ok, reasons, err := NoDiskConflict(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -787,11 +786,11 @@ func TestAWSDiskConflicts(t *testing.T) {
{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"},
{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"},
}
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict}
expectedFailureReasons := []PredicateFailureReason{ErrDiskConflict}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ok, reasons, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
ok, reasons, err := NoDiskConflict(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -848,11 +847,11 @@ func TestRBDDiskConflicts(t *testing.T) {
{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"},
{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"},
}
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict}
expectedFailureReasons := []PredicateFailureReason{ErrDiskConflict}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ok, reasons, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
ok, reasons, err := NoDiskConflict(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -909,11 +908,11 @@ func TestISCSIDiskConflicts(t *testing.T) {
{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"},
{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"},
}
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict}
expectedFailureReasons := []PredicateFailureReason{ErrDiskConflict}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ok, reasons, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
ok, reasons, err := NoDiskConflict(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -1603,7 +1602,7 @@ func TestPodFitsSelector(t *testing.T) {
name: "Pod with two terms: both matchFields and matchExpressions do not match",
},
}
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeSelectorNotMatch}
expectedFailureReasons := []PredicateFailureReason{ErrNodeSelectorNotMatch}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
@ -1614,7 +1613,7 @@ func TestPodFitsSelector(t *testing.T) {
nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(&node)

fits, reasons, err := PodMatchNodeSelector(test.pod, PredicateMetadata(test.pod, nil), nodeInfo)
fits, reasons, err := PodMatchNodeSelector(test.pod, GetPredicateMetadata(test.pod, nil), nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -1674,7 +1673,7 @@ func TestNodeLabelPresence(t *testing.T) {
name: "all labels match, presence false",
},
}
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeLabelPresenceViolated}
expectedFailureReasons := []PredicateFailureReason{ErrNodeLabelPresenceViolated}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
@ -1683,7 +1682,7 @@ func TestNodeLabelPresence(t *testing.T) {
nodeInfo.SetNode(&node)

labelChecker := NodeLabelChecker{test.labels, test.presence}
fits, reasons, err := labelChecker.CheckNodeLabelPresence(test.pod, PredicateMetadata(test.pod, nil), nodeInfo)
fits, reasons, err := labelChecker.CheckNodeLabelPresence(test.pod, GetPredicateMetadata(test.pod, nil), nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -1823,7 +1822,7 @@ func TestServiceAffinity(t *testing.T) {
name: "service pod on different node, multiple labels, all match",
},
}
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrServiceAffinityViolated}
expectedFailureReasons := []PredicateFailureReason{ErrServiceAffinityViolated}
for _, test := range tests {
testIt := func(skipPrecompute bool) {
t.Run(fmt.Sprintf("%v/skipPrecompute/%v", test.name, skipPrecompute), func(t *testing.T) {
@ -1839,7 +1838,7 @@ func TestServiceAffinity(t *testing.T) {
precompute(pm)
}
})
if pmeta, ok := (PredicateMetadata(test.pod, nodeInfoMap)).(*predicateMetadata); ok {
if pmeta, ok := (GetPredicateMetadata(test.pod, nodeInfoMap)).(*predicateMetadata); ok {
fits, reasons, err := predicate(test.pod, pmeta, nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
@ -1885,7 +1884,7 @@ func TestRunGeneralPredicates(t *testing.T) {
fits bool
name string
wErr error
reasons []algorithm.PredicateFailureReason
reasons []PredicateFailureReason
}{
{
pod: &v1.Pod{},
@ -1909,7 +1908,7 @@ func TestRunGeneralPredicates(t *testing.T) {
},
fits: false,
wErr: nil,
reasons: []algorithm.PredicateFailureReason{
reasons: []PredicateFailureReason{
NewInsufficientResourceError(v1.ResourceCPU, 8, 5, 10),
NewInsufficientResourceError(v1.ResourceMemory, 10, 19, 20),
},
@ -1928,7 +1927,7 @@ func TestRunGeneralPredicates(t *testing.T) {
},
fits: false,
wErr: nil,
reasons: []algorithm.PredicateFailureReason{ErrPodNotMatchHostName},
reasons: []PredicateFailureReason{ErrPodNotMatchHostName},
name: "host not match",
},
{
@ -1940,14 +1939,14 @@ func TestRunGeneralPredicates(t *testing.T) {
},
fits: false,
wErr: nil,
reasons: []algorithm.PredicateFailureReason{ErrPodNotFitsHostPorts},
reasons: []PredicateFailureReason{ErrPodNotFitsHostPorts},
name: "hostport conflict",
},
}
for _, test := range resourceTests {
t.Run(test.name, func(t *testing.T) {
test.nodeInfo.SetNode(test.node)
fits, reasons, err := GeneralPredicates(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
fits, reasons, err := GeneralPredicates(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -1976,7 +1975,7 @@ func TestInterPodAffinity(t *testing.T) {
node *v1.Node
fits bool
name string
expectFailureReasons []algorithm.PredicateFailureReason
expectFailureReasons []PredicateFailureReason
}{
{
pod: new(v1.Pod),
@ -2076,7 +2075,7 @@ func TestInterPodAffinity(t *testing.T) {
node: &node1,
fits: false,
name: "Does not satisfy the PodAffinity with labelSelector because of diff Namespace",
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
},
{
pod: &v1.Pod{
@ -2107,7 +2106,7 @@ func TestInterPodAffinity(t *testing.T) {
node: &node1,
fits: false,
name: "Doesn't satisfy the PodAffinity because of unmatching labelSelector with the existing pod",
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
},
{
pod: &v1.Pod{
@ -2204,7 +2203,7 @@ func TestInterPodAffinity(t *testing.T) {
node: &node1,
fits: false,
name: "The labelSelector requirements(items of matchExpressions) are ANDed, the pod cannot schedule onto the node because one of the matchExpression item don't match.",
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
},
{
pod: &v1.Pod{
@ -2371,7 +2370,7 @@ func TestInterPodAffinity(t *testing.T) {
node: &node1,
fits: false,
name: "satisfies the PodAffinity but doesn't satisfy the PodAntiAffinity with the existing pod",
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
},
{
pod: &v1.Pod{
@ -2444,7 +2443,7 @@ func TestInterPodAffinity(t *testing.T) {
node: &node1,
fits: false,
name: "satisfies the PodAffinity and PodAntiAffinity but doesn't satisfy PodAntiAffinity symmetry with the existing pod",
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
},
{
pod: &v1.Pod{
@ -2476,7 +2475,7 @@ func TestInterPodAffinity(t *testing.T) {
node: &node1,
fits: false,
name: "pod matches its own Label in PodAffinity and that matches the existing pod Labels",
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
},
{
pod: &v1.Pod{
@ -2512,7 +2511,7 @@ func TestInterPodAffinity(t *testing.T) {
node: &node1,
fits: false,
name: "verify that PodAntiAffinity from existing pod is respected when pod has no AntiAffinity constraints. doesn't satisfy PodAntiAffinity symmetry with the existing pod",
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
},
{
pod: &v1.Pod{
@ -2612,7 +2611,7 @@ func TestInterPodAffinity(t *testing.T) {
node: &node1,
fits: false,
name: "satisfies the PodAntiAffinity with existing pod but doesn't satisfy PodAntiAffinity symmetry with incoming pod",
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
},
{
pod: &v1.Pod{
@ -2675,7 +2674,7 @@ func TestInterPodAffinity(t *testing.T) {
},
node: &node1,
fits: false,
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
name: "PodAntiAffinity symmetry check a1: incoming pod and existing pod partially match each other on AffinityTerms",
},
{
@ -2739,7 +2738,7 @@ func TestInterPodAffinity(t *testing.T) {
},
node: &node1,
fits: false,
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
name: "PodAntiAffinity symmetry check a2: incoming pod and existing pod partially match each other on AffinityTerms",
},
{
@ -2814,7 +2813,7 @@ func TestInterPodAffinity(t *testing.T) {
},
node: &node1,
fits: false,
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
name: "PodAntiAffinity symmetry check b1: incoming pod and existing pod partially match each other on AffinityTerms",
},
{
@ -2889,7 +2888,7 @@ func TestInterPodAffinity(t *testing.T) {
},
node: &node1,
fits: false,
expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
expectFailureReasons: []PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
name: "PodAntiAffinity symmetry check b2: incoming pod and existing pod partially match each other on AffinityTerms",
},
}
@ -2911,7 +2910,7 @@ func TestInterPodAffinity(t *testing.T) {
nodeInfo := schedulernodeinfo.NewNodeInfo(podsOnNode...)
nodeInfo.SetNode(test.node)
nodeInfoMap := map[string]*schedulernodeinfo.NodeInfo{test.node.Name: nodeInfo}
fits, reasons, _ := fit.InterPodAffinityMatches(test.pod, PredicateMetadata(test.pod, nodeInfoMap), nodeInfo)
fits, reasons, _ := fit.InterPodAffinityMatches(test.pod, GetPredicateMetadata(test.pod, nodeInfoMap), nodeInfo)
if !fits && !reflect.DeepEqual(reasons, test.expectFailureReasons) {
t.Errorf("unexpected failure reasons: %v, want: %v", reasons, test.expectFailureReasons)
}
@ -2944,7 +2943,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
pod *v1.Pod
pods []*v1.Pod
nodes []v1.Node
nodesExpectAffinityFailureReasons [][]algorithm.PredicateFailureReason
nodesExpectAffinityFailureReasons [][]PredicateFailureReason
fits map[string]bool
name string
nometa bool
@ -2985,7 +2984,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
"machine2": true,
"machine3": false,
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{nil, nil, {ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch}},
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{nil, nil, {ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch}},
name: "A pod can be scheduled onto all the nodes that have the same topology key & label value with one of them has an existing pod that matches the affinity rules",
},
{
@ -3034,7 +3033,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "hostname": "h1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "hostname": "h2"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{nil, nil},
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{nil, nil},
fits: map[string]bool{
"nodeA": false,
"nodeB": true,
@ -3087,7 +3086,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"zone": "az1", "hostname": "h1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"zone": "az2", "hostname": "h2"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{nil, nil},
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{nil, nil},
fits: map[string]bool{
"nodeA": true,
"nodeB": true,
@ -3125,7 +3124,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "hostname": "nodeB"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}},
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}},
fits: map[string]bool{
"nodeA": false,
"nodeB": false,
@ -3174,7 +3173,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
},
@ -3215,7 +3214,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: labelRgChinaAzAz1}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, nil},
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, nil},
fits: map[string]bool{
"nodeA": false,
"nodeB": false,
@ -3279,7 +3278,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeD", Labels: labelRgUS}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
@ -3359,7 +3358,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: labelRgChinaAzAz1}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
nil,
@ -3403,7 +3402,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeB"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{},
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{},
fits: map[string]bool{
"nodeA": true,
"nodeB": true,
@ -3444,7 +3443,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeB"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{},
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{},
fits: map[string]bool{
"nodeA": true,
"nodeB": true,
@ -3507,7 +3506,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
},
@ -3568,7 +3567,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
},
@ -3621,7 +3620,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
},
fits: map[string]bool{
@ -3675,7 +3674,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
},
fits: map[string]bool{
@ -3727,7 +3726,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
},
@ -3782,7 +3781,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch},
},
@ -3869,7 +3868,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: map[string]string{"region": "r1", "zone": "z3", "hostname": "nodeC"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch},
},
@ -3925,7 +3924,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeB"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{},
{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
},
@ -3986,7 +3985,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
},
nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{
nodesExpectAffinityFailureReasons: [][]PredicateFailureReason{
{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
{ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch},
},
@ -3998,7 +3997,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
},
}

selectorExpectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeSelectorNotMatch}
selectorExpectedFailureReasons := []PredicateFailureReason{ErrNodeSelectorNotMatch}

for indexTest, test := range tests {
t.Run(test.name, func(t *testing.T) {
@ -4023,9 +4022,9 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
podLister: schedulertesting.FakePodLister(test.pods),
}

var meta algorithm.PredicateMetadata
var meta PredicateMetadata
if !test.nometa {
meta = PredicateMetadata(test.pod, nodeInfoMap)
meta = GetPredicateMetadata(test.pod, nodeInfoMap)
}

fits, reasons, _ := testFit.InterPodAffinityMatches(test.pod, meta, nodeInfoMap[node.Name])
@ -4037,7 +4036,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(&node)
nodeInfoMap := map[string]*schedulernodeinfo.NodeInfo{node.Name: nodeInfo}
fits2, reasons, err := PodMatchNodeSelector(test.pod, PredicateMetadata(test.pod, nodeInfoMap), nodeInfo)
fits2, reasons, err := PodMatchNodeSelector(test.pod, GetPredicateMetadata(test.pod, nodeInfoMap), nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -4238,13 +4237,13 @@ func TestPodToleratesTaints(t *testing.T) {
"but the effect of taint on node is PreferNochedule. Pod can be scheduled onto the node",
},
}
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrTaintsTolerationsNotMatch}
expectedFailureReasons := []PredicateFailureReason{ErrTaintsTolerationsNotMatch}

for _, test := range podTolerateTaintsTests {
t.Run(test.name, func(t *testing.T) {
nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(&test.node)
fits, reasons, err := PodToleratesNodeTaints(test.pod, PredicateMetadata(test.pod, nil), nodeInfo)
fits, reasons, err := PodToleratesNodeTaints(test.pod, GetPredicateMetadata(test.pod, nil), nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -4352,11 +4351,11 @@ func TestPodSchedulesOnNodeWithMemoryPressureCondition(t *testing.T) {
name: "non best-effort pod schedulable on node without memory pressure condition on",
},
}
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeUnderMemoryPressure}
expectedFailureReasons := []PredicateFailureReason{ErrNodeUnderMemoryPressure}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fits, reasons, err := CheckNodeMemoryPressurePredicate(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
fits, reasons, err := CheckNodeMemoryPressurePredicate(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -4426,11 +4425,11 @@ func TestPodSchedulesOnNodeWithDiskPressureCondition(t *testing.T) {
name: "pod not schedulable on node with pressure condition on",
},
}
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeUnderDiskPressure}
expectedFailureReasons := []PredicateFailureReason{ErrNodeUnderDiskPressure}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fits, reasons, err := CheckNodeDiskPressurePredicate(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
fits, reasons, err := CheckNodeDiskPressurePredicate(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@ -4486,11 +4485,11 @@ func TestPodSchedulesOnNodeWithPIDPressureCondition(t *testing.T) {
name: "pod not schedulable on node with pressure condition on",
},
}
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeUnderPIDPressure}
expectedFailureReasons := []PredicateFailureReason{ErrNodeUnderPIDPressure}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fits, reasons, err := CheckNodePIDPressurePredicate(&v1.Pod{}, PredicateMetadata(&v1.Pod{}, nil), test.nodeInfo)
fits, reasons, err := CheckNodePIDPressurePredicate(&v1.Pod{}, GetPredicateMetadata(&v1.Pod{}, nil), test.nodeInfo)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@ -4673,7 +4672,7 @@ func TestVolumeZonePredicate(t *testing.T) {
},
}

expectedFailureReasons := []algorithm.PredicateFailureReason{ErrVolumeZoneConflict}
expectedFailureReasons := []PredicateFailureReason{ErrVolumeZoneConflict}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
@ -4767,7 +4766,7 @@ func TestVolumeZonePredicateMultiZone(t *testing.T) {
},
}

expectedFailureReasons := []algorithm.PredicateFailureReason{ErrVolumeZoneConflict}
expectedFailureReasons := []PredicateFailureReason{ErrVolumeZoneConflict}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
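(Aside, not part of the diff above: a minimal, hypothetical sketch of the call pattern these tests now share, where predicate metadata is produced by the renamed GetPredicateMetadata helper before a predicate such as PodFitsHost is invoked. The pod, node name, and test name are invented placeholders; the rename presumably avoids a collision with the PredicateMetadata interface type that now lives in the same package.)

package predicates

import (
	"testing"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// TestPodFitsHostSketch shows the post-rename shape: build a NodeInfo, produce
// metadata with GetPredicateMetadata, then call the predicate under test.
func TestPodFitsHostSketch(t *testing.T) {
	pod := &v1.Pod{Spec: v1.PodSpec{NodeName: "machine1"}}
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1"}}

	nodeInfo := schedulernodeinfo.NewNodeInfo()
	nodeInfo.SetNode(node)

	fits, reasons, err := PodFitsHost(pod, GetPredicateMetadata(pod, nil), nodeInfo)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if !fits {
		t.Errorf("expected pod to fit on %q, got reasons: %v", node.Name, reasons)
	}
}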
@ -31,10 +31,6 @@ var NodeFieldSelectorKeys = map[string]func(*v1.Node) string{
schedulerapi.NodeFieldSelectorKeyNodeName: func(n *v1.Node) string { return n.Name },
}

// FitPredicate is a function that indicates if a pod fits into an existing node.
// The failure information is given by the error.
type FitPredicate func(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error)

// PriorityMapFunction is a function that computes per-node results for a given node.
// TODO: Figure out the exact API of this method.
// TODO: Change interface{} to a specific type.
@ -46,9 +42,6 @@ type PriorityMapFunction func(pod *v1.Pod, meta interface{}, nodeInfo *scheduler
// TODO: Change interface{} to a specific type.
type PriorityReduceFunction func(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error

// PredicateMetadataProducer is a function that computes predicate metadata for a given pod.
type PredicateMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata

// PriorityMetadataProducer is a function that computes metadata for a given pod. This
// is now used for only for priority functions. For predicates please use PredicateMetadataProducer.
type PriorityMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) interface{}
@ -69,21 +62,11 @@ type PriorityConfig struct {
Weight int
}

// EmptyPredicateMetadataProducer returns a no-op MetadataProducer type.
func EmptyPredicateMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata {
return nil
}

// EmptyPriorityMetadataProducer returns a no-op PriorityMetadataProducer type.
func EmptyPriorityMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) interface{} {
return nil
}

// PredicateFailureReason interface represents the failure reason of a predicate.
type PredicateFailureReason interface {
GetReason() string
}

// NodeLister interface represents anything that can list nodes for a scheduler.
type NodeLister interface {
// We explicitly return []*v1.Node, instead of v1.NodeList, to avoid
@ -172,10 +155,3 @@ type EmptyStatefulSetLister struct{}
func (f EmptyStatefulSetLister) GetPodStatefulSets(pod *v1.Pod) (sss []*apps.StatefulSet, err error) {
return nil, nil
}

// PredicateMetadata interface represents anything that can access a predicate metadata.
type PredicateMetadata interface {
ShallowCopy() PredicateMetadata
AddPod(addedPod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) error
RemovePod(deletedPod *v1.Pod) error
}
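(Aside, not part of the diff above: a minimal sketch of a predicate written against the types that this change moves into the predicates package. The function name and the node name it checks are invented for illustration; predicates.NewFailureReason is the string-wrapping helper used elsewhere in this commit.)

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// namedNodePredicate follows the relocated predicates.FitPredicate signature:
// it accepts only a node literally named "node-1" and otherwise reports a reason.
func namedNodePredicate(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
	node := nodeInfo.Node()
	if node != nil && node.Name == "node-1" {
		return true, nil, nil
	}
	return false, []predicates.PredicateFailureReason{predicates.NewFailureReason("node is not node-1")}, nil
}

// Compile-time assertion that the function satisfies the moved FitPredicate type.
var _ predicates.FitPredicate = namedNodePredicate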
@ -38,7 +38,7 @@ const (
func init() {
// Register functions that extract metadata used by predicates and priorities computations.
factory.RegisterPredicateMetadataProducerFactory(
func(args factory.PluginFactoryArgs) algorithm.PredicateMetadataProducer {
func(args factory.PluginFactoryArgs) predicates.PredicateMetadataProducer {
return predicates.NewPredicateMetadataFactory(args.PodLister)
})
factory.RegisterPriorityMetadataProducerFactory(
@ -101,41 +101,41 @@ func defaultPredicates() sets.String {
// Fit is determined by volume zone requirements.
factory.RegisterFitPredicateFactory(
predicates.NoVolumeZoneConflictPred,
func(args factory.PluginFactoryArgs) algorithm.FitPredicate {
func(args factory.PluginFactoryArgs) predicates.FitPredicate {
return predicates.NewVolumeZonePredicate(args.PVInfo, args.PVCInfo, args.StorageClassInfo)
},
),
// Fit is determined by whether or not there would be too many AWS EBS volumes attached to the node
factory.RegisterFitPredicateFactory(
predicates.MaxEBSVolumeCountPred,
func(args factory.PluginFactoryArgs) algorithm.FitPredicate {
func(args factory.PluginFactoryArgs) predicates.FitPredicate {
return predicates.NewMaxPDVolumeCountPredicate(predicates.EBSVolumeFilterType, args.PVInfo, args.PVCInfo)
},
),
// Fit is determined by whether or not there would be too many GCE PD volumes attached to the node
factory.RegisterFitPredicateFactory(
predicates.MaxGCEPDVolumeCountPred,
func(args factory.PluginFactoryArgs) algorithm.FitPredicate {
func(args factory.PluginFactoryArgs) predicates.FitPredicate {
return predicates.NewMaxPDVolumeCountPredicate(predicates.GCEPDVolumeFilterType, args.PVInfo, args.PVCInfo)
},
),
// Fit is determined by whether or not there would be too many Azure Disk volumes attached to the node
factory.RegisterFitPredicateFactory(
predicates.MaxAzureDiskVolumeCountPred,
func(args factory.PluginFactoryArgs) algorithm.FitPredicate {
func(args factory.PluginFactoryArgs) predicates.FitPredicate {
return predicates.NewMaxPDVolumeCountPredicate(predicates.AzureDiskVolumeFilterType, args.PVInfo, args.PVCInfo)
},
),
factory.RegisterFitPredicateFactory(
predicates.MaxCSIVolumeCountPred,
func(args factory.PluginFactoryArgs) algorithm.FitPredicate {
func(args factory.PluginFactoryArgs) predicates.FitPredicate {
return predicates.NewCSIMaxVolumeLimitPredicate(args.PVInfo, args.PVCInfo)
},
),
// Fit is determined by inter-pod affinity.
factory.RegisterFitPredicateFactory(
predicates.MatchInterPodAffinityPred,
func(args factory.PluginFactoryArgs) algorithm.FitPredicate {
func(args factory.PluginFactoryArgs) predicates.FitPredicate {
return predicates.NewPodAffinityPredicate(args.NodeInfo, args.PodLister)
},
),
@ -165,7 +165,7 @@ func defaultPredicates() sets.String {
// Fit is determined by volume topology requirements.
factory.RegisterFitPredicateFactory(
predicates.CheckVolumeBindingPred,
func(args factory.PluginFactoryArgs) algorithm.FitPredicate {
func(args factory.PluginFactoryArgs) predicates.FitPredicate {
return predicates.NewVolumeBindingPredicate(args.VolumeBinder)
},
),
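(Aside, not part of the diff above: after this change, registering a predicate means the factory closure returns predicates.FitPredicate rather than algorithm.FitPredicate. The predicate name is hypothetical, namedNodePredicate comes from the earlier sketch, and the factory import path is assumed rather than shown in this hunk.)

package example

import (
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	"k8s.io/kubernetes/pkg/scheduler/factory" // assumed import path for the factory package
)

func init() {
	// The closure now has to return predicates.FitPredicate, mirroring the
	// registrations in the hunks above.
	factory.RegisterFitPredicateFactory(
		"NamedNodePredicate", // hypothetical predicate name
		func(args factory.PluginFactoryArgs) predicates.FitPredicate {
			return namedNodePredicate
		},
	)
}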
@ -25,6 +25,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
@ -337,7 +338,7 @@ var _ algorithm.SchedulerExtender = &FakeExtender{}
func TestGenericSchedulerWithExtenders(t *testing.T) {
tests := []struct {
name string
predicates map[string]algorithm.FitPredicate
predicates map[string]predicates.FitPredicate
prioritizers []algorithm.PriorityConfig
extenders []FakeExtender
nodes []string
@ -345,7 +346,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
expectsErr bool
}{
{
predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
predicates: map[string]predicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
extenders: []FakeExtender{
{
@ -360,7 +361,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
name: "test 1",
},
{
predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
predicates: map[string]predicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
extenders: []FakeExtender{
{
@ -375,7 +376,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
name: "test 2",
},
{
predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
predicates: map[string]predicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
extenders: []FakeExtender{
{
@ -390,7 +391,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
name: "test 3",
},
{
predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
predicates: map[string]predicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
extenders: []FakeExtender{
{
@ -405,7 +406,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
name: "test 4",
},
{
predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
predicates: map[string]predicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
extenders: []FakeExtender{
{
@ -419,7 +420,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
name: "test 5",
},
{
predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
predicates: map[string]predicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
extenders: []FakeExtender{
{
@ -438,7 +439,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
name: "test 6",
},
{
predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
predicates: map[string]predicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Function: machine2Prioritizer, Weight: 20}},
extenders: []FakeExtender{
{
@ -459,7 +460,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
// If scheduler sends the pod by mistake, the test would fail
// because of the errors from errorPredicateExtender and/or
// errorPrioritizerExtender.
predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
predicates: map[string]predicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Function: machine2Prioritizer, Weight: 1}},
extenders: []FakeExtender{
{
@ -479,7 +480,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
//
// If scheduler did not ignore the extender, the test would fail
// because of the errors from errorPredicateExtender.
predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
predicates: map[string]predicates.FitPredicate{"true": truePredicate},
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
extenders: []FakeExtender{
{
@ -512,7 +513,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
cache,
queue,
test.predicates,
algorithm.EmptyPredicateMetadataProducer,
predicates.EmptyPredicateMetadataProducer,
test.prioritizers,
algorithm.EmptyPriorityMetadataProducer,
emptyPluginSet,
@ -62,7 +62,7 @@ const (
)

// FailedPredicateMap declares a map[string][]algorithm.PredicateFailureReason type.
type FailedPredicateMap map[string][]algorithm.PredicateFailureReason
type FailedPredicateMap map[string][]predicates.PredicateFailureReason

// FitError describes a fit error of a pod.
type FitError struct {
@ -112,7 +112,7 @@ type ScheduleAlgorithm interface {
Preempt(*v1.Pod, algorithm.NodeLister, error) (selectedNode *v1.Node, preemptedPods []*v1.Pod, cleanupNominatedPods []*v1.Pod, err error)
// Predicates() returns a pointer to a map of predicate functions. This is
// exposed for testing.
Predicates() map[string]algorithm.FitPredicate
Predicates() map[string]predicates.FitPredicate
// Prioritizers returns a slice of priority config. This is exposed for
// testing.
Prioritizers() []algorithm.PriorityConfig
@ -121,9 +121,9 @@ type ScheduleAlgorithm interface {
type genericScheduler struct {
cache schedulerinternalcache.Cache
schedulingQueue internalqueue.SchedulingQueue
predicates map[string]algorithm.FitPredicate
predicates map[string]predicates.FitPredicate
priorityMetaProducer algorithm.PriorityMetadataProducer
predicateMetaProducer algorithm.PredicateMetadataProducer
predicateMetaProducer predicates.PredicateMetadataProducer
prioritizers []algorithm.PriorityConfig
pluginSet pluginsv1alpha1.PluginSet
extenders []algorithm.SchedulerExtender
@ -213,7 +213,7 @@ func (g *genericScheduler) Prioritizers() []algorithm.PriorityConfig {

// Predicates returns a map containing all the scheduler's predicate
// functions. It is exposed for testing only.
func (g *genericScheduler) Predicates() map[string]algorithm.FitPredicate {
func (g *genericScheduler) Predicates() map[string]predicates.FitPredicate {
return g.predicates
}
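(Aside, not part of the diff above: a small hypothetical sketch of how the retyped FailedPredicateMap is populated once its values come from the predicates package. It assumes the scheduler's core package; the node name and reason text are placeholders.)

package core

import (
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
)

// buildFailureMapSketch fills a FailedPredicateMap with reasons that now have
// the predicates.PredicateFailureReason type.
func buildFailureMapSketch() FailedPredicateMap {
	failed := FailedPredicateMap{}
	failed["node-1"] = append(failed["node-1"],
		predicates.ErrPodNotMatchHostName,
		predicates.NewFailureReason("reason reported by an extender"),
	)
	return failed
}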
@ -486,7 +486,7 @@ func (g *genericScheduler) findNodesThatFit(pod *v1.Pod, nodes []*v1.Node) ([]*v
|
|||
|
||||
for failedNodeName, failedMsg := range failedMap {
|
||||
if _, found := failedPredicateMap[failedNodeName]; !found {
|
||||
failedPredicateMap[failedNodeName] = []algorithm.PredicateFailureReason{}
|
||||
failedPredicateMap[failedNodeName] = []predicates.PredicateFailureReason{}
|
||||
}
|
||||
failedPredicateMap[failedNodeName] = append(failedPredicateMap[failedNodeName], predicates.NewFailureReason(failedMsg))
|
||||
}
|
||||
|
@ -502,8 +502,8 @@ func (g *genericScheduler) findNodesThatFit(pod *v1.Pod, nodes []*v1.Node) ([]*v
|
|||
// addNominatedPods adds pods with equal or greater priority which are nominated
|
||||
// to run on the node given in nodeInfo to meta and nodeInfo. It returns 1) whether
|
||||
// any pod was found, 2) augmented meta data, 3) augmented nodeInfo.
|
||||
func addNominatedPods(pod *v1.Pod, meta algorithm.PredicateMetadata,
|
||||
nodeInfo *schedulernodeinfo.NodeInfo, queue internalqueue.SchedulingQueue) (bool, algorithm.PredicateMetadata,
|
||||
func addNominatedPods(pod *v1.Pod, meta predicates.PredicateMetadata,
|
||||
nodeInfo *schedulernodeinfo.NodeInfo, queue internalqueue.SchedulingQueue) (bool, predicates.PredicateMetadata,
|
||||
*schedulernodeinfo.NodeInfo) {
|
||||
if queue == nil || nodeInfo == nil || nodeInfo.Node() == nil {
|
||||
// This may happen only in tests.
|
||||
|
@ -513,7 +513,7 @@ func addNominatedPods(pod *v1.Pod, meta algorithm.PredicateMetadata,
|
|||
if nominatedPods == nil || len(nominatedPods) == 0 {
|
||||
return false, meta, nodeInfo
|
||||
}
|
||||
var metaOut algorithm.PredicateMetadata
|
||||
var metaOut predicates.PredicateMetadata
|
||||
if meta != nil {
|
||||
metaOut = meta.ShallowCopy()
|
||||
}
|
||||
|
@ -541,13 +541,13 @@ func addNominatedPods(pod *v1.Pod, meta algorithm.PredicateMetadata,
|
|||
// It removes victims from meta and NodeInfo before calling this function.
|
||||
func podFitsOnNode(
|
||||
pod *v1.Pod,
|
||||
meta algorithm.PredicateMetadata,
|
||||
meta predicates.PredicateMetadata,
|
||||
info *schedulernodeinfo.NodeInfo,
|
||||
predicateFuncs map[string]algorithm.FitPredicate,
|
||||
predicateFuncs map[string]predicates.FitPredicate,
|
||||
queue internalqueue.SchedulingQueue,
|
||||
alwaysCheckAllPredicates bool,
|
||||
) (bool, []algorithm.PredicateFailureReason, error) {
|
||||
var failedPredicates []algorithm.PredicateFailureReason
|
||||
) (bool, []predicates.PredicateFailureReason, error) {
|
||||
var failedPredicates []predicates.PredicateFailureReason
|
||||
|
||||
podsAdded := false
|
||||
// We run predicates twice in some cases. If the node has greater or equal priority
|
||||
|
@@ -579,14 +579,14 @@ func podFitsOnNode(
 for _, predicateKey := range predicates.Ordering() {
 var (
 fit bool
-reasons []algorithm.PredicateFailureReason
+reasons []predicates.PredicateFailureReason
 err error
 )
 //TODO (yastij) : compute average predicate restrictiveness to export it as Prometheus metric
 if predicate, exist := predicateFuncs[predicateKey]; exist {
 fit, reasons, err = predicate(pod, metaToUse, nodeInfoToUse)
 if err != nil {
-return false, []algorithm.PredicateFailureReason{}, err
+return false, []predicates.PredicateFailureReason{}, err
 }

 if !fit {
@@ -887,8 +887,8 @@ func pickOneNodeForPreemption(nodesToVictims map[*v1.Node]*schedulerapi.Victims)
 func selectNodesForPreemption(pod *v1.Pod,
 nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
 potentialNodes []*v1.Node,
-predicates map[string]algorithm.FitPredicate,
-metadataProducer algorithm.PredicateMetadataProducer,
+fitPredicates map[string]predicates.FitPredicate,
+metadataProducer predicates.PredicateMetadataProducer,
 queue internalqueue.SchedulingQueue,
 pdbs []*policy.PodDisruptionBudget,
 ) (map[*v1.Node]*schedulerapi.Victims, error) {
@@ -899,11 +899,11 @@ func selectNodesForPreemption(pod *v1.Pod,
 meta := metadataProducer(pod, nodeNameToInfo)
 checkNode := func(i int) {
 nodeName := potentialNodes[i].Name
-var metaCopy algorithm.PredicateMetadata
+var metaCopy predicates.PredicateMetadata
 if meta != nil {
 metaCopy = meta.ShallowCopy()
 }
-pods, numPDBViolations, fits := selectVictimsOnNode(pod, metaCopy, nodeNameToInfo[nodeName], predicates, queue, pdbs)
+pods, numPDBViolations, fits := selectVictimsOnNode(pod, metaCopy, nodeNameToInfo[nodeName], fitPredicates, queue, pdbs)
 if fits {
 resultLock.Lock()
 victims := schedulerapi.Victims{
@@ -974,9 +974,9 @@ func filterPodsWithPDBViolation(pods []interface{}, pdbs []*policy.PodDisruption
 // these predicates can be satisfied by removing more pods from the node.
 func selectVictimsOnNode(
 pod *v1.Pod,
-meta algorithm.PredicateMetadata,
+meta predicates.PredicateMetadata,
 nodeInfo *schedulernodeinfo.NodeInfo,
-fitPredicates map[string]algorithm.FitPredicate,
+fitPredicates map[string]predicates.FitPredicate,
 queue internalqueue.SchedulingQueue,
 pdbs []*policy.PodDisruptionBudget,
 ) ([]*v1.Pod, int, bool) {
@@ -1143,8 +1143,8 @@ func podPassesBasicChecks(pod *v1.Pod, pvcLister corelisters.PersistentVolumeCla
 func NewGenericScheduler(
 cache schedulerinternalcache.Cache,
 podQueue internalqueue.SchedulingQueue,
-predicates map[string]algorithm.FitPredicate,
-predicateMetaProducer algorithm.PredicateMetadataProducer,
+predicates map[string]predicates.FitPredicate,
+predicateMetaProducer predicates.PredicateMetadataProducer,
 prioritizers []algorithm.PriorityConfig,
 priorityMetaProducer algorithm.PriorityMetadataProducer,
 pluginSet pluginsv1alpha1.PluginSet,
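The loop in podFitsOnNode above is where evaluation order matters: it walks predicates.Ordering() and looks each key up in the map that now has type map[string]predicates.FitPredicate, skipping keys that are not configured. A condensed sketch of that pattern, under the assumption that it sits next to the code above in the scheduler core package (the helper name is illustrative and not part of the change):

// runOrderedPredicates mirrors the podFitsOnNode loop: predicates run in the
// package-defined order, missing keys are skipped, and failure reasons are
// accumulated so callers can report every unmet predicate.
func runOrderedPredicates(
	pod *v1.Pod,
	meta predicates.PredicateMetadata,
	info *schedulernodeinfo.NodeInfo,
	fns map[string]predicates.FitPredicate,
) (bool, []predicates.PredicateFailureReason, error) {
	var failed []predicates.PredicateFailureReason
	for _, key := range predicates.Ordering() {
		fn, exist := fns[key]
		if !exist {
			continue
		}
		fit, reasons, err := fn(pod, meta, info)
		if err != nil {
			return false, []predicates.PredicateFailureReason{}, err
		}
		if !fit {
			failed = append(failed, reasons...)
		}
	}
	return len(failed) == 0, failed, nil
}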
@@ -50,15 +50,15 @@ var (
 order = []string{"false", "true", "matches", "nopods", algorithmpredicates.MatchInterPodAffinityPred}
 )

-func falsePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
-return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
+func falsePredicate(pod *v1.Pod, meta algorithmpredicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithmpredicates.PredicateFailureReason, error) {
+return false, []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
 }

-func truePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func truePredicate(pod *v1.Pod, meta algorithmpredicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithmpredicates.PredicateFailureReason, error) {
 return true, nil, nil
 }

-func matchesPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func matchesPredicate(pod *v1.Pod, meta algorithmpredicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithmpredicates.PredicateFailureReason, error) {
 node := nodeInfo.Node()
 if node == nil {
 return false, nil, fmt.Errorf("node not found")
@@ -66,14 +66,14 @@ func matchesPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s
 if pod.Name == node.Name {
 return true, nil, nil
 }
-return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
+return false, []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
 }

-func hasNoPodsPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func hasNoPodsPredicate(pod *v1.Pod, meta algorithmpredicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithmpredicates.PredicateFailureReason, error) {
 if len(nodeInfo.Pods()) == 0 {
 return true, nil, nil
 }
-return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
+return false, []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
 }

 func numericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
@@ -238,7 +238,7 @@ func TestGenericScheduler(t *testing.T) {
 algorithmpredicates.SetPredicatesOrdering(order)
 tests := []struct {
 name string
-predicates map[string]algorithm.FitPredicate
+predicates map[string]algorithmpredicates.FitPredicate
 prioritizers []algorithm.PriorityConfig
 alwaysCheckAllPredicates bool
 nodes []string
@@ -250,7 +250,7 @@ func TestGenericScheduler(t *testing.T) {
 wErr error
 }{
 {
-predicates: map[string]algorithm.FitPredicate{"false": falsePredicate},
+predicates: map[string]algorithmpredicates.FitPredicate{"false": falsePredicate},
 prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
 nodes: []string{"machine1", "machine2"},
 expectsErr: true,
@@ -260,12 +260,12 @@ func TestGenericScheduler(t *testing.T) {
 Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
 NumAllNodes: 2,
 FailedPredicates: FailedPredicateMap{
-"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
-"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
+"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
+"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
 }},
 },
 {
-predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
+predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate},
 prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
 nodes: []string{"machine1", "machine2"},
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore")}},
@@ -275,7 +275,7 @@ func TestGenericScheduler(t *testing.T) {
 },
 {
 // Fits on a machine where the pod ID matches the machine name
-predicates: map[string]algorithm.FitPredicate{"matches": matchesPredicate},
+predicates: map[string]algorithmpredicates.FitPredicate{"matches": matchesPredicate},
 prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
 nodes: []string{"machine1", "machine2"},
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine2", UID: types.UID("machine2")}},
@@ -284,7 +284,7 @@ func TestGenericScheduler(t *testing.T) {
 wErr: nil,
 },
 {
-predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
+predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate},
 prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
 nodes: []string{"3", "2", "1"},
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore")}},
@@ -293,7 +293,7 @@ func TestGenericScheduler(t *testing.T) {
 wErr: nil,
 },
 {
-predicates: map[string]algorithm.FitPredicate{"matches": matchesPredicate},
+predicates: map[string]algorithmpredicates.FitPredicate{"matches": matchesPredicate},
 prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
 nodes: []string{"3", "2", "1"},
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
@@ -302,7 +302,7 @@ func TestGenericScheduler(t *testing.T) {
 wErr: nil,
 },
 {
-predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
+predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate},
 prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}, {Function: reverseNumericPriority, Weight: 2}},
 nodes: []string{"3", "2", "1"},
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
@@ -311,7 +311,7 @@ func TestGenericScheduler(t *testing.T) {
 wErr: nil,
 },
 {
-predicates: map[string]algorithm.FitPredicate{"true": truePredicate, "false": falsePredicate},
+predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate, "false": falsePredicate},
 prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
 nodes: []string{"3", "2", "1"},
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
@@ -321,14 +321,14 @@ func TestGenericScheduler(t *testing.T) {
 Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
 NumAllNodes: 3,
 FailedPredicates: FailedPredicateMap{
-"3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
-"2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
-"1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
+"3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
+"2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
+"1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
 },
 },
 },
 {
-predicates: map[string]algorithm.FitPredicate{
+predicates: map[string]algorithmpredicates.FitPredicate{
 "nopods": hasNoPodsPredicate,
 "matches": matchesPredicate,
 },
@@ -352,14 +352,14 @@ func TestGenericScheduler(t *testing.T) {
 Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
 NumAllNodes: 2,
 FailedPredicates: FailedPredicateMap{
-"1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
-"2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
+"1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
+"2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
 },
 },
 },
 {
 // Pod with existing PVC
-predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
+predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate},
 prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
 nodes: []string{"machine1", "machine2"},
 pvcs: []*v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC"}}},
@@ -383,7 +383,7 @@ func TestGenericScheduler(t *testing.T) {
 },
 {
 // Pod with non existing PVC
-predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
+predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate},
 prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
 nodes: []string{"machine1", "machine2"},
 pod: &v1.Pod{
@@ -406,7 +406,7 @@ func TestGenericScheduler(t *testing.T) {
 },
 {
 // Pod with deleting PVC
-predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
+predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate},
 prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
 nodes: []string{"machine1", "machine2"},
 pvcs: []*v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", DeletionTimestamp: &metav1.Time{}}}},
@@ -430,7 +430,7 @@ func TestGenericScheduler(t *testing.T) {
 },
 {
 // alwaysCheckAllPredicates is true
-predicates: map[string]algorithm.FitPredicate{"true": truePredicate, "matches": matchesPredicate, "false": falsePredicate},
+predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate, "matches": matchesPredicate, "false": falsePredicate},
 prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
 alwaysCheckAllPredicates: true,
 nodes: []string{"1"},
@@ -440,12 +440,12 @@ func TestGenericScheduler(t *testing.T) {
 Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
 NumAllNodes: 1,
 FailedPredicates: FailedPredicateMap{
-"1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate, algorithmpredicates.ErrFakePredicate},
+"1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate, algorithmpredicates.ErrFakePredicate},
 },
 },
 },
 {
-predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
+predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate},
 prioritizers: []algorithm.PriorityConfig{{Map: falseMapPriority, Weight: 1}, {Map: trueMapPriority, Reduce: getNodeReducePriority, Weight: 2}},
 nodes: []string{"2", "1"},
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
@@ -471,7 +471,7 @@ func TestGenericScheduler(t *testing.T) {
 cache,
 internalqueue.NewSchedulingQueue(nil),
 test.predicates,
-algorithm.EmptyPredicateMetadataProducer,
+algorithmpredicates.EmptyPredicateMetadataProducer,
 test.prioritizers,
 algorithm.EmptyPriorityMetadataProducer,
 emptyPluginSet,
@@ -495,7 +495,7 @@ func TestGenericScheduler(t *testing.T) {
 }

 // makeScheduler makes a simple genericScheduler for testing.
-func makeScheduler(predicates map[string]algorithm.FitPredicate, nodes []*v1.Node) *genericScheduler {
+func makeScheduler(predicates map[string]algorithmpredicates.FitPredicate, nodes []*v1.Node) *genericScheduler {
 algorithmpredicates.SetPredicatesOrdering(order)
 cache := schedulerinternalcache.New(time.Duration(0), wait.NeverStop)
 for _, n := range nodes {
@@ -507,7 +507,7 @@ func makeScheduler(predicates map[string]algorithm.FitPredicate, nodes []*v1.Nod
 cache,
 internalqueue.NewSchedulingQueue(nil),
 predicates,
-algorithm.EmptyPredicateMetadataProducer,
+algorithmpredicates.EmptyPredicateMetadataProducer,
 prioritizers,
 algorithm.EmptyPriorityMetadataProducer,
 emptyPluginSet,
@@ -519,7 +519,7 @@ func makeScheduler(predicates map[string]algorithm.FitPredicate, nodes []*v1.Nod
 }

 func TestFindFitAllError(t *testing.T) {
-predicates := map[string]algorithm.FitPredicate{"true": truePredicate, "matches": matchesPredicate}
+predicates := map[string]algorithmpredicates.FitPredicate{"true": truePredicate, "matches": matchesPredicate}
 nodes := makeNodeList([]string{"3", "2", "1"})
 scheduler := makeScheduler(predicates, nodes)

@@ -547,7 +547,7 @@ func TestFindFitAllError(t *testing.T) {
 }

 func TestFindFitSomeError(t *testing.T) {
-predicates := map[string]algorithm.FitPredicate{"true": truePredicate, "matches": matchesPredicate}
+predicates := map[string]algorithmpredicates.FitPredicate{"true": truePredicate, "matches": matchesPredicate}
 nodes := makeNodeList([]string{"3", "2", "1"})
 scheduler := makeScheduler(predicates, nodes)

@@ -602,9 +602,9 @@ func TestHumanReadableFitError(t *testing.T) {
 Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
 NumAllNodes: 3,
 FailedPredicates: FailedPredicateMap{
-"1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnderMemoryPressure},
-"2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure},
-"3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure},
+"1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderMemoryPressure},
+"2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure},
+"3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure},
 },
 }
 if strings.Contains(err.Error(), "0/3 nodes are available") {
@@ -796,7 +796,7 @@ func (n FakeNodeInfo) GetNodeInfo(nodeName string) (*v1.Node, error) {
 return &node, nil
 }

-func PredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulernodeinfo.NodeInfo) algorithm.PredicateMetadata {
+func PredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulernodeinfo.NodeInfo) algorithmpredicates.PredicateMetadata {
 return algorithmpredicates.NewPredicateMetadataFactory(schedulertesting.FakePodLister{p})(p, nodeInfo)
 }

@@ -856,7 +856,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
 algorithmpredicates.SetPredicatesOrdering(order)
 tests := []struct {
 name string
-predicates map[string]algorithm.FitPredicate
+predicates map[string]algorithmpredicates.FitPredicate
 nodes []string
 pod *v1.Pod
 pods []*v1.Pod
@@ -865,7 +865,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
 }{
 {
 name: "a pod that does not fit on any machine",
-predicates: map[string]algorithm.FitPredicate{"matches": falsePredicate},
+predicates: map[string]algorithmpredicates.FitPredicate{"matches": falsePredicate},
 nodes: []string{"machine1", "machine2"},
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "new", UID: types.UID("new")}, Spec: v1.PodSpec{Priority: &highPriority}},
 pods: []*v1.Pod{
@@ -875,7 +875,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
 },
 {
 name: "a pod that fits with no preemption",
-predicates: map[string]algorithm.FitPredicate{"matches": truePredicate},
+predicates: map[string]algorithmpredicates.FitPredicate{"matches": truePredicate},
 nodes: []string{"machine1", "machine2"},
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "new", UID: types.UID("new")}, Spec: v1.PodSpec{Priority: &highPriority}},
 pods: []*v1.Pod{
@@ -885,7 +885,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
 },
 {
 name: "a pod that fits on one machine with no preemption",
-predicates: map[string]algorithm.FitPredicate{"matches": matchesPredicate},
+predicates: map[string]algorithmpredicates.FitPredicate{"matches": matchesPredicate},
 nodes: []string{"machine1", "machine2"},
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Priority: &highPriority}},
 pods: []*v1.Pod{
@@ -895,7 +895,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
 },
 {
 name: "a pod that fits on both machines when lower priority pods are preempted",
-predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
+predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
 nodes: []string{"machine1", "machine2"},
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
 pods: []*v1.Pod{
@@ -905,7 +905,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
 },
 {
 name: "a pod that would fit on the machines, but other pods running are higher priority",
-predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
+predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
 nodes: []string{"machine1", "machine2"},
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &lowPriority}},
 pods: []*v1.Pod{
@@ -915,7 +915,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
 },
 {
 name: "medium priority pod is preempted, but lower priority one stays as it is small",
-predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
+predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
 nodes: []string{"machine1", "machine2"},
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
 pods: []*v1.Pod{
@@ -926,7 +926,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
 },
 {
 name: "mixed priority pods are preempted",
-predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
+predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
 nodes: []string{"machine1", "machine2"},
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
 pods: []*v1.Pod{
@@ -939,7 +939,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
 },
 {
 name: "pod with anti-affinity is preempted",
-predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
+predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
 nodes: []string{"machine1", "machine2"},
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{
 Name: "machine1",
@@ -1002,7 +1002,7 @@ func TestPickOneNodeForPreemption(t *testing.T) {
 algorithmpredicates.SetPredicatesOrdering(order)
 tests := []struct {
 name string
-predicates map[string]algorithm.FitPredicate
+predicates map[string]algorithmpredicates.FitPredicate
 nodes []string
 pod *v1.Pod
 pods []*v1.Pod
@@ -1010,7 +1010,7 @@ func TestPickOneNodeForPreemption(t *testing.T) {
 }{
 {
 name: "No node needs preemption",
-predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
+predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
 nodes: []string{"machine1"},
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
 pods: []*v1.Pod{
@@ -1019,7 +1019,7 @@ func TestPickOneNodeForPreemption(t *testing.T) {
 },
 {
 name: "a pod that fits on both machines when lower priority pods are preempted",
-predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
+predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
 nodes: []string{"machine1", "machine2"},
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
 pods: []*v1.Pod{
@@ -1030,7 +1030,7 @@ func TestPickOneNodeForPreemption(t *testing.T) {
 },
 {
 name: "a pod that fits on a machine with no preemption",
-predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
+predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
 nodes: []string{"machine1", "machine2", "machine3"},
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
 pods: []*v1.Pod{
@@ -1041,7 +1041,7 @@ func TestPickOneNodeForPreemption(t *testing.T) {
 },
 {
 name: "machine with min highest priority pod is picked",
-predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
+predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
 nodes: []string{"machine1", "machine2", "machine3"},
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &highPriority}},
 pods: []*v1.Pod{
@@ -1058,7 +1058,7 @@ func TestPickOneNodeForPreemption(t *testing.T) {
 },
 {
 name: "when highest priorities are the same, minimum sum of priorities is picked",
-predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
+predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
 nodes: []string{"machine1", "machine2", "machine3"},
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &highPriority}},
 pods: []*v1.Pod{
@@ -1075,7 +1075,7 @@ func TestPickOneNodeForPreemption(t *testing.T) {
 },
 {
 name: "when highest priority and sum are the same, minimum number of pods is picked",
-predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
+predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
 nodes: []string{"machine1", "machine2", "machine3"},
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &highPriority}},
 pods: []*v1.Pod{
@@ -1097,7 +1097,7 @@ func TestPickOneNodeForPreemption(t *testing.T) {
 // pickOneNodeForPreemption adjusts pod priorities when finding the sum of the victims. This
 // test ensures that the logic works correctly.
 name: "sum of adjusted priorities is considered",
-predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
+predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
 nodes: []string{"machine1", "machine2", "machine3"},
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &highPriority}},
 pods: []*v1.Pod{
@@ -1116,7 +1116,7 @@ func TestPickOneNodeForPreemption(t *testing.T) {
 },
 {
 name: "non-overlapping lowest high priority, sum priorities, and number of pods",
-predicates: map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
+predicates: map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
 nodes: []string{"machine1", "machine2", "machine3", "machine4"},
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &veryHighPriority}},
 pods: []*v1.Pod{
@@ -1177,72 +1177,72 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
 {
 name: "No node should be attempted",
 failedPredMap: FailedPredicateMap{
-"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeSelectorNotMatch},
-"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
-"machine3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrTaintsTolerationsNotMatch},
-"machine4": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeLabelPresenceViolated},
+"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeSelectorNotMatch},
+"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
+"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrTaintsTolerationsNotMatch},
+"machine4": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeLabelPresenceViolated},
 },
 expected: map[string]bool{},
 },
 {
 name: "ErrPodAffinityNotMatch should be tried as it indicates that the pod is unschedulable due to inter-pod affinity or anti-affinity",
 failedPredMap: FailedPredicateMap{
-"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch},
-"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
-"machine3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnschedulable},
+"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch},
+"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
+"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnschedulable},
 },
 expected: map[string]bool{"machine1": true, "machine4": true},
 },
 {
 name: "pod with both pod affinity and anti-affinity should be tried",
 failedPredMap: FailedPredicateMap{
-"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch},
-"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
+"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch},
+"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
 },
 expected: map[string]bool{"machine1": true, "machine3": true, "machine4": true},
 },
 {
 name: "ErrPodAffinityRulesNotMatch should not be tried as it indicates that the pod is unschedulable due to inter-pod affinity, but ErrPodAffinityNotMatch should be tried as it indicates that the pod is unschedulable due to inter-pod affinity or anti-affinity",
 failedPredMap: FailedPredicateMap{
-"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrPodAffinityRulesNotMatch},
-"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch},
+"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodAffinityRulesNotMatch},
+"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch},
 },
 expected: map[string]bool{"machine2": true, "machine3": true, "machine4": true},
 },
 {
 name: "Mix of failed predicates works fine",
 failedPredMap: FailedPredicateMap{
-"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeSelectorNotMatch, algorithmpredicates.ErrNodeUnderDiskPressure, algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 500, 300)},
-"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName, algorithmpredicates.ErrDiskConflict},
-"machine3": []algorithm.PredicateFailureReason{algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400)},
-"machine4": []algorithm.PredicateFailureReason{},
+"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeSelectorNotMatch, algorithmpredicates.ErrNodeUnderDiskPressure, algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 500, 300)},
+"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName, algorithmpredicates.ErrDiskConflict},
+"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400)},
+"machine4": []algorithmpredicates.PredicateFailureReason{},
 },
 expected: map[string]bool{"machine3": true, "machine4": true},
 },
 {
 name: "Node condition errors should be considered unresolvable",
 failedPredMap: FailedPredicateMap{
-"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure},
-"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnderPIDPressure},
-"machine3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnderMemoryPressure},
+"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure},
+"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderPIDPressure},
+"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderMemoryPressure},
 },
 expected: map[string]bool{"machine4": true},
 },
 {
 name: "Node condition errors and ErrNodeUnknownCondition should be considered unresolvable",
 failedPredMap: FailedPredicateMap{
-"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeNotReady},
-"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeNetworkUnavailable},
-"machine3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnknownCondition},
+"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeNotReady},
+"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeNetworkUnavailable},
+"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnknownCondition},
 },
 expected: map[string]bool{"machine4": true},
 },
 {
 name: "ErrVolume... errors should not be tried as it indicates that the pod is unschedulable due to no matching volumes for pod on node",
 failedPredMap: FailedPredicateMap{
-"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrVolumeZoneConflict},
-"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrVolumeNodeConflict},
-"machine3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrVolumeBindConflict},
+"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrVolumeZoneConflict},
+"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrVolumeNodeConflict},
+"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrVolumeBindConflict},
 },
 expected: map[string]bool{"machine4": true},
 },
@@ -1265,9 +1265,9 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {

 func TestPreempt(t *testing.T) {
 failedPredMap := FailedPredicateMap{
-"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 500, 300)},
-"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrDiskConflict},
-"machine3": []algorithm.PredicateFailureReason{algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400)},
+"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 500, 300)},
+"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrDiskConflict},
+"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400)},
 }
 // Prepare 3 node names.
 nodeNames := []string{}
@@ -1431,8 +1431,8 @@ func TestPreempt(t *testing.T) {
 scheduler := NewGenericScheduler(
 cache,
 internalqueue.NewSchedulingQueue(nil),
-map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
-algorithm.EmptyPredicateMetadataProducer,
+map[string]algorithmpredicates.FitPredicate{"matches": algorithmpredicates.PodFitsResources},
+algorithmpredicates.EmptyPredicateMetadataProducer,
 []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
 algorithm.EmptyPriorityMetadataProducer,
 emptyPluginSet,
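The tests above all follow one pattern: each table entry carries a map[string]algorithmpredicates.FitPredicate plus the nodes and pods to exercise it against. A stripped-down sketch of that shape, assuming it sits in the same test file so the algorithmpredicates alias and truePredicate helper resolve (the entry itself is illustrative, not one of the cases above):

// exampleCases shows the post-move shape of a predicate test table: the map's
// value type now comes from the predicates package (aliased algorithmpredicates).
var exampleCases = []struct {
	name       string
	predicates map[string]algorithmpredicates.FitPredicate
	nodes      []string
}{
	{
		name:       "always fits",
		predicates: map[string]algorithmpredicates.FitPredicate{"true": truePredicate},
		nodes:      []string{"machine1", "machine2"},
	},
}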
@@ -64,6 +64,7 @@
 deps = [
 "//pkg/api/testing:go_default_library",
 "//pkg/scheduler/algorithm:go_default_library",
+"//pkg/scheduler/algorithm/predicates:go_default_library",
 "//pkg/scheduler/algorithm/priorities:go_default_library",
 "//pkg/scheduler/api:go_default_library",
 "//pkg/scheduler/api/latest:go_default_library",

@@ -159,8 +159,8 @@ type Configurator interface {
 MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue internalqueue.SchedulingQueue) func(pod *v1.Pod, err error)

 // Predicate related accessors to be exposed for use by k8s.io/autoscaler/cluster-autoscaler
-GetPredicateMetadataProducer() (algorithm.PredicateMetadataProducer, error)
-GetPredicates(predicateKeys sets.String) (map[string]algorithm.FitPredicate, error)
+GetPredicateMetadataProducer() (predicates.PredicateMetadataProducer, error)
+GetPredicates(predicateKeys sets.String) (map[string]predicates.FitPredicate, error)

 // Needs to be exposed for things like integration tests where we want to make fake nodes.
 GetNodeLister() corelisters.NodeLister
@@ -956,7 +956,7 @@ func (c *configFactory) GetPriorityMetadataProducer() (algorithm.PriorityMetadat
 return getPriorityMetadataProducer(*pluginArgs)
 }

-func (c *configFactory) GetPredicateMetadataProducer() (algorithm.PredicateMetadataProducer, error) {
+func (c *configFactory) GetPredicateMetadataProducer() (predicates.PredicateMetadataProducer, error) {
 pluginArgs, err := c.getPluginArgs()
 if err != nil {
 return nil, err
@@ -964,7 +964,7 @@ func (c *configFactory) GetPredicateMetadataProducer() (algorithm.PredicateMetad
 return getPredicateMetadataProducer(*pluginArgs)
 }

-func (c *configFactory) GetPredicates(predicateKeys sets.String) (map[string]algorithm.FitPredicate, error) {
+func (c *configFactory) GetPredicates(predicateKeys sets.String) (map[string]predicates.FitPredicate, error) {
 pluginArgs, err := c.getPluginArgs()
 if err != nil {
 return nil, err
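These accessors are the ones external consumers such as cluster-autoscaler call, and after the move they hand back predicates-package types. A small sketch of a caller, under the assumption that c is any factory.Configurator and that the predicate key names match registered defaults (the helper name and key choices are illustrative):

package example

import (
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/kubernetes/pkg/scheduler/factory"
)

// listFitPredicates pulls a subset of the registered predicates through the
// Configurator accessor; after this change the returned map's values have
// type predicates.FitPredicate.
func listFitPredicates(c factory.Configurator) ([]string, error) {
	fitPredicates, err := c.GetPredicates(sets.NewString("PodFitsResources", "PodFitsHostPorts"))
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(fitPredicates))
	for name := range fitPredicates {
		names = append(names, name)
	}
	return names, nil
}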
@@ -36,6 +36,7 @@ import (
 "k8s.io/client-go/tools/cache"
 apitesting "k8s.io/kubernetes/pkg/api/testing"
 "k8s.io/kubernetes/pkg/scheduler/algorithm"
+"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 latestschedulerapi "k8s.io/kubernetes/pkg/scheduler/api/latest"
 fakecache "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake"
@@ -229,11 +230,11 @@ func TestCreateFromConfigWithEmptyPredicatesOrPriorities(t *testing.T) {
 }
 }

-func PredicateOne(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PredicateOne(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
 return true, nil, nil
 }

-func PredicateTwo(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PredicateTwo(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
 return true, nil, nil
 }

@@ -54,10 +54,10 @@ type PluginFactoryArgs struct {
 type PriorityMetadataProducerFactory func(PluginFactoryArgs) algorithm.PriorityMetadataProducer

 // PredicateMetadataProducerFactory produces PredicateMetadataProducer from the given args.
-type PredicateMetadataProducerFactory func(PluginFactoryArgs) algorithm.PredicateMetadataProducer
+type PredicateMetadataProducerFactory func(PluginFactoryArgs) predicates.PredicateMetadataProducer

 // FitPredicateFactory produces a FitPredicate from the given args.
-type FitPredicateFactory func(PluginFactoryArgs) algorithm.FitPredicate
+type FitPredicateFactory func(PluginFactoryArgs) predicates.FitPredicate

 // PriorityFunctionFactory produces a PriorityConfig from the given args.
 // DEPRECATED
@@ -103,8 +103,8 @@ type AlgorithmProviderConfig struct {

 // RegisterFitPredicate registers a fit predicate with the algorithm
 // registry. Returns the name with which the predicate was registered.
-func RegisterFitPredicate(name string, predicate algorithm.FitPredicate) string {
-return RegisterFitPredicateFactory(name, func(PluginFactoryArgs) algorithm.FitPredicate { return predicate })
+func RegisterFitPredicate(name string, predicate predicates.FitPredicate) string {
+return RegisterFitPredicateFactory(name, func(PluginFactoryArgs) predicates.FitPredicate { return predicate })
 }

 // RemoveFitPredicate removes a fit predicate from factory.
@@ -181,11 +181,11 @@ func InsertPriorityKeyToAlgorithmProviderMap(key string) {
 // RegisterMandatoryFitPredicate registers a fit predicate with the algorithm registry, the predicate is used by
 // kubelet, DaemonSet; it is always included in configuration. Returns the name with which the predicate was
 // registered.
-func RegisterMandatoryFitPredicate(name string, predicate algorithm.FitPredicate) string {
+func RegisterMandatoryFitPredicate(name string, predicate predicates.FitPredicate) string {
 schedulerFactoryMutex.Lock()
 defer schedulerFactoryMutex.Unlock()
 validateAlgorithmNameOrDie(name)
-fitPredicateMap[name] = func(PluginFactoryArgs) algorithm.FitPredicate { return predicate }
+fitPredicateMap[name] = func(PluginFactoryArgs) predicates.FitPredicate { return predicate }
 mandatoryFitPredicates.Insert(name)
 return name
 }
@@ -211,7 +211,7 @@ func RegisterCustomFitPredicate(policy schedulerapi.PredicatePolicy) string {
 // generate the predicate function, if a custom type is requested
 if policy.Argument != nil {
 if policy.Argument.ServiceAffinity != nil {
-predicateFactory = func(args PluginFactoryArgs) algorithm.FitPredicate {
+predicateFactory = func(args PluginFactoryArgs) predicates.FitPredicate {
 predicate, precomputationFunction := predicates.NewServiceAffinityPredicate(
 args.PodLister,
 args.ServiceLister,
@@ -224,7 +224,7 @@ func RegisterCustomFitPredicate(policy schedulerapi.PredicatePolicy) string {
 return predicate
 }
 } else if policy.Argument.LabelsPresence != nil {
-predicateFactory = func(args PluginFactoryArgs) algorithm.FitPredicate {
+predicateFactory = func(args PluginFactoryArgs) predicates.FitPredicate {
 return predicates.NewNodeLabelPredicate(
 policy.Argument.LabelsPresence.Labels,
 policy.Argument.LabelsPresence.Presence,
@@ -408,11 +408,11 @@ func GetAlgorithmProvider(name string) (*AlgorithmProviderConfig, error) {
 return &provider, nil
 }

-func getFitPredicateFunctions(names sets.String, args PluginFactoryArgs) (map[string]algorithm.FitPredicate, error) {
+func getFitPredicateFunctions(names sets.String, args PluginFactoryArgs) (map[string]predicates.FitPredicate, error) {
 schedulerFactoryMutex.Lock()
 defer schedulerFactoryMutex.Unlock()

-predicates := map[string]algorithm.FitPredicate{}
+predicates := map[string]predicates.FitPredicate{}
 for _, name := range names.List() {
 factory, ok := fitPredicateMap[name]
 if !ok {
@@ -441,12 +441,12 @@ func getPriorityMetadataProducer(args PluginFactoryArgs) (algorithm.PriorityMeta
 return priorityMetadataProducer(args), nil
 }

-func getPredicateMetadataProducer(args PluginFactoryArgs) (algorithm.PredicateMetadataProducer, error) {
+func getPredicateMetadataProducer(args PluginFactoryArgs) (predicates.PredicateMetadataProducer, error) {
 schedulerFactoryMutex.Lock()
 defer schedulerFactoryMutex.Unlock()

 if predicateMetadataProducer == nil {
-return algorithm.EmptyPredicateMetadataProducer, nil
+return predicates.EmptyPredicateMetadataProducer, nil
 }
 return predicateMetadataProducer(args), nil
 }
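Out-of-tree or test code that registers predicates goes through these factory helpers, so after this change it must supply a predicates.FitPredicate. A sketch of such a registration, assuming a package that can import the scheduler factory shown in this diff (the predicate, its name, and the package name are illustrative):

package register

import (
	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	"k8s.io/kubernetes/pkg/scheduler/factory"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// alwaysFit trivially admits every node; it exists only to show the
// post-move signature that RegisterFitPredicate now expects.
func alwaysFit(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
	return true, nil, nil
}

func init() {
	// RegisterFitPredicate takes (name string, predicate predicates.FitPredicate)
	// after this change and returns the registered name.
	factory.RegisterFitPredicate("ExampleAlwaysFit", alwaysFit)
}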
@@ -136,7 +136,7 @@ func podWithResources(id, desiredHost string, limits v1.ResourceList, requests v
 return pod
 }

-func PredicateOne(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PredicateOne(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
 return true, nil, nil
 }

@@ -153,7 +153,7 @@ func (es mockScheduler) Schedule(pod *v1.Pod, ml algorithm.NodeLister) (string,
 return es.machine, es.err
 }

-func (es mockScheduler) Predicates() map[string]algorithm.FitPredicate {
+func (es mockScheduler) Predicates() map[string]predicates.FitPredicate {
 return nil
 }
 func (es mockScheduler) Prioritizers() []algorithm.PriorityConfig {
@@ -341,7 +341,7 @@ func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
 scache.AddNode(&node)
 client := clientsetfake.NewSimpleClientset(&node)
 informerFactory := informers.NewSharedInformerFactory(client, 0)
-predicateMap := map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts}
+predicateMap := map[string]predicates.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts}
 scheduler, bindingChan, _ := setupTestSchedulerWithOnePodOnNode(t, queuedPodStore, scache, informerFactory, stop, predicateMap, pod, &node)

 waitPodExpireChan := make(chan struct{})
@@ -400,7 +400,7 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
 scache.AddNode(&node)
 client := clientsetfake.NewSimpleClientset(&node)
 informerFactory := informers.NewSharedInformerFactory(client, 0)
-predicateMap := map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts}
+predicateMap := map[string]predicates.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts}
 scheduler, bindingChan, errChan := setupTestSchedulerWithOnePodOnNode(t, queuedPodStore, scache, informerFactory, stop, predicateMap, firstPod, &node)

 // We use conflicted pod ports to incur fit predicate failure.
@@ -415,7 +415,7 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
 expectErr := &core.FitError{
 Pod: secondPod,
 NumAllNodes: 1,
-FailedPredicates: core.FailedPredicateMap{node.Name: []algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts}},
+FailedPredicates: core.FailedPredicateMap{node.Name: []predicates.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts}},
 }
 if !reflect.DeepEqual(expectErr, err) {
 t.Errorf("err want=%v, get=%v", expectErr, err)
@@ -489,7 +489,7 @@ func TestSchedulerErrorWithLongBinding(t *testing.T) {

 client := clientsetfake.NewSimpleClientset(&node)
 informerFactory := informers.NewSharedInformerFactory(client, 0)
-predicateMap := map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts}
+predicateMap := map[string]predicates.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts}

 scheduler, bindingChan := setupTestSchedulerLongBindingWithRetry(
 queuedPodStore, scache, informerFactory, predicateMap, stop, test.BindingDuration)
@@ -524,7 +524,7 @@ func TestSchedulerErrorWithLongBinding(t *testing.T) {
 // queuedPodStore: pods queued before processing.
 // cache: scheduler cache that might contain assumed pods.
 func setupTestSchedulerWithOnePodOnNode(t *testing.T, queuedPodStore *clientcache.FIFO, scache schedulerinternalcache.Cache,
-informerFactory informers.SharedInformerFactory, stop chan struct{}, predicateMap map[string]algorithm.FitPredicate, pod *v1.Pod, node *v1.Node) (*Scheduler, chan *v1.Binding, chan error) {
+informerFactory informers.SharedInformerFactory, stop chan struct{}, predicateMap map[string]predicates.FitPredicate, pod *v1.Pod, node *v1.Node) (*Scheduler, chan *v1.Binding, chan error) {

 scheduler, bindingChan, errChan := setupTestScheduler(queuedPodStore, scache, informerFactory, predicateMap, nil)

@@ -596,14 +596,14 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
 }
 client := clientsetfake.NewSimpleClientset(objects...)
 informerFactory := informers.NewSharedInformerFactory(client, 0)
-predicateMap := map[string]algorithm.FitPredicate{
+predicateMap := map[string]predicates.FitPredicate{
 "PodFitsResources": predicates.PodFitsResources,
 }

 // Create expected failure reasons for all the nodes. Hopefully they will get rolled up into a non-spammy summary.
 failedPredicatesMap := core.FailedPredicateMap{}
 for _, node := range nodes {
-failedPredicatesMap[node.Name] = []algorithm.PredicateFailureReason{
+failedPredicatesMap[node.Name] = []predicates.PredicateFailureReason{
 predicates.NewInsufficientResourceError(v1.ResourceCPU, 4000, 0, 2000),
 predicates.NewInsufficientResourceError(v1.ResourceMemory, 500, 0, 100),
 }
@@ -635,12 +635,12 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {

 // queuedPodStore: pods queued before processing.
 // scache: scheduler cache that might contain assumed pods.
-func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache schedulerinternalcache.Cache, informerFactory informers.SharedInformerFactory, predicateMap map[string]algorithm.FitPredicate, recorder record.EventRecorder) (*Scheduler, chan *v1.Binding, chan error) {
+func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache schedulerinternalcache.Cache, informerFactory informers.SharedInformerFactory, predicateMap map[string]predicates.FitPredicate, recorder record.EventRecorder) (*Scheduler, chan *v1.Binding, chan error) {
 algo := core.NewGenericScheduler(
 scache,
 nil,
 predicateMap,
-algorithm.EmptyPredicateMetadataProducer,
+predicates.EmptyPredicateMetadataProducer,
 []algorithm.PriorityConfig{},
 algorithm.EmptyPriorityMetadataProducer,
 &EmptyPluginSet{},
@@ -687,12 +687,12 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache schedulerintern
 return sched, bindingChan, errChan
 }

-func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, scache schedulerinternalcache.Cache, informerFactory informers.SharedInformerFactory, predicateMap map[string]algorithm.FitPredicate, stop chan struct{}, bindingTime time.Duration) (*Scheduler, chan *v1.Binding) {
+func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, scache schedulerinternalcache.Cache, informerFactory informers.SharedInformerFactory, predicateMap map[string]predicates.FitPredicate, stop chan struct{}, bindingTime time.Duration) (*Scheduler, chan *v1.Binding) {
 algo := core.NewGenericScheduler(
 scache,
 nil,
 predicateMap,
-algorithm.EmptyPredicateMetadataProducer,
+predicates.EmptyPredicateMetadataProducer,
 []algorithm.PriorityConfig{},
 algorithm.EmptyPriorityMetadataProducer,
 &EmptyPluginSet{},
@@ -748,7 +748,7 @@ func setupTestSchedulerWithVolumeBinding(fakeVolumeBinder *volumebinder.VolumeBi
 client := clientsetfake.NewSimpleClientset(&testNode)
 informerFactory := informers.NewSharedInformerFactory(client, 0)

-predicateMap := map[string]algorithm.FitPredicate{
+predicateMap := map[string]predicates.FitPredicate{
 predicates.CheckVolumeBindingPred: predicates.NewVolumeBindingPredicate(fakeVolumeBinder),
 }

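The same substitution repeats throughout these helpers: the predicate map handed to the test scheduler is keyed to predicates.FitPredicate values, and the empty metadata producer now comes from the predicates package as well. A compressed sketch of that wiring, assuming it sits next to the helpers above (only the two arguments relevant to this change are shown; the helper name is illustrative):

// newTestPredicateMap builds the map the test helpers above pass into
// core.NewGenericScheduler; both the map's value type and the real
// PodFitsHostPorts predicate now live in the predicates package.
func newTestPredicateMap() (map[string]predicates.FitPredicate, predicates.PredicateMetadataProducer) {
	predicateMap := map[string]predicates.FitPredicate{
		"PodFitsHostPorts": predicates.PodFitsHostPorts,
	}
	return predicateMap, predicates.EmptyPredicateMetadataProducer
}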
@@ -24,6 +24,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 corelisters "k8s.io/client-go/listers/core/v1"
 "k8s.io/kubernetes/pkg/scheduler/algorithm"
+"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 "k8s.io/kubernetes/pkg/scheduler/factory"
 internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
@@ -37,12 +38,12 @@ type FakeConfigurator struct {
 }

 // GetPredicateMetadataProducer is not implemented yet.
-func (fc *FakeConfigurator) GetPredicateMetadataProducer() (algorithm.PredicateMetadataProducer, error) {
+func (fc *FakeConfigurator) GetPredicateMetadataProducer() (predicates.PredicateMetadataProducer, error) {
 return nil, fmt.Errorf("not implemented")
 }

 // GetPredicates is not implemented yet.
-func (fc *FakeConfigurator) GetPredicates(predicateKeys sets.String) (map[string]algorithm.FitPredicate, error) {
+func (fc *FakeConfigurator) GetPredicates(predicateKeys sets.String) (map[string]predicates.FitPredicate, error) {
 return nil, fmt.Errorf("not implemented")
 }

@@ -31,7 +31,6 @@ go_test(
 "//pkg/controller/volume/persistentvolume/options:go_default_library",
 "//pkg/features:go_default_library",
 "//pkg/scheduler:go_default_library",
-"//pkg/scheduler/algorithm:go_default_library",
 "//pkg/scheduler/algorithm/predicates:go_default_library",
 "//pkg/scheduler/algorithmprovider:go_default_library",
 "//pkg/scheduler/api:go_default_library",

@@ -40,7 +40,7 @@ import (
 schedulerappconfig "k8s.io/kubernetes/cmd/kube-scheduler/app/config"
 "k8s.io/kubernetes/pkg/api/legacyscheme"
 "k8s.io/kubernetes/pkg/scheduler"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
+"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 _ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
 schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
@@ -56,11 +56,11 @@ type nodeStateManager struct {
 makeUnSchedulable nodeMutationFunc
 }

-func PredicateOne(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PredicateOne(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
 return true, nil, nil
 }

-func PredicateTwo(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PredicateTwo(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
 return true, nil, nil
 }

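A cheap way for downstream test packages to confirm they picked up the relocated type after rebasing on this change is a compile-time assertion against the new package; a sketch, assuming the same imports and helpers as the integration test above:

// These blank assignments fail to compile if PredicateOne/PredicateTwo ever
// drift from the predicates.FitPredicate signature introduced by this move.
var (
	_ predicates.FitPredicate = PredicateOne
	_ predicates.FitPredicate = PredicateTwo
)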