Merge pull request #60386 from k82cn/k8s_60163

Automatic merge from submit-queue (batch tested with PRs 60683, 60386). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Added unschedulable predicate.

Signed-off-by: Da K. Ma <madaxa@cn.ibm.com>

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #60163

**Release note**:
```release-note
None
```
Kubernetes Submit Queue, 2018-03-02 03:41:50 -08:00 (committed via GitHub)
commit ae1fc13aee
3 changed files with 38 additions and 4 deletions
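
Before the diffs, a hedged sketch of what the new predicate does: a node whose `.spec.unschedulable` field is set (for example by `kubectl cordon`) should fail the scheduler's fit check. The types and helper below are simplified stand-ins, not the merged code, which operates on `*schedulercache.NodeInfo` and returns `PredicateFailureReason` values (see the second diff below):

```go
package main

import "fmt"

// nodeSpec and node are illustrative stand-ins for the real v1 API types.
type nodeSpec struct {
	unschedulable bool
}

type node struct {
	name string
	spec nodeSpec
}

// checkNodeUnschedulable mirrors the essence of the new
// CheckNodeUnschedulablePredicate: a node whose .spec.unschedulable is
// true (e.g. after `kubectl cordon`) fails the fit check.
func checkNodeUnschedulable(n *node) (bool, string) {
	if n == nil {
		return false, "node unknown condition"
	}
	if n.spec.unschedulable {
		return false, "node is unschedulable"
	}
	return true, ""
}

func main() {
	cordoned := &node{name: "node-1", spec: nodeSpec{unschedulable: true}}
	if fits, reason := checkNodeUnschedulable(cordoned); !fits {
		fmt.Printf("pod does not fit on %s: %s\n", cordoned.name, reason)
	}
}
```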


```diff
@@ -1545,6 +1545,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 		predicateName string
 		podsOnNode    []*v1.Pod
 		nodeCondition []v1.NodeCondition
+		nodeUnschedulable bool
 		ds *apps.DaemonSet
 		wantToRun, shouldSchedule, shouldContinueRunning bool
 		err error
@@ -1800,6 +1801,24 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 			shouldSchedule:        true,
 			shouldContinueRunning: true,
 		},
+		{
+			predicateName: "ShouldRunDaemonPodOnUnscheduableNode",
+			ds: &apps.DaemonSet{
+				Spec: apps.DaemonSetSpec{
+					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
+					Template: v1.PodTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{
+							Labels: simpleDaemonSetLabel,
+						},
+						Spec: resourcePodSpec("", "50M", "0.5"),
+					},
+				},
+			},
+			nodeUnschedulable:     true,
+			wantToRun:             true,
+			shouldSchedule:        true,
+			shouldContinueRunning: true,
+		},
 	}
 	for i, c := range cases {
@@ -1807,6 +1826,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 		node := newNode("test-node", simpleDaemonSetLabel)
 		node.Status.Conditions = append(node.Status.Conditions, c.nodeCondition...)
 		node.Status.Allocatable = allocatableResources("100M", "1")
+		node.Spec.Unschedulable = c.nodeUnschedulable
 		manager, _, _, err := newTestController()
 		if err != nil {
 			t.Fatalf("error creating DaemonSets controller: %v", err)
```
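
The new case encodes the intended semantics: with `nodeUnschedulable: true`, the controller still reports `wantToRun`, `shouldSchedule`, and `shouldContinueRunning` as true, because DaemonSet pods deliberately keep running on nodes marked unschedulable. A minimal, self-contained sketch of that expectation in the same table-driven style (the `fakeNode` type and `shouldRunDaemonPod` helper are illustrative, not the controller's real test fixtures):

```go
package daemonset_test // hypothetical package, for illustration only

import "testing"

// fakeNode is a stand-in for *v1.Node; only the field under test is kept.
type fakeNode struct {
	unschedulable bool
}

// shouldRunDaemonPod mirrors the expectation the new case asserts:
// daemon pods are wanted regardless of .spec.unschedulable.
func shouldRunDaemonPod(n fakeNode) bool {
	_ = n.unschedulable // deliberately ignored for DaemonSet pods
	return true
}

func TestDaemonPodOnCordonedNode(t *testing.T) {
	cases := []struct {
		name      string
		node      fakeNode
		wantToRun bool
	}{
		{name: "unschedulable node", node: fakeNode{unschedulable: true}, wantToRun: true},
		{name: "schedulable node", node: fakeNode{unschedulable: false}, wantToRun: true},
	}
	for _, c := range cases {
		if got := shouldRunDaemonPod(c.node); got != c.wantToRun {
			t.Errorf("%s: got %v, want %v", c.name, got, c.wantToRun)
		}
	}
}
```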


```diff
@@ -69,6 +69,8 @@ const (
 	NoDiskConflictPred = "NoDiskConflict"
 	// PodToleratesNodeTaintsPred defines the name of predicate PodToleratesNodeTaints.
 	PodToleratesNodeTaintsPred = "PodToleratesNodeTaints"
+	// CheckNodeUnschedulablePred defines the name of predicate CheckNodeUnschedulablePredicate.
+	CheckNodeUnschedulablePred = "CheckNodeUnschedulable"
 	// PodToleratesNodeNoExecuteTaintsPred defines the name of predicate PodToleratesNodeNoExecuteTaints.
 	PodToleratesNodeNoExecuteTaintsPred = "PodToleratesNodeNoExecuteTaints"
 	// CheckNodeLabelPresencePred defines the name of predicate CheckNodeLabelPresence.
@@ -125,7 +127,7 @@ const (
 // The order is based on the restrictiveness & complexity of predicates.
 // Design doc: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/predicates-ordering.md
 var (
-	predicatesOrdering = []string{CheckNodeConditionPred,
+	predicatesOrdering = []string{CheckNodeConditionPred, CheckNodeUnschedulablePred,
 		GeneralPred, HostNamePred, PodFitsHostPortsPred,
 		MatchNodeSelectorPred, PodFitsResourcesPred, NoDiskConflictPred,
 		PodToleratesNodeTaintsPred, PodToleratesNodeNoExecuteTaintsPred, CheckNodeLabelPresencePred,
@@ -1446,8 +1448,8 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, node
 	return nil, nil
 }

-// PodToleratesNodeTaints checks if a pod tolerations can tolerate the node taints
-func PodToleratesNodeTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+// CheckNodeUnschedulablePredicate checks if a pod can be scheduled on a node with Unschedulable spec.
+func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
 	if nodeInfo == nil || nodeInfo.Node() == nil {
 		return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil
 	}
@@ -1456,6 +1458,15 @@ func PodToleratesNodeTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeI
+	if nodeInfo.Node().Spec.Unschedulable {
+		return false, []algorithm.PredicateFailureReason{ErrNodeUnschedulable}, nil
+	}
+
+	return true, nil, nil
+}
+
+// PodToleratesNodeTaints checks if a pod tolerations can tolerate the node taints
+func PodToleratesNodeTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+	if nodeInfo == nil || nodeInfo.Node() == nil {
+		return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil
+	}
 	return podToleratesNodeTaints(pod, nodeInfo, func(t *v1.Taint) bool {
 		// PodToleratesNodeTaints is only interested in NoSchedule and NoExecute taints.
 		return t.Effect == v1.TaintEffectNoSchedule || t.Effect == v1.TaintEffectNoExecute
```
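
The ordering change above slots `CheckNodeUnschedulablePred` immediately after `CheckNodeConditionPred`, so this cheap check runs before more expensive predicates. A minimal sketch of how an ordered, short-circuiting predicate chain behaves; the function names and signatures here are illustrative, not the scheduler's actual generic fit logic:

```go
package main

import "fmt"

// predicateFunc is a simplified stand-in for the scheduler's fit-predicate
// signature shown in the diff above; real predicates also receive the pod
// and predicate metadata.
type predicateFunc func(nodeUnschedulable bool) (bool, string)

func main() {
	// Illustrative ordering: CheckNodeUnschedulable sits right after
	// CheckNodeCondition, mirroring the predicatesOrdering change above.
	ordering := []string{"CheckNodeCondition", "CheckNodeUnschedulable", "GeneralPredicates"}
	predicates := map[string]predicateFunc{
		"CheckNodeCondition":     func(bool) (bool, string) { return true, "" },
		"CheckNodeUnschedulable": func(u bool) (bool, string) { return !u, "node is unschedulable" },
		"GeneralPredicates":      func(bool) (bool, string) { return true, "" },
	}

	nodeUnschedulable := true
	for _, name := range ordering {
		if fits, reason := predicates[name](nodeUnschedulable); !fits {
			fmt.Printf("predicate %s failed: %s\n", name, reason)
			break // failing a cheap check early avoids running costlier predicates
		}
	}
}
```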


```diff
@@ -186,12 +186,15 @@ func ApplyFeatureGates() {
 		// if you just want remove specific provider, call func RemovePredicateKeyFromAlgoProvider()
 		factory.RemovePredicateKeyFromAlgorithmProviderMap(predicates.CheckNodeConditionPred)

+		// Fit is determined based on whether a node has Unschedulable spec
+		factory.RegisterMandatoryFitPredicate(predicates.CheckNodeUnschedulablePred, predicates.CheckNodeUnschedulablePredicate)
 		// Fit is determined based on whether a pod can tolerate all of the node's taints
 		factory.RegisterMandatoryFitPredicate(predicates.PodToleratesNodeTaintsPred, predicates.PodToleratesNodeTaints)

-		// Insert Key "PodToleratesNodeTaints" To All Algorithm Provider
+		// Insert Key "PodToleratesNodeTaints" and "CheckNodeUnschedulable" To All Algorithm Provider
 		// The key will insert to all providers which in algorithmProviderMap[]
 		// if you just want insert to specific provider, call func InsertPredicateKeyToAlgoProvider()
 		factory.InsertPredicateKeyToAlgorithmProviderMap(predicates.PodToleratesNodeTaintsPred)
+		factory.InsertPredicateKeyToAlgorithmProviderMap(predicates.CheckNodeUnschedulablePred)

 		glog.Warningf("TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory")
 	}
```
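
`RegisterMandatoryFitPredicate` makes the new predicate apply regardless of which predicate keys a scheduling policy configures. A toy sketch of what "mandatory" implies, assuming a simplified registry; the `registry` type and its methods are illustrative, not the factory package's real internals:

```go
package main

import "fmt"

// registry is a toy model of "mandatory" predicate registration: mandatory
// predicates are enforced even when a configured predicate set omits them.
type registry struct {
	mandatory map[string]bool
}

func (r *registry) registerMandatory(name string) {
	if r.mandatory == nil {
		r.mandatory = map[string]bool{}
	}
	r.mandatory[name] = true
}

// effectivePredicates merges a provider's configured keys with the mandatory
// set, so CheckNodeUnschedulable is always evaluated. Map iteration order is
// unspecified; a real implementation would keep a stable, deduplicated list.
func (r *registry) effectivePredicates(configured []string) []string {
	out := append([]string{}, configured...)
	for name := range r.mandatory {
		out = append(out, name)
	}
	return out
}

func main() {
	r := &registry{}
	r.registerMandatory("CheckNodeUnschedulable")
	r.registerMandatory("PodToleratesNodeTaints")
	fmt.Println(r.effectivePredicates([]string{"GeneralPredicates"}))
}
```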