Fix default algorithm provider priority insertion

pull/58/head
ravisantoshgudimetla 2018-10-24 16:32:19 -04:00
parent 04d3949048
commit fad6b326e3
4 changed files with 34 additions and 13 deletions


@@ -207,14 +207,16 @@ func ApplyFeatureGates() {
         factory.InsertPredicateKeyToAlgorithmProviderMap(predicates.PodToleratesNodeTaintsPred)
         factory.InsertPredicateKeyToAlgorithmProviderMap(predicates.CheckNodeUnschedulablePred)
-        glog.Warningf("TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory")
+        glog.Infof("TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory")
     }
 
     // Prioritizes nodes that satisfy pod's resource limits
     if utilfeature.DefaultFeatureGate.Enabled(features.ResourceLimitsPriorityFunction) {
+        glog.Infof("Registering resourcelimits priority function")
         factory.RegisterPriorityFunction2("ResourceLimitsPriority", priorities.ResourceLimitsPriorityMap, nil, 1)
+        // Register the priority function to specific provider too.
+        factory.InsertPriorityKeyToAlgorithmProviderMap(factory.RegisterPriorityFunction2("ResourceLimitsPriority", priorities.ResourceLimitsPriorityMap, nil, 1))
     }
 }
 
 func registerAlgorithmProvider(predSet, priSet sets.String) {
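Why the wrapped call above fixes the bug: factory.RegisterPriorityFunction2 returns the name under which the priority was registered, and handing that name to the new InsertPriorityKeyToAlgorithmProviderMap back-fills it into every provider that was built before the feature gate was checked. A minimal, self-contained sketch of that pattern, using simplified stand-in types rather than the real factory's richer state:

    package main

    import "fmt"

    // stringSet stands in for apimachinery's sets.String.
    type stringSet map[string]struct{}

    func (s stringSet) Insert(key string) { s[key] = struct{}{} }

    // provider mirrors the per-provider PriorityFunctionKeys set kept by the factory.
    type provider struct{ PriorityFunctionKeys stringSet }

    var algorithmProviderMap = map[string]*provider{
        "DefaultProvider":           {PriorityFunctionKeys: stringSet{}},
        "ClusterAutoscalerProvider": {PriorityFunctionKeys: stringSet{}},
    }

    // registerPriorityFunction2 returns the registered name, like the factory call does.
    func registerPriorityFunction2(name string) string { return name }

    // insertPriorityKeyToAlgorithmProviderMap back-fills the key into every
    // provider that already exists -- the behavior this commit adds.
    func insertPriorityKeyToAlgorithmProviderMap(key string) {
        for _, p := range algorithmProviderMap {
            p.PriorityFunctionKeys.Insert(key)
        }
    }

    func main() {
        insertPriorityKeyToAlgorithmProviderMap(registerPriorityFunction2("ResourceLimitsPriority"))
        for name, p := range algorithmProviderMap {
            fmt.Printf("%s now carries %d priority key(s)\n", name, len(p.PriorityFunctionKeys))
        }
    }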


@@ -1096,7 +1096,6 @@ func (c *configFactory) CreateFromProvider(providerName string) (*Config, error)
     if err != nil {
         return nil, err
     }
     return c.CreateFromKeys(provider.FitPredicateKeys, provider.PriorityFunctionKeys, []algorithm.SchedulerExtender{})
 }
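This hunk shows why the back-fill is needed at all: CreateFromProvider builds the scheduler Config purely from the key sets recorded on the provider, so a priority registered after the provider map was populated never reaches CreateFromKeys. A hedged reconstruction of the surrounding function from the visible context lines (the single deleted line is not shown in this diff):

    // Reconstructed for context; not the verbatim source.
    func (c *configFactory) CreateFromProvider(providerName string) (*Config, error) {
        provider, err := GetAlgorithmProvider(providerName)
        if err != nil {
            return nil, err
        }
        // Only keys already present on the provider reach CreateFromKeys, hence
        // late-registered priorities must be inserted into every provider's set.
        return c.CreateFromKeys(provider.FitPredicateKeys, provider.PriorityFunctionKeys, []algorithm.SchedulerExtender{})
    }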


@@ -167,6 +167,17 @@ func InsertPredicateKeyToAlgorithmProviderMap(key string) {
     return
 }
 
+// InsertPriorityKeyToAlgorithmProviderMap inserts a priority function to all algorithmProviders which are in algorithmProviderMap.
+func InsertPriorityKeyToAlgorithmProviderMap(key string) {
+    schedulerFactoryMutex.Lock()
+    defer schedulerFactoryMutex.Unlock()
+
+    for _, provider := range algorithmProviderMap {
+        provider.PriorityFunctionKeys.Insert(key)
+    }
+    return
+}
+
 // RegisterMandatoryFitPredicate registers a fit predicate with the algorithm registry, the predicate is used by
 // kubelet, DaemonSet; it is always included in configuration. Returns the name with which the predicate was
 // registered.
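The new helper mirrors its predicate counterpart: it takes schedulerFactoryMutex around the whole iteration so a concurrent registration cannot race the map walk. A self-contained sketch of that guard-then-mutate pattern, with a plain sync.Mutex standing in for schedulerFactoryMutex:

    package main

    import (
        "fmt"
        "sync"
    )

    var (
        mu        sync.Mutex // stands in for schedulerFactoryMutex
        providers = map[string]map[string]bool{"DefaultProvider": {}}
    )

    // insertPriorityKey holds the lock across the full iteration so readers
    // never observe a provider map that is only partially updated.
    func insertPriorityKey(key string) {
        mu.Lock()
        defer mu.Unlock()
        for _, keys := range providers {
            keys[key] = true
        }
    }

    func main() {
        var wg sync.WaitGroup
        for _, k := range []string{"ResourceLimitsPriority", "ImageLocalityPriority"} {
            wg.Add(1)
            go func(key string) { defer wg.Done(); insertPriorityKey(key) }(k)
        }
        wg.Wait()
        fmt.Println(providers)
    }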


@@ -32,7 +32,9 @@ import (
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/strategicpatch"
     "k8s.io/apimachinery/pkg/util/uuid"
+    utilfeature "k8s.io/apiserver/pkg/util/feature"
     clientset "k8s.io/client-go/kubernetes"
+    "k8s.io/kubernetes/pkg/features"
     priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
     "k8s.io/kubernetes/test/e2e/common"
     "k8s.io/kubernetes/test/e2e/framework"
@@ -83,7 +85,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
         Expect(err).NotTo(HaveOccurred())
     })
 
-    It("Pod should be schedule to node that don't match the PodAntiAffinity terms", func() {
+    It("Pod should be scheduled to node that don't match the PodAntiAffinity terms", func() {
         By("Trying to launch a pod with a label to get a node which can launch it.")
         pod := runPausePod(f, pausePodConfig{
             Name: "pod-with-label-security-s1",
@@ -144,7 +146,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
         Expect(labelPod.Spec.NodeName).NotTo(Equal(nodeName))
     })
 
-    It("Pod should avoid to schedule to node that have avoidPod annotation", func() {
+    It("Pod should avoid nodes that have avoidPod annotation", func() {
         nodeName := nodeList.Items[0].Name
         // make the nodes have balanced cpu,mem usage
         err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.5)
@@ -207,7 +209,8 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
         }
     })
 
-    It("Pod should perfer to scheduled to nodes pod can tolerate", func() {
+    It("Pod should be preferably scheduled to nodes pod can tolerate", func() {
         // make the nodes have balanced cpu,mem usage ratio
         err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.5)
         framework.ExpectNoError(err)
@@ -257,6 +260,9 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
         Expect(tolePod.Spec.NodeName).To(Equal(nodeName))
     })
     It("Pod should be preferably scheduled to nodes which satisfy its limits", func() {
+        if !utilfeature.DefaultFeatureGate.Enabled(features.ResourceLimitsPriorityFunction) {
+            framework.Skipf("ResourceLimits Priority function is not enabled, so skipping this test")
+        }
         var podwithLargeRequestedResource *v1.ResourceRequirements = &v1.ResourceRequirements{
             Requests: v1.ResourceList{
                 v1.ResourceMemory: resource.MustParse("100Mi"),
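The new guard makes the test skip cleanly instead of failing on clusters where the alpha gate is off. For completeness, a hedged fragment (assuming the utilfeature and framework imports used above, and a call site that runs before the suite) showing how the gate could be flipped on programmatically; the same key=value syntax is what kube-scheduler's --feature-gates flag accepts:

    // Assumption: Set mutates the process-global gate and accepts "key=value" pairs.
    if err := utilfeature.DefaultFeatureGate.Set("ResourceLimitsPriorityFunction=true"); err != nil {
        framework.Failf("could not enable ResourceLimitsPriorityFunction: %v", err)
    }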
@@ -264,26 +270,28 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
             },
             Limits: v1.ResourceList{
                 v1.ResourceMemory: resource.MustParse("3000Mi"),
-                v1.ResourceCPU:    resource.MustParse("100m"),
+                v1.ResourceCPU:    resource.MustParse("5000m"),
             },
         }
         // Update one node to have large allocatable.
         lastNode := nodeList.Items[len(nodeList.Items)-1]
         nodeName := lastNode.Name
         nodeOriginalMemory, found := lastNode.Status.Allocatable[v1.ResourceMemory]
+        nodeOriginalCPU, found := lastNode.Status.Allocatable[v1.ResourceCPU]
         Expect(found).To(Equal(true))
         nodeOriginalMemoryVal := nodeOriginalMemory.Value()
-        err := updateMemoryOfNode(cs, nodeName, int64(10000))
+        nodeOriginalCPUVal := nodeOriginalCPU.MilliValue()
+        err := updateNodeAllocatable(cs, nodeName, int64(10000), int64(12000))
         Expect(err).NotTo(HaveOccurred())
         defer func() {
-            // Resize the node back to its original memory.
-            if err := updateMemoryOfNode(cs, nodeName, nodeOriginalMemoryVal); err != nil {
+            // Resize the node back to its original allocatable values.
+            if err := updateNodeAllocatable(cs, nodeName, nodeOriginalMemoryVal, nodeOriginalCPUVal); err != nil {
                 framework.Logf("Failed to revert node memory with %v", err)
             }
         }()
         err = createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.5)
         framework.ExpectNoError(err)
 
-        // After the above we should see 50% of node to be available which is 5000MiB for large node.
+        // After the above we should see 50% of node to be available which is 5000MiB memory, 6000m cpu for large node.
         By("Create a pod with unusual large limits")
         podWithLargeLimits := "with-large-limits"
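Since the test now pins both allocatable dimensions, the units are worth spelling out: quantities built with resource.NewQuantity under BinarySI canonicalize to power-of-two suffixes, while NewMilliQuantity expresses CPU in thousandths of a core. A self-contained sketch with illustrative values (not taken from the test):

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/resource"
    )

    func main() {
        // 3000 * 1024 * 1024 bytes canonicalizes to "3000Mi" under BinarySI.
        mem := resource.NewQuantity(3000*1024*1024, resource.BinarySI)
        // 5000 millicores is five full cores; String() prints "5".
        cpu := resource.NewMilliQuantity(5000, resource.DecimalSI)
        fmt.Println(mem.String(), cpu.String())
    }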
@@ -445,8 +453,8 @@ func addRandomTaitToNode(cs clientset.Interface, nodeName string) *v1.Taint {
     return &testTaint
 }
 
-// updateMemoryOfNode updates the memory of given node with the given value
-func updateMemoryOfNode(c clientset.Interface, nodeName string, memory int64) error {
+// updateNodeAllocatable updates the allocatable values of given node with the given values.
+func updateNodeAllocatable(c clientset.Interface, nodeName string, memory, cpu int64) error {
     node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
     framework.ExpectNoError(err)
     oldData, err := json.Marshal(node)
@@ -454,6 +462,7 @@ func updateMemoryOfNode(c clientset.Interface, nodeName string, memory int64) error {
         return err
     }
     node.Status.Allocatable[v1.ResourceMemory] = *resource.NewQuantity(memory, resource.BinarySI)
+    node.Status.Allocatable[v1.ResourceCPU] = *resource.NewMilliQuantity(cpu, resource.DecimalSI)
     newData, err := json.Marshal(node)
     if err != nil {
         return err
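The diff cuts off before the function's tail. Based on the oldData/newData marshaling above and the strategicpatch import, it presumably finishes by diffing the two snapshots into a strategic merge patch and applying it to the node's status subresource; a hedged reconstruction, not the verbatim source:

    // Sketch of the (elided) end of updateNodeAllocatable.
    patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
    if err != nil {
        return err
    }
    _, err = c.CoreV1().Nodes().PatchStatus(nodeName, patchBytes)
    return err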