mirror of https://github.com/k3s-io/k3s
Use const value maxPriority instead of the literal value 10
Signed-off-by: sakeven <jc5930@sina.cn>
parent b139d9d759
commit 6aeb77aa6a
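The change itself is mechanical: wherever a priority score was computed against the literal 10, the scheduler's shared constant is used instead, so the 0..MaxPriority range is defined in one place. A rough standalone Go sketch of the pattern, using the unused-capacity scoring from the diff below; the local MaxPriority constant stands in for schedulerapi.MaxPriority, which the substitution implies equals 10:

package main

import "fmt"

// MaxPriority stands in for schedulerapi.MaxPriority; since the commit swaps it
// in for the literal 10, its value here is 10 as well.
const MaxPriority = 10

// unusedScoreLiteral shows the old style: the top of the score range is a magic number.
func unusedScoreLiteral(requested, capacity int64) int64 {
    return ((capacity - requested) * 10) / capacity
}

// unusedScoreConst shows the new style: the same computation against the named constant.
func unusedScoreConst(requested, capacity int64) int64 {
    return ((capacity - requested) * int64(MaxPriority)) / capacity
}

func main() {
    fmt.Println(unusedScoreLiteral(2500, 10000)) // 7
    fmt.Println(unusedScoreConst(2500, 10000))   // 7: behavior is unchanged, only the constant is named
}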
@@ -71,7 +71,7 @@ func calculateBalancedResourceAllocation(pod *v1.Pod, podRequests *schedulercach
 // 0-10 with 0 representing well balanced allocation and 10 poorly balanced. Subtracting it from
 // 10 leads to the score which also scales from 0 to 10 while 10 representing well balanced.
 diff := math.Abs(cpuFraction - memoryFraction)
-score = int(10 - diff*10)
+score = int((1 - diff) * float64(schedulerapi.MaxPriority))
 }
 if glog.V(10) {
 // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is

@@ -98,8 +98,8 @@ func fractionOfCapacity(requested, capacity int64) float64 {
 return float64(requested) / float64(capacity)
 }

-// BalancedResourceAllocation favors nodes with balanced resource usage rate.
-// BalancedResourceAllocation should **NOT** be used alone, and **MUST** be used together with LeastRequestedPriority.
+// BalancedResourceAllocationMap favors nodes with balanced resource usage rate.
+// BalancedResourceAllocationMap should **NOT** be used alone, and **MUST** be used together with LeastRequestedPriority.
 // It calculates the difference between the cpu and memory fracion of capacity, and prioritizes the host based on how
 // close the two metrics are to each other.
 // Detail: score = 10 - abs(cpuFraction-memoryFraction)*10. The algorithm is partly inspired by:
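The balanced-allocation formula quoted just above (score = 10 - abs(cpuFraction-memoryFraction)*10, now expressed via the constant) can be exercised on its own. A minimal standalone Go sketch, not part of the patch; maxPriority is a local stand-in for schedulerapi.MaxPriority:

package main

import (
    "fmt"
    "math"
)

// maxPriority is a local stand-in for schedulerapi.MaxPriority (10).
const maxPriority = 10

// balancedScore mirrors the formula in the diff: the closer the CPU and memory
// fractions of capacity are to each other, the closer the score is to maxPriority.
func balancedScore(cpuFraction, memoryFraction float64) int {
    diff := math.Abs(cpuFraction - memoryFraction)
    return int((1 - diff) * float64(maxPriority))
}

func main() {
    fmt.Println(balancedScore(0.5, 0.5))  // 10: perfectly balanced
    fmt.Println(balancedScore(0.75, 0.5)) // 7: fractions are 0.25 apart, 7.5 truncated to 7
    fmt.Println(balancedScore(1.0, 0.0))  // 0: as unbalanced as possible
}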
@@ -110,7 +110,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 */
 pod: &v1.Pod{Spec: noResources},
 nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
 test: "nothing scheduled, nothing requested",
 },
 {

@@ -127,7 +127,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 */
 pod: &v1.Pod{Spec: cpuAndMemory},
 nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 7}, {Host: "machine2", Score: 10}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 7}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
 test: "nothing scheduled, resources requested, differently sized machines",
 },
 {

@@ -144,7 +144,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 */
 pod: &v1.Pod{Spec: noResources},
 nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
 test: "no resources requested, pods scheduled",
 pods: []*v1.Pod{
 {Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
@@ -24,7 +24,7 @@ import (
 "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
 )

-// ImageLocalityPriority is a priority function that favors nodes that already have requested pod container's images.
+// ImageLocalityPriorityMap is a priority function that favors nodes that already have requested pod container's images.
 // It will detect whether the requested images are present on a node, and then calculate a score ranging from 0 to 10
 // based on the total size of those images.
 // - If none of the images are present, this node will be given the lowest priority.

@@ -57,9 +57,9 @@ func calculateScoreFromSize(sumSize int64) int {
 score = 0
 // If existing images' total size is larger than max, just make it highest priority.
 case sumSize >= maxImgSize:
-score = 10
+score = schedulerapi.MaxPriority
 default:
-score = int((10 * (sumSize - minImgSize) / (maxImgSize - minImgSize)) + 1)
+score = int((int64(schedulerapi.MaxPriority) * (sumSize - minImgSize) / (maxImgSize - minImgSize)) + 1)
 }
 // Return which bucket the given size belongs to
 return score
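The image-locality bucketing above maps the total size of already-present images onto the same 0..MaxPriority range, clamping at both ends. A standalone sketch under assumed bounds; the minImgSize and maxImgSize values and the first case's condition are illustrative guesses, only the clamping and the linear mapping come from the diff:

package main

import "fmt"

const (
    maxPriority = 10                 // stand-in for schedulerapi.MaxPriority
    minImgSize  = 23 * 1024 * 1024   // illustrative lower bound, not taken from the diff
    maxImgSize  = 1000 * 1024 * 1024 // illustrative upper bound, not taken from the diff
)

// scoreFromSize mirrors the shape of calculateScoreFromSize in the hunk above:
// below the minimum the node gets 0, above the maximum it gets maxPriority,
// and in between the size is mapped linearly onto 1..maxPriority.
func scoreFromSize(sumSize int64) int {
    switch {
    case sumSize == 0 || sumSize < minImgSize: // assumed condition; the hunk only shows this case's body
        return 0
    case sumSize >= maxImgSize:
        return maxPriority
    default:
        return int((int64(maxPriority)*(sumSize-minImgSize))/(maxImgSize-minImgSize)) + 1
    }
}

func main() {
    fmt.Println(scoreFromSize(10 * 1024 * 1024))   // 0: below the minimum
    fmt.Println(scoreFromSize(500 * 1024 * 1024))  // 5: mid-range bucket with these bounds
    fmt.Println(scoreFromSize(2000 * 1024 * 1024)) // 10: clamped at maxPriority
}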
@@ -154,7 +154,7 @@ func TestImageLocalityPriority(t *testing.T) {
 // Score: 10 < min score = 0
 pod: &v1.Pod{Spec: test_min_max},
 nodes: []*v1.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
 test: "if exceed limit, use limit",
 },
 }
@@ -111,7 +111,7 @@ func (p *podAffinityPriorityMap) processTerms(terms []v1.WeightedPodAffinityTerm
 }
 }

-// compute a sum by iterating through the elements of weightedPodAffinityTerm and adding
+// CalculateInterPodAffinityPriority compute a sum by iterating through the elements of weightedPodAffinityTerm and adding
 // "weight" to the sum if the corresponding PodAffinityTerm is satisfied for
 // that node; the node(s) with the highest sum are the most preferred.
 // Symmetry need to be considered for preferredDuringSchedulingIgnoredDuringExecution from podAffinity & podAntiAffinity,

@@ -224,7 +224,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
 for _, node := range nodes {
 fScore := float64(0)
 if (maxCount - minCount) > 0 {
-fScore = 10 * ((pm.counts[node.Name] - minCount) / (maxCount - minCount))
+fScore = float64(schedulerapi.MaxPriority) * ((pm.counts[node.Name] - minCount) / (maxCount - minCount))
 }
 result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)})
 if glog.V(10) {
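The inter-pod affinity hunk above normalizes each node's accumulated affinity weight into the 0..MaxPriority range by min-max scaling. A standalone sketch of just that step; the counts and min/max inputs are illustrative, and maxPriority again stands in for schedulerapi.MaxPriority:

package main

import "fmt"

const maxPriority = 10 // stand-in for schedulerapi.MaxPriority

// normalize mirrors the scaling in the hunk above: the node with the smallest
// accumulated weight maps to 0, the node with the largest maps to maxPriority.
func normalize(counts map[string]float64, minCount, maxCount float64) map[string]int {
    scores := make(map[string]int, len(counts))
    for node, c := range counts {
        fScore := float64(0)
        if maxCount-minCount > 0 {
            fScore = float64(maxPriority) * ((c - minCount) / (maxCount - minCount))
        }
        scores[node] = int(fScore)
    }
    return scores
}

func main() {
    counts := map[string]float64{"machine1": 50, "machine2": 10, "machine3": 30}
    fmt.Println(normalize(counts, 10, 50)) // map[machine1:10 machine2:0 machine3:5]
}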
@@ -294,7 +294,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
 },
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
 test: "Affinity: pod that matches topology key & pods in nodes will get high score comparing to others" +
 "which doesn't match either pods in nodes or in topology key",
 },

@@ -312,7 +312,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}},
 {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
 },
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}},
 test: "All the nodes that have the same topology key & label value with one of them has an existing pod that match the affinity rules, have the same score",
 },
 // there are 2 regions, say regionChina(machine1,machine3,machine4) and regionIndia(machine2,machine5), both regions have nodes that match the preference.

@@ -336,7 +336,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelRgChina}},
 {ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
 },
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 5}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 10}, {Host: "machine5", Score: 5}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 5}, {Host: "machine3", Score: schedulerapi.MaxPriority}, {Host: "machine4", Score: schedulerapi.MaxPriority}, {Host: "machine5", Score: 5}},
 test: "Affinity: nodes in one region has more matching pods comparing to other reqion, so the region which has more macthes will get high score",
 },
 // Test with the different operators and values for pod affinity scheduling preference, including some match failures.

@@ -352,7 +352,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
 },
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}},
 test: "Affinity: different Label operators and values for pod affinity scheduling preference, including some match failures ",
 },
 // Test the symmetry cases for affinity, the difference between affinity and symmetry is not the pod wants to run together with some existing pods,

@@ -368,7 +368,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
 },
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}},
 test: "Affinity symmetry: considred only the preferredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
 },
 {

@@ -382,7 +382,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
 },
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}},
 test: "Affinity symmetry: considred RequiredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
 },

@@ -402,7 +402,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
 {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
 },
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
 test: "Anti Affinity: pod that doesnot match existing pods in node will get high score ",
 },
 {

@@ -415,7 +415,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
 {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
 },
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
 test: "Anti Affinity: pod that does not matches topology key & matches the pods in nodes will get higher score comparing to others ",
 },
 {

@@ -429,7 +429,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
 {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 },
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
 test: "Anti Affinity: one node has more matching pods comparing to other node, so the node which has more unmacthes will get high score",
 },
 // Test the symmetry cases for anti affinity

@@ -443,7 +443,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
 {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelAzAz2}},
 },
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
 test: "Anti Affinity symmetry: the existing pods in node which has anti affinity match will get high score",
 },
 // Test both affinity and anti-affinity

@@ -457,7 +457,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
 {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelAzAz1}},
 },
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
 test: "Affinity and Anti Affinity: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity",
 },
 // Combined cases considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels (they are in the same RC/service),

@@ -482,7 +482,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelRgChina}},
 {ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
 },
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 4}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 10}, {Host: "machine5", Score: 4}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 4}, {Host: "machine3", Score: schedulerapi.MaxPriority}, {Host: "machine4", Score: schedulerapi.MaxPriority}, {Host: "machine5", Score: 4}},
 test: "Affinity and Anti Affinity: considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels",
 },
 // Consider Affinity, Anti Affinity and symmetry together.

@@ -504,7 +504,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
 {ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelAzAz2}},
 },
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 0}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: schedulerapi.MaxPriority}, {Host: "machine4", Score: 0}},
 test: "Affinity and Anti Affinity and symmetry: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity & symmetry",
 },
 }

@@ -577,7 +577,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
 },
 hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}},
 test: "Hard Pod Affinity symmetry: hard pod affinity symmetry weights 1 by default, then nodes that match the hard pod affinity symmetry rules, get a high score",
 },
 {
@@ -53,7 +53,7 @@ func calculateUnusedScore(requested int64, capacity int64, node string) int64 {
 requested, capacity, node)
 return 0
 }
-return ((capacity - requested) * 10) / capacity
+return ((capacity - requested) * int64(schedulerapi.MaxPriority)) / capacity
 }

 // Calculates host priority based on the amount of unused resources.

@@ -110,7 +110,7 @@ func TestLeastRequested(t *testing.T) {
 */
 pod: &v1.Pod{Spec: noResources},
 nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
 test: "nothing scheduled, nothing requested",
 },
 {

@@ -144,7 +144,7 @@ func TestLeastRequested(t *testing.T) {
 */
 pod: &v1.Pod{Spec: noResources},
 nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
 test: "no resources requested, pods scheduled",
 pods: []*v1.Pod{
 {Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
@@ -57,7 +57,7 @@ func calculateUsedScore(requested int64, capacity int64, node string) int64 {
 requested, capacity, node)
 return 0
 }
-return (requested * 10) / capacity
+return (requested * schedulerapi.MaxPriority) / capacity
 }

 // Calculate the resource used on a node. 'node' has information about the resources on the node.
@@ -87,7 +87,7 @@ func CalculateNodeAffinityPriorityReduce(pod *v1.Pod, meta interface{}, nodeName
 var fScore float64
 for i := range result {
 if maxCount > 0 {
-fScore = 10 * (float64(result[i].Score) / maxCountFloat)
+fScore = float64(schedulerapi.MaxPriority) * (float64(result[i].Score) / maxCountFloat)
 } else {
 fScore = 0
 }
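CalculateNodeAffinityPriorityReduce, touched above, rescales the raw per-node sums of matched preference weights so that the best node ends up at MaxPriority. A standalone sketch of that reduce step; the raw scores are illustrative:

package main

import "fmt"

const maxPriority = 10 // stand-in for schedulerapi.MaxPriority

// reduceScores mirrors the loop in the hunk above: each raw score is scaled by
// maxPriority/maxCount, so the highest raw sum becomes maxPriority and nodes
// with no matching preferences stay at 0.
func reduceScores(raw []int, maxCount int) []int {
    maxCountFloat := float64(maxCount)
    out := make([]int, len(raw))
    for i, r := range raw {
        var fScore float64
        if maxCount > 0 {
            fScore = float64(maxPriority) * (float64(r) / maxCountFloat)
        }
        out[i] = int(fScore)
    }
    return out
}

func main() {
    // Raw sums of matched preferredDuringScheduling weights per node.
    fmt.Println(reduceScores([]int{5, 40, 20}, 40)) // [1 10 5]
}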
@@ -146,7 +146,7 @@ func TestNodeAffinityPriority(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
 {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
 },
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
 test: "only machine1 matches the preferred scheduling requirements of pod",
 },
 {

@@ -160,7 +160,7 @@ func TestNodeAffinityPriority(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: label5}},
 {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
 },
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine5", Score: 10}, {Host: "machine2", Score: 3}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine5", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 3}},
 test: "all machines matches the preferred scheduling requirements of pod but with different priorities ",
 },
 }

@@ -51,7 +51,7 @@ func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta i
 exists := labels.Set(node.Labels).Has(n.label)
 score := 0
 if (exists && n.presence) || (!exists && !n.presence) {
-score = 10
+score = schedulerapi.MaxPriority
 }
 return schedulerapi.HostPriority{
 Host: node.Name,

@@ -55,7 +55,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
 {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
 },
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
 label: "baz",
 presence: false,
 test: "no match found, presence false",

@@ -66,7 +66,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
 {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
 },
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
 label: "foo",
 presence: true,
 test: "one match found, presence true",

@@ -77,7 +77,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
 {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
 },
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
 label: "foo",
 presence: false,
 test: "one match found, presence false",

@@ -88,7 +88,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
 {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
 },
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
 label: "bar",
 presence: true,
 test: "two matches found, presence true",

@@ -99,7 +99,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
 {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
 },
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
 label: "bar",
 presence: false,
 test: "two matches found, presence false",
@@ -41,13 +41,13 @@ func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, node
 }
 }
 if controllerRef == nil {
-return schedulerapi.HostPriority{Host: node.Name, Score: 10}, nil
+return schedulerapi.HostPriority{Host: node.Name, Score: schedulerapi.MaxPriority}, nil
 }

 avoids, err := v1helper.GetAvoidPodsFromNodeAnnotations(node.Annotations)
 if err != nil {
 // If we cannot get annotation, assume it's schedulable there.
-return schedulerapi.HostPriority{Host: node.Name, Score: 10}, nil
+return schedulerapi.HostPriority{Host: node.Name, Score: schedulerapi.MaxPriority}, nil
 }
 for i := range avoids.PreferAvoidPods {
 avoid := &avoids.PreferAvoidPods[i]

@@ -55,5 +55,5 @@ func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, node
 return schedulerapi.HostPriority{Host: node.Name, Score: 0}, nil
 }
 }
-return schedulerapi.HostPriority{Host: node.Name, Score: 10}, nil
+return schedulerapi.HostPriority{Host: node.Name, Score: schedulerapi.MaxPriority}, nil
 }

@@ -96,7 +96,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
 },
 },
 nodes: testNodes,
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
 test: "pod managed by ReplicationController should avoid a node, this node get lowest priority score",
 },
 {

@@ -109,7 +109,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
 },
 },
 nodes: testNodes,
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
 test: "ownership by random controller should be ignored",
 },
 {

@@ -122,7 +122,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
 },
 },
 nodes: testNodes,
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
 test: "owner without Controller field set should be ignored",
 },
 {

@@ -135,7 +135,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
 },
 },
 nodes: testNodes,
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 10}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
 test: "pod managed by ReplicaSet should avoid a node, this node get lowest priority score",
 },
 }
@@ -30,13 +30,9 @@ import (
 "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
 )

-// The maximum priority value to give to a node
-// Priority values range from 0-maxPriority
-const maxPriority float32 = 10
-
 // When zone information is present, give 2/3 of the weighting to zone spreading, 1/3 to node spreading
 // TODO: Any way to justify this weighting?
-const zoneWeighting = 2.0 / 3.0
+const zoneWeighting float64 = 2.0 / 3.0

 type SelectorSpread struct {
 serviceLister algorithm.ServiceLister

@@ -103,15 +99,15 @@ func (s *SelectorSpread) CalculateSpreadPriority(pod *v1.Pod, nodeNameToInfo map
 selectors := s.getSelectors(pod)

 // Count similar pods by node
-countsByNodeName := make(map[string]float32, len(nodes))
-countsByZone := make(map[string]float32, 10)
-maxCountByNodeName := float32(0)
+countsByNodeName := make(map[string]float64, len(nodes))
+countsByZone := make(map[string]float64, 10)
+maxCountByNodeName := float64(0)
 countsByNodeNameLock := sync.Mutex{}

 if len(selectors) > 0 {
 processNodeFunc := func(i int) {
 nodeName := nodes[i].Name
-count := float32(0)
+count := float64(0)
 for _, nodePod := range nodeNameToInfo[nodeName].Pods() {
 if pod.Namespace != nodePod.Namespace {
 continue

@@ -153,7 +149,7 @@ func (s *SelectorSpread) CalculateSpreadPriority(pod *v1.Pod, nodeNameToInfo map
 // Aggregate by-zone information
 // Compute the maximum number of pods hosted in any zone
 haveZones := len(countsByZone) != 0
-maxCountByZone := float32(0)
+maxCountByZone := float64(0)
 for _, count := range countsByZone {
 if count > maxCountByZone {
 maxCountByZone = count

@@ -165,16 +161,16 @@ func (s *SelectorSpread) CalculateSpreadPriority(pod *v1.Pod, nodeNameToInfo map
 // 0 being the lowest priority and maxPriority being the highest
 for _, node := range nodes {
 // initializing to the default/max node score of maxPriority
-fScore := maxPriority
+fScore := float64(schedulerapi.MaxPriority)
 if maxCountByNodeName > 0 {
-fScore = maxPriority * ((maxCountByNodeName - countsByNodeName[node.Name]) / maxCountByNodeName)
+fScore = float64(schedulerapi.MaxPriority) * ((maxCountByNodeName - countsByNodeName[node.Name]) / maxCountByNodeName)
 }

 // If there is zone information present, incorporate it
 if haveZones {
 zoneId := utilnode.GetZoneKey(node)
 if zoneId != "" {
-zoneScore := maxPriority * ((maxCountByZone - countsByZone[zoneId]) / maxCountByZone)
+zoneScore := float64(schedulerapi.MaxPriority) * ((maxCountByZone - countsByZone[zoneId]) / maxCountByZone)
 fScore = (fScore * (1.0 - zoneWeighting)) + (zoneWeighting * zoneScore)
 }
 }
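The selector-spreading hunks above combine a per-node spreading score with a per-zone spreading score, weighting the zone component by zoneWeighting = 2/3. A standalone sketch of that combination; the pod counts are illustrative and maxPriority stands in for schedulerapi.MaxPriority:

package main

import "fmt"

const (
    maxPriority   = 10        // stand-in for schedulerapi.MaxPriority
    zoneWeighting = 2.0 / 3.0 // as in the diff: 2/3 zone spreading, 1/3 node spreading
)

// spreadScore mirrors the scoring in the hunk above: fewer matching pods on the
// node (and in its zone) means a score closer to maxPriority.
func spreadScore(nodeCount, maxNodeCount, zoneCount, maxZoneCount float64) int {
    fScore := float64(maxPriority)
    if maxNodeCount > 0 {
        fScore = float64(maxPriority) * ((maxNodeCount - nodeCount) / maxNodeCount)
    }
    if maxZoneCount > 0 {
        zoneScore := float64(maxPriority) * ((maxZoneCount - zoneCount) / maxZoneCount)
        fScore = (fScore * (1.0 - zoneWeighting)) + (zoneWeighting * zoneScore)
    }
    return int(fScore)
}

func main() {
    fmt.Println(spreadScore(0, 0, 0, 0)) // 10: nothing matching anywhere, both branches skipped
    fmt.Println(spreadScore(2, 2, 2, 2)) // 0: the busiest node in the busiest zone
    fmt.Println(spreadScore(0, 2, 1, 2)) // 6: empty node, but its zone already has a matching pod
}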
@@ -258,9 +254,9 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *v1.Pod, nodeNam
 // 0 being the lowest priority and maxPriority being the highest
 for node := range labeledNodes {
 // initializing to the default/max node score of maxPriority
-fScore := float32(maxPriority)
+fScore := float64(schedulerapi.MaxPriority)
 if numServicePods > 0 {
-fScore = maxPriority * (float32(numServicePods-podCounts[labeledNodes[node]]) / float32(numServicePods))
+fScore = float64(schedulerapi.MaxPriority) * (float64(numServicePods-podCounts[labeledNodes[node]]) / float64(numServicePods))
 }
 result = append(result, schedulerapi.HostPriority{Host: node, Score: int(fScore)})
 }

@@ -69,14 +69,14 @@ func TestSelectorSpreadPriority(t *testing.T) {
 {
 pod: new(v1.Pod),
 nodes: []string{"machine1", "machine2"},
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
 test: "nothing scheduled",
 },
 {
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
 pods: []*v1.Pod{{Spec: zone1Spec}},
 nodes: []string{"machine1", "machine2"},
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
 test: "no services",
 },
 {

@@ -84,7 +84,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 pods: []*v1.Pod{{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}},
 nodes: []string{"machine1", "machine2"},
 services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
 test: "different services",
 },
 {

@@ -95,7 +95,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 },
 nodes: []string{"machine1", "machine2"},
 services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
 test: "two pods, one service pod",
 },
 {

@@ -109,7 +109,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 },
 nodes: []string{"machine1", "machine2"},
 services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
 test: "five pods, one service pod in no namespace",
 },
 {

@@ -122,7 +122,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 },
 nodes: []string{"machine1", "machine2"},
 services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault}}},
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
 test: "four pods, one service pod in default namespace",
 },
 {

@@ -136,7 +136,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 },
 nodes: []string{"machine1", "machine2"},
 services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: "ns1"}}},
-expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
+expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
 test: "five pods, one service pod in specific namespace",
 },
 {

@@ -409,12 +409,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 {
 pod: new(v1.Pod),
 expectedList: []schedulerapi.HostPriority{
-{Host: nodeMachine1Zone1, Score: 10},
-{Host: nodeMachine1Zone2, Score: 10},
-{Host: nodeMachine2Zone2, Score: 10},
-{Host: nodeMachine1Zone3, Score: 10},
-{Host: nodeMachine2Zone3, Score: 10},
-{Host: nodeMachine3Zone3, Score: 10},
+{Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
+{Host: nodeMachine1Zone2, Score: schedulerapi.MaxPriority},
+{Host: nodeMachine2Zone2, Score: schedulerapi.MaxPriority},
+{Host: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
+{Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
+{Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
 },
 test: "nothing scheduled",
 },

@@ -422,12 +422,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 pod: buildPod("", labels1, nil),
 pods: []*v1.Pod{buildPod(nodeMachine1Zone1, nil, nil)},
 expectedList: []schedulerapi.HostPriority{
-{Host: nodeMachine1Zone1, Score: 10},
-{Host: nodeMachine1Zone2, Score: 10},
-{Host: nodeMachine2Zone2, Score: 10},
-{Host: nodeMachine1Zone3, Score: 10},
-{Host: nodeMachine2Zone3, Score: 10},
-{Host: nodeMachine3Zone3, Score: 10},
+{Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
+{Host: nodeMachine1Zone2, Score: schedulerapi.MaxPriority},
+{Host: nodeMachine2Zone2, Score: schedulerapi.MaxPriority},
+{Host: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
+{Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
+{Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
 },
 test: "no services",
 },

@@ -436,12 +436,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 pods: []*v1.Pod{buildPod(nodeMachine1Zone1, labels2, nil)},
 services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
 expectedList: []schedulerapi.HostPriority{
-{Host: nodeMachine1Zone1, Score: 10},
-{Host: nodeMachine1Zone2, Score: 10},
-{Host: nodeMachine2Zone2, Score: 10},
-{Host: nodeMachine1Zone3, Score: 10},
-{Host: nodeMachine2Zone3, Score: 10},
-{Host: nodeMachine3Zone3, Score: 10},
+{Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
+{Host: nodeMachine1Zone2, Score: schedulerapi.MaxPriority},
+{Host: nodeMachine2Zone2, Score: schedulerapi.MaxPriority},
+{Host: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
+{Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
+{Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
 },
 test: "different services",
 },

@@ -453,12 +453,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 },
 services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
 expectedList: []schedulerapi.HostPriority{
-{Host: nodeMachine1Zone1, Score: 10},
+{Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
 {Host: nodeMachine1Zone2, Score: 0}, // Already have pod on machine
 {Host: nodeMachine2Zone2, Score: 3}, // Already have pod in zone
-{Host: nodeMachine1Zone3, Score: 10},
-{Host: nodeMachine2Zone3, Score: 10},
-{Host: nodeMachine3Zone3, Score: 10},
+{Host: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
+{Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
+{Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
 },
 test: "two pods, 1 matching (in z2)",
 },

@@ -473,7 +473,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 },
 services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
 expectedList: []schedulerapi.HostPriority{
-{Host: nodeMachine1Zone1, Score: 10},
+{Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
 {Host: nodeMachine1Zone2, Score: 0}, // Pod on node
 {Host: nodeMachine2Zone2, Score: 0}, // Pod on node
 {Host: nodeMachine1Zone3, Score: 6}, // Pod in zone

@@ -536,12 +536,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 // We would probably prefer to see a bigger gap between putting a second
 // pod on m1.z2 and putting a pod on m2.z2, but the ordering is correct.
 // This is also consistent with what we have already.
-{Host: nodeMachine1Zone1, Score: 10}, // No pods in zone
-{Host: nodeMachine1Zone2, Score: 5}, // Pod on node
-{Host: nodeMachine2Zone2, Score: 6}, // Pod in zone
-{Host: nodeMachine1Zone3, Score: 0}, // Two pods on node
-{Host: nodeMachine2Zone3, Score: 3}, // Pod in zone
-{Host: nodeMachine3Zone3, Score: 3}, // Pod in zone
+{Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority}, // No pods in zone
+{Host: nodeMachine1Zone2, Score: 5}, // Pod on node
+{Host: nodeMachine2Zone2, Score: 6}, // Pod in zone
+{Host: nodeMachine1Zone3, Score: 0}, // Two pods on node
+{Host: nodeMachine2Zone3, Score: 3}, // Pod in zone
+{Host: nodeMachine3Zone3, Score: 3}, // Pod in zone
 },
 test: "Replication controller spreading (z1=0, z2=1, z3=2)",
 },

@@ -611,8 +611,8 @@ func TestZoneSpreadPriority(t *testing.T) {
 {
 pod: new(v1.Pod),
 nodes: labeledNodes,
-expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10},
-{Host: "machine21", Score: 10}, {Host: "machine22", Score: 10},
+expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority},
+{Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority},
 {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
 test: "nothing scheduled",
 },

@@ -620,8 +620,8 @@ func TestZoneSpreadPriority(t *testing.T) {
 pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
 pods: []*v1.Pod{{Spec: zone1Spec}},
 nodes: labeledNodes,
-expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10},
-{Host: "machine21", Score: 10}, {Host: "machine22", Score: 10},
+expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority},
+{Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority},
 {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
 test: "no services",
 },

@@ -630,8 +630,8 @@ func TestZoneSpreadPriority(t *testing.T) {
 pods: []*v1.Pod{{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}},
 nodes: labeledNodes,
 services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
-expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10},
-{Host: "machine21", Score: 10}, {Host: "machine22", Score: 10},
+expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority},
+{Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority},
 {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
 test: "different services",
 },

@@ -644,7 +644,7 @@ func TestZoneSpreadPriority(t *testing.T) {
 },
 nodes: labeledNodes,
 services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
-expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10},
+expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority},
 {Host: "machine21", Score: 0}, {Host: "machine22", Score: 0},
 {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
 test: "three pods, one service pod",

@@ -674,7 +674,7 @@ func TestZoneSpreadPriority(t *testing.T) {
 nodes: labeledNodes,
 services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault}}},
 expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 0}, {Host: "machine12", Score: 0},
-{Host: "machine21", Score: 10}, {Host: "machine22", Score: 10},
+{Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority},
 {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
 test: "three service label match pods in different namespaces",
 },
@@ -83,13 +83,10 @@ func ComputeTaintTolerationPriorityReduce(pod *v1.Pod, meta interface{}, nodeNam
 }
 maxCountFloat := float64(maxCount)

-// The maximum priority value to give to a node
-// Priority values range from 0 - maxPriority
-const maxPriority = float64(10)
 for i := range result {
-fScore := maxPriority
+fScore := float64(schedulerapi.MaxPriority)
 if maxCountFloat > 0 {
-fScore = (1.0 - float64(result[i].Score)/maxCountFloat) * 10
+fScore = (1.0 - float64(result[i].Score)/maxCountFloat) * float64(schedulerapi.MaxPriority)
 }
 if glog.V(10) {
 // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
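The taint-toleration reduce step above turns the count of intolerable taints per node into a score: nodes whose taints the pod fully tolerates get MaxPriority, and the node with the most intolerable taints gets 0, which matches the expectations in the test hunks below. A standalone sketch with illustrative taint counts:

package main

import "fmt"

const maxPriority = 10 // stand-in for schedulerapi.MaxPriority

// taintScore mirrors the hunk above: intolerableTaints is the number of taints on
// the node that the pod does not tolerate, maxCount the largest such number across
// all candidate nodes.
func taintScore(intolerableTaints, maxCount int) int {
    fScore := float64(maxPriority)
    if maxCount > 0 {
        fScore = (1.0 - float64(intolerableTaints)/float64(maxCount)) * float64(maxPriority)
    }
    return int(fScore)
}

func main() {
    fmt.Println(taintScore(0, 2)) // 10: every taint on the node is tolerated
    fmt.Println(taintScore(1, 2)) // 5
    fmt.Println(taintScore(2, 2)) // 0
}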
@@ -78,7 +78,7 @@ func TestTaintAndToleration(t *testing.T) {
 }}),
 },
 expectedList: []schedulerapi.HostPriority{
-{Host: "nodeA", Score: 10},
+{Host: "nodeA", Score: schedulerapi.MaxPriority},
 {Host: "nodeB", Score: 0},
 },
 },

@@ -120,9 +120,9 @@ func TestTaintAndToleration(t *testing.T) {
 }),
 },
 expectedList: []schedulerapi.HostPriority{
-{Host: "nodeA", Score: 10},
-{Host: "nodeB", Score: 10},
-{Host: "nodeC", Score: 10},
+{Host: "nodeA", Score: schedulerapi.MaxPriority},
+{Host: "nodeB", Score: schedulerapi.MaxPriority},
+{Host: "nodeC", Score: schedulerapi.MaxPriority},
 },
 },
 // the count of taints on a node that are not tolerated by pod, matters.

@@ -156,7 +156,7 @@ func TestTaintAndToleration(t *testing.T) {
 }),
 },
 expectedList: []schedulerapi.HostPriority{
-{Host: "nodeA", Score: 10},
+{Host: "nodeA", Score: schedulerapi.MaxPriority},
 {Host: "nodeB", Score: 5},
 {Host: "nodeC", Score: 0},
 },

@@ -199,8 +199,8 @@ func TestTaintAndToleration(t *testing.T) {
 }),
 },
 expectedList: []schedulerapi.HostPriority{
-{Host: "nodeA", Score: 10},
-{Host: "nodeB", Score: 10},
+{Host: "nodeA", Score: schedulerapi.MaxPriority},
+{Host: "nodeB", Score: schedulerapi.MaxPriority},
 {Host: "nodeC", Score: 0},
 },
 },

@@ -220,7 +220,7 @@ func TestTaintAndToleration(t *testing.T) {
 }),
 },
 expectedList: []schedulerapi.HostPriority{
-{Host: "nodeA", Score: 10},
+{Host: "nodeA", Score: schedulerapi.MaxPriority},
 {Host: "nodeB", Score: 0},
 },
 },