mirror of https://github.com/k3s-io/k3s
Merge pull request #20977 from hongchaodeng/prio
priorities: move getNonzeroRequests() into util/ package

commit 9ceed60b1a
plugin/pkg/scheduler/algorithm/priorities/priorities.go

@@ -21,9 +21,9 @@ import (
 
 	"github.com/golang/glog"
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
+	priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
 	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
 )
@@ -42,36 +42,6 @@ func calculateScore(requested int64, capacity int64, node string) int {
 	return int(((capacity - requested) * 10) / capacity)
 }
 
-// For each of these resources, a pod that doesn't request the resource explicitly
-// will be treated as having requested the amount indicated below, for the purpose
-// of computing priority only. This ensures that when scheduling zero-request pods, such
-// pods will not all be scheduled to the machine with the smallest in-use request,
-// and that when scheduling regular pods, such pods will not see zero-request pods as
-// consuming no resources whatsoever. We chose these values to be similar to the
-// resources that we give to cluster addon pods (#10653). But they are pretty arbitrary.
-// As described in #11713, we use request instead of limit to deal with resource requirements.
-const defaultMilliCpuRequest int64 = 100             // 0.1 core
-const defaultMemoryRequest int64 = 200 * 1024 * 1024 // 200 MB
-
-// TODO: Consider setting default as a fixed fraction of machine capacity (take "capacity api.ResourceList"
-// as an additional argument here) rather than using constants
-func getNonzeroRequests(requests *api.ResourceList) (int64, int64) {
-	var out_millicpu, out_memory int64
-	// Override if un-set, but not if explicitly set to zero
-	if (*requests.Cpu() == resource.Quantity{}) {
-		out_millicpu = defaultMilliCpuRequest
-	} else {
-		out_millicpu = requests.Cpu().MilliValue()
-	}
-	// Override if un-set, but not if explicitly set to zero
-	if (*requests.Memory() == resource.Quantity{}) {
-		out_memory = defaultMemoryRequest
-	} else {
-		out_memory = requests.Memory().Value()
-	}
-	return out_millicpu, out_memory
-}
-
 // Calculate the resource occupancy on a node. 'node' has information about the resources on the node.
 // 'pods' is a list of pods currently scheduled on the node.
 func calculateResourceOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) schedulerapi.HostPriority {
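Note: the block removed above is relocated, not deleted; the defaults and the helper reappear, exported, in the new util package at the bottom of this diff. For a feel of how the defaults interact with calculateScore, here is a small standalone sketch. The capacity numbers are hypothetical, and the helper is simplified: the upstream version also takes a node name and logs, which is dropped here.

package main

import "fmt"

// Simplified mirror of calculateScore from the hunk above: higher scores
// mean more free capacity remaining on the node (0-10 scale).
func calculateScore(requested, capacity int64) int {
	if capacity == 0 { // guard added for the sketch
		return 0
	}
	return int(((capacity - requested) * 10) / capacity)
}

func main() {
	const defaultMilliCpuRequest int64 = 100 // 0.1 core, as in the moved constants

	// On a hypothetical 2000m node, a zero-request pod is still charged the
	// 100m default, so it lowers the CPU score from 10 to 9 instead of
	// leaving the node looking completely empty.
	fmt.Println(calculateScore(defaultMilliCpuRequest, 2000)) // ((2000-100)*10)/2000 = 9
	fmt.Println(calculateScore(0, 2000))                      // 10
}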
@@ -82,7 +52,7 @@ func calculateResourceOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) sc
 
 	for _, existingPod := range pods {
 		for _, container := range existingPod.Spec.Containers {
-			cpu, memory := getNonzeroRequests(&container.Resources.Requests)
+			cpu, memory := priorityutil.GetNonzeroRequests(&container.Resources.Requests)
 			totalMilliCPU += cpu
 			totalMemory += memory
 		}
@@ -90,7 +60,7 @@ func calculateResourceOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) sc
 	// Add the resources requested by the current pod being scheduled.
 	// This also helps differentiate between differently sized, but empty, nodes.
 	for _, container := range pod.Spec.Containers {
-		cpu, memory := getNonzeroRequests(&container.Resources.Requests)
+		cpu, memory := priorityutil.GetNonzeroRequests(&container.Resources.Requests)
 		totalMilliCPU += cpu
 		totalMemory += memory
 	}
@@ -268,7 +238,7 @@ func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*ap
 	score := int(0)
 	for _, existingPod := range pods {
 		for _, container := range existingPod.Spec.Containers {
-			cpu, memory := getNonzeroRequests(&container.Resources.Requests)
+			cpu, memory := priorityutil.GetNonzeroRequests(&container.Resources.Requests)
 			totalMilliCPU += cpu
 			totalMemory += memory
 		}
@@ -276,7 +246,7 @@ func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*ap
 	// Add the resources requested by the current pod being scheduled.
 	// This also helps differentiate between differently sized, but empty, nodes.
 	for _, container := range pod.Spec.Containers {
-		cpu, memory := getNonzeroRequests(&container.Resources.Requests)
+		cpu, memory := priorityutil.GetNonzeroRequests(&container.Resources.Requests)
 		totalMilliCPU += cpu
 		totalMemory += memory
 	}
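Each patched call site feeds the summed, nonzero-defaulted requests into calculateScore once per resource, and calculateResourceOccupancy then folds the CPU and memory scores into one host score (by averaging, in the upstream code of this era). A sketch of that flow under assumed node numbers:

package main

import "fmt"

// Same simplified scoring helper as in the previous sketch.
func calculateScore(requested, capacity int64) int {
	if capacity == 0 {
		return 0
	}
	return int(((capacity - requested) * 10) / capacity)
}

func main() {
	// Hypothetical node: 2 cores, 4 GiB.
	var capacityMilliCPU int64 = 2000
	var capacityMemory int64 = 4 * 1024 * 1024 * 1024

	// Totals as the patched loops would compute them: one zero-request pod
	// charged the defaults (100m / 200 MB), plus an incoming pod asking for
	// 500m / 1 GiB.
	totalMilliCPU := int64(100 + 500)
	totalMemory := int64(200*1024*1024 + 1*1024*1024*1024)

	cpuScore := calculateScore(totalMilliCPU, capacityMilliCPU)       // 7
	memoryScore := calculateScore(totalMemory, capacityMemory)        // 7

	// Fold the two dimensions into one host score by averaging.
	fmt.Println((cpuScore + memoryScore) / 2) // 7
}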
plugin/pkg/scheduler/algorithm/priorities/priorities_test.go

@@ -26,6 +26,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/plugin/pkg/scheduler"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
+	priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
 	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
 )
@@ -62,9 +63,9 @@ func TestZeroRequest(t *testing.T) {
 				Resources: api.ResourceRequirements{
 					Requests: api.ResourceList{
 						"cpu": resource.MustParse(
-							strconv.FormatInt(defaultMilliCpuRequest, 10) + "m"),
+							strconv.FormatInt(priorityutil.DefaultMilliCpuRequest, 10) + "m"),
 						"memory": resource.MustParse(
-							strconv.FormatInt(defaultMemoryRequest, 10)),
+							strconv.FormatInt(priorityutil.DefaultMemoryRequest, 10)),
 					},
 				},
 			},
@@ -79,9 +80,9 @@ func TestZeroRequest(t *testing.T) {
 				Resources: api.ResourceRequirements{
 					Requests: api.ResourceList{
 						"cpu": resource.MustParse(
-							strconv.FormatInt(defaultMilliCpuRequest*3, 10) + "m"),
+							strconv.FormatInt(priorityutil.DefaultMilliCpuRequest*3, 10) + "m"),
 						"memory": resource.MustParse(
-							strconv.FormatInt(defaultMemoryRequest*3, 10)),
+							strconv.FormatInt(priorityutil.DefaultMemoryRequest*3, 10)),
 					},
 				},
 			},
@@ -102,7 +103,7 @@ func TestZeroRequest(t *testing.T) {
 		// and when the zero-request pod is the one being scheduled.
 		{
 			pod: &api.Pod{Spec: noResources},
-			nodes: []api.Node{makeNode("machine1", 1000, defaultMemoryRequest*10), makeNode("machine2", 1000, defaultMemoryRequest*10)},
+			nodes: []api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
 			test: "test priority of zero-request pod with machine with zero-request pod",
 			pods: []*api.Pod{
 				{Spec: large1}, {Spec: noResources1},
@@ -111,7 +112,7 @@ func TestZeroRequest(t *testing.T) {
 		},
 		{
 			pod: &api.Pod{Spec: small},
-			nodes: []api.Node{makeNode("machine1", 1000, defaultMemoryRequest*10), makeNode("machine2", 1000, defaultMemoryRequest*10)},
+			nodes: []api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
 			test: "test priority of nonzero-request pod with machine with zero-request pod",
 			pods: []*api.Pod{
 				{Spec: large1}, {Spec: noResources1},
@@ -121,7 +122,7 @@ func TestZeroRequest(t *testing.T) {
 		// The point of this test is to verify that we're not just getting the same score no matter what we schedule.
 		{
 			pod: &api.Pod{Spec: large},
-			nodes: []api.Node{makeNode("machine1", 1000, defaultMemoryRequest*10), makeNode("machine2", 1000, defaultMemoryRequest*10)},
+			nodes: []api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
 			test: "test priority of larger pod with machine with zero-request pod",
 			pods: []*api.Pod{
 				{Spec: large1}, {Spec: noResources1},
plugin/pkg/scheduler/algorithm/priorities/util/ (new file)

@@ -0,0 +1,52 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/resource"
+)
+
+// For each of these resources, a pod that doesn't request the resource explicitly
+// will be treated as having requested the amount indicated below, for the purpose
+// of computing priority only. This ensures that when scheduling zero-request pods, such
+// pods will not all be scheduled to the machine with the smallest in-use request,
+// and that when scheduling regular pods, such pods will not see zero-request pods as
+// consuming no resources whatsoever. We chose these values to be similar to the
+// resources that we give to cluster addon pods (#10653). But they are pretty arbitrary.
+// As described in #11713, we use request instead of limit to deal with resource requirements.
+const DefaultMilliCpuRequest int64 = 100             // 0.1 core
+const DefaultMemoryRequest int64 = 200 * 1024 * 1024 // 200 MB
+
+// TODO: Consider setting default as a fixed fraction of machine capacity (take "capacity api.ResourceList"
+// as an additional argument here) rather than using constants
+func GetNonzeroRequests(requests *api.ResourceList) (int64, int64) {
+	var out_millicpu, out_memory int64
+	// Override if un-set, but not if explicitly set to zero
+	if (*requests.Cpu() == resource.Quantity{}) {
+		out_millicpu = DefaultMilliCpuRequest
+	} else {
+		out_millicpu = requests.Cpu().MilliValue()
+	}
+	// Override if un-set, but not if explicitly set to zero
+	if (*requests.Memory() == resource.Quantity{}) {
+		out_memory = DefaultMemoryRequest
+	} else {
+		out_memory = requests.Memory().Value()
+	}
+	return out_millicpu, out_memory
+}
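Since the helper is now exported, other scheduler code can call it directly. A minimal usage sketch, assuming the k8s.io/kubernetes source tree of this era on the GOPATH; the expected values follow from the defaulting comment above ("Override if un-set, but not if explicitly set to zero"):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
	priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
)

func main() {
	// Nothing requested: both defaults kick in (100 millicores, 200 MB).
	unset := api.ResourceList{}
	cpu, mem := priorityutil.GetNonzeroRequests(&unset)
	fmt.Println(cpu, mem) // expect 100 209715200, the documented defaults

	// Explicitly zero: kept as zero, not overridden by the defaults.
	zero := api.ResourceList{
		api.ResourceCPU:    resource.MustParse("0"),
		api.ResourceMemory: resource.MustParse("0"),
	}
	cpu, mem = priorityutil.GetNonzeroRequests(&zero)
	fmt.Println(cpu, mem) // expect 0 0
}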