diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/plugin/pkg/scheduler/algorithm/predicates/predicates.go
index 16e878ad4e..53a22dde0d 100644
--- a/plugin/pkg/scheduler/algorithm/predicates/predicates.go
+++ b/plugin/pkg/scheduler/algorithm/predicates/predicates.go
@@ -266,9 +266,9 @@ func getResourceRequest(pod *api.Pod) resourceRequest {
 	return result
 }
 
-func CheckPodsExceedingFreeResources(pods []*api.Pod, capacity api.ResourceList) (fitting []*api.Pod, notFittingCPU, notFittingMemory []*api.Pod) {
-	totalMilliCPU := capacity.Cpu().MilliValue()
-	totalMemory := capacity.Memory().Value()
+func CheckPodsExceedingFreeResources(pods []*api.Pod, allocatable api.ResourceList) (fitting []*api.Pod, notFittingCPU, notFittingMemory []*api.Pod) {
+	totalMilliCPU := allocatable.Cpu().MilliValue()
+	totalMemory := allocatable.Memory().Value()
 	milliCPURequested := int64(0)
 	memoryRequested := int64(0)
 	for _, pod := range pods {
@@ -304,8 +304,10 @@ func (r *ResourceFit) PodFitsResources(pod *api.Pod, existingPods []*api.Pod, no
 		return false, err
 	}
 
-	if int64(len(existingPods))+1 > info.Status.Capacity.Pods().Value() {
-		glog.V(10).Infof("Cannot schedule Pod %+v, because Node %+v is full, running %v out of %v Pods.", podName(pod), node, len(existingPods), info.Status.Capacity.Pods().Value())
+	allocatable := info.Status.Allocatable
+
+	if int64(len(existingPods))+1 > allocatable.Pods().Value() {
+		glog.V(10).Infof("Cannot schedule Pod %+v, because Node %+v is full, running %v out of %v Pods.", podName(pod), node, len(existingPods), allocatable.Pods().Value())
 		return false, ErrExceededMaxPodNumber
 	}
 
@@ -315,7 +317,7 @@ func (r *ResourceFit) PodFitsResources(pod *api.Pod, existingPods []*api.Pod, no
 	}
 
 	pods := append(existingPods, pod)
-	_, exceedingCPU, exceedingMemory := CheckPodsExceedingFreeResources(pods, info.Status.Capacity)
+	_, exceedingCPU, exceedingMemory := CheckPodsExceedingFreeResources(pods, allocatable)
 	if len(exceedingCPU) > 0 {
 		glog.V(10).Infof("Cannot schedule Pod %+v, because Node %v does not have sufficient CPU", podName(pod), node)
 		return false, ErrInsufficientFreeCPU
@@ -324,7 +326,7 @@ func (r *ResourceFit) PodFitsResources(pod *api.Pod, existingPods []*api.Pod, no
 		glog.V(10).Infof("Cannot schedule Pod %+v, because Node %v does not have sufficient Memory", podName(pod), node)
 		return false, ErrInsufficientFreeMemory
 	}
-	glog.V(10).Infof("Schedule Pod %+v on Node %+v is allowed, Node is running only %v out of %v Pods.", podName(pod), node, len(pods)-1, info.Status.Capacity.Pods().Value())
+	glog.V(10).Infof("Schedule Pod %+v on Node %+v is allowed, Node is running only %v out of %v Pods.", podName(pod), node, len(pods)-1, allocatable.Pods().Value())
 	return true, nil
 }
diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go b/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go
index 544fee705b..3b6df4cfa5 100644
--- a/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go
+++ b/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go
@@ -54,6 +54,14 @@ func makeResources(milliCPU int64, memory int64, pods int64) api.NodeResources {
 	}
 }
 
+func makeAllocatableResources(milliCPU int64, memory int64, pods int64) api.ResourceList {
+	return api.ResourceList{
+		api.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+		api.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
+		api.ResourcePods:   *resource.NewQuantity(pods, resource.DecimalSI),
+	}
+}
+
 func newResourcePod(usage ...resourceRequest) *api.Pod {
 	containers := []api.Container{}
 	for _, req := range usage {
@@ -130,7 +138,7 @@ func TestPodFitsResources(t *testing.T) {
 	}
 
 	for _, test := range enoughPodsTests {
-		node := api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 32).Capacity}}
+		node := api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 32)}}
 		fit := ResourceFit{FakeNodeInfo(node)}
 		fits, err := fit.PodFitsResources(test.pod, test.existingPods, "machine")
@@ -178,7 +186,7 @@ func TestPodFitsResources(t *testing.T) {
 		},
 	}
 	for _, test := range notEnoughPodsTests {
-		node := api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 1).Capacity}}
+		node := api.Node{Status: api.NodeStatus{Capacity: api.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 1)}}
 		fit := ResourceFit{FakeNodeInfo(node)}
 		fits, err := fit.PodFitsResources(test.pod, test.existingPods, "machine")
diff --git a/plugin/pkg/scheduler/algorithm/priorities/priorities.go b/plugin/pkg/scheduler/algorithm/priorities/priorities.go
index 112387e6d3..1aebef55ca 100644
--- a/plugin/pkg/scheduler/algorithm/priorities/priorities.go
+++ b/plugin/pkg/scheduler/algorithm/priorities/priorities.go
@@ -76,8 +76,8 @@ func getNonzeroRequests(requests *api.ResourceList) (int64, int64) {
 func calculateResourceOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) schedulerapi.HostPriority {
 	totalMilliCPU := int64(0)
 	totalMemory := int64(0)
-	capacityMilliCPU := node.Status.Capacity.Cpu().MilliValue()
-	capacityMemory := node.Status.Capacity.Memory().Value()
+	capacityMilliCPU := node.Status.Allocatable.Cpu().MilliValue()
+	capacityMemory := node.Status.Allocatable.Memory().Value()
 
 	for _, existingPod := range pods {
 		for _, container := range existingPod.Spec.Containers {
@@ -208,8 +208,8 @@ func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*ap
 		totalMemory += memory
 	}
 
-	capacityMilliCPU := node.Status.Capacity.Cpu().MilliValue()
-	capacityMemory := node.Status.Capacity.Memory().Value()
+	capacityMilliCPU := node.Status.Allocatable.Cpu().MilliValue()
+	capacityMemory := node.Status.Allocatable.Memory().Value()
 	cpuFraction := fractionOfCapacity(totalMilliCPU, capacityMilliCPU)
 	memoryFraction := fractionOfCapacity(totalMemory, capacityMemory)
 
diff --git a/plugin/pkg/scheduler/algorithm/priorities/priorities_test.go b/plugin/pkg/scheduler/algorithm/priorities/priorities_test.go
index 3d6207e5a5..7db6d2a120 100644
--- a/plugin/pkg/scheduler/algorithm/priorities/priorities_test.go
+++ b/plugin/pkg/scheduler/algorithm/priorities/priorities_test.go
@@ -38,6 +38,10 @@ func makeNode(node string, milliCPU, memory int64) api.Node {
 				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
 				"memory": *resource.NewQuantity(memory, resource.BinarySI),
 			},
+			Allocatable: api.ResourceList{
+				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+				"memory": *resource.NewQuantity(memory, resource.BinarySI),
+			},
 		},
 	}
 }
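
A note on the intent of this patch, with a minimal standalone sketch (not part of the change; the res type, the fits helper, and all numbers below are hypothetical): a node's Allocatable is its raw Capacity minus resources reserved for system daemons, so a pod that would fit under Capacity can still be correctly rejected once the scheduler budgets against Allocatable.

package main

import "fmt"

// res is a simplified stand-in for api.ResourceList.
type res struct {
	milliCPU, memory int64
}

// fits reports whether adding request to used stays within budget,
// mirroring the accounting in CheckPodsExceedingFreeResources above.
func fits(request, used, budget res) bool {
	return used.milliCPU+request.milliCPU <= budget.milliCPU &&
		used.memory+request.memory <= budget.memory
}

func main() {
	capacity := res{milliCPU: 10000, memory: 20 << 30}   // raw machine size
	allocatable := res{milliCPU: 9000, memory: 18 << 30} // capacity minus system reservation
	used := res{milliCPU: 8500, memory: 10 << 30}        // requests of pods already on the node
	request := res{milliCPU: 800, memory: 1 << 30}       // incoming pod

	fmt.Println(fits(request, used, capacity))    // true: would fit raw capacity
	fmt.Println(fits(request, used, allocatable)) // false: exceeds the allocatable budget
}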