mirror of https://github.com/k3s-io/k3s

Merge pull request #29186 from wojtek-t/cleanup_resource_request

Automatic merge from submit-queue

Reuse existing Resource struct instead of new resourceRequest

@davidopp

commit 9a7507c4db
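For reference, the `schedulercache.Resource` type this PR switches to is not shown in the diff; below is a minimal sketch of it, assuming only the three fields the hunks touch (the real definition in the scheduler cache package may carry more):

```go
// Sketch only: the actual definition lives in the scheduler's
// schedulercache package; field names are taken from their uses below.
package schedulercache

// Resource is a set of compute resources, in the units the scheduler
// tracks: milli-CPUs, bytes of memory, and whole GPUs.
type Resource struct {
	MilliCPU  int64
	Memory    int64
	NvidiaGPU int64
}
```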
@@ -68,7 +68,7 @@ func (c *CachedNodeInfo) GetNodeInfo(id string) (*api.Node, error) {
 
 // predicateMetadata is a type that is passed as metadata for predicate functions
 type predicateMetadata struct {
 	podBestEffort bool
-	podRequest    *resourceRequest
+	podRequest    *schedulercache.Resource
 	podPorts      map[int]bool
 }
@@ -402,28 +402,22 @@ func (c *VolumeZoneChecker) predicate(pod *api.Pod, meta interface{}, nodeInfo *
 	return true, nil
 }
 
-type resourceRequest struct {
-	milliCPU  int64
-	memory    int64
-	nvidiaGPU int64
-}
-
-func getResourceRequest(pod *api.Pod) *resourceRequest {
-	result := resourceRequest{}
+func getResourceRequest(pod *api.Pod) *schedulercache.Resource {
+	result := schedulercache.Resource{}
 	for _, container := range pod.Spec.Containers {
 		requests := container.Resources.Requests
-		result.memory += requests.Memory().Value()
-		result.milliCPU += requests.Cpu().MilliValue()
-		result.nvidiaGPU += requests.NvidiaGPU().Value()
+		result.Memory += requests.Memory().Value()
+		result.MilliCPU += requests.Cpu().MilliValue()
+		result.NvidiaGPU += requests.NvidiaGPU().Value()
 	}
 	// take max_resource(sum_pod, any_init_container)
 	for _, container := range pod.Spec.InitContainers {
 		requests := container.Resources.Requests
-		if mem := requests.Memory().Value(); mem > result.memory {
-			result.memory = mem
+		if mem := requests.Memory().Value(); mem > result.Memory {
+			result.Memory = mem
 		}
-		if cpu := requests.Cpu().MilliValue(); cpu > result.milliCPU {
-			result.milliCPU = cpu
+		if cpu := requests.Cpu().MilliValue(); cpu > result.MilliCPU {
+			result.MilliCPU = cpu
 		}
 	}
 	return &result
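The `take max_resource(sum_pod, any_init_container)` comment is the heart of this function: init containers run sequentially before the regular containers, so the pod's effective request per resource is the maximum of the sum over regular containers and the largest single init container. A minimal self-contained sketch of that rule, with hypothetical names and plain int64 values in place of api.Quantity (only CPU and memory for brevity):

```go
package main

import "fmt"

// resource mirrors two of the fields used in this diff.
type resource struct {
	milliCPU, memory int64
}

// effectiveRequest applies max_resource(sum_pod, any_init_container):
// sum the regular containers, then raise each field to the largest
// single init container, since init containers run one at a time.
func effectiveRequest(containers, initContainers []resource) resource {
	var result resource
	for _, c := range containers {
		result.milliCPU += c.milliCPU
		result.memory += c.memory
	}
	for _, ic := range initContainers {
		if ic.milliCPU > result.milliCPU {
			result.milliCPU = ic.milliCPU
		}
		if ic.memory > result.memory {
			result.memory = ic.memory
		}
	}
	return result
}

func main() {
	// Two 1-milliCPU containers sum to 2; a 3-milliCPU init container wins.
	fmt.Println(effectiveRequest(
		[]resource{{1, 1}, {1, 1}},
		[]resource{{3, 1}},
	)) // {3 2}
}
```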
@@ -444,29 +438,29 @@ func PodFitsResources(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.N
 			newInsufficientResourceError(podCountResourceName, 1, int64(len(nodeInfo.Pods())), int64(allowedPodNumber))
 	}
 
-	var podRequest *resourceRequest
+	var podRequest *schedulercache.Resource
 	if predicateMeta, ok := meta.(*predicateMetadata); ok {
 		podRequest = predicateMeta.podRequest
 	} else {
 		// We couldn't parse metadata - fallback to computing it.
 		podRequest = getResourceRequest(pod)
 	}
-	if podRequest.milliCPU == 0 && podRequest.memory == 0 && podRequest.nvidiaGPU == 0 {
+	if podRequest.MilliCPU == 0 && podRequest.Memory == 0 && podRequest.NvidiaGPU == 0 {
 		return true, nil
 	}
 
 	allocatable := nodeInfo.AllocatableResource()
-	if allocatable.MilliCPU < podRequest.milliCPU+nodeInfo.RequestedResource().MilliCPU {
+	if allocatable.MilliCPU < podRequest.MilliCPU+nodeInfo.RequestedResource().MilliCPU {
 		return false,
-			newInsufficientResourceError(cpuResourceName, podRequest.milliCPU, nodeInfo.RequestedResource().MilliCPU, allocatable.MilliCPU)
+			newInsufficientResourceError(cpuResourceName, podRequest.MilliCPU, nodeInfo.RequestedResource().MilliCPU, allocatable.MilliCPU)
 	}
-	if allocatable.Memory < podRequest.memory+nodeInfo.RequestedResource().Memory {
+	if allocatable.Memory < podRequest.Memory+nodeInfo.RequestedResource().Memory {
 		return false,
-			newInsufficientResourceError(memoryResourceName, podRequest.memory, nodeInfo.RequestedResource().Memory, allocatable.Memory)
+			newInsufficientResourceError(memoryResourceName, podRequest.Memory, nodeInfo.RequestedResource().Memory, allocatable.Memory)
 	}
-	if allocatable.NvidiaGPU < podRequest.nvidiaGPU+nodeInfo.RequestedResource().NvidiaGPU {
+	if allocatable.NvidiaGPU < podRequest.NvidiaGPU+nodeInfo.RequestedResource().NvidiaGPU {
 		return false,
-			newInsufficientResourceError(nvidiaGpuResourceName, podRequest.nvidiaGPU, nodeInfo.RequestedResource().NvidiaGPU, allocatable.NvidiaGPU)
+			newInsufficientResourceError(nvidiaGpuResourceName, podRequest.NvidiaGPU, nodeInfo.RequestedResource().NvidiaGPU, allocatable.NvidiaGPU)
 	}
 	if glog.V(10) {
 		// We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
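The type assertion on `meta` implements a compute-once pattern: the scheduler builds `predicateMetadata` once per pod and hands it to every per-node predicate call, so `getResourceRequest` does not have to run once per node; the `else` branch keeps the predicate correct when no metadata is supplied. A minimal sketch of the pattern under those assumptions (names here are illustrative, not the scheduler's):

```go
package main

import "fmt"

type request struct{ milliCPU int64 }

// computeCalls counts invocations to show the effect of precomputation.
var computeCalls int

// computeRequest stands in for getResourceRequest.
func computeRequest() *request {
	computeCalls++
	return &request{milliCPU: 100}
}

type metadata struct{ podRequest *request }

// fitsOnNode mirrors the shape of PodFitsResources: use precomputed
// metadata when available, otherwise fall back to computing it.
func fitsOnNode(meta interface{}, allocatableMilliCPU int64) bool {
	var req *request
	if m, ok := meta.(*metadata); ok {
		req = m.podRequest
	} else {
		req = computeRequest() // fallback: recompute per node
	}
	return req.milliCPU <= allocatableMilliCPU
}

func main() {
	meta := &metadata{podRequest: computeRequest()} // computed once per pod
	for _, alloc := range []int64{50, 100, 200} {   // evaluated per node
		fmt.Println(fitsOnNode(meta, alloc))
	}
	fmt.Println("compute calls:", computeCalls) // 1, not 3
}
```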
@@ -94,15 +94,15 @@ func makeAllocatableResources(milliCPU int64, memory int64, nvidiaGPUs int64, po
 	}
 }
 
-func newResourcePod(usage ...resourceRequest) *api.Pod {
+func newResourcePod(usage ...schedulercache.Resource) *api.Pod {
 	containers := []api.Container{}
 	for _, req := range usage {
 		containers = append(containers, api.Container{
 			Resources: api.ResourceRequirements{
 				Requests: api.ResourceList{
-					api.ResourceCPU:       *resource.NewMilliQuantity(req.milliCPU, resource.DecimalSI),
-					api.ResourceMemory:    *resource.NewQuantity(req.memory, resource.BinarySI),
-					api.ResourceNvidiaGPU: *resource.NewQuantity(req.nvidiaGPU, resource.DecimalSI),
+					api.ResourceCPU:       *resource.NewMilliQuantity(req.MilliCPU, resource.DecimalSI),
+					api.ResourceMemory:    *resource.NewQuantity(req.Memory, resource.BinarySI),
+					api.ResourceNvidiaGPU: *resource.NewQuantity(req.NvidiaGPU, resource.DecimalSI),
 				},
 			},
 		})
@@ -114,7 +114,7 @@ func newResourcePod(usage ...resourceRequest) *api.Pod {
 	}
 }
 
-func newResourceInitPod(pod *api.Pod, usage ...resourceRequest) *api.Pod {
+func newResourceInitPod(pod *api.Pod, usage ...schedulercache.Resource) *api.Pod {
 	pod.Spec.InitContainers = newResourcePod(usage...).Spec.Containers
 	return pod
 }
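Taken together, these two helpers let the test table below express pods as bare `schedulercache.Resource` values: `newResourceInitPod` reuses `newResourcePod` to build containers and moves them into `Spec.InitContainers`. For example, the "init container cpu" case below constructs its pod as:

```go
// Main container requests 1 milliCPU / 1 byte; one init container
// requests 3 milliCPU / 1 byte, so the effective CPU request is 3.
pod := newResourceInitPod(
	newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}),
	schedulercache.Resource{MilliCPU: 3, Memory: 1},
)
```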
@@ -130,103 +130,103 @@ func TestPodFitsResources(t *testing.T) {
 		{
 			pod: &api.Pod{},
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(resourceRequest{milliCPU: 10, memory: 20})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 10, Memory: 20})),
 			fits: true,
 			test: "no resources requested always fits",
 			wErr: nil,
 		},
 		{
-			pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}),
+			pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(resourceRequest{milliCPU: 10, memory: 20})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 10, Memory: 20})),
 			fits: false,
 			test: "too many resources fails",
 			wErr: newInsufficientResourceError(cpuResourceName, 1, 10, 10),
 		},
 		{
-			pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 3, memory: 1}),
+			pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 3, Memory: 1}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(resourceRequest{milliCPU: 8, memory: 19})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 8, Memory: 19})),
 			fits: false,
 			test: "too many resources fails due to init container cpu",
 			wErr: newInsufficientResourceError(cpuResourceName, 3, 8, 10),
 		},
 		{
-			pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 3, memory: 1}, resourceRequest{milliCPU: 2, memory: 1}),
+			pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 3, Memory: 1}, schedulercache.Resource{MilliCPU: 2, Memory: 1}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(resourceRequest{milliCPU: 8, memory: 19})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 8, Memory: 19})),
 			fits: false,
 			test: "too many resources fails due to highest init container cpu",
 			wErr: newInsufficientResourceError(cpuResourceName, 3, 8, 10),
 		},
 		{
-			pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 3}),
+			pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 1, Memory: 3}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})),
 			fits: false,
 			test: "too many resources fails due to init container memory",
 			wErr: newInsufficientResourceError(memoryResourceName, 3, 19, 20),
 		},
 		{
-			pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 3}, resourceRequest{milliCPU: 1, memory: 2}),
+			pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 1, Memory: 3}, schedulercache.Resource{MilliCPU: 1, Memory: 2}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})),
 			fits: false,
 			test: "too many resources fails due to highest init container memory",
 			wErr: newInsufficientResourceError(memoryResourceName, 3, 19, 20),
 		},
 		{
-			pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 1}),
+			pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 1, Memory: 1}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})),
 			fits: true,
 			test: "init container fits because it's the max, not sum, of containers and init containers",
 			wErr: nil,
 		},
 		{
-			pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 1}, resourceRequest{milliCPU: 1, memory: 1}),
+			pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 1, Memory: 1}, schedulercache.Resource{MilliCPU: 1, Memory: 1}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})),
 			fits: true,
 			test: "multiple init containers fit because it's the max, not sum, of containers and init containers",
 			wErr: nil,
 		},
 		{
-			pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}),
+			pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(resourceRequest{milliCPU: 5, memory: 5})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 5})),
 			fits: true,
 			test: "both resources fit",
 			wErr: nil,
 		},
 		{
-			pod: newResourcePod(resourceRequest{milliCPU: 2, memory: 1}),
+			pod: newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 1}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(resourceRequest{milliCPU: 9, memory: 5})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 5})),
 			fits: false,
 			test: "one resource memory fits",
 			wErr: newInsufficientResourceError(cpuResourceName, 2, 9, 10),
 		},
 		{
-			pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 2}),
+			pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 2}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(resourceRequest{milliCPU: 5, memory: 19})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})),
 			fits: false,
 			test: "one resource cpu fits",
 			wErr: newInsufficientResourceError(memoryResourceName, 2, 19, 20),
 		},
 		{
-			pod: newResourcePod(resourceRequest{milliCPU: 5, memory: 1}),
+			pod: newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 1}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(resourceRequest{milliCPU: 5, memory: 19})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})),
 			fits: true,
 			test: "equal edge case",
 			wErr: nil,
 		},
 		{
-			pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 4, memory: 1}), resourceRequest{milliCPU: 5, memory: 1}),
+			pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 4, Memory: 1}), schedulercache.Resource{MilliCPU: 5, Memory: 1}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(resourceRequest{milliCPU: 5, memory: 19})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})),
 			fits: true,
 			test: "equal edge case for init container",
 			wErr: nil,
@@ -256,31 +256,31 @@ func TestPodFitsResources(t *testing.T) {
 		{
 			pod: &api.Pod{},
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(resourceRequest{milliCPU: 10, memory: 20})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 10, Memory: 20})),
 			fits: false,
 			test: "even without specified resources predicate fails when there's no space for additional pod",
 			wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
 		},
 		{
-			pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}),
+			pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(resourceRequest{milliCPU: 5, memory: 5})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 5})),
 			fits: false,
 			test: "even if both resources fit predicate fails when there's no space for additional pod",
 			wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
 		},
 		{
-			pod: newResourcePod(resourceRequest{milliCPU: 5, memory: 1}),
+			pod: newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 1}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(resourceRequest{milliCPU: 5, memory: 19})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})),
 			fits: false,
 			test: "even for equal edge case predicate fails when there's no space for additional pod",
 			wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
 		},
 		{
-			pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 5, memory: 1}), resourceRequest{milliCPU: 5, memory: 1}),
+			pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 1}), schedulercache.Resource{MilliCPU: 5, Memory: 1}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(resourceRequest{milliCPU: 5, memory: 19})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})),
 			fits: false,
 			test: "even for equal edge case predicate fails when there's no space for additional pod due to init container",
 			wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
@@ -1701,7 +1701,7 @@ func TestRunGeneralPredicates(t *testing.T) {
 		{
 			pod: &api.Pod{},
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})),
 			node: &api.Node{
 				ObjectMeta: api.ObjectMeta{Name: "machine1"},
 				Status:     api.NodeStatus{Capacity: makeResources(10, 20, 0, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32)},
@@ -1711,9 +1711,9 @@ func TestRunGeneralPredicates(t *testing.T) {
 			test: "no resources/port/host requested always fits",
 		},
 		{
-			pod: newResourcePod(resourceRequest{milliCPU: 8, memory: 10}),
+			pod: newResourcePod(schedulercache.Resource{MilliCPU: 8, Memory: 10}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(resourceRequest{milliCPU: 5, memory: 19})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})),
 			node: &api.Node{
 				ObjectMeta: api.ObjectMeta{Name: "machine1"},
 				Status:     api.NodeStatus{Capacity: makeResources(10, 20, 0, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32)},
@@ -1725,25 +1725,25 @@ func TestRunGeneralPredicates(t *testing.T) {
 		{
 			pod: &api.Pod{},
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})),
 			node: &api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 1, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 1, 32)}},
 			fits: true,
 			wErr: nil,
 			test: "no resources/port/host requested always fits on GPU machine",
 		},
 		{
-			pod: newResourcePod(resourceRequest{milliCPU: 3, memory: 1, nvidiaGPU: 1}),
+			pod: newResourcePod(schedulercache.Resource{MilliCPU: 3, Memory: 1, NvidiaGPU: 1}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(resourceRequest{milliCPU: 5, memory: 10, nvidiaGPU: 1})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 10, NvidiaGPU: 1})),
 			node: &api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 1, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 1, 32)}},
 			fits: false,
 			wErr: newInsufficientResourceError("NvidiaGpu", 1, 1, 1),
 			test: "not enough GPU resource",
 		},
 		{
-			pod: newResourcePod(resourceRequest{milliCPU: 3, memory: 1, nvidiaGPU: 1}),
+			pod: newResourcePod(schedulercache.Resource{MilliCPU: 3, Memory: 1, NvidiaGPU: 1}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(resourceRequest{milliCPU: 5, memory: 10, nvidiaGPU: 0})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 10, NvidiaGPU: 0})),
 			node: &api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 1, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 1, 32)}},
 			fits: true,
 			wErr: nil,