Merge pull request #29186 from wojtek-t/cleanup_resource_request

Automatic merge from submit-queue

Reuse existing Resource struct instead of new resourceRequest

@davidopp
k8s-merge-robot 2016-07-19 07:31:19 -07:00 committed by GitHub
commit 9a7507c4db
2 changed files with 63 additions and 69 deletions
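
The change drops the scheduler-local resourceRequest type and reuses the Resource struct that already exists in the schedulercache package. As a minimal sketch of the shape this PR relies on (field names are taken from the diff below; the real definition in schedulercache may carry additional fields and methods):

package schedulercache

// Resource is the compute-resource accounting the scheduler cache keeps per
// pod and per node. Only the fields exercised by this PR are shown here.
type Resource struct {
    MilliCPU  int64
    Memory    int64
    NvidiaGPU int64
}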

View File

@@ -68,7 +68,7 @@ func (c *CachedNodeInfo) GetNodeInfo(id string) (*api.Node, error) {
 // podMetadata is a type that is passed as metadata for predicate functions
 type predicateMetadata struct {
     podBestEffort bool
-    podRequest    *resourceRequest
+    podRequest    *schedulercache.Resource
     podPorts      map[int]bool
 }
@@ -402,28 +402,22 @@ func (c *VolumeZoneChecker) predicate(pod *api.Pod, meta interface{}, nodeInfo *
     return true, nil
 }
 
-type resourceRequest struct {
-    milliCPU  int64
-    memory    int64
-    nvidiaGPU int64
-}
-
-func getResourceRequest(pod *api.Pod) *resourceRequest {
-    result := resourceRequest{}
+func getResourceRequest(pod *api.Pod) *schedulercache.Resource {
+    result := schedulercache.Resource{}
     for _, container := range pod.Spec.Containers {
         requests := container.Resources.Requests
-        result.memory += requests.Memory().Value()
-        result.milliCPU += requests.Cpu().MilliValue()
-        result.nvidiaGPU += requests.NvidiaGPU().Value()
+        result.Memory += requests.Memory().Value()
+        result.MilliCPU += requests.Cpu().MilliValue()
+        result.NvidiaGPU += requests.NvidiaGPU().Value()
     }
     // take max_resource(sum_pod, any_init_container)
     for _, container := range pod.Spec.InitContainers {
         requests := container.Resources.Requests
-        if mem := requests.Memory().Value(); mem > result.memory {
-            result.memory = mem
+        if mem := requests.Memory().Value(); mem > result.Memory {
+            result.Memory = mem
         }
-        if cpu := requests.Cpu().MilliValue(); cpu > result.milliCPU {
-            result.milliCPU = cpu
+        if cpu := requests.Cpu().MilliValue(); cpu > result.MilliCPU {
+            result.MilliCPU = cpu
         }
     }
     return &result
@@ -444,29 +438,29 @@ func PodFitsResources(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.N
             newInsufficientResourceError(podCountResourceName, 1, int64(len(nodeInfo.Pods())), int64(allowedPodNumber))
     }
 
-    var podRequest *resourceRequest
+    var podRequest *schedulercache.Resource
     if predicateMeta, ok := meta.(*predicateMetadata); ok {
         podRequest = predicateMeta.podRequest
     } else {
         // We couldn't parse metadata - fallback to computing it.
         podRequest = getResourceRequest(pod)
     }
-    if podRequest.milliCPU == 0 && podRequest.memory == 0 && podRequest.nvidiaGPU == 0 {
+    if podRequest.MilliCPU == 0 && podRequest.Memory == 0 && podRequest.NvidiaGPU == 0 {
         return true, nil
     }
 
     allocatable := nodeInfo.AllocatableResource()
-    if allocatable.MilliCPU < podRequest.milliCPU+nodeInfo.RequestedResource().MilliCPU {
+    if allocatable.MilliCPU < podRequest.MilliCPU+nodeInfo.RequestedResource().MilliCPU {
         return false,
-            newInsufficientResourceError(cpuResourceName, podRequest.milliCPU, nodeInfo.RequestedResource().MilliCPU, allocatable.MilliCPU)
+            newInsufficientResourceError(cpuResourceName, podRequest.MilliCPU, nodeInfo.RequestedResource().MilliCPU, allocatable.MilliCPU)
     }
-    if allocatable.Memory < podRequest.memory+nodeInfo.RequestedResource().Memory {
+    if allocatable.Memory < podRequest.Memory+nodeInfo.RequestedResource().Memory {
         return false,
-            newInsufficientResourceError(memoryResourceName, podRequest.memory, nodeInfo.RequestedResource().Memory, allocatable.Memory)
+            newInsufficientResourceError(memoryResourceName, podRequest.Memory, nodeInfo.RequestedResource().Memory, allocatable.Memory)
     }
-    if allocatable.NvidiaGPU < podRequest.nvidiaGPU+nodeInfo.RequestedResource().NvidiaGPU {
+    if allocatable.NvidiaGPU < podRequest.NvidiaGPU+nodeInfo.RequestedResource().NvidiaGPU {
         return false,
-            newInsufficientResourceError(nvidiaGpuResourceName, podRequest.nvidiaGPU, nodeInfo.RequestedResource().NvidiaGPU, allocatable.NvidiaGPU)
+            newInsufficientResourceError(nvidiaGpuResourceName, podRequest.NvidiaGPU, nodeInfo.RequestedResource().NvidiaGPU, allocatable.NvidiaGPU)
     }
     if glog.V(10) {
         // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
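
PodFitsResources now takes the precomputed request from predicateMetadata when it is available and only falls back to getResourceRequest when no metadata was supplied. A small in-package sketch of how that metadata might be assembled once per pod before the per-node checks run (newPredicateMetadata is a hypothetical helper, not part of this PR; the real producer also fills podBestEffort and podPorts):

// newPredicateMetadata is a hypothetical illustration only.
func newPredicateMetadata(pod *api.Pod) *predicateMetadata {
    return &predicateMetadata{
        // Compute the pod's resource request once so PodFitsResources does
        // not have to call getResourceRequest for every candidate node.
        podRequest: getResourceRequest(pod),
        // podBestEffort and podPorts would be filled in by the real producer.
    }
}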

View File

@@ -94,15 +94,15 @@ func makeAllocatableResources(milliCPU int64, memory int64, nvidiaGPUs int64, po
     }
 }
 
-func newResourcePod(usage ...resourceRequest) *api.Pod {
+func newResourcePod(usage ...schedulercache.Resource) *api.Pod {
     containers := []api.Container{}
     for _, req := range usage {
         containers = append(containers, api.Container{
             Resources: api.ResourceRequirements{
                 Requests: api.ResourceList{
-                    api.ResourceCPU:       *resource.NewMilliQuantity(req.milliCPU, resource.DecimalSI),
-                    api.ResourceMemory:    *resource.NewQuantity(req.memory, resource.BinarySI),
-                    api.ResourceNvidiaGPU: *resource.NewQuantity(req.nvidiaGPU, resource.DecimalSI),
+                    api.ResourceCPU:       *resource.NewMilliQuantity(req.MilliCPU, resource.DecimalSI),
+                    api.ResourceMemory:    *resource.NewQuantity(req.Memory, resource.BinarySI),
+                    api.ResourceNvidiaGPU: *resource.NewQuantity(req.NvidiaGPU, resource.DecimalSI),
                 },
             },
         })
@@ -114,7 +114,7 @@ func newResourcePod(usage ...resourceRequest) *api.Pod {
     }
 }
 
-func newResourceInitPod(pod *api.Pod, usage ...resourceRequest) *api.Pod {
+func newResourceInitPod(pod *api.Pod, usage ...schedulercache.Resource) *api.Pod {
     pod.Spec.InitContainers = newResourcePod(usage...).Spec.Containers
     return pod
 }
@@ -130,103 +130,103 @@ func TestPodFitsResources(t *testing.T) {
         {
             pod: &api.Pod{},
             nodeInfo: schedulercache.NewNodeInfo(
-                newResourcePod(resourceRequest{milliCPU: 10, memory: 20})),
+                newResourcePod(schedulercache.Resource{MilliCPU: 10, Memory: 20})),
             fits: true,
             test: "no resources requested always fits",
             wErr: nil,
         },
         {
-            pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}),
+            pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}),
             nodeInfo: schedulercache.NewNodeInfo(
-                newResourcePod(resourceRequest{milliCPU: 10, memory: 20})),
+                newResourcePod(schedulercache.Resource{MilliCPU: 10, Memory: 20})),
             fits: false,
             test: "too many resources fails",
             wErr: newInsufficientResourceError(cpuResourceName, 1, 10, 10),
         },
         {
-            pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 3, memory: 1}),
+            pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 3, Memory: 1}),
             nodeInfo: schedulercache.NewNodeInfo(
-                newResourcePod(resourceRequest{milliCPU: 8, memory: 19})),
+                newResourcePod(schedulercache.Resource{MilliCPU: 8, Memory: 19})),
             fits: false,
             test: "too many resources fails due to init container cpu",
             wErr: newInsufficientResourceError(cpuResourceName, 3, 8, 10),
         },
         {
-            pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 3, memory: 1}, resourceRequest{milliCPU: 2, memory: 1}),
+            pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 3, Memory: 1}, schedulercache.Resource{MilliCPU: 2, Memory: 1}),
             nodeInfo: schedulercache.NewNodeInfo(
-                newResourcePod(resourceRequest{milliCPU: 8, memory: 19})),
+                newResourcePod(schedulercache.Resource{MilliCPU: 8, Memory: 19})),
             fits: false,
             test: "too many resources fails due to highest init container cpu",
             wErr: newInsufficientResourceError(cpuResourceName, 3, 8, 10),
         },
         {
-            pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 3}),
+            pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 1, Memory: 3}),
             nodeInfo: schedulercache.NewNodeInfo(
-                newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
+                newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})),
             fits: false,
             test: "too many resources fails due to init container memory",
             wErr: newInsufficientResourceError(memoryResourceName, 3, 19, 20),
         },
         {
-            pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 3}, resourceRequest{milliCPU: 1, memory: 2}),
+            pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 1, Memory: 3}, schedulercache.Resource{MilliCPU: 1, Memory: 2}),
             nodeInfo: schedulercache.NewNodeInfo(
-                newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
+                newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})),
             fits: false,
             test: "too many resources fails due to highest init container memory",
             wErr: newInsufficientResourceError(memoryResourceName, 3, 19, 20),
         },
         {
-            pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 1}),
+            pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 1, Memory: 1}),
             nodeInfo: schedulercache.NewNodeInfo(
-                newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
+                newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})),
             fits: true,
             test: "init container fits because it's the max, not sum, of containers and init containers",
             wErr: nil,
         },
         {
-            pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 1}, resourceRequest{milliCPU: 1, memory: 1}),
+            pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 1, Memory: 1}, schedulercache.Resource{MilliCPU: 1, Memory: 1}),
             nodeInfo: schedulercache.NewNodeInfo(
-                newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
+                newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})),
             fits: true,
             test: "multiple init containers fit because it's the max, not sum, of containers and init containers",
             wErr: nil,
         },
         {
-            pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}),
+            pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}),
             nodeInfo: schedulercache.NewNodeInfo(
-                newResourcePod(resourceRequest{milliCPU: 5, memory: 5})),
+                newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 5})),
             fits: true,
             test: "both resources fit",
             wErr: nil,
         },
         {
-            pod: newResourcePod(resourceRequest{milliCPU: 2, memory: 1}),
+            pod: newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 1}),
             nodeInfo: schedulercache.NewNodeInfo(
-                newResourcePod(resourceRequest{milliCPU: 9, memory: 5})),
+                newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 5})),
             fits: false,
             test: "one resource memory fits",
             wErr: newInsufficientResourceError(cpuResourceName, 2, 9, 10),
         },
         {
-            pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 2}),
+            pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 2}),
             nodeInfo: schedulercache.NewNodeInfo(
-                newResourcePod(resourceRequest{milliCPU: 5, memory: 19})),
+                newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})),
             fits: false,
             test: "one resource cpu fits",
             wErr: newInsufficientResourceError(memoryResourceName, 2, 19, 20),
         },
         {
-            pod: newResourcePod(resourceRequest{milliCPU: 5, memory: 1}),
+            pod: newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 1}),
             nodeInfo: schedulercache.NewNodeInfo(
-                newResourcePod(resourceRequest{milliCPU: 5, memory: 19})),
+                newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})),
             fits: true,
             test: "equal edge case",
             wErr: nil,
         },
         {
-            pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 4, memory: 1}), resourceRequest{milliCPU: 5, memory: 1}),
+            pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 4, Memory: 1}), schedulercache.Resource{MilliCPU: 5, Memory: 1}),
             nodeInfo: schedulercache.NewNodeInfo(
-                newResourcePod(resourceRequest{milliCPU: 5, memory: 19})),
+                newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})),
             fits: true,
             test: "equal edge case for init container",
             wErr: nil,
@@ -256,31 +256,31 @@ func TestPodFitsResources(t *testing.T) {
         {
             pod: &api.Pod{},
             nodeInfo: schedulercache.NewNodeInfo(
-                newResourcePod(resourceRequest{milliCPU: 10, memory: 20})),
+                newResourcePod(schedulercache.Resource{MilliCPU: 10, Memory: 20})),
             fits: false,
             test: "even without specified resources predicate fails when there's no space for additional pod",
             wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
         },
         {
-            pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}),
+            pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}),
             nodeInfo: schedulercache.NewNodeInfo(
-                newResourcePod(resourceRequest{milliCPU: 5, memory: 5})),
+                newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 5})),
             fits: false,
             test: "even if both resources fit predicate fails when there's no space for additional pod",
             wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
         },
         {
-            pod: newResourcePod(resourceRequest{milliCPU: 5, memory: 1}),
+            pod: newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 1}),
             nodeInfo: schedulercache.NewNodeInfo(
-                newResourcePod(resourceRequest{milliCPU: 5, memory: 19})),
+                newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})),
             fits: false,
             test: "even for equal edge case predicate fails when there's no space for additional pod",
             wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
         },
         {
-            pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 5, memory: 1}), resourceRequest{milliCPU: 5, memory: 1}),
+            pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 1}), schedulercache.Resource{MilliCPU: 5, Memory: 1}),
             nodeInfo: schedulercache.NewNodeInfo(
-                newResourcePod(resourceRequest{milliCPU: 5, memory: 19})),
+                newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})),
             fits: false,
             test: "even for equal edge case predicate fails when there's no space for additional pod due to init container",
             wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
@@ -1701,7 +1701,7 @@ func TestRunGeneralPredicates(t *testing.T) {
         {
             pod: &api.Pod{},
             nodeInfo: schedulercache.NewNodeInfo(
-                newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
+                newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})),
             node: &api.Node{
                 ObjectMeta: api.ObjectMeta{Name: "machine1"},
                 Status:     api.NodeStatus{Capacity: makeResources(10, 20, 0, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32)},
@@ -1711,9 +1711,9 @@ func TestRunGeneralPredicates(t *testing.T) {
             test: "no resources/port/host requested always fits",
         },
         {
-            pod: newResourcePod(resourceRequest{milliCPU: 8, memory: 10}),
+            pod: newResourcePod(schedulercache.Resource{MilliCPU: 8, Memory: 10}),
             nodeInfo: schedulercache.NewNodeInfo(
-                newResourcePod(resourceRequest{milliCPU: 5, memory: 19})),
+                newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})),
             node: &api.Node{
                 ObjectMeta: api.ObjectMeta{Name: "machine1"},
                 Status:     api.NodeStatus{Capacity: makeResources(10, 20, 0, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32)},
@@ -1725,25 +1725,25 @@ func TestRunGeneralPredicates(t *testing.T) {
         {
             pod: &api.Pod{},
             nodeInfo: schedulercache.NewNodeInfo(
-                newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
+                newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})),
             node: &api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 1, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 1, 32)}},
             fits: true,
             wErr: nil,
             test: "no resources/port/host requested always fits on GPU machine",
         },
         {
-            pod: newResourcePod(resourceRequest{milliCPU: 3, memory: 1, nvidiaGPU: 1}),
+            pod: newResourcePod(schedulercache.Resource{MilliCPU: 3, Memory: 1, NvidiaGPU: 1}),
             nodeInfo: schedulercache.NewNodeInfo(
-                newResourcePod(resourceRequest{milliCPU: 5, memory: 10, nvidiaGPU: 1})),
+                newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 10, NvidiaGPU: 1})),
             node: &api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 1, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 1, 32)}},
             fits: false,
             wErr: newInsufficientResourceError("NvidiaGpu", 1, 1, 1),
             test: "not enough GPU resource",
         },
         {
-            pod: newResourcePod(resourceRequest{milliCPU: 3, memory: 1, nvidiaGPU: 1}),
+            pod: newResourcePod(schedulercache.Resource{MilliCPU: 3, Memory: 1, NvidiaGPU: 1}),
             nodeInfo: schedulercache.NewNodeInfo(
-                newResourcePod(resourceRequest{milliCPU: 5, memory: 10, nvidiaGPU: 0})),
+                newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 10, NvidiaGPU: 0})),
             node: &api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 1, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 1, 32)}},
             fits: true,
             wErr: nil,