Merge pull request #19910 from hongchaodeng/res

Auto commit by PR queue bot
pull/6/head
k8s-merge-robot 2016-02-01 00:08:28 -08:00
commit 3e04a45a95
4 changed files with 45 additions and 25 deletions

View File

@@ -18,23 +18,32 @@ package predicates
import "fmt" import "fmt"
var ( const (
ErrExceededMaxPodNumber = newInsufficientResourceError("PodCount") podCountResourceName string = "PodCount"
ErrInsufficientFreeCPU = newInsufficientResourceError("CPU") cpuResourceName string = "CPU"
ErrInsufficientFreeMemory = newInsufficientResourceError("Memory") memoryResoureceName string = "Memory"
) )
// InsufficientResourceError is an error type that indicates what kind of resource limit is // InsufficientResourceError is an error type that indicates what kind of resource limit is
// hit and caused the unfitting failure. // hit and caused the unfitting failure.
type InsufficientResourceError struct { type InsufficientResourceError struct {
// ResourceName tells the name of the resource that is insufficient // resourceName is the name of the resource that is insufficient
ResourceName string resourceName string
requested int64
used int64
capacity int64
} }
func newInsufficientResourceError(resourceName string) *InsufficientResourceError { func newInsufficientResourceError(resourceName string, requested, used, capacity int64) *InsufficientResourceError {
return &InsufficientResourceError{resourceName} return &InsufficientResourceError{
resourceName: resourceName,
requested: requested,
used: used,
capacity: capacity,
}
} }
func (e *InsufficientResourceError) Error() string { func (e *InsufficientResourceError) Error() string {
return fmt.Sprintf("Node didn't have enough resource: %s", e.ResourceName) return fmt.Sprintf("Node didn't have enough resource: %s, requested: %d, used: %d, capacity: %d",
e.resourceName, e.requested, e.used, e.capacity)
} }

View File

@@ -254,6 +254,18 @@ func getResourceRequest(pod *api.Pod) resourceRequest {
return result return result
} }
func getTotalResourceRequest(pods []*api.Pod) resourceRequest {
result := resourceRequest{}
for _, pod := range pods {
for _, container := range pod.Spec.Containers {
requests := container.Resources.Requests
result.memory += requests.Memory().Value()
result.milliCPU += requests.Cpu().MilliValue()
}
}
return result
}
func CheckPodsExceedingFreeResources(pods []*api.Pod, allocatable api.ResourceList) (fitting []*api.Pod, notFittingCPU, notFittingMemory []*api.Pod) { func CheckPodsExceedingFreeResources(pods []*api.Pod, allocatable api.ResourceList) (fitting []*api.Pod, notFittingCPU, notFittingMemory []*api.Pod) {
totalMilliCPU := allocatable.Cpu().MilliValue() totalMilliCPU := allocatable.Cpu().MilliValue()
totalMemory := allocatable.Memory().Value() totalMemory := allocatable.Memory().Value()
@@ -294,8 +306,8 @@ func (r *ResourceFit) PodFitsResources(pod *api.Pod, existingPods []*api.Pod, no
allocatable := info.Status.Allocatable allocatable := info.Status.Allocatable
if int64(len(existingPods))+1 > allocatable.Pods().Value() { if int64(len(existingPods))+1 > allocatable.Pods().Value() {
glog.V(10).Infof("Cannot schedule Pod %+v, because Node %+v is full, running %v out of %v Pods.", podName(pod), node, len(existingPods), allocatable.Pods().Value()) return false, newInsufficientResourceError(podCountResourceName, 1,
return false, ErrExceededMaxPodNumber int64(len(existingPods)), allocatable.Pods().Value())
} }
podRequest := getResourceRequest(pod) podRequest := getResourceRequest(pod)
@@ -306,12 +318,12 @@ func (r *ResourceFit) PodFitsResources(pod *api.Pod, existingPods []*api.Pod, no
pods := append(existingPods, pod) pods := append(existingPods, pod)
_, exceedingCPU, exceedingMemory := CheckPodsExceedingFreeResources(pods, allocatable) _, exceedingCPU, exceedingMemory := CheckPodsExceedingFreeResources(pods, allocatable)
if len(exceedingCPU) > 0 { if len(exceedingCPU) > 0 {
glog.V(10).Infof("Cannot schedule Pod %+v, because Node %v does not have sufficient CPU", podName(pod), node) return false, newInsufficientResourceError(cpuResourceName, podRequest.milliCPU,
return false, ErrInsufficientFreeCPU getTotalResourceRequest(existingPods).milliCPU, allocatable.Cpu().MilliValue())
} }
if len(exceedingMemory) > 0 { if len(exceedingMemory) > 0 {
glog.V(10).Infof("Cannot schedule Pod %+v, because Node %v does not have sufficient Memory", podName(pod), node) return false, newInsufficientResourceError(memoryResoureceName, podRequest.memory,
return false, ErrInsufficientFreeMemory getTotalResourceRequest(existingPods).memory, allocatable.Memory().Value())
} }
glog.V(10).Infof("Schedule Pod %+v on Node %+v is allowed, Node is running only %v out of %v Pods.", podName(pod), node, len(pods)-1, allocatable.Pods().Value()) glog.V(10).Infof("Schedule Pod %+v on Node %+v is allowed, Node is running only %v out of %v Pods.", podName(pod), node, len(pods)-1, allocatable.Pods().Value())
return true, nil return true, nil

View File

@@ -82,7 +82,6 @@ func newResourcePod(usage ...resourceRequest) *api.Pod {
} }
func TestPodFitsResources(t *testing.T) { func TestPodFitsResources(t *testing.T) {
enoughPodsTests := []struct { enoughPodsTests := []struct {
pod *api.Pod pod *api.Pod
existingPods []*api.Pod existingPods []*api.Pod
@@ -106,7 +105,7 @@ func TestPodFitsResources(t *testing.T) {
}, },
fits: false, fits: false,
test: "too many resources fails", test: "too many resources fails",
wErr: ErrInsufficientFreeCPU, wErr: newInsufficientResourceError(cpuResourceName, 1, 10, 10),
}, },
{ {
pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}),
@@ -124,7 +123,7 @@ func TestPodFitsResources(t *testing.T) {
}, },
fits: false, fits: false,
test: "one resources fits", test: "one resources fits",
wErr: ErrInsufficientFreeMemory, wErr: newInsufficientResourceError(memoryResoureceName, 2, 19, 20),
}, },
{ {
pod: newResourcePod(resourceRequest{milliCPU: 5, memory: 1}), pod: newResourcePod(resourceRequest{milliCPU: 5, memory: 1}),
@@ -163,8 +162,8 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(resourceRequest{milliCPU: 10, memory: 20}), newResourcePod(resourceRequest{milliCPU: 10, memory: 20}),
}, },
fits: false, fits: false,
test: "even without specified resources predicate fails when there's no available ips", test: "even without specified resources predicate fails when there's no space for additional pod",
wErr: ErrExceededMaxPodNumber, wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
}, },
{ {
pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}),
@@ -172,8 +171,8 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(resourceRequest{milliCPU: 5, memory: 5}), newResourcePod(resourceRequest{milliCPU: 5, memory: 5}),
}, },
fits: false, fits: false,
test: "even if both resources fit predicate fails when there's no available ips", test: "even if both resources fit predicate fails when there's no space for additional pod",
wErr: ErrExceededMaxPodNumber, wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
}, },
{ {
pod: newResourcePod(resourceRequest{milliCPU: 5, memory: 1}), pod: newResourcePod(resourceRequest{milliCPU: 5, memory: 1}),
@@ -181,8 +180,8 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(resourceRequest{milliCPU: 5, memory: 19}), newResourcePod(resourceRequest{milliCPU: 5, memory: 19}),
}, },
fits: false, fits: false,
test: "even for equal edge case predicate fails when there's no available ips", test: "even for equal edge case predicate fails when there's no space for additional pod",
wErr: ErrExceededMaxPodNumber, wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
}, },
} }
for _, test := range notEnoughPodsTests { for _, test := range notEnoughPodsTests {

View File

@@ -144,7 +144,7 @@ func findNodesThatFit(pod *api.Pod, machineToPods map[string][]*api.Pod, predica
failedPredicateMap[node.Name] = sets.String{} failedPredicateMap[node.Name] = sets.String{}
} }
if re, ok := err.(*predicates.InsufficientResourceError); ok { if re, ok := err.(*predicates.InsufficientResourceError); ok {
failedPredicateMap[node.Name].Insert(re.ResourceName) failedPredicateMap[node.Name].Insert(re.Error())
break break
} }
failedPredicateMap[node.Name].Insert(name) failedPredicateMap[node.Name].Insert(name)