mirror of https://github.com/k3s-io/k3s
commit 3e04a45a95
@@ -18,23 +18,32 @@ package predicates

 import "fmt"

-var (
-	ErrExceededMaxPodNumber   = newInsufficientResourceError("PodCount")
-	ErrInsufficientFreeCPU    = newInsufficientResourceError("CPU")
-	ErrInsufficientFreeMemory = newInsufficientResourceError("Memory")
+const (
+	podCountResourceName string = "PodCount"
+	cpuResourceName      string = "CPU"
+	memoryResoureceName  string = "Memory"
 )

 // InsufficientResourceError is an error type that indicates what kind of resource limit is
 // hit and caused the unfitting failure.
 type InsufficientResourceError struct {
-	// ResourceName tells the name of the resource that is insufficient
-	ResourceName string
+	// resourceName is the name of the resource that is insufficient
+	resourceName string
+	requested    int64
+	used         int64
+	capacity     int64
 }

-func newInsufficientResourceError(resourceName string) *InsufficientResourceError {
-	return &InsufficientResourceError{resourceName}
-}
+func newInsufficientResourceError(resourceName string, requested, used, capacity int64) *InsufficientResourceError {
+	return &InsufficientResourceError{
+		resourceName: resourceName,
+		requested:    requested,
+		used:         used,
+		capacity:     capacity,
+	}
+}

 func (e *InsufficientResourceError) Error() string {
-	return fmt.Sprintf("Node didn't have enough resource: %s", e.ResourceName)
+	return fmt.Sprintf("Node didn't have enough resource: %s, requested: %d, used: %d, capacity: %d",
+		e.resourceName, e.requested, e.used, e.capacity)
 }
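Aside (not part of the commit): a minimal, self-contained Go sketch of the error pattern this hunk introduces; the lowercase type name and the example numbers are invented for illustration.

package main

import "fmt"

// insufficientResourceError mirrors the shape introduced above: an error
// that records which resource ran out, plus the numbers that explain why.
type insufficientResourceError struct {
	resourceName string
	requested    int64 // what the pending pod asks for
	used         int64 // what the pods already on the node consume
	capacity     int64 // what the node can offer in total
}

func (e *insufficientResourceError) Error() string {
	return fmt.Sprintf("Node didn't have enough resource: %s, requested: %d, used: %d, capacity: %d",
		e.resourceName, e.requested, e.used, e.capacity)
}

func main() {
	var err error = &insufficientResourceError{"CPU", 500, 9800, 10000}
	fmt.Println(err)
	// prints: Node didn't have enough resource: CPU, requested: 500, used: 9800, capacity: 10000
}

Carrying requested/used/capacity on the error itself means callers can print an actionable message without re-deriving those numbers.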
@@ -254,6 +254,18 @@ func getResourceRequest(pod *api.Pod) resourceRequest {
 	return result
 }

+func getTotalResourceRequest(pods []*api.Pod) resourceRequest {
+	result := resourceRequest{}
+	for _, pod := range pods {
+		for _, container := range pod.Spec.Containers {
+			requests := container.Resources.Requests
+			result.memory += requests.Memory().Value()
+			result.milliCPU += requests.Cpu().MilliValue()
+		}
+	}
+	return result
+}
+
 func CheckPodsExceedingFreeResources(pods []*api.Pod, allocatable api.ResourceList) (fitting []*api.Pod, notFittingCPU, notFittingMemory []*api.Pod) {
 	totalMilliCPU := allocatable.Cpu().MilliValue()
 	totalMemory := allocatable.Memory().Value()
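Aside (not part of the commit): the new getTotalResourceRequest simply folds every container request on the node into one total. A standalone sketch under simplified types (container, pod, and their field layout are stand-ins for the api package, invented for illustration):

package main

import "fmt"

// container and pod are simplified stand-ins for the api types used in
// the hunk above: each container carries its CPU request in millicores
// and its memory request in bytes.
type container struct {
	milliCPU int64
	memory   int64
}

type pod struct {
	containers []container
}

type resourceRequest struct {
	milliCPU int64
	memory   int64
}

// getTotalResourceRequest mirrors the new helper: fold the requests of
// every container of every pod into a single aggregate.
func getTotalResourceRequest(pods []pod) resourceRequest {
	result := resourceRequest{}
	for _, p := range pods {
		for _, c := range p.containers {
			result.milliCPU += c.milliCPU
			result.memory += c.memory
		}
	}
	return result
}

func main() {
	pods := []pod{
		{containers: []container{{milliCPU: 100, memory: 1 << 20}}},
		{containers: []container{{milliCPU: 250, memory: 2 << 20}, {milliCPU: 50}}},
	}
	total := getTotalResourceRequest(pods)
	fmt.Printf("milliCPU=%d memory=%d\n", total.milliCPU, total.memory)
	// milliCPU=400 memory=3145728
}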
@@ -294,8 +306,8 @@ func (r *ResourceFit) PodFitsResources(pod *api.Pod, existingPods []*api.Pod, no

 	allocatable := info.Status.Allocatable
 	if int64(len(existingPods))+1 > allocatable.Pods().Value() {
 		glog.V(10).Infof("Cannot schedule Pod %+v, because Node %+v is full, running %v out of %v Pods.", podName(pod), node, len(existingPods), allocatable.Pods().Value())
-		return false, ErrExceededMaxPodNumber
+		return false, newInsufficientResourceError(podCountResourceName, 1,
+			int64(len(existingPods)), allocatable.Pods().Value())
 	}

 	podRequest := getResourceRequest(pod)
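Aside (not part of the commit): in the pod-count branch the triple reads as requested=1 (the pod being scheduled), used=len(existingPods), capacity=the node's allocatable pod count. A hypothetical helper replaying that arithmetic:

package main

import "fmt"

// podCountCheck (hypothetical, not in the commit) replays the branch
// above: scheduling one more pod "requests" 1 pod slot.
func podCountCheck(existingPods, podCapacity int64) error {
	if existingPods+1 > podCapacity {
		return fmt.Errorf("Node didn't have enough resource: PodCount, requested: %d, used: %d, capacity: %d",
			1, existingPods, podCapacity)
	}
	return nil
}

func main() {
	// Matches the test expectation newInsufficientResourceError(podCountResourceName, 1, 1, 1):
	// a node with room for 1 pod that is already running 1 pod.
	fmt.Println(podCountCheck(1, 1))
	fmt.Println(podCountCheck(0, 1)) // <nil>
}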
@@ -306,12 +318,12 @@ func (r *ResourceFit) PodFitsResources(pod *api.Pod, existingPods []*api.Pod, no
 	pods := append(existingPods, pod)
 	_, exceedingCPU, exceedingMemory := CheckPodsExceedingFreeResources(pods, allocatable)
 	if len(exceedingCPU) > 0 {
 		glog.V(10).Infof("Cannot schedule Pod %+v, because Node %v does not have sufficient CPU", podName(pod), node)
-		return false, ErrInsufficientFreeCPU
+		return false, newInsufficientResourceError(cpuResourceName, podRequest.milliCPU,
+			getTotalResourceRequest(existingPods).milliCPU, allocatable.Cpu().MilliValue())
 	}
 	if len(exceedingMemory) > 0 {
 		glog.V(10).Infof("Cannot schedule Pod %+v, because Node %v does not have sufficient Memory", podName(pod), node)
-		return false, ErrInsufficientFreeMemory
+		return false, newInsufficientResourceError(memoryResoureceName, podRequest.memory,
+			getTotalResourceRequest(existingPods).memory, allocatable.Memory().Value())
 	}
 	glog.V(10).Infof("Schedule Pod %+v on Node %+v is allowed, Node is running only %v out of %v Pods.", podName(pod), node, len(pods)-1, allocatable.Pods().Value())
 	return true, nil
@@ -82,7 +82,6 @@ func newResourcePod(usage ...resourceRequest) *api.Pod {
 }

 func TestPodFitsResources(t *testing.T) {
-
 	enoughPodsTests := []struct {
 		pod          *api.Pod
 		existingPods []*api.Pod
@@ -106,7 +105,7 @@ func TestPodFitsResources(t *testing.T) {
 			},
 			fits: false,
 			test: "too many resources fails",
-			wErr: ErrInsufficientFreeCPU,
+			wErr: newInsufficientResourceError(cpuResourceName, 1, 10, 10),
 		},
 		{
 			pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}),
@@ -124,7 +123,7 @@ func TestPodFitsResources(t *testing.T) {
 			},
 			fits: false,
 			test: "one resources fits",
-			wErr: ErrInsufficientFreeMemory,
+			wErr: newInsufficientResourceError(memoryResoureceName, 2, 19, 20),
 		},
 		{
 			pod: newResourcePod(resourceRequest{milliCPU: 5, memory: 1}),
@@ -163,8 +162,8 @@ func TestPodFitsResources(t *testing.T) {
 				newResourcePod(resourceRequest{milliCPU: 10, memory: 20}),
 			},
 			fits: false,
-			test: "even without specified resources predicate fails when there's no available ips",
-			wErr: ErrExceededMaxPodNumber,
+			test: "even without specified resources predicate fails when there's no space for additional pod",
+			wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
 		},
 		{
 			pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}),
@@ -172,8 +171,8 @@ func TestPodFitsResources(t *testing.T) {
 				newResourcePod(resourceRequest{milliCPU: 5, memory: 5}),
 			},
 			fits: false,
-			test: "even if both resources fit predicate fails when there's no available ips",
-			wErr: ErrExceededMaxPodNumber,
+			test: "even if both resources fit predicate fails when there's no space for additional pod",
+			wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
 		},
 		{
 			pod: newResourcePod(resourceRequest{milliCPU: 5, memory: 1}),
@@ -181,8 +180,8 @@ func TestPodFitsResources(t *testing.T) {
 				newResourcePod(resourceRequest{milliCPU: 5, memory: 19}),
 			},
 			fits: false,
-			test: "even for equal edge case predicate fails when there's no available ips",
-			wErr: ErrExceededMaxPodNumber,
+			test: "even for equal edge case predicate fails when there's no space for additional pod",
+			wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
 		},
 	}
 	for _, test := range notEnoughPodsTests {
@@ -144,7 +144,7 @@ func findNodesThatFit(pod *api.Pod, machineToPods map[string][]*api.Pod, predica
 				failedPredicateMap[node.Name] = sets.String{}
 			}
 			if re, ok := err.(*predicates.InsufficientResourceError); ok {
-				failedPredicateMap[node.Name].Insert(re.ResourceName)
+				failedPredicateMap[node.Name].Insert(re.Error())
 				break
 			}
 			failedPredicateMap[node.Name].Insert(name)
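Aside (not part of the commit): what this one-line scheduler change buys. With stand-ins for predicates.InsufficientResourceError and sets.String (both invented here for the sketch), failedPredicateMap now collects the full message rather than just the resource name:

package main

import "fmt"

// insufficientResourceError is a minimal stand-in for the predicates
// error type above (assumption: same fields, same message format).
type insufficientResourceError struct {
	resourceName              string
	requested, used, capacity int64
}

func (e *insufficientResourceError) Error() string {
	return fmt.Sprintf("Node didn't have enough resource: %s, requested: %d, used: %d, capacity: %d",
		e.resourceName, e.requested, e.used, e.capacity)
}

// stringSet is a tiny stand-in for the sets.String used by the scheduler.
type stringSet map[string]struct{}

func (s stringSet) Insert(item string) { s[item] = struct{}{} }

func main() {
	failedPredicateMap := map[string]stringSet{}
	node, predicateName := "node-1", "PodFitsResources"
	var err error = &insufficientResourceError{"Memory", 2, 19, 20}

	if _, found := failedPredicateMap[node]; !found {
		failedPredicateMap[node] = stringSet{}
	}
	if re, ok := err.(*insufficientResourceError); ok {
		// Before this change the map recorded only the resource name ("Memory");
		// now it records the full message with requested/used/capacity.
		failedPredicateMap[node].Insert(re.Error())
	} else {
		failedPredicateMap[node].Insert(predicateName)
	}
	for msg := range failedPredicateMap[node] {
		fmt.Println(msg)
	}
}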