Merge pull request #24674 from gmarek/maxpods

Automatic merge from submit-queue

Enforce --max-pods in kubelet admission; previously it was only enforced in the scheduler

This is an ugly hack: I spent some time trying to understand what one NodeInfo has in common with the other, but at some point decided I simply didn't have the time to untangle it.

Fixes #24262
Fixes #20263

cc @HaiyangDING @lavalamp
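
For readers skimming the diff below: the logic being moved boils down to comparing the pods already bound to a node, plus the one being admitted, against the node's allocatable pod count. A minimal standalone sketch of that comparison (illustrative names, not the actual predicate helpers):

```go
package main

import "fmt"

// fitsPodCount mirrors the check this PR moves into the shared predicate: a pod
// fits only if the pods already bound to the node, plus the one being admitted,
// stay within the node's allocatable pod number (the value --max-pods ends up
// feeding into). The name is illustrative, not the real helper.
func fitsPodCount(boundPods int, allowedPodNumber int64) bool {
	return int64(boundPods)+1 <= allowedPodNumber
}

func main() {
	// With --max-pods=110 the 110th pod still fits; the 111th is now rejected
	// at kubelet admission time as well as by the scheduler.
	fmt.Println(fitsPodCount(109, 110)) // true
	fmt.Println(fitsPodCount(110, 110)) // false
}
```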
k8s-merge-robot 2016-04-22 23:00:20 -07:00
commit 2ec908083b
2 changed files with 53 additions and 13 deletions


@@ -2366,10 +2366,24 @@ func TestHandlePortConflicts(t *testing.T) {
 	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
 	kl.nodeLister = testNodeLister{nodes: []api.Node{
-		{ObjectMeta: api.ObjectMeta{Name: kl.nodeName}},
+		{
+			ObjectMeta: api.ObjectMeta{Name: kl.nodeName},
+			Status: api.NodeStatus{
+				Allocatable: api.ResourceList{
+					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
+				},
+			},
+		},
 	}}
 	kl.nodeInfo = testNodeInfo{nodes: []api.Node{
-		{ObjectMeta: api.ObjectMeta{Name: kl.nodeName}},
+		{
+			ObjectMeta: api.ObjectMeta{Name: kl.nodeName},
+			Status: api.NodeStatus{
+				Allocatable: api.ResourceList{
+					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
+				},
+			},
+		},
 	}}
 	spec := api.PodSpec{NodeName: kl.nodeName, Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}
@@ -2427,10 +2441,24 @@ func TestHandleHostNameConflicts(t *testing.T) {
 	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
 	kl.nodeLister = testNodeLister{nodes: []api.Node{
-		{ObjectMeta: api.ObjectMeta{Name: "127.0.0.1"}},
+		{
+			ObjectMeta: api.ObjectMeta{Name: "127.0.0.1"},
+			Status: api.NodeStatus{
+				Allocatable: api.ResourceList{
+					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
+				},
+			},
+		},
 	}}
 	kl.nodeInfo = testNodeInfo{nodes: []api.Node{
-		{ObjectMeta: api.ObjectMeta{Name: "127.0.0.1"}},
+		{
+			ObjectMeta: api.ObjectMeta{Name: "127.0.0.1"},
+			Status: api.NodeStatus{
+				Allocatable: api.ResourceList{
+					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
+				},
+			},
+		},
 	}}
 	pods := []*api.Pod{
@@ -2486,10 +2514,24 @@ func TestHandleNodeSelector(t *testing.T) {
 	testKubelet := newTestKubelet(t)
 	kl := testKubelet.kubelet
 	kl.nodeLister = testNodeLister{nodes: []api.Node{
-		{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{"key": "B"}}},
+		{
+			ObjectMeta: api.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{"key": "B"}},
+			Status: api.NodeStatus{
+				Allocatable: api.ResourceList{
+					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
+				},
+			},
+		},
 	}}
 	kl.nodeInfo = testNodeInfo{nodes: []api.Node{
-		{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{"key": "B"}}},
+		{
+			ObjectMeta: api.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{"key": "B"}},
+			Status: api.NodeStatus{
+				Allocatable: api.ResourceList{
+					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
+				},
+			},
+		},
 	}}
 	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
 	testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

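The fixtures above matter because the kubelet now runs the same pod-count predicate at admission: a test node that advertises no allocatable pods would reject every pod. A sketch of a helper that builds such a node, assuming the 1.2-era import paths (`k8s.io/kubernetes/pkg/api`, `k8s.io/kubernetes/pkg/api/resource`); `nodeWithPodCapacity` is a hypothetical name, not something added by this PR:

```go
// Package example is illustrative only; nodeWithPodCapacity is a hypothetical
// helper, not part of this PR. Import paths are assumed from the 1.2-era tree.
package example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
)

// nodeWithPodCapacity builds a test node whose Allocatable advertises room for
// maxPods pods, matching the fixtures added in the test diff above.
func nodeWithPodCapacity(name string, maxPods int64) api.Node {
	return api.Node{
		ObjectMeta: api.ObjectMeta{Name: name},
		Status: api.NodeStatus{
			Allocatable: api.ResourceList{
				api.ResourcePods: *resource.NewQuantity(maxPods, resource.DecimalSI),
			},
		},
	}
}
```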

@@ -425,6 +425,10 @@ func podName(pod *api.Pod) string {
 func podFitsResourcesInternal(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo, info *api.Node) (bool, error) {
 	allocatable := info.Status.Allocatable
 	allowedPodNumber := allocatable.Pods().Value()
+	if int64(len(nodeInfo.Pods()))+1 > allowedPodNumber {
+		return false,
+			newInsufficientResourceError(podCountResourceName, 1, int64(len(nodeInfo.Pods())), allowedPodNumber)
+	}
 	podRequest := getResourceRequest(pod)
 	if podRequest.milliCPU == 0 && podRequest.memory == 0 {
 		return true, nil
@@ -451,13 +455,6 @@ func (r *NodeStatus) PodFitsResources(pod *api.Pod, nodeName string, nodeInfo *s
 	if err != nil {
 		return false, err
 	}
-	// TODO: move the following podNumber check to podFitsResourcesInternal when Kubelet allows podNumber check (See #20263).
-	allocatable := info.Status.Allocatable
-	allowedPodNumber := allocatable.Pods().Value()
-	if int64(len(nodeInfo.Pods()))+1 > allowedPodNumber {
-		return false,
-			newInsufficientResourceError(podCountResourceName, 1, int64(len(nodeInfo.Pods())), allowedPodNumber)
-	}
 	return podFitsResourcesInternal(pod, nodeName, nodeInfo, info)
 }
@@ -775,6 +772,7 @@ func RunGeneralPredicates(pod *api.Pod, nodeName string, nodeInfo *schedulercach
 	if !fit {
 		return fit, err
 	}
 	fit, err = PodFitsHost(pod, nodeName, nodeInfo)
 	if !fit {
 		return fit, err
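
The net effect of the predicates.go changes: the pod-count check now lives only in podFitsResourcesInternal, so the scheduler-facing PodFitsResources and the general-predicate chain used by kubelet admission both reach it through the same helper and cannot drift apart. A standalone sketch of that shape, with simplified types standing in for schedulercache.NodeInfo and api.Node (nothing here is the real wiring):

```go
package main

import (
	"errors"
	"fmt"
)

// fitsInternal stands in for podFitsResourcesInternal: after this PR it is the
// single place the pod-count check lives (simplified signature, illustrative only).
func fitsInternal(boundPods int, allowedPodNumber int64) (bool, error) {
	if int64(boundPods)+1 > allowedPodNumber {
		return false, errors.New("insufficient pods")
	}
	return true, nil
}

// podFitsResources mirrors the scheduler-facing wrapper: it no longer repeats
// the count check itself and simply delegates to the shared helper.
func podFitsResources(boundPods int, allowedPodNumber int64) (bool, error) {
	return fitsInternal(boundPods, allowedPodNumber)
}

// runGeneralPredicates mirrors the chain the kubelet admission path runs; the
// count check comes along automatically because it sits in the shared helper.
func runGeneralPredicates(boundPods int, allowedPodNumber int64) (bool, error) {
	if ok, err := fitsInternal(boundPods, allowedPodNumber); !ok {
		return ok, err
	}
	// ...PodFitsHost, PodFitsHostPorts, selector checks would follow here.
	return true, nil
}

func main() {
	fmt.Println(podFitsResources(110, 110))     // scheduler view: rejects the 111th pod
	fmt.Println(runGeneralPredicates(110, 110)) // kubelet admission view: same answer
}
```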