From afdd9ea262e30b25492c8c3389b37b1705c9641b Mon Sep 17 00:00:00 2001 From: Vishnu kannan Date: Wed, 8 Jun 2016 16:42:10 -0700 Subject: [PATCH 1/2] When limits are not set, use capacity as limits in downward API for resources. Signed-off-by: Vishnu kannan --- pkg/fieldpath/fieldpath_test.go | 119 ++++++++++++++++++++++++++++++++ pkg/kubelet/kubelet.go | 23 +++++- pkg/kubelet/kubelet_test.go | 2 +- pkg/kubelet/resources.go | 52 ++++++++++++++ pkg/kubelet/resources_test.go | 89 ++++++++++++++++++++++++ 5 files changed, 282 insertions(+), 3 deletions(-) create mode 100644 pkg/kubelet/resources.go create mode 100644 pkg/kubelet/resources_test.go diff --git a/pkg/fieldpath/fieldpath_test.go b/pkg/fieldpath/fieldpath_test.go index d510426ee0..912011488e 100644 --- a/pkg/fieldpath/fieldpath_test.go +++ b/pkg/fieldpath/fieldpath_test.go @@ -20,7 +20,10 @@ import ( "strings" "testing" + "github.com/stretchr/testify/assert" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" ) func TestExtractFieldPathAsString(t *testing.T) { @@ -115,3 +118,119 @@ func TestExtractFieldPathAsString(t *testing.T) { } } } + +func getPod(cname, cpuRequest, cpuLimit, memoryRequest, memoryLimit string) *api.Pod { + resources := api.ResourceRequirements{ + Limits: make(api.ResourceList), + Requests: make(api.ResourceList), + } + if cpuLimit != "" { + resources.Limits[api.ResourceCPU] = resource.MustParse(cpuLimit) + } + if memoryLimit != "" { + resources.Limits[api.ResourceMemory] = resource.MustParse(memoryLimit) + } + if cpuRequest != "" { + resources.Requests[api.ResourceCPU] = resource.MustParse(cpuRequest) + } + if memoryRequest != "" { + resources.Requests[api.ResourceMemory] = resource.MustParse(memoryRequest) + } + return &api.Pod{ + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: cname, + Resources: resources, + }, + }, + }, + } +} + +func TestExtractResourceValue(t *testing.T) { + cases := []struct { + fs *api.ResourceFieldSelector + pod *api.Pod + cName string + expectedValue string + expectedError error + }{ + { + fs: &api.ResourceFieldSelector{ + Resource: "limits.cpu", + }, + cName: "foo", + pod: getPod("foo", "", "9", "", ""), + expectedValue: "9", + }, + { + fs: &api.ResourceFieldSelector{ + Resource: "requests.cpu", + }, + cName: "foo", + pod: getPod("foo", "", "", "", ""), + expectedValue: "0", + }, + { + fs: &api.ResourceFieldSelector{ + Resource: "requests.cpu", + }, + cName: "foo", + pod: getPod("foo", "8", "", "", ""), + expectedValue: "8", + }, + { + fs: &api.ResourceFieldSelector{ + Resource: "requests.cpu", + }, + cName: "foo", + pod: getPod("foo", "100m", "", "", ""), + expectedValue: "1", + }, + { + fs: &api.ResourceFieldSelector{ + Resource: "requests.cpu", + Divisor: resource.MustParse("100m"), + }, + cName: "foo", + pod: getPod("foo", "1200m", "", "", ""), + expectedValue: "12", + }, + { + fs: &api.ResourceFieldSelector{ + Resource: "requests.memory", + }, + cName: "foo", + pod: getPod("foo", "", "", "100Mi", ""), + expectedValue: "104857600", + }, + { + fs: &api.ResourceFieldSelector{ + Resource: "requests.memory", + Divisor: resource.MustParse("1Mi"), + }, + cName: "foo", + pod: getPod("foo", "", "", "100Mi", "1Gi"), + expectedValue: "100", + }, + { + fs: &api.ResourceFieldSelector{ + Resource: "limits.memory", + }, + cName: "foo", + pod: getPod("foo", "", "", "10Mi", "100Mi"), + expectedValue: "104857600", + }, + } + as := assert.New(t) + for idx, tc := range cases { + actual, err := ExtractResourceValueByContainerName(tc.fs, tc.pod, tc.cName) + if 
tc.expectedError != nil { + as.Equal(tc.expectedError, err, "expected test case [%d] to fail with error %v; got %v", idx, tc.expectedError, err) + } else { + as.Nil(err, "expected test case [%d] to not return an error; got %v", idx, err) + as.Equal(tc.expectedValue, actual, "expected test case [%d] to return %q; got %q instead", idx, tc.expectedValue, actual) + } + } +} diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 8b8e14cd95..dd46559f05 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -831,6 +831,8 @@ type Kubelet struct { // should manage attachment/detachment of volumes scheduled to this node, // and disable kubelet from executing any attach/detach operations enableControllerAttachDetach bool + + lastUpdatedNodeObject atomic.Value } // Validate given node IP belongs to the current host @@ -1143,6 +1145,10 @@ func (kl *Kubelet) registerWithApiserver() { glog.Errorf("Unable to construct api.Node object for kubelet: %v", err) continue } + + // Cache the node object. + kl.lastUpdatedNodeObject.Store(node) + glog.V(2).Infof("Attempting to register node %s", node.Name) if _, err := kl.kubeClient.Core().Nodes().Create(node); err != nil { if !apierrors.IsAlreadyExists(err) { @@ -1554,7 +1560,11 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *api.Pod, container *api.Contain return result, err } case envVar.ValueFrom.ResourceFieldRef != nil: - runtimeVal, err = containerResourceRuntimeValue(envVar.ValueFrom.ResourceFieldRef, pod, container) + defaultedPod, err := kl.defaultPodLimitsForDownwardApi(pod) + if err != nil { + return result, err + } + runtimeVal, err = containerResourceRuntimeValue(envVar.ValueFrom.ResourceFieldRef, defaultedPod, container) if err != nil { return result, err } @@ -1894,7 +1904,12 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { } // Mount volumes and update the volume manager - podVolumes, err := kl.mountExternalVolumes(pod) + // Default limits for containers here to have downward API expose user-friendly limits to pods. + defaultedPod, err := kl.defaultPodLimitsForDownwardApi(pod) + if err != nil { + return err + } + podVolumes, err := kl.mountExternalVolumes(defaultedPod) if err != nil { ref, errGetRef := api.GetReference(pod) if errGetRef == nil && ref != nil { @@ -3507,6 +3522,10 @@ func (kl *Kubelet) tryUpdateNodeStatus() error { } // Update the current status on the API server _, err = kl.kubeClient.Core().Nodes().UpdateStatus(node) + if err == nil { + // store recently updated node information. + kl.lastUpdatedNodeObject.Store(node) + } return err } diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index b40ad7defa..08a51ce20a 100644 --- a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -278,7 +278,7 @@ func newTestKubeletWithImageList(t *testing.T, imageList []kubecontainer.Image) } kubelet.evictionManager = evictionManager kubelet.AddPodAdmitHandler(evictionAdmitHandler) - + kubelet.lastUpdatedNodeObject.Store(&api.Node{}) return &TestKubelet{kubelet, fakeRuntime, mockCadvisor, fakeKubeClient, fakeMirrorClient, fakeClock, nil} } diff --git a/pkg/kubelet/resources.go b/pkg/kubelet/resources.go new file mode 100644 index 0000000000..40d480339e --- /dev/null +++ b/pkg/kubelet/resources.go @@ -0,0 +1,52 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubelet + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" +) + +func (kl *Kubelet) defaultPodLimitsForDownwardApi(pod *api.Pod) (*api.Pod, error) { + capacity := make(api.ResourceList) + lastUpdatedNodeObject := kl.lastUpdatedNodeObject.Load() + if lastUpdatedNodeObject == nil { + return nil, fmt.Errorf("Failed to find node object in cache. Expected a non-nil object in the cache.") + } else { + capacity = lastUpdatedNodeObject.(*api.Node).Status.Capacity + } + podCopy, err := api.Scheme.Copy(pod) + if err != nil { + return nil, fmt.Errorf("failed to perform a deep copy of pod object. Error: %v", err) + } + pod = podCopy.(*api.Pod) + for idx, c := range pod.Spec.Containers { + for _, resource := range []api.ResourceName{api.ResourceCPU, api.ResourceMemory} { + if quantity, exists := c.Resources.Limits[resource]; !exists || quantity.IsZero() { + if cap, exists := capacity[resource]; exists { + if pod.Spec.Containers[idx].Resources.Limits == nil { + pod.Spec.Containers[idx].Resources.Limits = make(api.ResourceList) + } + pod.Spec.Containers[idx].Resources.Limits[resource] = cap + } + + } + } + } + return pod, nil +} diff --git a/pkg/kubelet/resources_test.go b/pkg/kubelet/resources_test.go new file mode 100644 index 0000000000..146e7b14b8 --- /dev/null +++ b/pkg/kubelet/resources_test.go @@ -0,0 +1,89 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubelet + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" +) + +func TestPodResourceLimitsDefaulting(t *testing.T) { + tk := newTestKubelet(t) + node := &api.Node{ + Status: api.NodeStatus{ + Capacity: api.ResourceList{ + api.ResourceCPU: resource.MustParse("10"), + api.ResourceMemory: resource.MustParse("10Gi"), + }, + }, + } + tk.kubelet.lastUpdatedNodeObject.Store(node) + cases := []struct { + pod *api.Pod + expected *api.Pod + }{ + { + pod: getPod("0", "0"), + expected: getPod("10", "10Gi"), + }, + { + pod: getPod("1", "0"), + expected: getPod("1", "10Gi"), + }, + { + pod: getPod("", ""), + expected: getPod("10", "10Gi"), + }, + { + pod: getPod("0", "1Mi"), + expected: getPod("10", "1Mi"), + }, + } + as := assert.New(t) + for idx, tc := range cases { + actual, err := tk.kubelet.defaultPodLimitsForDownwardApi(tc.pod) + as.Nil(err, "failed to default pod limits: %v", err) + as.Equal(tc.expected, actual, "test case [%d] failed. 
Expected: %+v, Got: %+v", idx, tc.expected, actual) + } +} + +func getPod(cpuLimit, memoryLimit string) *api.Pod { + resources := api.ResourceRequirements{} + if cpuLimit != "" && memoryLimit != "" { + resources.Limits = make(api.ResourceList) + } + if cpuLimit != "" { + resources.Limits[api.ResourceCPU] = resource.MustParse(cpuLimit) + } + if memoryLimit != "" { + resources.Limits[api.ResourceMemory] = resource.MustParse(memoryLimit) + } + return &api.Pod{ + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "foo", + Resources: resources, + }, + }, + }, + } +} From 712860d55face9f7aec2ab6dfef9c370713e31f3 Mon Sep 17 00:00:00 2001 From: derekwaynecarr Date: Mon, 13 Jun 2016 17:07:56 -0400 Subject: [PATCH 2/2] Fix downward API for resource limits --- pkg/kubelet/kubelet.go | 7 +- pkg/kubelet/kubelet_resources.go | 84 +++++++++++++++++++ ...rces_test.go => kubelet_resources_test.go} | 4 +- pkg/kubelet/resources.go | 52 ------------ test/e2e/downward_api.go | 48 +++++++++++ 5 files changed, 138 insertions(+), 57 deletions(-) create mode 100644 pkg/kubelet/kubelet_resources.go rename pkg/kubelet/{resources_test.go => kubelet_resources_test.go} (94%) delete mode 100644 pkg/kubelet/resources.go diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index dd46559f05..4a87e6465f 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -832,6 +832,7 @@ type Kubelet struct { // and disable kubelet from executing any attach/detach operations enableControllerAttachDetach bool + // lastUpdatedNodeObject is a cached version of the node as last reported back to the api server. lastUpdatedNodeObject atomic.Value } @@ -1560,11 +1561,11 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *api.Pod, container *api.Contain return result, err } case envVar.ValueFrom.ResourceFieldRef != nil: - defaultedPod, err := kl.defaultPodLimitsForDownwardApi(pod) + defaultedPod, defaultedContainer, err := kl.defaultPodLimitsForDownwardApi(pod, container) if err != nil { return result, err } - runtimeVal, err = containerResourceRuntimeValue(envVar.ValueFrom.ResourceFieldRef, defaultedPod, container) + runtimeVal, err = containerResourceRuntimeValue(envVar.ValueFrom.ResourceFieldRef, defaultedPod, defaultedContainer) if err != nil { return result, err } @@ -1905,7 +1906,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { // Mount volumes and update the volume manager // Default limits for containers here to have downward API expose user-friendly limits to pods. - defaultedPod, err := kl.defaultPodLimitsForDownwardApi(pod) + defaultedPod, _, err := kl.defaultPodLimitsForDownwardApi(pod, nil) if err != nil { return err } diff --git a/pkg/kubelet/kubelet_resources.go b/pkg/kubelet/kubelet_resources.go new file mode 100644 index 0000000000..14557685e7 --- /dev/null +++ b/pkg/kubelet/kubelet_resources.go @@ -0,0 +1,84 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubelet + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" +) + +// defaultPodLimitsForDownwardApi copies the input pod, and optional container, +// and applies default resource limits. it returns a copy of the input pod, +// and a copy of the input container (if specified) with default limits +// applied. if a container has no limit specified, it will default the limit to +// the node capacity. +// TODO: if/when we have pod level resources, we need to update this function +// to use those limits instead of node capacity. +func (kl *Kubelet) defaultPodLimitsForDownwardApi(pod *api.Pod, container *api.Container) (*api.Pod, *api.Container, error) { + if pod == nil { + return nil, nil, fmt.Errorf("invalid input, pod cannot be nil") + } + + lastUpdatedNodeObject := kl.lastUpdatedNodeObject.Load() + if lastUpdatedNodeObject == nil { + return nil, nil, fmt.Errorf("failed to find node object in cache, expected a non-nil object in the cache.") + } + capacity := lastUpdatedNodeObject.(*api.Node).Status.Capacity + + podCopy, err := api.Scheme.Copy(pod) + if err != nil { + return nil, nil, fmt.Errorf("failed to perform a deep copy of pod object: %v", err) + } + outputPod, ok := podCopy.(*api.Pod) + if !ok { + return nil, nil, fmt.Errorf("unexpected type") + } + for idx := range outputPod.Spec.Containers { + mergeContainerResourceLimitsWithCapacity(&outputPod.Spec.Containers[idx], capacity) + } + + var outputContainer *api.Container + if container != nil { + containerCopy, err := api.Scheme.DeepCopy(container) + if err != nil { + return nil, nil, fmt.Errorf("failed to perform a deep copy of container object: %v", err) + } + outputContainer, ok = containerCopy.(*api.Container) + if !ok { + return nil, nil, fmt.Errorf("unexpected type") + } + mergeContainerResourceLimitsWithCapacity(outputContainer, capacity) + } + return outputPod, outputContainer, nil +} + +// mergeContainerResourceLimitsWithCapacity checks if a limit is applied for +// the container, and if not, it sets the limit based on the capacity. +func mergeContainerResourceLimitsWithCapacity(container *api.Container, + capacity api.ResourceList) { + if container.Resources.Limits == nil { + container.Resources.Limits = make(api.ResourceList) + } + for _, resource := range []api.ResourceName{api.ResourceCPU, api.ResourceMemory} { + if quantity, exists := container.Resources.Limits[resource]; !exists || quantity.IsZero() { + if cap, exists := capacity[resource]; exists { + container.Resources.Limits[resource] = *cap.Copy() + } + } + } +} diff --git a/pkg/kubelet/resources_test.go b/pkg/kubelet/kubelet_resources_test.go similarity index 94% rename from pkg/kubelet/resources_test.go rename to pkg/kubelet/kubelet_resources_test.go index 146e7b14b8..1d7ceec298 100644 --- a/pkg/kubelet/resources_test.go +++ b/pkg/kubelet/kubelet_resources_test.go @@ -59,7 +59,7 @@ func TestPodResourceLimitsDefaulting(t *testing.T) { } as := assert.New(t) for idx, tc := range cases { - actual, err := tk.kubelet.defaultPodLimitsForDownwardApi(tc.pod) + actual, _, err := tk.kubelet.defaultPodLimitsForDownwardApi(tc.pod, nil) as.Nil(err, "failed to default pod limits: %v", err) as.Equal(tc.expected, actual, "test case [%d] failed. 
Expected: %+v, Got: %+v", idx, tc.expected, actual) } @@ -67,7 +67,7 @@ func TestPodResourceLimitsDefaulting(t *testing.T) { func getPod(cpuLimit, memoryLimit string) *api.Pod { resources := api.ResourceRequirements{} - if cpuLimit != "" && memoryLimit != "" { + if cpuLimit != "" || memoryLimit != "" { resources.Limits = make(api.ResourceList) } if cpuLimit != "" { diff --git a/pkg/kubelet/resources.go b/pkg/kubelet/resources.go deleted file mode 100644 index 40d480339e..0000000000 --- a/pkg/kubelet/resources.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubelet - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" -) - -func (kl *Kubelet) defaultPodLimitsForDownwardApi(pod *api.Pod) (*api.Pod, error) { - capacity := make(api.ResourceList) - lastUpdatedNodeObject := kl.lastUpdatedNodeObject.Load() - if lastUpdatedNodeObject == nil { - return nil, fmt.Errorf("Failed to find node object in cache. Expected a non-nil object in the cache.") - } else { - capacity = lastUpdatedNodeObject.(*api.Node).Status.Capacity - } - podCopy, err := api.Scheme.Copy(pod) - if err != nil { - return nil, fmt.Errorf("failed to perform a deep copy of pod object. Error: %v", err) - } - pod = podCopy.(*api.Pod) - for idx, c := range pod.Spec.Containers { - for _, resource := range []api.ResourceName{api.ResourceCPU, api.ResourceMemory} { - if quantity, exists := c.Resources.Limits[resource]; !exists || quantity.IsZero() { - if cap, exists := capacity[resource]; exists { - if pod.Spec.Containers[idx].Resources.Limits == nil { - pod.Spec.Containers[idx].Resources.Limits = make(api.ResourceList) - } - pod.Spec.Containers[idx].Resources.Limits[resource] = cap - } - - } - } - } - return pod, nil -} diff --git a/test/e2e/downward_api.go b/test/e2e/downward_api.go index 6a2fdca193..e75f787f45 100644 --- a/test/e2e/downward_api.go +++ b/test/e2e/downward_api.go @@ -128,6 +128,50 @@ var _ = framework.KubeDescribe("Downward API", func() { testDownwardAPI(f, podName, env, expectations) }) + It("should provide default limits.cpu/memory from node capacity", func() { + podName := "downward-api-" + string(util.NewUUID()) + env := []api.EnvVar{ + { + Name: "CPU_LIMIT", + ValueFrom: &api.EnvVarSource{ + ResourceFieldRef: &api.ResourceFieldSelector{ + Resource: "limits.cpu", + }, + }, + }, + { + Name: "MEMORY_LIMIT", + ValueFrom: &api.EnvVarSource{ + ResourceFieldRef: &api.ResourceFieldSelector{ + Resource: "limits.memory", + }, + }, + }, + } + expectations := []string{ + fmt.Sprintf("CPU_LIMIT=[1-9]"), + fmt.Sprintf("MEMORY_LIMIT=[1-9]"), + } + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: podName, + Labels: map[string]string{"name": podName}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "dapi-container", + Image: "gcr.io/google_containers/busybox:1.24", + Command: []string{"sh", "-c", "env"}, + Env: env, + }, + }, + RestartPolicy: api.RestartPolicyNever, + }, + } + + testDownwardAPIUsingPod(f, pod, env, 
expectations) + }) }) func testDownwardAPI(f *framework.Framework, podName string, env []api.EnvVar, expectations []string) { @@ -159,5 +203,9 @@ func testDownwardAPI(f *framework.Framework, podName string, env []api.EnvVar, e }, } + testDownwardAPIUsingPod(f, pod, env, expectations) +} + +func testDownwardAPIUsingPod(f *framework.Framework, pod *api.Pod, env []api.EnvVar, expectations []string) { f.TestContainerOutputRegexp("downward api env vars", pod, 0, expectations) }