mirror of https://github.com/k3s-io/k3s
Merge pull request #27441 from derekwaynecarr/downward_api_node_defaults
Automatic merge from submit-queue. Revert revert of downward API node defaults. Reverts the revert of https://github.com/kubernetes/kubernetes/pull/27439. Fixes #27062. @dchen1107 - who at Google can help debug why this caused issues with GKE infrastructure but not the GCE merge queue? /cc @wojtek-t @piosz @fgrzadkowski @eparis @pmorie
commit 77cf11f4d7
@@ -20,7 +20,10 @@ import (
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
)

func TestExtractFieldPathAsString(t *testing.T) {
@@ -115,3 +118,119 @@ func TestExtractFieldPathAsString(t *testing.T) {
		}
	}
}

func getPod(cname, cpuRequest, cpuLimit, memoryRequest, memoryLimit string) *api.Pod {
	resources := api.ResourceRequirements{
		Limits:   make(api.ResourceList),
		Requests: make(api.ResourceList),
	}
	if cpuLimit != "" {
		resources.Limits[api.ResourceCPU] = resource.MustParse(cpuLimit)
	}
	if memoryLimit != "" {
		resources.Limits[api.ResourceMemory] = resource.MustParse(memoryLimit)
	}
	if cpuRequest != "" {
		resources.Requests[api.ResourceCPU] = resource.MustParse(cpuRequest)
	}
	if memoryRequest != "" {
		resources.Requests[api.ResourceMemory] = resource.MustParse(memoryRequest)
	}
	return &api.Pod{
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:      cname,
					Resources: resources,
				},
			},
		},
	}
}

func TestExtractResourceValue(t *testing.T) {
	cases := []struct {
		fs            *api.ResourceFieldSelector
		pod           *api.Pod
		cName         string
		expectedValue string
		expectedError error
	}{
		{
			fs: &api.ResourceFieldSelector{
				Resource: "limits.cpu",
			},
			cName:         "foo",
			pod:           getPod("foo", "", "9", "", ""),
			expectedValue: "9",
		},
		{
			fs: &api.ResourceFieldSelector{
				Resource: "requests.cpu",
			},
			cName:         "foo",
			pod:           getPod("foo", "", "", "", ""),
			expectedValue: "0",
		},
		{
			fs: &api.ResourceFieldSelector{
				Resource: "requests.cpu",
			},
			cName:         "foo",
			pod:           getPod("foo", "8", "", "", ""),
			expectedValue: "8",
		},
		{
			fs: &api.ResourceFieldSelector{
				Resource: "requests.cpu",
			},
			cName:         "foo",
			pod:           getPod("foo", "100m", "", "", ""),
			expectedValue: "1",
		},
		{
			fs: &api.ResourceFieldSelector{
				Resource: "requests.cpu",
				Divisor:  resource.MustParse("100m"),
			},
			cName:         "foo",
			pod:           getPod("foo", "1200m", "", "", ""),
			expectedValue: "12",
		},
		{
			fs: &api.ResourceFieldSelector{
				Resource: "requests.memory",
			},
			cName:         "foo",
			pod:           getPod("foo", "", "", "100Mi", ""),
			expectedValue: "104857600",
		},
		{
			fs: &api.ResourceFieldSelector{
				Resource: "requests.memory",
				Divisor:  resource.MustParse("1Mi"),
			},
			cName:         "foo",
			pod:           getPod("foo", "", "", "100Mi", "1Gi"),
			expectedValue: "100",
		},
		{
			fs: &api.ResourceFieldSelector{
				Resource: "limits.memory",
			},
			cName:         "foo",
			pod:           getPod("foo", "", "", "10Mi", "100Mi"),
			expectedValue: "104857600",
		},
	}
	as := assert.New(t)
	for idx, tc := range cases {
		actual, err := ExtractResourceValueByContainerName(tc.fs, tc.pod, tc.cName)
		if tc.expectedError != nil {
			as.Equal(tc.expectedError, err, "expected test case [%d] to fail with error %v; got %v", idx, tc.expectedError, err)
		} else {
			as.Nil(err, "expected test case [%d] to not return an error; got %v", idx, err)
			as.Equal(tc.expectedValue, actual, "expected test case [%d] to return %q; got %q instead", idx, tc.expectedValue, actual)
		}
	}
}
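For context: the expected values above follow the downward API divisor rule. The extracted quantity is divided by the selector's divisor (which defaults to 1) and rounded up to the nearest integer, so a 100m CPU request yields "1" with the default divisor and 1200m yields "12" with a 100m divisor. A minimal standalone sketch of that ceiling arithmetic (illustrative only, not part of this change; the ceilDiv helper is hypothetical):

package main

import "fmt"

// ceilDiv divides value by divisor and rounds up, mirroring how downward API
// resource values are rounded. Both arguments must be in the same unit
// (for example milli-CPU or bytes).
func ceilDiv(value, divisor int64) int64 {
	return (value + divisor - 1) / divisor
}

func main() {
	fmt.Println(ceilDiv(1200, 100))          // "1200m" with a "100m" divisor -> 12
	fmt.Println(ceilDiv(100, 1000))          // "100m" with the default divisor of 1 -> 1
	fmt.Println(ceilDiv(104857600, 1048576)) // 100Mi in bytes with a 1Mi divisor -> 100
}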
@@ -1121,6 +1121,7 @@ func (kl *Kubelet) initialNodeStatus() (*api.Node, error) {
	if err := kl.setNodeStatus(node); err != nil {
		return nil, err
	}

	return node, nil
}

@@ -1156,6 +1157,7 @@ func (kl *Kubelet) registerWithApiserver() {
			glog.Errorf("Unable to construct api.Node object for kubelet: %v", err)
			continue
		}

		glog.V(2).Infof("Attempting to register node %s", node.Name)
		if _, err := kl.kubeClient.Core().Nodes().Create(node); err != nil {
			if !apierrors.IsAlreadyExists(err) {

@@ -1564,7 +1566,11 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *api.Pod, container *api.Contain
				return result, err
			}
		case envVar.ValueFrom.ResourceFieldRef != nil:
			runtimeVal, err = containerResourceRuntimeValue(envVar.ValueFrom.ResourceFieldRef, pod, container)
			defaultedPod, defaultedContainer, err := kl.defaultPodLimitsForDownwardApi(pod, container)
			if err != nil {
				return result, err
			}
			runtimeVal, err = containerResourceRuntimeValue(envVar.ValueFrom.ResourceFieldRef, defaultedPod, defaultedContainer)
			if err != nil {
				return result, err
			}

@@ -1904,7 +1910,11 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
	}

	// Wait for volumes to attach/mount
	if err := kl.volumeManager.WaitForAttachAndMount(pod); err != nil {
	defaultedPod, _, err := kl.defaultPodLimitsForDownwardApi(pod, nil)
	if err != nil {
		return err
	}
	if err := kl.volumeManager.WaitForAttachAndMount(defaultedPod); err != nil {
		ref, errGetRef := api.GetReference(pod)
		if errGetRef == nil && ref != nil {
			kl.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedMountVolume, "Unable to mount volumes for pod %q: %v", format.Pod(pod), err)
@@ -0,0 +1,84 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
)

// defaultPodLimitsForDownwardApi copies the input pod, and optional container,
// and applies default resource limits. it returns a copy of the input pod,
// and a copy of the input container (if specified) with default limits
// applied. if a container has no limit specified, it will default the limit to
// the node capacity.
// TODO: if/when we have pod level resources, we need to update this function
// to use those limits instead of node capacity.
func (kl *Kubelet) defaultPodLimitsForDownwardApi(pod *api.Pod, container *api.Container) (*api.Pod, *api.Container, error) {
	if pod == nil {
		return nil, nil, fmt.Errorf("invalid input, pod cannot be nil")
	}

	node, err := kl.getNodeAnyWay()
	if err != nil {
		return nil, nil, fmt.Errorf("failed to find node object, expected a node")
	}
	capacity := node.Status.Capacity

	podCopy, err := api.Scheme.Copy(pod)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to perform a deep copy of pod object: %v", err)
	}
	outputPod, ok := podCopy.(*api.Pod)
	if !ok {
		return nil, nil, fmt.Errorf("unexpected type returned from deep copy of pod object")
	}
	for idx := range outputPod.Spec.Containers {
		mergeContainerResourceLimitsWithCapacity(&outputPod.Spec.Containers[idx], capacity)
	}

	var outputContainer *api.Container
	if container != nil {
		containerCopy, err := api.Scheme.DeepCopy(container)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to perform a deep copy of container object: %v", err)
		}
		outputContainer, ok = containerCopy.(*api.Container)
		if !ok {
			return nil, nil, fmt.Errorf("unexpected type returned from deep copy of container object")
		}
		mergeContainerResourceLimitsWithCapacity(outputContainer, capacity)
	}
	return outputPod, outputContainer, nil
}

// mergeContainerResourceLimitsWithCapacity checks if a limit is applied for
// the container, and if not, it sets the limit based on the capacity.
func mergeContainerResourceLimitsWithCapacity(container *api.Container,
	capacity api.ResourceList) {
	if container.Resources.Limits == nil {
		container.Resources.Limits = make(api.ResourceList)
	}
	for _, resource := range []api.ResourceName{api.ResourceCPU, api.ResourceMemory} {
		if quantity, exists := container.Resources.Limits[resource]; !exists || quantity.IsZero() {
			if cap, exists := capacity[resource]; exists {
				container.Resources.Limits[resource] = *cap.Copy()
			}
		}
	}
}
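For context, a small usage sketch of the merge helper above (illustrative only, not part of this change; it assumes the same kubelet package plus the pkg/api and pkg/api/resource imports used elsewhere in this PR). An explicit CPU limit is preserved while the missing memory limit falls back to the supplied node capacity:

// exampleMergeWithCapacity is a hypothetical sketch, not part of the diff.
func exampleMergeWithCapacity() {
	// Fabricated node capacity: 10 CPUs and 10Gi of memory.
	capacity := api.ResourceList{
		api.ResourceCPU:    resource.MustParse("10"),
		api.ResourceMemory: resource.MustParse("10Gi"),
	}
	container := &api.Container{
		Name: "foo",
		Resources: api.ResourceRequirements{
			// Only CPU is limited; the memory limit is left unset.
			Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("1")},
		},
	}
	mergeContainerResourceLimitsWithCapacity(container, capacity)
	cpu := container.Resources.Limits[api.ResourceCPU]
	mem := container.Resources.Limits[api.ResourceMemory]
	fmt.Printf("cpu=%s mem=%s\n", cpu.String(), mem.String()) // cpu=1 mem=10Gi
}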
@@ -0,0 +1,93 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
	"testing"

	"github.com/stretchr/testify/assert"

	cadvisorapi "github.com/google/cadvisor/info/v1"
	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
)

func TestPodResourceLimitsDefaulting(t *testing.T) {
	cpuCores := resource.MustParse("10")
	memoryCapacity := resource.MustParse("10Gi")
	tk := newTestKubelet(t, true)
	tk.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	tk.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{
		NumCores:       int(cpuCores.Value()),
		MemoryCapacity: uint64(memoryCapacity.Value()),
	}, nil)
	tk.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	tk.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	cases := []struct {
		pod      *api.Pod
		expected *api.Pod
	}{
		{
			pod:      getPod("0", "0"),
			expected: getPod("10", "10Gi"),
		},
		{
			pod:      getPod("1", "0"),
			expected: getPod("1", "10Gi"),
		},
		{
			pod:      getPod("", ""),
			expected: getPod("10", "10Gi"),
		},
		{
			pod:      getPod("0", "1Mi"),
			expected: getPod("10", "1Mi"),
		},
	}
	as := assert.New(t)
	for idx, tc := range cases {
		actual, _, err := tk.kubelet.defaultPodLimitsForDownwardApi(tc.pod, nil)
		as.Nil(err, "failed to default pod limits: %v", err)
		if !api.Semantic.DeepEqual(tc.expected, actual) {
			as.Fail("test case [%d] failed. Expected: %+v, Got: %+v", idx, tc.expected, actual)
		}
	}
}

func getPod(cpuLimit, memoryLimit string) *api.Pod {
	resources := api.ResourceRequirements{}
	if cpuLimit != "" || memoryLimit != "" {
		resources.Limits = make(api.ResourceList)
	}
	if cpuLimit != "" {
		resources.Limits[api.ResourceCPU] = resource.MustParse(cpuLimit)
	}
	if memoryLimit != "" {
		resources.Limits[api.ResourceMemory] = resource.MustParse(memoryLimit)
	}
	return &api.Pod{
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:      "foo",
					Resources: resources,
				},
			},
		},
	}
}
@@ -213,8 +213,10 @@ func newTestKubeletWithImageList(
		t.Fatalf("can't initialize kubelet data dirs: %v", err)
	}
	kubelet.daemonEndpoints = &api.NodeDaemonEndpoints{}

	mockCadvisor := &cadvisortest.Mock{}
	kubelet.cadvisor = mockCadvisor

	fakeMirrorClient := podtest.NewFakeMirrorClient()
	kubelet.podManager = kubepod.NewBasicPodManager(fakeMirrorClient)
	kubelet.statusManager = status.NewManager(fakeKubeClient, kubelet.podManager)

@@ -377,6 +379,7 @@ func TestSyncLoopAbort(t *testing.T) {

func TestSyncPodsStartPod(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

@@ -3380,6 +3383,12 @@ func TestUpdateNodeStatusError(t *testing.T) {
func TestCreateMirrorPod(t *testing.T) {
	for _, updateType := range []kubetypes.SyncPodType{kubetypes.SyncPodCreate, kubetypes.SyncPodUpdate} {
		testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
		testKubelet.fakeCadvisor.On("Start").Return(nil)
		testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
		testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
		testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
		testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

		kl := testKubelet.kubelet
		manager := testKubelet.fakeMirrorClient
		pod := podWithUidNameNs("12345678", "bar", "foo")

@@ -3407,6 +3416,7 @@ func TestCreateMirrorPod(t *testing.T) {
func TestDeleteOutdatedMirrorPod(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("Start").Return(nil)
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

@@ -3576,6 +3586,12 @@ func TestGetContainerInfoForMirrorPods(t *testing.T) {

func TestHostNetworkAllowed(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("Start").Return(nil)
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

	kubelet := testKubelet.kubelet

	capabilities.SetForTests(capabilities.Capabilities{

@@ -3606,6 +3622,12 @@ func TestHostNetworkAllowed(t *testing.T) {

func TestHostNetworkDisallowed(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("Start").Return(nil)
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

	kubelet := testKubelet.kubelet

	capabilities.SetForTests(capabilities.Capabilities{

@@ -3635,6 +3657,12 @@ func TestHostNetworkDisallowed(t *testing.T) {

func TestPrivilegeContainerAllowed(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("Start").Return(nil)
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

	kubelet := testKubelet.kubelet

	capabilities.SetForTests(capabilities.Capabilities{

@@ -3891,7 +3919,12 @@ func TestSyncPodsSetStatusToFailedForPodsThatRunTooLong(t *testing.T) {
func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	fakeRuntime := testKubelet.fakeRuntime
	testKubelet.fakeCadvisor.On("Start").Return(nil)
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

	kubelet := testKubelet.kubelet

	now := unversioned.Now()

@@ -3958,6 +3991,8 @@ func podWithUidNameNsSpec(uid types.UID, name, namespace string, spec api.PodSpe

func TestDeletePodDirsForDeletedPods(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("Start").Return(nil)
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

@@ -4005,6 +4040,8 @@ func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*api.Pod

func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("Start").Return(nil)
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

@@ -4025,6 +4062,8 @@ func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {

func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("Start").Return(nil)
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
@@ -128,6 +128,50 @@ var _ = framework.KubeDescribe("Downward API", func() {
		testDownwardAPI(f, podName, env, expectations)
	})

	It("should provide default limits.cpu/memory from node capacity", func() {
		podName := "downward-api-" + string(util.NewUUID())
		env := []api.EnvVar{
			{
				Name: "CPU_LIMIT",
				ValueFrom: &api.EnvVarSource{
					ResourceFieldRef: &api.ResourceFieldSelector{
						Resource: "limits.cpu",
					},
				},
			},
			{
				Name: "MEMORY_LIMIT",
				ValueFrom: &api.EnvVarSource{
					ResourceFieldRef: &api.ResourceFieldSelector{
						Resource: "limits.memory",
					},
				},
			},
		}
		expectations := []string{
			fmt.Sprintf("CPU_LIMIT=[1-9]"),
			fmt.Sprintf("MEMORY_LIMIT=[1-9]"),
		}
		pod := &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:   podName,
				Labels: map[string]string{"name": podName},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:    "dapi-container",
						Image:   "gcr.io/google_containers/busybox:1.24",
						Command: []string{"sh", "-c", "env"},
						Env:     env,
					},
				},
				RestartPolicy: api.RestartPolicyNever,
			},
		}

		testDownwardAPIUsingPod(f, pod, env, expectations)
	})
})

func testDownwardAPI(f *framework.Framework, podName string, env []api.EnvVar, expectations []string) {

@@ -159,5 +203,9 @@ func testDownwardAPI(f *framework.Framework, podName string, env []api.EnvVar, e
		},
	}

	testDownwardAPIUsingPod(f, pod, env, expectations)
}

func testDownwardAPIUsingPod(f *framework.Framework, pod *api.Pod, env []api.EnvVar, expectations []string) {
	f.TestContainerOutputRegexp("downward api env vars", pod, 0, expectations)
}