From f68f3ff7831384ae83dfe6d95c9a2dd4925c6efa Mon Sep 17 00:00:00 2001
From: Derek Carr
Date: Fri, 16 Mar 2018 14:45:14 -0400
Subject: [PATCH] Fix cpu cfs quota flag with pod cgroups

---
 cmd/kubelet/app/server.go                     |  1 +
 pkg/kubelet/cm/container_manager.go           |  1 +
 pkg/kubelet/cm/container_manager_linux.go     |  1 +
 pkg/kubelet/cm/helpers_linux.go               |  7 ++-
 pkg/kubelet/cm/helpers_linux_test.go          | 49 ++++++++++++++++---
 pkg/kubelet/cm/helpers_unsupported.go         |  2 +-
 pkg/kubelet/cm/pod_container_manager_linux.go |  4 ++-
 7 files changed, 54 insertions(+), 11 deletions(-)

diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go
index fd30f6aa08..ebdd31d040 100644
--- a/cmd/kubelet/app/server.go
+++ b/cmd/kubelet/app/server.go
@@ -661,6 +661,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) {
 			ExperimentalCPUManagerPolicy:          s.CPUManagerPolicy,
 			ExperimentalCPUManagerReconcilePeriod: s.CPUManagerReconcilePeriod.Duration,
 			ExperimentalPodPidsLimit:              s.PodPidsLimit,
+			EnforceCPULimits:                      s.CPUCFSQuota,
 		},
 		s.FailSwapOn,
 		devicePluginEnabled,
diff --git a/pkg/kubelet/cm/container_manager.go b/pkg/kubelet/cm/container_manager.go
index 7e2ae8e4b5..2a64db6714 100644
--- a/pkg/kubelet/cm/container_manager.go
+++ b/pkg/kubelet/cm/container_manager.go
@@ -111,6 +111,7 @@ type NodeConfig struct {
 	ExperimentalCPUManagerPolicy          string
 	ExperimentalCPUManagerReconcilePeriod time.Duration
 	ExperimentalPodPidsLimit              int64
+	EnforceCPULimits                      bool
 }
 
 type NodeAllocatableConfig struct {
diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go
index 42ad3125bf..236ac71dc1 100644
--- a/pkg/kubelet/cm/container_manager_linux.go
+++ b/pkg/kubelet/cm/container_manager_linux.go
@@ -301,6 +301,7 @@ func (cm *containerManagerImpl) NewPodContainerManager() PodContainerManager {
 			subsystems:    cm.subsystems,
 			cgroupManager: cm.cgroupManager,
 			podPidsLimit:  cm.ExperimentalPodPidsLimit,
+			enforceCPULimits: cm.EnforceCPULimits,
 		}
 	}
 	return &podContainerManagerNoop{
diff --git a/pkg/kubelet/cm/helpers_linux.go b/pkg/kubelet/cm/helpers_linux.go
index 935fb6c806..d04128edd7 100644
--- a/pkg/kubelet/cm/helpers_linux.go
+++ b/pkg/kubelet/cm/helpers_linux.go
@@ -103,7 +103,7 @@ func HugePageLimits(resourceList v1.ResourceList) map[int64]int64 {
 }
 
 // ResourceConfigForPod takes the input pod and outputs the cgroup resource config.
-func ResourceConfigForPod(pod *v1.Pod) *ResourceConfig {
+func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool) *ResourceConfig {
 	// sum requests and limits.
 	reqs, limits := resource.PodRequestsAndLimits(pod)
 
@@ -146,6 +146,11 @@ func ResourceConfigForPod(pod *v1.Pod) *ResourceConfig {
 		}
 	}
 
+	// quota is not capped when cfs quota is disabled
+	if !enforceCPULimits {
+		cpuQuota = int64(-1)
+	}
+
 	// determine the qos class
 	qosClass := v1qos.GetPodQOS(pod)
 
diff --git a/pkg/kubelet/cm/helpers_linux_test.go b/pkg/kubelet/cm/helpers_linux_test.go
index d92de4322c..30894e9fdf 100644
--- a/pkg/kubelet/cm/helpers_linux_test.go
+++ b/pkg/kubelet/cm/helpers_linux_test.go
@@ -57,10 +57,12 @@ func TestResourceConfigForPod(t *testing.T) {
 	guaranteedShares := MilliCPUToShares(100)
 	guaranteedQuota, guaranteedPeriod := MilliCPUToQuota(100)
 	memoryQuantity = resource.MustParse("100Mi")
+	cpuNoLimit := int64(-1)
 	guaranteedMemory := memoryQuantity.Value()
 	testCases := map[string]struct {
-		pod      *v1.Pod
-		expected *ResourceConfig
+		pod              *v1.Pod
+		expected         *ResourceConfig
+		enforceCPULimits bool
 	}{
 		"besteffort": {
 			pod: &v1.Pod{
@@ -72,7 +74,8 @@ func TestResourceConfigForPod(t *testing.T) {
 					},
 				},
 			},
-			expected: &ResourceConfig{CpuShares: &minShares},
+			enforceCPULimits: true,
+			expected:         &ResourceConfig{CpuShares: &minShares},
 		},
 		"burstable-no-limits": {
 			pod: &v1.Pod{
@@ -84,7 +87,8 @@ func TestResourceConfigForPod(t *testing.T) {
 					},
 				},
 			},
-			expected: &ResourceConfig{CpuShares: &burstableShares},
+			enforceCPULimits: true,
+			expected:         &ResourceConfig{CpuShares: &burstableShares},
 		},
 		"burstable-with-limits": {
 			pod: &v1.Pod{
@@ -96,7 +100,21 @@ func TestResourceConfigForPod(t *testing.T) {
 					},
 				},
 			},
-			expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &burstableQuota, CpuPeriod: &burstablePeriod, Memory: &burstableMemory},
+			enforceCPULimits: true,
+			expected:         &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &burstableQuota, CpuPeriod: &burstablePeriod, Memory: &burstableMemory},
+		},
+		"burstable-with-limits-no-cpu-enforcement": {
+			pod: &v1.Pod{
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{
+							Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
+						},
+					},
+				},
+			},
+			enforceCPULimits: false,
+			expected:         &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &cpuNoLimit, CpuPeriod: &burstablePeriod, Memory: &burstableMemory},
 		},
 		"burstable-partial-limits": {
 			pod: &v1.Pod{
@@ -111,7 +129,8 @@ func TestResourceConfigForPod(t *testing.T) {
 					},
 				},
 			},
-			expected: &ResourceConfig{CpuShares: &burstablePartialShares},
+			enforceCPULimits: true,
+			expected:         &ResourceConfig{CpuShares: &burstablePartialShares},
 		},
 		"guaranteed": {
 			pod: &v1.Pod{
@@ -123,11 +142,25 @@ func TestResourceConfigForPod(t *testing.T) {
 					},
 				},
 			},
-			expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &guaranteedPeriod, Memory: &guaranteedMemory},
+			enforceCPULimits: true,
+			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &guaranteedPeriod, Memory: &guaranteedMemory},
+		},
+		"guaranteed-no-cpu-enforcement": {
+			pod: &v1.Pod{
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{
+							Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
+						},
+					},
+				},
+			},
+			enforceCPULimits: false,
+			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &guaranteedPeriod, Memory: &guaranteedMemory},
 		},
 	}
 	for testName, testCase := range testCases {
-		actual := ResourceConfigForPod(testCase.pod)
+		actual := ResourceConfigForPod(testCase.pod, testCase.enforceCPULimits)
 		if !reflect.DeepEqual(actual.CpuPeriod, testCase.expected.CpuPeriod) {
 			t.Errorf("unexpected result, test: %v, cpu period not as expected", testName)
 		}
diff --git a/pkg/kubelet/cm/helpers_unsupported.go b/pkg/kubelet/cm/helpers_unsupported.go
index b572f3456f..ee3ed91d55 100644
--- a/pkg/kubelet/cm/helpers_unsupported.go
+++ b/pkg/kubelet/cm/helpers_unsupported.go
@@ -43,7 +43,7 @@ func MilliCPUToShares(milliCPU int64) int64 {
 }
 
 // ResourceConfigForPod takes the input pod and outputs the cgroup resource config.
-func ResourceConfigForPod(pod *v1.Pod) *ResourceConfig {
+func ResourceConfigForPod(pod *v1.Pod, enforceCPULimit bool) *ResourceConfig {
 	return nil
 }
 
diff --git a/pkg/kubelet/cm/pod_container_manager_linux.go b/pkg/kubelet/cm/pod_container_manager_linux.go
index 2b0cbf8301..a9a873691c 100644
--- a/pkg/kubelet/cm/pod_container_manager_linux.go
+++ b/pkg/kubelet/cm/pod_container_manager_linux.go
@@ -49,6 +49,8 @@ type podContainerManagerImpl struct {
 	cgroupManager CgroupManager
 	// Maximum number of pids in a pod
 	podPidsLimit int64
+	// enforceCPULimits controls whether cfs quota is enforced or not
+	enforceCPULimits bool
 }
 
 // Make sure that podContainerManagerImpl implements the PodContainerManager interface
@@ -79,7 +81,7 @@ func (m *podContainerManagerImpl) EnsureExists(pod *v1.Pod) error {
 	// Create the pod container
 	containerConfig := &CgroupConfig{
 		Name:               podContainerName,
-		ResourceParameters: ResourceConfigForPod(pod),
+		ResourceParameters: ResourceConfigForPod(pod, m.enforceCPULimits),
 	}
 	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) && m.podPidsLimit > 0 {
 		containerConfig.ResourceParameters.PodPidsLimit = &m.podPidsLimit
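
Review note: the behavioral core of this patch is the new enforceCPULimits
parameter on ResourceConfigForPod, wired from the kubelet's --cpu-cfs-quota
flag (s.CPUCFSQuota). When the flag is false, the pod-level cgroup keeps its
CPU shares, but the CFS quota derived from the pod's CPU limits is replaced
with -1 (uncapped). The standalone Go sketch below illustrates that rule; the
constants and the milliCPUToQuota helper are simplified stand-ins for the
kubelet's MilliCPUToQuota, not the real implementation.

package main

import "fmt"

// Stand-in constants for the kernel CFS bandwidth controller defaults the
// kubelet uses; restated here for the sketch rather than imported.
const (
	quotaPeriod    int64 = 100000 // cfs_period_us: 100ms default period
	minQuotaPeriod int64 = 1000   // smallest enforceable cfs_quota_us
)

// milliCPUToQuota is a simplified stand-in for the kubelet's MilliCPUToQuota:
// it converts a CPU limit in millicores to a CFS quota over the default period.
func milliCPUToQuota(milliCPU int64) (quota int64, period int64) {
	if milliCPU == 0 {
		return // no CPU limit: leave quota and period unset
	}
	period = quotaPeriod
	quota = (milliCPU * quotaPeriod) / 1000
	if quota < minQuotaPeriod {
		quota = minQuotaPeriod
	}
	return
}

// podCPUQuota applies the rule this patch adds to ResourceConfigForPod:
// derive the quota from the pod's summed CPU limits, then discard it when
// CFS quota enforcement is disabled.
func podCPUQuota(cpuLimitMilli int64, enforceCPULimits bool) int64 {
	quota, _ := milliCPUToQuota(cpuLimitMilli)
	// quota is not capped when cfs quota is disabled
	if !enforceCPULimits {
		quota = int64(-1)
	}
	return quota
}

func main() {
	fmt.Println(podCPUQuota(200, true))  // 20000: a 200m limit capped over a 100000us period
	fmt.Println(podCPUQuota(200, false)) // -1: uncapped; cpu.shares weighting still applies
}

Under this sketch, a pod with a 200m CPU limit maps to cpu.cfs_quota_us=20000
over the default 100000us period when enforcement is on, and to -1 when it is
off, matching the new burstable-with-limits-no-cpu-enforcement and
guaranteed-no-cpu-enforcement test cases above.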