From 4c30459e49bbce29b7d0fa0fbb4e69a4234485e2 Mon Sep 17 00:00:00 2001
From: Seth Jennings
Date: Mon, 19 Dec 2016 15:02:01 -0600
Subject: [PATCH] switch from local qos types to api types

---
 pkg/kubelet/cm/container_manager_linux.go     |  6 ++--
 pkg/kubelet/cm/helpers_linux.go               |  4 +--
 pkg/kubelet/cm/pod_container_manager_linux.go |  6 ++--
 pkg/kubelet/eviction/eviction_manager.go      |  2 +-
 pkg/kubelet/eviction/helpers.go               |  6 ++--
 pkg/kubelet/qos/policy.go                     |  4 +--
 pkg/kubelet/qos/qos.go                        | 25 +++++++--------
 pkg/kubelet/qos/qos_test.go                   | 32 +++++++++----------
 pkg/kubelet/qos/types.go                      | 29 -----------------
 pkg/quota/evaluator/core/pods.go              |  2 +-
 .../algorithm/predicates/predicates.go        |  2 +-
 test/e2e_node/cgroup_manager_test.go          |  3 +-
 12 files changed, 45 insertions(+), 76 deletions(-)
 delete mode 100644 pkg/kubelet/qos/types.go

diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go
index 640dd244cf..3308882fa3 100644
--- a/pkg/kubelet/cm/container_manager_linux.go
+++ b/pkg/kubelet/cm/container_manager_linux.go
@@ -276,7 +276,7 @@ func InitQOS(cgroupDriver, rootContainer string, subsystems *CgroupSubsystems) (
     cm := NewCgroupManager(subsystems, cgroupDriver)
     // Top level for Qos containers are created only for Burstable
     // and Best Effort classes
-    qosClasses := [2]qos.QOSClass{qos.Burstable, qos.BestEffort}
+    qosClasses := [2]v1.PodQOSClass{v1.PodQOSBurstable, v1.PodQOSBestEffort}
     // Create containers for both qos classes
     for _, qosClass := range qosClasses {
@@ -297,8 +297,8 @@ func InitQOS(cgroupDriver, rootContainer string, subsystems *CgroupSubsystems) (
     // Store the top level qos container names
     qosContainersInfo := QOSContainersInfo{
         Guaranteed: rootContainer,
-        Burstable:  path.Join(rootContainer, string(qos.Burstable)),
-        BestEffort: path.Join(rootContainer, string(qos.BestEffort)),
+        Burstable:  path.Join(rootContainer, string(v1.PodQOSBurstable)),
+        BestEffort: path.Join(rootContainer, string(v1.PodQOSBestEffort)),
     }
     return qosContainersInfo, nil
 }
diff --git a/pkg/kubelet/cm/helpers_linux.go b/pkg/kubelet/cm/helpers_linux.go
index 47d5a2e63b..2e0d41b9c5 100644
--- a/pkg/kubelet/cm/helpers_linux.go
+++ b/pkg/kubelet/cm/helpers_linux.go
@@ -111,12 +111,12 @@ func ResourceConfigForPod(pod *v1.Pod) *ResourceConfig {
     // build the result
     result := &ResourceConfig{}
-    if qosClass == qos.Guaranteed {
+    if qosClass == v1.PodQOSGuaranteed {
         result.CpuShares = &cpuShares
         result.CpuQuota = &cpuQuota
         result.CpuPeriod = &cpuPeriod
         result.Memory = &memoryLimits
-    } else if qosClass == qos.Burstable {
+    } else if qosClass == v1.PodQOSBurstable {
         result.CpuShares = &cpuShares
         if cpuLimitsDeclared {
             result.CpuQuota = &cpuQuota
diff --git a/pkg/kubelet/cm/pod_container_manager_linux.go b/pkg/kubelet/cm/pod_container_manager_linux.go
index eb7e278966..b3c0a3a3c7 100644
--- a/pkg/kubelet/cm/pod_container_manager_linux.go
+++ b/pkg/kubelet/cm/pod_container_manager_linux.go
@@ -99,11 +99,11 @@ func (m *podContainerManagerImpl) GetPodContainerName(pod *v1.Pod) (CgroupName,
     // Get the parent QOS container name
     var parentContainer string
     switch podQOS {
-    case qos.Guaranteed:
+    case v1.PodQOSGuaranteed:
         parentContainer = m.qosContainersInfo.Guaranteed
-    case qos.Burstable:
+    case v1.PodQOSBurstable:
         parentContainer = m.qosContainersInfo.Burstable
-    case qos.BestEffort:
+    case v1.PodQOSBestEffort:
         parentContainer = m.qosContainersInfo.BestEffort
     }
     podContainer := podCgroupNamePrefix + string(pod.UID)
diff --git a/pkg/kubelet/eviction/eviction_manager.go b/pkg/kubelet/eviction/eviction_manager.go
index c4b2d5d95c..7a78f6d516 100644
--- a/pkg/kubelet/eviction/eviction_manager.go
+++ b/pkg/kubelet/eviction/eviction_manager.go
@@ -109,7 +109,7 @@ func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAd
     // the node has memory pressure, admit if not best-effort
     if hasNodeCondition(m.nodeConditions, v1.NodeMemoryPressure) {
-        notBestEffort := qos.BestEffort != qos.GetPodQOS(attrs.Pod)
+        notBestEffort := v1.PodQOSBestEffort != qos.GetPodQOS(attrs.Pod)
         if notBestEffort || kubetypes.IsCriticalPod(attrs.Pod) {
             return lifecycle.PodAdmitResult{Admit: true}
         }
diff --git a/pkg/kubelet/eviction/helpers.go b/pkg/kubelet/eviction/helpers.go
index 31b87dc81c..169e8ca01c 100644
--- a/pkg/kubelet/eviction/helpers.go
+++ b/pkg/kubelet/eviction/helpers.go
@@ -493,12 +493,12 @@ func qosComparator(p1, p2 *v1.Pod) int {
         return 0
     }
     // if p1 is best effort, we know p2 is burstable or guaranteed
-    if qosP1 == qos.BestEffort {
+    if qosP1 == v1.PodQOSBestEffort {
         return -1
     }
     // we know p1 and p2 are not besteffort, so if p1 is burstable, p2 must be guaranteed
-    if qosP1 == qos.Burstable {
-        if qosP2 == qos.Guaranteed {
+    if qosP1 == v1.PodQOSBurstable {
+        if qosP2 == v1.PodQOSGuaranteed {
             return -1
         }
         return 1
diff --git a/pkg/kubelet/qos/policy.go b/pkg/kubelet/qos/policy.go
index c6c44086ff..3f7f4bcf98 100644
--- a/pkg/kubelet/qos/policy.go
+++ b/pkg/kubelet/qos/policy.go
@@ -49,10 +49,10 @@ func GetContainerOOMScoreAdjust(pod *v1.Pod, container *v1.Container, memoryCapa
     }
     switch GetPodQOS(pod) {
-    case Guaranteed:
+    case v1.PodQOSGuaranteed:
         // Guaranteed containers should be the last to get killed.
         return guaranteedOOMScoreAdj
-    case BestEffort:
+    case v1.PodQOSBestEffort:
         return besteffortOOMScoreAdj
     }
diff --git a/pkg/kubelet/qos/qos.go b/pkg/kubelet/qos/qos.go
index f515ca30dc..914ac64cb8 100644
--- a/pkg/kubelet/qos/qos.go
+++ b/pkg/kubelet/qos/qos.go
@@ -47,7 +47,7 @@ func isResourceBestEffort(container *v1.Container, resource v1.ResourceName) boo
 // A pod is besteffort if none of its containers have specified any requests or limits.
 // A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
 // A pod is burstable if limits and requests do not match across all containers.
-func GetPodQOS(pod *v1.Pod) QOSClass {
+func GetPodQOS(pod *v1.Pod) v1.PodQOSClass {
     requests := v1.ResourceList{}
     limits := v1.ResourceList{}
     zeroQuantity := resource.MustParse("0")
@@ -91,7 +91,7 @@ func GetPodQOS(pod *v1.Pod) QOSClass {
         }
     }
     if len(requests) == 0 && len(limits) == 0 {
-        return BestEffort
+        return v1.PodQOSBestEffort
     }
     // Check is requests match limits for all resources.
     if isGuaranteed {
@@ -104,21 +104,20 @@
     }
     if isGuaranteed && len(requests) == len(limits) {
-        return Guaranteed
+        return v1.PodQOSGuaranteed
     }
-    return Burstable
+    return v1.PodQOSBurstable
 }
 // InternalGetPodQOS returns the QoS class of a pod.
 // A pod is besteffort if none of its containers have specified any requests or limits.
 // A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
 // A pod is burstable if limits and requests do not match across all containers.
-func InternalGetPodQOS(pod *api.Pod) QOSClass {
+func InternalGetPodQOS(pod *api.Pod) api.PodQOSClass {
     requests := api.ResourceList{}
     limits := api.ResourceList{}
     zeroQuantity := resource.MustParse("0")
     isGuaranteed := true
-    var supportedQoSComputeResources = sets.NewString(string(api.ResourceCPU), string(api.ResourceMemory))
     for _, container := range pod.Spec.Containers {
         // process requests
         for name, quantity := range container.Resources.Requests {
@@ -158,7 +157,7 @@ func InternalGetPodQOS(pod *api.Pod) QOSClass {
         }
     }
     if len(requests) == 0 && len(limits) == 0 {
-        return BestEffort
+        return api.PodQOSBestEffort
     }
     // Check is requests match limits for all resources.
     if isGuaranteed {
@@ -171,13 +170,13 @@ func InternalGetPodQOS(pod *api.Pod) QOSClass {
     }
     if isGuaranteed && len(requests) == len(limits) {
-        return Guaranteed
+        return api.PodQOSGuaranteed
     }
-    return Burstable
+    return api.PodQOSBurstable
 }
 // QOSList is a set of (resource name, QoS class) pairs.
-type QOSList map[v1.ResourceName]QOSClass
+type QOSList map[v1.ResourceName]v1.PodQOSClass
 // GetQOS returns a mapping of resource name to QoS class of a container
 func GetQOS(container *v1.Container) QOSList {
@@ -185,11 +184,11 @@ func GetQOS(container *v1.Container) QOSList {
     for resource := range allResources(container) {
         switch {
         case isResourceGuaranteed(container, resource):
-            resourceToQOS[resource] = Guaranteed
+            resourceToQOS[resource] = v1.PodQOSGuaranteed
         case isResourceBestEffort(container, resource):
-            resourceToQOS[resource] = BestEffort
+            resourceToQOS[resource] = v1.PodQOSBestEffort
         default:
-            resourceToQOS[resource] = Burstable
+            resourceToQOS[resource] = v1.PodQOSBurstable
         }
     }
     return resourceToQOS
diff --git a/pkg/kubelet/qos/qos_test.go b/pkg/kubelet/qos/qos_test.go
index 9f6ffdc7b6..939ff85553 100644
--- a/pkg/kubelet/qos/qos_test.go
+++ b/pkg/kubelet/qos/qos_test.go
@@ -67,105 +67,105 @@ func newPod(name string, containers []v1.Container) *v1.Pod {
 func TestGetPodQOS(t *testing.T) {
     testCases := []struct {
         pod      *v1.Pod
-        expected QOSClass
+        expected v1.PodQOSClass
     }{
         {
             pod: newPod("guaranteed", []v1.Container{
                 newContainer("guaranteed", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
             }),
-            expected: Guaranteed,
+            expected: v1.PodQOSGuaranteed,
         },
         {
             pod: newPod("guaranteed-with-gpu", []v1.Container{
                 newContainer("guaranteed", getResourceList("100m", "100Mi"), addResource("nvidia-gpu", "2", getResourceList("100m", "100Mi"))),
             }),
-            expected: Guaranteed,
+            expected: v1.PodQOSGuaranteed,
         },
         {
             pod: newPod("guaranteed-guaranteed", []v1.Container{
                 newContainer("guaranteed", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
                 newContainer("guaranteed", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
             }),
-            expected: Guaranteed,
+            expected: v1.PodQOSGuaranteed,
         },
         {
             pod: newPod("guaranteed-guaranteed-with-gpu", []v1.Container{
                 newContainer("guaranteed", getResourceList("100m", "100Mi"), addResource("nvidia-gpu", "2", getResourceList("100m", "100Mi"))),
                 newContainer("guaranteed", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
             }),
-            expected: Guaranteed,
+            expected: v1.PodQOSGuaranteed,
         },
         {
             pod: newPod("best-effort-best-effort", []v1.Container{
                 newContainer("best-effort", getResourceList("", ""), getResourceList("", "")),
                 newContainer("best-effort", getResourceList("", ""), getResourceList("", "")),
             }),
-            expected: BestEffort,
+            expected: v1.PodQOSBestEffort,
         },
         {
             pod: newPod("best-effort-best-effort-with-gpu", []v1.Container{
newContainer("best-effort", getResourceList("", ""), addResource("nvidia-gpu", "2", getResourceList("", ""))), newContainer("best-effort", getResourceList("", ""), getResourceList("", "")), }), - expected: BestEffort, + expected: v1.PodQOSBestEffort, }, { pod: newPod("best-effort-with-gpu", []v1.Container{ newContainer("best-effort", getResourceList("", ""), addResource("nvidia-gpu", "2", getResourceList("", ""))), }), - expected: BestEffort, + expected: v1.PodQOSBestEffort, }, { pod: newPod("best-effort-burstable", []v1.Container{ newContainer("best-effort", getResourceList("", ""), addResource("nvidia-gpu", "2", getResourceList("", ""))), newContainer("burstable", getResourceList("1", ""), getResourceList("2", "")), }), - expected: Burstable, + expected: v1.PodQOSBurstable, }, { pod: newPod("best-effort-guaranteed", []v1.Container{ newContainer("best-effort", getResourceList("", ""), addResource("nvidia-gpu", "2", getResourceList("", ""))), newContainer("guaranteed", getResourceList("10m", "100Mi"), getResourceList("10m", "100Mi")), }), - expected: Burstable, + expected: v1.PodQOSBurstable, }, { pod: newPod("burstable-cpu-guaranteed-memory", []v1.Container{ newContainer("burstable", getResourceList("", "100Mi"), getResourceList("", "100Mi")), }), - expected: Burstable, + expected: v1.PodQOSBurstable, }, { pod: newPod("burstable-no-limits", []v1.Container{ newContainer("burstable", getResourceList("100m", "100Mi"), getResourceList("", "")), }), - expected: Burstable, + expected: v1.PodQOSBurstable, }, { pod: newPod("burstable-guaranteed", []v1.Container{ newContainer("burstable", getResourceList("1", "100Mi"), getResourceList("2", "100Mi")), newContainer("guaranteed", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")), }), - expected: Burstable, + expected: v1.PodQOSBurstable, }, { pod: newPod("burstable-unbounded-but-requests-match-limits", []v1.Container{ newContainer("burstable", getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")), newContainer("burstable-unbounded", getResourceList("100m", "100Mi"), getResourceList("", "")), }), - expected: Burstable, + expected: v1.PodQOSBurstable, }, { pod: newPod("burstable-1", []v1.Container{ newContainer("burstable", getResourceList("10m", "100Mi"), getResourceList("100m", "200Mi")), }), - expected: Burstable, + expected: v1.PodQOSBurstable, }, { pod: newPod("burstable-2", []v1.Container{ newContainer("burstable", getResourceList("0", "0"), addResource("nvidia-gpu", "2", getResourceList("100m", "200Mi"))), }), - expected: Burstable, + expected: v1.PodQOSBurstable, }, } for id, testCase := range testCases { diff --git a/pkg/kubelet/qos/types.go b/pkg/kubelet/qos/types.go deleted file mode 100644 index e52dece45a..0000000000 --- a/pkg/kubelet/qos/types.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package qos - -// QOSClass defines the supported qos classes of Pods/Containers. -type QOSClass string - -const ( - // Guaranteed is the Guaranteed qos class. 
-    Guaranteed QOSClass = "Guaranteed"
-    // Burstable is the Burstable qos class.
-    Burstable QOSClass = "Burstable"
-    // BestEffort is the BestEffort qos class.
-    BestEffort QOSClass = "BestEffort"
-)
diff --git a/pkg/quota/evaluator/core/pods.go b/pkg/quota/evaluator/core/pods.go
index 006f835c55..884a7f03f2 100644
--- a/pkg/quota/evaluator/core/pods.go
+++ b/pkg/quota/evaluator/core/pods.go
@@ -256,7 +256,7 @@ func PodUsageFunc(obj runtime.Object) (api.ResourceList, error) {
 }
 func isBestEffort(pod *api.Pod) bool {
-    return qos.InternalGetPodQOS(pod) == qos.BestEffort
+    return qos.InternalGetPodQOS(pod) == api.PodQOSBestEffort
 }
 func isTerminating(pod *api.Pod) bool {
diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/plugin/pkg/scheduler/algorithm/predicates/predicates.go
index 2e5b6d934b..f6c31268c2 100644
--- a/plugin/pkg/scheduler/algorithm/predicates/predicates.go
+++ b/plugin/pkg/scheduler/algorithm/predicates/predicates.go
@@ -1202,7 +1202,7 @@ func tolerationsToleratesTaints(tolerations []v1.Toleration, taints []v1.Taint)
 // Determine if a pod is scheduled with best-effort QoS
 func isPodBestEffort(pod *v1.Pod) bool {
-    return qos.GetPodQOS(pod) == qos.BestEffort
+    return qos.GetPodQOS(pod) == v1.PodQOSBestEffort
 }
 // CheckNodeMemoryPressurePredicate checks if a pod can be scheduled on a node
diff --git a/test/e2e_node/cgroup_manager_test.go b/test/e2e_node/cgroup_manager_test.go
index ef1bb5ce2c..f812e584e1 100644
--- a/test/e2e_node/cgroup_manager_test.go
+++ b/test/e2e_node/cgroup_manager_test.go
@@ -20,7 +20,6 @@ import (
     "k8s.io/kubernetes/pkg/api/resource"
     "k8s.io/kubernetes/pkg/api/v1"
     "k8s.io/kubernetes/pkg/kubelet/cm"
-    "k8s.io/kubernetes/pkg/kubelet/qos"
     "k8s.io/kubernetes/pkg/util/uuid"
     "k8s.io/kubernetes/test/e2e/framework"
@@ -146,7 +145,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
             if !framework.TestContext.KubeletConfig.ExperimentalCgroupsPerQOS {
                 return
             }
-            cgroupsToVerify := []cm.CgroupName{cm.CgroupName(qos.Burstable), cm.CgroupName(qos.BestEffort)}
+            cgroupsToVerify := []cm.CgroupName{cm.CgroupName(v1.PodQOSBurstable), cm.CgroupName(v1.PodQOSBestEffort)}
             pod := makePodToVerifyCgroups(cgroupsToVerify)
             f.PodClient().Create(pod)
             err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
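
Note (reviewer sketch, not part of the patch): after this change the kubelet-local qos.Guaranteed/Burstable/BestEffort constants are gone, and callers compare the result of qos.GetPodQOS against the API-level v1.PodQOS* constants instead. A minimal illustration of the new caller-side pattern, assuming the 1.5-era import paths used in this patch; describePodQOS and the main wrapper are hypothetical helpers added only for this example.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/kubelet/qos"
)

// describePodQOS is a hypothetical helper (not in this PR) showing how callers
// now switch on the v1.PodQOSClass value returned by qos.GetPodQOS.
func describePodQOS(pod *v1.Pod) string {
	switch qos.GetPodQOS(pod) {
	case v1.PodQOSGuaranteed:
		return "Guaranteed: every container sets requests equal to limits"
	case v1.PodQOSBurstable:
		return "Burstable: some requests or limits are set, but not all equal"
	default:
		return "BestEffort: no requests or limits set on any container"
	}
}

func main() {
	// A pod with no containers has no requests or limits, so GetPodQOS
	// classifies it as BestEffort.
	fmt.Println(describePodQOS(&v1.Pod{}))
}

The same comparison style applies on the internal API side: qos.InternalGetPodQOS(pod) now returns api.PodQOSClass and is compared against api.PodQOSBestEffort, as in pkg/quota/evaluator/core/pods.go above.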