mirror of https://github.com/k3s-io/k3s
Merge pull request #28093 from dubstack/dubstack-refactor-qos2
Automatic merge from submit-queue

[Refactor] Make QoS naming consistent across the codebase

@derekwaynecarr @vishh PTAL. Can one of you please attach a LGTM.
commit 1a1a4709b9

@@ -540,7 +540,7 @@ func describePod(pod *api.Pod, events *api.EventList) (string, error) {
 			}
 		}
 		describeVolumes(pod.Spec.Volumes, out, "")
-		fmt.Fprintf(out, "QoS Class:\t%s\n", qos.GetPodQos(pod))
+		fmt.Fprintf(out, "QoS Class:\t%s\n", qos.GetPodQOS(pod))
 		if events != nil {
 			DescribeEvents(events, out)
 		}

@@ -62,7 +62,7 @@ func (list SortableResourceQuotas) Less(i, j int) bool {
 }
 
 // SortedQoSResourceNames returns the sorted resource names of a QoS list.
-func SortedQoSResourceNames(list qos.QoSList) []api.ResourceName {
+func SortedQoSResourceNames(list qos.QOSList) []api.ResourceName {
 	resources := make([]api.ResourceName, 0, len(list))
 	for res := range list {
 		resources = append(resources, res)

@@ -87,7 +87,7 @@ func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAd
 	if len(m.nodeConditions) == 0 {
 		return lifecycle.PodAdmitResult{Admit: true}
 	}
-	notBestEffort := qos.BestEffort != qos.GetPodQos(attrs.Pod)
+	notBestEffort := qos.BestEffort != qos.GetPodQOS(attrs.Pod)
 	if notBestEffort {
 		return lifecycle.PodAdmitResult{Admit: true}
 	}

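The eviction hunk is a pure rename, but the rule it touches is worth spelling out: once the kubelet is tracking any node pressure condition, only pods above BestEffort are admitted at this point in Admit. A minimal standalone sketch of that rule, using the renamed qos.GetPodQOS; the helper name and the import paths are assumptions for this tree, not part of the diff:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"         // import path assumed for this tree
	"k8s.io/kubernetes/pkg/kubelet/qos" // package that exports GetPodQOS
)

// admitUnderPressure is a hypothetical helper that restates the hunk above
// as a predicate: with no active node conditions every pod is admitted,
// otherwise only non-BestEffort pods are.
func admitUnderPressure(nodeConditions []api.NodeConditionType, pod *api.Pod) bool {
	if len(nodeConditions) == 0 {
		return true
	}
	return qos.GetPodQOS(pod) != qos.BestEffort
}

func main() {
	pressure := []api.NodeConditionType{api.NodeMemoryPressure}
	// A container with no requests or limits makes the pod BestEffort.
	bestEffort := &api.Pod{Spec: api.PodSpec{Containers: []api.Container{{Name: "app"}}}}
	fmt.Println(admitUnderPressure(nil, bestEffort))      // true: no pressure recorded
	fmt.Println(admitUnderPressure(pressure, bestEffort)) // false: BestEffort is rejected
}
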
@@ -299,10 +299,10 @@ func (ms *multiSorter) Less(i, j int) bool {
 	return ms.cmp[k](p1, p2) < 0
 }
 
-// qos compares pods by QoS (BestEffort < Burstable < Guaranteed)
+// qosComparator compares pods by QoS (BestEffort < Burstable < Guaranteed)
 func qosComparator(p1, p2 *api.Pod) int {
-	qosP1 := qos.GetPodQos(p1)
-	qosP2 := qos.GetPodQos(p2)
+	qosP1 := qos.GetPodQOS(p1)
+	qosP2 := qos.GetPodQOS(p2)
 	// its a tie
 	if qosP1 == qosP2 {
 		return 0

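The renamed comparator documents the ordering BestEffort < Burstable < Guaranteed, and multiSorter.Less treats a negative result as "p1 sorts before p2". A standalone sketch of an equivalent comparison; the rank helper and the import paths are assumptions, not code from the diff:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api" // import paths assumed for this tree
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/kubelet/qos"
)

// rank is a hypothetical helper encoding the documented ordering:
// BestEffort < Burstable < Guaranteed.
func rank(c qos.QOSClass) int {
	switch c {
	case qos.BestEffort:
		return 0
	case qos.Burstable:
		return 1
	default: // qos.Guaranteed
		return 2
	}
}

// compareByQOS follows the comparator's contract: negative when p1 sorts
// before p2, zero on a tie, positive otherwise.
func compareByQOS(p1, p2 *api.Pod) int {
	return rank(qos.GetPodQOS(p1)) - rank(qos.GetPodQOS(p2))
}

func main() {
	// No resources at all -> BestEffort.
	bestEffort := &api.Pod{Spec: api.PodSpec{Containers: []api.Container{{Name: "a"}}}}
	// A request with no matching limit -> Burstable.
	burstable := &api.Pod{Spec: api.PodSpec{Containers: []api.Container{{
		Name: "b",
		Resources: api.ResourceRequirements{
			Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
		},
	}}}}
	fmt.Println(compareByQOS(bestEffort, burstable)) // negative: BestEffort sorts first
}
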
@@ -35,7 +35,7 @@ const (
 // and 1000. Containers with higher OOM scores are killed if the system runs out of memory.
 // See https://lwn.net/Articles/391222/ for more information.
 func GetContainerOOMScoreAdjust(pod *api.Pod, container *api.Container, memoryCapacity int64) int {
-	switch GetPodQos(pod) {
+	switch GetPodQOS(pod) {
 	case Guaranteed:
 		// Guaranteed containers should be the last to get killed.
 		return guaranteedOOMScoreAdj
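
GetContainerOOMScoreAdjust maps a pod's QoS class to an oom_score_adj value, with Guaranteed containers getting the adjustment that makes them the last to be killed. A hedged usage sketch; the node memory capacity figure and the import paths are assumptions, not values from the diff:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api" // import paths assumed for this tree
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/kubelet/qos"
)

func main() {
	// A Guaranteed pod: every container sets requests equal to limits
	// for both cpu and memory.
	limits := api.ResourceList{
		api.ResourceCPU:    resource.MustParse("500m"),
		api.ResourceMemory: resource.MustParse("128Mi"),
	}
	container := api.Container{
		Name:      "app",
		Resources: api.ResourceRequirements{Requests: limits, Limits: limits},
	}
	pod := &api.Pod{Spec: api.PodSpec{Containers: []api.Container{container}}}

	// 8Gi is an arbitrary stand-in for the node's memory capacity.
	memoryCapacity := int64(8 * 1024 * 1024 * 1024)

	// Per the hunk above, Guaranteed containers should be the last to get killed,
	// so this prints the lowest adjustment the function hands out.
	adj := qos.GetContainerOOMScoreAdjust(pod, &container, memoryCapacity)
	fmt.Printf("QoS=%s oom_score_adj=%d\n", qos.GetPodQOS(pod), adj)
}
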
@@ -41,11 +41,11 @@ func isResourceBestEffort(container *api.Container, resource api.ResourceName) b
 	return !hasReq || req.Value() == 0
 }
 
-// GetPodQos returns the QoS class of a pod.
+// GetPodQOS returns the QoS class of a pod.
 // A pod is besteffort if none of its containers have specified any requests or limits.
 // A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
 // A pod is burstable if limits and requests do not match across all containers.
-func GetPodQos(pod *api.Pod) QOSClass {
+func GetPodQOS(pod *api.Pod) QOSClass {
 	requests := api.ResourceList{}
 	limits := api.ResourceList{}
 	zeroQuantity := resource.MustParse("0")
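
The doc comment above is the whole contract: no requests or limits anywhere means BestEffort, requests equal to limits on every container means Guaranteed, anything in between is Burstable. A small sketch exercising the renamed function against those three cases; the podWithResources helper and the import paths are assumptions for this tree:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api" // import paths assumed for this tree
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/kubelet/qos"
)

// podWithResources is a hypothetical helper that builds a single-container pod.
func podWithResources(name string, requests, limits api.ResourceList) *api.Pod {
	return &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: name},
		Spec: api.PodSpec{Containers: []api.Container{{
			Name:      "main",
			Resources: api.ResourceRequirements{Requests: requests, Limits: limits},
		}}},
	}
}

func main() {
	full := api.ResourceList{
		api.ResourceCPU:    resource.MustParse("500m"),
		api.ResourceMemory: resource.MustParse("256Mi"),
	}
	half := api.ResourceList{
		api.ResourceCPU:    resource.MustParse("250m"),
		api.ResourceMemory: resource.MustParse("128Mi"),
	}

	pods := []*api.Pod{
		podWithResources("no-resources", nil, nil),             // expected: BestEffort
		podWithResources("requests-below-limits", half, full),  // expected: Burstable
		podWithResources("requests-equal-limits", full, full),  // expected: Guaranteed
	}
	for _, p := range pods {
		fmt.Printf("%s -> %s\n", p.Name, qos.GetPodQOS(p))
	}
}
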
@@ -99,23 +99,23 @@ func GetPodQos(pod *api.Pod) QOSClass {
 	return Burstable
 }
 
-// QoSList is a set of (resource name, QoS class) pairs.
-type QoSList map[api.ResourceName]QOSClass
+// QOSList is a set of (resource name, QoS class) pairs.
+type QOSList map[api.ResourceName]QOSClass
 
-// GetQoS returns a mapping of resource name to QoS class of a container
-func GetQoS(container *api.Container) QoSList {
-	resourceToQoS := QoSList{}
+// GetQOS returns a mapping of resource name to QoS class of a container
+func GetQOS(container *api.Container) QOSList {
+	resourceToQOS := QOSList{}
 	for resource := range allResources(container) {
 		switch {
 		case isResourceGuaranteed(container, resource):
-			resourceToQoS[resource] = Guaranteed
+			resourceToQOS[resource] = Guaranteed
 		case isResourceBestEffort(container, resource):
-			resourceToQoS[resource] = BestEffort
+			resourceToQOS[resource] = BestEffort
 		default:
-			resourceToQoS[resource] = Burstable
+			resourceToQOS[resource] = Burstable
 		}
 	}
-	return resourceToQoS
+	return resourceToQOS
 }
 
 // supportedComputeResources is the list of supported compute resources
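
GetQOS classifies each compute resource of a single container separately and returns the result as a QOSList, so one container can be Guaranteed for memory while only Burstable for cpu. A hedged sketch of that per-resource view; the import paths are assumptions, and the expected classes follow the helper names above (request equal to a non-zero limit treated as Guaranteed for that resource, a bare request as Burstable):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api" // import paths assumed for this tree
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/kubelet/qos"
)

func main() {
	// memory: request == limit (expected Guaranteed); cpu: request only (expected Burstable).
	container := &api.Container{
		Name: "app",
		Resources: api.ResourceRequirements{
			Requests: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("250m"),
				api.ResourceMemory: resource.MustParse("128Mi"),
			},
			Limits: api.ResourceList{
				api.ResourceMemory: resource.MustParse("128Mi"),
			},
		},
	}

	perResource := qos.GetQOS(container) // a qos.QOSList: resource name -> class
	for name, class := range perResource {
		fmt.Printf("%s -> %s\n", name, class)
	}
}
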
@@ -59,7 +59,7 @@ func newPod(name string, containers []api.Container) *api.Pod {
 	}
 }
 
-func TestGetPodQos(t *testing.T) {
+func TestGetPodQOS(t *testing.T) {
 	testCases := []struct {
 		pod      *api.Pod
 		expected QOSClass

@@ -125,7 +125,7 @@ func TestGetPodQos(t *testing.T) {
 		},
 	}
 	for _, testCase := range testCases {
-		if actual := GetPodQos(testCase.pod); testCase.expected != actual {
+		if actual := GetPodQOS(testCase.pod); testCase.expected != actual {
 			t.Errorf("invalid qos pod %s, expected: %s, actual: %s", testCase.pod.Name, testCase.expected, actual)
 		}
 	}

@@ -172,7 +172,7 @@ func PodMatchesScopeFunc(scope api.ResourceQuotaScope, object runtime.Object) bo
 }
 
 func isBestEffort(pod *api.Pod) bool {
-	return qos.GetPodQos(pod) == qos.BestEffort
+	return qos.GetPodQOS(pod) == qos.BestEffort
 }
 
 func isTerminating(pod *api.Pod) bool {

@@ -1020,7 +1020,7 @@ func tolerationsToleratesTaints(tolerations []api.Toleration, taints []api.Taint
 
 // Determine if a pod is scheduled with best-effort QoS
 func isPodBestEffort(pod *api.Pod) bool {
-	return qos.GetPodQos(pod) == qos.BestEffort
+	return qos.GetPodQOS(pod) == qos.BestEffort
 }
 
 // CheckNodeMemoryPressurePredicate checks if a pod can be scheduled on a node