From 6e76f1aef7b66740d07fb41ea8422360d5e5fb8d Mon Sep 17 00:00:00 2001
From: Janet Kuo
Date: Thu, 13 Aug 2015 17:28:01 -0700
Subject: [PATCH 1/5] Show more information in describe node/pod

---
 pkg/kubectl/describe.go | 109 +++++++++++++++++++++++++---------
 1 file changed, 69 insertions(+), 40 deletions(-)

diff --git a/pkg/kubectl/describe.go b/pkg/kubectl/describe.go
index 63a078fbf4..02cbec59b0 100644
--- a/pkg/kubectl/describe.go
+++ b/pkg/kubectl/describe.go
@@ -460,6 +460,9 @@ func describePod(pod *api.Pod, rcs []api.ReplicationController, events *api.Even
 fmt.Fprintf(out, "Namespace:\t%s\n", pod.Namespace)
 fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&pod.Spec))
 fmt.Fprintf(out, "Node:\t%s\n", pod.Spec.NodeName+"/"+pod.Status.HostIP)
+ if pod.Status.StartTime != nil {
+ fmt.Fprintf(out, "Start Time:\t%s\n", pod.Status.StartTime.Time.Format(time.RFC1123Z))
+ }
 fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(pod.Labels))
 if pod.DeletionTimestamp != nil {
 fmt.Fprintf(out, "Status:\tTerminating (expires %s)\n", pod.DeletionTimestamp.Time.Format(time.RFC1123Z))
@@ -707,7 +710,9 @@ func describeContainers(pod *api.Pod, out io.Writer) {
 state := status.State
 fmt.Fprintf(out, " %v:\n", container.Name)
+ fmt.Fprintf(out, " Container ID:\t%s\n", status.ContainerID)
 fmt.Fprintf(out, " Image:\t%s\n", container.Image)
+ fmt.Fprintf(out, " Image ID:\t%s\n", status.ImageID)
 if len(container.Resources.Limits) > 0 {
 fmt.Fprintf(out, " Limits:\n")
@@ -716,6 +721,13 @@
 fmt.Fprintf(out, " %s:\t%s\n", name, quantity.String())
 }
+ if len(container.Resources.Requests) > 0 {
+ fmt.Fprintf(out, " Requests:\n")
+ }
+ for name, quantity := range container.Resources.Requests {
+ fmt.Fprintf(out, " %s:\t%s\n", name, quantity.String())
+ }
+
 describeStatus("State", state, out)
 if status.LastTerminationState.Terminated != nil {
 describeStatus("Last Termination State", status.LastTerminationState, out)
@@ -1065,8 +1077,10 @@ func describeNode(node *api.Node, pods []*api.Pod, events *api.EventList) (strin
 fmt.Fprintf(out, "Name:\t%s\n", node.Name)
 fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(node.Labels))
 fmt.Fprintf(out, "CreationTimestamp:\t%s\n", node.CreationTimestamp.Time.Format(time.RFC1123Z))
+ fmt.Fprintf(out, "Phase:\t%v\n", node.Status.Phase)
 if len(node.Status.Conditions) > 0 {
 fmt.Fprint(out, "Conditions:\n Type\tStatus\tLastHeartbeatTime\tLastTransitionTime\tReason\tMessage\n")
+ fmt.Fprint(out, " ────\t──────\t─────────────────\t──────────────────\t──────\t───────\n")
 for _, c := range node.Status.Conditions {
 fmt.Fprintf(out, " %v \t%v \t%s \t%s \t%v \t%v\n",
 c.Type,
@@ -1089,18 +1103,10 @@ func describeNode(node *api.Node, pods []*api.Pod, events *api.EventList) (strin
 }
 }
- runningPods := filterNonRunningPods(pods)
- reqs, err := getPodsTotalRequests(runningPods)
- if err != nil {
- return err
- }
- fmt.Fprintf(out, "Allocated resources (total requests):\n")
- for reqResource, reqValue := range reqs {
- fmt.Fprintf(out, " %s:\t%s\n", reqResource, reqValue.String())
- }
- fmt.Fprintf(out, " pods:\t%d\n", len(runningPods))
-
- fmt.Fprintf(out, "Version:\n")
+ fmt.Fprintf(out, "System Info:\n")
+ fmt.Fprintf(out, " Machine ID:\t%s\n", node.Status.NodeInfo.MachineID)
+ fmt.Fprintf(out, " System UUID:\t%s\n", node.Status.NodeInfo.SystemUUID)
+ fmt.Fprintf(out, " Boot ID:\t%s\n", node.Status.NodeInfo.BootID)
 fmt.Fprintf(out, " Kernel Version:\t%s\n", node.Status.NodeInfo.KernelVersion)
 fmt.Fprintf(out, " OS Image:\t%s\n", node.Status.NodeInfo.OsImage)
 fmt.Fprintf(out, " Container Runtime Version:\t%s\n", node.Status.NodeInfo.ContainerRuntimeVersion)
@@ -1113,34 +1119,8 @@ func describeNode(node *api.Node, pods []*api.Pod, events *api.EventList) (strin
 if len(node.Spec.ExternalID) > 0 {
 fmt.Fprintf(out, "ExternalID:\t%s\n", node.Spec.ExternalID)
 }
- fmt.Fprintf(out, "Pods:\t(%d in total)\n", len(pods))
- fmt.Fprint(out, " Namespace\tName\t\tCPU(milliCPU)\t\tMemory(bytes)\n")
- totalMilliCPU := int64(0)
- totalMemory := int64(0)
- fractionPodCPU := float64(0)
- fractionPodMemory := float64(0)
- fractionTotalCPU := float64(0)
- fractionTotalMemory := float64(0)
- for _, pod := range pods {
- podTotalMilliCPU := int64(0)
- podTotalMemory := int64(0)
+ describeNodeResource(pods, node, out)
- for ix := range pod.Spec.Containers {
- limits := pod.Spec.Containers[ix].Resources.Limits
- podTotalMilliCPU += limits.Cpu().MilliValue()
- podTotalMemory += limits.Memory().Value()
- }
- totalMilliCPU += podTotalMilliCPU
- totalMemory += podTotalMemory
- fractionPodCPU = float64(podTotalMilliCPU) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100
- fractionPodMemory = float64(podTotalMemory) / float64(node.Status.Capacity.Memory().Value()) * 100
- fmt.Fprintf(out, " %s\t%s\t\t%d (%d%% of total)\t\t%d (%d%% of total)\n", pod.Namespace, pod.Name, podTotalMilliCPU, int64(fractionPodCPU), podTotalMemory, int64(fractionPodMemory))
- }
- fmt.Fprint(out, "TotalResourceLimits:\n")
- fractionTotalCPU = float64(totalMilliCPU) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100
- fractionTotalMemory = float64(totalMemory) / float64(node.Status.Capacity.Memory().Value()) * 100
- fmt.Fprintf(out, " CPU(milliCPU):\t\t%d (%d%% of total)\n", totalMilliCPU, int64(fractionTotalCPU))
- fmt.Fprintf(out, " Memory(bytes):\t\t%d (%d%% of total)\n", totalMemory, int64(fractionTotalMemory))
 if events != nil {
 DescribeEvents(events, out)
 }
@@ -1197,7 +1177,55 @@ func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string) (str
 })
 }
-func filterNonRunningPods(pods []*api.Pod) []*api.Pod {
+func describeNodeResource(pods []*api.Pod, node *api.Node, out io.Writer) {
+ nonTerminatedPods := filterTerminatedPods(pods)
+ fmt.Fprintf(out, "Non-terminated Pods:\t(%d in total)\n", len(nonTerminatedPods))
+ fmt.Fprint(out, " Namespace\tName\t\tCPU Requests\tMemory Requests\tCPU Limits\tMemory Limits\n")
+ fmt.Fprint(out, " ─────────\t────\t\t────────────\t───────────────\t──────────\t─────────────\n")
+ totalMilliCPUReq := int64(0)
+ totalMemoryReq := int64(0)
+ fractionPodCPUReq := float64(0)
+ fractionPodMemoryReq := float64(0)
+ fractionTotalCPUReq := float64(0)
+ fractionTotalMemoryReq := float64(0)
+ totalMilliCPULimit := int64(0)
+ totalMemoryLimit := int64(0)
+ fractionPodCPULimit := float64(0)
+ fractionPodMemoryLimit := float64(0)
+ for _, pod := range pods {
+ podTotalMilliCPUReq := int64(0)
+ podTotalMemoryReq := int64(0)
+ podTotalMilliCPULimit := int64(0)
+ podTotalMemoryLimit := int64(0)
+
+ for ix := range pod.Spec.Containers {
+ requests := pod.Spec.Containers[ix].Resources.Requests
+ podTotalMilliCPUReq += requests.Cpu().MilliValue()
+ podTotalMemoryReq += requests.Memory().Value()
+
+ limits := pod.Spec.Containers[ix].Resources.Limits
+ podTotalMilliCPULimit += limits.Cpu().MilliValue()
+ podTotalMemoryLimit += limits.Memory().Value()
+ }
+ totalMilliCPUReq += podTotalMilliCPUReq
+ totalMemoryReq += podTotalMemoryReq
+ totalMilliCPULimit += podTotalMilliCPULimit
+ totalMemoryLimit += podTotalMemoryLimit
+ fractionPodCPUReq = float64(podTotalMilliCPUReq) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100
+ fractionPodMemoryReq = float64(podTotalMemoryReq) / float64(node.Status.Capacity.Memory().Value()) * 100
+ fractionPodCPULimit = float64(podTotalMilliCPULimit) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100
+ fractionPodMemoryLimit = float64(podTotalMemoryLimit) / float64(node.Status.Capacity.Memory().Value()) * 100
+ fmt.Fprintf(out, " %s\t%s\t\t%dm (%d%%)\t%dKi (%d%%)\t%dm (%d%%)\t%dKi (%d%%)\n", pod.Namespace, pod.Name,
+ podTotalMilliCPUReq, int64(fractionPodCPUReq), podTotalMemoryReq/1000, int64(fractionPodMemoryReq), podTotalMilliCPULimit, int64(fractionPodCPULimit), podTotalMemoryLimit/1000, int64(fractionPodMemoryLimit))
+ }
+ fmt.Fprint(out, "Allocated resources:\n CPU Requests\tMemory Requests\tCPU Limits\tMemory Limits\n")
+ fmt.Fprint(out, " ────────────\t───────────────\t──────────\t─────────────\n")
+ fractionTotalCPUReq = float64(totalMilliCPUReq) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100
+ fractionTotalMemoryReq = float64(totalMemoryReq) / float64(node.Status.Capacity.Memory().Value()) * 100
+ fmt.Fprintf(out, " %dm (%d%%)\t%dKi (%d%%)\t%dm\t%dKi\n", totalMilliCPUReq, int64(fractionTotalCPUReq), totalMemoryReq/1000, int64(fractionTotalMemoryReq), totalMilliCPULimit, totalMemoryLimit/1000)
+}
+
+func filterTerminatedPods(pods []*api.Pod) []*api.Pod {
 if len(pods) == 0 {
 return pods
 }
@@ -1250,6 +1278,7 @@ func DescribeEvents(el *api.EventList, w io.Writer) {
 }
 sort.Sort(SortableEvents(el.Items))
 fmt.Fprint(w, "Events:\n FirstSeen\tLastSeen\tCount\tFrom\tSubobjectPath\tReason\tMessage\n")
+ fmt.Fprint(w, " ─────────\t────────\t─────\t────\t─────────────\t──────\t───────\n")
 for _, e := range el.Items {
 fmt.Fprintf(w, " %s\t%s\t%d\t%v\t%v\t%v\t%v\n",
 translateTimestamp(e.FirstTimestamp),

From 22a794cc22a0d5c9ada25508edda31633763afc9 Mon Sep 17 00:00:00 2001
From: Janet Kuo
Date: Tue, 1 Sep 2015 16:35:32 -0700
Subject: [PATCH 2/5] List resource QoS tier of each container when describing pods; Re-order resource table

---
 pkg/kubectl/describe.go     | 21 ++++++++---
 pkg/kubelet/qos/util/qos.go | 75 +++++++++++++++++++++++++++++++++++++
 2 files changed, 90 insertions(+), 6 deletions(-)
 create mode 100644 pkg/kubelet/qos/util/qos.go

diff --git a/pkg/kubectl/describe.go b/pkg/kubectl/describe.go
index 02cbec59b0..33701ed2d4 100644
--- a/pkg/kubectl/describe.go
+++ b/pkg/kubectl/describe.go
@@ -31,6 +31,7 @@ import (
 client "k8s.io/kubernetes/pkg/client/unversioned"
 "k8s.io/kubernetes/pkg/fieldpath"
 "k8s.io/kubernetes/pkg/fields"
+ qosutil "k8s.io/kubernetes/pkg/kubelet/qos/util"
 "k8s.io/kubernetes/pkg/labels"
 "k8s.io/kubernetes/pkg/types"
 "k8s.io/kubernetes/pkg/util/sets"
@@ -714,6 +715,14 @@ func describeContainers(pod *api.Pod, out io.Writer) {
 fmt.Fprintf(out, " Image:\t%s\n", container.Image)
 fmt.Fprintf(out, " Image ID:\t%s\n", status.ImageID)
+ resourceToQoS := qosutil.GetQoS(&container)
+ if len(resourceToQoS) > 0 {
+ fmt.Fprintf(out, " QoS Tier:\n")
+ }
+ for resource, qos := range resourceToQoS {
+ fmt.Fprintf(out, " %s:\t%s\n", resource, qos)
+ }
+
 if len(container.Resources.Limits) > 0 {
 fmt.Fprintf(out, " Limits:\n")
 }
@@ -1180,8 +1189,8 @@ func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string) (str
 func describeNodeResource(pods []*api.Pod, node *api.Node, out io.Writer) {
 nonTerminatedPods := filterTerminatedPods(pods)
 fmt.Fprintf(out, "Non-terminated Pods:\t(%d in total)\n", len(nonTerminatedPods))
- fmt.Fprint(out, " Namespace\tName\t\tCPU Requests\tMemory Requests\tCPU Limits\tMemory Limits\n")
- fmt.Fprint(out, " ─────────\t────\t\t────────────\t───────────────\t──────────\t─────────────\n")
+ fmt.Fprint(out, " Namespace\tName\t\tCPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\n")
+ fmt.Fprint(out, " ─────────\t────\t\t────────────\t──────────\t───────────────\t─────────────\n")
 totalMilliCPUReq := int64(0)
 totalMemoryReq := int64(0)
 fractionPodCPUReq := float64(0)
@@ -1216,13 +1225,13 @@ func describeNodeResource(pods []*api.Pod, node *api.Node, out io.Writer) {
 fractionPodCPULimit = float64(podTotalMilliCPULimit) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100
 fractionPodMemoryLimit = float64(podTotalMemoryLimit) / float64(node.Status.Capacity.Memory().Value()) * 100
 fmt.Fprintf(out, " %s\t%s\t\t%dm (%d%%)\t%dKi (%d%%)\t%dm (%d%%)\t%dKi (%d%%)\n", pod.Namespace, pod.Name,
- podTotalMilliCPUReq, int64(fractionPodCPUReq), podTotalMemoryReq/1000, int64(fractionPodMemoryReq), podTotalMilliCPULimit, int64(fractionPodCPULimit), podTotalMemoryLimit/1000, int64(fractionPodMemoryLimit))
+ podTotalMilliCPUReq, int64(fractionPodCPUReq), podTotalMilliCPULimit, int64(fractionPodCPULimit), podTotalMemoryReq/1000, int64(fractionPodMemoryReq), podTotalMemoryLimit/1000, int64(fractionPodMemoryLimit))
 }
- fmt.Fprint(out, "Allocated resources:\n CPU Requests\tMemory Requests\tCPU Limits\tMemory Limits\n")
- fmt.Fprint(out, " ────────────\t───────────────\t──────────\t─────────────\n")
+ fmt.Fprint(out, "Allocated resources:\n CPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\n")
+ fmt.Fprint(out, " ────────────\t──────────\t───────────────\t─────────────\n")
 fractionTotalCPUReq = float64(totalMilliCPUReq) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100
 fractionTotalMemoryReq = float64(totalMemoryReq) / float64(node.Status.Capacity.Memory().Value()) * 100
- fmt.Fprintf(out, " %dm (%d%%)\t%dKi (%d%%)\t%dm\t%dKi\n", totalMilliCPUReq, int64(fractionTotalCPUReq), totalMemoryReq/1000, int64(fractionTotalMemoryReq), totalMilliCPULimit, totalMemoryLimit/1000)
+ fmt.Fprintf(out, " %dm (%d%%)\t%dm\t%dKi (%d%%)\t%dKi\n", totalMilliCPUReq, int64(fractionTotalCPUReq), totalMilliCPULimit, totalMemoryReq/1000, int64(fractionTotalMemoryReq), totalMemoryLimit/1000)
 }
 func filterTerminatedPods(pods []*api.Pod) []*api.Pod {
diff --git a/pkg/kubelet/qos/util/qos.go b/pkg/kubelet/qos/util/qos.go
new file mode 100644
index 0000000000..d7a1e5c88d
--- /dev/null
+++ b/pkg/kubelet/qos/util/qos.go
@@ -0,0 +1,75 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+)
+
+const (
+ Guaranteed = "Guaranteed"
+ Burstable = "Burstable"
+ BestEffort = "Best-Effort"
+)
+
+// isResourceGuaranteed returns true if the container's resource requirements are Guaranteed.
+func isResourceGuaranteed(container *api.Container, resource api.ResourceName) bool {
+ // A container resource is guaranteed if its request == limit.
+ // If request == limit, the user is very confident of resource consumption.
+ req, hasReq := container.Resources.Requests[resource]
+ limit, hasLimit := container.Resources.Limits[resource]
+ if !hasReq || !hasLimit {
+ return false
+ }
+ return req.Value() == limit.Value() && req.Value() != 0
+}
+
+// isResourceBestEffort returns true if the container's resource requirements are best-effort.
+func isResourceBestEffort(container *api.Container, resource api.ResourceName) bool {
+ // A container resource is best-effort if its request is unspecified or 0.
+ // If a request is specified, then the user expects some kind of resource guarantee.
+ req, hasReq := container.Resources.Requests[resource]
+ return !hasReq || req.Value() == 0
+}
+
+// GetQos returns a mapping of resource name to QoS class of a container
+func GetQoS(container *api.Container) map[api.ResourceName]string {
+ resourceToQoS := map[api.ResourceName]string{}
+ for resource := range allResources(container) {
+ switch {
+ case isResourceGuaranteed(container, resource):
+ resourceToQoS[resource] = Guaranteed
+ case isResourceBestEffort(container, resource):
+ resourceToQoS[resource] = BestEffort
+ default:
+ resourceToQoS[resource] = Burstable
+ }
+ }
+ return resourceToQoS
+}
+
+// allResources returns a set of resources the container has
+func allResources(container *api.Container) map[api.ResourceName]bool {
+ resources := map[api.ResourceName]bool{}
+ for resource := range container.Resources.Requests {
+ resources[resource] = true
+ }
+ for resource := range container.Resources.Limits {
+ resources[resource] = true
+ }
+ return resources
+}

From 5ffe46d376b282acf931516a2266f4a856358185 Mon Sep 17 00:00:00 2001
From: Janet Kuo
Date: Wed, 9 Sep 2015 17:20:31 -0700
Subject: [PATCH 3/5] Use resource.Quantity string representation to print requests and limits

---
 pkg/kubectl/describe.go      | 106 ++++++++++++++++++-----------------
 pkg/kubectl/describe_test.go |   6 +-
 2 files changed, 57 insertions(+), 55 deletions(-)

diff --git a/pkg/kubectl/describe.go b/pkg/kubectl/describe.go
index 33701ed2d4..aad470361f 100644
--- a/pkg/kubectl/describe.go
+++ b/pkg/kubectl/describe.go
@@ -1128,7 +1128,9 @@ func describeNode(node *api.Node, pods []*api.Pod, events *api.EventList) (strin
 if len(node.Spec.ExternalID) > 0 {
 fmt.Fprintf(out, "ExternalID:\t%s\n", node.Spec.ExternalID)
 }
- describeNodeResource(pods, node, out)
+ if err := describeNodeResource(pods, node, out); err != nil {
+ return err
+ }
 if events != nil {
 DescribeEvents(events, out)
@@ -1186,52 +1188,38 @@ func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string) (str
 })
 }
-func describeNodeResource(pods []*api.Pod, node *api.Node, out io.Writer) {
+func describeNodeResource(pods []*api.Pod, node *api.Node, out io.Writer) error {
 nonTerminatedPods := filterTerminatedPods(pods)
 fmt.Fprintf(out, "Non-terminated Pods:\t(%d in total)\n", len(nonTerminatedPods))
 fmt.Fprint(out, " Namespace\tName\t\tCPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\n")
 fmt.Fprint(out, " ─────────\t────\t\t────────────\t──────────\t───────────────\t─────────────\n")
- totalMilliCPUReq := int64(0)
- totalMemoryReq := int64(0)
- fractionPodCPUReq := float64(0)
- fractionPodMemoryReq := float64(0)
- fractionTotalCPUReq := float64(0)
- fractionTotalMemoryReq := float64(0)
- totalMilliCPULimit := int64(0)
- totalMemoryLimit := int64(0)
- fractionPodCPULimit := float64(0)
- fractionPodMemoryLimit := float64(0)
- for _, pod := range pods {
- podTotalMilliCPUReq := int64(0)
- podTotalMemoryReq := int64(0)
- podTotalMilliCPULimit := int64(0)
- podTotalMemoryLimit := int64(0)
-
- for ix := range pod.Spec.Containers {
- requests := pod.Spec.Containers[ix].Resources.Requests
- podTotalMilliCPUReq += requests.Cpu().MilliValue()
- podTotalMemoryReq += requests.Memory().Value()
-
- limits := pod.Spec.Containers[ix].Resources.Limits
- podTotalMilliCPULimit += limits.Cpu().MilliValue()
- podTotalMemoryLimit += limits.Memory().Value()
+ for _, pod := range nonTerminatedPods {
+ req, limit, err := getSinglePodTotalRequestsAndLimits(pod)
+ if err != nil {
+ return err
 }
- totalMilliCPUReq += podTotalMilliCPUReq
- totalMemoryReq += podTotalMemoryReq
- totalMilliCPULimit += podTotalMilliCPULimit
- totalMemoryLimit += podTotalMemoryLimit
- fractionPodCPUReq = float64(podTotalMilliCPUReq) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100
- fractionPodMemoryReq = float64(podTotalMemoryReq) / float64(node.Status.Capacity.Memory().Value()) * 100
- fractionPodCPULimit = float64(podTotalMilliCPULimit) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100
- fractionPodMemoryLimit = float64(podTotalMemoryLimit) / float64(node.Status.Capacity.Memory().Value()) * 100
- fmt.Fprintf(out, " %s\t%s\t\t%dm (%d%%)\t%dKi (%d%%)\t%dm (%d%%)\t%dKi (%d%%)\n", pod.Namespace, pod.Name,
- podTotalMilliCPUReq, int64(fractionPodCPUReq), podTotalMilliCPULimit, int64(fractionPodCPULimit), podTotalMemoryReq/1000, int64(fractionPodMemoryReq), podTotalMemoryLimit/1000, int64(fractionPodMemoryLimit))
+ cpuReq, cpuLimit, memoryReq, memoryLimit := req[api.ResourceCPU], limit[api.ResourceCPU], req[api.ResourceMemory], limit[api.ResourceMemory]
+ fractionCpuReq := float64(cpuReq.MilliValue()) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100
+ fractionCpuLimit := float64(cpuLimit.MilliValue()) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100
+ fractionMemoryReq := float64(memoryReq.MilliValue()) / float64(node.Status.Capacity.Memory().MilliValue()) * 100
+ fractionMemoryLimit := float64(memoryLimit.MilliValue()) / float64(node.Status.Capacity.Memory().MilliValue()) * 100
+ fmt.Fprintf(out, " %s\t%s\t\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\n", pod.Namespace, pod.Name,
+ cpuReq.String(), int64(fractionCpuReq), cpuLimit.String(), int64(fractionCpuLimit),
+ memoryReq.String(), int64(fractionMemoryReq), memoryLimit.String(), int64(fractionMemoryLimit))
 }
+
 fmt.Fprint(out, "Allocated resources:\n CPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\n")
 fmt.Fprint(out, " ────────────\t──────────\t───────────────\t─────────────\n")
- fractionTotalCPUReq = float64(totalMilliCPUReq) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100
- fractionTotalMemoryReq = float64(totalMemoryReq) / float64(node.Status.Capacity.Memory().Value()) * 100
- fmt.Fprintf(out, " %dm (%d%%)\t%dm\t%dKi (%d%%)\t%dKi\n", totalMilliCPUReq, int64(fractionTotalCPUReq), totalMilliCPULimit, totalMemoryReq/1000, int64(fractionTotalMemoryReq), totalMemoryLimit/1000)
+ reqs, limits, err := getPodsTotalRequestsAndLimits(nonTerminatedPods)
+ if err != nil {
+ return err
+ }
+ cpuReqs, cpuLimits, memoryReqs, memoryLimits := reqs[api.ResourceCPU], limits[api.ResourceCPU], reqs[api.ResourceMemory], limits[api.ResourceMemory]
+ fractionCpuReqs := float64(cpuReqs.MilliValue()) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100
+ fractionMemoryReqs := float64(memoryReqs.MilliValue()) / float64(node.Status.Capacity.Memory().MilliValue()) * 100
+ fmt.Fprintf(out, " %s (%d%%)\t%s\t%s (%d%%)\t%s\n",
+ cpuReqs.String(), int64(fractionCpuReqs), cpuLimits.String(), memoryReqs.String(), int64(fractionMemoryReqs), memoryLimits.String())
+ return nil
 }
 func filterTerminatedPods(pods []*api.Pod) []*api.Pod {
@@ -1248,36 +1236,50 @@ func filterTerminatedPods(pods []*api.Pod) []*api.Pod {
 return result
 }
-func getPodsTotalRequests(pods []*api.Pod) (map[api.ResourceName]resource.Quantity, error) {
- reqs := map[api.ResourceName]resource.Quantity{}
+func getPodsTotalRequestsAndLimits(pods []*api.Pod) (reqs map[api.ResourceName]resource.Quantity, limits map[api.ResourceName]resource.Quantity, err error) {
+ reqs, limits = map[api.ResourceName]resource.Quantity{}, map[api.ResourceName]resource.Quantity{}
 for _, pod := range pods {
- podReqs, err := getSinglePodTotalRequests(pod)
+ podReqs, podLimits, err := getSinglePodTotalRequestsAndLimits(pod)
 if err != nil {
- return nil, err
+ return nil, nil, err
 }
 for podReqName, podReqValue := range podReqs {
 if value, ok := reqs[podReqName]; !ok {
- reqs[podReqName] = podReqValue
+ reqs[podReqName] = *podReqValue.Copy()
 } else if err = value.Add(podReqValue); err != nil {
- return nil, err
+ return nil, nil, err
+ }
+ }
+ for podLimitName, podLimitValue := range podLimits {
+ if value, ok := limits[podLimitName]; !ok {
+ limits[podLimitName] = *podLimitValue.Copy()
+ } else if err = value.Add(podLimitValue); err != nil {
+ return nil, nil, err
 }
 }
 }
- return reqs, nil
+ return
 }
-func getSinglePodTotalRequests(pod *api.Pod) (map[api.ResourceName]resource.Quantity, error) {
- reqs := map[api.ResourceName]resource.Quantity{}
+func getSinglePodTotalRequestsAndLimits(pod *api.Pod) (reqs map[api.ResourceName]resource.Quantity, limits map[api.ResourceName]resource.Quantity, err error) {
+ reqs, limits = map[api.ResourceName]resource.Quantity{}, map[api.ResourceName]resource.Quantity{}
 for _, container := range pod.Spec.Containers {
 for name, quantity := range container.Resources.Requests {
 if value, ok := reqs[name]; !ok {
- reqs[name] = quantity
- } else if err := value.Add(quantity); err != nil {
- return nil, err
+ reqs[name] = *quantity.Copy()
+ } else if err = value.Add(quantity); err != nil {
+ return nil, nil, err
+ }
+ }
+ for name, quantity := range container.Resources.Limits {
+ if value, ok := limits[name]; !ok {
+ limits[name] = *quantity.Copy()
+ } else if err = value.Add(quantity); err != nil {
+ return nil, nil, err
 }
 }
 }
- return reqs, nil
+ return
 }
 func DescribeEvents(el *api.EventList, w io.Writer) {
diff --git a/pkg/kubectl/describe_test.go b/pkg/kubectl/describe_test.go
index db79895fa3..131bb7f2e2 100644
--- a/pkg/kubectl/describe_test.go
+++ b/pkg/kubectl/describe_test.go
@@ -339,8 +339,8 @@ func TestDefaultDescribers(t *testing.T) {
 func TestGetPodsTotalRequests(t *testing.T) {
 testCases := []struct {
- pods []*api.Pod
- expectedReqs map[api.ResourceName]resource.Quantity
+ pods []*api.Pod
+ expectedReqs, expectedLimits map[api.ResourceName]resource.Quantity
 }{
 {
 pods: []*api.Pod{
@@ -402,7 +402,7 @@ func TestGetPodsTotalRequests(t *testing.T) {
 }
 for _, testCase := range testCases {
- reqs, err := getPodsTotalRequests(testCase.pods)
+ reqs, _, err := getPodsTotalRequestsAndLimits(testCase.pods)
 if err != nil {
 t.Errorf("Unexpected error %v", err)
 }

From 2e5aeaff52c2e0a10df5f9822d4d0b87fb19141e Mon Sep 17 00:00:00 2001
From: Janet Kuo
Date: Thu, 10 Sep 2015 16:25:48 -0700
Subject: [PATCH 4/5] Add percentage of total limits and a help doc to explain why it may be over 100%

---
 pkg/kubectl/describe.go | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/pkg/kubectl/describe.go b/pkg/kubectl/describe.go
index aad470361f..310c0acb23 100644
--- a/pkg/kubectl/describe.go
+++ b/pkg/kubectl/describe.go
@@ -1208,7 +1208,7 @@ func describeNodeResource(pods []*api.Pod, node *api.Node, out io.Writer) error
 memoryReq.String(), int64(fractionMemoryReq), memoryLimit.String(), int64(fractionMemoryLimit))
 }
- fmt.Fprint(out, "Allocated resources:\n CPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\n")
+ fmt.Fprint(out, "Allocated resources:\n (Total limits may be over 100%, i.e., overcommitted. More info: http://releases.k8s.io/HEAD/docs/user-guide/compute-resources.md)\n CPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\n")
 fmt.Fprint(out, " ────────────\t──────────\t───────────────\t─────────────\n")
 reqs, limits, err := getPodsTotalRequestsAndLimits(nonTerminatedPods)
 if err != nil {
@@ -1216,9 +1216,12 @@ func describeNodeResource(pods []*api.Pod, node *api.Node, out io.Writer) error
 }
 cpuReqs, cpuLimits, memoryReqs, memoryLimits := reqs[api.ResourceCPU], limits[api.ResourceCPU], reqs[api.ResourceMemory], limits[api.ResourceMemory]
 fractionCpuReqs := float64(cpuReqs.MilliValue()) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100
+ fractionCpuLimits := float64(cpuLimits.MilliValue()) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100
 fractionMemoryReqs := float64(memoryReqs.MilliValue()) / float64(node.Status.Capacity.Memory().MilliValue()) * 100
- fmt.Fprintf(out, " %s (%d%%)\t%s\t%s (%d%%)\t%s\n",
- cpuReqs.String(), int64(fractionCpuReqs), cpuLimits.String(), memoryReqs.String(), int64(fractionMemoryReqs), memoryLimits.String())
+ fractionMemoryLimits := float64(memoryLimits.MilliValue()) / float64(node.Status.Capacity.Memory().MilliValue()) * 100
+ fmt.Fprintf(out, " %s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\n",
+ cpuReqs.String(), int64(fractionCpuReqs), cpuLimits.String(), int64(fractionCpuLimits),
+ memoryReqs.String(), int64(fractionMemoryReqs), memoryLimits.String(), int64(fractionMemoryLimits))
 return nil
 }

From edc75cd5656b0449541b0f4db48f0f8c32dc2230 Mon Sep 17 00:00:00 2001
From: Janet Kuo
Date: Thu, 10 Sep 2015 17:20:16 -0700
Subject: [PATCH 5/5] Fix the bug that determining guaranteed qos won't work well with small numbers

---
 pkg/kubelet/qos/util/qos.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/kubelet/qos/util/qos.go b/pkg/kubelet/qos/util/qos.go
index d7a1e5c88d..0ebb1c84fa 100644
--- a/pkg/kubelet/qos/util/qos.go
+++ b/pkg/kubelet/qos/util/qos.go
@@ -35,7 +35,7 @@ func isResourceGuaranteed(container *api.Container, resource api.ResourceName) b
 if !hasReq || !hasLimit {
 return false
 }
- return req.Value() == limit.Value() && req.Value() != 0
+ return req.Cmp(limit) == 0 && req.Value() != 0
 }
 // isResourceBestEffort returns true if the container's resource requirements are best-effort.
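
A minimal sketch of the "small numbers" bug that the final commit fixes, assuming the resource.Quantity API of this tree (MustParse, Value, Cmp from k8s.io/kubernetes/pkg/api/resource): Value() rounds sub-unit quantities up to the nearest integer, so a 100m request and a 200m limit both report 1 and the old equality check wrongly classifies the container as Guaranteed, while Cmp compares the exact values. This example is illustrative only and is not part of the patch series.

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api/resource"
)

func main() {
    req := resource.MustParse("100m")   // request: 0.1 CPU
    limit := resource.MustParse("200m") // limit: 0.2 CPU

    // Old check: Value() rounds both quantities up to 1, so request and limit
    // look equal and the resource would be reported as Guaranteed.
    fmt.Println(req.Value() == limit.Value()) // true

    // New check: Cmp compares the exact values, so the mismatch is detected
    // and the resource falls through to Burstable.
    fmt.Println(req.Cmp(limit) == 0) // false
}

Note that the patched check also keeps the req.Value() != 0 condition, so a request/limit pair of zero is still not treated as Guaranteed.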