Merge pull request #12695 from JanetKuo/kubectl-describe-pod-podstatus

Auto commit by PR queue bot
k8s-merge-robot 2015-09-11 08:08:32 -07:00
commit bff6ee8e4a
3 changed files with 174 additions and 56 deletions

pkg/kubectl/describe.go

@@ -31,6 +31,7 @@ import (
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/fieldpath"
"k8s.io/kubernetes/pkg/fields"
qosutil "k8s.io/kubernetes/pkg/kubelet/qos/util"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/sets"
@@ -460,6 +461,9 @@ func describePod(pod *api.Pod, rcs []api.ReplicationController, events *api.Even
fmt.Fprintf(out, "Namespace:\t%s\n", pod.Namespace)
fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&pod.Spec))
fmt.Fprintf(out, "Node:\t%s\n", pod.Spec.NodeName+"/"+pod.Status.HostIP)
if pod.Status.StartTime != nil {
fmt.Fprintf(out, "Start Time:\t%s\n", pod.Status.StartTime.Time.Format(time.RFC1123Z))
}
fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(pod.Labels))
if pod.DeletionTimestamp != nil {
fmt.Fprintf(out, "Status:\tTerminating (expires %s)\n", pod.DeletionTimestamp.Time.Format(time.RFC1123Z))
@@ -707,7 +711,17 @@ func describeContainers(pod *api.Pod, out io.Writer) {
state := status.State
fmt.Fprintf(out, " %v:\n", container.Name)
fmt.Fprintf(out, " Container ID:\t%s\n", status.ContainerID)
fmt.Fprintf(out, " Image:\t%s\n", container.Image)
fmt.Fprintf(out, " Image ID:\t%s\n", status.ImageID)
resourceToQoS := qosutil.GetQoS(&container)
if len(resourceToQoS) > 0 {
fmt.Fprintf(out, " QoS Tier:\n")
}
for resource, qos := range resourceToQoS {
fmt.Fprintf(out, " %s:\t%s\n", resource, qos)
}
if len(container.Resources.Limits) > 0 {
fmt.Fprintf(out, " Limits:\n")
@@ -716,6 +730,13 @@ func describeContainers(pod *api.Pod, out io.Writer) {
fmt.Fprintf(out, " %s:\t%s\n", name, quantity.String())
}
if len(container.Resources.Requests) > 0 {
fmt.Fprintf(out, " Requests:\n")
}
for name, quantity := range container.Resources.Requests {
fmt.Fprintf(out, " %s:\t%s\n", name, quantity.String())
}
describeStatus("State", state, out)
if status.LastTerminationState.Terminated != nil {
describeStatus("Last Termination State", status.LastTerminationState, out)
@@ -1065,8 +1086,10 @@ func describeNode(node *api.Node, pods []*api.Pod, events *api.EventList) (strin
fmt.Fprintf(out, "Name:\t%s\n", node.Name)
fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(node.Labels))
fmt.Fprintf(out, "CreationTimestamp:\t%s\n", node.CreationTimestamp.Time.Format(time.RFC1123Z))
fmt.Fprintf(out, "Phase:\t%v\n", node.Status.Phase)
if len(node.Status.Conditions) > 0 {
fmt.Fprint(out, "Conditions:\n Type\tStatus\tLastHeartbeatTime\tLastTransitionTime\tReason\tMessage\n")
fmt.Fprint(out, " ────\t──────\t─────────────────\t──────────────────\t──────\t───────\n")
for _, c := range node.Status.Conditions {
fmt.Fprintf(out, " %v \t%v \t%s \t%s \t%v \t%v\n",
c.Type,
@@ -1089,18 +1112,10 @@ func describeNode(node *api.Node, pods []*api.Pod, events *api.EventList) (strin
}
}
runningPods := filterNonRunningPods(pods)
reqs, err := getPodsTotalRequests(runningPods)
if err != nil {
return err
}
fmt.Fprintf(out, "Allocated resources (total requests):\n")
for reqResource, reqValue := range reqs {
fmt.Fprintf(out, " %s:\t%s\n", reqResource, reqValue.String())
}
fmt.Fprintf(out, " pods:\t%d\n", len(runningPods))
fmt.Fprintf(out, "Version:\n")
fmt.Fprintf(out, "System Info:\n")
fmt.Fprintf(out, " Machine ID:\t%s\n", node.Status.NodeInfo.MachineID)
fmt.Fprintf(out, " System UUID:\t%s\n", node.Status.NodeInfo.SystemUUID)
fmt.Fprintf(out, " Boot ID:\t%s\n", node.Status.NodeInfo.BootID)
fmt.Fprintf(out, " Kernel Version:\t%s\n", node.Status.NodeInfo.KernelVersion)
fmt.Fprintf(out, " OS Image:\t%s\n", node.Status.NodeInfo.OsImage)
fmt.Fprintf(out, " Container Runtime Version:\t%s\n", node.Status.NodeInfo.ContainerRuntimeVersion)
@@ -1113,34 +1128,10 @@ func describeNode(node *api.Node, pods []*api.Pod, events *api.EventList) (strin
if len(node.Spec.ExternalID) > 0 {
fmt.Fprintf(out, "ExternalID:\t%s\n", node.Spec.ExternalID)
}
fmt.Fprintf(out, "Pods:\t(%d in total)\n", len(pods))
fmt.Fprint(out, " Namespace\tName\t\tCPU(milliCPU)\t\tMemory(bytes)\n")
totalMilliCPU := int64(0)
totalMemory := int64(0)
fractionPodCPU := float64(0)
fractionPodMemory := float64(0)
fractionTotalCPU := float64(0)
fractionTotalMemory := float64(0)
for _, pod := range pods {
podTotalMilliCPU := int64(0)
podTotalMemory := int64(0)
for ix := range pod.Spec.Containers {
limits := pod.Spec.Containers[ix].Resources.Limits
podTotalMilliCPU += limits.Cpu().MilliValue()
podTotalMemory += limits.Memory().Value()
}
totalMilliCPU += podTotalMilliCPU
totalMemory += podTotalMemory
fractionPodCPU = float64(podTotalMilliCPU) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100
fractionPodMemory = float64(podTotalMemory) / float64(node.Status.Capacity.Memory().Value()) * 100
fmt.Fprintf(out, " %s\t%s\t\t%d (%d%% of total)\t\t%d (%d%% of total)\n", pod.Namespace, pod.Name, podTotalMilliCPU, int64(fractionPodCPU), podTotalMemory, int64(fractionPodMemory))
if err := describeNodeResource(pods, node, out); err != nil {
return err
}
fmt.Fprint(out, "TotalResourceLimits:\n")
fractionTotalCPU = float64(totalMilliCPU) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100
fractionTotalMemory = float64(totalMemory) / float64(node.Status.Capacity.Memory().Value()) * 100
fmt.Fprintf(out, " CPU(milliCPU):\t\t%d (%d%% of total)\n", totalMilliCPU, int64(fractionTotalCPU))
fmt.Fprintf(out, " Memory(bytes):\t\t%d (%d%% of total)\n", totalMemory, int64(fractionTotalMemory))
if events != nil {
DescribeEvents(events, out)
}
@@ -1197,7 +1188,44 @@ func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string) (str
})
}
func filterNonRunningPods(pods []*api.Pod) []*api.Pod {
func describeNodeResource(pods []*api.Pod, node *api.Node, out io.Writer) error {
nonTerminatedPods := filterTerminatedPods(pods)
fmt.Fprintf(out, "Non-terminated Pods:\t(%d in total)\n", len(nonTerminatedPods))
fmt.Fprint(out, " Namespace\tName\t\tCPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\n")
fmt.Fprint(out, " ─────────\t────\t\t────────────\t──────────\t───────────────\t─────────────\n")
for _, pod := range nonTerminatedPods {
req, limit, err := getSinglePodTotalRequestsAndLimits(pod)
if err != nil {
return err
}
cpuReq, cpuLimit, memoryReq, memoryLimit := req[api.ResourceCPU], limit[api.ResourceCPU], req[api.ResourceMemory], limit[api.ResourceMemory]
fractionCpuReq := float64(cpuReq.MilliValue()) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100
fractionCpuLimit := float64(cpuLimit.MilliValue()) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100
fractionMemoryReq := float64(memoryReq.MilliValue()) / float64(node.Status.Capacity.Memory().MilliValue()) * 100
fractionMemoryLimit := float64(memoryLimit.MilliValue()) / float64(node.Status.Capacity.Memory().MilliValue()) * 100
fmt.Fprintf(out, " %s\t%s\t\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\n", pod.Namespace, pod.Name,
cpuReq.String(), int64(fractionCpuReq), cpuLimit.String(), int64(fractionCpuLimit),
memoryReq.String(), int64(fractionMemoryReq), memoryLimit.String(), int64(fractionMemoryLimit))
}
fmt.Fprint(out, "Allocated resources:\n (Total limits may be over 100%, i.e., overcommitted. More info: http://releases.k8s.io/HEAD/docs/user-guide/compute-resources.md)\n CPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\n")
fmt.Fprint(out, " ────────────\t──────────\t───────────────\t─────────────\n")
reqs, limits, err := getPodsTotalRequestsAndLimits(nonTerminatedPods)
if err != nil {
return err
}
cpuReqs, cpuLimits, memoryReqs, memoryLimits := reqs[api.ResourceCPU], limits[api.ResourceCPU], reqs[api.ResourceMemory], limits[api.ResourceMemory]
fractionCpuReqs := float64(cpuReqs.MilliValue()) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100
fractionCpuLimits := float64(cpuLimits.MilliValue()) / float64(node.Status.Capacity.Cpu().MilliValue()) * 100
fractionMemoryReqs := float64(memoryReqs.MilliValue()) / float64(node.Status.Capacity.Memory().MilliValue()) * 100
fractionMemoryLimits := float64(memoryLimits.MilliValue()) / float64(node.Status.Capacity.Memory().MilliValue()) * 100
fmt.Fprintf(out, " %s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\n",
cpuReqs.String(), int64(fractionCpuReqs), cpuLimits.String(), int64(fractionCpuLimits),
memoryReqs.String(), int64(fractionMemoryReqs), memoryLimits.String(), int64(fractionMemoryLimits))
return nil
}
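A standalone sketch of the percentage math in describeNodeResource, assuming a node with 2 CPUs and a pod requesting 500m (illustrative values only):

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	capacity := resource.MustParse("2")   // node CPU capacity (2000m)
	request := resource.MustParse("500m") // a pod's total CPU request

	// Same computation as fractionCpuReq above.
	fraction := float64(request.MilliValue()) / float64(capacity.MilliValue()) * 100
	fmt.Printf("%s (%d%%)\n", request.String(), int64(fraction))
	// Prints: 500m (25%)
}
```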
func filterTerminatedPods(pods []*api.Pod) []*api.Pod {
if len(pods) == 0 {
return pods
}
@@ -1211,36 +1239,50 @@ func filterNonRunningPods(pods []*api.Pod) []*api.Pod {
return result
}
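The middle of filterTerminatedPods is elided from this hunk; presumably it keeps every pod whose phase is neither Succeeded nor Failed, along these lines (a sketch, not the exact merged body):

```go
// Sketch under the assumption that Succeeded and Failed are treated as the
// terminal pod phases.
func filterTerminatedPods(pods []*api.Pod) []*api.Pod {
	result := []*api.Pod{}
	for _, pod := range pods {
		if pod.Status.Phase == api.PodSucceeded || pod.Status.Phase == api.PodFailed {
			continue
		}
		result = append(result, pod)
	}
	return result
}
```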
func getPodsTotalRequests(pods []*api.Pod) (map[api.ResourceName]resource.Quantity, error) {
reqs := map[api.ResourceName]resource.Quantity{}
func getPodsTotalRequestsAndLimits(pods []*api.Pod) (reqs map[api.ResourceName]resource.Quantity, limits map[api.ResourceName]resource.Quantity, err error) {
reqs, limits = map[api.ResourceName]resource.Quantity{}, map[api.ResourceName]resource.Quantity{}
for _, pod := range pods {
podReqs, err := getSinglePodTotalRequests(pod)
podReqs, podLimits, err := getSinglePodTotalRequestsAndLimits(pod)
if err != nil {
return nil, err
return nil, nil, err
}
for podReqName, podReqValue := range podReqs {
if value, ok := reqs[podReqName]; !ok {
reqs[podReqName] = podReqValue
reqs[podReqName] = *podReqValue.Copy()
} else if err = value.Add(podReqValue); err != nil {
return nil, err
return nil, nil, err
}
}
for podLimitName, podLimitValue := range podLimits {
if value, ok := limits[podLimitName]; !ok {
limits[podLimitName] = *podLimitValue.Copy()
} else if err = value.Add(podLimitValue); err != nil {
return nil, nil, err
}
}
}
return reqs, nil
return
}
func getSinglePodTotalRequests(pod *api.Pod) (map[api.ResourceName]resource.Quantity, error) {
reqs := map[api.ResourceName]resource.Quantity{}
func getSinglePodTotalRequestsAndLimits(pod *api.Pod) (reqs map[api.ResourceName]resource.Quantity, limits map[api.ResourceName]resource.Quantity, err error) {
reqs, limits = map[api.ResourceName]resource.Quantity{}, map[api.ResourceName]resource.Quantity{}
for _, container := range pod.Spec.Containers {
for name, quantity := range container.Resources.Requests {
if value, ok := reqs[name]; !ok {
reqs[name] = quantity
} else if err := value.Add(quantity); err != nil {
return nil, err
reqs[name] = *quantity.Copy()
} else if err = value.Add(quantity); err != nil {
return nil, nil, err
}
}
for name, quantity := range container.Resources.Limits {
if value, ok := limits[name]; !ok {
limits[name] = *quantity.Copy()
} else if err = value.Add(quantity); err != nil {
return nil, nil, err
}
}
}
return reqs, nil
return
}
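Worth noting: the `*quantity.Copy()` on first insert is deliberate. In this era's API, a `Quantity` holds a pointer to its underlying `inf.Dec`, so a plain struct copy still aliases the original amount and a later `Add` would mutate the pod spec in place (the aliasing is also why the `value.Add(...)` results above never need to be written back into the map). A standalone sketch of the pitfall, under that assumption:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	original := resource.MustParse("100m")

	aliased := original // struct copy, but the internal amount is shared
	_ = aliased.Add(resource.MustParse("100m"))
	fmt.Println(original.String()) // 200m: adding to the "copy" changed the original

	fresh := *original.Copy() // Copy() allocates a fresh amount
	_ = fresh.Add(resource.MustParse("100m"))
	fmt.Println(original.String(), fresh.String()) // 200m 300m
}
```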
func DescribeEvents(el *api.EventList, w io.Writer) {
@@ -1250,6 +1292,7 @@ func DescribeEvents(el *api.EventList, w io.Writer) {
}
sort.Sort(SortableEvents(el.Items))
fmt.Fprint(w, "Events:\n FirstSeen\tLastSeen\tCount\tFrom\tSubobjectPath\tReason\tMessage\n")
fmt.Fprint(w, " ─────────\t────────\t─────\t────\t─────────────\t──────\t───────\n")
for _, e := range el.Items {
fmt.Fprintf(w, " %s\t%s\t%d\t%v\t%v\t%v\t%v\n",
translateTimestamp(e.FirstTimestamp),

pkg/kubectl/describe_test.go

@@ -339,8 +339,8 @@ func TestDefaultDescribers(t *testing.T) {
func TestGetPodsTotalRequests(t *testing.T) {
testCases := []struct {
pods []*api.Pod
expectedReqs map[api.ResourceName]resource.Quantity
pods []*api.Pod
expectedReqs, expectedLimits map[api.ResourceName]resource.Quantity
}{
{
pods: []*api.Pod{
@@ -402,7 +402,7 @@ func TestGetPodsTotalRequests(t *testing.T) {
}
for _, testCase := range testCases {
reqs, err := getPodsTotalRequests(testCase.pods)
reqs, _, err := getPodsTotalRequestsAndLimits(testCase.pods)
if err != nil {
t.Errorf("Unexpected error %v", err)
}

pkg/kubelet/qos/util/qos.go

@@ -0,0 +1,75 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"k8s.io/kubernetes/pkg/api"
)
const (
Guaranteed = "Guaranteed"
Burstable = "Burstable"
BestEffort = "Best-Effort"
)
// isResourceGuaranteed returns true if the container's resource requirements are Guaranteed.
func isResourceGuaranteed(container *api.Container, resource api.ResourceName) bool {
// A container resource is guaranteed if its request == limit.
// If request == limit, the user is very confident of resource consumption.
req, hasReq := container.Resources.Requests[resource]
limit, hasLimit := container.Resources.Limits[resource]
if !hasReq || !hasLimit {
return false
}
return req.Cmp(limit) == 0 && req.Value() != 0
}
// isResourceBestEffort returns true if the container's resource requirements are best-effort.
func isResourceBestEffort(container *api.Container, resource api.ResourceName) bool {
// A container resource is best-effort if its request is unspecified or 0.
// If a request is specified, then the user expects some kind of resource guarantee.
req, hasReq := container.Resources.Requests[resource]
return !hasReq || req.Value() == 0
}
// GetQoS returns a mapping of resource name to QoS class for a container.
func GetQoS(container *api.Container) map[api.ResourceName]string {
resourceToQoS := map[api.ResourceName]string{}
for resource := range allResources(container) {
switch {
case isResourceGuaranteed(container, resource):
resourceToQoS[resource] = Guaranteed
case isResourceBestEffort(container, resource):
resourceToQoS[resource] = BestEffort
default:
resourceToQoS[resource] = Burstable
}
}
return resourceToQoS
}
// allResources returns a set of resources the container has
func allResources(container *api.Container) map[api.ResourceName]bool {
resources := map[api.ResourceName]bool{}
for resource := range container.Resources.Requests {
resources[resource] = true
}
for resource := range container.Resources.Limits {
resources[resource] = true
}
return resources
}
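A hedged usage sketch of the new helper (container values invented for illustration): per the code above, a resource is Guaranteed when its request equals a nonzero limit, Best-Effort when no (or a zero) request is set, and Burstable otherwise.

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
	qosutil "k8s.io/kubernetes/pkg/kubelet/qos/util"
)

func main() {
	container := api.Container{
		Name: "app",
		Resources: api.ResourceRequirements{
			Requests: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("100m"),
				api.ResourceMemory: resource.MustParse("128Mi"),
			},
			Limits: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("100m"),  // == request, so Guaranteed
				api.ResourceMemory: resource.MustParse("256Mi"), // > request, so Burstable
			},
		},
	}
	for res, qos := range qosutil.GetQoS(&container) {
		fmt.Printf("%s: %s\n", res, qos)
	}
	// Prints (map order varies):
	//   cpu: Guaranteed
	//   memory: Burstable
}
```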