Use mCPU as CPU usage unit, add version in PerfData, fix memory usage bug.

pull/6/head
Random-Liu 2016-04-20 02:15:20 -07:00
parent 2be704f9ad
commit 7af6642e65
3 changed files with 30 additions and 8 deletions

@@ -669,7 +669,7 @@ func (r *ResourceMonitor) GetMasterNodeLatest(usagePerNode ResourceUsagePerNode)
 			nodeAvgUsage[c] = &ContainerResourceUsage{Name: usage.Name}
 		}
 		nodeAvgUsage[c].CPUUsageInCores += usage.CPUUsageInCores
-		nodeAvgUsage[c].MemoryRSSInBytes += usage.MemoryRSSInBytes
+		nodeAvgUsage[c].MemoryUsageInBytes += usage.MemoryUsageInBytes
 		nodeAvgUsage[c].MemoryWorkingSetInBytes += usage.MemoryWorkingSetInBytes
 		nodeAvgUsage[c].MemoryRSSInBytes += usage.MemoryRSSInBytes
 	}
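For context, the hunk above is the memory usage bug fix from the commit message: the old loop added MemoryRSSInBytes a second time where MemoryUsageInBytes should have been accumulated. Below is a minimal sketch of the corrected accumulation, using a trimmed stand-in for the framework type (field names follow the diff; the averaging helper, sample data, and everything else here are illustrative, not the real GetMasterNodeLatest):

package main

import "fmt"

// ContainerResourceUsage is a simplified stand-in for the framework type
// whose fields are accumulated in GetMasterNodeLatest.
type ContainerResourceUsage struct {
	Name                    string
	CPUUsageInCores         float64
	MemoryUsageInBytes      uint64
	MemoryWorkingSetInBytes uint64
	MemoryRSSInBytes        uint64
}

// averageUsage sums per-node samples for one container and averages them,
// mirroring the fixed loop: each memory field is added exactly once.
func averageUsage(samples []ContainerResourceUsage) ContainerResourceUsage {
	avg := ContainerResourceUsage{Name: samples[0].Name}
	for _, usage := range samples {
		avg.CPUUsageInCores += usage.CPUUsageInCores
		avg.MemoryUsageInBytes += usage.MemoryUsageInBytes // before the fix, RSS was added here a second time
		avg.MemoryWorkingSetInBytes += usage.MemoryWorkingSetInBytes
		avg.MemoryRSSInBytes += usage.MemoryRSSInBytes
	}
	n := uint64(len(samples))
	avg.CPUUsageInCores /= float64(n)
	avg.MemoryUsageInBytes /= n
	avg.MemoryWorkingSetInBytes /= n
	avg.MemoryRSSInBytes /= n
	return avg
}

func main() {
	// Invented sample data for two nodes reporting the same container.
	samples := []ContainerResourceUsage{
		{Name: "kubelet", CPUUsageInCores: 0.2, MemoryUsageInBytes: 100 << 20, MemoryWorkingSetInBytes: 80 << 20, MemoryRSSInBytes: 60 << 20},
		{Name: "kubelet", CPUUsageInCores: 0.4, MemoryUsageInBytes: 120 << 20, MemoryWorkingSetInBytes: 90 << 20, MemoryRSSInBytes: 70 << 20},
	}
	fmt.Printf("%+v\n", averageUsage(samples))
}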

@@ -25,9 +25,13 @@ import (
 // TODO(random-liu): Change the tests to actually use PerfData from the begining instead of
 // translating one to the other here.
 
+// currentApiCallMetricsVersion is the current apicall performance metrics version. We should
+// bump up the version each time we make incompatible change to the metrics.
+const currentApiCallMetricsVersion = "v1"
+
 // ApiCallToPerfData transforms APIResponsiveness to PerfData.
 func ApiCallToPerfData(apicalls APIResponsiveness) *perftype.PerfData {
-	perfData := &perftype.PerfData{}
+	perfData := &perftype.PerfData{Version: currentApiCallMetricsVersion}
 	for _, apicall := range apicalls.APICalls {
 		item := perftype.DataItem{
 			Data: map[string]float64{
@@ -46,6 +50,10 @@ func ApiCallToPerfData(apicalls APIResponsiveness) *perftype.PerfData {
 	return perfData
 }
 
+// currentKubeletPerfMetricsVersion is the current kubelet performance metrics version. We should
+// bump up the version each time we make incompatible change to the metrics.
+const currentKubeletPerfMetricsVersion = "v1"
+
 // ResourceUsageToPerfData transforms ResourceUsagePerNode to PerfData. Notice that this function
 // only cares about memory usage, because cpu usage information will be extracted from NodesCPUSummary.
 func ResourceUsageToPerfData(usagePerNode ResourceUsagePerNode) *perftype.PerfData {
@@ -54,7 +62,7 @@ func ResourceUsageToPerfData(usagePerNode ResourceUsagePerNode) *perftype.PerfData {
 		for c, usage := range usages {
 			item := perftype.DataItem{
 				Data: map[string]float64{
-					"memory": float64(usage.MemoryRSSInBytes) / (1024 * 1024),
+					"memory": float64(usage.MemoryUsageInBytes) / (1024 * 1024),
 					"workingset": float64(usage.MemoryWorkingSetInBytes) / (1024 * 1024),
 					"rss": float64(usage.MemoryRSSInBytes) / (1024 * 1024),
 				},
@@ -68,7 +76,10 @@ func ResourceUsageToPerfData(usagePerNode ResourceUsagePerNode) *perftype.PerfData {
 			items = append(items, item)
 		}
 	}
-	return &perftype.PerfData{DataItems: items}
+	return &perftype.PerfData{
+		Version: currentKubeletPerfMetricsVersion,
+		DataItems: items,
+	}
 }
 
 // CPUUsageToPerfData transforms NodesCPUSummary to PerfData.
@@ -78,11 +89,11 @@ func CPUUsageToPerfData(usagePerNode NodesCPUSummary) *perftype.PerfData {
 		for c, usage := range usages {
 			data := map[string]float64{}
 			for perc, value := range usage {
-				data[fmt.Sprintf("Perc%02.0f", perc*100)] = value * 100
+				data[fmt.Sprintf("Perc%02.0f", perc*100)] = value * 1000
 			}
 			item := perftype.DataItem{
 				Data: data,
-				Unit: "%",
+				Unit: "mCPU",
 				Labels: map[string]string{
 					"node": node,
 					"container": c,
@@ -92,13 +103,17 @@ func CPUUsageToPerfData(usagePerNode NodesCPUSummary) *perftype.PerfData {
 			items = append(items, item)
 		}
 	}
-	return &perftype.PerfData{DataItems: items}
+	return &perftype.PerfData{
+		Version: currentKubeletPerfMetricsVersion,
+		DataItems: items,
+	}
 }
 
 // PrintPerfData prints the perfdata in json format with PerfResultTag prefix.
 // If an error occurs, nothing will be printed.
 func PrintPerfData(p *perftype.PerfData) {
+	// Notice that we must make sure the perftype.PerfResultEnd is in a new line.
 	if str := PrettyPrintJSON(p); str != "" {
-		Logf("%s", perftype.PerfResultTag+" "+str)
+		Logf("%s %s\n%s", perftype.PerfResultTag, str, perftype.PerfResultEnd)
 	}
 }
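To make the new log format concrete, here is a hedged sketch of the kind of line the updated PrintPerfData emits: the result tag, the versioned JSON, and the end tag on its own line. The types below are simplified local copies of the perftype definitions shown in the next file, and the sample values are invented; the real function uses the framework's Logf and PrettyPrintJSON rather than fmt and encoding/json directly:

package main

import (
	"encoding/json"
	"fmt"
)

// Simplified copies of the perftype constants and types touched by this commit.
const (
	PerfResultTag = "[Result:Performance]"
	PerfResultEnd = "[Finish:Performance]"
)

type DataItem struct {
	Data   map[string]float64 `json:"data"`
	Unit   string             `json:"unit"`
	Labels map[string]string  `json:"labels"`
}

type PerfData struct {
	Version   string     `json:"version"`
	DataItems []DataItem `json:"dataItems"`
}

func main() {
	p := PerfData{
		Version: "v1",
		DataItems: []DataItem{{
			Data:   map[string]float64{"Perc50": 125, "Perc95": 500},
			Unit:   "mCPU",
			Labels: map[string]string{"node": "node-0", "container": "kubelet"},
		}},
	}
	str, _ := json.MarshalIndent(p, "", "  ")
	// Mirrors the new log format: tag, pretty-printed JSON, then the end tag
	// on its own line so parsers can locate the whole block.
	fmt.Printf("%s %s\n%s\n", PerfResultTag, str, PerfResultEnd)
}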

@@ -36,9 +36,16 @@ type DataItem struct {
 // PerfData contains all data items generated in current test.
 type PerfData struct {
+	// Version is the version of the metrics. The metrics consumer could use the version
+	// to detect metrics version change and decide what version to support.
+	Version string `json:"version"`
 	DataItems []DataItem `json:"dataItems"`
 }
 
 // PerfResultTag is the prefix of generated perfdata. Analyzing tools can find the perf result
 // with this tag.
 const PerfResultTag = "[Result:Performance]"
+
+// PerfResultEnd is the end of generated perfdata. Analyzing tools can find the end of the perf
+// result with this tag.
+const PerfResultEnd = "[Finish:Performance]"
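On the consumer side, the new Version field plus the two tags make the result easy to locate in a log and to gate on. A hypothetical parser sketch follows; the tag-scanning helper, its error handling, and the hard-coded "v1" check are assumptions for illustration, not part of this commit or of any existing analysis tool:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

const (
	perfResultTag = "[Result:Performance]"
	perfResultEnd = "[Finish:Performance]"
)

type perfData struct {
	Version   string            `json:"version"`
	DataItems []json.RawMessage `json:"dataItems"`
}

// extractPerfData pulls the JSON between the start and end tags out of a log
// and rejects metrics versions it does not understand.
func extractPerfData(log string) (*perfData, error) {
	start := strings.Index(log, perfResultTag)
	end := strings.Index(log, perfResultEnd)
	if start < 0 || end < 0 || end <= start {
		return nil, fmt.Errorf("no perf result found")
	}
	raw := log[start+len(perfResultTag) : end]
	var p perfData
	if err := json.Unmarshal([]byte(raw), &p); err != nil {
		return nil, err
	}
	if p.Version != "v1" {
		return nil, fmt.Errorf("unsupported metrics version %q", p.Version)
	}
	return &p, nil
}

func main() {
	log := perfResultTag + ` {"version":"v1","dataItems":[]} ` + "\n" + perfResultEnd
	p, err := extractPerfData(log)
	fmt.Println(p, err)
}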