mirror of https://github.com/k3s-io/k3s
Implement JSON printing for test summaries
parent ab6edd8170
commit f5a41823c2
@@ -152,7 +152,7 @@ func (f *Framework) afterEach() {
 
 	summaries := make([]TestDataSummary, 0)
 	if testContext.GatherKubeSystemResourceUsageData {
-		summaries = append(summaries, f.gatherer.stopAndSummarize([]int{50, 90, 99, 100}, f.addonResourceConstraints))
+		summaries = append(summaries, f.gatherer.stopAndSummarize([]int{90, 99}, f.addonResourceConstraints))
 	}
 
 	if testContext.GatherLogsSizes {
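
Note: TestDataSummary itself is not touched by this commit; judging from the printers changed below, it is the interface each summary type satisfies with PrintHumanReadable and PrintJSON. A minimal Go sketch of that pattern, assuming that interface shape and using a toy summary type (not the framework's real types):

package main

import (
	"encoding/json"
	"fmt"
)

// Assumed shape of TestDataSummary (not shown in this diff): every summary
// can print itself for humans and as JSON.
type TestDataSummary interface {
	PrintHumanReadable() string
	PrintJSON() string
}

// toySummary stands in for LogsSizeDataSummary / ResourceUsageSummary.
type toySummary map[string]int

func (s toySummary) PrintHumanReadable() string {
	return fmt.Sprintf("%v", map[string]int(s))
}

func (s toySummary) PrintJSON() string {
	b, err := json.MarshalIndent(s, "", "  ")
	if err != nil {
		return ""
	}
	return string(b)
}

func main() {
	// Collect summaries the way afterEach does, then emit JSON for each.
	summaries := []TestDataSummary{toySummary{"NumberOfProbes": 3}}
	for _, s := range summaries {
		fmt.Println(s.PrintJSON())
	}
}
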
@@ -75,8 +75,15 @@ type LogsSizeVerifier struct {
 	workers []*LogSizeGatherer
 }
 
+type SingleLogSummary struct {
+	AverageGenerationRate int
+	NumberOfProbes        int
+}
+
+type LogSizeDataTimeseries map[string]map[string][]TimestampedSize
+
 // node -> file -> data
-type LogsSizeDataSummary map[string]map[string][]TimestampedSize
+type LogsSizeDataSummary map[string]map[string]SingleLogSummary
 
 // TODO: make sure that we don't need locking here
 func (s *LogsSizeDataSummary) PrintHumanReadable() string {
@@ -86,12 +93,7 @@ func (s *LogsSizeDataSummary) PrintHumanReadable() string {
 	for k, v := range *s {
 		fmt.Fprintf(w, "%v\t\t\t\n", k)
 		for path, data := range v {
-			if len(data) > 1 {
-				last := data[len(data)-1]
-				first := data[0]
-				rate := (last.size - first.size) / int(last.timestamp.Sub(first.timestamp)/time.Second)
-				fmt.Fprintf(w, "\t%v\t%v\t%v\n", path, rate, len(data))
-			}
+			fmt.Fprintf(w, "\t%v\t%v\t%v\n", path, data.AverageGenerationRate, data.NumberOfProbes)
 		}
 	}
 	w.Flush()
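
Note: PrintHumanReadable above leans on text/tabwriter to line up the per-node rows, now printing the precomputed SingleLogSummary fields instead of recomputing the rate. A minimal sketch of that layout with illustrative data; the header row and the tabwriter parameters are assumptions, not taken from this diff:

package main

import (
	"bytes"
	"fmt"
	"text/tabwriter"
)

// singleLogSummary mirrors the SingleLogSummary type added in this commit.
type singleLogSummary struct {
	AverageGenerationRate int
	NumberOfProbes        int
}

func main() {
	// node -> file -> summary, with illustrative values.
	data := map[string]map[string]singleLogSummary{
		"node-1": {"kubelet.log": {AverageGenerationRate: 500, NumberOfProbes: 12}},
	}

	buf := &bytes.Buffer{}
	w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
	fmt.Fprintf(w, "host\tlog_file\trate(B/s)\tnumber_of_probes\n")
	for node, files := range data {
		fmt.Fprintf(w, "%v\t\t\t\n", node)
		for path, s := range files {
			fmt.Fprintf(w, "\t%v\t%v\t%v\n", path, s.AverageGenerationRate, s.NumberOfProbes)
		}
	}
	w.Flush()
	fmt.Print(buf.String())
}
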
@@ -99,11 +101,11 @@ func (s *LogsSizeDataSummary) PrintHumanReadable() string {
 }
 
 func (s *LogsSizeDataSummary) PrintJSON() string {
-	return "JSON printer not implemented for LogsSizeDataSummary"
+	return prettyPrintJSON(*s)
 }
 
 type LogsSizeData struct {
-	data LogsSizeDataSummary
+	data LogSizeDataTimeseries
 	lock sync.Mutex
 }
 
@@ -116,7 +118,7 @@ type WorkItem struct {
 }
 
 func prepareData(masterAddress string, nodeAddresses []string) LogsSizeData {
-	data := make(LogsSizeDataSummary)
+	data := make(LogSizeDataTimeseries)
 	ips := append(nodeAddresses, masterAddress)
 	for _, ip := range ips {
 		data[ip] = make(map[string][]TimestampedSize)
@@ -170,9 +172,24 @@ func NewLogsVerifier(c *client.Client, stopChannel chan bool) *LogsSizeVerifier
 	return verifier
 }
 
-// PrintData returns a string with formated results
-func (v *LogsSizeVerifier) GetSummary() *LogsSizeDataSummary {
-	return &v.data.data
+// GetSummary returns a summary (average generation rate and number of probes) of the data gathered by LogSizeVerifier
+func (s *LogsSizeVerifier) GetSummary() *LogsSizeDataSummary {
+	result := make(LogsSizeDataSummary)
+	for k, v := range s.data.data {
+		result[k] = make(map[string]SingleLogSummary)
+		for path, data := range v {
+			if len(data) > 1 {
+				last := data[len(data)-1]
+				first := data[0]
+				rate := (last.size - first.size) / int(last.timestamp.Sub(first.timestamp)/time.Second)
+				result[k][path] = SingleLogSummary{
+					AverageGenerationRate: rate,
+					NumberOfProbes:        len(data),
+				}
+			}
+		}
+	}
+	return &result
 }
 
 // Run starts log size gathering. It starts a gorouting for every worker and then blocks until stopChannel is closed
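
Note: the average generation rate computed in GetSummary is just bytes grown divided by elapsed seconds over the probe window, and it needs at least two probes. A self-contained sketch of that arithmetic, using a hypothetical timestampedSize type mirroring the TimestampedSize entries referenced above:

package main

import (
	"fmt"
	"time"
)

// timestampedSize mirrors the per-probe entries gathered for each log file
// (field names assumed from the diff: timestamp and size).
type timestampedSize struct {
	timestamp time.Time
	size      int
}

// averageGenerationRate reproduces the rate formula from GetSummary:
// (last.size - first.size) / elapsed seconds, guarded against too few probes.
func averageGenerationRate(data []timestampedSize) (bytesPerSecond int, ok bool) {
	if len(data) < 2 {
		return 0, false
	}
	first, last := data[0], data[len(data)-1]
	elapsed := int(last.timestamp.Sub(first.timestamp) / time.Second)
	if elapsed == 0 {
		return 0, false
	}
	return (last.size - first.size) / elapsed, true
}

func main() {
	t0 := time.Now()
	probes := []timestampedSize{
		{timestamp: t0, size: 1000},
		{timestamp: t0.Add(10 * time.Second), size: 6000},
	}
	if rate, ok := averageGenerationRate(probes); ok {
		// Prints: average generation rate: 500 B/s over 2 probes
		fmt.Printf("average generation rate: %d B/s over %d probes\n", rate, len(probes))
	}
}
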
@@ -70,7 +70,7 @@ func (m *metricsForE2E) PrintHumanReadable() string {
 }
 
 func (m *metricsForE2E) PrintJSON() string {
-	return "JSON printer not implemented for Metrics"
+	return prettyPrintJSON(*m)
 }
 
 var InterestingApiServerMetrics = []string{
@@ -402,10 +402,12 @@ func VerifySchedulerLatency(c *client.Client) error {
 func prettyPrintJSON(metrics interface{}) string {
 	output := &bytes.Buffer{}
 	if err := json.NewEncoder(output).Encode(metrics); err != nil {
 		Logf("Error building encoder: %v", err)
+		return ""
 	}
 	formatted := &bytes.Buffer{}
 	if err := json.Indent(formatted, output.Bytes(), "", "  "); err != nil {
 		Logf("Error indenting: %v", err)
+		return ""
 	}
 	return string(formatted.Bytes())
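
Note: prettyPrintJSON is plain encoding/json: encode to compact JSON first, then re-indent the bytes, returning "" if either step fails. A standalone sketch of the same flow, with the framework's Logf swapped for fmt output since Logf is not part of this diff:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// prettyPrint mirrors prettyPrintJSON above: Encode, then json.Indent,
// bailing out with "" on any error.
func prettyPrint(v interface{}) string {
	output := &bytes.Buffer{}
	if err := json.NewEncoder(output).Encode(v); err != nil {
		fmt.Printf("Error building encoder: %v\n", err)
		return ""
	}
	formatted := &bytes.Buffer{}
	if err := json.Indent(formatted, output.Bytes(), "", "  "); err != nil {
		fmt.Printf("Error indenting: %v\n", err)
		return ""
	}
	return formatted.String()
}

func main() {
	// Illustrative node -> file -> size data, not real gathered output.
	summary := map[string]map[string]int{
		"node-1": {"kube-apiserver.log": 512},
	}
	fmt.Println(prettyPrint(summary))
}
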
@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"math"
 	"sort"
+	"strconv"
 	"strings"
 	"sync"
 	"text/tabwriter"
@@ -48,12 +49,13 @@ type containerResourceGatherer struct {
 }
 
 type singleContainerSummary struct {
-	name string
-	cpu  float64
-	mem  int64
+	Name string
+	Cpu  float64
+	Mem  int64
 }
 
-type ResourceUsageSummary map[int][]singleContainerSummary
+// we can't have int here, as JSON does not accept integer keys.
+type ResourceUsageSummary map[string][]singleContainerSummary
 
 func (s *ResourceUsageSummary) PrintHumanReadable() string {
 	buf := &bytes.Buffer{}
@@ -62,7 +64,7 @@ func (s *ResourceUsageSummary) PrintHumanReadable() string {
 		buf.WriteString(fmt.Sprintf("%v percentile:\n", perc))
 		fmt.Fprintf(w, "container\tcpu(cores)\tmemory(MB)\n")
 		for _, summary := range summaries {
-			fmt.Fprintf(w, "%q\t%.3f\t%.2f\n", summary.name, summary.cpu, float64(summary.mem)/(1024*1024))
+			fmt.Fprintf(w, "%q\t%.3f\t%.2f\n", summary.Name, summary.Cpu, float64(summary.Mem)/(1024*1024))
 		}
 		w.Flush()
 	}
@@ -70,7 +72,7 @@ func (s *ResourceUsageSummary) PrintHumanReadable() string {
 }
 
 func (s *ResourceUsageSummary) PrintJSON() string {
-	return "JSON printer not implemented for ResourceUsageSummary"
+	return prettyPrintJSON(*s)
 }
 
 func (g *containerResourceGatherer) startGatheringData(c *client.Client, period time.Duration) {
@@ -115,10 +117,10 @@ func (g *containerResourceGatherer) stopAndSummarize(percentiles []int, constrai
 	for _, perc := range percentiles {
 		for _, name := range sortedKeys {
 			usage := stats[perc][name]
-			summary[perc] = append(summary[perc], singleContainerSummary{
-				name: name,
-				cpu:  usage.CPUUsageInCores,
-				mem:  usage.MemoryWorkingSetInBytes,
+			summary[strconv.Itoa(perc)] = append(summary[strconv.Itoa(perc)], singleContainerSummary{
+				Name: name,
+				Cpu:  usage.CPUUsageInCores,
+				Mem:  usage.MemoryWorkingSetInBytes,
 			})
 			// Verifying 99th percentile of resource usage
 			if perc == 99 {
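
Note: two things make the resource summary JSON-friendly in this file: the singleContainerSummary fields become exported so encoding/json will include them, and percentiles are keyed as strings via strconv.Itoa (the in-tree comment above gives integer keys as the reason). A small sketch of that keying with illustrative container data; the values and the marshaling call are for demonstration only:

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// containerSummary stands in for singleContainerSummary; exported fields so
// encoding/json can see them, as in the diff.
type containerSummary struct {
	Name string
	Cpu  float64
	Mem  int64
}

func main() {
	percentiles := []int{90, 99}
	summary := map[string][]containerSummary{}
	for _, perc := range percentiles {
		// Key by the percentile rendered as a string, mirroring strconv.Itoa(perc).
		summary[strconv.Itoa(perc)] = append(summary[strconv.Itoa(perc)], containerSummary{
			Name: "kube-apiserver/e2e-test-master", // illustrative value
			Cpu:  0.1,
			Mem:  256 * 1024 * 1024,
		})
	}
	out, err := json.MarshalIndent(summary, "", "  ")
	if err != nil {
		fmt.Println("marshal error:", err)
		return
	}
	fmt.Println(string(out))
}
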