mirror of https://github.com/k3s-io/k3s
Merge pull request #75783 from oomichi/golint-e2e-framework-m
Fix golint failures of e2e/framework/metrics_util.go
tag: k3s-v1.15.3
commit 79000d4ac5
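For context: golint, the linter this merge satisfies, drives nearly every hunk below with two rules. It wants each exported identifier to carry a doc comment beginning with the identifier's name, and it prefers identifiers that need no export to be unexported. A minimal sketch of the comment rule, reusing the Histogram type that appears in the diff (the package name matches the file's real package, the rest is illustrative):

package framework

// Without the doc comment below, golint reports
// "exported type Histogram should have comment or be unexported";
// a comment that does not begin with the name draws
// "comment on exported type Histogram should be of the form \"Histogram ...\"".

// Histogram is a struct for managing histogram.
type Histogram struct {
	Labels  map[string]string `json:"labels"`
	Buckets map[string]int    `json:"buckets"`
}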
@@ -65,31 +65,28 @@ const (
 	caFunctionMetricLabel = "function"
 )
 
+// MetricsForE2E is metrics collection of components.
 type MetricsForE2E metrics.Collection
 
 func (m *MetricsForE2E) filterMetrics() {
-	interestingAPIServerMetrics := make(metrics.APIServerMetrics)
-	for _, metric := range InterestingAPIServerMetrics {
-		interestingAPIServerMetrics[metric] = (*m).APIServerMetrics[metric]
+	apiServerMetrics := make(metrics.APIServerMetrics)
+	for _, metric := range interestingAPIServerMetrics {
+		apiServerMetrics[metric] = (*m).APIServerMetrics[metric]
 	}
-	interestingControllerManagerMetrics := make(metrics.ControllerManagerMetrics)
-	for _, metric := range InterestingControllerManagerMetrics {
-		interestingControllerManagerMetrics[metric] = (*m).ControllerManagerMetrics[metric]
+	controllerManagerMetrics := make(metrics.ControllerManagerMetrics)
+	for _, metric := range interestingControllerManagerMetrics {
+		controllerManagerMetrics[metric] = (*m).ControllerManagerMetrics[metric]
 	}
-	interestingClusterAutoscalerMetrics := make(metrics.ClusterAutoscalerMetrics)
-	for _, metric := range InterestingClusterAutoscalerMetrics {
-		interestingClusterAutoscalerMetrics[metric] = (*m).ClusterAutoscalerMetrics[metric]
-	}
-	interestingKubeletMetrics := make(map[string]metrics.KubeletMetrics)
+	kubeletMetrics := make(map[string]metrics.KubeletMetrics)
 	for kubelet, grabbed := range (*m).KubeletMetrics {
-		interestingKubeletMetrics[kubelet] = make(metrics.KubeletMetrics)
-		for _, metric := range InterestingKubeletMetrics {
-			interestingKubeletMetrics[kubelet][metric] = grabbed[metric]
+		kubeletMetrics[kubelet] = make(metrics.KubeletMetrics)
+		for _, metric := range interestingKubeletMetrics {
+			kubeletMetrics[kubelet][metric] = grabbed[metric]
 		}
 	}
-	(*m).APIServerMetrics = interestingAPIServerMetrics
-	(*m).ControllerManagerMetrics = interestingControllerManagerMetrics
-	(*m).KubeletMetrics = interestingKubeletMetrics
+	(*m).APIServerMetrics = apiServerMetrics
+	(*m).ControllerManagerMetrics = controllerManagerMetrics
+	(*m).KubeletMetrics = kubeletMetrics
 }
 
 func printSample(sample *model.Sample) string {
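One detail the hunk above implies but the commit message does not spell out: the locals in filterMetrics had to be renamed along with the package-level vars, because once the package lists are lowercased, a local named interestingAPIServerMetrics would shadow the package-level list of the same name and the range would silently walk the empty local map. A standalone sketch of that hazard (names are illustrative, not from the commit):

package main

import "fmt"

// Package-level list, like the renamed interesting*Metrics variables.
var interestingMetrics = []string{"apiserver_request_total"}

func filter() map[string]bool {
	// The local shadows the package-level slice for the rest of the function,
	// so the range below walks the empty local map, not the list above.
	interestingMetrics := make(map[string]bool)
	for metric := range interestingMetrics {
		interestingMetrics[metric] = true
	}
	return interestingMetrics
}

func main() {
	fmt.Println(len(filter())) // 0: the "interesting" list was never consulted
}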
@@ -112,21 +109,22 @@ func printSample(sample *model.Sample) string {
 	return fmt.Sprintf("[%v] = %v", strings.Join(buf, ","), sample.Value)
 }
 
+// PrintHumanReadable returns e2e metrics with JSON format.
 func (m *MetricsForE2E) PrintHumanReadable() string {
 	buf := bytes.Buffer{}
-	for _, interestingMetric := range InterestingAPIServerMetrics {
+	for _, interestingMetric := range interestingAPIServerMetrics {
 		buf.WriteString(fmt.Sprintf("For %v:\n", interestingMetric))
 		for _, sample := range (*m).APIServerMetrics[interestingMetric] {
 			buf.WriteString(fmt.Sprintf("\t%v\n", printSample(sample)))
 		}
 	}
-	for _, interestingMetric := range InterestingControllerManagerMetrics {
+	for _, interestingMetric := range interestingControllerManagerMetrics {
 		buf.WriteString(fmt.Sprintf("For %v:\n", interestingMetric))
 		for _, sample := range (*m).ControllerManagerMetrics[interestingMetric] {
 			buf.WriteString(fmt.Sprintf("\t%v\n", printSample(sample)))
 		}
 	}
-	for _, interestingMetric := range InterestingClusterAutoscalerMetrics {
+	for _, interestingMetric := range interestingClusterAutoscalerMetrics {
 		buf.WriteString(fmt.Sprintf("For %v:\n", interestingMetric))
 		for _, sample := range (*m).ClusterAutoscalerMetrics[interestingMetric] {
 			buf.WriteString(fmt.Sprintf("\t%v\n", printSample(sample)))
@@ -134,7 +132,7 @@ func (m *MetricsForE2E) PrintHumanReadable() string {
 	}
 	for kubelet, grabbed := range (*m).KubeletMetrics {
 		buf.WriteString(fmt.Sprintf("For %v:\n", kubelet))
-		for _, interestingMetric := range InterestingKubeletMetrics {
+		for _, interestingMetric := range interestingKubeletMetrics {
 			buf.WriteString(fmt.Sprintf("\tFor %v:\n", interestingMetric))
 			for _, sample := range grabbed[interestingMetric] {
 				buf.WriteString(fmt.Sprintf("\t\t%v\n", printSample(sample)))
@@ -144,18 +142,20 @@ func (m *MetricsForE2E) PrintHumanReadable() string {
 	return buf.String()
 }
 
+// PrintJSON returns e2e metrics with JSON format.
 func (m *MetricsForE2E) PrintJSON() string {
 	m.filterMetrics()
 	return PrettyPrintJSON(m)
 }
 
+// SummaryKind returns the summary of e2e metrics.
 func (m *MetricsForE2E) SummaryKind() string {
 	return "MetricsForE2E"
 }
 
-var SchedulingLatencyMetricName = model.LabelValue(schedulermetric.SchedulerSubsystem + "_" + schedulermetric.SchedulingLatencyName)
+var schedulingLatencyMetricName = model.LabelValue(schedulermetric.SchedulerSubsystem + "_" + schedulermetric.SchedulingLatencyName)
 
-var InterestingAPIServerMetrics = []string{
+var interestingAPIServerMetrics = []string{
 	"apiserver_request_total",
 	// TODO(krzysied): apiserver_request_latencies_summary is a deprecated metric.
 	// It should be replaced with new metric.
@@ -163,7 +163,7 @@ var InterestingAPIServerMetrics = []string{
 	"apiserver_init_events_total",
 }
 
-var InterestingControllerManagerMetrics = []string{
+var interestingControllerManagerMetrics = []string{
 	"garbage_collector_attempt_to_delete_queue_latency",
 	"garbage_collector_attempt_to_delete_work_duration",
 	"garbage_collector_attempt_to_orphan_queue_latency",
@@ -183,7 +183,7 @@ var InterestingControllerManagerMetrics = []string{
 	"namespace_work_duration_count",
 }
 
-var InterestingKubeletMetrics = []string{
+var interestingKubeletMetrics = []string{
 	"kubelet_docker_operations_errors_total",
 	"kubelet_docker_operations_duration_seconds",
 	"kubelet_pod_start_duration_seconds",
@@ -191,13 +191,13 @@ var InterestingKubeletMetrics = []string{
 	"kubelet_pod_worker_start_duration_seconds",
 }
 
-var InterestingClusterAutoscalerMetrics = []string{
+var interestingClusterAutoscalerMetrics = []string{
 	"function_duration_seconds",
 	"errors_total",
 	"evicted_pods_total",
 }
 
-// Dashboard metrics
+// LatencyMetric is a struct for dashboard metrics.
 type LatencyMetric struct {
 	Perc50 time.Duration `json:"Perc50"`
 	Perc90 time.Duration `json:"Perc90"`
@@ -205,6 +205,7 @@ type LatencyMetric struct {
 	Perc100 time.Duration `json:"Perc100"`
 }
 
+// PodStartupLatency is a struct for managing latency of pod startup.
 type PodStartupLatency struct {
 	CreateToScheduleLatency LatencyMetric `json:"createToScheduleLatency"`
 	ScheduleToRunLatency    LatencyMetric `json:"scheduleToRunLatency"`
@@ -213,18 +214,22 @@ type PodStartupLatency struct {
 	E2ELatency              LatencyMetric `json:"e2eLatency"`
 }
 
+// SummaryKind returns the summary of pod startup latency.
 func (l *PodStartupLatency) SummaryKind() string {
 	return "PodStartupLatency"
 }
 
+// PrintHumanReadable returns pod startup latency with JSON format.
 func (l *PodStartupLatency) PrintHumanReadable() string {
 	return PrettyPrintJSON(l)
 }
 
+// PrintJSON returns pod startup latency with JSON format.
 func (l *PodStartupLatency) PrintJSON() string {
 	return PrettyPrintJSON(PodStartupLatencyToPerfData(l))
 }
 
+// SchedulingMetrics is a struct for managing scheduling metrics.
 type SchedulingMetrics struct {
 	PredicateEvaluationLatency LatencyMetric `json:"predicateEvaluationLatency"`
 	PriorityEvaluationLatency  LatencyMetric `json:"priorityEvaluationLatency"`
@@ -236,23 +241,28 @@ type SchedulingMetrics struct {
 	ThroughputPerc99           float64       `json:"throughputPerc99"`
 }
 
+// SummaryKind returns the summary of scheduling metrics.
 func (l *SchedulingMetrics) SummaryKind() string {
 	return "SchedulingMetrics"
 }
 
+// PrintHumanReadable returns scheduling metrics with JSON format.
 func (l *SchedulingMetrics) PrintHumanReadable() string {
 	return PrettyPrintJSON(l)
 }
 
+// PrintJSON returns scheduling metrics with JSON format.
 func (l *SchedulingMetrics) PrintJSON() string {
 	return PrettyPrintJSON(l)
 }
 
+// Histogram is a struct for managing histogram.
 type Histogram struct {
 	Labels  map[string]string `json:"labels"`
 	Buckets map[string]int    `json:"buckets"`
 }
 
+// HistogramVec is an array of Histogram.
 type HistogramVec []Histogram
 
 func newHistogram(labels map[string]string) *Histogram {
@@ -262,6 +272,7 @@ func newHistogram(labels map[string]string) *Histogram {
 	}
 }
 
+// EtcdMetrics is a struct for managing etcd metrics.
 type EtcdMetrics struct {
 	BackendCommitDuration     HistogramVec `json:"backendCommitDuration"`
 	SnapshotSaveTotalDuration HistogramVec `json:"snapshotSaveTotalDuration"`
@@ -279,24 +290,29 @@ func newEtcdMetrics() *EtcdMetrics {
 	}
 }
 
+// SummaryKind returns the summary of etcd metrics.
 func (l *EtcdMetrics) SummaryKind() string {
 	return "EtcdMetrics"
 }
 
+// PrintHumanReadable returns etcd metrics with JSON format.
 func (l *EtcdMetrics) PrintHumanReadable() string {
 	return PrettyPrintJSON(l)
 }
 
+// PrintJSON returns etcd metrics with JSON format.
 func (l *EtcdMetrics) PrintJSON() string {
 	return PrettyPrintJSON(l)
 }
 
+// EtcdMetricsCollector is a struct for managing etcd metrics collector.
 type EtcdMetricsCollector struct {
 	stopCh  chan struct{}
 	wg      *sync.WaitGroup
 	metrics *EtcdMetrics
 }
 
+// NewEtcdMetricsCollector creates a new etcd metrics collector.
 func NewEtcdMetricsCollector() *EtcdMetricsCollector {
 	return &EtcdMetricsCollector{
 		stopCh: make(chan struct{}),
@@ -357,6 +373,7 @@ func (mc *EtcdMetricsCollector) StartCollecting(interval time.Duration) {
 	}()
 }
 
+// StopAndSummarize stops etcd metrics collector and summarizes the metrics.
 func (mc *EtcdMetricsCollector) StopAndSummarize() error {
 	close(mc.stopCh)
 	mc.wg.Wait()
@@ -381,17 +398,12 @@ func (mc *EtcdMetricsCollector) StopAndSummarize() error {
 	return nil
 }
 
+// GetMetrics returns metrics of etcd metrics collector.
 func (mc *EtcdMetricsCollector) GetMetrics() *EtcdMetrics {
 	return mc.metrics
 }
 
-type SaturationTime struct {
-	TimeToSaturate time.Duration `json:"timeToSaturate"`
-	NumberOfNodes  int           `json:"numberOfNodes"`
-	NumberOfPods   int           `json:"numberOfPods"`
-	Throughput     float32       `json:"throughput"`
-}
-
+// APICall is a struct for managing API call.
 type APICall struct {
 	Resource    string `json:"resource"`
 	Subresource string `json:"subresource"`
@@ -401,18 +413,22 @@ type APICall struct {
 	Count       int           `json:"count"`
 }
 
+// APIResponsiveness is a struct for managing multiple API calls.
 type APIResponsiveness struct {
 	APICalls []APICall `json:"apicalls"`
 }
 
+// SummaryKind returns the summary of API responsiveness.
 func (a *APIResponsiveness) SummaryKind() string {
 	return "APIResponsiveness"
 }
 
+// PrintHumanReadable returns metrics with JSON format.
 func (a *APIResponsiveness) PrintHumanReadable() string {
 	return PrettyPrintJSON(a)
 }
 
+// PrintJSON returns metrics of PerfData(50, 90 and 99th percentiles) with JSON format.
 func (a *APIResponsiveness) PrintJSON() string {
 	return PrettyPrintJSON(APICallToPerfData(a))
 }
@@ -522,7 +538,7 @@ func readLatencyMetrics(c clientset.Interface) (*APIResponsiveness, error) {
 	return &a, err
 }
 
-// Prints top five summary metrics for request types with latency and returns
+// HighLatencyRequests prints top five summary metrics for request types with latency and returns
 // number of such request types above threshold. We use a higher threshold for
 // list calls if nodeCount is above a given threshold (i.e. cluster is big).
 func HighLatencyRequests(c clientset.Interface, nodeCount int) (int, *APIResponsiveness, error) {
@@ -562,7 +578,7 @@ func HighLatencyRequests(c clientset.Interface, nodeCount int) (int, *APIResponsiveness, error) {
 	return badMetrics, metrics, nil
 }
 
-// Verifies whether 50, 90 and 99th percentiles of a latency metric are
+// VerifyLatencyWithinThreshold verifies whether 50, 90 and 99th percentiles of a latency metric are
 // within the expected threshold.
 func VerifyLatencyWithinThreshold(threshold, actual LatencyMetric, metricName string) error {
 	if actual.Perc50 > threshold.Perc50 {
@@ -577,7 +593,7 @@ func VerifyLatencyWithinThreshold(threshold, actual LatencyMetric, metricName string) error {
 	return nil
 }
 
-// Resets latency metrics in apiserver.
+// ResetMetrics resets latency metrics in apiserver.
 func ResetMetrics(c clientset.Interface) error {
 	Logf("Resetting latency metrics in apiserver...")
 	body, err := c.CoreV1().RESTClient().Delete().AbsPath("/metrics").DoRaw()
@@ -663,11 +679,11 @@ func getSchedulingLatency(c clientset.Interface) (*SchedulingMetrics, error) {
 	}
 
 	for _, sample := range samples {
-		if sample.Metric[model.MetricNameLabel] != SchedulingLatencyMetricName {
+		if sample.Metric[model.MetricNameLabel] != schedulingLatencyMetricName {
 			continue
 		}
 
-		var metric *LatencyMetric = nil
+		var metric *LatencyMetric
 		switch sample.Metric[schedulermetric.OperationLabel] {
 		case schedulermetric.PredicateEvaluation:
 			metric = &result.PredicateEvaluationLatency
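The var metric *LatencyMetric = nil change in the hunk above is a different lint rule from the renames: a declared pointer is already nil, so golint asks for the redundant initializer to be dropped ("should drop = nil from declaration ... it is the zero value"). A short illustration:

package main

import "fmt"

type LatencyMetric struct{}

func main() {
	var explicit *LatencyMetric = nil // golint: should drop = nil; it is the zero value
	var implicit *LatencyMetric      // identical: pointers default to nil
	fmt.Println(explicit == implicit) // true
}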
@@ -691,7 +707,7 @@ func getSchedulingLatency(c clientset.Interface) (*SchedulingMetrics, error) {
 	return &result, nil
 }
 
-// Verifies (currently just by logging them) the scheduling latencies.
+// VerifySchedulerLatency verifies (currently just by logging them) the scheduling latencies.
 func VerifySchedulerLatency(c clientset.Interface) (*SchedulingMetrics, error) {
 	latency, err := getSchedulingLatency(c)
 	if err != nil {
@@ -700,6 +716,7 @@ func VerifySchedulerLatency(c clientset.Interface) (*SchedulingMetrics, error) {
 	return latency, nil
 }
 
+// ResetSchedulerMetrics sends a DELETE request to kube-scheduler for resetting metrics.
 func ResetSchedulerMetrics(c clientset.Interface) error {
 	responseText, err := sendRestRequestToScheduler(c, "DELETE")
 	if err != nil {
@@ -729,6 +746,7 @@ func convertSampleToBucket(sample *model.Sample, h *HistogramVec) {
 	hist.Buckets[string(sample.Metric["le"])] = int(sample.Value)
 }
 
+// PrettyPrintJSON converts metrics to JSON format.
 func PrettyPrintJSON(metrics interface{}) string {
 	output := &bytes.Buffer{}
 	if err := json.NewEncoder(output).Encode(metrics); err != nil {
@@ -775,12 +793,14 @@ type PodLatencyData struct {
 	Latency time.Duration
 }
 
+// LatencySlice is an array of PodLatencyData which encapsulates pod startup latency information.
 type LatencySlice []PodLatencyData
 
 func (a LatencySlice) Len() int           { return len(a) }
 func (a LatencySlice) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
 func (a LatencySlice) Less(i, j int) bool { return a[i].Latency < a[j].Latency }
 
+// ExtractLatencyMetrics returns latency metrics for each percentile(50th, 90th and 99th).
 func ExtractLatencyMetrics(latencies []PodLatencyData) LatencyMetric {
 	length := len(latencies)
 	perc50 := latencies[int(math.Ceil(float64(length*50)/100))-1].Latency
@@ -805,6 +825,7 @@ func LogSuspiciousLatency(latencyData []PodLatencyData, latencyDataLag []PodLatencyData, nodeCount int, c clientset.Interface) {
 		float64(nodeCount)/(latencyDataLag[len(latencyDataLag)-1].Latency.Minutes()))
 }
 
+// PrintLatencies outputs latencies to log with readable format.
 func PrintLatencies(latencies []PodLatencyData, header string) {
 	metrics := ExtractLatencyMetrics(latencies)
 	Logf("10%% %s: %v", header, latencies[(len(latencies)*9)/10:])
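The remaining hunks come from a second file in the merge whose header did not survive this mirror; judging by the SIGDescribe("Density") context it is the e2e density/scalability test. They show the consumer side of the cleanup: with framework.SaturationTime deleted, code outside the framework package can no longer name that type and instead defines a private copy. A sketch of the visibility rule, with the package name as an assumption:

package framework // stand-in for the real framework package

// saturationTime is unexported, so it is invisible outside this package.
type saturationTime struct{}

// In any other package, the reference below no longer compiles:
//
//	import "k8s.io/kubernetes/test/e2e/framework"
//
//	var s framework.saturationTime // compile error: unexported name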
@@ -81,6 +81,13 @@ type DensityTestConfig struct {
 	DaemonConfigs []*testutils.DaemonConfig
 }
 
+type saturationTime struct {
+	TimeToSaturate time.Duration `json:"timeToSaturate"`
+	NumberOfNodes  int           `json:"numberOfNodes"`
+	NumberOfPods   int           `json:"numberOfPods"`
+	Throughput     float32       `json:"throughput"`
+}
+
 func (dtc *DensityTestConfig) runSecretConfigs(testPhase *timer.Phase) {
 	defer testPhase.End()
 	for _, sc := range dtc.SecretConfigs {
@@ -418,7 +425,7 @@ var _ = SIGDescribe("Density", func() {
 			saturationThreshold = MinSaturationThreshold
 		}
 		Expect(e2eStartupTime).NotTo(BeNumerically(">", saturationThreshold))
-		saturationData := framework.SaturationTime{
+		saturationData := saturationTime{
 			TimeToSaturate: e2eStartupTime,
 			NumberOfNodes:  nodeCount,
 			NumberOfPods:   totalPods,
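Worth noting (my observation, not claimed by the PR): because only the type's name changed case while its fields and json tags are untouched, whatever serializes saturationData keeps the same JSON shape; encoding/json reflects over exported fields and does not care that the struct type itself is unexported. A quick check under those assumptions:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Copied from the hunk above: the type is unexported, its fields are not.
type saturationTime struct {
	TimeToSaturate time.Duration `json:"timeToSaturate"`
	NumberOfNodes  int           `json:"numberOfNodes"`
	NumberOfPods   int           `json:"numberOfPods"`
	Throughput     float32       `json:"throughput"`
}

func main() {
	b, _ := json.Marshal(saturationTime{NumberOfNodes: 3, NumberOfPods: 90})
	fmt.Println(string(b))
	// {"timeToSaturate":0,"numberOfNodes":3,"numberOfPods":90,"throughput":0}
}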