e2e/instrumentation: decentralized settings

Test settings should be defined in the test source code itself, because
conceptually the framework is a separate entity that not all test authors
can modify. Using the new framework/config code also has several advantages:

- defaults can be set with less code
- no confusion around what's a duration
- the options can also be set via command line flags

While at it, a minor bug gets fixed: readConfig() returned only defaults
when called while registering Ginkgo tests, because Viperize() gets called
later, so the scale in the logging soak test couldn't really be configured.
Now the value is read when the test runs and thus can be changed.

The options get moved into the "instrumentation.logging" resp.
"instrumentation.monitoring" group to make it more obvious where they are
used. This is a breaking change, but that was already necessary to improve
the duration settings from plain integers to proper time durations.
parent 8cde9c08f0
commit 752203d3fa
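Before the diff, here is a minimal sketch of the pattern this commit adopts, using hypothetical names (package mytest, variable myOptions) rather than the real test code: options are declared next to the test that needs them, the `default` and `usage` struct tags replace hand-written fallback code, and config.AddOptions registers a command line flag per field.

package mytest

import (
	"time"

	"k8s.io/kubernetes/test/e2e/framework/config"
)

// myOptions declares this package's test settings; struct tags supply
// the defaults and help text, so no zero-value checks are needed.
var myOptions struct {
	Retries int           `default:"3" usage:"number of attempts"`
	Timeout time.Duration `default:"30s" usage:"per-attempt timeout"`
}

// AddOptions runs during package initialization, so the flags exist
// before the test binary parses its command line. Field values must be
// read when a test executes, not while it is registered with Ginkgo,
// because flag/Viper processing happens after registration.
var _ = config.AddOptions(&myOptions, "mytest")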
@@ -149,18 +149,6 @@ type TestContextType struct {
 	// Indicates what path the kubernetes-anywhere is installed on
 	KubernetesAnywherePath string
-
-	// Cadvisor contains settings for test/e2e/instrumentation/monitoring.
-	Cadvisor struct {
-		MaxRetries      int
-		SleepDurationMS int
-	}
-
-	// LoggingSoak contains settings for test/e2e/instrumentation/logging.
-	LoggingSoak struct {
-		Scale                    int
-		MilliSecondsBetweenWaves int
-	}
 }

 // NodeTestContextType is part of TestContextType, it is shared by all node e2e test.
@@ -15,6 +15,7 @@ go_library(
     deps = [
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//test/e2e/framework:go_default_library",
+        "//test/e2e/framework/config:go_default_library",
         "//test/e2e/instrumentation/common:go_default_library",
         "//test/e2e/instrumentation/logging/elasticsearch:go_default_library",
         "//test/e2e/instrumentation/logging/stackdriver:go_default_library",
@@ -27,10 +27,17 @@ import (
 	. "github.com/onsi/gomega"
 	"k8s.io/api/core/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
+	"k8s.io/kubernetes/test/e2e/framework/config"
 	instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )

+var loggingSoak struct {
+	Scale            int           `default:"1" usage:"number of waves of pods"`
+	TimeBetweenWaves time.Duration `default:"5000ms" usage:"time to wait before dumping the next wave of pods"`
+}
+var _ = config.AddOptions(&loggingSoak, "instrumentation.logging.soak")
+
 var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disruptive]", func() {

 	f := framework.NewDefaultFramework("logging-soak")
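With the options registered under the "instrumentation.logging.soak" prefix, the scale can presumably be overridden on the test command line with something like -instrumentation.logging.soak.scale=3 (the exact flag spelling is derived from the prefix and the field name by the framework/config package). Unlike the old TestContext fields, the value takes effect even though Viperize() runs after test registration, because loggingSoak.Scale is now read inside the It body, as the next hunk shows.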
@@ -44,31 +51,12 @@ var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disrupti
 	// This can expose problems in your docker configuration (logging), log searching infrastructure, to tune deployments to match high load
 	// scenarios. TODO jayunit100 add this to the kube CI in a follow on infra patch.

-	// Returns scale (how many waves of pods).
-	// Returns wave interval (how many seconds to wait before dumping the next wave of pods).
-	readConfig := func() (int, time.Duration) {
-		// Read in configuration settings, reasonable defaults.
-		scale := framework.TestContext.LoggingSoak.Scale
-		if framework.TestContext.LoggingSoak.Scale == 0 {
-			scale = 1
-			framework.Logf("Overriding default scale value of zero to %d", scale)
-		}
-
-		milliSecondsBetweenWaves := framework.TestContext.LoggingSoak.MilliSecondsBetweenWaves
-		if milliSecondsBetweenWaves == 0 {
-			milliSecondsBetweenWaves = 5000
-			framework.Logf("Overriding default milliseconds value of zero to %d", milliSecondsBetweenWaves)
-		}
-
-		return scale, time.Duration(milliSecondsBetweenWaves) * time.Millisecond
-	}
-
-	scale, millisecondsBetweenWaves := readConfig()
-	It(fmt.Sprintf("should survive logging 1KB every %v seconds, for a duration of %v, scaling up to %v pods per node", kbRateInSeconds, totalLogTime, scale), func() {
+	It(fmt.Sprintf("should survive logging 1KB every %v seconds, for a duration of %v", kbRateInSeconds, totalLogTime), func() {
+		By(fmt.Sprintf("scaling up to %v pods per node", loggingSoak.Scale))
 		defer GinkgoRecover()
 		var wg sync.WaitGroup
-		wg.Add(scale)
-		for i := 0; i < scale; i++ {
+		wg.Add(loggingSoak.Scale)
+		for i := 0; i < loggingSoak.Scale; i++ {
 			go func() {
 				defer wg.Done()
 				defer GinkgoRecover()
@@ -78,9 +66,9 @@ var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disrupti
 				framework.Logf("Completed logging soak, wave %v", i)
 			}()
 			// Niceness.
-			time.Sleep(millisecondsBetweenWaves)
+			time.Sleep(loggingSoak.TimeBetweenWaves)
 		}
-		framework.Logf("Waiting on all %v logging soak waves to complete", scale)
+		framework.Logf("Waiting on all %v logging soak waves to complete", loggingSoak.Scale)
 		wg.Wait()
 	})
 })
@@ -36,6 +36,7 @@ go_library(
         "//staging/src/k8s.io/metrics/pkg/client/external_metrics:go_default_library",
         "//test/e2e/common:go_default_library",
         "//test/e2e/framework:go_default_library",
+        "//test/e2e/framework/config:go_default_library",
         "//test/e2e/framework/metrics:go_default_library",
         "//test/e2e/instrumentation/common:go_default_library",
         "//test/e2e/scheduling:go_default_library",
@@ -23,11 +23,18 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	"k8s.io/kubernetes/test/e2e/framework/config"
 	instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"

 	. "github.com/onsi/ginkgo"
 )

+var cadvisor struct {
+	MaxRetries    int           `default:"6"`
+	SleepDuration time.Duration `default:"10000ms"`
+}
+var _ = config.AddOptions(&cadvisor, "instrumentation.monitoring.cadvisor")
+
 var _ = instrumentation.SIGDescribe("Cadvisor", func() {

 	f := framework.NewDefaultFramework("cadvisor")
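Note the duration default: assuming the config package accepts standard Go duration syntax for the `default` tag (which the "5000ms" and "10000ms" defaults suggest), "10000ms" and "10s" are interchangeable spellings, whereas the old SleepDurationMS int carried its unit only in the field name. A standalone illustration of that equivalence, using nothing but the standard library:

package main

import (
	"fmt"
	"time"
)

func main() {
	// "10000ms" and "10s" parse to the same time.Duration, so the
	// default tags can use whichever spelling reads best.
	d1, err1 := time.ParseDuration("10000ms")
	d2, err2 := time.ParseDuration("10s")
	fmt.Println(d1 == d2, err1, err2) // true <nil> <nil>
}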
@@ -44,25 +51,7 @@ func CheckCadvisorHealthOnAllNodes(c clientset.Interface, timeout time.Duration)
 	framework.ExpectNoError(err)
 	var errors []error

-	// returns maxRetries, sleepDuration
-	readConfig := func() (int, time.Duration) {
-		// Read in configuration settings, reasonable defaults.
-		retry := framework.TestContext.Cadvisor.MaxRetries
-		if framework.TestContext.Cadvisor.MaxRetries == 0 {
-			retry = 6
-			framework.Logf("Overriding default retry value of zero to %d", retry)
-		}
-
-		sleepDurationMS := framework.TestContext.Cadvisor.SleepDurationMS
-		if sleepDurationMS == 0 {
-			sleepDurationMS = 10000
-			framework.Logf("Overriding default milliseconds value of zero to %d", sleepDurationMS)
-		}
-
-		return retry, time.Duration(sleepDurationMS) * time.Millisecond
-	}
-
-	maxRetries, sleepDuration := readConfig()
+	maxRetries := cadvisor.MaxRetries
 	for {
 		errors = []error{}
 		for _, node := range nodeList.Items {
@@ -82,7 +71,7 @@ func CheckCadvisorHealthOnAllNodes(c clientset.Interface, timeout time.Duration)
 			break
 		}
 		framework.Logf("failed to retrieve kubelet stats -\n %v", errors)
-		time.Sleep(sleepDuration)
+		time.Sleep(cadvisor.SleepDuration)
 	}
 	framework.Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors)
 }