Add performance test phase timing export.

pull/6/head
Marcin Owsiany 2017-11-02 12:48:02 +01:00
parent 115bd09ff9
commit 96089e0b79
8 changed files with 372 additions and 41 deletions

View File

@@ -760,6 +760,7 @@ test/e2e/chaosmonkey
test/e2e/common
test/e2e/framework
test/e2e/framework/metrics
test/e2e/framework/timer
test/e2e/instrumentation
test/e2e/instrumentation/logging
test/e2e/instrumentation/monitoring

View File

@@ -161,6 +161,7 @@ filegroup(
":package-srcs",
"//test/e2e/framework/ginkgowrapper:all-srcs",
"//test/e2e/framework/metrics:all-srcs",
"//test/e2e/framework/timer:all-srcs",
],
tags = ["automanaged"],
)

View File

@@ -0,0 +1,34 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["timer.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/timer",
visibility = ["//visibility:public"],
deps = [
"//test/e2e/framework:go_default_library",
"//test/e2e/perftype:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["timer_test.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/timer",
library = ":go_default_library",
deps = ["//vendor/github.com/onsi/gomega:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,126 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package timer
import (
"time"
"bytes"
"fmt"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/perftype"
"sync"
)
var now = time.Now
// Represents a phase of a test. Phases can overlap.
type Phase struct {
sequenceNumber int
name string
startTime time.Time
endTime time.Time
}
func (phase *Phase) ended() bool {
return !phase.endTime.IsZero()
}
// End marks the phase as ended, unless it had already been ended before.
func (phase *Phase) End() {
if !phase.ended() {
phase.endTime = now()
}
}
func (phase *Phase) label() string {
return fmt.Sprintf("%03d-%s", phase.sequenceNumber, phase.name)
}
func (phase *Phase) duration() time.Duration {
endTime := phase.endTime
if !phase.ended() {
endTime = now()
}
return endTime.Sub(phase.startTime)
}
func (phase *Phase) humanReadable() string {
if phase.ended() {
return fmt.Sprintf("Phase %s: %v\n", phase.label(), phase.duration())
} else {
return fmt.Sprintf("Phase %s: %v so far\n", phase.label(), phase.duration())
}
}
// A TestPhaseTimer groups phases and provides a way to export their measurements as JSON or human-readable text.
// It is safe to use concurrently.
type TestPhaseTimer struct {
lock sync.Mutex
phases []*Phase
}
// NewTestPhaseTimer creates a new TestPhaseTimer.
func NewTestPhaseTimer() *TestPhaseTimer {
return &TestPhaseTimer{}
}
// StartPhase starts a new phase.
// sequenceNumber is an integer prepended to phaseName in the output, such that lexicographic sorting
// of phases in perfdash reconstructs the order of execution. Unfortunately it needs to be
// provided manually, since a simple incrementing counter would have the effect that inserting
// a new phase would renumber subsequent phases, breaking the continuity of historical records.
func (timer *TestPhaseTimer) StartPhase(sequenceNumber int, phaseName string) *Phase {
timer.lock.Lock()
defer timer.lock.Unlock()
newPhase := &Phase{sequenceNumber: sequenceNumber, name: phaseName, startTime: now()}
timer.phases = append(timer.phases, newPhase)
return newPhase
}
func (timer *TestPhaseTimer) SummaryKind() string {
return "TestPhaseTimer"
}
func (timer *TestPhaseTimer) PrintHumanReadable() string {
buf := bytes.Buffer{}
timer.lock.Lock()
defer timer.lock.Unlock()
for _, phase := range timer.phases {
buf.WriteString(phase.humanReadable())
}
return buf.String()
}
func (timer *TestPhaseTimer) PrintJSON() string {
data := perftype.PerfData{
Version: "v1",
DataItems: []perftype.DataItem{{
Unit: "s",
Labels: map[string]string{"test": "phases"},
Data: make(map[string]float64)}}}
timer.lock.Lock()
defer timer.lock.Unlock()
for _, phase := range timer.phases {
data.DataItems[0].Data[phase.label()] = phase.duration().Seconds()
if !phase.ended() {
data.DataItems[0].Labels["ended"] = "false"
}
}
return framework.PrettyPrintJSON(data)
}
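
The intended call pattern, pieced together from this file and from the density/load changes further down, looks roughly like the sketch below. This is illustrative only and not part of the commit: the import path is real, but runScenario and the phase names are invented. Sequence numbers are deliberately spaced out (100, 200, ...) so a later change can slot a new phase in between without renumbering, and End() ignores a second call, so a deferred End() can coexist with an explicit one.

package example // hypothetical caller, not part of this commit

import (
	"fmt"

	"k8s.io/kubernetes/test/e2e/framework/timer"
)

// runScenario is an invented example of the usage pattern described above.
func runScenario() {
	durations := timer.NewTestPhaseTimer()

	setup := durations.StartPhase(100, "setup")
	// ... preparation work ...
	setup.End() // close the phase before the next serial step starts

	run := durations.StartPhase(200, "run")
	defer run.End() // End is idempotent, so the defer is safe even after an explicit End
	// ... measured work ...

	fmt.Println(durations.PrintHumanReadable()) // "Phase 100-setup: ..." lines
	fmt.Println(durations.PrintJSON())          // perftype.PerfData, version "v1"
}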

View File

@@ -0,0 +1,92 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package timer
import (
"testing"
"time"
. "github.com/onsi/gomega"
)
var currentTime time.Time
func init() {
setCurrentTimeSinceEpoch(0)
now = func() time.Time { return currentTime }
}
func setCurrentTimeSinceEpoch(duration time.Duration) {
currentTime = time.Unix(0, duration.Nanoseconds())
}
func testUsageWithDefer(timer *TestPhaseTimer) {
defer timer.StartPhase(33, "two").End()
setCurrentTimeSinceEpoch(6*time.Second + 500*time.Millisecond)
}
func TestTimer(t *testing.T) {
RegisterTestingT(t)
timer := NewTestPhaseTimer()
setCurrentTimeSinceEpoch(1 * time.Second)
phaseOne := timer.StartPhase(1, "one")
setCurrentTimeSinceEpoch(3 * time.Second)
testUsageWithDefer(timer)
Expect(timer.PrintJSON()).To(MatchJSON(`{
"version": "v1",
"dataItems": [
{
"data": {
"001-one": 5.5,
"033-two": 3.5
},
"unit": "s",
"labels": {
"test": "phases",
"ended": "false"
}
}
]
}`))
Expect(timer.PrintHumanReadable()).To(Equal(`Phase 001-one: 5.5s so far
Phase 033-two: 3.5s
`))
setCurrentTimeSinceEpoch(7*time.Second + 500*time.Millisecond)
phaseOne.End()
Expect(timer.PrintJSON()).To(MatchJSON(`{
"version": "v1",
"dataItems": [
{
"data": {
"001-one": 6.5,
"033-two": 3.5
},
"unit": "s",
"labels": {
"test": "phases"
}
}
]
}`))
Expect(timer.PrintHumanReadable()).To(Equal(`Phase 001-one: 6.5s
Phase 033-two: 3.5s
`))
}
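
Two observations on the test above, offered as my reading rather than as part of the diff: it pins the package-level now variable to a fake clock in init() so the expected durations are exact, and the three methods it exercises indirectly (SummaryKind, PrintHumanReadable, PrintJSON) are what make TestPhaseTimer usable as a framework.TestDataSummary in the density and load tests below. A compile-time sketch of that second point, assuming the interface consists of exactly those methods:

package timercheck // illustrative only, not part of this commit

import (
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/framework/timer"
)

// Compile-time assertion that *TestPhaseTimer satisfies framework.TestDataSummary,
// which is why density.go and load.go below can append the timer to their
// summaries slices.
var _ framework.TestDataSummary = &timer.TestPhaseTimer{}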

View File

@@ -20,6 +20,7 @@ go_library(
"//pkg/apis/extensions:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/timer:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",

View File

@@ -44,6 +44,7 @@ import (
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/timer"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
@@ -76,6 +77,54 @@ type DensityTestConfig struct {
DaemonConfigs []*testutils.DaemonConfig
}
func (dtc *DensityTestConfig) runSecretConfigs(testPhase *timer.Phase) {
defer testPhase.End()
for _, sc := range dtc.SecretConfigs {
sc.Run()
}
}
func (dtc *DensityTestConfig) runConfigMapConfigs(testPhase *timer.Phase) {
defer testPhase.End()
for _, cmc := range dtc.ConfigMapConfigs {
cmc.Run()
}
}
func (dtc *DensityTestConfig) runDaemonConfigs(testPhase *timer.Phase) {
defer testPhase.End()
for _, dc := range dtc.DaemonConfigs {
dc.Run()
}
}
func (dtc *DensityTestConfig) deleteSecrets(testPhase *timer.Phase) {
defer testPhase.End()
for i := range dtc.SecretConfigs {
dtc.SecretConfigs[i].Stop()
}
}
func (dtc *DensityTestConfig) deleteConfigMaps(testPhase *timer.Phase) {
defer testPhase.End()
for i := range dtc.ConfigMapConfigs {
dtc.ConfigMapConfigs[i].Stop()
}
}
func (dtc *DensityTestConfig) deleteDaemonSets(numberOfClients int, testPhase *timer.Phase) {
defer testPhase.End()
for i := range dtc.DaemonConfigs {
framework.ExpectNoError(framework.DeleteResourceAndPods(
dtc.ClientSets[i%numberOfClients],
dtc.InternalClientsets[i%numberOfClients],
extensions.Kind("DaemonSet"),
dtc.DaemonConfigs[i].Namespace,
dtc.DaemonConfigs[i].Name,
))
}
}
func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceConstraint {
var apiserverMem uint64
var controllerMem uint64
@@ -196,20 +245,16 @@ func logPodStartupStatus(c clientset.Interface, expectedPods int, observedLabels
// runDensityTest will perform a density test and return the time it took for
// all pods to start
func runDensityTest(dtc DensityTestConfig) time.Duration {
func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTimer) time.Duration {
defer GinkgoRecover()
// Create all secrets, configmaps and daemons.
for i := range dtc.SecretConfigs {
dtc.SecretConfigs[i].Run()
}
for i := range dtc.ConfigMapConfigs {
dtc.ConfigMapConfigs[i].Run()
}
for i := range dtc.DaemonConfigs {
dtc.DaemonConfigs[i].Run()
}
dtc.runSecretConfigs(testPhaseDurations.StartPhase(250, "secrets creation"))
dtc.runConfigMapConfigs(testPhaseDurations.StartPhase(260, "configmaps creation"))
dtc.runDaemonConfigs(testPhaseDurations.StartPhase(270, "daemonsets creation"))
replicationCtrlStartupPhase := testPhaseDurations.StartPhase(300, "saturation pods creation")
defer replicationCtrlStartupPhase.End()
// Start all replication controllers.
startTime := time.Now()
wg := sync.WaitGroup{}
@@ -231,7 +276,10 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {
close(logStopCh)
framework.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))
replicationCtrlStartupPhase.End()
printPodAllocationPhase := testPhaseDurations.StartPhase(400, "printing pod allocation")
defer printPodAllocationPhase.End()
// Print some data about Pod to Node allocation
By("Printing Pod to Node allocation data")
podList, err := dtc.ClientSets[0].CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
@@ -253,11 +301,14 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {
for _, node := range nodeNames {
framework.Logf("%v: %v pause pods, system pods: %v", node, pausePodAllocation[node], systemPodAllocation[node])
}
defer printPodAllocationPhase.End()
return startupTime
}
func cleanupDensityTest(dtc DensityTestConfig) {
func cleanupDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTimer) {
defer GinkgoRecover()
podCleanupPhase := testPhaseDurations.StartPhase(900, "latency pods deletion")
defer podCleanupPhase.End()
By("Deleting created Collections")
numberOfClients := len(dtc.ClientSets)
// We explicitly delete all pods to have API calls necessary for deletion accounted in metrics.
@@ -275,23 +326,11 @@ func cleanupDensityTest(dtc DensityTestConfig) {
framework.ExpectNoError(err)
}
}
podCleanupPhase.End()
// Delete all secrets, configmaps and daemons.
for i := range dtc.SecretConfigs {
dtc.SecretConfigs[i].Stop()
}
for i := range dtc.ConfigMapConfigs {
dtc.ConfigMapConfigs[i].Stop()
}
for i := range dtc.DaemonConfigs {
framework.ExpectNoError(framework.DeleteResourceAndPods(
dtc.ClientSets[i%numberOfClients],
dtc.InternalClientsets[i%numberOfClients],
extensions.Kind("DaemonSet"),
dtc.DaemonConfigs[i].Namespace,
dtc.DaemonConfigs[i].Name,
))
}
dtc.deleteSecrets(testPhaseDurations.StartPhase(910, "secrets deletion"))
dtc.deleteConfigMaps(testPhaseDurations.StartPhase(920, "configmaps deletion"))
dtc.deleteDaemonSets(numberOfClients, testPhaseDurations.StartPhase(930, "daemonsets deletion"))
}
// This test suite can take a long time to run, and can affect or be affected by other tests.
@@ -316,6 +355,7 @@ var _ = SIGDescribe("Density", func() {
testCaseBaseName := "density"
missingMeasurements := 0
var testPhaseDurations *timer.TestPhaseTimer
// Gathers data prior to framework namespace teardown
AfterEach(func() {
@@ -349,6 +389,7 @@ var _ = SIGDescribe("Density", func() {
if err == nil {
summaries = append(summaries, latency)
}
summaries = append(summaries, testPhaseDurations)
framework.PrintSummaries(summaries, testCaseBaseName)
@@ -368,6 +409,7 @@ var _ = SIGDescribe("Density", func() {
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
testPhaseDurations = timer.NewTestPhaseTimer()
masters, nodes = framework.GetMasterAndWorkerNodesOrDie(c)
nodeCount = len(nodes.Items)
@@ -454,6 +496,8 @@ var _ = SIGDescribe("Density", func() {
)
itArg := testArg
It(name, func() {
nodePrepPhase := testPhaseDurations.StartPhase(100, "node preparation")
defer nodePrepPhase.End()
nodePreparer := framework.NewE2ETestNodePreparer(
f.ClientSet,
[]testutils.CountToStrategy{{Count: nodeCount, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
@@ -469,10 +513,11 @@ var _ = SIGDescribe("Density", func() {
fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid))
framework.ExpectNoError(err)
defer fileHndl.Close()
nodePrepPhase.End()
// nodeCountPerNamespace and CreateNamespaces are defined in load.go
numberOfCollections := (nodeCount + nodeCountPerNamespace - 1) / nodeCountPerNamespace
namespaces, err := CreateNamespaces(f, numberOfCollections, fmt.Sprintf("density-%v", testArg.podsPerNode))
namespaces, err := CreateNamespaces(f, numberOfCollections, fmt.Sprintf("density-%v", testArg.podsPerNode), testPhaseDurations.StartPhase(200, "namespace creation"))
framework.ExpectNoError(err)
configs := make([]testutils.RunObjectConfig, numberOfCollections)
@@ -567,7 +612,7 @@ var _ = SIGDescribe("Density", func() {
LogFunc: framework.Logf,
})
}
e2eStartupTime = runDensityTest(dConfig)
e2eStartupTime = runDensityTest(dConfig, testPhaseDurations)
if itArg.runLatencyTest {
By("Scheduling additional Pods to measure startup latencies")
@@ -650,6 +695,8 @@ var _ = SIGDescribe("Density", func() {
}
// Create some additional pods with throughput ~5 pods/sec.
latencyPodStartupPhase := testPhaseDurations.StartPhase(800, "latency pods creation")
defer latencyPodStartupPhase.End()
var wg sync.WaitGroup
wg.Add(nodeCount)
// Explicitly set requests here.
@@ -673,7 +720,10 @@ var _ = SIGDescribe("Density", func() {
time.Sleep(200 * time.Millisecond)
}
wg.Wait()
latencyPodStartupPhase.End()
latencyMeasurementPhase := testPhaseDurations.StartPhase(810, "pod startup latencies measurement")
defer latencyMeasurementPhase.End()
By("Waiting for all Pods begin observed by the watch...")
waitTimeout := 10 * time.Minute
for start := time.Now(); len(watchTimes) < nodeCount; time.Sleep(10 * time.Second) {
@@ -769,17 +819,20 @@ var _ = SIGDescribe("Density", func() {
framework.ExpectNoError(framework.VerifyPodStartupLatency(podStartupLatency))
framework.LogSuspiciousLatency(startupLag, e2eLag, nodeCount, c)
latencyMeasurementPhase.End()
By("Removing additional replication controllers")
podDeletionPhase := testPhaseDurations.StartPhase(820, "latency pods deletion")
defer podDeletionPhase.End()
deleteRC := func(i int) {
defer GinkgoRecover()
name := additionalPodsPrefix + "-" + strconv.Itoa(i+1)
framework.ExpectNoError(framework.DeleteRCAndWaitForGC(c, rcNameToNsMap[name], name))
}
workqueue.Parallelize(25, nodeCount, deleteRC)
podDeletionPhase.End()
}
cleanupDensityTest(dConfig)
cleanupDensityTest(dConfig, testPhaseDurations)
})
}
})
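
A pattern worth noting in the density changes above (and repeated in load.go below): the caller starts a phase and passes the resulting *timer.Phase into a helper whose first statement is `defer testPhase.End()`, so phase numbering and naming stay at the call site while the helper only guarantees the phase is closed when the step returns or panics. Where a phase is both deferred and ended explicitly (nodePrepPhase, replicationCtrlStartupPhase, and so on), the defer is just a safety net for early failures; End() ignores the second call. A generic sketch of the helper shape, with timedStep being an invented name:

package example // illustrative only, not part of this commit

import "k8s.io/kubernetes/test/e2e/framework/timer"

// timedStep mirrors the shape of runSecretConfigs, deleteConfigMaps, etc.:
// it ends the phase it was handed once the step finishes, even on panic.
func timedStep(phase *timer.Phase, step func()) {
	defer phase.End()
	step()
}

// density.go above does the equivalent inline, e.g.:
//   dtc.runSecretConfigs(testPhaseDurations.StartPhase(250, "secrets creation"))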

View File

@@ -43,6 +43,7 @@ import (
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/timer"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
@@ -88,6 +89,7 @@ var _ = SIGDescribe("Load capacity", func() {
var configMapConfigs []*testutils.ConfigMapConfig
testCaseBaseName := "load"
var testPhaseDurations *timer.TestPhaseTimer
// Gathers metrics before teardown
// TODO add flag that allows to skip cleanup on failure
@@ -96,8 +98,9 @@ var _ = SIGDescribe("Load capacity", func() {
highLatencyRequests, metrics, err := framework.HighLatencyRequests(clientset, nodeCount)
framework.ExpectNoError(err)
if err == nil {
summaries := make([]framework.TestDataSummary, 0, 1)
summaries := make([]framework.TestDataSummary, 0, 2)
summaries = append(summaries, metrics)
summaries = append(summaries, testPhaseDurations)
framework.PrintSummaries(summaries, testCaseBaseName)
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
}
@@ -123,6 +126,7 @@ var _ = SIGDescribe("Load capacity", func() {
f.NamespaceDeletionTimeout = time.Hour
BeforeEach(func() {
testPhaseDurations = timer.NewTestPhaseTimer()
clientset = f.ClientSet
ns = f.Namespace.Name
@@ -187,12 +191,14 @@ var _ = SIGDescribe("Load capacity", func() {
It(name, func() {
// Create a number of namespaces.
namespaceCount := (nodeCount + nodeCountPerNamespace - 1) / nodeCountPerNamespace
namespaces, err := CreateNamespaces(f, namespaceCount, fmt.Sprintf("load-%v-nodepods", itArg.podsPerNode))
namespaces, err := CreateNamespaces(f, namespaceCount, fmt.Sprintf("load-%v-nodepods", itArg.podsPerNode), testPhaseDurations.StartPhase(110, "namespace creation"))
framework.ExpectNoError(err)
totalPods := (itArg.podsPerNode - itArg.daemonsPerNode) * nodeCount
configs, secretConfigs, configMapConfigs = generateConfigs(totalPods, itArg.image, itArg.command, namespaces, itArg.kind, itArg.secretsPerPod, itArg.configMapsPerPod)
serviceCreationPhase := testPhaseDurations.StartPhase(120, "services creation")
defer serviceCreationPhase.End()
if itArg.services {
framework.Logf("Creating services")
services := generateServicesForConfigs(configs)
@@ -204,6 +210,8 @@ var _ = SIGDescribe("Load capacity", func() {
workqueue.Parallelize(serviceOperationsParallelism, len(services), createService)
framework.Logf("%v Services created.", len(services))
defer func(services []*v1.Service) {
serviceCleanupPhase := testPhaseDurations.StartPhase(800, "services deletion")
defer serviceCleanupPhase.End()
framework.Logf("Starting to delete services...")
deleteService := func(i int) {
defer GinkgoRecover()
@@ -216,17 +224,26 @@ var _ = SIGDescribe("Load capacity", func() {
} else {
framework.Logf("Skipping service creation")
}
serviceCreationPhase.End()
// Create all secrets.
secretsCreationPhase := testPhaseDurations.StartPhase(130, "secrets creation")
defer secretsCreationPhase.End()
for i := range secretConfigs {
secretConfigs[i].Run()
defer secretConfigs[i].Stop()
}
secretsCreationPhase.End()
// Create all configmaps.
configMapsCreationPhase := testPhaseDurations.StartPhase(140, "configmaps creation")
defer configMapsCreationPhase.End()
for i := range configMapConfigs {
configMapConfigs[i].Run()
defer configMapConfigs[i].Stop()
}
configMapsCreationPhase.End()
// StartDaemon if needed
daemonSetCreationPhase := testPhaseDurations.StartPhase(150, "daemonsets creation")
defer daemonSetCreationPhase.End()
for i := 0; i < itArg.daemonsPerNode; i++ {
daemonName := fmt.Sprintf("load-daemon-%v", i)
daemonConfig := &testutils.DaemonConfig{
@@ -246,6 +263,7 @@ var _ = SIGDescribe("Load capacity", func() {
))
}(daemonConfig)
}
daemonSetCreationPhase.End()
// Simulate lifetime of RC:
// * create with initial size
@@ -264,7 +282,8 @@ var _ = SIGDescribe("Load capacity", func() {
// We may want to revisit it in the future.
framework.Logf("Starting to create %v objects...", itArg.kind)
creatingTime := time.Duration(totalPods/throughput) * time.Second
createAllResources(configs, creatingTime)
createAllResources(configs, creatingTime, testPhaseDurations.StartPhase(200, "load pods creation"))
By("============================================================================")
// We would like to spread scaling replication controllers over time
@@ -273,11 +292,11 @@ var _ = SIGDescribe("Load capacity", func() {
// The expected number of created/deleted pods is less than totalPods/3.
scalingTime := time.Duration(totalPods/(3*throughput)) * time.Second
framework.Logf("Starting to scale %v objects first time...", itArg.kind)
scaleAllResources(configs, scalingTime)
scaleAllResources(configs, scalingTime, testPhaseDurations.StartPhase(300, "scaling first time"))
By("============================================================================")
framework.Logf("Starting to scale %v objects second time...", itArg.kind)
scaleAllResources(configs, scalingTime)
scaleAllResources(configs, scalingTime, testPhaseDurations.StartPhase(400, "scaling second time"))
By("============================================================================")
// Cleanup all created replication controllers.
@@ -285,7 +304,7 @@ var _ = SIGDescribe("Load capacity", func() {
// We may want to revisit it in the future.
deletingTime := time.Duration(totalPods/throughput) * time.Second
framework.Logf("Starting to delete %v objects...", itArg.kind)
deleteAllResources(configs, deletingTime)
deleteAllResources(configs, deletingTime, testPhaseDurations.StartPhase(500, "load pods deletion"))
})
}
})
@@ -534,7 +553,8 @@ func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.Conditio
return wait.ExponentialBackoff(backoff, fn)
}
func createAllResources(configs []testutils.RunObjectConfig, creatingTime time.Duration) {
func createAllResources(configs []testutils.RunObjectConfig, creatingTime time.Duration, testPhase *timer.Phase) {
defer testPhase.End()
var wg sync.WaitGroup
wg.Add(len(configs))
for _, config := range configs {
@@ -551,7 +571,8 @@ func createResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, creati
framework.ExpectNoError(config.Run(), fmt.Sprintf("creating %v %s", config.GetKind(), config.GetName()))
}
func scaleAllResources(configs []testutils.RunObjectConfig, scalingTime time.Duration) {
func scaleAllResources(configs []testutils.RunObjectConfig, scalingTime time.Duration, testPhase *timer.Phase) {
defer testPhase.End()
var wg sync.WaitGroup
wg.Add(len(configs))
for _, config := range configs {
@@ -592,7 +613,8 @@ func scaleResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, scaling
framework.ExpectNoError(err)
}
func deleteAllResources(configs []testutils.RunObjectConfig, deletingTime time.Duration) {
func deleteAllResources(configs []testutils.RunObjectConfig, deletingTime time.Duration, testPhase *timer.Phase) {
defer testPhase.End()
var wg sync.WaitGroup
wg.Add(len(configs))
for _, config := range configs {
@@ -617,7 +639,8 @@ func deleteResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, deleti
}
}
func CreateNamespaces(f *framework.Framework, namespaceCount int, namePrefix string) ([]*v1.Namespace, error) {
func CreateNamespaces(f *framework.Framework, namespaceCount int, namePrefix string, testPhase *timer.Phase) ([]*v1.Namespace, error) {
defer testPhase.End()
namespaces := []*v1.Namespace{}
for i := 1; i <= namespaceCount; i++ {
namespace, err := f.CreateNamespace(fmt.Sprintf("%v-%d", namePrefix, i), nil)