From 6122ff88fa9fccf1c3c3b1e3c07101998d01aed6 Mon Sep 17 00:00:00 2001
From: Mikhail Vyatskov
Date: Fri, 3 Mar 2017 15:05:46 -0800
Subject: [PATCH] Distribute load in cluster load tests uniformly

---
 test/e2e/cluster_logging_es.go       |  2 +-
 test/e2e/cluster_logging_gcl.go      |  2 +-
 test/e2e/cluster_logging_gcl_load.go | 26 ++++++++++++++------------
 test/e2e/cluster_logging_utils.go    |  7 ++++---
 4 files changed, 20 insertions(+), 17 deletions(-)

diff --git a/test/e2e/cluster_logging_es.go b/test/e2e/cluster_logging_es.go
index 0f40a280f0..8c8fb70990 100644
--- a/test/e2e/cluster_logging_es.go
+++ b/test/e2e/cluster_logging_es.go
@@ -45,7 +45,7 @@ var _ = framework.KubeDescribe("Cluster level logging using Elasticsearch [Featu
 		framework.ExpectNoError(err, "Elasticsearch is not working")
 
 		By("Running synthetic logger")
-		pod := createLoggingPod(f, podName, 10*60, 10*time.Minute)
+		pod := createLoggingPod(f, podName, "", 10*60, 10*time.Minute)
 		defer f.PodClient().Delete(podName, &meta_v1.DeleteOptions{})
 		err = framework.WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
 		framework.ExpectNoError(err, fmt.Sprintf("Should've successfully waited for pod %s to be running", podName))
diff --git a/test/e2e/cluster_logging_gcl.go b/test/e2e/cluster_logging_gcl.go
index c097f878f6..87d4fc7635 100644
--- a/test/e2e/cluster_logging_gcl.go
+++ b/test/e2e/cluster_logging_gcl.go
@@ -43,7 +43,7 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL", func() {
 		framework.ExpectNoError(err, "GCL is not working")
 
 		By("Running synthetic logger")
-		pod := createLoggingPod(f, podName, 10*60, 10*time.Minute)
+		pod := createLoggingPod(f, podName, "", 10*60, 10*time.Minute)
 		defer f.PodClient().Delete(podName, &meta_v1.DeleteOptions{})
 		err = framework.WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
 		framework.ExpectNoError(err, fmt.Sprintf("Should've successfully waited for pod %s to be running", podName))
diff --git a/test/e2e/cluster_logging_gcl_load.go b/test/e2e/cluster_logging_gcl_load.go
index 466ddf94ab..a4487f6ef4 100644
--- a/test/e2e/cluster_logging_gcl_load.go
+++ b/test/e2e/cluster_logging_gcl_load.go
@@ -17,7 +17,7 @@ limitations under the License.
 package e2e
 
 import (
-	"strconv"
+	"fmt"
 	"time"
 
 	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -40,7 +40,8 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL [Slow] [Flaky]",
 		gclLogsProvider, err := newGclLogsProvider(f)
 		framework.ExpectNoError(err, "Failed to create GCL logs provider")
 
-		nodeCount := len(framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items)
+		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items
+		nodeCount := len(nodes)
 		podCount := 30 * nodeCount
 		loggingDuration := 10 * time.Minute
 		linesPerSecond := 1000 * nodeCount
@@ -50,8 +51,9 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL [Slow] [Flaky]",
 		By("Running logs generator pods")
 		pods := []*loggingPod{}
 		for podIdx := 0; podIdx < podCount; podIdx++ {
-			podName := f.Namespace.Name + "-logs-generator-" + strconv.Itoa(linesPerPod) + "-" + strconv.Itoa(podIdx)
-			pods = append(pods, createLoggingPod(f, podName, linesPerPod, loggingDuration))
+			node := nodes[podIdx%len(nodes)]
+			podName := fmt.Sprintf("logs-generator-%d-%d", linesPerPod, podIdx)
+			pods = append(pods, createLoggingPod(f, podName, node.Name, linesPerPod, loggingDuration))
 			defer f.PodClient().Delete(podName, &meta_v1.DeleteOptions{})
 		}
 
@@ -79,8 +81,8 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL [Slow] [Flaky]",
 		gclLogsProvider, err := newGclLogsProvider(f)
 		framework.ExpectNoError(err, "Failed to create GCL logs provider")
 
-		nodeCount := len(framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items)
-		maxPodCount := 10 * nodeCount
+		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items
+		maxPodCount := 10
 		jobDuration := 1 * time.Minute
 		linesPerPodPerSecond := 100
 		testDuration := 10 * time.Minute
@@ -92,13 +94,13 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL [Slow] [Flaky]",
 
 		By("Running short-living pods")
 		pods := []*loggingPod{}
-		for i := 0; i < podRunCount; i++ {
-			podName := f.Namespace.Name + "-job-logs-generator-" +
-				strconv.Itoa(maxPodCount) + "-" + strconv.Itoa(linesPerPod) + "-" + strconv.Itoa(i)
-			pods = append(pods, createLoggingPod(f, podName, linesPerPod, jobDuration))
-
-			defer f.PodClient().Delete(podName, &meta_v1.DeleteOptions{})
+		for runIdx := 0; runIdx < podRunCount; runIdx++ {
+			for nodeIdx, node := range nodes {
+				podName := fmt.Sprintf("job-logs-generator-%d-%d-%d-%d", maxPodCount, linesPerPod, runIdx, nodeIdx)
+				pods = append(pods, createLoggingPod(f, podName, node.Name, linesPerPod, jobDuration))
+				defer f.PodClient().Delete(podName, &meta_v1.DeleteOptions{})
+			}
 			time.Sleep(podRunDelay)
 		}
diff --git a/test/e2e/cluster_logging_utils.go b/test/e2e/cluster_logging_utils.go
index a8346a2954..9156b60851 100644
--- a/test/e2e/cluster_logging_utils.go
+++ b/test/e2e/cluster_logging_utils.go
@@ -91,9 +91,9 @@ func (entry *logEntry) getLogEntryNumber() (int, bool) {
 	return lineNumber, err == nil
 }
 
-func createLoggingPod(f *framework.Framework, podName string, totalLines int, loggingDuration time.Duration) *loggingPod {
+func createLoggingPod(f *framework.Framework, podName string, nodeName string, totalLines int, loggingDuration time.Duration) *loggingPod {
 	framework.Logf("Starting pod %s", podName)
-	createLogsGeneratorPod(f, podName, totalLines, loggingDuration)
+	createLogsGeneratorPod(f, podName, nodeName, totalLines, loggingDuration)
 
 	return &loggingPod{
 		Name: podName,
@@ -104,7 +104,7 @@ func createLoggingPod(f *framework.Framework, podName string, totalLines int, lo
 	}
 }
 
-func createLogsGeneratorPod(f *framework.Framework, podName string, linesCount int, duration time.Duration) {
+func createLogsGeneratorPod(f *framework.Framework, podName string, nodeName string, linesCount int, duration time.Duration) {
 	f.PodClient().Create(&api_v1.Pod{
 		ObjectMeta: meta_v1.ObjectMeta{
 			Name: podName,
@@ -137,6 +137,7 @@ func createLogsGeneratorPod(f *framework.Framework, podName string, linesCount i
 					},
 				},
 			},
+			NodeName: nodeName,
 		},
 	})
 }
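---

A note on the placement technique used above: instead of letting the scheduler decide, the test pins each logs-generator pod to a node by setting Spec.NodeName at creation time, and picks the node round-robin (pod podIdx goes to nodes[podIdx%len(nodes)]). The sketch below isolates that assignment rule outside the e2e framework; it is illustrative only, and the helper name assignRoundRobin plus the sample node names are invented for the example, not part of this patch.

package main

import "fmt"

// assignRoundRobin mirrors the patch's placement rule: pod i is pinned
// to node i % len(nodes), so per-node pod counts differ by at most one.
func assignRoundRobin(nodes []string, podCount int) map[string][]string {
	placement := make(map[string][]string)
	for podIdx := 0; podIdx < podCount; podIdx++ {
		node := nodes[podIdx%len(nodes)]
		placement[node] = append(placement[node], fmt.Sprintf("logs-generator-%d", podIdx))
	}
	return placement
}

func main() {
	// 7 pods over 3 nodes: the per-node counts come out 3/2/2.
	for node, pods := range assignRoundRobin([]string{"node-a", "node-b", "node-c"}, 7) {
		fmt.Printf("%s: %d pods %v\n", node, len(pods), pods)
	}
}

Because NodeName is set before creation, these pods bypass kube-scheduler entirely, which is what makes the distribution deterministic regardless of scheduler policy or node utilization; the trade-off is that the kubelet rejects a pod outright, rather than rescheduling it, if the target node cannot fit it.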