From 52ef3e6e94b0f6b98098dbe2943ad4426ed10308 Mon Sep 17 00:00:00 2001
From: Shyam Jeedigunta
Date: Wed, 31 May 2017 11:47:01 +0200
Subject: [PATCH] Performance tests also cover configmaps now

---
 test/e2e/empty.go        |  2 +-
 test/e2e/perf/density.go | 48 ++++++++++++++++++-------
 test/e2e/perf/load.go    | 65 +++++++++++++++++++++++++--------
 test/utils/runners.go    | 78 ++++++++++++++++++++++++++++++++++++++--
 4 files changed, 163 insertions(+), 30 deletions(-)

diff --git a/test/e2e/empty.go b/test/e2e/empty.go
index c6437183ff..925982fa3d 100644
--- a/test/e2e/empty.go
+++ b/test/e2e/empty.go
@@ -43,7 +43,7 @@ var _ = framework.KubeDescribe("Empty [Feature:Empty]", func() {
 	})
 
 	It("starts a pod", func() {
-		configs, _ := perf.GenerateConfigsForGroup([]*v1.Namespace{f.Namespace}, "empty-pod", 1, 1, framework.GetPauseImageName(f.ClientSet), []string{}, api.Kind("ReplicationController"), 0)
+		configs, _, _ := perf.GenerateConfigsForGroup([]*v1.Namespace{f.Namespace}, "empty-pod", 1, 1, framework.GetPauseImageName(f.ClientSet), []string{}, api.Kind("ReplicationController"), 0, 0)
 		if len(configs) != 1 {
 			framework.Failf("generateConfigs should have generated single config")
 		}
diff --git a/test/e2e/perf/density.go b/test/e2e/perf/density.go
index 607ae7d9c3..8be4c3f75b 100644
--- a/test/e2e/perf/density.go
+++ b/test/e2e/perf/density.go
@@ -65,9 +65,10 @@ type DensityTestConfig struct {
 	PollInterval time.Duration
 	PodCount     int
 	// What kind of resource we want to create
-	kind          schema.GroupKind
-	SecretConfigs []*testutils.SecretConfig
-	DaemonConfigs []*testutils.DaemonConfig
+	kind             schema.GroupKind
+	SecretConfigs    []*testutils.SecretConfig
+	ConfigMapConfigs []*testutils.ConfigMapConfig
+	DaemonConfigs    []*testutils.DaemonConfig
 }
 
 func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceConstraint {
@@ -193,11 +194,13 @@ func logPodStartupStatus(c clientset.Interface, expectedPods int, observedLabels
 
 func runDensityTest(dtc DensityTestConfig) time.Duration {
 	defer GinkgoRecover()
-	// Create all secrets
+	// Create all secrets, configmaps and daemons.
 	for i := range dtc.SecretConfigs {
 		dtc.SecretConfigs[i].Run()
 	}
-
+	for i := range dtc.ConfigMapConfigs {
+		dtc.ConfigMapConfigs[i].Run()
+	}
 	for i := range dtc.DaemonConfigs {
 		dtc.DaemonConfigs[i].Run()
 	}
@@ -267,11 +270,13 @@ func cleanupDensityTest(dtc DensityTestConfig) {
 		}
 	}
 
-	// Delete all secrets
+	// Delete all secrets, configmaps and daemons.
 	for i := range dtc.SecretConfigs {
 		dtc.SecretConfigs[i].Stop()
 	}
-
+	for i := range dtc.ConfigMapConfigs {
+		dtc.ConfigMapConfigs[i].Stop()
+	}
 	for i := range dtc.DaemonConfigs {
 		framework.ExpectNoError(framework.DeleteResourceAndPods(
 			dtc.ClientSet,
@@ -395,9 +400,10 @@ var _ = framework.KubeDescribe("Density", func() {
 		// Controls how often the apiserver is polled for pods
 		interval time.Duration
 		// What kind of resource we should be creating. Default: ReplicationController
-		kind           schema.GroupKind
-		secretsPerPod  int
-		daemonsPerNode int
+		kind             schema.GroupKind
+		secretsPerPod    int
+		configMapsPerPod int
+		daemonsPerNode   int
 	}
 
 	densityTests := []Density{
@@ -414,24 +420,27 @@
 		{podsPerNode: 30, runLatencyTest: true, kind: api.Kind("ReplicationController"), daemonsPerNode: 2},
 		// Test with secrets
 		{podsPerNode: 30, runLatencyTest: true, kind: extensions.Kind("Deployment"), secretsPerPod: 2},
+		// Test with configmaps
+		{podsPerNode: 30, runLatencyTest: true, kind: extensions.Kind("Deployment"), configMapsPerPod: 2},
 	}
 
 	for _, testArg := range densityTests {
 		feature := "ManualPerformance"
 		switch testArg.podsPerNode {
 		case 30:
-			if testArg.kind == api.Kind("ReplicationController") && testArg.daemonsPerNode == 0 && testArg.secretsPerPod == 0 {
+			if testArg.kind == api.Kind("ReplicationController") && testArg.daemonsPerNode == 0 && testArg.secretsPerPod == 0 && testArg.configMapsPerPod == 0 {
 				feature = "Performance"
 			}
 		case 95:
 			feature = "HighDensityPerformance"
 		}
 
-		name := fmt.Sprintf("[Feature:%s] should allow starting %d pods per node using %v with %v secrets and %v daemons",
+		name := fmt.Sprintf("[Feature:%s] should allow starting %d pods per node using %v with %v secrets, %v configmaps and %v daemons",
			feature,
 			testArg.podsPerNode,
 			testArg.kind,
 			testArg.secretsPerPod,
+			testArg.configMapsPerPod,
 			testArg.daemonsPerNode,
 		)
 		itArg := testArg
@@ -459,6 +468,7 @@
 
 			configs := make([]testutils.RunObjectConfig, numberOfCollections)
 			secretConfigs := make([]*testutils.SecretConfig, 0, numberOfCollections*itArg.secretsPerPod)
+			configMapConfigs := make([]*testutils.ConfigMapConfig, 0, numberOfCollections*itArg.configMapsPerPod)
 			// Since all RCs are created at the same time, timeout for each config
 			// has to assume that it will be run at the very end.
 			podThroughput := 20
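Usage sketch (not part of the patch): the density hunks above register one `testutils.ConfigMapConfig` per configmap and rely on its `Run()`/`Stop()` methods for setup and teardown. Assuming a clientset `c` and a namespace name `ns` from the test framework, the per-object flow looks roughly like:

```go
cm := &testutils.ConfigMapConfig{
	Content:   map[string]string{"foo": "bar"},
	Client:    c,  // assumed clientset.Interface
	Name:      "density-configmap-0-0",
	Namespace: ns, // assumed namespace name
	LogFunc:   framework.Logf,
}
// runDensityTest calls Run() on every registered config...
if err := cm.Run(); err != nil {
	framework.Failf("creating configmap: %v", err)
}
// ...and cleanupDensityTest later calls Stop() to delete it again.
defer cm.Stop()
```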
@@ -479,6 +489,18 @@
 				})
 				secretNames = append(secretNames, secretName)
 			}
+			configMapNames := []string{}
+			for j := 0; j < itArg.configMapsPerPod; j++ {
+				configMapName := fmt.Sprintf("density-configmap-%v-%v", i, j)
+				configMapConfigs = append(configMapConfigs, &testutils.ConfigMapConfig{
+					Content:   map[string]string{"foo": "bar"},
+					Client:    clients[i],
+					Name:      configMapName,
+					Namespace: nsName,
+					LogFunc:   framework.Logf,
+				})
+				configMapNames = append(configMapNames, configMapName)
+			}
 			name := fmt.Sprintf("density%v-%v-%v", totalPods, i, uuid)
 			baseConfig := &testutils.RCConfig{
 				Client: clients[i],
@@ -497,6 +519,7 @@
 				Silent:         true,
 				LogFunc:        framework.Logf,
 				SecretNames:    secretNames,
+				ConfigMapNames: configMapNames,
 			}
 			switch itArg.kind {
 			case api.Kind("ReplicationController"):
@@ -520,6 +543,7 @@
 				PollInterval:     DensityPollInterval,
 				kind:             itArg.kind,
 				SecretConfigs:    secretConfigs,
+				ConfigMapConfigs: configMapConfigs,
 			}
 
 			for i := 0; i < itArg.daemonsPerNode; i++ {
diff --git a/test/e2e/perf/load.go b/test/e2e/perf/load.go
index a365580bbd..2fc4af6c5f 100644
--- a/test/e2e/perf/load.go
+++ b/test/e2e/perf/load.go
@@ -83,6 +83,7 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 	var ns string
 	var configs []testutils.RunObjectConfig
 	var secretConfigs []*testutils.SecretConfig
+	var configMapConfigs []*testutils.ConfigMapConfig
 
 	testCaseBaseName := "load"
 
@@ -141,10 +142,11 @@
 		image   string
 		command []string
 		// What kind of resource we want to create
-		kind           schema.GroupKind
-		services       bool
-		secretsPerPod  int
-		daemonsPerNode int
+		kind             schema.GroupKind
+		services         bool
+		secretsPerPod    int
+		configMapsPerPod int
+		daemonsPerNode   int
 	}
 
 	loadTests := []Load{
@@ -158,20 +160,23 @@
 		{podsPerNode: 30, image: framework.ServeHostnameImage, kind: api.Kind("ReplicationController"), daemonsPerNode: 2},
 		// Test with secrets
 		{podsPerNode: 30, image: framework.ServeHostnameImage, kind: extensions.Kind("Deployment"), secretsPerPod: 2},
+		// Test with configmaps
+		{podsPerNode: 30, image: framework.ServeHostnameImage, kind: extensions.Kind("Deployment"), configMapsPerPod: 2},
 		// Special test case which randomizes created resources
 		{podsPerNode: 30, image: framework.ServeHostnameImage, kind: randomKind},
 	}
 
 	for _, testArg := range loadTests {
 		feature := "ManualPerformance"
-		if testArg.podsPerNode == 30 && testArg.kind == api.Kind("ReplicationController") && testArg.daemonsPerNode == 0 && testArg.secretsPerPod == 0 {
+		if testArg.podsPerNode == 30 && testArg.kind == api.Kind("ReplicationController") && testArg.daemonsPerNode == 0 && testArg.secretsPerPod == 0 && testArg.configMapsPerPod == 0 {
 			feature = "Performance"
 		}
-		name := fmt.Sprintf("[Feature:%s] should be able to handle %v pods per node %v with %v secrets and %v daemons",
+		name := fmt.Sprintf("[Feature:%s] should be able to handle %v pods per node %v with %v secrets, %v configmaps and %v daemons",
 			feature,
 			testArg.podsPerNode,
 			testArg.kind,
 			testArg.secretsPerPod,
+			testArg.configMapsPerPod,
 			testArg.daemonsPerNode,
 		)
 		itArg := testArg
@@ -184,7 +189,8 @@
 			framework.ExpectNoError(err)
 
 			totalPods := (itArg.podsPerNode - itArg.daemonsPerNode) * nodeCount
-			configs, secretConfigs = generateConfigs(totalPods, itArg.image, itArg.command, namespaces, itArg.kind, itArg.secretsPerPod)
+			configs, secretConfigs, configMapConfigs = generateConfigs(totalPods, itArg.image, itArg.command, namespaces, itArg.kind, itArg.secretsPerPod, itArg.configMapsPerPod)
+
 			if itArg.services {
 				framework.Logf("Creating services")
 				services := generateServicesForConfigs(configs)
@@ -208,11 +214,16 @@
 			} else {
 				framework.Logf("Skipping service creation")
 			}
-			// Create all secrets
+			// Create all secrets.
 			for i := range secretConfigs {
 				secretConfigs[i].Run()
 				defer secretConfigs[i].Stop()
 			}
+			// Create all configmaps.
+			for i := range configMapConfigs {
+				configMapConfigs[i].Run()
+				defer configMapConfigs[i].Stop()
+			}
 			// StartDeamon if needed
 			for i := 0; i < itArg.daemonsPerNode; i++ {
 				daemonName := fmt.Sprintf("load-daemon-%v", i)
@@ -350,20 +361,25 @@ func generateConfigs(
 	nss []*v1.Namespace,
 	kind schema.GroupKind,
 	secretsPerPod int,
-) ([]testutils.RunObjectConfig, []*testutils.SecretConfig) {
+	configMapsPerPod int,
+) ([]testutils.RunObjectConfig, []*testutils.SecretConfig, []*testutils.ConfigMapConfig) {
 	configs := make([]testutils.RunObjectConfig, 0)
 	secretConfigs := make([]*testutils.SecretConfig, 0)
+	configMapConfigs := make([]*testutils.ConfigMapConfig, 0)
 
 	smallGroupCount, mediumGroupCount, bigGroupCount := computePodCounts(totalPods)
-	newConfigs, newSecretConfigs := GenerateConfigsForGroup(nss, smallGroupName, smallGroupSize, smallGroupCount, image, command, kind, secretsPerPod)
+	newConfigs, newSecretConfigs, newConfigMapConfigs := GenerateConfigsForGroup(nss, smallGroupName, smallGroupSize, smallGroupCount, image, command, kind, secretsPerPod, configMapsPerPod)
 	configs = append(configs, newConfigs...)
 	secretConfigs = append(secretConfigs, newSecretConfigs...)
-	newConfigs, newSecretConfigs = GenerateConfigsForGroup(nss, mediumGroupName, mediumGroupSize, mediumGroupCount, image, command, kind, secretsPerPod)
+	configMapConfigs = append(configMapConfigs, newConfigMapConfigs...)
+	newConfigs, newSecretConfigs, newConfigMapConfigs = GenerateConfigsForGroup(nss, mediumGroupName, mediumGroupSize, mediumGroupCount, image, command, kind, secretsPerPod, configMapsPerPod)
 	configs = append(configs, newConfigs...)
 	secretConfigs = append(secretConfigs, newSecretConfigs...)
-	newConfigs, newSecretConfigs = GenerateConfigsForGroup(nss, bigGroupName, bigGroupSize, bigGroupCount, image, command, kind, secretsPerPod)
+	configMapConfigs = append(configMapConfigs, newConfigMapConfigs...)
+	newConfigs, newSecretConfigs, newConfigMapConfigs = GenerateConfigsForGroup(nss, bigGroupName, bigGroupSize, bigGroupCount, image, command, kind, secretsPerPod, configMapsPerPod)
 	configs = append(configs, newConfigs...)
 	secretConfigs = append(secretConfigs, newSecretConfigs...)
+	configMapConfigs = append(configMapConfigs, newConfigMapConfigs...)
 
 	// Create a number of clients to better simulate real usecase
 	// where not everyone is using exactly the same client.
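Call-site sketch (not part of the patch): `generateConfigs` and the exported `GenerateConfigsForGroup` now return a third slice and take a trailing `configMapsPerPod` count after `secretsPerPod`; passing `0, 0` yields plain pod configs with no secret or configmap mounts, as the test/e2e/empty.go hunk above does. Assuming `namespaces []*v1.Namespace` and an `image` string are in scope:

```go
// Three return values now; discard the ones a test does not use.
configs, _, _ := perf.GenerateConfigsForGroup(
	namespaces, "empty-pod", 1, 1, image, []string{},
	api.Kind("ReplicationController"), 0 /* secretsPerPod */, 0 /* configMapsPerPod */)
if len(configs) != 1 {
	framework.Failf("expected a single config")
}
```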
@@ -378,8 +394,11 @@ func generateConfigs(
 	for i := 0; i < len(secretConfigs); i++ {
 		secretConfigs[i].Client = clients[i%len(clients)]
 	}
+	for i := 0; i < len(configMapConfigs); i++ {
+		configMapConfigs[i].Client = clients[i%len(clients)]
+	}
 
-	return configs, secretConfigs
+	return configs, secretConfigs, configMapConfigs
 }
 
 func GenerateConfigsForGroup(
@@ -390,14 +409,17 @@
 	nss []*v1.Namespace,
 	groupName string,
 	size int,
 	count int,
 	image string,
 	command []string,
 	kind schema.GroupKind,
 	secretsPerPod int,
-) ([]testutils.RunObjectConfig, []*testutils.SecretConfig) {
+	configMapsPerPod int,
+) ([]testutils.RunObjectConfig, []*testutils.SecretConfig, []*testutils.ConfigMapConfig) {
 	configs := make([]testutils.RunObjectConfig, 0, count)
 	secretConfigs := make([]*testutils.SecretConfig, 0, count*secretsPerPod)
+	configMapConfigs := make([]*testutils.ConfigMapConfig, 0, count*configMapsPerPod)
 	savedKind := kind
 	for i := 1; i <= count; i++ {
 		kind = savedKind
 		namespace := nss[i%len(nss)].Name
 		secretNames := make([]string, 0, secretsPerPod)
+		configMapNames := make([]string, 0, configMapsPerPod)
 
 		for j := 0; j < secretsPerPod; j++ {
 			secretName := fmt.Sprintf("%v-%v-secret-%v", groupName, i, j)
@@ -411,6 +433,18 @@
 			secretNames = append(secretNames, secretName)
 		}
+		for j := 0; j < configMapsPerPod; j++ {
+			configMapName := fmt.Sprintf("%v-%v-configmap-%v", groupName, i, j)
+			configMapConfigs = append(configMapConfigs, &testutils.ConfigMapConfig{
+				Content:   map[string]string{"foo": "bar"},
+				Client:    nil, // this will be overwritten later
+				Name:      configMapName,
+				Namespace: namespace,
+				LogFunc:   framework.Logf,
+			})
+			configMapNames = append(configMapNames, configMapName)
+		}
+
 		baseConfig := &testutils.RCConfig{
 			Client:         nil, // this will be overwritten later
 			InternalClient: nil, // this will be overwritten later
@@ -423,6 +457,7 @@
 			CpuRequest:     10,       // 0.01 core
 			MemRequest:     26214400, // 25MB
 			SecretNames:    secretNames,
+			ConfigMapNames: configMapNames,
 		}
 
 		if kind == randomKind {
@@ -444,7 +479,7 @@
 		}
 		configs = append(configs, config)
 	}
-	return configs, secretConfigs
+	return configs, secretConfigs, configMapConfigs
 }
 
 func generateServicesForConfigs(configs []testutils.RunObjectConfig) []*v1.Service {
diff --git a/test/utils/runners.go b/test/utils/runners.go
index c1cdb21a2d..ae3e5990ae 100644
--- a/test/utils/runners.go
+++ b/test/utils/runners.go
@@ -163,8 +163,9 @@ type RCConfig struct {
 	NodeDumpFunc      func(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{}))
 	ContainerDumpFunc func(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{}))
 
-	// Names of the secrets to mount
-	SecretNames []string
+	// Names of the secrets and configmaps to mount.
+	SecretNames    []string
+	ConfigMapNames []string
 }
 
 func (rc *RCConfig) RCConfigLog(fmt string, args ...interface{}) {
@@ -306,6 +307,9 @@
 	if len(config.SecretNames) > 0 {
 		attachSecrets(&deployment.Spec.Template, config.SecretNames)
 	}
+	if len(config.ConfigMapNames) > 0 {
+		attachConfigMaps(&deployment.Spec.Template, config.ConfigMapNames)
+	}
 
 	config.applyTo(&deployment.Spec.Template)
 
@@ -370,6 +374,9 @@
 	if len(config.SecretNames) > 0 {
 		attachSecrets(&rs.Spec.Template, config.SecretNames)
 	}
+	if len(config.ConfigMapNames) > 0 {
+		attachConfigMaps(&rs.Spec.Template, config.ConfigMapNames)
+	}
 
 	config.applyTo(&rs.Spec.Template)
 
@@ -430,6 +437,9 @@
 	if len(config.SecretNames) > 0 {
 		attachSecrets(&job.Spec.Template, config.SecretNames)
 	}
+	if len(config.ConfigMapNames) > 0 {
+		attachConfigMaps(&job.Spec.Template, config.ConfigMapNames)
+	}
 
 	config.applyTo(&job.Spec.Template)
 
@@ -529,6 +539,9 @@
 	if len(config.SecretNames) > 0 {
 		attachSecrets(rc.Spec.Template, config.SecretNames)
 	}
+	if len(config.ConfigMapNames) > 0 {
+		attachConfigMaps(rc.Spec.Template, config.ConfigMapNames)
+	}
 
 	config.applyTo(rc.Spec.Template)
 
@@ -1118,6 +1131,67 @@ func attachSecrets(template *v1.PodTemplateSpec, secretNames []string) {
 	template.Spec.Containers[0].VolumeMounts = mounts
 }
 
+type ConfigMapConfig struct {
+	Content   map[string]string
+	Client    clientset.Interface
+	Name      string
+	Namespace string
+	// If set this function will be used to print log lines instead of glog.
+	LogFunc func(fmt string, args ...interface{})
+}
+
+func (config *ConfigMapConfig) Run() error {
+	configMap := &v1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: config.Name,
+		},
+		Data: map[string]string{},
+	}
+	for k, v := range config.Content {
+		configMap.Data[k] = v
+	}
+
+	_, err := config.Client.Core().ConfigMaps(config.Namespace).Create(configMap)
+	if err != nil {
+		return fmt.Errorf("Error creating configmap: %v", err)
+	}
+	config.LogFunc("Created configmap %v/%v", config.Namespace, config.Name)
+	return nil
+}
+
+func (config *ConfigMapConfig) Stop() error {
+	if err := config.Client.Core().ConfigMaps(config.Namespace).Delete(config.Name, &metav1.DeleteOptions{}); err != nil {
+		return fmt.Errorf("Error deleting configmap: %v", err)
+	}
+	config.LogFunc("Deleted configmap %v/%v", config.Namespace, config.Name)
+	return nil
+}
+
+// TODO: attach configmaps using different possibilities: env vars.
+func attachConfigMaps(template *v1.PodTemplateSpec, configMapNames []string) {
+	volumes := make([]v1.Volume, 0, len(configMapNames))
+	mounts := make([]v1.VolumeMount, 0, len(configMapNames))
+	for _, name := range configMapNames {
+		volumes = append(volumes, v1.Volume{
+			Name: name,
+			VolumeSource: v1.VolumeSource{
+				ConfigMap: &v1.ConfigMapVolumeSource{
+					LocalObjectReference: v1.LocalObjectReference{
+						Name: name,
+					},
+				},
+			},
+		})
+		mounts = append(mounts, v1.VolumeMount{
+			Name:      name,
+			MountPath: fmt.Sprintf("/%v", name),
+		})
+	}
+
+	template.Spec.Volumes = volumes
+	template.Spec.Containers[0].VolumeMounts = mounts
+}
+
 type DaemonConfig struct {
 	Client clientset.Interface
 	Name   string
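Illustration (not part of the patch): for a pod template with one container and `configMapNames = []string{"cfg-a"}` (a hypothetical name), `attachConfigMaps` above leaves the template with one configmap-backed volume, mounted at a path derived from the name:

```go
template.Spec.Volumes = []v1.Volume{{
	Name: "cfg-a",
	VolumeSource: v1.VolumeSource{
		ConfigMap: &v1.ConfigMapVolumeSource{
			LocalObjectReference: v1.LocalObjectReference{Name: "cfg-a"},
		},
	},
}}
template.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{{
	Name:      "cfg-a",
	MountPath: "/cfg-a", // fmt.Sprintf("/%v", name)
}}
```

Note that both `attachSecrets` and `attachConfigMaps` appear to assign `template.Spec.Volumes` rather than append to it, so a config setting both `SecretNames` and `ConfigMapNames` would keep only the configmap volumes; the tests in this patch exercise secrets and configmaps in separate test cases.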