Fix secret/configmap/projected update tests to work for large clusters

pull/6/head
Shyam Jeedigunta 2017-07-03 21:08:05 +02:00
parent f38adf37de
commit b5b4ba8fec
4 changed files with 54 additions and 84 deletions
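In short, each affected test drops its per-test timeout logic (a hard-coded 300-second constant, or an inline node-TTL lookup) in favor of a single framework helper, so the pod-log timeout and the mounttest --retry_time argument both scale with cluster size. Below is a minimal sketch of the pattern the updated tests follow; the helper function and container name here are illustrative, not part of the diff.

package common // sketch only: the affected e2e tests live under test/e2e/common

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// buildVolumeTestContainer is a hypothetical helper showing how the updated
// tests derive the retry budget from cluster size instead of hard-coding it.
func buildVolumeTestContainer(f *framework.Framework) v1.Container {
	// Kubelet sync period + secret/configmap TTL (cluster-size dependent) + fudge factor.
	podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
	containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
	return v1.Container{
		Name:  "configmap-volume-test",
		Image: "gcr.io/google_containers/mounttest:0.8",
		// mounttest keeps re-reading the projected file until the expected
		// content shows up or the retry budget runs out.
		Command: []string{"/mt", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volume/data-1"},
	}
}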

View File

@@ -20,7 +20,6 @@ import (
"fmt"
"os"
"path"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -73,12 +72,8 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
})
It("updates should be reflected in volume [Conformance] [Volume]", func() {
// We may have to wait for a full sync period to elapse before the
// Kubelet projects the update into the volume and the container picks
// it up. This timeout is based on the default Kubelet sync period (1
// minute) plus additional time for fudge factor.
const podLogTimeout = 300 * time.Second
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
name := "configmap-test-upd-" + string(uuid.NewUUID())
volumeName := "configmap-volume"
@@ -122,7 +117,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
{
Name: containerName,
Image: "gcr.io/google_containers/mounttest:0.8",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/configmap-volume/data-1"},
Command: []string{"/mt", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
@@ -155,14 +150,9 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
})
It("optional updates should be reflected in volume [Conformance] [Volume]", func() {
// We may have to wait for a full sync period to elapse before the
// Kubelet projects the update into the volume and the container picks
// it up. This timeout is based on the default Kubelet sync period (1
// minute) plus additional time for fudge factor.
const podLogTimeout = 300 * time.Second
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
trueVal := true
volumeMountPath := "/etc/configmap-volumes"
deleteName := "cm-test-opt-del-" + string(uuid.NewUUID())
@@ -259,7 +249,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
{
Name: deleteContainerName,
Image: "gcr.io/google_containers/mounttest:0.8",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/configmap-volumes/delete/data-1"},
Command: []string{"/mt", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/delete/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: deleteVolumeName,
@@ -271,7 +261,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
{
Name: updateContainerName,
Image: "gcr.io/google_containers/mounttest:0.8",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/configmap-volumes/update/data-3"},
Command: []string{"/mt", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/update/data-3"},
VolumeMounts: []v1.VolumeMount{
{
Name: updateVolumeName,
@@ -283,7 +273,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
{
Name: createContainerName,
Image: "gcr.io/google_containers/mounttest:0.8",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/configmap-volumes/create/data-1"},
Command: []string{"/mt", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: createVolumeName,

View File

@@ -171,24 +171,8 @@ var _ = framework.KubeDescribe("Projected", func() {
})
It("optional updates should be reflected in volume [Conformance] [Volume]", func() {
// With SecretManager, we may have to wait up to full sync period + TTL of
// a secret to elapse before the Kubelet projects the update into the volume
// and the container picks it up.
// This timeout is based on default Kubelet sync period (1 minute) plus
// maximum secret TTL (based on cluster size) plus additional time for fudge
// factor.
nodes, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err)
// Since the TTL the kubelet is using is stored in the node object, for the timeout
// purpose we take it from the first node (all of them should be the same).
secretTTL, exists := framework.GetTTLAnnotationFromNode(&nodes.Items[0])
if !exists {
framework.Logf("Couldn't get ttl annotation from: %#v", nodes.Items[0])
}
podLogTimeout := 240*time.Second + secretTTL
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
trueVal := true
volumeMountPath := "/etc/projected-secret-volumes"
@@ -232,6 +216,7 @@ var _ = framework.KubeDescribe("Projected", func() {
}
By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name))
var err error
if deleteSecret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
}
@@ -303,7 +288,7 @@ var _ = framework.KubeDescribe("Projected", func() {
{
Name: deleteContainerName,
Image: "gcr.io/google_containers/mounttest:0.8",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-secret-volumes/delete/data-1"},
Command: []string{"/mt", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-secret-volumes/delete/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: deleteVolumeName,
@@ -315,7 +300,7 @@ var _ = framework.KubeDescribe("Projected", func() {
{
Name: updateContainerName,
Image: "gcr.io/google_containers/mounttest:0.8",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-secret-volumes/update/data-3"},
Command: []string{"/mt", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-secret-volumes/update/data-3"},
VolumeMounts: []v1.VolumeMount{
{
Name: updateVolumeName,
@@ -327,7 +312,7 @@ var _ = framework.KubeDescribe("Projected", func() {
{
Name: createContainerName,
Image: "gcr.io/google_containers/mounttest:0.8",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-secret-volumes/create/data-1"},
Command: []string{"/mt", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-secret-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: createVolumeName,
@@ -422,12 +407,8 @@ var _ = framework.KubeDescribe("Projected", func() {
})
It("updates should be reflected in volume [Conformance] [Volume]", func() {
// We may have to wait for a full sync period to elapse before the
// Kubelet projects the update into the volume and the container picks
// it up. This timeout is based on the default Kubelet sync period (1
// minute) plus additional time for fudge factor.
const podLogTimeout = 300 * time.Second
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
name := "projected-configmap-test-upd-" + string(uuid.NewUUID())
volumeName := "projected-configmap-volume"
@@ -476,7 +457,7 @@ var _ = framework.KubeDescribe("Projected", func() {
{
Name: containerName,
Image: "gcr.io/google_containers/mounttest:0.8",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-configmap-volume/data-1"},
Command: []string{"/mt", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
@@ -509,14 +490,9 @@ var _ = framework.KubeDescribe("Projected", func() {
})
It("optional updates should be reflected in volume [Conformance] [Volume]", func() {
// We may have to wait for a full sync period to elapse before the
// Kubelet projects the update into the volume and the container picks
// it up. This timeout is based on the default Kubelet sync period (1
// minute) plus additional time for fudge factor.
const podLogTimeout = 300 * time.Second
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
trueVal := true
volumeMountPath := "/etc/projected-configmap-volumes"
deleteName := "cm-test-opt-del-" + string(uuid.NewUUID())
@@ -631,7 +607,7 @@ var _ = framework.KubeDescribe("Projected", func() {
{
Name: deleteContainerName,
Image: "gcr.io/google_containers/mounttest:0.8",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-configmap-volumes/delete/data-1"},
Command: []string{"/mt", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volumes/delete/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: deleteVolumeName,
@@ -643,7 +619,7 @@ var _ = framework.KubeDescribe("Projected", func() {
{
Name: updateContainerName,
Image: "gcr.io/google_containers/mounttest:0.8",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-configmap-volumes/update/data-3"},
Command: []string{"/mt", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volumes/update/data-3"},
VolumeMounts: []v1.VolumeMount{
{
Name: updateVolumeName,
@@ -655,7 +631,7 @@ var _ = framework.KubeDescribe("Projected", func() {
{
Name: createContainerName,
Image: "gcr.io/google_containers/mounttest:0.8",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-configmap-volumes/create/data-1"},
Command: []string{"/mt", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: createVolumeName,

View File

@@ -20,7 +20,6 @@ import (
"fmt"
"os"
"path"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -154,24 +153,8 @@ var _ = framework.KubeDescribe("Secrets", func() {
})
It("optional updates should be reflected in volume [Conformance] [Volume]", func() {
// With SecretManager, we may have to wait up to full sync period + TTL of
// a secret to elapse before the Kubelet projects the update into the volume
// and the container picks it up.
// This timeout is based on default Kubelet sync period (1 minute) plus
// maximum secret TTL (based on cluster size) plus additional time for fudge
// factor.
nodes, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err)
// Since the TTL the kubelet is using is stored in the node object, for the timeout
// purpose we take it from the first node (all of them should be the same).
secretTTL, exists := framework.GetTTLAnnotationFromNode(&nodes.Items[0])
if !exists {
framework.Logf("Couldn't get ttl annotation from: %#v", nodes.Items[0])
}
podLogTimeout := 240*time.Second + secretTTL
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
trueVal := true
volumeMountPath := "/etc/secret-volumes"
@@ -215,6 +198,7 @@ var _ = framework.KubeDescribe("Secrets", func() {
}
By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name))
var err error
if deleteSecret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
}
@@ -262,7 +246,7 @@ var _ = framework.KubeDescribe("Secrets", func() {
{
Name: deleteContainerName,
Image: "gcr.io/google_containers/mounttest:0.8",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/secret-volumes/delete/data-1"},
Command: []string{"/mt", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/secret-volumes/delete/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: deleteVolumeName,
@@ -274,7 +258,7 @@ var _ = framework.KubeDescribe("Secrets", func() {
{
Name: updateContainerName,
Image: "gcr.io/google_containers/mounttest:0.8",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/secret-volumes/update/data-3"},
Command: []string{"/mt", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/secret-volumes/update/data-3"},
VolumeMounts: []v1.VolumeMount{
{
Name: updateVolumeName,
@@ -286,7 +270,7 @@ var _ = framework.KubeDescribe("Secrets", func() {
{
Name: createContainerName,
Image: "gcr.io/google_containers/mounttest:0.8",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/secret-volumes/create/data-1"},
Command: []string{"/mt", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/secret-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: createVolumeName,

View File

@@ -2421,20 +2421,40 @@ func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) er
})
}
func GetTTLAnnotationFromNode(node *v1.Node) (time.Duration, bool) {
func GetPodSecretUpdateTimeout(c clientset.Interface) time.Duration {
// With SecretManager(ConfigMapManager), we may have to wait up to full sync period +
// TTL of secret(configmap) to elapse before the Kubelet projects the update into the
// volume and the container picks it up.
// So this timeout is based on default Kubelet sync period (1 minute) + maximum TTL for
// secret(configmap) that's based on cluster size + additional time as a fudge factor.
secretTTL, err := GetNodeTTLAnnotationValue(c)
if err != nil {
Logf("Couldn't get node TTL annotation (using default value of 0): %v", err)
}
podLogTimeout := 240*time.Second + secretTTL
return podLogTimeout
}
func GetNodeTTLAnnotationValue(c clientset.Interface) (time.Duration, error) {
nodes, err := c.Core().Nodes().List(metav1.ListOptions{})
if err != nil || len(nodes.Items) == 0 {
return time.Duration(0), fmt.Errorf("Couldn't list any nodes to get TTL annotation: %v", err)
}
// Since the TTL the kubelet is using is stored in the node object, for the timeout
// purpose we take it from the first node (all of them should be the same).
node := &nodes.Items[0]
if node.Annotations == nil {
return time.Duration(0), false
return time.Duration(0), fmt.Errorf("No annotations found on the node")
}
value, ok := node.Annotations[v1.ObjectTTLAnnotationKey]
if !ok {
return time.Duration(0), false
return time.Duration(0), fmt.Errorf("No TTL annotation found on the node")
}
intValue, err := strconv.Atoi(value)
if err != nil {
Logf("Cannot convert TTL annotation from %#v to int", *node)
return time.Duration(0), false
return time.Duration(0), fmt.Errorf("Cannot convert TTL annotation from %#v to int", *node)
}
return time.Duration(intValue) * time.Second, true
return time.Duration(intValue) * time.Second, nil
}
func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, labelValue string) {
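
For completeness, here is a short hedged sketch of how the two new helpers could be called directly; the wrapper function is hypothetical and would sit alongside them in the framework package. Note that GetPodSecretUpdateTimeout treats a missing TTL annotation as non-fatal and simply falls back to a zero TTL.

package framework // sketch only: placed with the helpers added above

import clientset "k8s.io/client-go/kubernetes"

// logUpdateTimeouts is an illustrative example, not part of this commit.
func logUpdateTimeouts(c clientset.Interface) {
	// The node annotation carries the TTL the kubelets use for secret/configmap
	// caching; an error simply means no annotation was found and 0 is assumed.
	if ttl, err := GetNodeTTLAnnotationValue(c); err == nil {
		Logf("node-reported secret/configmap TTL: %v", ttl)
	}
	// 240s (kubelet sync period plus fudge factor) plus the TTL above.
	Logf("pod secret/configmap update timeout: %v", GetPodSecretUpdateTimeout(c))
}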