From 8fff7c88058bd9c493cb005004ee783df95813da Mon Sep 17 00:00:00 2001
From: Christian Bell
Date: Tue, 21 Feb 2017 17:39:26 -0800
Subject: [PATCH] Add support for 'providerUid' in ingress objects.

The providerUid gives each ingress object a cluster-unique Uid that can be
leveraged by ingress providers.

In the process, supplement the testing of configMap updates to ensure that
the updates are propagated prior to any ingress object being created.
Configmap key/vals for Uid and ProviderUid must exist at time of Ingress
creation.
---
 .../ingress/ingress_controller.go      |  53 ++++--
 .../ingress/ingress_controller_test.go |  42 +++--
 test/e2e_federation/BUILD              |   1 +
 test/e2e_federation/ingress.go         | 161 ++++++++++++++----
 test/e2e_federation/util.go            |   8 +-
 5 files changed, 210 insertions(+), 55 deletions(-)

diff --git a/federation/pkg/federation-controller/ingress/ingress_controller.go b/federation/pkg/federation-controller/ingress/ingress_controller.go
index ab1d0d1e6f..3ca2d6d76b 100644
--- a/federation/pkg/federation-controller/ingress/ingress_controller.go
+++ b/federation/pkg/federation-controller/ingress/ingress_controller.go
@@ -17,6 +17,7 @@ limitations under the License.
 package ingress
 
 import (
+	"crypto/md5"
 	"fmt"
 	"sync"
 	"time"
@@ -55,6 +56,7 @@ const (
 	uidConfigMapName      = "ingress-uid" // Name of the config-map and key the ingress controller stores its uid in.
 	uidConfigMapNamespace = "kube-system"
 	uidKey                = "uid"
+	providerUidKey        = "provider-uid"
 	// Annotation on the ingress in federation control plane that is used to keep
 	// track of the first cluster in which we create ingress.
 	// We wait for ingress to be created in this cluster before creating it any
@@ -272,7 +274,7 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
 			glog.V(4).Infof("Attempting to update ConfigMap: %v", configMap)
 			_, err := client.Core().ConfigMaps(configMap.Namespace).Update(configMap)
 			if err == nil {
-				glog.V(4).Infof("Successfully updated ConfigMap %q", configMapName)
+				glog.V(4).Infof("Successfully updated ConfigMap %q %v", configMapName, configMap)
 			} else {
 				glog.V(4).Infof("Failed to update ConfigMap %q: %v", configMapName, err)
 			}
@@ -523,7 +525,10 @@ func (ic *IngressController) reconcileConfigMapForCluster(clusterName string) {
 		uidConfigMapNamespacedName := types.NamespacedName{Name: uidConfigMapName, Namespace: uidConfigMapNamespace}
 		configMapObj, found, err := ic.configMapFederatedInformer.GetTargetStore().GetByKey(cluster.Name, uidConfigMapNamespacedName.String())
 		if !found || err != nil {
-			logmsg := fmt.Sprintf("Failed to get ConfigMap %q for cluster %q. Will try again later: %v", uidConfigMapNamespacedName, cluster.Name, err)
+			logmsg := fmt.Sprintf("Failed to get ConfigMap %q for cluster %q. Will try again later", uidConfigMapNamespacedName, cluster.Name)
+			if err != nil {
+				logmsg = fmt.Sprintf("%v: %v", logmsg, err)
+			}
 			if len(ic.ingressInformerStore.List()) > 0 { // Error-level if ingresses are active, Info-level otherwise.
 				glog.Errorf(logmsg)
 			} else {
@@ -543,6 +548,12 @@ func (ic *IngressController) reconcileConfigMapForCluster(clusterName string) {
 	}
 }
 
+// getProviderUid returns a provider ID based on the provided clusterName.
+func getProviderUid(clusterName string) string {
+	hashedName := md5.Sum([]byte(clusterName))
+	return fmt.Sprintf("%x", hashedName[:8])
+}
+
 /*
   reconcileConfigMap ensures that the configmap in the cluster has a UID
   consistent with the federation cluster's associated annotation.
@@ -570,12 +581,29 @@ func (ic *IngressController) reconcileConfigMap(cluster *federationapi.Cluster,
 
 	}
 	if !clusterIngressUIDExists || clusterIngressUID == "" {
-		ic.updateClusterIngressUIDToMasters(cluster, configMapUID) // Second argument is the fallback, in case this is the only cluster, in which case it becomes the master
-		return
+		glog.V(4).Infof("Cluster %q is the only master", cluster.Name)
+		// Second argument is the fallback, in case this is the only cluster, in which case it becomes the master
+		var err error
+		if clusterIngressUID, err = ic.updateClusterIngressUIDToMasters(cluster, configMapUID); err != nil {
+			return
+		}
+		// If we successfully update the Cluster Object, fallthrough and update the configMap.
 	}
-	if configMapUID != clusterIngressUID { // An update is required
-		glog.V(4).Infof("Ingress UID update is required: configMapUID %q not equal to clusterIngressUID %q", configMapUID, clusterIngressUID)
+
+	// Figure out providerUid.
+	providerUid := getProviderUid(cluster.Name)
+	configMapProviderUid := configMap.Data[providerUidKey]
+
+	if configMapUID == clusterIngressUID && configMapProviderUid == providerUid {
+		glog.V(4).Infof("Ingress configMap update is not required: UID %q and ProviderUid %q are equal", configMapUID, providerUid)
+	} else {
+		if configMapUID != clusterIngressUID {
+			glog.V(4).Infof("Ingress configMap update is required for UID: configMapUID %q not equal to clusterIngressUID %q", configMapUID, clusterIngressUID)
+		} else if configMapProviderUid != providerUid {
+			glog.V(4).Infof("Ingress configMap update is required: configMapProviderUid %q not equal to providerUid %q", configMapProviderUid, providerUid)
+		}
 		configMap.Data[uidKey] = clusterIngressUID
+		configMap.Data[providerUidKey] = providerUid
 		operations := []util.FederatedOperation{{
 			Type: util.OperationTypeUpdate,
 			Obj:  configMap,
@@ -588,8 +616,6 @@ func (ic *IngressController) reconcileConfigMap(cluster *federationapi.Cluster,
 			glog.Errorf("Failed to execute update of ConfigMap %q on cluster %q: %v", configMapName, cluster.Name, err)
 			ic.configMapDeliverer.DeliverAfter(cluster.Name, nil, ic.configMapReviewDelay)
 		}
-	} else {
-		glog.V(4).Infof("Ingress UID update is not required: configMapUID %q is equal to clusterIngressUID %q", configMapUID, clusterIngressUID)
 	}
 }
 
@@ -619,13 +645,13 @@ func (ic *IngressController) getMasterCluster() (master *federationapi.Cluster,
   updateClusterIngressUIDToMasters takes the ingress UID annotation on the master cluster and applies it to cluster.
   If there is no master cluster, then fallbackUID is used (and hence this cluster becomes the master).
 */
-func (ic *IngressController) updateClusterIngressUIDToMasters(cluster *federationapi.Cluster, fallbackUID string) {
+func (ic *IngressController) updateClusterIngressUIDToMasters(cluster *federationapi.Cluster, fallbackUID string) (string, error) {
 	masterCluster, masterUID, err := ic.getMasterCluster()
 	clusterObj, clusterErr := api.Scheme.DeepCopy(cluster) // Make a clone so that we don't clobber our input param
 	cluster, ok := clusterObj.(*federationapi.Cluster)
 	if clusterErr != nil || !ok {
 		glog.Errorf("Internal error: Failed clone cluster resource while attempting to add master ingress UID annotation (%q = %q) from master cluster %q to cluster %q, will try again later: %v", uidAnnotationKey, masterUID, masterCluster.Name, cluster.Name, err)
-		return
+		return "", err
 	}
 	if err == nil {
 		if masterCluster.Name != cluster.Name { // We're not the master, need to get in sync
@@ -635,12 +661,14 @@ func (ic *IngressController) updateClusterIngressUIDToMasters(cluster *federatio
 			cluster.ObjectMeta.Annotations[uidAnnotationKey] = masterUID
 			if _, err = ic.federatedApiClient.Federation().Clusters().Update(cluster); err != nil {
 				glog.Errorf("Failed to add master ingress UID annotation (%q = %q) from master cluster %q to cluster %q, will try again later: %v", uidAnnotationKey, masterUID, masterCluster.Name, cluster.Name, err)
-				return
+				return "", err
 			} else {
 				glog.V(4).Infof("Successfully added master ingress UID annotation (%q = %q) from master cluster %q to cluster %q.", uidAnnotationKey, masterUID, masterCluster.Name, cluster.Name)
+				return masterUID, nil
 			}
 		} else {
 			glog.V(4).Infof("Cluster %q with ingress UID is already the master with annotation (%q = %q), no need to update.", cluster.Name, uidAnnotationKey, cluster.ObjectMeta.Annotations[uidAnnotationKey])
+			return cluster.ObjectMeta.Annotations[uidAnnotationKey], nil
 		}
 	} else {
 		glog.V(2).Infof("No master cluster found to source an ingress UID from for cluster %q. Attempting to elect new master cluster %q with ingress UID %q = %q", cluster.Name, cluster.Name, uidAnnotationKey, fallbackUID)
@@ -651,11 +679,14 @@ func (ic *IngressController) updateClusterIngressUIDToMasters(cluster *federatio
 			cluster.ObjectMeta.Annotations[uidAnnotationKey] = fallbackUID
 			if _, err = ic.federatedApiClient.Federation().Clusters().Update(cluster); err != nil {
 				glog.Errorf("Failed to add ingress UID annotation (%q = %q) to cluster %q. No master elected. Will try again later: %v", uidAnnotationKey, fallbackUID, cluster.Name, err)
+				return "", err
 			} else {
 				glog.V(4).Infof("Successfully added ingress UID annotation (%q = %q) to cluster %q.", uidAnnotationKey, fallbackUID, cluster.Name)
+				return fallbackUID, nil
 			}
 		} else {
 			glog.Errorf("No master cluster exists, and fallbackUID for cluster %q is invalid (%q). This probably means that no clusters have an ingress controller configmap with key %q. Federated Ingress currently supports clusters running Google Loadbalancer Controller (\"GLBC\")", cluster.Name, fallbackUID, uidKey)
+			return "", err
 		}
 	}
 }
diff --git a/federation/pkg/federation-controller/ingress/ingress_controller_test.go b/federation/pkg/federation-controller/ingress/ingress_controller_test.go
index 62f509a06e..1a06bbf260 100644
--- a/federation/pkg/federation-controller/ingress/ingress_controller_test.go
+++ b/federation/pkg/federation-controller/ingress/ingress_controller_test.go
@@ -57,6 +57,7 @@ func TestIngressController(t *testing.T) {
 	cluster2 := NewCluster("cluster2", apiv1.ConditionTrue)
 	cfg1 := NewConfigMap("foo")
 	cfg2 := NewConfigMap("bar") // Different UID from cfg1, so that we can check that they get reconciled.
+	assert.NotEqual(t, cfg1.Data[uidKey], cfg2.Data[uidKey], fmt.Sprintf("ConfigMap in cluster 2 must initially not equal that in cluster 1 for this test - please fix test"))
 
 	t.Log("Creating fake infrastructure")
 	fedClient := &fakefedclientset.Clientset{}
@@ -74,6 +75,7 @@
 	cluster1ConfigMapWatch := RegisterFakeWatch(configmaps, &cluster1Client.Fake)
 	cluster1IngressCreateChan := RegisterFakeCopyOnCreate(ingresses, &cluster1Client.Fake, cluster1IngressWatch)
 	cluster1IngressUpdateChan := RegisterFakeCopyOnUpdate(ingresses, &cluster1Client.Fake, cluster1IngressWatch)
+	cluster1ConfigMapUpdateChan := RegisterFakeCopyOnUpdate(configmaps, &cluster1Client.Fake, cluster1ConfigMapWatch)
 
 	cluster2Client := &fakekubeclientset.Clientset{}
 	RegisterFakeList(ingresses, &cluster2Client.Fake, &extensionsv1beta1.IngressList{Items: []extensionsv1beta1.Ingress{}})
@@ -138,11 +140,24 @@
 	assert.NotNil(t, cluster)
 	assert.Equal(t, cluster.ObjectMeta.Annotations[uidAnnotationKey], cfg1.Data[uidKey])
 
+	t.Log("Adding cluster 2")
+	clusterWatch.Add(cluster2)
+	cluster2ConfigMapWatch.Add(cfg2)
+
+	t.Log("Checking that configmap updates are propagated prior to Federated Ingress")
+	referenceUid := cfg1.Data[uidKey]
+	uid1, providerId1 := GetConfigMapUidAndProviderId(t, cluster1ConfigMapUpdateChan)
+	uid2, providerId2 := GetConfigMapUidAndProviderId(t, cluster2ConfigMapUpdateChan)
+	t.Logf("uid2 = %v and ref = %v", uid2, referenceUid)
+	assert.True(t, referenceUid == uid1, "Expected cluster1 configmap uid %q to be equal to referenceUid %q", uid1, referenceUid)
+	assert.True(t, referenceUid == uid2, "Expected cluster2 configmap uid %q to be equal to referenceUid %q", uid2, referenceUid)
+	assert.True(t, providerId1 != providerId2, "Expected cluster1 providerUid %q to be unique and different from cluster2 providerUid %q", providerId1, providerId2)
+
 	// Test add federated ingress.
 	t.Log("Adding Federated Ingress")
 	fedIngressWatch.Add(&fedIngress)
-	t.Logf("Checking that appropriate finalizers are added")
+	t.Log("Checking that appropriate finalizers are added")
 	// There should be an update to add both the finalizers.
 	updatedIngress := GetIngressFromChan(t, fedIngressUpdateChan)
 	assert.True(t, ingressController.hasFinalizerFunc(updatedIngress, deletionhelper.FinalizerDeleteFromUnderlyingClusters))
@@ -243,9 +258,7 @@
 	fedIngress.Annotations[staticIPNameKeyWritable] = "foo" // Make sure that the base object has a static IP name first.
 	fedIngressWatch.Modify(&fedIngress)
 	clusterWatch.Add(cluster2)
-	// First check that the original values are not equal - see above comment
-	assert.NotEqual(t, cfg1.Data[uidKey], cfg2.Data[uidKey], fmt.Sprintf("ConfigMap in cluster 2 must initially not equal that in cluster 1 for this test - please fix test"))
-	cluster2ConfigMapWatch.Add(cfg2)
+
 	t.Log("Checking that the ingress got created in cluster 2")
 	createdIngress2 := GetIngressFromChan(t, cluster2IngressCreateChan)
 	assert.NotNil(t, createdIngress2)
@@ -253,17 +266,22 @@
 	t.Logf("created meta: %v fed meta: %v", createdIngress2.ObjectMeta, fedIngress.ObjectMeta)
 	assert.True(t, util.ObjectMetaEquivalent(fedIngress.ObjectMeta, createdIngress2.ObjectMeta), "Metadata of created object is not equivalent")
 
-	t.Log("Checking that the configmap in cluster 2 got updated.")
-	updatedConfigMap2 := GetConfigMapFromChan(cluster2ConfigMapUpdateChan)
-	assert.NotNil(t, updatedConfigMap2, fmt.Sprintf("ConfigMap in cluster 2 was not updated (or more likely the test is broken and the API type written is wrong)"))
-	if updatedConfigMap2 != nil {
-		assert.Equal(t, cfg1.Data[uidKey], updatedConfigMap2.Data[uidKey],
-			fmt.Sprintf("UID's in configmaps in cluster's 1 and 2 are not equal (%q != %q)", cfg1.Data["uid"], updatedConfigMap2.Data["uid"]))
-	}
-
 	close(stop)
 }
 
+func GetConfigMapUidAndProviderId(t *testing.T, c chan runtime.Object) (string, string) {
+	updatedConfigMap := GetConfigMapFromChan(c)
+	assert.NotNil(t, updatedConfigMap, "ConfigMap should have received an update")
+	assert.NotNil(t, updatedConfigMap.Data, "ConfigMap data is empty")
+
+	for _, key := range []string{uidKey, providerUidKey} {
+		val, ok := updatedConfigMap.Data[key]
+		assert.True(t, ok, fmt.Sprintf("Didn't receive an update for key %v: %v", key, updatedConfigMap.Data))
+		assert.True(t, len(val) > 0, fmt.Sprintf("Received an empty update for key %v", key))
+	}
+	return updatedConfigMap.Data[uidKey], updatedConfigMap.Data[providerUidKey]
+}
+
 func GetIngressFromChan(t *testing.T, c chan runtime.Object) *extensionsv1beta1.Ingress {
 	obj := GetObjectFromChan(c)
diff --git a/test/e2e_federation/BUILD b/test/e2e_federation/BUILD
index befa476c8f..365e421dab 100644
--- a/test/e2e_federation/BUILD
+++ b/test/e2e_federation/BUILD
@@ -42,6 +42,7 @@ go_library(
         "//vendor:k8s.io/apimachinery/pkg/api/errors",
         "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
         "//vendor:k8s.io/apimachinery/pkg/util/intstr",
+        "//vendor:k8s.io/apimachinery/pkg/util/net",
         "//vendor:k8s.io/apimachinery/pkg/util/wait",
         "//vendor:k8s.io/client-go/rest",
         "//vendor:k8s.io/client-go/tools/clientcmd",
diff --git a/test/e2e_federation/ingress.go b/test/e2e_federation/ingress.go
index c2d2089175..bcac62b052 100644
--- a/test/e2e_federation/ingress.go
+++ b/test/e2e_federation/ingress.go
@@ -17,6 +17,7 @@ limitations under the License.
 package e2e_federation
 
 import (
+	"crypto/tls"
 	"fmt"
 	"net/http"
 	"os"
@@ -28,11 +29,13 @@ import (
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	utilnet "k8s.io/apimachinery/pkg/util/net"
 	"k8s.io/apimachinery/pkg/util/wait"
 	fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+
 	"k8s.io/kubernetes/test/e2e/framework"
 	fedframework "k8s.io/kubernetes/test/e2e_federation/framework"
 
@@ -42,10 +45,67 @@ import (
 
 const (
 	MaxRetriesOnFederatedApiserver = 3
-	FederatedIngressTimeout        = 120 * time.Second
+	FederatedIngressTimeout        = 10 * time.Minute
 	FederatedIngressName           = "federated-ingress"
 	FederatedIngressServiceName    = "federated-ingress-service"
+	FederatedIngressTLSSecretName  = "federated-ingress-tls-secret"
 	FederatedIngressServicePodName = "federated-ingress-service-test-pod"
+	FederatedIngressHost           = "test-f8n.k8s.io."
+
+	// TLS Certificate and Key for the ingress resource
+	// Generated using:
+	// $ openssl req -nodes -x509 -newkey rsa:2048 -keyout fedingtestkey.pem -out fedingtestcrt.pem -days 24855
+	// 24855 days is an arbitrarily large number chosen so that the certificate lifetime in seconds stays just below the int32 maximum.
+	FederatedIngressTLSCrt = `-----BEGIN CERTIFICATE-----
+MIIDaTCCAlGgAwIBAgIJANwsCbwxm9pyMA0GCSqGSIb3DQEBCwUAMEoxCzAJBgNV
+BAYTAlVTMRMwEQYDVQQIDApTb21lLVN0YXRlMQswCQYDVQQKDAJOQTEZMBcGA1UE
+AwwQdGVzdC1mOG4uazhzLmlvLjAgFw0xNjEyMTYwNjA1NDRaGA8yMDg1MDEwMzA2
+MDU0NFowSjELMAkGA1UEBhMCVVMxEzARBgNVBAgMClNvbWUtU3RhdGUxCzAJBgNV
+BAoMAk5BMRkwFwYDVQQDDBB0ZXN0LWY4bi5rOHMuaW8uMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEAmsHYnLhqSeO1Q6SEjaiPiLUQV8tyGfttwNQiOT5u
+ULz6ZWYA40m/1hhla9KH9sJZ515Iq+jTtiVH0rUjryT96SjxitLCAZlxVwQ63B50
+aZF2T2OPSzvrmN+J6VGcRIq0N8fUeyp2WTIEdWlpQ7DTmDNArQqFSIvJndkLow3d
+hec7O+PErnvZQQC9zqa23rGndDzlgDJ4HJGAQNm3uYVh5WHv+wziP67T/82bEGgO
+A6EdDPWzpYxzAA1wsqz9lX5jitlbKdI56698fPR2KRelySf7OXVvZCS4/ED1lF4k
+b7fQgtBhAWe1BkuAMUl7vdRjMps7nkxmBSuxBkVQ7sb5AwIDAQABo1AwTjAdBgNV
+HQ4EFgQUjf53O/W/iE2mxuJkNjZGUfjJ9RUwHwYDVR0jBBgwFoAUjf53O/W/iE2m
+xuJkNjZGUfjJ9RUwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEABE7B
+bAiORDBA3qE5lh6JCs/lEfz93E/gOhD9oDnm9SRND4kjy7qeGxk4Wzsd/Vr+R2mi
+EZ40d4MA/mCCPnYsNQoEXMFc8IvwAbzkhh2gqTNgG0/Ks0A1mIPQNpvUcSetS4IV
+732DvB3nSnFtlzf6afw+V1Vf5ydRNuM/c9GEOOHSz+rs+9M364d+wNaFD64M72ol
+iDMAdtcrhOqkQi0lUING904jlJcyYM5oVNCCtme4F8nkIX9bxP/9Ea6VhDGPeJiX
+tVwZuudkoEbrFlEYbyLrbVeVa9oTf4Jn66iz49/+th+bUtEoTt9gk9Cul5TFgfzx
+EscdahceC7afheq6zg==
+-----END CERTIFICATE-----`
+
+	FederatedIngressTLSKey = `-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCawdicuGpJ47VD
+pISNqI+ItRBXy3IZ+23A1CI5Pm5QvPplZgDjSb/WGGVr0of2wlnnXkir6NO2JUfS
+tSOvJP3pKPGK0sIBmXFXBDrcHnRpkXZPY49LO+uY34npUZxEirQ3x9R7KnZZMgR1
+aWlDsNOYM0CtCoVIi8md2QujDd2F5zs748Sue9lBAL3Oprbesad0POWAMngckYBA
+2be5hWHlYe/7DOI/rtP/zZsQaA4DoR0M9bOljHMADXCyrP2VfmOK2Vsp0jnrr3x8
+9HYpF6XJJ/s5dW9kJLj8QPWUXiRvt9CC0GEBZ7UGS4AxSXu91GMymzueTGYFK7EG
+RVDuxvkDAgMBAAECggEAYrXGPqB6W0r88XpceibL9rzXAcjorJ3s8ZPdiHnDz4fa
+hxa69j6yOBMzjcSpqMFqquM+ozhM4d+BomqbqjmEI1ZUSuIHkRGYc5JlIMXkJvn7
+ZsPwQGKl8cqTotjFPgrizLmPVEhPWLFImsNzuxNsw6XdWQJe5VkUbrRkccqEQ8Wt
+xwq/SlRercIMnRVLOOESq8EyjOY4yDgOdIifq9K9xiI8W6nMiPs0X5AcIJoTMbCe
+cX0zUqW317awDWWP8u2GswwDDm4qPeWnXOrDkDx8Eo0dWJbmxw9su0XrM6KMvEMe
+2a/Fy/enr5Cc6/jgsh3gO5sa8dJ1Cu+wexcoEbez8QKBgQDMXlXJu/C7djke94s3
+vGxati7AGO95bBQHW+cPuN4l0rfPZ8YuUAWD4csW4BOlUPAOukreD/SKdanigR3N
+FqVPeI8rXd5kzy8/lPIOGuSkkVEpKsAJ7prFbSUVKjVPYQk2dsOEeR0r7pr2FxC9
+SBhVS/LgmPYh++iny9D0aU23hQKBgQDB2t55OE+00vgoauUc10LEY+J6tiwXuNm7
+43JtrH5ET4N+TJ2BOUl5f88TY/3QuTu6vYwlxjyn+LFuWQNhShX6lFMjt5zqPTdw
+ZPDA+9B6a45cV3YjXjRsYidpWj0D2lJgy0DbucC4f3eIhNGyFUbAQB9npKDzOeUh
+7Z+p/Grg5wKBgGUnVCLzySWgUImJUPkXZDJJ9j3SmcVpv0gdLvLTN/FUqPIZlTgb
+F3+9ZL4/zrmGpCtF/gSHtSxLLPkVm2CFkvEQ5Rw76/XNrr8zw9NDcGQcISXVKRRB
+a43IhhBBwf02NE8m3YNWRyAVi9G+fOSTKKgfXWnZjAoqG2/iK9ytum/ZAoGAYlP8
+KIxxkYy5Jvchg4GEck0f4ZJpxxaSCoWR0yN9YHTcg8Gk2pkONbyocnNzmN17+HqQ
+jdCBj8nLZedsmXqUr2dwzFskEoQ+jJoGrDyOQKoxqZELcWElQhx/VSbacAvbYRF3
+snwDzxGItgx4uNWl73oW8+FDalvhZ1Y6eGR6ad0CgYEAtlNa92Fbvd3r9O2mdyWe
+D2SXNMi45+wsNafX2sdkyb+qNN6qZXC9ylUl9h0zdky88JNgtAOgxIaRIdEZajnD
+/Zq17sTNtgpm53x16gOAgD8M+/wmBZxA+/IKfFCubuV77MbQoPfcjT5wBMRnFQnY
+Ks7c+dzaRlgDKZ6v/L/8iZU=
+-----END PRIVATE KEY-----`
 )
 
 const (
@@ -69,7 +129,7 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
 		fedframework.SkipUnlessFederated(f.ClientSet)
 		framework.SkipUnlessProviderIs("gce", "gke") // TODO: Federated ingress is not yet supported on non-GCP platforms.
 		nsName := f.FederationNamespace.Name
-		ingress := createIngressOrFail(f.FederationClientset, nsName)
+		ingress := createIngressOrFail(f.FederationClientset, nsName, FederatedIngressServiceName, FederatedIngressTLSSecretName)
 		By(fmt.Sprintf("Creation of ingress %q in namespace %q succeeded. Deleting ingress.", ingress.Name, nsName))
 		// Cleanup
 		err := f.FederationClientset.Extensions().Ingresses(nsName).Delete(ingress.Name, &metav1.DeleteOptions{})
@@ -84,6 +144,8 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
 			clusters map[string]*cluster // All clusters, keyed by cluster name
 			primaryClusterName, federationName, ns string
 			jig *federationTestJig
+			service *v1.Service
+			secret *v1.Secret
 		)
 
 		// register clusters in federation apiserver
@@ -96,16 +158,38 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
 			jig = newFederationTestJig(f.FederationClientset)
 			clusters, primaryClusterName = getRegisteredClusters(UserAgentName, f)
 			ns = f.FederationNamespace.Name
+			// create backend service
+			service = createServiceOrFail(f.FederationClientset, ns, FederatedIngressServiceName)
+			// create the TLS secret
+			secret = createTLSSecretOrFail(f.FederationClientset, ns, FederatedIngressTLSSecretName)
+			// wait for service shards to sync
+			waitForServiceShardsOrFail(ns, service, clusters)
+			// wait for TLS secret sync
+			waitForSecretShardsOrFail(ns, secret, clusters)
 		})
 
 		AfterEach(func() {
 			// Delete all ingresses.
 			nsName := f.FederationNamespace.Name
 			deleteAllIngressesOrFail(f.FederationClientset, nsName)
+			if secret != nil {
+				orphanDependents := false
+				deleteSecretOrFail(f.FederationClientset, ns, secret.Name, &orphanDependents)
+				secret = nil
+			} else {
+				By("No secret to delete. Secret is nil")
+			}
+			if service != nil {
+				deleteServiceOrFail(f.FederationClientset, ns, service.Name, nil)
+				cleanupServiceShardsAndProviderResources(ns, service, clusters)
+				service = nil
+			} else {
+				By("No service to delete. Service is nil")
+			}
 		})
 
 		It("should create and update matching ingresses in underlying clusters", func() {
-			ingress := createIngressOrFail(f.FederationClientset, ns)
+			ingress := createIngressOrFail(f.FederationClientset, ns, FederatedIngressServiceName, FederatedIngressTLSSecretName)
 			// wait for ingress shards being created
 			waitForIngressShardsOrFail(ns, ingress, clusters)
 			ingress = updateIngressOrFail(f.FederationClientset, ns)
@@ -137,33 +221,18 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
 		var _ = Describe("Ingress connectivity and DNS", func() {
-			var (
-				service *v1.Service
-			)
-
 			BeforeEach(func() {
 				fedframework.SkipUnlessFederated(f.ClientSet)
 				// create backend pod
 				createBackendPodsOrFail(clusters, ns, FederatedIngressServicePodName)
-				// create backend service
-				service = createServiceOrFail(f.FederationClientset, ns, FederatedIngressServiceName)
 				// create ingress object
-				jig.ing = createIngressOrFail(f.FederationClientset, ns)
-				// wait for services objects sync
-				waitForServiceShardsOrFail(ns, service, clusters)
+				jig.ing = createIngressOrFail(f.FederationClientset, ns, service.Name, FederatedIngressTLSSecretName)
 				// wait for ingress objects sync
 				waitForIngressShardsOrFail(ns, jig.ing, clusters)
 			})
 
 			AfterEach(func() {
 				deleteBackendPodsOrFail(clusters, ns)
-				if service != nil {
-					deleteServiceOrFail(f.FederationClientset, ns, service.Name, nil)
-					cleanupServiceShardsAndProviderResources(ns, service, clusters)
-					service = nil
-				} else {
-					By("No service to delete. Service is nil")
-				}
 				if jig.ing != nil {
 					deleteIngressOrFail(f.FederationClientset, ns, jig.ing.Name, nil)
 					for clusterName, cluster := range clusters {
@@ -216,7 +285,7 @@ func equivalentIngress(federatedIngress, clusterIngress v1beta1.Ingress) bool {
 // underlying clusters when orphan dependents is false and they are not deleted
 // when orphan dependents is true.
 func verifyCascadingDeletionForIngress(clientset *fedclientset.Clientset, clusters map[string]*cluster, orphanDependents *bool, nsName string) {
-	ingress := createIngressOrFail(clientset, nsName)
+	ingress := createIngressOrFail(clientset, nsName, FederatedIngressServiceName, FederatedIngressTLSSecretName)
 	ingressName := ingress.Name
 	// Check subclusters if the ingress was created there.
By(fmt.Sprintf("Waiting for ingress %s to be created in all underlying clusters", ingressName)) @@ -339,7 +408,7 @@ func deleteClusterIngressOrFail(clusterName string, clientset *kubeclientset.Cli framework.ExpectNoError(err, "Error deleting cluster ingress %q/%q from cluster %q", namespace, ingressName, clusterName) } -func createIngressOrFail(clientset *fedclientset.Clientset, namespace string) *v1beta1.Ingress { +func createIngressOrFail(clientset *fedclientset.Clientset, namespace, serviceName, secretName string) *v1beta1.Ingress { if clientset == nil || len(namespace) == 0 { Fail(fmt.Sprintf("Internal error: invalid parameters passed to createIngressOrFail: clientset: %v, namespace: %v", clientset, namespace)) } @@ -351,9 +420,14 @@ func createIngressOrFail(clientset *fedclientset.Clientset, namespace string) *v }, Spec: v1beta1.IngressSpec{ Backend: &v1beta1.IngressBackend{ - ServiceName: "testingress-service", + ServiceName: serviceName, ServicePort: intstr.FromInt(80), }, + TLS: []v1beta1.IngressTLS{ + { + SecretName: secretName, + }, + }, }, } @@ -406,17 +480,42 @@ func (j *federationTestJig) waitForFederatedIngress() { } j.address = address framework.Logf("Found address %v for ingress %v", j.address, j.ing.Name) - timeoutClient := &http.Client{Timeout: reqTimeout} - // Check that all rules respond to a simple GET. - for _, rules := range j.ing.Spec.Rules { - proto := "http" - for _, p := range rules.IngressRuleValue.HTTP.Paths { - route := fmt.Sprintf("%v://%v%v", proto, address, p.Path) - framework.Logf("Testing route %v host %v with simple GET", route, rules.Host) - framework.ExpectNoError(framework.PollURL(route, rules.Host, framework.LoadBalancerPollTimeout, framework.LoadBalancerPollInterval, timeoutClient, false)) - } + client := &http.Client{ + // This is mostly `http.DefaultTransport` except for the + // `TLSClientConfig`. + Transport: utilnet.SetTransportDefaults(&http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + }), + Timeout: reqTimeout, } + + // Verify that simple GET works. + route := fmt.Sprintf("https://%v", address) + framework.Logf("Testing route %v with simple GET", route) + framework.ExpectNoError(framework.PollURL(route, FederatedIngressHost, framework.LoadBalancerPollTimeout, framework.LoadBalancerPollInterval, client, false)) +} + +func createTLSSecretOrFail(clientset *fedclientset.Clientset, namespace, secretName string) *v1.Secret { + if clientset == nil || len(namespace) == 0 { + framework.Logf("Internal error: invalid parameters passed to createTLSSecretOrFail: clientset: %v, namespace: %v", clientset, namespace) + } + secret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + }, + Type: v1.SecretTypeOpaque, + Data: map[string][]byte{ + "tls.crt": []byte(FederatedIngressTLSCrt), + "tls.key": []byte(FederatedIngressTLSKey), + }, + } + By(fmt.Sprintf("Creating federated secret %q in namespace %q", secretName, namespace)) + newSecret, err := clientset.Core().Secrets(namespace).Create(secret) + framework.ExpectNoError(err, "creating secret %q in namespace %q", secret.Name, namespace) + return newSecret } type federationTestJig struct { diff --git a/test/e2e_federation/util.go b/test/e2e_federation/util.go index a8876a0251..21964e59c5 100644 --- a/test/e2e_federation/util.go +++ b/test/e2e_federation/util.go @@ -48,6 +48,10 @@ var ( UserAgentName = "federation-e2e" // We use this to decide how long to wait for our DNS probes to succeed. 
 	DNSTTL = 180 * time.Second // TODO: make k8s.io/kubernetes/federation/pkg/federation-controller/service.minDnsTtl exported, and import it here.
+
+	// FederatedSvcNodePort is the node port on which the federated
+	// service shards are exposed in the underlying cluster.
+	FederatedSvcNodePort = int32(32256)
 )
 
 var FederationSuite common.Suite
@@ -259,6 +263,7 @@ func createService(clientset *fedclientset.Clientset, namespace, name string) (*
 				Protocol:   v1.ProtocolTCP,
 				Port:       80,
 				TargetPort: intstr.FromInt(8080),
+				NodePort:   FederatedSvcNodePort,
 			},
 		},
 		SessionAffinity: v1.ServiceAffinityNone,
@@ -279,10 +284,11 @@ func deleteServiceOrFail(clientset *fedclientset.Clientset, namespace string, se
 	if clientset == nil || len(namespace) == 0 || len(serviceName) == 0 {
 		Fail(fmt.Sprintf("Internal error: invalid parameters passed to deleteServiceOrFail: clientset: %v, namespace: %v, service: %v", clientset, namespace, serviceName))
 	}
+	framework.Logf("Deleting service %q in namespace %v", serviceName, namespace)
 	err := clientset.Services(namespace).Delete(serviceName, &metav1.DeleteOptions{OrphanDependents: orphanDependents})
 	framework.ExpectNoError(err, "Error deleting service %q from namespace %q", serviceName, namespace)
 	// Wait for the service to be deleted.
-	err = wait.Poll(5*time.Second, 3*wait.ForeverTestTimeout, func() (bool, error) {
+	err = wait.Poll(5*time.Second, 10*wait.ForeverTestTimeout, func() (bool, error) {
 		_, err := clientset.Core().Services(namespace).Get(serviceName, metav1.GetOptions{})
 		if err != nil && errors.IsNotFound(err) {
 			return true, nil