Add support for 'providerUid' in ingress objects.

The providerUid gives each ingress object a cluster-unique Uid that can
be leveraged by ingress providers.

In the process, supplement the testing of configMap updates to ensure
that the updates are propagated before any ingress object is created.
ConfigMap key/vals for Uid and ProviderUid must exist at the time of
Ingress creation.
pull/6/head
Christian Bell 2017-02-21 17:39:26 -08:00
parent 1d97472361
commit 8fff7c8805
5 changed files with 210 additions and 55 deletions
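For context, the provider UID introduced here is derived deterministically from the cluster name. The sketch below restates the getProviderUid helper added in this commit as a standalone program (the cluster names are illustrative, not values from this change): the value is the hex encoding of the first 8 bytes of the MD5 sum of the cluster name, so it is stable for a given cluster and differs across clusters with different names.

package main

import (
    "crypto/md5"
    "fmt"
)

// Same derivation as the getProviderUid helper in this commit:
// hex-encode the first 8 bytes of the MD5 sum of the cluster name.
func getProviderUid(clusterName string) string {
    hashedName := md5.Sum([]byte(clusterName))
    return fmt.Sprintf("%x", hashedName[:8])
}

func main() {
    // "cluster1" and "cluster2" are hypothetical names used only for illustration.
    fmt.Println(getProviderUid("cluster1")) // 16 hex characters, stable for this name
    fmt.Println(getProviderUid("cluster2")) // a different 16-character value
}

Because the value is a hash rather than an allocated counter, each cluster presumably gets a stable provider UID without cross-cluster coordination; the controller only has to reconcile it into the ingress-uid configmap next to the existing uid key.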

--- File 1 of 5 (federation ingress controller) ---

@@ -17,6 +17,7 @@ limitations under the License.
 package ingress

 import (
+    "crypto/md5"
     "fmt"
     "sync"
     "time"
@@ -55,6 +56,7 @@ const (
     uidConfigMapName      = "ingress-uid" // Name of the config-map and key the ingress controller stores its uid in.
     uidConfigMapNamespace = "kube-system"
     uidKey                = "uid"
+    providerUidKey        = "provider-uid"
     // Annotation on the ingress in federation control plane that is used to keep
     // track of the first cluster in which we create ingress.
     // We wait for ingress to be created in this cluster before creating it any
@@ -272,7 +274,7 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
             glog.V(4).Infof("Attempting to update ConfigMap: %v", configMap)
             _, err := client.Core().ConfigMaps(configMap.Namespace).Update(configMap)
             if err == nil {
-                glog.V(4).Infof("Successfully updated ConfigMap %q", configMapName)
+                glog.V(4).Infof("Successfully updated ConfigMap %q %v", configMapName, configMap)
             } else {
                 glog.V(4).Infof("Failed to update ConfigMap %q: %v", configMapName, err)
             }
@@ -523,7 +525,10 @@ func (ic *IngressController) reconcileConfigMapForCluster(clusterName string) {
         uidConfigMapNamespacedName := types.NamespacedName{Name: uidConfigMapName, Namespace: uidConfigMapNamespace}
         configMapObj, found, err := ic.configMapFederatedInformer.GetTargetStore().GetByKey(cluster.Name, uidConfigMapNamespacedName.String())
         if !found || err != nil {
-            logmsg := fmt.Sprintf("Failed to get ConfigMap %q for cluster %q. Will try again later: %v", uidConfigMapNamespacedName, cluster.Name, err)
+            logmsg := fmt.Sprintf("Failed to get ConfigMap %q for cluster %q. Will try again later", uidConfigMapNamespacedName, cluster.Name)
+            if err != nil {
+                logmsg = fmt.Sprintf("%v: %v", logmsg, err)
+            }
             if len(ic.ingressInformerStore.List()) > 0 { // Error-level if ingresses are active, Info-level otherwise.
                 glog.Errorf(logmsg)
             } else {
@@ -543,6 +548,12 @@ func (ic *IngressController) reconcileConfigMapForCluster(clusterName string) {
     }
 }
+// getProviderUid returns a provider ID based on the provided clusterName.
+func getProviderUid(clusterName string) string {
+    hashedName := md5.Sum([]byte(clusterName))
+    return fmt.Sprintf("%x", hashedName[:8])
+}
+
 /*
   reconcileConfigMap ensures that the configmap in the cluster has a UID
   consistent with the federation cluster's associated annotation.
@@ -570,12 +581,29 @@ func (ic *IngressController) reconcileConfigMap(cluster *federationapi.Cluster,
     }
     if !clusterIngressUIDExists || clusterIngressUID == "" {
-        ic.updateClusterIngressUIDToMasters(cluster, configMapUID) // Second argument is the fallback, in case this is the only cluster, in which case it becomes the master
-        return
+        glog.V(4).Infof("Cluster %q is the only master", cluster.Name)
+        // Second argument is the fallback, in case this is the only cluster, in which case it becomes the master
+        var err error
+        if clusterIngressUID, err = ic.updateClusterIngressUIDToMasters(cluster, configMapUID); err != nil {
+            return
+        }
+        // If we successfully update the Cluster Object, fallthrough and update the configMap.
     }
-    if configMapUID != clusterIngressUID { // An update is required
-        glog.V(4).Infof("Ingress UID update is required: configMapUID %q not equal to clusterIngressUID %q", configMapUID, clusterIngressUID)
+
+    // Figure out providerUid.
+    providerUid := getProviderUid(cluster.Name)
+    configMapProviderUid := configMap.Data[providerUidKey]
+
+    if configMapUID == clusterIngressUID && configMapProviderUid == providerUid {
+        glog.V(4).Infof("Ingress configMap update is not required: UID %q and ProviderUid %q are equal", configMapUID, providerUid)
+    } else {
+        if configMapUID != clusterIngressUID {
+            glog.V(4).Infof("Ingress configMap update is required for UID: configMapUID %q not equal to clusterIngressUID %q", configMapUID, clusterIngressUID)
+        } else if configMapProviderUid != providerUid {
+            glog.V(4).Infof("Ingress configMap update is required: configMapProviderUid %q not equal to providerUid %q", configMapProviderUid, providerUid)
+        }
         configMap.Data[uidKey] = clusterIngressUID
+        configMap.Data[providerUidKey] = providerUid
         operations := []util.FederatedOperation{{
             Type: util.OperationTypeUpdate,
             Obj:  configMap,
@@ -588,8 +616,6 @@ func (ic *IngressController) reconcileConfigMap(cluster *federationapi.Cluster,
             glog.Errorf("Failed to execute update of ConfigMap %q on cluster %q: %v", configMapName, cluster.Name, err)
             ic.configMapDeliverer.DeliverAfter(cluster.Name, nil, ic.configMapReviewDelay)
         }
-    } else {
-        glog.V(4).Infof("Ingress UID update is not required: configMapUID %q is equal to clusterIngressUID %q", configMapUID, clusterIngressUID)
     }
 }
@@ -619,13 +645,13 @@ func (ic *IngressController) getMasterCluster() (master *federationapi.Cluster,
   updateClusterIngressUIDToMasters takes the ingress UID annotation on the master cluster and applies it to cluster.
   If there is no master cluster, then fallbackUID is used (and hence this cluster becomes the master).
 */
-func (ic *IngressController) updateClusterIngressUIDToMasters(cluster *federationapi.Cluster, fallbackUID string) {
+func (ic *IngressController) updateClusterIngressUIDToMasters(cluster *federationapi.Cluster, fallbackUID string) (string, error) {
     masterCluster, masterUID, err := ic.getMasterCluster()
     clusterObj, clusterErr := api.Scheme.DeepCopy(cluster) // Make a clone so that we don't clobber our input param
     cluster, ok := clusterObj.(*federationapi.Cluster)
     if clusterErr != nil || !ok {
         glog.Errorf("Internal error: Failed clone cluster resource while attempting to add master ingress UID annotation (%q = %q) from master cluster %q to cluster %q, will try again later: %v", uidAnnotationKey, masterUID, masterCluster.Name, cluster.Name, err)
-        return
+        return "", err
     }
     if err == nil {
         if masterCluster.Name != cluster.Name { // We're not the master, need to get in sync
@@ -635,12 +661,14 @@ func (ic *IngressController) updateClusterIngressUIDToMasters(cluster *federatio
             cluster.ObjectMeta.Annotations[uidAnnotationKey] = masterUID
             if _, err = ic.federatedApiClient.Federation().Clusters().Update(cluster); err != nil {
                 glog.Errorf("Failed to add master ingress UID annotation (%q = %q) from master cluster %q to cluster %q, will try again later: %v", uidAnnotationKey, masterUID, masterCluster.Name, cluster.Name, err)
-                return
+                return "", err
             } else {
                 glog.V(4).Infof("Successfully added master ingress UID annotation (%q = %q) from master cluster %q to cluster %q.", uidAnnotationKey, masterUID, masterCluster.Name, cluster.Name)
+                return masterUID, nil
             }
         } else {
             glog.V(4).Infof("Cluster %q with ingress UID is already the master with annotation (%q = %q), no need to update.", cluster.Name, uidAnnotationKey, cluster.ObjectMeta.Annotations[uidAnnotationKey])
+            return cluster.ObjectMeta.Annotations[uidAnnotationKey], nil
         }
     } else {
         glog.V(2).Infof("No master cluster found to source an ingress UID from for cluster %q. Attempting to elect new master cluster %q with ingress UID %q = %q", cluster.Name, cluster.Name, uidAnnotationKey, fallbackUID)
@@ -651,11 +679,14 @@ func (ic *IngressController) updateClusterIngressUIDToMasters(cluster *federatio
             cluster.ObjectMeta.Annotations[uidAnnotationKey] = fallbackUID
             if _, err = ic.federatedApiClient.Federation().Clusters().Update(cluster); err != nil {
                 glog.Errorf("Failed to add ingress UID annotation (%q = %q) to cluster %q. No master elected. Will try again later: %v", uidAnnotationKey, fallbackUID, cluster.Name, err)
+                return "", err
             } else {
                 glog.V(4).Infof("Successfully added ingress UID annotation (%q = %q) to cluster %q.", uidAnnotationKey, fallbackUID, cluster.Name)
+                return fallbackUID, nil
             }
         } else {
             glog.Errorf("No master cluster exists, and fallbackUID for cluster %q is invalid (%q). This probably means that no clusters have an ingress controller configmap with key %q. Federated Ingress currently supports clusters running Google Loadbalancer Controller (\"GLBC\")", cluster.Name, fallbackUID, uidKey)
+            return "", err
         }
     }
 }

--- File 2 of 5 (ingress controller unit test) ---

@@ -57,6 +57,7 @@ func TestIngressController(t *testing.T) {
     cluster2 := NewCluster("cluster2", apiv1.ConditionTrue)
     cfg1 := NewConfigMap("foo")
     cfg2 := NewConfigMap("bar") // Different UID from cfg1, so that we can check that they get reconciled.
+    assert.NotEqual(t, cfg1.Data[uidKey], cfg2.Data[uidKey], fmt.Sprintf("ConfigMap in cluster 2 must initially not equal that in cluster 1 for this test - please fix test"))

     t.Log("Creating fake infrastructure")
     fedClient := &fakefedclientset.Clientset{}
@@ -74,6 +75,7 @@ func TestIngressController(t *testing.T) {
     cluster1ConfigMapWatch := RegisterFakeWatch(configmaps, &cluster1Client.Fake)
     cluster1IngressCreateChan := RegisterFakeCopyOnCreate(ingresses, &cluster1Client.Fake, cluster1IngressWatch)
     cluster1IngressUpdateChan := RegisterFakeCopyOnUpdate(ingresses, &cluster1Client.Fake, cluster1IngressWatch)
+    cluster1ConfigMapUpdateChan := RegisterFakeCopyOnUpdate(configmaps, &cluster1Client.Fake, cluster1ConfigMapWatch)

     cluster2Client := &fakekubeclientset.Clientset{}
     RegisterFakeList(ingresses, &cluster2Client.Fake, &extensionsv1beta1.IngressList{Items: []extensionsv1beta1.Ingress{}})
@@ -138,11 +140,24 @@ func TestIngressController(t *testing.T) {
     assert.NotNil(t, cluster)
     assert.Equal(t, cluster.ObjectMeta.Annotations[uidAnnotationKey], cfg1.Data[uidKey])

+    t.Log("Adding cluster 2")
+    clusterWatch.Add(cluster2)
+    cluster2ConfigMapWatch.Add(cfg2)
+
+    t.Log("Checking that configmap updates are propagated prior to Federated Ingress")
+    referenceUid := cfg1.Data[uidKey]
+    uid1, providerId1 := GetConfigMapUidAndProviderId(t, cluster1ConfigMapUpdateChan)
+    uid2, providerId2 := GetConfigMapUidAndProviderId(t, cluster2ConfigMapUpdateChan)
+    t.Logf("uid2 = %v and ref = %v", uid2, referenceUid)
+    assert.True(t, referenceUid == uid1, "Expected cluster1 configmap uid %q to be equal to referenceUid %q", uid1, referenceUid)
+    assert.True(t, referenceUid == uid2, "Expected cluster2 configmap uid %q to be equal to referenceUid %q", uid2, referenceUid)
+    assert.True(t, providerId1 != providerId2, "Expected cluster1 providerUid %q to be unique and different from cluster2 providerUid %q", providerId1, providerId2)
+
     // Test add federated ingress.
     t.Log("Adding Federated Ingress")
     fedIngressWatch.Add(&fedIngress)
-    t.Logf("Checking that appropriate finalizers are added")
+    t.Log("Checking that appropriate finalizers are added")
     // There should be an update to add both the finalizers.
     updatedIngress := GetIngressFromChan(t, fedIngressUpdateChan)
     assert.True(t, ingressController.hasFinalizerFunc(updatedIngress, deletionhelper.FinalizerDeleteFromUnderlyingClusters))
@@ -243,9 +258,7 @@ func TestIngressController(t *testing.T) {
     fedIngress.Annotations[staticIPNameKeyWritable] = "foo" // Make sure that the base object has a static IP name first.
     fedIngressWatch.Modify(&fedIngress)
     clusterWatch.Add(cluster2)
-    // First check that the original values are not equal - see above comment
-    assert.NotEqual(t, cfg1.Data[uidKey], cfg2.Data[uidKey], fmt.Sprintf("ConfigMap in cluster 2 must initially not equal that in cluster 1 for this test - please fix test"))
-    cluster2ConfigMapWatch.Add(cfg2)
     t.Log("Checking that the ingress got created in cluster 2")
     createdIngress2 := GetIngressFromChan(t, cluster2IngressCreateChan)
     assert.NotNil(t, createdIngress2)
@@ -253,17 +266,22 @@ func TestIngressController(t *testing.T) {
     t.Logf("created meta: %v fed meta: %v", createdIngress2.ObjectMeta, fedIngress.ObjectMeta)
     assert.True(t, util.ObjectMetaEquivalent(fedIngress.ObjectMeta, createdIngress2.ObjectMeta), "Metadata of created object is not equivalent")
-    t.Log("Checking that the configmap in cluster 2 got updated.")
-    updatedConfigMap2 := GetConfigMapFromChan(cluster2ConfigMapUpdateChan)
-    assert.NotNil(t, updatedConfigMap2, fmt.Sprintf("ConfigMap in cluster 2 was not updated (or more likely the test is broken and the API type written is wrong)"))
-    if updatedConfigMap2 != nil {
-        assert.Equal(t, cfg1.Data[uidKey], updatedConfigMap2.Data[uidKey],
-            fmt.Sprintf("UID's in configmaps in cluster's 1 and 2 are not equal (%q != %q)", cfg1.Data["uid"], updatedConfigMap2.Data["uid"]))
-    }
     close(stop)
 }

+func GetConfigMapUidAndProviderId(t *testing.T, c chan runtime.Object) (string, string) {
+    updatedConfigMap := GetConfigMapFromChan(c)
+    assert.NotNil(t, updatedConfigMap, "ConfigMap should have received an update")
+    assert.NotNil(t, updatedConfigMap.Data, "ConfigMap data is empty")
+    for _, key := range []string{uidKey, providerUidKey} {
+        val, ok := updatedConfigMap.Data[key]
+        assert.True(t, ok, fmt.Sprintf("Didn't receive an update for key %v: %v", key, updatedConfigMap.Data))
+        assert.True(t, len(val) > 0, fmt.Sprintf("Received an empty update for key %v", key))
+    }
+    return updatedConfigMap.Data[uidKey], updatedConfigMap.Data[providerUidKey]
+}
+
 func GetIngressFromChan(t *testing.T, c chan runtime.Object) *extensionsv1beta1.Ingress {
     obj := GetObjectFromChan(c)

--- File 3 of 5 (Bazel BUILD file) ---

@@ -42,6 +42,7 @@ go_library(
         "//vendor:k8s.io/apimachinery/pkg/api/errors",
         "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
         "//vendor:k8s.io/apimachinery/pkg/util/intstr",
+        "//vendor:k8s.io/apimachinery/pkg/util/net",
        "//vendor:k8s.io/apimachinery/pkg/util/wait",
        "//vendor:k8s.io/client-go/rest",
        "//vendor:k8s.io/client-go/tools/clientcmd",

--- File 4 of 5 (federated ingress e2e test) ---

@@ -17,6 +17,7 @@ limitations under the License.
 package e2e_federation

 import (
+    "crypto/tls"
     "fmt"
     "net/http"
     "os"
@@ -28,11 +29,13 @@ import (
     "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/intstr"
+    utilnet "k8s.io/apimachinery/pkg/util/net"
     "k8s.io/apimachinery/pkg/util/wait"

     fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
     "k8s.io/kubernetes/pkg/api/v1"
     "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
     kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
     "k8s.io/kubernetes/test/e2e/framework"
     fedframework "k8s.io/kubernetes/test/e2e_federation/framework"
@@ -42,10 +45,67 @@ import (
 const (
     MaxRetriesOnFederatedApiserver = 3
-    FederatedIngressTimeout        = 120 * time.Second
+    FederatedIngressTimeout        = 10 * time.Minute
     FederatedIngressName           = "federated-ingress"
     FederatedIngressServiceName    = "federated-ingress-service"
+    FederatedIngressTLSSecretName  = "federated-ingress-tls-secret"
     FederatedIngressServicePodName = "federated-ingress-service-test-pod"
+    FederatedIngressHost           = "test-f8n.k8s.io."
+
+    // TLS Certificate and Key for the ingress resource
+    // Generated using:
+    //   $ openssl req -nodes -x509 -newkey rsa:2048 -keyout fedingtestkey.pem -out fedingtestcrt.pem -days 2485
+    // 2485 days is an arbitrary large number chosen below int32 seconds.
FederatedIngressTLSCrt = `-----BEGIN CERTIFICATE-----
MIIDaTCCAlGgAwIBAgIJANwsCbwxm9pyMA0GCSqGSIb3DQEBCwUAMEoxCzAJBgNV
BAYTAlVTMRMwEQYDVQQIDApTb21lLVN0YXRlMQswCQYDVQQKDAJOQTEZMBcGA1UE
AwwQdGVzdC1mOG4uazhzLmlvLjAgFw0xNjEyMTYwNjA1NDRaGA8yMDg1MDEwMzA2
MDU0NFowSjELMAkGA1UEBhMCVVMxEzARBgNVBAgMClNvbWUtU3RhdGUxCzAJBgNV
BAoMAk5BMRkwFwYDVQQDDBB0ZXN0LWY4bi5rOHMuaW8uMIIBIjANBgkqhkiG9w0B
AQEFAAOCAQ8AMIIBCgKCAQEAmsHYnLhqSeO1Q6SEjaiPiLUQV8tyGfttwNQiOT5u
ULz6ZWYA40m/1hhla9KH9sJZ515Iq+jTtiVH0rUjryT96SjxitLCAZlxVwQ63B50
aZF2T2OPSzvrmN+J6VGcRIq0N8fUeyp2WTIEdWlpQ7DTmDNArQqFSIvJndkLow3d
hec7O+PErnvZQQC9zqa23rGndDzlgDJ4HJGAQNm3uYVh5WHv+wziP67T/82bEGgO
A6EdDPWzpYxzAA1wsqz9lX5jitlbKdI56698fPR2KRelySf7OXVvZCS4/ED1lF4k
b7fQgtBhAWe1BkuAMUl7vdRjMps7nkxmBSuxBkVQ7sb5AwIDAQABo1AwTjAdBgNV
HQ4EFgQUjf53O/W/iE2mxuJkNjZGUfjJ9RUwHwYDVR0jBBgwFoAUjf53O/W/iE2m
xuJkNjZGUfjJ9RUwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEABE7B
bAiORDBA3qE5lh6JCs/lEfz93E/gOhD9oDnm9SRND4kjy7qeGxk4Wzsd/Vr+R2mi
EZ40d4MA/mCCPnYsNQoEXMFc8IvwAbzkhh2gqTNgG0/Ks0A1mIPQNpvUcSetS4IV
732DvB3nSnFtlzf6afw+V1Vf5ydRNuM/c9GEOOHSz+rs+9M364d+wNaFD64M72ol
iDMAdtcrhOqkQi0lUING904jlJcyYM5oVNCCtme4F8nkIX9bxP/9Ea6VhDGPeJiX
tVwZuudkoEbrFlEYbyLrbVeVa9oTf4Jn66iz49/+th+bUtEoTt9gk9Cul5TFgfzx
EscdahceC7afheq6zg==
-----END CERTIFICATE-----`
FederatedIngressTLSKey = `-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCawdicuGpJ47VD
pISNqI+ItRBXy3IZ+23A1CI5Pm5QvPplZgDjSb/WGGVr0of2wlnnXkir6NO2JUfS
tSOvJP3pKPGK0sIBmXFXBDrcHnRpkXZPY49LO+uY34npUZxEirQ3x9R7KnZZMgR1
aWlDsNOYM0CtCoVIi8md2QujDd2F5zs748Sue9lBAL3Oprbesad0POWAMngckYBA
2be5hWHlYe/7DOI/rtP/zZsQaA4DoR0M9bOljHMADXCyrP2VfmOK2Vsp0jnrr3x8
9HYpF6XJJ/s5dW9kJLj8QPWUXiRvt9CC0GEBZ7UGS4AxSXu91GMymzueTGYFK7EG
RVDuxvkDAgMBAAECggEAYrXGPqB6W0r88XpceibL9rzXAcjorJ3s8ZPdiHnDz4fa
hxa69j6yOBMzjcSpqMFqquM+ozhM4d+BomqbqjmEI1ZUSuIHkRGYc5JlIMXkJvn7
ZsPwQGKl8cqTotjFPgrizLmPVEhPWLFImsNzuxNsw6XdWQJe5VkUbrRkccqEQ8Wt
xwq/SlRercIMnRVLOOESq8EyjOY4yDgOdIifq9K9xiI8W6nMiPs0X5AcIJoTMbCe
cX0zUqW317awDWWP8u2GswwDDm4qPeWnXOrDkDx8Eo0dWJbmxw9su0XrM6KMvEMe
2a/Fy/enr5Cc6/jgsh3gO5sa8dJ1Cu+wexcoEbez8QKBgQDMXlXJu/C7djke94s3
vGxati7AGO95bBQHW+cPuN4l0rfPZ8YuUAWD4csW4BOlUPAOukreD/SKdanigR3N
FqVPeI8rXd5kzy8/lPIOGuSkkVEpKsAJ7prFbSUVKjVPYQk2dsOEeR0r7pr2FxC9
SBhVS/LgmPYh++iny9D0aU23hQKBgQDB2t55OE+00vgoauUc10LEY+J6tiwXuNm7
43JtrH5ET4N+TJ2BOUl5f88TY/3QuTu6vYwlxjyn+LFuWQNhShX6lFMjt5zqPTdw
ZPDA+9B6a45cV3YjXjRsYidpWj0D2lJgy0DbucC4f3eIhNGyFUbAQB9npKDzOeUh
7Z+p/Grg5wKBgGUnVCLzySWgUImJUPkXZDJJ9j3SmcVpv0gdLvLTN/FUqPIZlTgb
F3+9ZL4/zrmGpCtF/gSHtSxLLPkVm2CFkvEQ5Rw76/XNrr8zw9NDcGQcISXVKRRB
a43IhhBBwf02NE8m3YNWRyAVi9G+fOSTKKgfXWnZjAoqG2/iK9ytum/ZAoGAYlP8
KIxxkYy5Jvchg4GEck0f4ZJpxxaSCoWR0yN9YHTcg8Gk2pkONbyocnNzmN17+HqQ
jdCBj8nLZedsmXqUr2dwzFskEoQ+jJoGrDyOQKoxqZELcWElQhx/VSbacAvbYRF3
snwDzxGItgx4uNWl73oW8+FDalvhZ1Y6eGR6ad0CgYEAtlNa92Fbvd3r9O2mdyWe
D2SXNMi45+wsNafX2sdkyb+qNN6qZXC9ylUl9h0zdky88JNgtAOgxIaRIdEZajnD
/Zq17sTNtgpm53x16gOAgD8M+/wmBZxA+/IKfFCubuV77MbQoPfcjT5wBMRnFQnY
Ks7c+dzaRlgDKZ6v/L/8iZU=
-----END PRIVATE KEY-----`
 )

 const (
@@ -69,7 +129,7 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
         fedframework.SkipUnlessFederated(f.ClientSet)
         framework.SkipUnlessProviderIs("gce", "gke") // TODO: Federated ingress is not yet supported on non-GCP platforms.
         nsName := f.FederationNamespace.Name
-        ingress := createIngressOrFail(f.FederationClientset, nsName)
+        ingress := createIngressOrFail(f.FederationClientset, nsName, FederatedIngressServiceName, FederatedIngressTLSSecretName)
         By(fmt.Sprintf("Creation of ingress %q in namespace %q succeeded. Deleting ingress.", ingress.Name, nsName))
         // Cleanup
         err := f.FederationClientset.Extensions().Ingresses(nsName).Delete(ingress.Name, &metav1.DeleteOptions{})
@@ -84,6 +144,8 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
         clusters                               map[string]*cluster // All clusters, keyed by cluster name
         primaryClusterName, federationName, ns string
         jig                                    *federationTestJig
+        service                                *v1.Service
+        secret                                 *v1.Secret
     )

     // register clusters in federation apiserver
@@ -96,16 +158,38 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
         jig = newFederationTestJig(f.FederationClientset)
         clusters, primaryClusterName = getRegisteredClusters(UserAgentName, f)
         ns = f.FederationNamespace.Name
+        // create backend service
+        service = createServiceOrFail(f.FederationClientset, ns, FederatedIngressServiceName)
+        // create the TLS secret
+        secret = createTLSSecretOrFail(f.FederationClientset, ns, FederatedIngressTLSSecretName)
+        // wait for services objects sync
+        waitForServiceShardsOrFail(ns, service, clusters)
+        // wait for TLS secret sync
+        waitForSecretShardsOrFail(ns, secret, clusters)
     })

     AfterEach(func() {
         // Delete all ingresses.
         nsName := f.FederationNamespace.Name
         deleteAllIngressesOrFail(f.FederationClientset, nsName)
+        if secret != nil {
+            orphanDependents := false
+            deleteSecretOrFail(f.FederationClientset, ns, secret.Name, &orphanDependents)
+            secret = nil
+        } else {
+            By("No secret to delete. Secret is nil")
+        }
+        if service != nil {
+            deleteServiceOrFail(f.FederationClientset, ns, service.Name, nil)
+            cleanupServiceShardsAndProviderResources(ns, service, clusters)
+            service = nil
+        } else {
+            By("No service to delete. Service is nil")
+        }
     })

     It("should create and update matching ingresses in underlying clusters", func() {
-        ingress := createIngressOrFail(f.FederationClientset, ns)
+        ingress := createIngressOrFail(f.FederationClientset, ns, FederatedIngressServiceName, FederatedIngressTLSSecretName)
         // wait for ingress shards being created
         waitForIngressShardsOrFail(ns, ingress, clusters)
         ingress = updateIngressOrFail(f.FederationClientset, ns)
@@ -137,33 +221,18 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
     var _ = Describe("Ingress connectivity and DNS", func() {
-        var (
-            service *v1.Service
-        )
         BeforeEach(func() {
             fedframework.SkipUnlessFederated(f.ClientSet)
             // create backend pod
             createBackendPodsOrFail(clusters, ns, FederatedIngressServicePodName)
-            // create backend service
-            service = createServiceOrFail(f.FederationClientset, ns, FederatedIngressServiceName)
             // create ingress object
-            jig.ing = createIngressOrFail(f.FederationClientset, ns)
-            // wait for services objects sync
-            waitForServiceShardsOrFail(ns, service, clusters)
+            jig.ing = createIngressOrFail(f.FederationClientset, ns, service.Name, FederatedIngressTLSSecretName)
             // wait for ingress objects sync
             waitForIngressShardsOrFail(ns, jig.ing, clusters)
         })

         AfterEach(func() {
             deleteBackendPodsOrFail(clusters, ns)
-            if service != nil {
-                deleteServiceOrFail(f.FederationClientset, ns, service.Name, nil)
-                cleanupServiceShardsAndProviderResources(ns, service, clusters)
-                service = nil
-            } else {
-                By("No service to delete. Service is nil")
-            }
             if jig.ing != nil {
                 deleteIngressOrFail(f.FederationClientset, ns, jig.ing.Name, nil)
                 for clusterName, cluster := range clusters {
@@ -216,7 +285,7 @@ func equivalentIngress(federatedIngress, clusterIngress v1beta1.Ingress) bool {
 // underlying clusters when orphan dependents is false and they are not deleted
 // when orphan dependents is true.
 func verifyCascadingDeletionForIngress(clientset *fedclientset.Clientset, clusters map[string]*cluster, orphanDependents *bool, nsName string) {
-    ingress := createIngressOrFail(clientset, nsName)
+    ingress := createIngressOrFail(clientset, nsName, FederatedIngressServiceName, FederatedIngressTLSSecretName)
     ingressName := ingress.Name
     // Check subclusters if the ingress was created there.
     By(fmt.Sprintf("Waiting for ingress %s to be created in all underlying clusters", ingressName))
@@ -339,7 +408,7 @@ func deleteClusterIngressOrFail(clusterName string, clientset *kubeclientset.Cli
     framework.ExpectNoError(err, "Error deleting cluster ingress %q/%q from cluster %q", namespace, ingressName, clusterName)
 }

-func createIngressOrFail(clientset *fedclientset.Clientset, namespace string) *v1beta1.Ingress {
+func createIngressOrFail(clientset *fedclientset.Clientset, namespace, serviceName, secretName string) *v1beta1.Ingress {
     if clientset == nil || len(namespace) == 0 {
         Fail(fmt.Sprintf("Internal error: invalid parameters passed to createIngressOrFail: clientset: %v, namespace: %v", clientset, namespace))
     }
@@ -351,9 +420,14 @@ func createIngressOrFail(clientset *fedclientset.Clientset, namespace string) *v
         },
         Spec: v1beta1.IngressSpec{
             Backend: &v1beta1.IngressBackend{
-                ServiceName: "testingress-service",
+                ServiceName: serviceName,
                 ServicePort: intstr.FromInt(80),
             },
+            TLS: []v1beta1.IngressTLS{
+                {
+                    SecretName: secretName,
+                },
+            },
         },
     }
@@ -406,17 +480,42 @@ func (j *federationTestJig) waitForFederatedIngress() {
     }
     j.address = address
     framework.Logf("Found address %v for ingress %v", j.address, j.ing.Name)
-    timeoutClient := &http.Client{Timeout: reqTimeout}
-    // Check that all rules respond to a simple GET.
-    for _, rules := range j.ing.Spec.Rules {
-        proto := "http"
-        for _, p := range rules.IngressRuleValue.HTTP.Paths {
-            route := fmt.Sprintf("%v://%v%v", proto, address, p.Path)
-            framework.Logf("Testing route %v host %v with simple GET", route, rules.Host)
-            framework.ExpectNoError(framework.PollURL(route, rules.Host, framework.LoadBalancerPollTimeout, framework.LoadBalancerPollInterval, timeoutClient, false))
-        }
-    }
+    client := &http.Client{
+        // This is mostly `http.DefaultTransport` except for the
+        // `TLSClientConfig`.
+        Transport: utilnet.SetTransportDefaults(&http.Transport{
+            TLSClientConfig: &tls.Config{
+                InsecureSkipVerify: true,
+            },
+        }),
+        Timeout: reqTimeout,
+    }
+
+    // Verify that simple GET works.
+    route := fmt.Sprintf("https://%v", address)
+    framework.Logf("Testing route %v with simple GET", route)
+    framework.ExpectNoError(framework.PollURL(route, FederatedIngressHost, framework.LoadBalancerPollTimeout, framework.LoadBalancerPollInterval, client, false))
+}
+
+func createTLSSecretOrFail(clientset *fedclientset.Clientset, namespace, secretName string) *v1.Secret {
+    if clientset == nil || len(namespace) == 0 {
+        framework.Logf("Internal error: invalid parameters passed to createTLSSecretOrFail: clientset: %v, namespace: %v", clientset, namespace)
+    }
+    secret := &v1.Secret{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: secretName,
+        },
+        Type: v1.SecretTypeOpaque,
+        Data: map[string][]byte{
+            "tls.crt": []byte(FederatedIngressTLSCrt),
+            "tls.key": []byte(FederatedIngressTLSKey),
+        },
+    }
+    By(fmt.Sprintf("Creating federated secret %q in namespace %q", secretName, namespace))
+    newSecret, err := clientset.Core().Secrets(namespace).Create(secret)
+    framework.ExpectNoError(err, "creating secret %q in namespace %q", secret.Name, namespace)
+    return newSecret
 }

 type federationTestJig struct {

--- File 5 of 5 (federation e2e test utilities) ---

@@ -48,6 +48,10 @@ var (
     UserAgentName = "federation-e2e"
     // We use this to decide how long to wait for our DNS probes to succeed.
     DNSTTL = 180 * time.Second // TODO: make k8s.io/kubernetes/federation/pkg/federation-controller/service.minDnsTtl exported, and import it here.
+    // FederatedSvcNodePort is the node port on which the federated
+    // service shards are exposed in the underlying cluster.
+    FederatedSvcNodePort = int32(32256)
 )

 var FederationSuite common.Suite
@@ -259,6 +263,7 @@ func createService(clientset *fedclientset.Clientset, namespace, name string) (*
                 Protocol:   v1.ProtocolTCP,
                 Port:       80,
                 TargetPort: intstr.FromInt(8080),
+                NodePort:   FederatedSvcNodePort,
             },
         },
         SessionAffinity: v1.ServiceAffinityNone,
@@ -279,10 +284,11 @@ func deleteServiceOrFail(clientset *fedclientset.Clientset, namespace string, se
     if clientset == nil || len(namespace) == 0 || len(serviceName) == 0 {
         Fail(fmt.Sprintf("Internal error: invalid parameters passed to deleteServiceOrFail: clientset: %v, namespace: %v, service: %v", clientset, namespace, serviceName))
     }
+    framework.Logf("Deleting service %q in namespace %v", serviceName, namespace)
     err := clientset.Services(namespace).Delete(serviceName, &metav1.DeleteOptions{OrphanDependents: orphanDependents})
     framework.ExpectNoError(err, "Error deleting service %q from namespace %q", serviceName, namespace)
     // Wait for the service to be deleted.
-    err = wait.Poll(5*time.Second, 3*wait.ForeverTestTimeout, func() (bool, error) {
+    err = wait.Poll(5*time.Second, 10*wait.ForeverTestTimeout, func() (bool, error) {
         _, err := clientset.Core().Services(namespace).Get(serviceName, metav1.GetOptions{})
         if err != nil && errors.IsNotFound(err) {
             return true, nil