From 122dccbffabc351f60b767f86d7984aa8981ac5a Mon Sep 17 00:00:00 2001 From: Prashanth Balasubramanian Date: Sat, 24 Oct 2015 18:51:25 -0700 Subject: [PATCH 1/2] Kubectl describe --- pkg/kubectl/describe.go | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/pkg/kubectl/describe.go b/pkg/kubectl/describe.go index 3f13ec4f9a..64c95481b2 100644 --- a/pkg/kubectl/describe.go +++ b/pkg/kubectl/describe.go @@ -89,6 +89,7 @@ func expDescriberMap(c *client.Client) map[string]Describer { "DaemonSet": &DaemonSetDescriber{c}, "Job": &JobDescriber{c}, "Deployment": &DeploymentDescriber{c}, + "Ingress": &IngressDescriber{c}, } } @@ -996,6 +997,43 @@ func describeSecret(secret *api.Secret) (string, error) { }) } +type IngressDescriber struct { + client.Interface +} + +func (i *IngressDescriber) Describe(namespace, name string) (string, error) { + c := i.Extensions().Ingress(namespace) + ing, err := c.Get(name) + if err != nil { + return "", err + } + events, _ := i.Events(namespace).Search(ing) + return describeIngress(ing, events) +} + +func describeIngress(ing *extensions.Ingress, events *api.EventList) (string, error) { + return tabbedString(func(out io.Writer) error { + describeIngressAnnotations(out, ing.Annotations) + if events != nil { + DescribeEvents(events, out) + } + return nil + }) +} + +// TODO: Move from annotations into Ingress status. +func describeIngressAnnotations(out io.Writer, annotations map[string]string) { + for k, v := range annotations { + if !strings.HasPrefix(k, "ingress") { + continue + } + parts := strings.Split(k, "/") + name := parts[len(parts)-1] + fmt.Fprintf(out, "%v:\t%s\n", name, v) + } + return +} + // ServiceDescriber generates information about a service. type ServiceDescriber struct { client.Interface From 962cc11f735b4a0777c42fdc2816210be668643f Mon Sep 17 00:00:00 2001 From: Prashanth Balasubramanian Date: Thu, 22 Oct 2015 23:11:34 -0700 Subject: [PATCH 2/2] Cluster-loadbalancing addon --- .../cluster-loadbalancing/MAINTAINERS.md | 6 + .../cluster-loadbalancing/glbc/README.md | 104 ++++++++++++++++++ .../glbc/default-svc.yaml | 21 ++++ .../glbc/glbc-controller.yaml | 66 +++++++++++ cluster/gce/config-default.sh | 4 + cluster/gce/config-test.sh | 4 + cluster/gce/configure-vm.sh | 1 + cluster/gce/debian/helper.sh | 1 + cluster/gke/config-default.sh | 8 +- cluster/saltbase/salt/kube-addons/init.sls | 11 ++ 10 files changed, 224 insertions(+), 2 deletions(-) create mode 100644 cluster/addons/cluster-loadbalancing/MAINTAINERS.md create mode 100644 cluster/addons/cluster-loadbalancing/glbc/README.md create mode 100644 cluster/addons/cluster-loadbalancing/glbc/default-svc.yaml create mode 100644 cluster/addons/cluster-loadbalancing/glbc/glbc-controller.yaml diff --git a/cluster/addons/cluster-loadbalancing/MAINTAINERS.md b/cluster/addons/cluster-loadbalancing/MAINTAINERS.md new file mode 100644 index 0000000000..12a16b2004 --- /dev/null +++ b/cluster/addons/cluster-loadbalancing/MAINTAINERS.md @@ -0,0 +1,6 @@ +# Maintainers + +Prashanth.B + + +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/cluster-loadbalancing/MAINTAINERS.md?pixel)]() diff --git a/cluster/addons/cluster-loadbalancing/glbc/README.md b/cluster/addons/cluster-loadbalancing/glbc/README.md new file mode 100644 index 0000000000..f79f01187c --- /dev/null +++ b/cluster/addons/cluster-loadbalancing/glbc/README.md @@ -0,0 +1,104 @@ +# GCE Load-Balancer Controller (GLBC) Cluster Addon + +This cluster addon is composed of: +* A 
[Google L7 LoadBalancer Controller](https://github.com/kubernetes/contrib/tree/master/Ingress/controllers/gce)
* A [404 default backend](https://github.com/kubernetes/contrib/tree/master/404-server) Service + RC

It relies on the [Ingress resource](../../../../docs/user-guide/ingress.md), which is only available in Kubernetes version 1.1 and beyond.

## Quota

GLBC is not aware of your GCE quota. As of this writing users get 3 [GCE Backend Services](https://cloud.google.com/compute/docs/load-balancing/http/backend-service) by default. If you plan on creating Ingresses for multiple Kubernetes Services, remember that each one requires a backend service, and request additional quota accordingly. Should you fail to do so, the controller will poll periodically and grab the first free backend service slot it finds. You can view your quota:

```console
$ gcloud compute project-info describe --project myproject
```
See [GCE documentation](https://cloud.google.com/compute/docs/resource-quotas#checking_your_quota) for how to request more.
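
If you want to check how many backend services are already in use before requesting more quota, you can also list them directly. This is just a quick sketch; the exact flags and output columns depend on your gcloud version:

```console
$ gcloud compute backend-services list --project myproject
```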

## Latency

It takes ~1m to spin up a loadbalancer (this includes acquiring the public IP), and ~5-6m before the GCE API starts health-checking backends. So as far as latency goes, here's what to expect:

Assume one creates the following simple Ingress:
```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: test-ingress
spec:
  backend:
    # This will just loop back to the default backend of GLBC
    serviceName: default-http-backend
    servicePort: 80
```

* time, t=0
```console
$ kubectl get ing
NAME RULE BACKEND ADDRESS
test-ingress - default-http-backend:80
$ kubectl describe ing
No events.
```

* time, t=1m
```console
$ kubectl get ing
NAME RULE BACKEND ADDRESS
test-ingress - default-http-backend:80 130.211.5.27

$ kubectl describe ing
target-proxy: k8s-tp-default-test-ingress
url-map: k8s-um-default-test-ingress
backends: {"k8s-be-32342":"UNKNOWN"}
forwarding-rule: k8s-fw-default-test-ingress
Events:
  FirstSeen LastSeen Count From SubobjectPath Reason Message
  ───────── ──────── ───── ──── ───────────── ────── ───────
  46s 46s 1 {loadbalancer-controller } Success Created loadbalancer 130.211.5.27
```

* time, t=5m
```console
$ kubectl describe ing
target-proxy: k8s-tp-default-test-ingress
url-map: k8s-um-default-test-ingress
backends: {"k8s-be-32342":"HEALTHY"}
forwarding-rule: k8s-fw-default-test-ingress
Events:
  FirstSeen LastSeen Count From SubobjectPath Reason Message
  ───────── ──────── ───── ──── ───────────── ────── ───────
  46s 46s 1 {loadbalancer-controller } Success Created loadbalancer 130.211.5.27

```

## Disabling GLBC

Since GLBC runs as a cluster addon, you cannot simply delete the RC. The easiest way to disable it is to do as follows:

* IFF you want to tear down existing L7 loadbalancers, hit the /delete-all-and-quit endpoint on the pod:

```console
$ kubectl get pods --namespace=kube-system
NAME READY STATUS RESTARTS AGE
l7-lb-controller-7bb21 1/1 Running 0 1h
$ kubectl exec l7-lb-controller-7bb21 -c l7-lb-controller curl http://localhost:8081/delete-all-and-quit --namespace=kube-system
$ kubectl logs l7-lb-controller-7bb21 -c l7-lb-controller --follow
...
I1007 00:30:00.322528 1 main.go:160] Handled quit, awaiting pod deletion.
```

* Nullify the RC (but don't delete it or the addon controller will "fix" it for you):
```console
$ kubectl scale rc l7-lb-controller --replicas=0 --namespace=kube-system
```
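
To bring the controller back later, scaling the RC back up should be sufficient, since the addon manifests are still present on the master:

```console
$ kubectl scale rc l7-lb-controller --replicas=1 --namespace=kube-system
```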

## Limitations

* This cluster addon is still in the Beta phase. It behooves you to read through the GLBC documentation mentioned above and make sure there are no surprises.
* The recommended way to tear down a cluster with active Ingresses is to either delete each Ingress, or hit the /delete-all-and-quit endpoint on GLBC as described above, before invoking a cluster teardown script (e.g. kube-down.sh). You will have to manually clean up GCE resources through the [cloud console](https://cloud.google.com/compute/docs/console#access) or [gcloud CLI](https://cloud.google.com/compute/docs/gcloud-compute/) if you simply tear down the cluster with active Ingresses.
* All L7 Loadbalancers created by GLBC have a default backend. If you don't specify one in your Ingress, GLBC will assign the 404 default backend mentioned above (see the example after this list).
* All Kubernetes services must serve a 200 page on '/', or whatever custom value you've specified through GLBC's `--health-check-path` argument.
* GLBC is not built for performance. Creating many Ingresses at a time can overwhelm it. It won't fall over, but it will take its own time to churn through the Ingress queue. It doesn't understand concepts like fairness or backoff just yet.
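
To illustrate the default backend behavior, here is a sketch of an Ingress that only defines a host rule (the `echo` Service and hostname are placeholders, not part of this addon). Because it specifies no `spec.backend`, GLBC will route any request that doesn't match the rule to the 404 default backend described above:

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: echo-ingress
spec:
  rules:
  - host: echo.example.com
    http:
      paths:
      - path: /echo
        backend:
          # Hypothetical NodePort Service that serves 200 on /
          serviceName: echo
          servicePort: 80
```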

[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/cluster-loadbalancing/glbc/README.md?pixel)]()
diff --git a/cluster/addons/cluster-loadbalancing/glbc/default-svc.yaml b/cluster/addons/cluster-loadbalancing/glbc/default-svc.yaml
new file mode 100644
index 0000000000..1bd59320db
--- /dev/null
+++ b/cluster/addons/cluster-loadbalancing/glbc/default-svc.yaml
@@ -0,0 +1,21 @@
apiVersion: v1
kind: Service
metadata:
  # This must match the --default-backend-service argument of the l7 lb
  # controller and is required because GCE mandates a default backend.
  name: default-http-backend
  namespace: kube-system
  labels:
    k8s-app: glbc
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "GLBCDefaultBackend"
spec:
  # The default backend must be of type NodePort.
  type: NodePort
  ports:
  - port: 80
    targetPort: 8080
    protocol: TCP
    name: http
  selector:
    k8s-app: glbc
diff --git a/cluster/addons/cluster-loadbalancing/glbc/glbc-controller.yaml b/cluster/addons/cluster-loadbalancing/glbc/glbc-controller.yaml
new file mode 100644
index 0000000000..ad76d47e9d
--- /dev/null
+++ b/cluster/addons/cluster-loadbalancing/glbc/glbc-controller.yaml
@@ -0,0 +1,66 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: l7-lb-controller
  namespace: kube-system
  labels:
    k8s-app: glbc
    version: v0.5
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "GLBC"
spec:
  # There should never be more than 1 controller alive simultaneously.
  replicas: 1
  selector:
    k8s-app: glbc
    version: v0.5
  template:
    metadata:
      labels:
        k8s-app: glbc
        version: v0.5
        name: glbc
        kubernetes.io/cluster-service: "true"
    spec:
      terminationGracePeriodSeconds: 600
      containers:
      - name: default-http-backend
        # Any image is permissible as long as:
        # 1. It serves a 404 page at /
        # 2. It serves 200 on a /healthz endpoint
        image: gcr.io/google_containers/defaultbackend:1.0
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
        ports:
        - containerPort: 8080
        resources:
          limits:
            cpu: 10m
            memory: 20Mi
          requests:
            cpu: 10m
            memory: 20Mi
      - image: gcr.io/google_containers/glbc:0.5
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8081
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
        name: l7-lb-controller
        resources:
          limits:
            cpu: 100m
            memory: 100Mi
          requests:
            cpu: 100m
            memory: 50Mi
        args:
        - --default-backend-service=kube-system/default-http-backend
        - --sync-period=300s
diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh
index d5d18e7abd..2b1f5bbd81 100755
--- a/cluster/gce/config-default.sh
+++ b/cluster/gce/config-default.sh
@@ -52,6 +52,10 @@ POLL_SLEEP_INTERVAL="${POLL_SLEEP_INTERVAL:-3}"
SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/16}" # formerly PORTAL_NET
ALLOCATE_NODE_CIDRS=true
+# Optional: Deploy a L7 loadbalancer controller to fulfill Ingress requests:
+# glbc - GCE L7 Load Balancer Controller
+ENABLE_L7_LOADBALANCING="${KUBE_ENABLE_L7_LOADBALANCING:-glbc}"
+
# Optional: Cluster monitoring to setup as part of the cluster bring up:
# none - No cluster monitoring setup
# influxdb - Heapster, InfluxDB, and Grafana
diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh
index 32cab384e8..8cc79730be 100755
--- a/cluster/gce/config-test.sh
+++ b/cluster/gce/config-test.sh
@@ -53,6 +53,10 @@ TERMINATED_POD_GC_THRESHOLD=${TERMINATED_POD_GC_THRESHOLD:-100}
POLL_SLEEP_INTERVAL=3
SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
+# Optional: Deploy a L7 loadbalancer controller to fulfill Ingress requests:
+# glbc - GCE L7 Load Balancer Controller
+ENABLE_L7_LOADBALANCING="${KUBE_ENABLE_L7_LOADBALANCING:-glbc}"
+
# Optional: Cluster monitoring to setup as part of the cluster bring up:
# none - No cluster monitoring setup
# influxdb - Heapster, InfluxDB, and Grafana
diff --git a/cluster/gce/configure-vm.sh b/cluster/gce/configure-vm.sh
index e1055c8b18..a8ee4d0a64 100755
--- a/cluster/gce/configure-vm.sh
+++ b/cluster/gce/configure-vm.sh
@@ -268,6 +268,7 @@ service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g
enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'
enable_cluster_ui: '$(echo "$ENABLE_CLUSTER_UI" | sed -e "s/'/''/g")'
+enable_l7_loadbalancing: '$(echo "$ENABLE_L7_LOADBALANCING" | sed -e "s/'/''/g")'
enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
diff --git a/cluster/gce/debian/helper.sh b/cluster/gce/debian/helper.sh
index 82794f0c70..964ff4dfee 100755
--- a/cluster/gce/debian/helper.sh
+++ b/cluster/gce/debian/helper.sh
@@ -35,6 +35,7 @@ SERVICE_CLUSTER_IP_RANGE: $(yaml-quote ${SERVICE_CLUSTER_IP_RANGE})
KUBERNETES_MASTER_NAME: $(yaml-quote ${MASTER_NAME})
ALLOCATE_NODE_CIDRS: $(yaml-quote ${ALLOCATE_NODE_CIDRS:-false})
ENABLE_CLUSTER_MONITORING: $(yaml-quote ${ENABLE_CLUSTER_MONITORING:-none})
+ENABLE_L7_LOADBALANCING: $(yaml-quote ${ENABLE_L7_LOADBALANCING:-none})
ENABLE_CLUSTER_LOGGING: $(yaml-quote ${ENABLE_CLUSTER_LOGGING:-false})
ENABLE_CLUSTER_UI: $(yaml-quote ${ENABLE_CLUSTER_UI:-false})
ENABLE_NODE_LOGGING: $(yaml-quote ${ENABLE_NODE_LOGGING:-false})
diff --git a/cluster/gke/config-default.sh b/cluster/gke/config-default.sh
index 4692882cf6..5d5df61a8f 100644
--- a/cluster/gke/config-default.sh
+++ b/cluster/gke/config-default.sh
@@ -30,9 +30,13 @@ LOGGING_DESTINATION=gcp # options: elasticsearch, gcp
ENABLE_CLUSTER_LOGGING=false
ELASTICSEARCH_LOGGING_REPLICAS=1
+# Optional: Deploy a L7 loadbalancer controller to fulfill Ingress requests:
+# glbc - GCE L7 Load Balancer Controller
+ENABLE_L7_LOADBALANCING="${KUBE_ENABLE_L7_LOADBALANCING:-glbc}"
+
# Optional: Cluster monitoring to setup as part of the cluster bring up:
-# none - No cluster monitoring setup
-# influxdb - Heapster, InfluxDB, and Grafana
+# none - No cluster monitoring setup
+# influxdb - Heapster, InfluxDB, and Grafana
# google - Heapster, Google Cloud Monitoring, and Google Cloud Logging
# standalone - Heapster only. Metrics available via Heapster REST API.
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-standalone}"
diff --git a/cluster/saltbase/salt/kube-addons/init.sls b/cluster/saltbase/salt/kube-addons/init.sls
index 2630e42db2..16c5f21da2 100644
--- a/cluster/saltbase/salt/kube-addons/init.sls
+++ b/cluster/saltbase/salt/kube-addons/init.sls
@@ -30,6 +30,17 @@ addon-dir-create:
    - file_mode: 644
{% endif %}
+{% if pillar.get('enable_l7_loadbalancing', '').lower() == 'glbc' %}
+/etc/kubernetes/addons/cluster-loadbalancing/glbc:
+  file.recurse:
+    - source: salt://kube-addons/cluster-loadbalancing/glbc
+    - include_pat: E@(^.+\.yaml$|^.+\.json$)
+    - user: root
+    - group: root
+    - dir_mode: 755
+    - file_mode: 644
+{% endif %}
+
{% if pillar.get('enable_cluster_monitoring', '').lower() == 'google' %}
/etc/kubernetes/addons/cluster-monitoring/google:
  file.recurse: