mirror of https://github.com/k3s-io/k3s
remove service-loadbalancer since it lives in contrib/ now
parent eed655a6e6
commit 37bb279fae
@ -1,40 +0,0 @@
# Copyright 2015 The Kubernetes Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM ubuntu:14.04
MAINTAINER Prashanth B <beeps@google.com>

# so apt-get doesn't complain
ENV DEBIAN_FRONTEND=noninteractive
RUN sed -i 's/^exit 101/exit 0/' /usr/sbin/policy-rc.d

# TODO: Move to using haproxy:1.5 image instead. Honestly,
# that image isn't much smaller and the convenience of having
# an ubuntu container for dev purposes trumps the tiny amounts
# of disk and bandwidth we'd save in doing so.
RUN \
  apt-get update && \
  apt-get install -y haproxy && \
  sed -i 's/^ENABLED=.*/ENABLED=1/' /etc/default/haproxy && \
  rm -rf /var/lib/apt/lists/*

ADD haproxy.cfg /etc/haproxy/haproxy.cfg
ADD service_loadbalancer service_loadbalancer
ADD service_loadbalancer.go service_loadbalancer.go
ADD template.cfg template.cfg
ADD loadbalancer.json loadbalancer.json
ADD haproxy_reload haproxy_reload
ADD README.md README.md
ENTRYPOINT ["/service_loadbalancer"]

@ -1,17 +0,0 @@
all: push

# 0.0 shouldn't clobber any released builds
TAG = 0.0
PREFIX = gcr.io/google_containers/servicelb

server: service_loadbalancer.go
	CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-w' -o service_loadbalancer ./service_loadbalancer.go

container: server
	docker build -t $(PREFIX):$(TAG) .

push: container
	gcloud docker push $(PREFIX):$(TAG)

clean:
	rm -f service_loadbalancer

@ -1,308 +0,0 @@
# Bare Metal Service Load Balancers

AKA "how to set up a bank of haproxy for platforms that don't have load balancers".

## Disclaimer:
- This is a **work in progress**.
- A better way to achieve this will probably emerge once discussions on (#260, #561) converge.
- Backends are pluggable, but [Haproxy](https://cbonte.github.io/haproxy-dconv/configuration-1.5.html) is the only load balancer with a working implementation.
- I have never deployed haproxy to production, so contributions are welcome (see [wishlist](#wishlist) for ideas).
- For fault-tolerant load balancing of ingress traffic, you need:
  1. Multiple hosts running load balancers
  2. Multiple A records for each hostname in a DNS service.

  This module will not help with the latter.

## Overview

### Ingress

There are 2 ways to expose a service to ingress traffic in the current kubernetes service model:

- Create a cloud load balancer.
- Allocate a port (the same port) on every node in your cluster and proxy ingress traffic through that port to the endpoints.

The service-loadbalancer aims to give you option 1 on bare metal, making option 2 unnecessary for the common case. The replication controller manifest in this directory creates a service-loadbalancer pod on all nodes with the `role=loadbalancer` label. Each service-loadbalancer pod contains:
- A load balancer controller that watches the kubernetes api for services and endpoints.
- A load balancer manifest. This is used to bootstrap the load balancer. The load balancer itself is pluggable, so you can easily swap haproxy for something like [f5](https://f5.com/glossary/load-balancer) or [pound](http://www.apsis.ch/pound).
- A template used to write load balancer rules. This is tied to the load balancer used in the manifest, since each one has a different config format.

__L7 load balancing of HTTP services__: The load balancer controller automatically exposes http services to ingress traffic on all nodes with a `role=loadbalancer` label. It assumes all services are http unless otherwise instructed. Each http service gets a load balancer forwarding rule, such that requests received on `http://loadbalancer-node/serviceName:port` are balanced between its endpoints according to the algorithm specified in the loadbalancer.json manifest. You do not need more than a single loadbalancer pod to balance across all your http services (you can scale the rc to increase capacity).

__L4 load balancing of TCP services__: Since one needs to specify ports at pod creation time (kubernetes doesn't currently support port ranges), a single loadbalancer is tied to a set of preconfigured node ports, and hence to a set of TCP services it can expose. The load balancer controller will dynamically add rules for each configured TCP service as it pops into existence. However, each "new" service (one not listed in the tcpServices section of loadbalancer.json) will need you to open up a new container-host port pair for traffic. You can achieve this by creating a new loadbalancer pod with the `targetPort` set to the name of your service, and that service specified in the tcpServices map of the new loadbalancer.
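To make the http/tcp split concrete, here is a minimal sketch of the classification the controller performs for each service port. The `name:port` rule naming and the `tcpServices` map mirror the controller code in this directory; the `svcPort` type and the example values are simplified for illustration:

```go
package main

import "fmt"

// svcPort is a simplified view of a kubernetes service port.
type svcPort struct {
	ServiceName string
	Port        int
}

// ruleName mirrors getServiceNameForLBRule: :80 services are reachable at
// /serviceName, everything else at /serviceName:port.
func ruleName(s svcPort) string {
	if s.Port == 80 {
		return s.ServiceName
	}
	return fmt.Sprintf("%v:%v", s.ServiceName, s.Port)
}

func main() {
	// Parsed from --tcp-services=mysql:3306,nginxsvc:443
	tcpServices := map[string]int{"mysql": 3306, "nginxsvc": 443}

	for _, s := range []svcPort{{"nginxsvc", 80}, {"nginxsvc", 443}, {"mysql", 3306}} {
		if port, ok := tcpServices[s.ServiceName]; ok && port == s.Port {
			// TCP services get their own frontend on the configured port.
			fmt.Printf("tcp  frontend :%d -> backend %s\n", port, ruleName(s))
		} else {
			// Everything else is multiplexed on :80 by URL path.
			fmt.Printf("http frontend :80 path /%s -> backend %s\n", ruleName(s), ruleName(s))
		}
	}
}
```
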
### Cross-cluster loadbalancing

On cloud providers that offer a private ip range for all instances on a network, you can set up multiple clusters in different availability zones, on the same network, and loadbalance services across these zones. On GCE for example, every instance is a member of a single network. A network performs the same function that a router does: it defines the network range and gateway IP address, handles communication between instances, and serves as a gateway between instances and other networks. On such networks the endpoints of a service in one cluster are visible in all other clusters in the same network, so you can set up an edge loadbalancer that watches a kubernetes master of another cluster for services. Such a deployment allows you to fall back to a different AZ during times of duress or planned downtime (eg: database update).

### Examples

Initial cluster state:
```console
$ kubectl get svc --all-namespaces -o yaml | grep -i "selfLink"
    selfLink: /api/v1/namespaces/default/services/kubernetes
    selfLink: /api/v1/namespaces/default/services/nginxsvc
    selfLink: /api/v1/namespaces/kube-system/services/elasticsearch-logging
    selfLink: /api/v1/namespaces/kube-system/services/kibana-logging
    selfLink: /api/v1/namespaces/kube-system/services/kube-dns
    selfLink: /api/v1/namespaces/kube-system/services/kube-ui
    selfLink: /api/v1/namespaces/kube-system/services/monitoring-grafana
    selfLink: /api/v1/namespaces/kube-system/services/monitoring-heapster
    selfLink: /api/v1/namespaces/kube-system/services/monitoring-influxdb
```
These are all the [cluster addon](../../cluster/addons) services in `namespace=kube-system`.

#### Create a loadbalancer
* Loadbalancers are created via a ReplicationController.
* Load balancers will only run on nodes with the `role=loadbalancer` label.
```console
$ kubectl create -f ./rc.yaml
replicationcontrollers/service-loadbalancer
$ kubectl get pods -l app=service-loadbalancer
NAME                         READY     STATUS    RESTARTS   AGE
service-loadbalancer-dapxv   0/2       Pending   0          1m
$ kubectl describe pods -l app=service-loadbalancer
Events:
  FirstSeen                        From          Reason            Message
  Tue, 21 Jul 2015 11:19:22 -0700  {scheduler }  failedScheduling  Failed for reason MatchNodeSelector and possibly others
```

Notice that the pod hasn't started because the scheduler is waiting for you to tell it which nodes to use as a load balancer.

```console
$ kubectl label node e2e-test-beeps-minion-c9up role=loadbalancer
NAME                         LABELS                                                                 STATUS
e2e-test-beeps-minion-c9up   kubernetes.io/hostname=e2e-test-beeps-minion-c9up,role=loadbalancer   Ready
```
#### Expose services
Your kube-ui should be publicly accessible once the loadbalancer created in the previous step is in `Running` (if you're on a cloud provider, you need to create firewall rules for :80).
```console
$ kubectl get nodes e2e-test-beeps-minion-c9up -o json | grep -i externalip -A 1
            "type": "ExternalIP",
            "address": "104.197.63.17"
$ curl http://104.197.63.17/kube-ui
```

#### HTTP
You can use the [https-nginx](../../examples/https-nginx) example to create some new HTTP/HTTPS services.

```console
$ cd ../../examples/https-nginx
$ make keys secret KEY=/tmp/nginx.key CERT=/tmp/nginx.crt SECRET=/tmp/secret.json
$ kubectl create -f /tmp/secret.json
$ kubectl get secrets
NAME                  TYPE                                  DATA
default-token-vklfs   kubernetes.io/service-account-token   2
nginxsecret           Opaque                                2
```

Let's introduce a small twist. The nginx-app example exposes the nginx service using `NodePort`, which means it opens up a random port on every node in your cluster and exposes the service on that. Delete the `type: NodePort` line before creating it.

```console
$ kubectl create -f nginx-app.yaml
$ kubectl get svc
NAME         LABELS                                    SELECTOR    IP(S)         PORT(S)
kubernetes   component=apiserver,provider=kubernetes   <none>      10.0.0.1      443/TCP
nginxsvc     app=nginx                                 app=nginx   10.0.79.131   80/TCP
                                                                                 443/TCP
$ curl http://104.197.63.17/nginxsvc
```

#### HTTPS
HTTPS services are handled at L4 (see [wishlist](#wishlist)):
```console
$ curl https://104.197.63.17:8080 -k
```

A couple of points to note:
- The nginxsvc is specified in the tcpServices of the loadbalancer.json manifest.
- The https service is accessible directly on the specified port, which matches the *service port*.
- You need to take care of ensuring there is no collision between these service ports on the node.
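That last caveat is easy to automate. A hypothetical helper (not part of this controller; the function and names are illustrative only) that rejects a tcpServices map whose host ports clash might look like:

```go
package main

import (
	"fmt"
	"sort"
)

// checkTCPPortCollisions is a hypothetical helper: given the service->port map
// passed via --tcp-services, it reports any host port claimed by more than one
// service, since each tcp service needs its own hostPort on the node.
func checkTCPPortCollisions(tcpServices map[string]int) error {
	byPort := map[int][]string{}
	for name, port := range tcpServices {
		byPort[port] = append(byPort[port], name)
	}
	for port, names := range byPort {
		if len(names) > 1 {
			sort.Strings(names)
			return fmt.Errorf("port %d is claimed by multiple services: %v", port, names)
		}
	}
	return nil
}

func main() {
	// Same shape as --tcp-services=mysql:3306,nginxsvc:443
	if err := checkTCPPortCollisions(map[string]int{"mysql": 3306, "nginxsvc": 443}); err != nil {
		fmt.Println(err)
	} else {
		fmt.Println("no collisions")
	}
}
```
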
#### TCP

```yaml
$ cat mysql-app.yaml
apiVersion: v1
kind: Pod
metadata:
  name: mysql
  labels:
    name: mysql
spec:
  containers:
    - image: mysql
      name: mysql
      env:
        - name: MYSQL_ROOT_PASSWORD
          # Use secrets instead of env for passwords
          value: password
      ports:
        - containerPort: 3306
          name: mysql
      volumeMounts:
          # name must match the volume name below
        - name: mysql-storage
          # mount path within the container
          mountPath: /var/lib/mysql
  volumes:
    - name: mysql-storage
      emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  labels:
    name: mysql
  name: mysql
spec:
  type: NodePort
  ports:
    # the port that this service should serve on
    - port: 3306
  # label keys and values that must match in order to receive traffic for this service
  selector:
    name: mysql
```

We'll create the service and access mysql from outside the cluster:
```console
$ kubectl create -f mysql-app.yaml
$ kubectl get svc
NAME         LABELS                                    SELECTOR    IP(S)         PORT(S)
kubernetes   component=apiserver,provider=kubernetes   <none>      10.0.0.1      443/TCP
nginxsvc     app=nginx                                 app=nginx   10.0.79.131   80/TCP
                                                                                 443/TCP
mysql        app=mysql                                 app=mysql   10.0.63.72    3306/TCP

$ mysql -u root -ppassword --host 104.197.63.17 --port 3306 -e 'show databases;'
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mysql              |
| performance_schema |
+--------------------+
```

#### Cross-cluster loadbalancing

First set up your 2 clusters, and a kubeconfig secret as described in the [sharing clusters example](../../examples/sharing-clusters/README.md). We will create a loadbalancer in our first cluster (US) and have it publish the services from the second cluster (EU). This is the entire modified loadbalancer manifest:

```yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: service-loadbalancer
  labels:
    app: service-loadbalancer
    version: v1
spec:
  replicas: 1
  selector:
    app: service-loadbalancer
    version: v1
  template:
    metadata:
      labels:
        app: service-loadbalancer
        version: v1
    spec:
      volumes:
      # token from the eu cluster, must already exist
      # and match the name of the volume used in the container
      - name: eu-config
        secret:
          secretName: kubeconfig
      nodeSelector:
        role: loadbalancer
      containers:
      - image: gcr.io/google_containers/servicelb:0.1
        imagePullPolicy: Always
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8081
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
        name: haproxy
        ports:
        # All http services
        - containerPort: 80
          hostPort: 80
          protocol: TCP
        # nginx https
        - containerPort: 443
          hostPort: 8080
          protocol: TCP
        # mysql
        - containerPort: 3306
          hostPort: 3306
          protocol: TCP
        # haproxy stats
        - containerPort: 1936
          hostPort: 1936
          protocol: TCP
        resources: {}
        args:
        - --tcp-services=mysql:3306,nginxsvc:443
        - --use-kubernetes-cluster-service=false
        # use-kubernetes-cluster-service=false in conjunction with the
        # kube/config will force the service-loadbalancer to watch for
        # services from the eu cluster.
        volumeMounts:
        - mountPath: /.kube
          name: eu-config
        env:
        - name: KUBECONFIG
          value: /.kube/config
```

Note that it is essentially the same as the rc.yaml checked into the service-loadbalancer directory except that it consumes the kubeconfig secret as an extra KUBECONFIG environment variable.

```console
$ kubectl config use-context <us-clustername>
$ kubectl create -f rc.yaml
$ kubectl get pods -o wide
service-loadbalancer-5o2p4   1/1       Running   0          13m       kubernetes-minion-5jtd
$ kubectl get node kubernetes-minion-5jtd -o json | grep -i externalip -A 2
        "type": "ExternalIP",
        "address": "104.197.81.116"
$ curl http://104.197.81.116/nginxsvc
Europe
```

### Troubleshooting:
- If you can curl or netcat the endpoint from the pod (with kubectl exec) but not from the node, you have not specified hostPort and containerPort.
- If you can hit the ips from the node but not from your machine outside the cluster, you have not opened firewall rules for the right network.
- If you can't hit the ips from within the container, either haproxy or the service_loadbalancer binary is not running.
  1. Use ps in the pod
  2. sudo restart haproxy in the pod
  3. cat /etc/haproxy/haproxy.cfg in the pod
  4. try kubectl logs haproxy
  5. run the service_loadbalancer with --dry
- Check http://<node_ip>:1936 for the stats page. It requires the password used in the template file.
- Try talking to haproxy on the stats socket directly on the container using kubectl exec, eg: echo "show info" | socat unix-connect:/tmp/haproxy stdio

### Wishlist:

- Allow services to specify their url routes (see [openshift routes](https://github.com/openshift/origin/blob/master/docs/routing.md))
- Scrape :1936 and scale the replica count of the loadbalancer rc from a helper pod (this is basically ELB)
- Scrape :1936/;csv and autoscale services
- Better https support. 3 options to handle ssl:
  1. __Termination__: certificate lives on the load balancer. All traffic to the load balancer is encrypted, traffic from the load balancer to the service is not.
  2. __Pass Through__: Load balancer drops down to L4 balancing and forwards TCP encrypted packets to the destination.
  3. __Redirect__: All traffic is https. HTTP connections are encrypted using load balancer certs.

  Currently you need to trigger TCP loadbalancing for your https service by specifying it in loadbalancer.json. Support for the other 2 would be nice.
- Multinamespace support: Currently the controller only watches a single namespace for services.
- Support for external services (eg: amazon rds)
- Dynamically modify loadbalancer.json. Will become unnecessary when we have a loadbalancer resource.
- Headless services: I just didn't think people would care enough about this.


[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/contrib/service-loadbalancer/README.md?pixel)]()

@ -1,24 +0,0 @@
# Default haproxy config file. The service-loadbalancer uses
# go templates (http://golang.org/pkg/text/template/) to
# generate the config dynamically.
global
    daemon

defaults
    log global
    mode http
    option httplog
    option dontlognull
    contimeout 5000
    clitimeout 50000
    srvtimeout 50000
    errorfile 400 /etc/haproxy/errors/400.http
    errorfile 403 /etc/haproxy/errors/403.http
    errorfile 408 /etc/haproxy/errors/408.http
    errorfile 500 /etc/haproxy/errors/500.http
    errorfile 502 /etc/haproxy/errors/502.http
    errorfile 503 /etc/haproxy/errors/503.http
    errorfile 504 /etc/haproxy/errors/504.http

frontend localnodes
    bind *:80

@ -1,26 +0,0 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# A script to help with haproxy reloads. Needs sudo for :80. Running it for the
# first time starts haproxy, each subsequent invocation will perform a
# soft-reload.
# -f  config file
# -p  pid file
# -D  run as daemon
# -sf soft reload: wait for the old pids to finish handling requests, and send
#     them a resume signal if the reload of the new config fails

haproxy -f /etc/haproxy/haproxy.cfg -p /var/run/haproxy.pid -D -sf $(cat /var/run/haproxy.pid)

@ -1,7 +0,0 @@
{
    "name": "haproxy",
    "reloadCmd": "./haproxy_reload",
    "config": "/etc/haproxy/haproxy.cfg",
    "template": "template.cfg",
    "algorithm": "roundrobin"
}
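This manifest is unmarshalled into the controller's loadBalancerConfig struct and drives both template rendering and reloads. A minimal standalone sketch of that flow, simplified from service_loadbalancer.go below (error handling trimmed, modern stdlib calls used instead of the vendored kubernetes packages):

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
)

// loadBalancerConfig mirrors the fields of loadbalancer.json.
type loadBalancerConfig struct {
	Name      string `json:"name"`
	ReloadCmd string `json:"reloadCmd"`
	Config    string `json:"config"`
	Template  string `json:"template"`
	Algorithm string `json:"algorithm"`
}

// reload shells out to the configured reload command, as the controller does
// after rewriting the haproxy config file.
func (cfg *loadBalancerConfig) reload() error {
	out, err := exec.Command("sh", "-c", cfg.ReloadCmd).CombinedOutput()
	if err != nil {
		return fmt.Errorf("error restarting %v -- %v: %v", cfg.Name, string(out), err)
	}
	return nil
}

func main() {
	blob, err := os.ReadFile("loadbalancer.json")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	var cfg loadBalancerConfig
	if err := json.Unmarshal(blob, &cfg); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("loadbalancer %q: template=%v config=%v reloadCmd=%q\n",
		cfg.Name, cfg.Template, cfg.Config, cfg.ReloadCmd)
}
```
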
@ -1,51 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: service-loadbalancer
  labels:
    app: service-loadbalancer
    version: v1
spec:
  replicas: 1
  selector:
    app: service-loadbalancer
    version: v1
  template:
    metadata:
      labels:
        app: service-loadbalancer
        version: v1
    spec:
      nodeSelector:
        role: loadbalancer
      containers:
      - image: gcr.io/google_containers/servicelb:0.1
        imagePullPolicy: Always
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8081
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
        name: haproxy
        ports:
        # All http services
        - containerPort: 80
          hostPort: 80
          protocol: TCP
        # nginx https
        - containerPort: 443
          hostPort: 8080
          protocol: TCP
        # mysql
        - containerPort: 3306
          hostPort: 3306
          protocol: TCP
        # haproxy stats
        - containerPort: 1936
          hostPort: 1936
          protocol: TCP
        resources: {}
        args:
        - --tcp-services=mysql:3306,nginxsvc:443
@ -1,462 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"os/exec"
	"reflect"
	"strconv"
	"strings"
	"text/template"
	"time"

	"github.com/golang/glog"
	flag "github.com/spf13/pflag"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client"
	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/controller/framework"
	"k8s.io/kubernetes/pkg/fields"
	kubectl_util "k8s.io/kubernetes/pkg/kubectl/cmd/util"
	"k8s.io/kubernetes/pkg/util"
	"k8s.io/kubernetes/pkg/util/workqueue"
)

const (
	reloadQPS    = 10.0
	resyncPeriod = 10 * time.Second
	healthzPort  = 8081
)

var (
	flags = flag.NewFlagSet("", flag.ContinueOnError)

	// keyFunc for endpoints and services.
	keyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc

	// Error used to indicate that a sync is deferred because the controller isn't ready yet.
	deferredSync = fmt.Errorf("Deferring sync till endpoints controller has synced.")

	config = flags.String("cfg", "loadbalancer.json", `path to load balancer json config.
		Note that this is *not* the path to the configuration file for the load balancer
		itself, but rather, the path to the json configuration of how you would like the
		load balancer to behave in the kubernetes cluster.`)

	dry = flags.Bool("dry", false, `if set, a single dry run of configuration
		parsing is executed. Results written to stdout.`)

	cluster = flags.Bool("use-kubernetes-cluster-service", true, `If true, use the built in kubernetes
		cluster for creating the client`)

	// If you have pure tcp services or https services that need L3 routing, you
	// must specify them by name. Note that you are responsible for:
	// 1. Making sure there is no collision between the service ports of these services.
	//    - You can have multiple <mysql svc name>:3306 specifications in this map, and as
	//      long as the service ports of your mysql service don't clash, you'll get
	//      loadbalancing for each one.
	// 2. Exposing the service ports as node ports on a pod.
	// 3. Adding firewall rules so these ports can ingress traffic.
	//
	// Any service not specified in this map is treated as an http:80 service,
	// unless TargetService dictates otherwise.

	tcpServices = flags.String("tcp-services", "", `Comma separated list of tcp/https
		serviceName:servicePort pairings. This assumes you've opened up the right
		hostPorts for each service that serves ingress traffic.`)

	targetService = flags.String(
		"target-service", "", `Restrict loadbalancing to a single target service.`)

	// ForwardServices == true:
	// The lb just forwards packets to the vip of the service and we use
	// kube-proxy's inbuilt load balancing. You get rules:
	//	backend svc_p1: svc_ip:p1
	//	backend svc_p2: svc_ip:p2
	//
	// ForwardServices == false:
	// The lb is configured to match up services to endpoints. So for example,
	// if you have (svc:p1, p2 -> tp1, tp2) we essentially get all endpoints with
	// the same targetport and create a new svc backend for them, i.e:
	//	backend svc_p1: pod1:tp1, pod2:tp1
	//	backend svc_p2: pod1:tp2, pod2:tp2

	forwardServices = flags.Bool("forward-services", false, `Forward to service vip
		instead of endpoints. This will use kube-proxy's inbuilt load balancing.`)

	httpPort  = flags.Int("http-port", 80, `Port to expose http services.`)
	statsPort = flags.Int("stats-port", 1936, `Port for loadbalancer stats,
		Used in the loadbalancer liveness probe.`)
)

// service encapsulates a single backend entry in the load balancer config.
// The Ep field can contain the ips of the pods that make up a service, or the
// clusterIP of the service itself (in which case the list has a single entry,
// and kubernetes handles loadbalancing across the service endpoints).
type service struct {
	Name string
	Ep   []string

	// FrontendPort is the port that the loadbalancer listens on for traffic
	// for this service. For http, it's always :80, for each tcp service it
	// is the service port of any service matching a name in the tcpServices set.
	FrontendPort int
}

// loadBalancerConfig represents loadbalancer specific configuration. Eventually
// kubernetes will have an api for l7 loadbalancing.
type loadBalancerConfig struct {
	Name      string `json:"name" description:"Name of the load balancer, eg: haproxy."`
	ReloadCmd string `json:"reloadCmd" description:"command used to reload the load balancer."`
	Config    string `json:"config" description:"path to loadbalancers configuration file."`
	Template  string `json:"template" description:"template for the load balancer config."`
	Algorithm string `json:"algorithm" description:"loadbalancing algorithm."`
}

// write writes the configuration file; it will write to stdout if dryRun == true.
func (cfg *loadBalancerConfig) write(services map[string][]service, dryRun bool) (err error) {
	var w io.Writer
	if dryRun {
		w = os.Stdout
	} else {
		w, err = os.Create(cfg.Config)
		if err != nil {
			return
		}
	}
	var t *template.Template
	t, err = template.ParseFiles(cfg.Template)
	if err != nil {
		return
	}
	return t.Execute(w, services)
}

// reload reloads the loadbalancer using the reload cmd specified in the json manifest.
func (cfg *loadBalancerConfig) reload() error {
	output, err := exec.Command("sh", "-c", cfg.ReloadCmd).CombinedOutput()
	msg := fmt.Sprintf("%v -- %v", cfg.Name, string(output))
	if err != nil {
		return fmt.Errorf("Error restarting %v: %v", msg, err)
	}
	glog.Infof(msg)
	return nil
}

// loadBalancerController watches the kubernetes api and adds/removes services
// from the loadbalancer, via loadBalancerConfig.
type loadBalancerController struct {
	cfg               *loadBalancerConfig
	queue             *workqueue.Type
	client            *client.Client
	epController      *framework.Controller
	svcController     *framework.Controller
	svcLister         cache.StoreToServiceLister
	epLister          cache.StoreToEndpointsLister
	reloadRateLimiter util.RateLimiter
	template          string
	targetService     string
	forwardServices   bool
	tcpServices       map[string]int
	httpPort          int
}

// getEndpoints returns a list of <endpoint ip>:<port> for a given service/target port combination.
func (lbc *loadBalancerController) getEndpoints(
	s *api.Service, servicePort *api.ServicePort) (endpoints []string) {
	ep, err := lbc.epLister.GetServiceEndpoints(s)
	if err != nil {
		return
	}

	// The intent here is to create a union of all subsets that match a targetPort.
	// We know the endpoint already matches the service, so all pod ips that have
	// the target port are capable of serving traffic for it.
	for _, ss := range ep.Subsets {
		for _, epPort := range ss.Ports {
			var targetPort int
			switch servicePort.TargetPort.Kind {
			case util.IntstrInt:
				if epPort.Port == servicePort.TargetPort.IntVal {
					targetPort = epPort.Port
				}
			case util.IntstrString:
				if epPort.Name == servicePort.TargetPort.StrVal {
					targetPort = epPort.Port
				}
			}
			if targetPort == 0 {
				continue
			}
			for _, epAddress := range ss.Addresses {
				endpoints = append(endpoints, fmt.Sprintf("%v:%v", epAddress.IP, targetPort))
			}
		}
	}
	return
}

// getServiceNameForLBRule encapsulates all the hacky convenience name modifications for lb rules.
// - :80 services don't need a :80 postfix
// - default ns should be accessible without /ns/name (when we have /ns support)
func getServiceNameForLBRule(s *api.Service, servicePort int) string {
	if servicePort == 80 {
		return s.Name
	}
	return fmt.Sprintf("%v:%v", s.Name, servicePort)
}

// getServices returns a list of services and their endpoints.
func (lbc *loadBalancerController) getServices() (httpSvc []service, tcpSvc []service) {
	ep := []string{}
	services, _ := lbc.svcLister.List()
	for _, s := range services.Items {
		if s.Spec.Type == api.ServiceTypeLoadBalancer {
			glog.Infof("Ignoring service %v, it already has a loadbalancer", s.Name)
			continue
		}
		for _, servicePort := range s.Spec.Ports {
			// TODO: headless services?
			sName := s.Name
			if servicePort.Protocol == api.ProtocolUDP ||
				(lbc.targetService != "" && lbc.targetService != sName) {
				glog.Infof("Ignoring %v: %+v", sName, servicePort)
				continue
			}

			if lbc.forwardServices {
				ep = []string{
					fmt.Sprintf("%v:%v", s.Spec.ClusterIP, servicePort.Port)}
			} else {
				ep = lbc.getEndpoints(&s, &servicePort)
			}
			if len(ep) == 0 {
				glog.Infof("No endpoints found for service %v, port %+v",
					sName, servicePort)
				continue
			}
			newSvc := service{
				Name: getServiceNameForLBRule(&s, servicePort.Port),
				Ep:   ep,
			}
			if port, ok := lbc.tcpServices[sName]; ok && port == servicePort.Port {
				newSvc.FrontendPort = servicePort.Port
				tcpSvc = append(tcpSvc, newSvc)
			} else {
				newSvc.FrontendPort = lbc.httpPort
				httpSvc = append(httpSvc, newSvc)
			}
			glog.Infof("Found service: %+v", newSvc)
		}
	}
	return
}

// sync all services with the loadbalancer.
func (lbc *loadBalancerController) sync(dryRun bool) error {
	if !lbc.epController.HasSynced() || !lbc.svcController.HasSynced() {
		time.Sleep(100 * time.Millisecond)
		return deferredSync
	}
	httpSvc, tcpSvc := lbc.getServices()
	if len(httpSvc) == 0 && len(tcpSvc) == 0 {
		return nil
	}
	if err := lbc.cfg.write(
		map[string][]service{
			"httpServices": httpSvc,
			"tcpServices":  tcpSvc,
		}, dryRun); err != nil {
		return err
	}
	if dryRun {
		return nil
	}
	lbc.reloadRateLimiter.Accept()
	return lbc.cfg.reload()
}

// worker handles the work queue.
func (lbc *loadBalancerController) worker() {
	for {
		key, _ := lbc.queue.Get()
		glog.Infof("Sync triggered by service %v", key)
		if err := lbc.sync(false); err != nil {
			glog.Infof("Requeuing %v because of error: %v", key, err)
			lbc.queue.Add(key)
		} else {
			lbc.queue.Done(key)
		}
	}
}

// newLoadBalancerController creates a new controller from the given config.
func newLoadBalancerController(cfg *loadBalancerConfig, kubeClient *client.Client, namespace string) *loadBalancerController {

	lbc := loadBalancerController{
		cfg:    cfg,
		client: kubeClient,
		queue:  workqueue.New(),
		reloadRateLimiter: util.NewTokenBucketRateLimiter(
			reloadQPS, int(reloadQPS)),
		targetService:   *targetService,
		forwardServices: *forwardServices,
		httpPort:        *httpPort,
		tcpServices:     map[string]int{},
	}

	for _, service := range strings.Split(*tcpServices, ",") {
		portSplit := strings.Split(service, ":")
		if len(portSplit) != 2 {
			glog.Errorf("Ignoring misconfigured TCP service %v", service)
			continue
		}
		if port, err := strconv.Atoi(portSplit[1]); err != nil {
			glog.Errorf("Ignoring misconfigured TCP service %v: %v", service, err)
			continue
		} else {
			lbc.tcpServices[portSplit[0]] = port
		}
	}
	enqueue := func(obj interface{}) {
		key, err := keyFunc(obj)
		if err != nil {
			glog.Infof("Couldn't get key for object %+v: %v", obj, err)
			return
		}
		lbc.queue.Add(key)
	}
	eventHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc:    enqueue,
		DeleteFunc: enqueue,
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				enqueue(cur)
			}
		},
	}

	lbc.svcLister.Store, lbc.svcController = framework.NewInformer(
		cache.NewListWatchFromClient(
			lbc.client, "services", namespace, fields.Everything()),
		&api.Service{}, resyncPeriod, eventHandlers)

	lbc.epLister.Store, lbc.epController = framework.NewInformer(
		cache.NewListWatchFromClient(
			lbc.client, "endpoints", namespace, fields.Everything()),
		&api.Endpoints{}, resyncPeriod, eventHandlers)

	return &lbc
}

// parseCfg parses the given configuration file.
// cmd line params take precedence over config directives.
func parseCfg(configPath string) *loadBalancerConfig {
	jsonBlob, err := ioutil.ReadFile(configPath)
	if err != nil {
		glog.Fatalf("Could not parse lb config: %v", err)
	}
	var cfg loadBalancerConfig
	err = json.Unmarshal(jsonBlob, &cfg)
	if err != nil {
		glog.Fatalf("Unable to unmarshal json blob: %v", string(jsonBlob))
	}
	glog.Infof("Creating new loadbalancer: %+v", cfg)
	return &cfg
}

// healthzServer services liveness probes.
func healthzServer() {
	http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		// Delegate a check to the haproxy stats service.
		response, err := http.Get(fmt.Sprintf("http://localhost:%v", *statsPort))
		if err != nil {
			glog.Infof("Error %v", err)
			w.WriteHeader(http.StatusInternalServerError)
		} else {
			defer response.Body.Close()
			if response.StatusCode != http.StatusOK {
				contents, err := ioutil.ReadAll(response.Body)
				if err != nil {
					glog.Infof("Error reading response on receiving status %v: %v",
						response.StatusCode, err)
				}
				glog.Infof("%v\n", string(contents))
				w.WriteHeader(response.StatusCode)
			} else {
				w.WriteHeader(200)
				w.Write([]byte("ok"))
			}
		}
	})
	glog.Fatal(http.ListenAndServe(fmt.Sprintf(":%v", healthzPort), nil))
}

func dryRun(lbc *loadBalancerController) {
	var err error
	for err = lbc.sync(true); err == deferredSync; err = lbc.sync(true) {
	}
	if err != nil {
		glog.Infof("ERROR: %+v", err)
	}
}

func main() {
	flags.Parse(os.Args)
	cfg := parseCfg(*config)
	if len(*tcpServices) == 0 {
		glog.Infof("All tcp/https services will be ignored.")
	}
	go healthzServer()

	var kubeClient *client.Client
	var err error
	clientConfig := kubectl_util.DefaultClientConfig(flags)
	if *cluster {
		if kubeClient, err = client.NewInCluster(); err != nil {
			glog.Fatalf("Failed to create client: %v", err)
		}
	} else {
		config, err := clientConfig.ClientConfig()
		if err != nil {
			glog.Fatalf("error connecting to the client: %v", err)
		}
		kubeClient, err = client.New(config)
	}
	namespace, specified, err := clientConfig.Namespace()
	if err != nil {
		glog.Fatalf("unexpected error: %v", err)
	}
	if !specified {
		namespace = "default"
	}

	// TODO: Handle multiple namespaces
	lbc := newLoadBalancerController(cfg, kubeClient, namespace)
	go lbc.epController.Run(util.NeverStop)
	go lbc.svcController.Run(util.NeverStop)
	if *dry {
		dryRun(lbc)
	} else {
		util.Until(lbc.worker, time.Second, util.NeverStop)
	}
}

@ -1,196 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"fmt"
	"testing"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/util"

	"github.com/golang/glog"
)

const ns = "default"

// storeEps stores the given endpoints in a store.
func storeEps(eps []*api.Endpoints) cache.Store {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	found := make([]interface{}, 0, len(eps))
	for i := range eps {
		found = append(found, eps[i])
	}
	if err := store.Replace(found); err != nil {
		glog.Fatalf("Unable to replace endpoints %v", err)
	}
	return store
}

// storeServices stores the given services in a store.
func storeServices(svcs []*api.Service) cache.Store {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	found := make([]interface{}, 0, len(svcs))
	for i := range svcs {
		found = append(found, svcs[i])
	}
	if err := store.Replace(found); err != nil {
		glog.Fatalf("Unable to replace services %v", err)
	}
	return store
}

func getEndpoints(svc *api.Service, endpointAddresses []api.EndpointAddress, endpointPorts []api.EndpointPort) *api.Endpoints {
	return &api.Endpoints{
		ObjectMeta: api.ObjectMeta{
			Name:      svc.Name,
			Namespace: svc.Namespace,
		},
		Subsets: []api.EndpointSubset{{
			Addresses: endpointAddresses,
			Ports:     endpointPorts,
		}},
	}
}

func getService(servicePorts []api.ServicePort) *api.Service {
	return &api.Service{
		ObjectMeta: api.ObjectMeta{
			Name: string(util.NewUUID()), Namespace: ns},
		Spec: api.ServiceSpec{
			Ports: servicePorts,
		},
	}
}

func newFakeLoadBalancerController(endpoints []*api.Endpoints, services []*api.Service) *loadBalancerController {
	flb := loadBalancerController{}
	flb.epLister.Store = storeEps(endpoints)
	flb.svcLister.Store = storeServices(services)
	flb.httpPort = 80
	return &flb
}

func TestGetEndpoints(t *testing.T) {
	// 2 pods each of which have 3 targetPorts exposed via a single service
	endpointAddresses := []api.EndpointAddress{
		{IP: "1.2.3.4"},
		{IP: "6.7.8.9"},
	}
	ports := []int{80, 443, 3306}
	endpointPorts := []api.EndpointPort{
		{Port: ports[0], Protocol: "TCP"},
		{Port: ports[1], Protocol: "TCP"},
		{Port: ports[2], Protocol: "TCP", Name: "mysql"},
	}
	servicePorts := []api.ServicePort{
		{Port: 10, TargetPort: util.NewIntOrStringFromInt(ports[0])},
		{Port: 20, TargetPort: util.NewIntOrStringFromInt(ports[1])},
		{Port: 30, TargetPort: util.NewIntOrStringFromString("mysql")},
	}

	svc := getService(servicePorts)
	endpoints := []*api.Endpoints{getEndpoints(svc, endpointAddresses, endpointPorts)}
	flb := newFakeLoadBalancerController(endpoints, []*api.Service{svc})

	for i := range ports {
		eps := flb.getEndpoints(svc, &svc.Spec.Ports[i])
		expectedEps := util.NewStringSet()
		for _, address := range endpointAddresses {
			expectedEps.Insert(fmt.Sprintf("%v:%v", address.IP, ports[i]))
		}

		receivedEps := util.NewStringSet()
		for _, ep := range eps {
			receivedEps.Insert(ep)
		}
		if len(receivedEps) != len(expectedEps) || !expectedEps.IsSuperset(receivedEps) {
			t.Fatalf("Unexpected endpoints, received %+v, expected %+v", receivedEps, expectedEps)
		}
		glog.Infof("Got endpoints %+v", receivedEps)
	}
}

func TestGetServices(t *testing.T) {
	endpointAddresses := []api.EndpointAddress{
		{IP: "1.2.3.4"},
		{IP: "6.7.8.9"},
	}
	ports := []int{80, 443}
	endpointPorts := []api.EndpointPort{
		{Port: ports[0], Protocol: "TCP"},
		{Port: ports[1], Protocol: "TCP"},
	}
	servicePorts := []api.ServicePort{
		{Port: 10, TargetPort: util.NewIntOrStringFromInt(ports[0])},
		{Port: 20, TargetPort: util.NewIntOrStringFromInt(ports[1])},
	}

	// 2 services targeting the same endpoints, one of which is declared as a tcp service.
	svc1 := getService(servicePorts)
	svc2 := getService(servicePorts)
	endpoints := []*api.Endpoints{
		getEndpoints(svc1, endpointAddresses, endpointPorts),
		getEndpoints(svc2, endpointAddresses, endpointPorts),
	}
	flb := newFakeLoadBalancerController(endpoints, []*api.Service{svc1, svc2})
	flb.tcpServices = map[string]int{
		svc1.Name: 20,
	}
	http, tcp := flb.getServices()
	serviceURLEp := fmt.Sprintf("%v:%v", svc1.Name, 20)
	if len(tcp) != 1 || tcp[0].Name != serviceURLEp || tcp[0].FrontendPort != 20 {
		t.Fatalf("Unexpected tcp service %+v expected %+v", tcp, svc1.Name)
	}

	// All pods of svc1 exposed under servicePort 20 are tcp
	expectedTCPEps := util.NewStringSet()
	for _, address := range endpointAddresses {
		expectedTCPEps.Insert(fmt.Sprintf("%v:%v", address.IP, 443))
	}
	receivedTCPEps := util.NewStringSet()
	for _, ep := range tcp[0].Ep {
		receivedTCPEps.Insert(ep)
	}
	if len(receivedTCPEps) != len(expectedTCPEps) || !expectedTCPEps.IsSuperset(receivedTCPEps) {
		t.Fatalf("Unexpected tcp service %+v", tcp)
	}

	// All pods of either service not mentioned in the tcp map are multiplexed on port :80 as http services.
	expectedURLMapping := map[string]util.StringSet{
		fmt.Sprintf("%v:%v", svc1.Name, 10): util.NewStringSet("1.2.3.4:80", "6.7.8.9:80"),
		fmt.Sprintf("%v:%v", svc2.Name, 10): util.NewStringSet("1.2.3.4:80", "6.7.8.9:80"),
		fmt.Sprintf("%v:%v", svc2.Name, 20): util.NewStringSet("1.2.3.4:443", "6.7.8.9:443"),
	}
	for _, s := range http {
		if s.FrontendPort != 80 {
			t.Fatalf("All http services should get multiplexed via the same frontend port: %+v", s)
		}
		expectedEps, ok := expectedURLMapping[s.Name]
		if !ok {
			t.Fatalf("Expected url endpoint %v, found %+v", s.Name, expectedURLMapping)
		}
		receivedEp := util.NewStringSet()
		for i := range s.Ep {
			receivedEp.Insert(s.Ep[i])
		}
		if len(receivedEp) != len(expectedEps) && !receivedEp.IsSuperset(expectedEps) {
			t.Fatalf("Expected %+v, got %+v", expectedEps, receivedEp)
		}
	}
}

@ -1,69 +0,0 @@
# This file uses golang text templates (http://golang.org/pkg/text/template/) to
# dynamically configure the haproxy loadbalancer.
global
    daemon
    stats socket /tmp/haproxy

defaults
    log global
    option dontlognull
    timeout connect 5000
    timeout client 50000
    timeout server 50000

# haproxy stats, requires hostPort and firewall rules for :1936
listen stats :1936
    mode http
    stats enable
    stats hide-version
    stats realm Haproxy\ Statistics
    stats uri /

frontend httpfrontend
    # Frontend bound on all network interfaces on port 80
    bind *:80
    mode http

    # inherit default mode, needs changing for tcp
    # forward everything meant for /foo to the foo backend
    # default_backend foo
{{range $i, $svc := .httpServices}}
    acl url_{{$svc.Name}} path_beg /{{$svc.Name}}
    use_backend {{$svc.Name}} if url_{{$svc.Name}}
{{end}}

{{range $i, $svc := .httpServices}}
{{ $svcName := $svc.Name }}
backend {{$svc.Name}}
    mode http
    option httplog
    errorfile 400 /etc/haproxy/errors/400.http
    errorfile 403 /etc/haproxy/errors/403.http
    errorfile 408 /etc/haproxy/errors/408.http
    errorfile 500 /etc/haproxy/errors/500.http
    errorfile 502 /etc/haproxy/errors/502.http
    errorfile 503 /etc/haproxy/errors/503.http
    errorfile 504 /etc/haproxy/errors/504.http

    balance roundrobin
    # TODO: Make the path used to access a service customizable.
    reqrep ^([^\ :]*)\ /{{$svc.Name}}[/]?(.*) \1\ /\2
    {{range $j, $ep := $svc.Ep}}server {{$svcName}}_{{$j}} {{$ep}}
    {{end}}
{{end}}


{{range $i, $svc := .tcpServices}}
{{ $svcName := $svc.Name }}
frontend {{$svc.Name}}
    bind *:{{$svc.FrontendPort}}
    mode tcp
    default_backend {{$svc.Name}}

backend {{$svc.Name}}
    balance roundrobin
    mode tcp
    {{range $j, $ep := $svc.Ep}}server {{$svcName}}_{{$j}} {{$ep}}
    {{end}}
{{end}}
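The controller renders this template with a map keyed by `httpServices` and `tcpServices` (see `cfg.write` above). A minimal standalone sketch of that rendering step, assuming `template.cfg` sits in the working directory and using illustrative endpoint values:

```go
package main

import (
	"os"
	"text/template"
)

// service matches the fields the template dereferences: Name, Ep, FrontendPort.
type service struct {
	Name         string
	Ep           []string
	FrontendPort int
}

func main() {
	// The same shape the controller passes to template.Execute.
	data := map[string][]service{
		"httpServices": {{Name: "nginxsvc", Ep: []string{"10.244.1.4:80", "10.244.2.7:80"}, FrontendPort: 80}},
		"tcpServices":  {{Name: "mysql:3306", Ep: []string{"10.244.1.5:3306"}, FrontendPort: 3306}},
	}

	t, err := template.ParseFiles("template.cfg")
	if err != nil {
		panic(err)
	}
	// Write the generated haproxy config to stdout; the controller writes it
	// to /etc/haproxy/haproxy.cfg and then runs the reload command.
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```
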
@ -212,7 +212,7 @@ eu-minion-7wz7 kubernetes.io/hostname=eu-minion-7wz7 Ready
eu-minion-loh2   kubernetes.io/hostname=eu-minion-loh2   Ready
```

-For a more advanced example of sharing clusters, see the [service-loadbalancer](../../contrib/service-loadbalancer/README.md)
+For a more advanced example of sharing clusters, see the [service-loadbalancer](https://github.com/kubernetes/contrib/tree/master/service-loadbalancer/README.md)


<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->