Flannel server in static pod with private etcd.

pull/6/head
Prashanth Balasubramanian 2015-11-21 17:37:32 -08:00 committed by gmarek
parent 7aa8ebe30f
commit 321bc73264
13 changed files with 229 additions and 36 deletions

View File

@ -19,7 +19,7 @@
"command": [
"/bin/sh",
"-c",
"/usr/local/bin/etcd --listen-peer-urls http://0.0.0.0:{{ server_port }} --addr 0.0.0.0:{{ port }} --bind-addr 0.0.0.0:{{ port }} --data-dir /var/etcd/data{{ suffix }} 1>>/var/log/etcd{{ suffix }}.log 2>&1"
"/usr/local/bin/etcd --listen-peer-urls http://127.0.0.1:{{ server_port }} --addr 127.0.0.1:{{ port }} --bind-addr 127.0.0.1:{{ port }} --data-dir /var/etcd/data{{ suffix }} 1>>/var/log/etcd{{ suffix }}.log 2>&1"
],
"livenessProbe": {
"httpGet": {
@ -33,7 +33,7 @@
"ports":[
{ "name": "serverport",
"containerPort": {{ server_port }},
"hostPort": {{ server_port }}
"hostPort": {{ server_port }}
},{
"name": "clientport",
"containerPort": {{ port }},

View File

@ -0,0 +1,99 @@
{
"kind": "Pod",
"apiVersion": "v1",
"metadata": {
"name": "flannel-helper",
"namespace": "kube-system",
"labels": {
"app": "flannel-helper",
"version": "v0.1"
}
},
"spec": {
"volumes": [
{
"name": "varlogflannel",
"hostPath": {
"path": "/var/log"
}
},
{
"name": "etcdstorage",
"emptyDir": {}
},
{
"name": "networkconfig",
"hostPath": {
"path": "/etc/kubernetes/network.json"
}
}
],
"containers": [
{
"name": "flannel-helper",
"image": "bprashanth/flannel-helper:0.1",
"args": [
"--network-config=/etc/kubernetes/network.json",
"--etcd-prefix=/kubernetes.io/network",
"--etcd-server=http://127.0.0.1:4001"
],
"volumeMounts": [
{
"name": "networkconfig",
"mountPath": "/etc/kubernetes/network.json"
}
],
"imagePullPolicy": "Always"
},
{
"name": "flannel-container",
"image": "quay.io/coreos/flannel:0.5.5",
"command": [
"/bin/sh",
"-c",
"/opt/bin/flanneld -listen 0.0.0.0:10253 -etcd-endpoints http://127.0.0.1:4001 -etcd-prefix /kubernetes.io/network 1>>/var/log/flannel_server.log 2>&1"
],
"ports": [
{
"hostPort": 10253,
"containerPort": 10253
}
],
"resources": {
"limits": {
"cpu": "100m"
}
},
"volumeMounts": [
{
"name": "varlogflannel",
"mountPath": "/var/log"
}
]
},
{
"name": "etcd-container",
"image": "gcr.io/google_containers/etcd:2.2.1",
"command": [
"/bin/sh",
"-c",
"/opt/bin/etcd --listen-peer-urls http://127.0.0.1:4001 --addr http://127.0.0.1:4001 --bind-addr 127.0.0.1:4001 --data-dir /var/etcd/data 1>>/var/log/etcd_flannel.log 2>&1"
],
"resources": {
"limits": {
"cpu": "100m",
"memory": "50Mi"
}
},
"volumeMounts": [
{
"name": "etcdstorage",
"mountPath": "/var/etcd/data"
}
]
}
],
"hostNetwork": true
}
}

View File

@ -0,0 +1,24 @@
touch /var/log/flannel.log:
cmd.run:
- creates: /var/log/flannel.log
touch /var/log/etcd_flannel.log:
cmd.run:
- creates: /var/log/etcd_flannel.log
/etc/kubernetes/network.json:
file.managed:
- source: salt://flannel/network.json
- makedirs: True
- user: root
- group: root
- mode: 755
/etc/kubernetes/manifests/flannel-server.manifest:
file.managed:
- source: salt://flannel-server/flannel-server.manifest
- user: root
- group: root
- mode: 644
- makedirs: true
- dir_mode: 755

View File

@ -0,0 +1,8 @@
{
"Network": "192.168.0.0/16",
"SubnetLen": 26,
"Backend": {
"Type": "vxlan",
"VNI": 1
}
}

View File

@ -0,0 +1,6 @@
{% if grains.api_servers is defined -%}
{% set daemon_args = "-remote " + grains.api_servers + ":10253" -%}
{% else -%}
{% set daemon_args = "-remote 127.0.0.1:10253" -%}
{% endif -%}
DAEMON_ARGS="{{daemon_args}}"

View File

@ -18,6 +18,14 @@ flannel-symlink:
- watch:
- archive: flannel-tar
/etc/default/flannel:
file.managed:
- source: salt://flannel/default
- template: jinja
- user: root
- group: root
- mode: 644
/etc/init.d/flannel:
file.managed:
- source: salt://flannel/initd
@ -25,17 +33,10 @@ flannel-symlink:
- group: root
- mode: 755
/var/run/flannel/network.json:
file.managed:
- source: salt://flannel/network.json
- makedirs: True
- user: root
- group: root
- mode: 755
flannel:
service.running:
- enable: True
- watch:
- file: /usr/local/bin/flanneld
- file: /etc/init.d/flannel
- file: /etc/default/flannel

View File

@ -15,9 +15,9 @@
# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="Flannel overlay network daemon"
NAME=flanneld
NAME=flannel
DAEMON=/usr/local/bin/flanneld
DAEMON_ARGS="-etcd-endpoints http://e2e-test-beeps-master:4001 -etcd-prefix /kubernetes.io/network"
DAEMON_ARGS=""
DAEMON_LOG_FILE=/var/log/$NAME.log
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME

View File

@ -1,5 +1,5 @@
{
"Network": "10.245.0.0/16",
"Network": "18.16.0.0/16",
"SubnetLen": 24,
"Backend": {
"Type": "vxlan",

View File

@ -44,6 +44,7 @@ base:
- match: grain
- generate-cert
- etcd
- flannel-server
- flannel
- kube-apiserver
- kube-controller-manager

View File

@ -0,0 +1,35 @@
# Flannel integration with Kubernetes
## Why?
* Networking works out of the box.
* Cloud gateway configuration is regulated.
* Consistent bare metal and cloud experience.
* Lays foundation for integrating with networking backends and vendors.
## How?
```
Master                               Node1
---------------------|---------------------------------
database             |
   |                 |
{10.250.0.0/16}      |              docker
   |  here's podcidr |                | restart with podcidr
apiserver <------------------------ kubelet
   |                 |                | here's podcidr
flannel-server:10253 <------------- flannel-daemon
   |   --/16--->     |
   |   <--watch--    |              [config iptables]
   |   subscribe to new node subnets
   |   -------->     |              [config VXLan]
                     |
```
There is a tiny lie in the above diagram: as of now, the flannel server on the master maintains a private etcd. This will not be necessary once we have a generalized network resource and a Kubernetes x flannel backend.
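The `flannel-helper` container in the manifest above is what seeds that private etcd: flannel reads its configuration from `<etcd-prefix>/config`, so the helper has to write the contents of `/etc/kubernetes/network.json` to `/kubernetes.io/network/config` on the local etcd. The helper's actual implementation is not part of this diff; a minimal sketch of that seeding step, using the etcd v2 keys API and a made-up `seedNetworkConfig` name, could look like:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"strings"
)

// seedNetworkConfig writes the flannel network config (the contents of
// network.json) to <etcdPrefix>/config via the etcd v2 keys API; flanneld
// reads its network configuration from that key.
func seedNetworkConfig(etcdServer, etcdPrefix, configPath string) error {
	cfg, err := ioutil.ReadFile(configPath)
	if err != nil {
		return err
	}
	key := fmt.Sprintf("%s/v2/keys%s/config", etcdServer, etcdPrefix)
	body := url.Values{"value": {string(cfg)}}.Encode()
	req, err := http.NewRequest("PUT", key, strings.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		return fmt.Errorf("etcd returned %v", resp.Status)
	}
	return nil
}

func main() {
	// Same endpoints the flannel-helper container is started with above.
	err := seedNetworkConfig("http://127.0.0.1:4001", "/kubernetes.io/network", "/etc/kubernetes/network.json")
	if err != nil {
		fmt.Println("seed failed:", err)
	}
}
```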
## Limitations
* Integration is experimental.
## Wishlist

View File

@ -4,15 +4,18 @@ import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"strconv"
"strings"
utildbus "k8s.io/kubernetes/pkg/util/dbus"
utilexec "k8s.io/kubernetes/pkg/util/exec"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
"github.com/golang/glog"
)
// TODO: Move all this to a network plugin.
const (
networkType = "vxlan"
dockerOptsFile = "/etc/default/docker"
flannelSubnetKey = "FLANNEL_SUBNET"
flannelNetworkKey = "FLANNEL_NETWORK"
@ -21,27 +24,46 @@ const (
flannelSubnetFile = "/var/run/flannel/subnet.env"
)
type FlannelServer struct {
subnetFile string
// TODO: Manage subnet file.
// A Kubelet to flannel bridging helper.
type FlannelHelper struct {
subnetFile string
iptablesHelper utiliptables.Interface
}
func NewFlannelServer() *FlannelServer {
return &FlannelServer{flannelSubnetFile}
// NewFlannelHelper creates a new flannel helper.
func NewFlannelHelper() *FlannelHelper {
return &FlannelHelper{
subnetFile: flannelSubnetFile,
iptablesHelper: utiliptables.New(utilexec.New(), utildbus.New(), utiliptables.ProtocolIpv4),
}
}
func (f *FlannelServer) Handshake() (podCIDR string, err error) {
// Flannel daemon will hang till the server comes up, kubelet will hang until
// flannel daemon has written subnet env variables. This is the kubelet handshake.
// To improve performance, we could defer just the configuration of the container
// bridge till after subnet.env is written. Keeping it local is clearer for now.
// Ensure the required MASQUERADE rules exist for the given network/cidr.
func (f *FlannelHelper) ensureFlannelMasqRule(kubeNetwork, podCIDR string) error {
// TODO: Investigate delegation to flannel via -ip-masq=true once flannel
// issue #374 is resolved.
comment := "Flannel masquerade facilitates pod<->node traffic."
args := []string{
"-m", "comment", "--comment", comment,
"!", "-d", kubeNetwork, "-s", podCIDR, "-j", "MASQUERADE",
}
_, err := f.iptablesHelper.EnsureRule(
utiliptables.Append,
utiliptables.TableNAT,
utiliptables.ChainPostrouting,
args...)
return err
}
// Handshake waits for the flannel subnet file and installs a few IPTables
// rules, returning the pod CIDR allocated for this node.
func (f *FlannelHelper) Handshake() (podCIDR string, err error) {
// TODO: Using a file to communicate is brittle
if _, err = os.Stat(f.subnetFile); err != nil {
return "", fmt.Errorf("Waiting for subnet file %v", f.subnetFile)
}
glog.Infof("(kubelet)Found flannel subnet file %v", f.subnetFile)
// TODO: Rest of this function is a hack.
config, err := parseKVConfig(f.subnetFile)
if err != nil {
return "", err
@ -57,13 +79,8 @@ func (f *FlannelServer) Handshake() (podCIDR string, err error) {
if !ok {
return "", fmt.Errorf("No flannel network, config %+v", config)
}
if err := exec.Command("iptables",
"-t", "nat",
"-A", "POSTROUTING",
"!", "-d", kubeNetwork,
"-s", podCIDR,
"-j", "MASQUERADE").Run(); err != nil {
return "", fmt.Errorf("Unable to install iptables rule for flannel.")
if err := f.ensureFlannelMasqRule(kubeNetwork, podCIDR); err != nil {
return "", fmt.Errorf("Unable to install flannel masquerade rule: %v", err)
}
return podCIDR, nil
}
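`parseKVConfig` is referenced above but lives outside this hunk. flanneld writes `/var/run/flannel/subnet.env` as plain `KEY=value` lines (`FLANNEL_NETWORK`, `FLANNEL_SUBNET`, `FLANNEL_MTU`, ...), so a stand-in parser, shown here only as a sketch and not the actual implementation, amounts to:

```go
package kubelet

import (
	"io/ioutil"
	"strings"
)

// parseKVConfigSketch reads a flannel subnet.env style file and splits each
// non-empty line on the first '=' into a key/value pair, e.g.
//   FLANNEL_NETWORK=18.16.0.0/16
//   FLANNEL_SUBNET=18.16.14.1/24
func parseKVConfigSketch(filename string) (map[string]string, error) {
	config := map[string]string{}
	data, err := ioutil.ReadFile(filename)
	if err != nil {
		return config, err
	}
	for _, line := range strings.Split(string(data), "\n") {
		kv := strings.SplitN(strings.TrimSpace(line), "=", 2)
		if len(kv) != 2 {
			continue
		}
		config[kv[0]] = strings.Trim(kv[1], "\"")
	}
	return config, nil
}
```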

View File

@ -331,7 +331,7 @@ func NewMainKubelet(
// Flannel options
// TODO: This is currently a dummy server.
flannelServer: NewFlannelServer(),
flannelHelper: NewFlannelHelper(),
useDefaultOverlay: useDefaultOverlay,
}
if klet.kubeClient == nil {
@ -661,7 +661,7 @@ type Kubelet struct {
// Flannel options.
useDefaultOverlay bool
flannelServer *FlannelServer
flannelHelper *FlannelHelper
}
func (kl *Kubelet) allSourcesReady() bool {
@ -2635,7 +2635,7 @@ func (kl *Kubelet) syncNetworkStatus() {
if kl.configureCBR0 {
if kl.useDefaultOverlay {
glog.Infof("(kubelet) handshaking")
podCIDR, err := kl.flannelServer.Handshake()
podCIDR, err := kl.flannelHelper.Handshake()
if err != nil {
glog.Infof("Flannel server handshake failed %v", err)
return
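`Handshake` deliberately returns an error until flanneld has written `subnet.env`, and `syncNetworkStatus` runs periodically, so the handshake is effectively a poll. A self-contained way to exercise the same contract outside the kubelet loop (the `waitForPodCIDR` helper below is illustrative, not part of this change) would be:

```go
package kubelet

import (
	"fmt"
	"time"

	"github.com/golang/glog"
)

// waitForPodCIDR polls FlannelHelper.Handshake until flanneld has written the
// subnet file and the masquerade rule is installed, or the timeout expires.
func waitForPodCIDR(f *FlannelHelper, timeout, interval time.Duration) (string, error) {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		podCIDR, err := f.Handshake()
		if err == nil {
			return podCIDR, nil
		}
		glog.Infof("Flannel handshake not ready: %v", err)
		time.Sleep(interval)
	}
	return "", fmt.Errorf("timed out waiting for flannel subnet file")
}
```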

View File

@ -32,6 +32,8 @@ const (
// ControllerManagerPort is the default port for the controller manager status server.
// May be overridden by a flag at startup.
ControllerManagerPort = 10252
// Port for flannel daemon.
FlannelDaemonPort = 10253
// KubeletReadOnlyPort exposes basic read-only services from the kubelet.
// May be overridden by a flag at startup.
// This is necessary for heapster to collect monitoring stats from the kubelet