Upgrade to Fedora 21, Docker 1.6, clean-up SDN

pull/6/head
derekwaynecarr 2015-06-01 21:19:38 -04:00
parent a161edb396
commit 2168cee414
11 changed files with 137 additions and 94 deletions

Vagrantfile

@@ -59,8 +59,8 @@ $kube_provider_boxes = {
},
:virtualbox => {
'fedora' => {
:box_name => 'kube-fedora20',
:box_url => 'http://opscode-vm-bento.s3.amazonaws.com/vagrant/virtualbox/opscode_fedora-20_chef-provisionerless.box'
:box_name => 'kube-fedora21',
:box_url => 'http://opscode-vm-bento.s3.amazonaws.com/vagrant/virtualbox/opscode_fedora-21_chef-provisionerless.box'
}
},
:libvirt => {
@@ -211,7 +211,6 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
c.vm.provision "shell", run: "always", path: script
end
c.vm.network "private_network", ip: "#{$master_ip}"
c.vm.hostname = ENV['MASTER_NAME']
end
# Kubernetes minion
@@ -229,7 +228,6 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
minion.vm.provision "shell", run: "always", path: script
end
minion.vm.network "private_network", ip: "#{minion_ip}"
minion.vm.hostname = minion_hostname
end
end
end
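Vagrant downloads the new Fedora 21 box on the first vagrant up after this change; it can also be pre-seeded by hand, as in this sketch using the box name and URL above:

    vagrant box add kube-fedora21 \
      http://opscode-vm-bento.s3.amazonaws.com/vagrant/virtualbox/opscode_fedora-21_chef-provisionerless.box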

cluster/saltbase/salt/docker/default

@@ -0,0 +1,8 @@
{% set docker_opts = "" -%}
{% if grains.docker_opts is defined -%}
{% set docker_opts = grains.docker_opts -%}
{% endif -%}
DOCKER_OPTS='{{docker_opts}}'
OPTIONS='{{docker_opts}}'
DOCKER_CERT_PATH=/etc/docker
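For reference, if the docker_opts grain carries the EXTRA_DOCKER_OPTS value from config-default.sh below (an illustrative assumption), the template renders as:

    DOCKER_OPTS='-b=cbr0 --insecure-registry 10.0.0.0/8'
    OPTIONS='-b=cbr0 --insecure-registry 10.0.0.0/8'
    DOCKER_CERT_PATH=/etc/docker

Both variable names are written, presumably so the file works whether the init script reads DOCKER_OPTS or, as on Fedora, OPTIONS.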

cluster/saltbase/salt/docker/init.sls

@@ -8,15 +8,28 @@ bridge-utils:
pkg.installed
{% if grains.os_family == 'RedHat' %}
docker-io:
pkg:
- installed
{{ environment_file }}:
file.managed:
- source: salt://docker/default
- template: jinja
- user: root
- group: root
- mode: 644
- makedirs: true
docker:
service.running:
- enable: True
- require:
- pkg: docker-io
- watch:
- file: {{ environment_file }}
- pkg: docker-io
{% else %}
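Because the service watches both the package and the environment file, Salt restarts docker whenever either changes. A quick way to exercise the state on a node by hand (a sketch, assuming masterless salt-call is available):

    salt-call --local state.sls docker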

cluster/saltbase/salt/kubelet/default

@@ -26,8 +26,9 @@
# the debugging handlers (/run and /exec) to prevent arbitrary code execution on
# the master.
# TODO(roberthbailey): Make this configurable via an env var in config-default.sh
{% set debugging_handlers = "--enable-debugging-handlers=true" -%}
{% if grains.cloud == 'gce' -%}
{% if grains.cloud in ['gce', 'vagrant'] -%}
{% if grains['roles'][0] == 'kubernetes-master' -%}
{% set api_servers_with_port = "" -%}
{% set debugging_handlers = "--enable-debugging-handlers=false" -%}
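With 'vagrant' added alongside 'gce', a Vagrant master now disables the debugging handlers just as a GCE master does; the rendered kubelet flag comes out roughly as:

    --enable-debugging-handlers=false   # on the master
    --enable-debugging-handlers=true    # on all other nodes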

cluster/saltbase/salt/sdn/init.sls

@@ -1,14 +0,0 @@
{% if grains.network_mode is defined and grains.network_mode == 'openvswitch' %}
sdn:
cmd.script:
- source: /kubernetes-vagrant/network_closure.sh
- require:
- pkg: docker-io
- sls: kubelet
- cwd: /
- user: root
- group: root
- shell: /bin/bash
{% endif %}

cluster/saltbase/salt/top.sls

@@ -8,8 +8,6 @@ base:
- docker
{% if grains['cloud'] is defined and grains['cloud'] == 'azure' %}
- openvpn-client
{% else %}
- sdn
{% endif %}
- helpers
- cadvisor
@@ -50,7 +48,6 @@ base:
{% if grains['cloud'] is defined and grains['cloud'] == 'vagrant' %}
- docker
- kubelet
- sdn
{% endif %}
{% if grains['cloud'] is defined and grains['cloud'] == 'aws' %}
- docker

cluster/vagrant/config-default.sh

@@ -70,8 +70,11 @@ ELASTICSEARCH_LOGGING_REPLICAS=1
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
# Extra options to set on the Docker command line. This is useful for setting
# --insecure-registry for local registries.
DOCKER_OPTS=""
# --insecure-registry for local registries, or globally configuring selinux options
# TODO Enable selinux when Fedora 21 repositories get an updated docker package
# see https://bugzilla.redhat.com/show_bug.cgi?id=1216151
#EXTRA_DOCKER_OPTS="-b=cbr0 --selinux-enabled --insecure-registry 10.0.0.0/8"
EXTRA_DOCKER_OPTS="-b=cbr0 --insecure-registry 10.0.0.0/8"
# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS=true
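EXTRA_DOCKER_OPTS reaches the Docker daemon through several hops; a sketch of the chain, quoting the relevant lines from the other diffs in this commit:

    # config-default.sh
    EXTRA_DOCKER_OPTS="-b=cbr0 --insecure-registry 10.0.0.0/8"
    # util.sh writes it into the generated provision scripts:
    echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS-}'"
    # provision-master.sh / provision-minion.sh pass it on as a Salt grain:
    docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")'
    # salt://docker/default templates it into the daemon environment:
    DOCKER_OPTS='{{docker_opts}}'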

cluster/vagrant/provision-master.sh

@@ -17,6 +17,19 @@
# exit on any error
set -e
# Set the host name explicitly
# See: https://github.com/mitchellh/vagrant/issues/2430
hostnamectl set-hostname ${MASTER_NAME}
# Workaround for Vagrant's inability to guess the interface naming sequence
# Tell the system to abandon the new naming scheme and use eth* instead
rm -f /etc/sysconfig/network-scripts/ifcfg-enp0s3
# Stop NetworkManager from managing the interface (needed on Fedora 21+)
NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/
sed -i 's/^NM_CONTROLLED=no/#NM_CONTROLLED=no/' ${NETWORK_CONF_PATH}ifcfg-eth1
systemctl restart network
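A quick way to confirm the workaround took effect after provisioning (a sketch; the device names assume the eth0/eth1 layout Vagrant sets up):

    hostnamectl status        # should report ${MASTER_NAME}
    ip addr show eth1         # private_network interface up with ${MASTER_IP}
    ls /etc/sysconfig/network-scripts/ | grep -c ifcfg-enp   # expect 0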
function release_not_found() {
echo "It looks as if you don't have a compiled version of Kubernetes. If you" >&2
echo "are running from a clone of the git repo, please run ./build/release.sh." >&2
@@ -56,6 +69,10 @@ for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
fi
done
echo "127.0.0.1 localhost" >> /etc/hosts # enables cmds like 'kubectl get pods' on master.
echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts
# Configure the openvswitch network
provision-network
# Update salt configuration
mkdir -p /etc/salt/minion.d
@@ -81,6 +98,7 @@ grains:
roles:
- kubernetes-master
runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")'
docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")'
EOF
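The sed expression doubles embedded single quotes, which is exactly how a single-quoted YAML scalar escapes them, so arbitrary DOCKER_OPTS values survive the trip into the grains file. For example:

    $ echo "it's" | sed -e "s/'/''/g"
    it''s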
mkdir -p /srv/salt-overlay/pillar
@@ -147,6 +165,11 @@ if [[ ! -f "${known_tokens_file}" ]]; then
mkdir -p /srv/salt-overlay/salt/kubelet
kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
(umask u=rw,go= ; echo "{\"BearerToken\": \"$kubelet_token\", \"Insecure\": true }" > $kubelet_auth_file)
kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/kubeconfig"
# Make a kubeconfig file with the token.
(umask 077;
cat > "${kubelet_kubeconfig_file}" <<EOF
)
mkdir -p /srv/salt-overlay/salt/kube-proxy
kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig"

cluster/vagrant/provision-minion.sh

@@ -17,11 +17,25 @@
# exit on any error
set -e
# Set the host name explicitly
# See: https://github.com/mitchellh/vagrant/issues/2430
hostnamectl set-hostname ${MINION_NAME}
# Workaround for Vagrant's inability to guess the interface naming sequence
# Tell the system to abandon the new naming scheme and use eth* instead
rm -f /etc/sysconfig/network-scripts/ifcfg-enp0s3
# Stop NetworkManager from managing the interface (needed on Fedora 21+)
NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/
sed -i 's/^NM_CONTROLLED=no/#NM_CONTROLLED=no/' ${NETWORK_CONF_PATH}ifcfg-eth1
systemctl restart network
# Set up the hosts file so the master can be reached by hostname
if ! grep -q "$MASTER_NAME" /etc/hosts; then
  echo "Adding $MASTER_NAME to hosts file"
  echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts
fi
echo "$MINION_IP $MINION_NAME" >> /etc/hosts
# Set up the hosts file so each minion in the cluster can be reached by hostname
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
@@ -33,6 +47,12 @@ for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
fi
done
# Configure network
provision-network
# Placeholder for any other manifests that may be per-node.
mkdir -p /etc/kubernetes/manifests
# Let the minion know who its master is
# Recover the salt-minion if the salt-master network changes
## auth_timeout - how long we want to wait for a time out
@@ -73,6 +93,7 @@ grains:
- kubernetes-pool
cbr-cidr: '$(echo "$CONTAINER_SUBNET" | sed -e "s/'/''/g")'
hostname_override: '$(echo "$MINION_IP" | sed -e "s/'/''/g")'
docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")'
EOF
# we will run provision to update code each time we test, so we do not want to do salt install each time

cluster/vagrant/provision-network.sh

@@ -14,85 +14,76 @@
# See the License for the specific language governing permissions and
# limitations under the License.
DOCKER_BRIDGE=kbr0
DOCKER_BRIDGE=cbr0
OVS_SWITCH=obr0
DOCKER_OVS_TUN=tun0
TUNNEL_BASE=gre
NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/
POST_NETWORK_SCRIPT_DIR=/kubernetes-vagrant
POST_NETWORK_SCRIPT=${POST_NETWORK_SCRIPT_DIR}/network_closure.sh
# ensure the directory holding POST_NETWORK_SCRIPT exists
mkdir -p $POST_NETWORK_SCRIPT_DIR
# provision-network configures the OVS network for pods
function provision-network {
echo "Verifying network configuration"
# generate the post-configure script to be called by salt as cmd.wait
cat <<EOF > ${POST_NETWORK_SCRIPT}
#!/bin/bash
# Only do this operation if the bridge is not defined
ifconfig | grep -q ${DOCKER_BRIDGE} || {
set -e
echo "It looks like the required network bridge has not yet been created"
# Only do this operation if the bridge is not defined
ifconfig | grep -q kbr0 || {
CONTAINER_SUBNETS=(${MASTER_CONTAINER_SUBNET} ${MINION_CONTAINER_SUBNETS[@]})
CONTAINER_IPS=(${MASTER_IP} ${MINION_IPS[@]})
CONTAINER_SUBNETS=(${MASTER_CONTAINER_SUBNET} ${MINION_CONTAINER_SUBNETS[@]})
CONTAINER_IPS=(${MASTER_IP} ${MINION_IPS[@]})
# Stop docker before making these updates
systemctl stop docker
# Install openvswitch
echo "Installing, enabling prerequisites"
yum install -y openvswitch bridge-utils
systemctl enable openvswitch
systemctl start openvswitch
# Install openvswitch
yum install -y openvswitch
systemctl enable openvswitch
systemctl start openvswitch
# create new docker bridge
echo "Create a new docker bridge"
ip link set dev ${DOCKER_BRIDGE} down || true
brctl delbr ${DOCKER_BRIDGE} || true
brctl addbr ${DOCKER_BRIDGE}
ip link set dev ${DOCKER_BRIDGE} up
ifconfig ${DOCKER_BRIDGE} ${CONTAINER_ADDR} netmask ${CONTAINER_NETMASK} up
# create new docker bridge
ip link set dev ${DOCKER_BRIDGE} down || true
brctl delbr ${DOCKER_BRIDGE} || true
brctl addbr ${DOCKER_BRIDGE}
ip link set dev ${DOCKER_BRIDGE} up
ifconfig ${DOCKER_BRIDGE} ${CONTAINER_ADDR} netmask ${CONTAINER_NETMASK} up
# add ovs bridge
echo "Add ovs bridge"
ovs-vsctl del-br ${OVS_SWITCH} || true
ovs-vsctl add-br ${OVS_SWITCH} -- set Bridge ${OVS_SWITCH} fail-mode=secure
ovs-vsctl set bridge ${OVS_SWITCH} protocols=OpenFlow13
ovs-vsctl del-port ${OVS_SWITCH} ${TUNNEL_BASE}0 || true
ovs-vsctl add-port ${OVS_SWITCH} ${TUNNEL_BASE}0 -- set Interface ${TUNNEL_BASE}0 type=${TUNNEL_BASE} options:remote_ip="flow" options:key="flow" ofport_request=10
# add ovs bridge
ovs-vsctl del-br ${OVS_SWITCH} || true
ovs-vsctl add-br ${OVS_SWITCH} -- set Bridge ${OVS_SWITCH} fail-mode=secure
ovs-vsctl set bridge ${OVS_SWITCH} protocols=OpenFlow13
ovs-vsctl del-port ${OVS_SWITCH} ${TUNNEL_BASE}0 || true
ovs-vsctl add-port ${OVS_SWITCH} ${TUNNEL_BASE}0 -- set Interface ${TUNNEL_BASE}0 type=${TUNNEL_BASE} options:remote_ip="flow" options:key="flow" ofport_request=10
# add tun device
echo "Add tun device"
ovs-vsctl del-port ${OVS_SWITCH} ${DOCKER_OVS_TUN} || true
ovs-vsctl add-port ${OVS_SWITCH} ${DOCKER_OVS_TUN} -- set Interface ${DOCKER_OVS_TUN} type=internal ofport_request=9
brctl addif ${DOCKER_BRIDGE} ${DOCKER_OVS_TUN}
ip link set ${DOCKER_OVS_TUN} up
# add tun device
ovs-vsctl del-port ${OVS_SWITCH} ${DOCKER_OVS_TUN} || true
ovs-vsctl add-port ${OVS_SWITCH} ${DOCKER_OVS_TUN} -- set Interface ${DOCKER_OVS_TUN} type=internal ofport_request=9
brctl addif ${DOCKER_BRIDGE} ${DOCKER_OVS_TUN}
ip link set ${DOCKER_OVS_TUN} up
# add oflow rules, because we do not want to use stp
echo "Add oflow rules"
ovs-ofctl -O OpenFlow13 del-flows ${OVS_SWITCH}
# now loop through all other minions and create persistent gre tunnels
echo "Creating persistent gre tunnels"
NODE_INDEX=0
for remote_ip in "${CONTAINER_IPS[@]}"
do
if [ "\${remote_ip}" == "${NODE_IP}" ]; then
ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,ip,in_port=10,nw_dst=${CONTAINER_SUBNETS[${NODE_INDEX}]},actions=output:9"
ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,arp,in_port=10,nw_dst=${CONTAINER_SUBNETS[${NODE_INDEX}]},actions=output:9"
else
ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,in_port=9,ip,nw_dst=${CONTAINER_SUBNETS[${NODE_INDEX}]},actions=set_field:${remote_ip}->tun_dst,output:10"
ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,in_port=9,arp,nw_dst=${CONTAINER_SUBNETS[${NODE_INDEX}]},actions=set_field:${remote_ip}->tun_dst,output:10"
fi
((NODE_INDEX++)) || true
done
echo "Created persistent gre tunnels"
# add oflow rules, because we do not want to use stp
ovs-ofctl -O OpenFlow13 del-flows ${OVS_SWITCH}
# now loop through all other minions and create persistent gre tunnels
NODE_INDEX=0
for remote_ip in "\${CONTAINER_IPS[@]}"
do
if [ "\${remote_ip}" == "${NODE_IP}" ]; then
ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,ip,in_port=10,nw_dst=\${CONTAINER_SUBNETS[\${NODE_INDEX}]},actions=output:9"
ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,arp,in_port=10,nw_dst=\${CONTAINER_SUBNETS[\${NODE_INDEX}]},actions=output:9"
else
ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,in_port=9,ip,nw_dst=\${CONTAINER_SUBNETS[\${NODE_INDEX}]},actions=set_field:\${remote_ip}->tun_dst,output:10"
ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,in_port=9,arp,nw_dst=\${CONTAINER_SUBNETS[\${NODE_INDEX}]},actions=set_field:\${remote_ip}->tun_dst,output:10"
fi
((NODE_INDEX++)) || true
done
# add ip route rules such that all pod traffic flows through docker bridge and consequently to the gre tunnels
ip route add ${CONTAINER_SUBNET} dev ${DOCKER_BRIDGE} scope link src ${CONTAINER_ADDR}
# modify the docker service file such that it uses the kube docker bridge and not its own
echo "OPTIONS='-b=kbr0 --selinux-enabled ${DOCKER_OPTS}'" >/etc/sysconfig/docker
systemctl daemon-reload
systemctl start docker
systemctl restart kubelet
# add ip route rules such that all pod traffic flows through docker bridge and consequently to the gre tunnels
echo "Add ip route rules such that all pod traffic flows through docker bridge"
ip route add ${CONTAINER_SUBNET} dev ${DOCKER_BRIDGE} scope link src ${CONTAINER_ADDR}
}
echo "Network configuration verified"
}
EOF
chmod +x ${POST_NETWORK_SCRIPT}
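Once Salt runs the generated closure, the result can be checked with standard tooling, as in this sketch using the names defined above:

    ovs-vsctl show                           # obr0 carrying tun0 and gre0
    ovs-ofctl -O OpenFlow13 dump-flows obr0  # a flow pair per node subnet
    brctl show cbr0                          # tun0 attached to the docker bridge
    ip route | grep cbr0                     # container subnet routed over cbr0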

cluster/vagrant/util.sh

@@ -139,9 +139,10 @@ function create-provision-scripts {
echo "DNS_REPLICAS='${DNS_REPLICAS:-}'"
echo "RUNTIME_CONFIG='${RUNTIME_CONFIG:-}'"
echo "ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'"
echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS-}'"
echo "VAGRANT_DEFAULT_PROVIDER='${VAGRANT_DEFAULT_PROVIDER:-}'"
grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-master.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-network.sh"
awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network.sh"
awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-master.sh"
) > "${KUBE_TEMP}/master-start.sh"
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
@@ -150,6 +151,7 @@ function create-provision-scripts {
echo "MASTER_NAME='${MASTER_NAME}'"
echo "MASTER_IP='${MASTER_IP}'"
echo "MINION_NAMES=(${MINION_NAMES[@]})"
echo "MINION_NAME=(${MINION_NAMES[$i]})"
echo "MINION_IPS=(${MINION_IPS[@]})"
echo "MINION_IP='${MINION_IPS[$i]}'"
echo "MINION_ID='$i'"
@@ -161,8 +163,8 @@ function create-provision-scripts {
echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'"
echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS-}'"
echo "VAGRANT_DEFAULT_PROVIDER='${VAGRANT_DEFAULT_PROVIDER:-}'"
grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-minion.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-network.sh"
awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network.sh"
awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-minion.sh"
) > "${KUBE_TEMP}/minion-start-${i}.sh"
done
}
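Two details of this hunk are easy to miss. First, the network script is now concatenated ahead of the master/minion script, so the provision-network function is defined before provision-master.sh and provision-minion.sh call it. Second, comment stripping moved from grep to awk; one plausible motivation (an inference, not stated in the commit) is that grep exits non-zero when it selects no lines, which would abort a set -e script, while awk always exits zero:

    printf '#a\n#b\n' | grep -v '^#'; echo $?   # no output, exit status 1
    printf '#a\n#b\n' | awk '!/^#/'; echo $?    # no output, exit status 0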