Merge pull request #4945 from lhuard1A/libvirt-coreos

Make libvirt-coreos cluster able to run e2e tests
pull/6/head
roberthbailey 2015-03-02 14:35:00 -08:00
commit 167d1b714c
10 changed files with 226 additions and 23 deletions

View File

@ -19,3 +19,44 @@
# Number of minions in the cluster
NUM_MINIONS=${NUM_MINIONS:-3}
export NUM_MINIONS

# The IP of the master
export MASTER_IP="192.168.10.1"

export INSTANCE_PREFIX=kubernetes
export MASTER_NAME="${INSTANCE_PREFIX}-master"

# Map out the IPs, names and container subnets of each minion
export MINION_IP_BASE="192.168.10."
MINION_CONTAINER_SUBNET_BASE="10.10"
MASTER_CONTAINER_NETMASK="255.255.255.0"
MASTER_CONTAINER_ADDR="${MINION_CONTAINER_SUBNET_BASE}.0.1"
MASTER_CONTAINER_SUBNET="${MINION_CONTAINER_SUBNET_BASE}.0.1/24"
CONTAINER_SUBNET="${MINION_CONTAINER_SUBNET_BASE}.0.0/16"

# Minion IPs are carved out of the last octet of MINION_IP_BASE starting
# at .2, so a /24 can hold at most 253 minions.
if [[ "$NUM_MINIONS" -gt 253 ]]; then
  echo "ERROR: Because of how IPs are allocated in ${BASH_SOURCE}, you cannot create more than 253 minions"
  exit 1
fi

for ((i = 0; i < NUM_MINIONS; ++i)); do
  MINION_IPS[$i]="${MINION_IP_BASE}$((i + 2))"
  MINION_NAMES[$i]="${INSTANCE_PREFIX}-minion-$((i + 1))"
  MINION_CONTAINER_SUBNETS[$i]="${MINION_CONTAINER_SUBNET_BASE}.$((i + 1)).1/24"
  MINION_CONTAINER_ADDRS[$i]="${MINION_CONTAINER_SUBNET_BASE}.$((i + 1)).1"
  MINION_CONTAINER_NETMASKS[$i]="255.255.255.0"
done
# The master's container subnet is stored one slot past the last minion entry.
MINION_CONTAINER_SUBNETS[$NUM_MINIONS]=$MASTER_CONTAINER_SUBNET

PORTAL_NET=10.11.0.0/16

# Optional: Install node monitoring.
ENABLE_NODE_MONITORING=true

# Optional: Enable node logging.
ENABLE_NODE_LOGGING=false
LOGGING_DESTINATION=elasticsearch

# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS=true
DNS_SERVER_IP="10.11.0.254"
DNS_DOMAIN="kubernetes.local"
DNS_REPLICAS=1

View File

@ -0,0 +1,19 @@
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Contains configuration values for interacting with the libvirt-coreos cluster in test mode
# (test mode currently reuses the default configuration unchanged)
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/libvirt-coreos/config-default.sh"

View File

@ -0,0 +1,44 @@
# ReplicationController for the SkyDNS cluster-DNS addon: each pod runs
# a local etcd, kube2sky (which mirrors services into etcd) and skydns
# (which serves DNS for the cluster domain on UDP port 53).
# NOTE(review): this template is rendered with eval+echo by render-template,
# so shell variables are substituted at render time and the double quotes
# below are backslash-escaped to survive that rendering.
kind: ReplicationController
apiVersion: v1beta1
id: skydns
namespace: default
labels:
k8s-app: skydns
desiredState:
replicas: ${DNS_REPLICAS}
replicaSelector:
k8s-app: skydns
podTemplate:
labels:
k8s-app: skydns
desiredState:
manifest:
version: v1beta2
id: skydns
dnsPolicy: "Default" # Don't use cluster DNS.
containers:
- name: etcd
image: quay.io/coreos/etcd:latest
command: [
\"/etcd\",
\"-bind-addr=127.0.0.1\",
\"-peer-bind-addr=127.0.0.1\",
]
- name: kube2sky
image: kubernetes/kube2sky:1.0
command: [
# entrypoint = \"/kube2sky\",
\"-domain=${DNS_DOMAIN}\",
]
- name: skydns
image: kubernetes/skydns:2014-12-23-001
command: [
# entrypoint = \"/skydns\",
\"-machines=http://localhost:4001\",
\"-addr=0.0.0.0:53\",
\"-domain=${DNS_DOMAIN}.\",
]
ports:
- name: dns
containerPort: 53
protocol: UDP

View File

@ -0,0 +1,12 @@
# Service for the SkyDNS cluster-DNS addon: exposes the skydns pods on
# UDP port 53 at the fixed portal IP chosen in config-default.sh.
# NOTE(review): rendered with eval+echo by render-template, so shell
# variables are substituted at render time.
kind: Service
apiVersion: v1beta1
id: skydns
namespace: default
protocol: UDP
port: 53
portalIP: ${DNS_SERVER_IP}
containerPort: 53
labels:
k8s-app: skydns
selector:
k8s-app: skydns

View File

@ -16,9 +16,9 @@ write_files:
coreos:
etcd:
name: ${name}
addr: 192.168.10.$(($i+1)):4001
addr: ${public_ip}:4001
bind-addr: 0.0.0.0
peer-addr: 192.168.10.$(($i+1)):7001
peer-addr: ${public_ip}:7001
# peers: {etcd_peers}
discovery: ${discovery}
units:
@ -30,7 +30,7 @@ coreos:
MACAddress=52:54:00:00:00:${i}
[Network]
Address=192.168.10.$(($i+1))/24
Address=${public_ip}/24
DNS=192.168.10.254
Gateway=192.168.10.254
- name: cbr0.netdev
@ -46,10 +46,10 @@ coreos:
Name=cbr0
[Network]
Address=10.10.$(($i+1)).1/24
Address=${MINION_CONTAINER_SUBNETS[$i]}
[Route]
Destination=10.10.0.0/16
Destination=${CONTAINER_SUBNET}
- name: cbr0-interface.network
command: start
content: |
@ -66,7 +66,7 @@ coreos:
Description=NAT non container traffic
[Service]
ExecStart=/usr/sbin/iptables -w -t nat -A POSTROUTING -o eth0 -j MASQUERADE ! -d 10.10.0.0/16
ExecStart=/usr/sbin/iptables -w -t nat -A POSTROUTING -o eth0 -j MASQUERADE ! -d ${CONTAINER_SUBNET}
RemainAfterExit=yes
Type=oneshot
- name: etcd.service

View File

@ -18,7 +18,7 @@ coreos:
--port=8080 \
--etcd_servers=http://127.0.0.1:4001 \
--kubelet_port=10250 \
--portal_net=10.10.254.0/24
--portal_net=${PORTAL_NET}
Restart=always
RestartSec=2
@ -59,5 +59,25 @@ coreos:
Restart=always
RestartSec=2
[Install]
WantedBy=multi-user.target
- name: kube-addons.service
command: start
content: |
[Unit]
After=opt-kubernetes.mount kube-apiserver.service
ConditionPathIsDirectory=/opt/kubernetes/addons
Description=Kubernetes addons
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
Requires=opt-kubernetes.mount kube-apiserver.service
[Service]
Type=oneshot
ExecStartPre=/bin/bash -c 'while [[ \"\$(curl -s http://127.0.0.1:8080/healthz)\" != \"ok\" ]]; do sleep 1; done'
ExecStartPre=/bin/sleep 10
ExecStart=/opt/kubernetes/bin/kubectl create -f /opt/kubernetes/addons
ExecStop=/opt/kubernetes/bin/kubectl stop -f /opt/kubernetes/addons
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target

View File

@ -15,8 +15,12 @@ coreos:
[Service]
ExecStart=/opt/kubernetes/bin/kubelet \
--address=0.0.0.0 \
--hostname_override=192.168.10.$(($i+1)) \
--etcd_servers=http://127.0.0.1:4001
--hostname_override=${MINION_IPS[$i]} \
--etcd_servers=http://127.0.0.1:4001 \
--api_servers=http://${MASTER_IP}:8080 \
$( [[ "$ENABLE_CLUSTER_DNS" == "true" ]] && echo "--cluster_dns=${DNS_SERVER_IP}" ) \
$( [[ "$ENABLE_CLUSTER_DNS" == "true" ]] && echo "--cluster_domain=${DNS_DOMAIN}" ) \
--config=/opt/kubernetes/manifests
Restart=always
RestartSec=2
@ -35,7 +39,7 @@ coreos:
[Service]
ExecStart=/opt/kubernetes/bin/kube-proxy \
--etcd_servers=http://127.0.0.1:4001 \
--master=http://192.168.10.1:7080
--master=http://${MASTER_IP}:7080
Restart=always
RestartSec=2

View File

@ -16,7 +16,7 @@
# A library of helper functions that each provider hosting Kubernetes must implement to use cluster/kube-*.sh scripts.
readonly KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
readonly ROOT=$(dirname "${BASH_SOURCE}")
source $ROOT/${KUBE_CONFIG_FILE:-"config-default.sh"}
@ -38,8 +38,8 @@ function join {
# Must ensure that the following ENV vars are set
function detect-master {
KUBE_MASTER_IP=192.168.10.1
KUBE_MASTER=kubernetes-master
KUBE_MASTER_IP=$MASTER_IP
KUBE_MASTER=$MASTER_NAME
export KUBERNETES_MASTER=http://$KUBE_MASTER_IP:8080
echo "KUBE_MASTER_IP: $KUBE_MASTER_IP"
echo "KUBE_MASTER: $KUBE_MASTER"
@ -47,10 +47,7 @@ function detect-master {
# Get minion IP addresses and store in KUBE_MINION_IP_ADDRESSES[]
function detect-minions {
for (( i = 0 ; i < $NUM_MINIONS ; i++ )); do
KUBE_MINION_IP_ADDRESSES[$i]=192.168.10.$(($i+2))
done
echo "KUBE_MINION_IP_ADDRESSES=[${KUBE_MINION_IP_ADDRESSES[@]}]"
KUBE_MINION_IP_ADDRESSES=("${MINION_IPS[@]}")
}
# Verify prereqs on host machine
@ -127,6 +124,25 @@ function initialize-pool {
mkdir -p "$POOL_PATH/kubernetes"
kube-push
mkdir -p "$POOL_PATH/kubernetes/manifests"
if [[ "$ENABLE_NODE_MONITORING" == "true" ]]; then
cp "$KUBE_ROOT/cluster/saltbase/salt/cadvisor/cadvisor.manifest" "$POOL_PATH/kubernetes/manifests"
fi
if [[ "$ENABLE_NODE_LOGGING" == "true" ]]; then
if [[ "$LOGGING_DESTINATION" == "elasticsearch" ]]; then
cp "$KUBE_ROOT/cluster/saltbase/salt/fluentd-es/fluentd-es.manifest" "$POOL_PATH/kubernetes/manifests"
elif [[ "$LOGGING_DESTINATION" == "gcp" ]]; then
cp "$KUBE_ROOT/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.manifest" "$POOL_PATH/kubernetes/manifests"
fi
fi
mkdir -p "$POOL_PATH/kubernetes/addons"
if [[ "$ENABLE_CLUSTER_DNS" == "true" ]]; then
render-template "$ROOT/skydns-svc.yaml" > "$POOL_PATH/kubernetes/addons/skydns-svc.yaml"
render-template "$ROOT/skydns-rc.yaml" > "$POOL_PATH/kubernetes/addons/skydns-rc.yaml"
fi
virsh pool-refresh $POOL
}
@ -146,6 +162,25 @@ function render-template {
eval "echo \"$(cat $1)\""
}
# Poll the apiserver until every expected minion reports a Ready condition.
# Globals:   KUBE_ROOT (read), NUM_MINIONS (read)
# Outputs:   progress messages on stdout
# Returns:   0 once all minions are Ready, 1 after the poll budget
#            (50 iterations x 0.5s = ~25s) is exhausted
function wait-cluster-readiness {
  echo "Wait for cluster readiness"
  local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"

  local timeout=50
  # declared local so the counter does not leak into the caller's scope
  local nb_ready_minions
  while [[ $timeout -ne 0 ]]; do
    # Count minions whose status conditions include "Ready"; the trailing
    # "|| true" keeps an empty/failed kubectl call from aborting under set -e.
    nb_ready_minions=$("${kubectl}" get minions -o template -t "{{range.items}}{{range.status.conditions}}{{.kind}}{{end}}:{{end}}" 2>/dev/null | tr ':' '\n' | grep -c Ready || true)
    echo "Nb ready minions: $nb_ready_minions / $NUM_MINIONS"
    if [[ "$nb_ready_minions" -eq "$NUM_MINIONS" ]]; then
      return 0
    fi
    timeout=$(($timeout-1))
    sleep .5
  done
  return 1
}
# Instantiate a kubernetes cluster
function kube-up {
detect-master
@ -161,12 +196,15 @@ function kube-up {
local i
for (( i = 0 ; i <= $NUM_MINIONS ; i++ )); do
if [[ $i -eq 0 ]]; then
if [[ $i -eq $NUM_MINIONS ]]; then
type=master
name=$MASTER_NAME
public_ip=$MASTER_IP
else
type=minion-$(printf "%02d" $i)
name=${MINION_NAMES[$i]}
public_ip=${MINION_IPS[$i]}
fi
name=kubernetes_$type
image=$name.img
config=kubernetes_config_$type
@ -181,6 +219,15 @@ function kube-up {
virsh create $domain_xml
rm $domain_xml
done
wait-cluster-readiness
echo "Kubernetes cluster is running. The master is running at:"
echo
echo " http://${KUBE_MASTER_IP}:8080"
echo
echo "You can control the Kubernetes cluster with: 'cluster/kubectl.sh'"
echo "You can connect on the master with: 'ssh core@${KUBE_MASTER_IP}'"
}
# Delete a kubernetes cluster
@ -230,7 +277,7 @@ function test-setup {
# Execute after running tests to perform any required clean-up
function test-teardown {
echo "TODO"
kube-down
}
# Set the {KUBE_USER} and {KUBE_PASSWORD} environment values required to interact with provider
@ -247,6 +294,11 @@ function teardown-monitoring-firewall {
echo "TODO" 1>&2
}
# Perform preparations required to run e2e tests.
# libvirt-coreos needs none, so this is a documented no-op; the message
# goes to stderr so it never pollutes captured stdout.
function prepare-e2e {
  echo "libvirt-coreos doesn't need special preparations for e2e tests" 1>&2
}
function setup-logging-firewall {
echo "TODO: setup logging"
}

View File

@ -69,7 +69,7 @@ for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
fi
name="${MINION_NAMES[$i]}"
if [ "$KUBERNETES_PROVIDER" != "vsphere" ] && [ "$KUBERNETES_PROVIDER" != "vagrant" ]; then
if [ "$KUBERNETES_PROVIDER" != "vsphere" ] && [ "$KUBERNETES_PROVIDER" != "vagrant" ] && [ "$KUBERNETES_PROVIDER" != "libvirt-coreos" ]; then
# Grab fully qualified name
name=$(grep "${MINION_NAMES[$i]}\." "${MINIONS_FILE}")
fi
@ -79,8 +79,13 @@ for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
attempt=0
while true; do
echo -n "Attempt $((attempt+1)) at checking Kubelet installation on node ${MINION_NAMES[$i]} ..."
curl_output=$(curl -s --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" \
if [ "$KUBERNETES_PROVIDER" != "libvirt-coreos" ]; then
curl_output=$(curl -s --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" \
"https://${KUBE_MASTER_IP}/api/v1beta1/proxy/minions/${name}/healthz")
else
curl_output=$(curl -s \
"http://${KUBE_MASTER_IP}:8080/api/v1beta1/proxy/minions/${name}/healthz")
fi
if [[ "${curl_output}" != "ok" ]]; then
if (( attempt > 5 )); then
echo

View File

@ -103,10 +103,16 @@ else
auth_config=()
fi
if [[ "$KUBERNETES_PROVIDER" == "libvirt-coreos" ]]; then
host="http://${KUBE_MASTER_IP-}:8080"
else
host="https://${KUBE_MASTER_IP-}"
fi
# Use the kubectl binary from the same directory as the e2e binary.
export PATH=$(dirname "${e2e}"):"${PATH}"
"${e2e}" "${auth_config[@]:+${auth_config[@]}}" \
--host="https://${KUBE_MASTER_IP-}" \
--host="$host" \
--provider="${KUBERNETES_PROVIDER}" \
--gce_project="${PROJECT:-}" \
--gce_zone="${ZONE:-}" \