Merge pull request #19489 from cloudnativeapps/vsphere-support-1

Auto commit by PR queue bot
k8s-merge-robot 2016-01-26 22:27:55 -08:00
commit 86dd321c73
16 changed files with 360 additions and 59 deletions

View File

@ -0,0 +1 @@
deb https://apt.dockerproject.org/repo debian-{{ salt['grains.get']('oscodename') }} main

View File

@ -1,7 +1,7 @@
{% if pillar.get('is_systemd') %}
{% set environment_file = '/etc/sysconfig/docker' %}
{% else %}
{% set environment_file = '/etc/default/docker' %}
{% endif %}
bridge-utils:
@ -47,6 +47,96 @@ docker:
- pkg: docker-io
{% endif %}
{% elif grains.cloud is defined and grains.cloud == 'vsphere' and grains.os == 'Debian' and grains.osrelease_info[0] >=8 %}
{% if pillar.get('is_systemd') %}
{{ pillar.get('systemd_system_path') }}/docker.service:
file.managed:
- source: salt://docker/docker.service
- template: jinja
- user: root
- group: root
- mode: 644
- defaults:
environment_file: {{ environment_file }}
# The docker service.running block below doesn't work reliably
# Instead we run our script which e.g. does a systemd daemon-reload
# But we keep the service block below, so it can be used by dependencies
# TODO: Fix this
fix-service-docker:
cmd.wait:
- name: /opt/kubernetes/helpers/services bounce docker
- watch:
- file: {{ pillar.get('systemd_system_path') }}/docker.service
- file: {{ environment_file }}
{% endif %}
{{ environment_file }}:
file.managed:
- source: salt://docker/docker-defaults
- template: jinja
- user: root
- group: root
- mode: 644
- makedirs: true
- require:
- pkg: docker-engine
'apt-key':
cmd.run:
- name: 'apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D'
- unless: 'apt-key finger | grep "5811 8E89"'
'apt-update':
cmd.wait:
- name: '/usr/bin/apt-get update -y'
- require:
- cmd: 'apt-key'
lxc-docker:
pkg:
- purged
docker-io:
pkg:
- purged
cbr0:
network.managed:
- enabled: True
- type: bridge
- proto: dhcp
- ports: none
- bridge: cbr0
- delay: 0
- bypassfirewall: True
- require_in:
- service: docker
/etc/apt/sources.list.d/docker.list:
file.managed:
- source: salt://docker/docker.list
- template: jinja
- user: root
- group: root
- mode: 644
- require:
- cmd: 'apt-update'
docker-engine:
pkg:
- installed
- require:
- file: /etc/apt/sources.list.d/docker.list
docker:
service.running:
- enable: True
- require:
- file: {{ environment_file }}
- watch:
- file: {{ environment_file }}
{% else %}
@ -216,3 +306,4 @@ docker:
- pkg: docker-upgrade
{% endif %}
{% endif %} # end grains.os_family != 'RedHat'
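Taken together, the vSphere/Debian branch above encodes roughly the following manual sequence. This is a sketch for orientation only, not part of the change: the key ID, repository line, and package names come from the states, and `$(lsb_release -cs)` stands in for the `oscodename` grain.

```sh
# Sketch of what the Salt states above do on a Debian 8 node (illustrative only).
apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 \
  --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
echo "deb https://apt.dockerproject.org/repo debian-$(lsb_release -cs) main" \
  > /etc/apt/sources.list.d/docker.list
apt-get update -y
apt-get purge -y lxc-docker docker-io    # the states purge these conflicting packages
apt-get install -y docker-engine
# Per the comment in the state file, the helper script then does a systemd
# daemon-reload and bounces the docker service.
```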

View File

@ -133,7 +133,7 @@ addon-dir-create:
{% endif %}
{% if pillar.get('enable_node_logging', '').lower() == 'true'
and pillar.get('logging_destination').lower() == 'elasticsearch'
and pillar.get('logging_destination', '').lower() == 'elasticsearch'
and pillar.get('enable_cluster_logging', '').lower() == 'true' %}
/etc/kubernetes/addons/fluentd-elasticsearch:
file.recurse:

View File

@ -1,5 +1,5 @@
{% if grains.cloud is defined %}
{% if grains.cloud in ['aws', 'gce', 'vagrant'] %}
{% if grains.cloud in ['aws', 'gce', 'vagrant', 'vsphere'] %}
# TODO: generate and distribute tokens on other cloud providers.
/srv/kubernetes/known_tokens.csv:
file.managed:
@ -9,7 +9,7 @@
{% endif %}
{% endif %}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant' ] %}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere' ] %}
/srv/kubernetes/basic_auth.csv:
file.managed:
- source: salt://kube-apiserver/basic_auth.csv

View File

@ -9,7 +9,7 @@
{% set cloud_config_volume = "" -%}
{% if grains.cloud is defined -%}
{% if grains.cloud != 'vagrant' -%}
{% if grains.cloud not in ['vagrant', 'vsphere'] -%}
{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
{% endif -%}
@ -57,7 +57,7 @@
{% set client_ca_file = "" -%}
{% set secure_port = "6443" -%}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant' ] %}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere' ] %}
{% set secure_port = "443" -%}
{% set client_ca_file = "--client-ca-file=/srv/kubernetes/ca.crt" -%}
{% endif -%}
@ -71,12 +71,12 @@
{% endif -%}
{% if grains.cloud is defined -%}
{% if grains.cloud in [ 'aws', 'gce', 'vagrant' ] -%}
{% if grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere' ] -%}
{% set token_auth_file = "--token-auth-file=/srv/kubernetes/known_tokens.csv" -%}
{% endif -%}
{% endif -%}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant'] %}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere'] %}
{% set basic_auth_file = "--basic-auth-file=/srv/kubernetes/basic_auth.csv" -%}
{% endif -%}

View File

@ -27,7 +27,7 @@
{% set cloud_config_volume = "" -%}
{% if grains.cloud is defined -%}
{% if grains.cloud != 'vagrant' -%}
{% if grains.cloud not in ['vagrant', 'vsphere'] -%}
{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
{% endif -%}
{% set service_account_key = "--service-account-private-key-file=/srv/kubernetes/server.key" -%}
@ -41,7 +41,7 @@
{% set root_ca_file = "" -%}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant' ] %}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere' ] %}
{% set root_ca_file = "--root-ca-file=/srv/kubernetes/ca.crt" -%}
{% endif -%}

View File

@ -5,7 +5,7 @@
{% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() -%}
{% set api_servers = "--master=https://" + ips[0][0] -%}
{% endif -%}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant' ] %}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere' ] %}
{% set api_servers_with_port = api_servers -%}
{% else -%}
{% set api_servers_with_port = api_servers + ":6443" -%}

View File

@ -16,7 +16,7 @@
{% endif -%}
# TODO: remove nginx for other cloud providers.
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant' ] %}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere' ] %}
{% set api_servers_with_port = api_servers -%}
{% else -%}
{% set api_servers_with_port = api_servers + ":6443" -%}
@ -27,7 +27,7 @@
{% set debugging_handlers = "--enable-debugging-handlers=true" -%}
{% if grains['roles'][0] == 'kubernetes-master' -%}
{% if grains.cloud in ['aws', 'gce', 'vagrant'] -%}
{% if grains.cloud in ['aws', 'gce', 'vagrant', 'vsphere'] -%}
# Unless given a specific directive, disable registration for the kubelet
# running on the master.
@ -46,7 +46,7 @@
{% endif -%}
{% set cloud_provider = "" -%}
{% if grains.cloud is defined and grains.cloud != 'vagrant' -%}
{% if grains.cloud is defined and grains.cloud not in ['vagrant', 'vsphere'] -%}
{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
{% endif -%}
@ -105,7 +105,11 @@
{% set cgroup_root = "" -%}
{% if grains['os_family'] == 'Debian' -%}
{% set system_container = "--system-container=/system" -%}
{% set cgroup_root = "--cgroup-root=/" -%}
{% if pillar.get('is_systemd') %}
{% set cgroup_root = "--cgroup-root=docker" -%}
{% else %}
{% set cgroup_root = "--cgroup-root=/" -%}
{% endif %}
{% endif -%}
{% if grains['oscodename'] == 'vivid' -%}
{% set cgroup_root = "--cgroup-root=docker" -%}

View File

@ -3,8 +3,14 @@
[ "$IFACE" == "eth0" ] || exit 0
{% for host, ip_addrs in salt['mine.get']('roles:kubernetes-pool', 'network.ip_addrs', 'grain').items() %}
{% if ip_addrs[0] != salt['network.ip_addrs']('eth0')[0] %}
{% set cidr = salt['mine.get'](host, 'grains.items')[host]['cbr-cidr'] %}
route del -net {{ cidr }}
{% endif %}
{% set network_ipaddr = None %}
{% if salt['network.ip_addrs']('eth0') %}
{% set network_ipaddr = salt['network.ip_addrs']('eth0')[0] %}
{% elif salt['network.ip_addrs']('local') %}
{% set network_ipaddr = salt['network.ip_addrs']('local')[0] %}
{% endif %}
{% if network_ipaddr and ip_addrs[0] != network_ipaddr %}
{% set cidr = salt['mine.get'](host, 'grains.items')[host]['cbr-cidr'] %}
route del -net {{ cidr }}
{% endif %}
{% endfor %}

View File

@ -3,8 +3,14 @@
[ "$IFACE" == "eth0" ] || exit 0
{% for host, ip_addrs in salt['mine.get']('roles:kubernetes-pool', 'network.ip_addrs', 'grain').items() %}
{% if ip_addrs[0] != salt['network.ip_addrs']('eth0')[0] %}
{% set cidr = salt['mine.get'](host, 'grains.items')[host]['cbr-cidr'] %}
route add -net {{ cidr }} gw {{ ip_addrs[0] }}
{% endif %}
{% set network_ipaddr = None %}
{% if salt['network.ip_addrs']('eth0') %}
{% set network_ipaddr = salt['network.ip_addrs']('eth0')[0] %}
{% elif salt['network.ip_addrs']('local') %}
{% set network_ipaddr = salt['network.ip_addrs']('local')[0] %}
{% endif %}
{% if network_ipaddr and ip_addrs[0] != network_ipaddr %}
{% set cidr = salt['mine.get'](host, 'grains.items')[host]['cbr-cidr'] %}
route add -net {{ cidr }} gw {{ ip_addrs[0] }}
{% endif %}
{% endfor %}
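Rendered on a node, each iteration of the loop above collapses to a single `route` command. A hypothetical rendering for one peer node (the CIDR and gateway are placeholder values):

```sh
# Placeholder values for one peer's cbr-cidr and first IP address:
route add -net 10.244.1.0/24 gw 10.0.0.12
```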

View File

@ -51,7 +51,7 @@ base:
- kube-controller-manager
- kube-scheduler
- supervisor
{% if grains['cloud'] is defined and not grains.cloud in [ 'aws', 'gce', 'vagrant' ] %}
{% if grains['cloud'] is defined and not grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere'] %}
- nginx
{% endif %}
- cadvisor
@ -69,7 +69,7 @@ base:
- logrotate
{% endif %}
- kube-addons
{% if grains['cloud'] is defined and grains['cloud'] in [ 'vagrant', 'gce', 'aws' ] %}
{% if grains['cloud'] is defined and grains['cloud'] in [ 'vagrant', 'gce', 'aws', 'vsphere' ] %}
- docker
- kubelet
{% endif %}

View File

@ -20,9 +20,8 @@ SSH_OPTS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=E
#export GOVC_URL=
#export GOVC_DATACENTER=
#export GOVC_DATASTORE=
#export GOVC_RESOURCE_POOL=
#export GOVC_NETWORK=
#export GOVC_GUEST_LOGIN='kube:kube'
#export GOVC_GUEST_LOGIN=
# Set GOVC_INSECURE if the host in GOVC_URL is using a certificate that cannot
# be verified (i.e. a self-signed certificate), but IS trusted.
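# For example: export GOVC_INSECURE=1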

View File

@ -14,6 +14,93 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#generate token files
KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
if [[ ! -f "${known_tokens_file}" ]]; then
mkdir -p /srv/salt-overlay/salt/kube-apiserver
known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
(umask u=rw,go= ;
echo "$KUBELET_TOKEN,kubelet,kubelet" > $known_tokens_file;
echo "$KUBE_PROXY_TOKEN,kube_proxy,kube_proxy" >> $known_tokens_file)
mkdir -p /srv/salt-overlay/salt/kubelet
kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
(umask u=rw,go= ; echo "{\"BearerToken\": \"$KUBELET_TOKEN\", \"Insecure\": true }" > $kubelet_auth_file)
kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/kubeconfig"
mkdir -p /srv/salt-overlay/salt/kubelet
(umask 077;
cat > "${kubelet_kubeconfig_file}" << EOF
apiVersion: v1
kind: Config
clusters:
- cluster:
insecure-skip-tls-verify: true
name: local
contexts:
- context:
cluster: local
user: kubelet
name: service-account-context
current-context: service-account-context
users:
- name: kubelet
user:
token: ${KUBELET_TOKEN}
EOF
)
mkdir -p /srv/salt-overlay/salt/kube-proxy
kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig"
# Make a kubeconfig file with the token.
# TODO(etune): put apiserver certs into secret too, and reference from authfile,
# so that "Insecure" is not needed.
(umask 077;
cat > "${kube_proxy_kubeconfig_file}" << EOF
apiVersion: v1
kind: Config
clusters:
- cluster:
insecure-skip-tls-verify: true
name: local
contexts:
- context:
cluster: local
user: kube-proxy
name: service-account-context
current-context: service-account-context
users:
- name: kube-proxy
user:
token: ${KUBE_PROXY_TOKEN}
EOF
)
# Generate tokens for other "service accounts". Append to known_tokens.
#
# NB: If this list ever changes, this script actually has to
# change to detect the existence of this file, kill any deleted
# old tokens and add any new tokens (to handle the upgrade case).
service_accounts=("system:scheduler" "system:controller_manager" "system:logging" "system:monitoring" "system:dns")
for account in "${service_accounts[@]}"; do
token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
echo "${token},${account},${account}" >> "${known_tokens_file}"
done
fi
readonly BASIC_AUTH_FILE="/srv/salt-overlay/salt/kube-apiserver/basic_auth.csv"
if [ ! -e "${BASIC_AUTH_FILE}" ]; then
mkdir -p /srv/salt-overlay/salt/kube-apiserver
(umask 077;
echo "${KUBE_PASSWORD},${KUBE_USER},admin" > "${BASIC_AUTH_FILE}")
fi
# Create the overlay files for the salt tree. We create these in a separate
# place so that we can blow away the rest of the salt configs on a kube-push and
# re-apply these.
@ -23,17 +110,17 @@ cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
node_instance_prefix: $NODE_INSTANCE_PREFIX
service_cluster_ip_range: $SERVICE_CLUSTER_IP_RANGE
enable_cluster_monitoring: $ENABLE_CLUSTER_MONITORING
enable_cluster_logging: $ENABLE_CLUSTER_LOGGING
enable_cluster_ui: $ENABLE_CLUSTER_UI
enable_node_logging: $ENABLE_NODE_LOGGING
enable_cluster_monitoring: "${ENABLE_CLUSTER_MONITORING:-none}"
enable_cluster_logging: "${ENABLE_CLUSTER_LOGGING:-false}"
enable_cluster_ui: "${ENABLE_CLUSTER_UI:-false}"
enable_node_logging: "${ENABLE_NODE_LOGGING:-false}"
logging_destination: $LOGGING_DESTINATION
elasticsearch_replicas: $ELASTICSEARCH_LOGGING_REPLICAS
enable_cluster_dns: $ENABLE_CLUSTER_DNS
dns_replicas: $DNS_REPLICAS
enable_cluster_dns: "${ENABLE_CLUSTER_DNS:-false}"
dns_replicas: ${DNS_REPLICAS:-1}
dns_server: $DNS_SERVER_IP
dns_domain: $DNS_DOMAIN
e2e_storage_test_environment: $E2E_STORAGE_TEST_ENVIRONMENT
e2e_storage_test_environment: "${E2E_STORAGE_TEST_ENVIRONMENT:-false}"
EOF
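The `${VAR:-default}` expansions above substitute the default only when the variable is unset or empty; a quick sketch of the behavior:

```sh
unset ENABLE_CLUSTER_DNS
echo "${ENABLE_CLUSTER_DNS:-false}"   # prints "false": default applied when unset
ENABLE_CLUSTER_DNS=true
echo "${ENABLE_CLUSTER_DNS:-false}"   # prints "true": an existing value wins
```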

View File

@ -42,6 +42,7 @@ grains:
- kubernetes-pool
- kubernetes-pool-vsphere
cbr-cidr: $NODE_IP_RANGE
cloud: vsphere
EOF
# Install Salt

View File

@ -18,6 +18,7 @@
# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/vsphere/config-common.sh"
source "${KUBE_ROOT}/cluster/vsphere/${KUBE_CONFIG_FILE-"config-default.sh"}"
@ -30,6 +31,7 @@ source "${KUBE_ROOT}/cluster/common.sh"
# Vars set:
# KUBE_MASTER
# KUBE_MASTER_IP
function detect-master {
KUBE_MASTER=${MASTER_NAME}
if [[ -z "${KUBE_MASTER_IP-}" ]]; then
@ -42,7 +44,7 @@ function detect-master {
echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
}
# Detect the information about the minions
# Detect the information about the nodes
#
# Assumed vars:
# NODE_NAMES
@ -51,16 +53,16 @@ function detect-master {
function detect-nodes {
KUBE_NODE_IP_ADDRESSES=()
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
local minion_ip=$(govc vm.ip ${NODE_NAMES[$i]})
if [[ -z "${minion_ip-}" ]] ; then
local nodeip=$(govc vm.ip ${NODE_NAMES[$i]})
if [[ -z "${nodeip-}" ]] ; then
echo "Did not find ${NODE_NAMES[$i]}" >&2
else
echo "Found ${NODE_NAMES[$i]} at ${minion_ip}"
KUBE_NODE_IP_ADDRESSES+=("${minion_ip}")
echo "Found ${NODE_NAMES[$i]} at ${nodeip}"
KUBE_NODE_IP_ADDRESSES+=("${nodeip}")
fi
done
if [[ -z "${KUBE_NODE_IP_ADDRESSES-}" ]]; then
echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'" >&2
echo "Could not detect Kubernetes nodes. Make sure you've launched a cluster with 'kube-up.sh'" >&2
exit 1
fi
}
@ -159,7 +161,7 @@ function kube-scp {
scp ${SSH_OPTS-} "${src}" "kube@${host}:${dst}"
}
# Instantiate a generic kubernetes virtual machine (master or minion)
# Instantiate a generic kubernetes virtual machine (master or node)
#
# Usage:
# kube-up-vm VM_NAME [options to pass to govc vm.create]
@ -199,7 +201,7 @@ function kube-up-vm {
/home/kube/.ssh/authorized_keys
}
# Kick off a local script on a kubernetes virtual machine (master or minion)
# Kick off a local script on a kubernetes virtual machine (master or node)
#
# Usage:
# kube-run VM_NAME LOCAL_FILE
@ -208,12 +210,100 @@ function kube-run {
local file="$2"
local dst="/tmp/$(basename "${file}")"
govc guest.upload -vm="${vm_name}" -f -perm=0755 "${file}" "${dst}"
echo "uploaded ${file} to ${dst}"
local vm_ip
vm_ip=$(govc vm.ip "${vm_name}")
kube-ssh ${vm_ip} "nohup sudo ${dst} < /dev/null 1> ${dst}.out 2> ${dst}.err &"
}
#
# Run the given command remotely and check whether the named kube component is running.
# Keep checking until the timeout is hit (default: 300s).
#
# Usage:
#   kube-check 10.0.0.1 cmd timeout
function kube-check {
nodeip=$1
cmd=$2
sleepstep=5
if [[ $# -lt 3 || -z $3 ]]; then
timeout=300
else
timeout=$3
fi
let effective_timeout=($timeout/$sleepstep)
attempt=0
echo
printf "This may take several minutes. Bound to $effective_timeout attemmps"
while true; do
local rc=0
output=$(kube-ssh ${nodeip} "${cmd}") || rc=1
if [[ $rc != 0 ]]; then
if (( $attempt == $effective_timeout )); then
echo
echo "(Failed) rc: $rc Output: ${output}"
echo
echo -e "${cmd} failed to start on ${nodeip}. Your cluster is unlikely" >&2
echo "to work correctly. You may have to debug it by logging in." >&2
echo
exit 1
fi
else
echo
echo -e "[${cmd}] passed"
echo
break
fi
printf "."
attempt=$(($attempt+1))
sleep $sleepstep
done
}
#
# Verify that a process matching the given pattern is running on the remote host.
# Checks up to $max_attempt times; on failure, prints the output and exits 1.
#
# Usage:
# remote-pgrep 10.0.0.1 salt-master
#
function remote-pgrep {
nodeip=$1
regex=$2
max_attempt=60
printf "This may take several minutes. Bound to $max_attempt attemmps"
attempt=0
while true; do
local rc=0
output=$(kube-ssh ${nodeip} pgrep ${regex}) || rc=1
if [[ $rc != 0 ]]; then
if (( $attempt == $max_attempt )); then
echo
echo "(Failed) rc: $rc, output:${output}"
echo
echo -e "${regex} failed to start on ${nodeip} after checking for $attempt attempts. Your cluster is unlikely" >&2
echo "to work correctly. You may have to debug it by logging in." >&2
echo
exit 1
fi
else
echo
echo -e "[${regex} running]"
echo
break
fi
printf "."
attempt=$(($attempt+1))
sleep 10
done
}
# Instantiate a kubernetes cluster
#
# Assumed vars:
@ -240,13 +330,15 @@ function kube-up {
echo "cd /home/kube/cache/kubernetes-install"
echo "readonly MASTER_NAME='${MASTER_NAME}'"
echo "readonly INSTANCE_PREFIX='${INSTANCE_PREFIX}'"
echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-minion'"
echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-node'"
echo "readonly SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
echo "readonly ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'"
echo "readonly LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'"
echo "readonly ENABLE_CLUSTER_DNS='${ENABLE_CLUSTER_DNS:-false}'"
echo "readonly DNS_SERVER_IP='${DNS_SERVER_IP:-}'"
echo "readonly DNS_DOMAIN='${DNS_DOMAIN:-}'"
echo "readonly KUBE_USER='${KUBE_USER:-}'"
echo "readonly KUBE_PASSWORD='${KUBE_PASSWORD:-}'"
echo "readonly SERVER_BINARY_TAR='${SERVER_BINARY_TAR##*/}'"
echo "readonly SALT_TAR='${SALT_TAR##*/}'"
echo "readonly MASTER_HTPASSWD='${htpasswd}'"
@ -259,13 +351,11 @@ function kube-up {
kube-up-vm ${MASTER_NAME} -c ${MASTER_CPU-1} -m ${MASTER_MEMORY_MB-1024}
upload-server-tars
kube-run ${MASTER_NAME} "${KUBE_TEMP}/master-start.sh"
# Print master IP, so user can log in for debugging.
detect-master
echo
echo "Starting minion VMs (this can take a minute)..."
echo "Starting node VMs (this can take a minute)..."
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
(
echo "#! /bin/bash"
@ -275,11 +365,11 @@ function kube-up {
echo "KUBE_MASTER_IP=${KUBE_MASTER_IP}"
echo "NODE_IP_RANGE=${NODE_IP_RANGES[$i]}"
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/salt-minion.sh"
) > "${KUBE_TEMP}/minion-start-${i}.sh"
) > "${KUBE_TEMP}/node-start-${i}.sh"
(
kube-up-vm "${NODE_NAMES[$i]}" -c ${NODE_CPU-1} -m ${NODE_MEMORY_MB-1024}
kube-run "${NODE_NAMES[$i]}" "${KUBE_TEMP}/minion-start-${i}.sh"
kube-run "${NODE_NAMES[$i]}" "${KUBE_TEMP}/node-start-${i}.sh"
) &
done
@ -293,17 +383,32 @@ function kube-up {
exit 2
fi
# Print minion IPs, so user can log in for debugging.
# Print node IPs, so user can log in for debugging.
detect-nodes
printf "Waiting for salt-master to be up on ${KUBE_MASTER} ...\n"
remote-pgrep ${KUBE_MASTER_IP} "salt-master"
printf "Waiting for all packages to be installed on ${KUBE_MASTER} ...\n"
kube-check ${KUBE_MASTER_IP} 'sudo salt "kubernetes-master" state.highstate -t 30 | grep -E "Failed:[[:space:]]+0"'
local i
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
printf "Waiting for salt-minion to be up on ${NODE_NAMES[$i]} ....\n"
remote-pgrep ${KUBE_NODE_IP_ADDRESSES[$i]} "salt-minion"
printf "Waiting for all salt packages to be installed on ${NODE_NAMES[$i]} .... \n"
kube-check ${KUBE_MASTER_IP} 'sudo salt '"${NODE_NAMES[$i]}"' state.highstate -t 30 | grep -E "Failed:[[:space:]]+0"'
printf " OK\n"
done
echo
echo "Waiting for master and minion initialization."
echo "Waiting for master and node initialization."
echo
echo " This will continually check to see if the API for kubernetes is reachable."
echo " This might loop forever if there was some uncaught error during start up."
echo
printf "Waiting for ${KUBE_MASTER} to become available..."
until curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" --max-time 5 \
--fail --output /dev/null --silent "https://${KUBE_MASTER_IP}/healthz"; do
printf "."
@ -311,7 +416,6 @@ function kube-up {
done
printf " OK\n"
local i
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
printf "Waiting for ${NODE_NAMES[$i]} to become available..."
until curl --max-time 5 \
@ -321,7 +425,6 @@ function kube-up {
done
printf " OK\n"
done
echo "Kubernetes cluster created."
# TODO use token instead of basic auth

View File

@ -68,10 +68,10 @@ convenient).
### Setup
Download a prebuilt Debian 7.7 VMDK that we'll use as a base image:
Download a prebuilt Debian 8.2 VMDK that we'll use as a base image:
```sh
curl --remote-name-all https://storage.googleapis.com/govmomi/vmdk/2014-11-11/kube.vmdk.gz{,.md5}
curl --remote-name-all https://storage.googleapis.com/govmomi/vmdk/2016-01-08/kube.vmdk.gz{,.md5}
md5sum -c kube.vmdk.gz.md5
gzip -d kube.vmdk.gz
```
@ -79,7 +79,10 @@ gzip -d kube.vmdk.gz
Import this VMDK into your vSphere datastore:
```sh
export GOVC_URL='user:pass@hostname'
export GOVC_URL='hostname' # hostname of the vCenter server
export GOVC_USER='username' # username for logging in to vSphere
export GOVC_PASSWORD='password' # password for the above username
export GOVC_NETWORK='Network Name' # name of the network the VMs should join (often "VM Network")
export GOVC_INSECURE=1 # If the host above uses a self-signed cert
export GOVC_DATASTORE='target datastore'
export GOVC_RESOURCE_POOL='resource pool or cluster with access to datastore'
@ -99,7 +102,7 @@ parameters. The guest login for the image that you imported is `kube:kube`.
### Starting a cluster
Now, let's continue with deploying Kubernetes.
This process takes about ~10 minutes.
This process takes roughly 20-30 minutes, depending on your network.
```sh
cd kubernetes # Extracted binary release OR repository root
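export KUBERNETES_PROVIDER=vsphere  # sketch of the guide's next step (not part of this diff)
cluster/kube-up.sh                  # bring up the master and node VMs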