#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the local config.
# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/gce/${KUBE_CONFIG_FILE-"config-default.sh"}"

# Verify prereqs
function verify-prereqs {
  local cmd
  for cmd in gcloud gcutil gsutil; do
    which "${cmd}" >/dev/null || {
      echo "Can't find ${cmd} in PATH, please fix and retry. The Google Cloud"
      echo "SDK can be downloaded from https://cloud.google.com/sdk/."
      exit 1
    }
  done
}

# Create a temp dir that'll be deleted at the end of this bash session.
#
# Vars set:
#   KUBE_TEMP
function ensure-temp-dir {
  if [[ -z ${KUBE_TEMP-} ]]; then
    KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
    trap 'rm -rf "${KUBE_TEMP}"' EXIT
  fi
}
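
# A minimal usage sketch (illustrative): callers can rely on ${KUBE_TEMP} once
# ensure-temp-dir has run, e.g.:
#   ensure-temp-dir
#   echo "scratch" > "${KUBE_TEMP}/scratch.txt"   # removed when the shell exits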

# Verify and find the various tar files that we are going to use on the server.
#
# Vars set:
#   SERVER_BINARY_TAR
#   SALT_TAR
function find-release-tars {
  SERVER_BINARY_TAR="${KUBE_ROOT}/server/kubernetes-server-linux-amd64.tar.gz"
  if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
    SERVER_BINARY_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
  fi
  if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
    echo "!!! Cannot find kubernetes-server-linux-amd64.tar.gz"
    exit 1
  fi

  SALT_TAR="${KUBE_ROOT}/server/kubernetes-salt.tar.gz"
  if [[ ! -f "$SALT_TAR" ]]; then
    SALT_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-salt.tar.gz"
  fi
  if [[ ! -f "$SALT_TAR" ]]; then
    echo "!!! Cannot find kubernetes-salt.tar.gz"
    exit 1
  fi
}

# Use the gcloud defaults to find the project. If it is already set in the
# environment then go with that.
#
# Vars set:
#   PROJECT
function detect-project () {
  if [[ -z "${PROJECT-}" ]]; then
    # The last line of `gcloud config list project` looks like
    # "project = <name>", so grab the third space-delimited field.
    PROJECT=$(gcloud config list project | tail -n 1 | cut -f 3 -d ' ')
  fi

  if [[ -z "${PROJECT-}" ]]; then
    echo "Could not detect Google Cloud Platform project. Set the default project using" >&2
    echo "'gcloud config set project <PROJECT>'" >&2
    exit 1
  fi
  echo "Project: $PROJECT (autodetected from gcloud config)"
}

# Take the local tar files and upload them to Google Storage. They will then be
# downloaded by the master as part of the start up script for the master.
#
# Assumed vars:
#   PROJECT
#   SERVER_BINARY_TAR
#   SALT_TAR
# Vars set:
#   SERVER_BINARY_TAR_URL
#   SALT_TAR_URL
function upload-server-tars() {
  SERVER_BINARY_TAR_URL=
  SALT_TAR_URL=

  local project_hash
  if which md5 > /dev/null 2>&1; then
    project_hash=$(md5 -q -s "$PROJECT")
  else
    project_hash=$(echo -n "$PROJECT" | md5sum | awk '{ print $1 }')
  fi
  project_hash=${project_hash:0:5}

  local -r staging_bucket="gs://kubernetes-staging-${project_hash}"

  # Ensure the bucket is created
  if ! gsutil ls "$staging_bucket" > /dev/null 2>&1 ; then
    echo "Creating $staging_bucket"
    gsutil mb "${staging_bucket}"
  fi

  local -r staging_path="${staging_bucket}/devel"

  echo "+++ Staging server tars to Google Storage: ${staging_path}"
  local server_binary_gs_url="${staging_path}/${SERVER_BINARY_TAR##*/}"
  gsutil -q -h "Cache-Control:private, max-age=0" cp "${SERVER_BINARY_TAR}" "${server_binary_gs_url}"
  gsutil acl ch -g all:R "${server_binary_gs_url}" >/dev/null 2>&1
  local salt_gs_url="${staging_path}/${SALT_TAR##*/}"
  gsutil -q -h "Cache-Control:private, max-age=0" cp "${SALT_TAR}" "${salt_gs_url}"
  gsutil acl ch -g all:R "${salt_gs_url}" >/dev/null 2>&1

  # Convert from gs:// URL to an https:// URL
  SERVER_BINARY_TAR_URL="${server_binary_gs_url/gs:\/\//https://storage.googleapis.com/}"
  SALT_TAR_URL="${salt_gs_url/gs:\/\//https://storage.googleapis.com/}"
}
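
# For example (illustrative, the bucket hash here is made up), a tar staged at
#   gs://kubernetes-staging-a1b2c/devel/kubernetes-salt.tar.gz
# is published as
#   https://storage.googleapis.com/kubernetes-staging-a1b2c/devel/kubernetes-salt.tar.gz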

# Detect the information about the minions
#
# Assumed vars:
#   MINION_NAMES
#   ZONE
# Vars set:
#   KUBE_MINION_IP_ADDRESSES (array)
function detect-minions () {
  KUBE_MINION_IP_ADDRESSES=()
  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    # gcutil will print the "external-ip" column header even if no instances are found
    local minion_ip=$(gcutil listinstances --format=csv --sort=external-ip \
      --columns=external-ip --zone ${ZONE} --filter="name eq ${MINION_NAMES[$i]}" \
      | tail -n '+2' | tail -n 1)
    if [[ -z "${minion_ip-}" ]] ; then
      echo "Did not find ${MINION_NAMES[$i]}" >&2
    else
      echo "Found ${MINION_NAMES[$i]} at ${minion_ip}"
      KUBE_MINION_IP_ADDRESSES+=("${minion_ip}")
    fi
  done
  if [[ -z "${KUBE_MINION_IP_ADDRESSES-}" ]]; then
    echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'" >&2
    exit 1
  fi
}
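
# For example (illustrative), after detect-minions succeeds:
#   echo "first minion: ${KUBE_MINION_IP_ADDRESSES[0]}"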

# Detect the IP for the master
#
# Assumed vars:
#   MASTER_NAME
#   ZONE
# Vars set:
#   KUBE_MASTER
#   KUBE_MASTER_IP
function detect-master () {
  KUBE_MASTER=${MASTER_NAME}
  if [[ -z "${KUBE_MASTER_IP-}" ]]; then
    # gcutil will print the "external-ip" column header even if no instances are found
    KUBE_MASTER_IP=$(gcutil listinstances --format=csv --sort=external-ip \
      --columns=external-ip --zone ${ZONE} --filter="name eq ${MASTER_NAME}" \
      | tail -n '+2' | tail -n 1)
  fi
  if [[ -z "${KUBE_MASTER_IP-}" ]]; then
    echo "Could not detect Kubernetes master node. Make sure you've launched a cluster with 'kube-up.sh'" >&2
    exit 1
  fi
  echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
}

# Ensure that we have a password created for authenticating to the master. Will
# read from $HOME/.kubernetes_auth if available.
#
# Vars set:
#   KUBE_USER
#   KUBE_PASSWORD
function get-password {
  local file="$HOME/.kubernetes_auth"
  if [[ -r "$file" ]]; then
    KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]')
    KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]')
    return
  fi
  KUBE_USER=admin
  KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')

  # TODO: Remove this code, since in all use cases I can see, we are
  # overwriting this at cluster creation time.
  cat << EOF > "$file"
{
  "User": "$KUBE_USER",
  "Password": "$KUBE_PASSWORD"
}
EOF
  chmod 0600 "$file"
}

# Generate authentication token for admin user. Will
# read from $HOME/.kubernetes_auth if available.
#
# Vars set:
#   KUBE_ADMIN_TOKEN
function get-admin-token {
  local file="$HOME/.kubernetes_auth"
  if [[ -r "$file" ]]; then
    KUBE_ADMIN_TOKEN=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["BearerToken"]')
    return
  fi
  KUBE_ADMIN_TOKEN=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(32))')
}

# Instantiate a kubernetes cluster
#
# Assumed vars
#   KUBE_ROOT
#   <Various vars set in config file>
function kube-up {
  # Detect the project into $PROJECT if it isn't set
  detect-project

  # Make sure we have the tar files staged on Google Storage
  find-release-tars
  upload-server-tars

  ensure-temp-dir

  get-password
  python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
    -b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
  local htpasswd
  htpasswd=$(cat "${KUBE_TEMP}/htpasswd")

  if ! gcutil getnetwork "${NETWORK}" >/dev/null 2>&1; then
    echo "Creating new network for: ${NETWORK}"
    # The network needs to be created synchronously or we have a race. The
    # firewalls can be added concurrent with instance creation.
    gcutil addnetwork "${NETWORK}" --range "10.240.0.0/16"
  fi

  if ! gcutil getfirewall "${NETWORK}-default-internal" >/dev/null 2>&1; then
    gcutil addfirewall "${NETWORK}-default-internal" \
      --project "${PROJECT}" \
      --norespect_terminal_width \
      --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
      --network "${NETWORK}" \
      --allowed_ip_sources "10.0.0.0/8" \
      --allowed "tcp:1-65535,udp:1-65535,icmp" &
  fi

  if ! gcutil getfirewall "${NETWORK}-default-ssh" >/dev/null 2>&1; then
    gcutil addfirewall "${NETWORK}-default-ssh" \
      --project "${PROJECT}" \
      --norespect_terminal_width \
      --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
      --network "${NETWORK}" \
      --allowed_ip_sources "0.0.0.0/0" \
      --allowed "tcp:22" &
  fi

  echo "Starting VMs and configuring firewalls"
  gcutil addfirewall "${MASTER_NAME}-https" \
    --project "${PROJECT}" \
    --norespect_terminal_width \
    --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
    --network "${NETWORK}" \
    --target_tags "${MASTER_TAG}" \
    --allowed tcp:443 &

  (
    echo "#! /bin/bash"
    echo "mkdir -p /var/cache/kubernetes-install"
    echo "cd /var/cache/kubernetes-install"
    echo "readonly MASTER_NAME='${MASTER_NAME}'"
    echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-minion'"
    echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'"
    echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
    echo "readonly MASTER_HTPASSWD='${htpasswd}'"
    echo "readonly PORTAL_NET='${PORTAL_NET}'"
    echo "readonly FLUENTD_ELASTICSEARCH='${FLUENTD_ELASTICSEARCH:-false}'"
    echo "readonly FLUENTD_GCP='${FLUENTD_GCP:-false}'"
    grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/common.sh"
    grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/create-dynamic-salt-files.sh"
    grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/download-release.sh"
    grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/salt-master.sh"
  ) > "${KUBE_TEMP}/master-start.sh"

  # Report logging choice (if any).
  if [[ "${FLUENTD_ELASTICSEARCH-}" == "true" ]]; then
    echo "+++ Logging using Fluentd to Elasticsearch"
  fi
  if [[ "${FLUENTD_GCP-}" == "true" ]]; then
    echo "+++ Logging using Fluentd to Google Cloud Logging"
  fi

  # For logging to GCP we need to enable some minion scopes.
  if [[ "${FLUENTD_GCP-}" == "true" ]]; then
    MINION_SCOPES="${MINION_SCOPES}, https://www.googleapis.com/auth/logging.write"
  fi

  gcutil addinstance "${MASTER_NAME}" \
    --project "${PROJECT}" \
    --norespect_terminal_width \
    --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
    --zone "${ZONE}" \
    --machine_type "${MASTER_SIZE}" \
    --image "projects/${IMAGE_PROJECT}/global/images/${IMAGE}" \
    --tags "${MASTER_TAG}" \
    --network "${NETWORK}" \
    --service_account_scopes="storage-ro,compute-rw" \
    --automatic_restart \
    --metadata_from_file "startup-script:${KUBE_TEMP}/master-start.sh" &

  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    (
      echo "#! /bin/bash"
      echo "MASTER_NAME='${MASTER_NAME}'"
      echo "MINION_IP_RANGE='${MINION_IP_RANGES[$i]}'"
      grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/common.sh"
      grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/salt-minion.sh"
    ) > "${KUBE_TEMP}/minion-start-${i}.sh"

    gcutil addfirewall "${MINION_NAMES[$i]}-all" \
      --project "${PROJECT}" \
      --norespect_terminal_width \
      --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
      --network "${NETWORK}" \
      --allowed_ip_sources "${MINION_IP_RANGES[$i]}" \
      --allowed "tcp,udp,icmp,esp,ah,sctp" &

    gcutil addinstance "${MINION_NAMES[$i]}" \
      --project "${PROJECT}" \
      --norespect_terminal_width \
      --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
      --zone "${ZONE}" \
      --machine_type "${MINION_SIZE}" \
      --image "projects/${IMAGE_PROJECT}/global/images/${IMAGE}" \
      --tags "${MINION_TAG}" \
      --network "${NETWORK}" \
      --service_account_scopes "${MINION_SCOPES}" \
      --automatic_restart \
      --can_ip_forward \
      --metadata_from_file "startup-script:${KUBE_TEMP}/minion-start-${i}.sh" &

    gcutil addroute "${MINION_NAMES[$i]}" "${MINION_IP_RANGES[$i]}" \
      --project "${PROJECT}" \
      --norespect_terminal_width \
      --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
      --network "${NETWORK}" \
      --next_hop_instance "${ZONE}/instances/${MINION_NAMES[$i]}" &
  done

  local fail=0
  local job
  for job in $(jobs -p); do
    wait "${job}" || fail=$((fail + 1))
  done
  if (( $fail != 0 )); then
    echo "${fail} commands failed. Exiting." >&2
    exit 2
  fi

  detect-master > /dev/null

  echo "Waiting for cluster initialization."
  echo
  echo "  This will continually check to see if the API for kubernetes is reachable."
  echo "  This might loop forever if there was some uncaught error during start"
  echo "  up."
  echo

  until curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" --max-time 5 \
      --fail --output /dev/null --silent "https://${KUBE_MASTER_IP}/api/v1beta1/pods"; do
    printf "."
    sleep 2
  done

  echo "Kubernetes cluster created."
  echo "Sanity checking cluster..."

  sleep 5

  # Basic sanity checking
  local i
  local rc # Capture return code without exiting because of errexit bash option
  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    # Make sure docker is installed
    gcutil ssh "${MINION_NAMES[$i]}" which docker >/dev/null || {
      echo "Docker failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely" >&2
      echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2
      echo "cluster. (sorry!)" >&2
      exit 1
    }
  done

  echo
  echo "Kubernetes cluster is running. The master is running at:"
  echo
  echo "  https://${KUBE_MASTER_IP}"
  echo
  echo "The user name and password to use is located in ~/.kubernetes_auth."
  echo

  local kube_cert=".kubecfg.crt"
  local kube_key=".kubecfg.key"
  local ca_cert=".kubernetes.ca.crt"

  # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's
  # config file. Distribute the same way the htpasswd is done.
  (umask 077
   gcutil ssh "${MASTER_NAME}" sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null
   gcutil ssh "${MASTER_NAME}" sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null
   gcutil ssh "${MASTER_NAME}" sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null

   cat << EOF > ~/.kubernetes_auth
{
  "User": "$KUBE_USER",
  "Password": "$KUBE_PASSWORD",
  "CAFile": "$HOME/$ca_cert",
  "CertFile": "$HOME/$kube_cert",
  "KeyFile": "$HOME/$kube_key"
}
EOF

   chmod 0600 ~/.kubernetes_auth "${HOME}/${kube_cert}" \
     "${HOME}/${kube_key}" "${HOME}/${ca_cert}"
  )
}

# Delete a kubernetes cluster
function kube-down {
  # Detect the project into $PROJECT
  detect-project

  echo "Bringing down cluster"
  gcutil deletefirewall \
    --project "${PROJECT}" \
    --norespect_terminal_width \
    --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
    --force \
    "${MASTER_NAME}-https" &

  gcutil deleteinstance \
    --project "${PROJECT}" \
    --norespect_terminal_width \
    --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
    --force \
    --delete_boot_pd \
    --zone "${ZONE}" \
    "${MASTER_NAME}" &

  gcutil deletefirewall \
    --project "${PROJECT}" \
    --norespect_terminal_width \
    --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
    --force \
    "${MINION_NAMES[@]/%/-all}" &

  gcutil deleteinstance \
    --project "${PROJECT}" \
    --norespect_terminal_width \
    --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
    --force \
    --delete_boot_pd \
    --zone "${ZONE}" \
    "${MINION_NAMES[@]}" &

  gcutil deleteroute \
    --project "${PROJECT}" \
    --norespect_terminal_width \
    --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
    --force \
    "${MINION_NAMES[@]}" &

  wait
}

# Update a kubernetes cluster with latest source
function kube-push {
  detect-project
  detect-master

  # Make sure we have the tar files staged on Google Storage
  find-release-tars
  upload-server-tars

  (
    echo "#! /bin/bash"
    echo "mkdir -p /var/cache/kubernetes-install"
    echo "cd /var/cache/kubernetes-install"
    echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'"
    echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
    grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/common.sh"
    grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/download-release.sh"
    echo "echo Executing configuration"
    echo "sudo salt '*' mine.update"
    echo "sudo salt --force-color '*' state.highstate"
  ) | gcutil ssh --project "$PROJECT" --zone "$ZONE" "$KUBE_MASTER" sudo bash

  get-password

  echo
  echo "Kubernetes cluster is running. The master is running at:"
  echo
  echo "  https://${KUBE_MASTER_IP}"
  echo
  echo "The user name and password to use is located in ~/.kubernetes_auth."
  echo
}

# -----------------------------------------------------------------------------
# Cluster specific test helpers used from hack/e2e-test.sh

# Execute prior to running tests to build a release if required for env.
#
# Assumed Vars:
#   KUBE_ROOT
function test-build-release {
  # Make a release
  "${KUBE_ROOT}/build/release.sh"
}

# Execute prior to running tests to initialize required structure. This is
# called from hack/e2e-test.sh.
#
# Assumed vars:
#   PROJECT
#   Variables from config.sh
function test-setup {
  # Detect the project into $PROJECT if it isn't set
  # gce specific
  detect-project

  # Open up port 80 & 8080 so common containers on minions can be reached
  gcutil addfirewall \
    --project "${PROJECT}" \
    --norespect_terminal_width \
    --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
    --target_tags "${MINION_TAG}" \
    --allowed tcp:80,tcp:8080 \
    --network "${NETWORK}" \
    "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt"
}

# Execute after running tests to perform any required clean-up. This is called
# from hack/e2e-test.sh
#
# Assumed Vars:
#   PROJECT
function test-teardown {
  echo "Shutting down test cluster in background."
  gcutil deletefirewall \
    --project "${PROJECT}" \
    --norespect_terminal_width \
    --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
    --force \
    "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt" || true > /dev/null
  "${KUBE_ROOT}/cluster/kube-down.sh" > /dev/null
}

# SSH to a node by name ($1) and run a command ($2).
function ssh-to-node {
  local node="$1"
  local cmd="$2"
  gcutil --log_level=WARNING ssh --ssh_arg "-o LogLevel=quiet" "${node}" "${cmd}"
}

# Restart the kube-proxy on a node ($1)
function restart-kube-proxy {
  ssh-to-node "$1" "sudo /etc/init.d/kube-proxy restart"
}

# Setup monitoring using heapster and InfluxDB
function setup-monitoring {
  if [[ "${MONITORING}" == "true" ]]; then
    echo "Setting up Cluster Monitoring using Heapster."

    detect-project
    if ! gcutil getfirewall monitoring-heapster &> /dev/null; then
      gcutil addfirewall monitoring-heapster \
        --project "${PROJECT}" \
        --norespect_terminal_width \
        --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
        --target_tags="${MINION_TAG}" \
        --allowed "tcp:80,tcp:8083,tcp:8086,tcp:9200" &> /dev/null;
      if [ $? -ne 0 ]; then
        echo "Failed to Setup Firewall for Monitoring" && false
      fi
    fi

    # Re-use master auth for Grafana
    get-password
    sed -i "s/HTTP_USER, \"value\": \"[^\"]*\"/HTTP_USER, \"value\": \"$KUBE_USER\"/g" "${KUBE_ROOT}/examples/monitoring/influx-grafana-pod.json"
    sed -i "s/HTTP_PASS, \"value\": \"[^\"]*\"/HTTP_PASS, \"value\": \"$KUBE_PASSWORD\"/g" "${KUBE_ROOT}/examples/monitoring/influx-grafana-pod.json"

    local kubectl=${KUBE_ROOT}/cluster/kubectl.sh
    if "${kubectl}" create -f "${KUBE_ROOT}/examples/monitoring/influx-grafana-pod.json" &> /dev/null \
        && "${kubectl}" create -f "${KUBE_ROOT}/examples/monitoring/influx-grafana-service.json" &> /dev/null \
        && "${kubectl}" create -f "${KUBE_ROOT}/examples/monitoring/heapster-pod.json" &> /dev/null; then
      dashboardIP="http://$KUBE_USER:$KUBE_PASSWORD@`${kubectl} get -o json pod influx-grafana | grep hostIP | awk '{print $2}' | sed 's/[,|\"]//g'`"
      echo "Grafana dashboard will be available at $dashboardIP. Wait for the monitoring dashboard to be online. Use the master user name and password for the dashboard."
    else
      echo "Failed to Setup Monitoring"
      teardown-monitoring
    fi
  fi
}

function teardown-monitoring {
  if [[ "${MONITORING}" == "true" ]]; then
    detect-project

    local kubectl=${KUBE_ROOT}/cluster/kubectl.sh
    ${kubectl} delete pods heapster &> /dev/null || true
    ${kubectl} delete pods influx-grafana &> /dev/null || true
    ${kubectl} delete services influx-master &> /dev/null || true
    if gcutil getfirewall monitoring-heapster &> /dev/null; then
      gcutil deletefirewall \
        --project "${PROJECT}" \
        --norespect_terminal_width \
        --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
        --force \
        monitoring-heapster &> /dev/null || true
    fi
  fi
}

# Perform preparations required to run e2e tests
function prepare-e2e() {
  detect-project
}