#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the local config.
# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/gce/${KUBE_CONFIG_FILE-"config-default.sh"}"

NODE_INSTANCE_PREFIX="${INSTANCE_PREFIX}-minion"

# Verify prereqs
function verify-prereqs {
  local cmd
  for cmd in gcloud gsutil; do
    which "${cmd}" >/dev/null || {
      echo "Can't find ${cmd} in PATH, please fix and retry. The Google Cloud "
      echo "SDK can be downloaded from https://cloud.google.com/sdk/."
      exit 1
    }
  done
}

# Create a temp dir that'll be deleted at the end of this bash session.
#
# Vars set:
# KUBE_TEMP
function ensure-temp-dir {
  if [[ -z ${KUBE_TEMP-} ]]; then
    KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
    trap 'rm -rf "${KUBE_TEMP}"' EXIT
  fi
}

# Verify and find the various tar files that we are going to use on the server.
#
# Vars set:
# SERVER_BINARY_TAR
# SALT_TAR
function find-release-tars {
  SERVER_BINARY_TAR="${KUBE_ROOT}/server/kubernetes-server-linux-amd64.tar.gz"
  if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
    SERVER_BINARY_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
  fi
  if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
    echo "!!! Cannot find kubernetes-server-linux-amd64.tar.gz"
    exit 1
  fi

  SALT_TAR="${KUBE_ROOT}/server/kubernetes-salt.tar.gz"
  if [[ ! -f "$SALT_TAR" ]]; then
    SALT_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-salt.tar.gz"
  fi
  if [[ ! -f "$SALT_TAR" ]]; then
    echo "!!! Cannot find kubernetes-salt.tar.gz"
    exit 1
  fi
}

# Use the gcloud defaults to find the project. If it is already set in the
# environment then go with that.
#
# Vars set:
# PROJECT
# PROJECT_REPORTED
function detect-project() {
  if [[ -z "${PROJECT-}" ]]; then
    PROJECT=$(gcloud config list project | tail -n 1 | cut -f 3 -d ' ')
  fi

  if [[ -z "${PROJECT-}" ]]; then
    echo "Could not detect Google Cloud Platform project. Set the default project using " >&2
    echo "'gcloud config set project <PROJECT>'" >&2
    exit 1
  fi

  if [[ -z "${PROJECT_REPORTED-}" ]]; then
    echo "Project: ${PROJECT}" >&2
    echo "Zone: ${ZONE}" >&2
    PROJECT_REPORTED=true
  fi
}

# Take the local tar files and upload them to Google Storage. They will then be
# downloaded by the master as part of the start up script for the master.
#
# Assumed vars:
# PROJECT
# SERVER_BINARY_TAR
# SALT_TAR
# Vars set:
# SERVER_BINARY_TAR_URL
# SALT_TAR_URL
function upload-server-tars() {
  SERVER_BINARY_TAR_URL=
  SALT_TAR_URL=

  local project_hash
  if which md5 > /dev/null 2>&1; then
    project_hash=$(md5 -q -s "$PROJECT")
  else
    project_hash=$(echo -n "$PROJECT" | md5sum | awk '{ print $1 }')
  fi
  project_hash=${project_hash:0:5}

  local -r staging_bucket="gs://kubernetes-staging-${project_hash}"

  # Ensure the bucket is created
  if ! gsutil ls "$staging_bucket" > /dev/null 2>&1 ; then
    echo "Creating $staging_bucket"
    gsutil mb "${staging_bucket}"
  fi

  local -r staging_path="${staging_bucket}/devel"

  echo "+++ Staging server tars to Google Storage: ${staging_path}"
  local server_binary_gs_url="${staging_path}/${SERVER_BINARY_TAR##*/}"
  gsutil -q -h "Cache-Control:private, max-age=0" cp "${SERVER_BINARY_TAR}" "${server_binary_gs_url}"
  gsutil acl ch -g all:R "${server_binary_gs_url}" >/dev/null 2>&1
  local salt_gs_url="${staging_path}/${SALT_TAR##*/}"
  gsutil -q -h "Cache-Control:private, max-age=0" cp "${SALT_TAR}" "${salt_gs_url}"
  gsutil acl ch -g all:R "${salt_gs_url}" >/dev/null 2>&1

  # Convert from gs:// URL to an https:// URL
  SERVER_BINARY_TAR_URL="${server_binary_gs_url/gs:\/\//https://storage.googleapis.com/}"
  SALT_TAR_URL="${salt_gs_url/gs:\/\//https://storage.googleapis.com/}"
}
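
# (Illustrative note, hypothetical bucket name.) The gs:// to https:// substitution
# above maps, for example,
#   gs://kubernetes-staging-abc12/devel/kubernetes-server-linux-amd64.tar.gz
# to
#   https://storage.googleapis.com/kubernetes-staging-abc12/devel/kubernetes-server-linux-amd64.tar.gz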

# Detect minions created in the minion group
#
# Assumed vars:
# NODE_INSTANCE_PREFIX
# Vars set:
# MINION_NAMES
function detect-minion-names {
  detect-project
  MINION_NAMES=($(gcloud preview --project "${PROJECT}" instance-groups \
    --zone "${ZONE}" instances --group "${NODE_INSTANCE_PREFIX}-group" list \
    | cut -d'/' -f11))
  echo "MINION_NAMES=${MINION_NAMES[*]}"
}

# Waits until the number of running nodes in the instance group is equal to NUM_MINIONS
#
# Assumed vars:
# NODE_INSTANCE_PREFIX
# NUM_MINIONS
function wait-for-minions-to-run {
  detect-project
  local running_minions=0
  while [[ "${NUM_MINIONS}" != "${running_minions}" ]]; do
    echo -e -n "${color_yellow}Waiting for minions to run. "
    echo -e "${running_minions} out of ${NUM_MINIONS} running. Retrying.${color_norm}"
    sleep 5
    running_minions=$(gcloud preview --project "${PROJECT}" instance-groups \
      --zone "${ZONE}" instances --group "${NODE_INSTANCE_PREFIX}-group" list \
      --running | wc -l | xargs)
  done
}

# Detect the information about the minions
#
# Assumed vars:
# ZONE
# Vars set:
# MINION_NAMES
# KUBE_MINION_IP_ADDRESSES (array)
function detect-minions() {
  detect-project
  detect-minion-names
  KUBE_MINION_IP_ADDRESSES=()
  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    local minion_ip=$(gcloud compute instances describe --project "${PROJECT}" --zone "${ZONE}" \
      "${MINION_NAMES[$i]}" --fields networkInterfaces[0].accessConfigs[0].natIP \
      --format=text | awk '{ print $2 }')
    if [[ -z "${minion_ip-}" ]] ; then
      echo "Did not find ${MINION_NAMES[$i]}" >&2
    else
      echo "Found ${MINION_NAMES[$i]} at ${minion_ip}"
      KUBE_MINION_IP_ADDRESSES+=("${minion_ip}")
    fi
  done
  if [[ -z "${KUBE_MINION_IP_ADDRESSES-}" ]]; then
    echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'" >&2
    exit 1
  fi
}

# Detect the IP for the master
#
# Assumed vars:
# MASTER_NAME
# ZONE
# Vars set:
# KUBE_MASTER
# KUBE_MASTER_IP
function detect-master() {
  detect-project
  KUBE_MASTER=${MASTER_NAME}
  if [[ -z "${KUBE_MASTER_IP-}" ]]; then
    KUBE_MASTER_IP=$(gcloud compute instances describe --project "${PROJECT}" --zone "${ZONE}" \
      "${MASTER_NAME}" --fields networkInterfaces[0].accessConfigs[0].natIP \
      --format=text | awk '{ print $2 }')
  fi
  if [[ -z "${KUBE_MASTER_IP-}" ]]; then
    echo "Could not detect Kubernetes master node. Make sure you've launched a cluster with 'kube-up.sh'" >&2
    exit 1
  fi
  echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
}

# Ensure that we have a password created for validating to the master. Will
# read from the kubernetes auth-file for the current context if available.
#
# Assumed vars
# KUBE_ROOT
#
# Vars set:
# KUBE_USER
# KUBE_PASSWORD
function get-password {
  # go template to extract the auth-path of the current-context user
  # Note: we save dot ('.') to $dot because the 'with' action overrides dot
  local template='{{$dot := .}}{{with $ctx := index $dot "current-context"}}{{$user := index $dot "contexts" $ctx "user"}}{{index $dot "users" $user "auth-path"}}{{end}}'
  local file=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o template --template="${template}")
  if [[ ! -z "$file" && -r "$file" ]]; then
    KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]')
    KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]')
    return
  fi
  KUBE_USER=admin
  KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
}
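
# (Illustrative note, hypothetical values.) The auth file read above is the JSON
# written by kube-up further down, of the form:
#   { "User": "admin", "Password": "<16 random characters>", ... }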

# Generate authentication token for admin user. Will
# read from $HOME/.kubernetes_auth if available.
#
# Vars set:
# KUBE_ADMIN_TOKEN
function get-admin-token {
  local file="$HOME/.kubernetes_auth"
  if [[ -r "$file" ]]; then
    KUBE_ADMIN_TOKEN=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["BearerToken"]')
    return
  fi
  KUBE_ADMIN_TOKEN=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(32))')
}

# Wait for background jobs to finish. Exit with
# an error status if any of the jobs failed.
function wait-for-jobs {
  local fail=0
  local job
  for job in $(jobs -p); do
    wait "${job}" || fail=$((fail + 1))
  done
  if (( fail != 0 )); then
    echo -e "${color_red}${fail} commands failed. Exiting.${color_norm}" >&2
    # Ignore failures for now.
    # exit 2
  fi
}
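
# (Usage note.) Callers such as kube-up below launch several gcloud commands in
# the background with '&' and then call wait-for-jobs to collect their exit
# statuses before moving on.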

# Robustly try to create a firewall rule.
# $1: The name of firewall rule.
# $2: IP ranges.
# $3: Target tags for this firewall rule.
function create-firewall-rule {
  detect-project
  local attempt=0
  while true; do
    if ! gcloud compute firewall-rules create "$1" \
      --project "${PROJECT}" \
      --network "${NETWORK}" \
      --source-ranges "$2" \
      --target-tags "$3" \
      --allow tcp udp icmp esp ah sctp; then
      if (( attempt > 5 )); then
        echo -e "${color_red}Failed to create firewall rule $1 ${color_norm}"
        exit 2
      fi
      echo -e "${color_yellow}Attempt $(($attempt+1)) failed to create firewall rule $1. Retrying.${color_norm}"
      attempt=$(($attempt+1))
    else
      break
    fi
  done
}
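
# (Usage example, mirroring the call made from kube-up below.)
#   create-firewall-rule "${MINION_TAG}-all" "${CLUSTER_IP_RANGE}" "${MINION_TAG}"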

# Robustly try to create a route.
# $1: The name of the route.
# $2: IP range.
function create-route {
  detect-project
  local attempt=0
  while true; do
    if ! gcloud compute routes create "$1" \
      --project "${PROJECT}" \
      --destination-range "$2" \
      --network "${NETWORK}" \
      --next-hop-instance "$1" \
      --next-hop-instance-zone "${ZONE}"; then
      if (( attempt > 5 )); then
        echo -e "${color_red}Failed to create route $1 ${color_norm}"
        exit 2
      fi
      echo -e "${color_yellow}Attempt $(($attempt+1)) failed to create route $1. Retrying.${color_norm}"
      attempt=$(($attempt+1))
    else
      break
    fi
  done
}

# Robustly try to create an instance template.
# $1: The name of the instance template.
# $2: The scopes flag.
# $3: The minion start script metadata from file.
function create-node-template {
  detect-project
  local attempt=0
  while true; do
    if ! gcloud compute instance-templates create "$1" \
      --project "${PROJECT}" \
      --machine-type "${MINION_SIZE}" \
      --boot-disk-type "${MINION_DISK_TYPE}" \
      --boot-disk-size "${MINION_DISK_SIZE}" \
      --image-project="${IMAGE_PROJECT}" \
      --image "${IMAGE}" \
      --tags "${MINION_TAG}" \
      --network "${NETWORK}" \
      $2 \
      --can-ip-forward \
      --metadata-from-file "$3"; then
      if (( attempt > 5 )); then
        echo -e "${color_red}Failed to create instance template $1 ${color_norm}"
        exit 2
      fi
      echo -e "${color_yellow}Attempt $(($attempt+1)) failed to create instance template $1. Retrying.${color_norm}"
      attempt=$(($attempt+1))
    else
      break
    fi
  done
}

# Robustly try to add metadata on an instance.
# $1: The name of the instance.
# $2: The metadata key=value pair to add.
function add-instance-metadata {
  detect-project
  local attempt=0
  while true; do
    if ! gcloud compute instances add-metadata "$1" \
      --project "${PROJECT}" \
      --zone "${ZONE}" \
      --metadata "$2"; then
      if (( attempt > 5 )); then
        echo -e "${color_red}Failed to add instance metadata in $1 ${color_norm}"
        exit 2
      fi
      echo -e "${color_yellow}Attempt $(($attempt+1)) failed to add metadata in $1. Retrying.${color_norm}"
      attempt=$(($attempt+1))
    else
      break
    fi
  done
}
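
# (Usage example, mirroring the per-minion call made from kube-up below.)
#   add-instance-metadata "${MINION_NAMES[$i]}" "node-ip-range=${MINION_IP_RANGES[$i]}"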

# Instantiate a kubernetes cluster
#
# Assumed vars
# KUBE_ROOT
# <Various vars set in config file>
function kube-up {
  detect-project

  # Make sure we have the tar files staged on Google Storage
  find-release-tars
  upload-server-tars

  ensure-temp-dir

  get-password
  python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
    -b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
  local htpasswd
  htpasswd=$(cat "${KUBE_TEMP}/htpasswd")

  if ! gcloud compute networks --project "${PROJECT}" describe "${NETWORK}" &>/dev/null; then
    echo "Creating new network: ${NETWORK}"
    # The network needs to be created synchronously or we have a race. The
    # firewalls can be added concurrent with instance creation.
    gcloud compute networks create --project "${PROJECT}" "${NETWORK}" --range "10.240.0.0/16"
  fi

  if ! gcloud compute firewall-rules --project "${PROJECT}" describe "${NETWORK}-default-internal" &>/dev/null; then
    gcloud compute firewall-rules create "${NETWORK}-default-internal" \
      --project "${PROJECT}" \
      --network "${NETWORK}" \
      --source-ranges "10.0.0.0/8" \
      --allow "tcp:1-65535" "udp:1-65535" "icmp" &
  fi

  if ! gcloud compute firewall-rules describe --project "${PROJECT}" "${NETWORK}-default-ssh" &>/dev/null; then
    gcloud compute firewall-rules create "${NETWORK}-default-ssh" \
      --project "${PROJECT}" \
      --network "${NETWORK}" \
      --source-ranges "0.0.0.0/0" \
      --allow "tcp:22" &
  fi

  echo "Starting VMs and configuring firewalls"
  gcloud compute firewall-rules create "${MASTER_NAME}-https" \
    --project "${PROJECT}" \
    --network "${NETWORK}" \
    --target-tags "${MASTER_TAG}" \
    --allow tcp:443 &

  # We have to make sure the disk is created before creating the master VM, so
  # run this in the foreground.
  gcloud compute disks create "${MASTER_NAME}-pd" \
    --project "${PROJECT}" \
    --zone "${ZONE}" \
    --size "10GB"

  (
    echo "#! /bin/bash"
    echo "mkdir -p /var/cache/kubernetes-install"
    echo "cd /var/cache/kubernetes-install"
    echo "readonly MASTER_NAME='${MASTER_NAME}'"
    echo "readonly INSTANCE_PREFIX='${INSTANCE_PREFIX}'"
    echo "readonly NODE_INSTANCE_PREFIX='${NODE_INSTANCE_PREFIX}'"
    echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'"
    echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
    echo "readonly MASTER_HTPASSWD='${htpasswd}'"
    echo "readonly PORTAL_NET='${PORTAL_NET}'"
echo " readonly ENABLE_CLUSTER_MONITORING=' ${ ENABLE_CLUSTER_MONITORING :- false } ' "
2014-11-14 04:32:35 +00:00
echo " readonly ENABLE_NODE_MONITORING=' ${ ENABLE_NODE_MONITORING :- false } ' "
echo " readonly ENABLE_CLUSTER_LOGGING=' ${ ENABLE_CLUSTER_LOGGING :- false } ' "
2014-11-14 07:07:43 +00:00
echo " readonly ENABLE_NODE_LOGGING=' ${ ENABLE_NODE_LOGGING :- false } ' "
echo " readonly LOGGING_DESTINATION=' ${ LOGGING_DESTINATION :- } ' "
echo " readonly ELASTICSEARCH_LOGGING_REPLICAS=' ${ ELASTICSEARCH_LOGGING_REPLICAS :- } ' "
2014-11-07 04:49:21 +00:00
echo " readonly ENABLE_CLUSTER_DNS=' ${ ENABLE_CLUSTER_DNS :- false } ' "
echo " readonly DNS_REPLICAS=' ${ DNS_REPLICAS :- } ' "
2014-11-07 04:49:21 +00:00
echo " readonly DNS_SERVER_IP=' ${ DNS_SERVER_IP :- } ' "
echo " readonly DNS_DOMAIN=' ${ DNS_DOMAIN :- } ' "
2014-11-08 00:16:45 +00:00
grep -v "^#" " ${ KUBE_ROOT } /cluster/gce/templates/common.sh "
2015-02-22 19:27:16 +00:00
grep -v "^#" " ${ KUBE_ROOT } /cluster/gce/templates/mount-pd.sh "
2014-10-14 22:00:52 +00:00
grep -v "^#" " ${ KUBE_ROOT } /cluster/gce/templates/create-dynamic-salt-files.sh "
2014-10-03 21:58:49 +00:00
grep -v "^#" " ${ KUBE_ROOT } /cluster/gce/templates/download-release.sh "
grep -v "^#" " ${ KUBE_ROOT } /cluster/gce/templates/salt-master.sh "
2014-10-06 20:25:27 +00:00
) > " ${ KUBE_TEMP } /master-start.sh "

  gcloud compute instances create "${MASTER_NAME}" \
    --project "${PROJECT}" \
    --zone "${ZONE}" \
    --machine-type "${MASTER_SIZE}" \
    --image-project="${IMAGE_PROJECT}" \
    --image "${IMAGE}" \
    --tags "${MASTER_TAG}" \
    --network "${NETWORK}" \
    --scopes "storage-ro" "compute-rw" \
    --metadata-from-file "startup-script=${KUBE_TEMP}/master-start.sh" \
    --disk name="${MASTER_NAME}-pd" device-name=master-pd mode=rw boot=no auto-delete=no &

  # Create a single firewall rule for all minions.
  create-firewall-rule "${MINION_TAG}-all" "${CLUSTER_IP_RANGE}" "${MINION_TAG}" &

  # Report logging choice (if any).
  if [[ "${ENABLE_NODE_LOGGING-}" == "true" ]]; then
    echo "+++ Logging using Fluentd to ${LOGGING_DESTINATION:-unknown}"
    # For logging to GCP we need to enable some minion scopes.
    if [[ "${LOGGING_DESTINATION-}" == "gcp" ]]; then
      MINION_SCOPES+=('https://www.googleapis.com/auth/logging.write')
    fi
  fi

  # Wait for last batch of jobs.
  wait-for-jobs

  local -a scope_flags=()
  if (( "${#MINION_SCOPES[@]}" > 0 )); then
    scope_flags=("--scopes" "${MINION_SCOPES[@]}")
  else
    scope_flags=("--no-scopes")
  fi

  (
    echo "#! /bin/bash"
    echo "ZONE='${ZONE}'"
    echo "MASTER_NAME='${MASTER_NAME}'"
    echo "until MINION_IP_RANGE=\$(curl --fail --silent -H 'Metadata-Flavor: Google'\\"
    echo "  http://metadata/computeMetadata/v1/instance/attributes/node-ip-range); do"
    echo "  echo 'Waiting for metadata MINION_IP_RANGE...'"
    echo "  sleep 3"
    echo "done"
    echo "EXTRA_DOCKER_OPTS='${EXTRA_DOCKER_OPTS}'"
    echo "ENABLE_DOCKER_REGISTRY_CACHE='${ENABLE_DOCKER_REGISTRY_CACHE:-false}'"
    grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/common.sh"
    grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/salt-minion.sh"
  ) > "${KUBE_TEMP}/minion-start.sh"

  create-node-template "${NODE_INSTANCE_PREFIX}-template" "${scope_flags[*]}" \
    "startup-script=${KUBE_TEMP}/minion-start.sh"

  gcloud preview managed-instance-groups --zone "${ZONE}" \
    create "${NODE_INSTANCE_PREFIX}-group" \
    --project "${PROJECT}" \
    --base-instance-name "${NODE_INSTANCE_PREFIX}" \
    --size "${NUM_MINIONS}" \
    --template "${NODE_INSTANCE_PREFIX}-template" || true;

  # TODO: this should be true when the above create managed-instance-group
  # command returns, but currently it returns before the instances come up due
  # to gcloud's deficiency.
  wait-for-minions-to-run

  detect-minion-names

  # Create the routes and set IP ranges to instance metadata, 5 instances at a time.
  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    create-route "${MINION_NAMES[$i]}" "${MINION_IP_RANGES[$i]}" &
    add-instance-metadata "${MINION_NAMES[$i]}" "node-ip-range=${MINION_IP_RANGES[$i]}" &

    if [ $i -ne 0 ] && [ $((i%5)) -eq 0 ]; then
      echo Waiting for a batch of routes at $i...
      wait-for-jobs
    fi
  done

  # Wait for last batch of jobs.
  wait-for-jobs

  detect-master

  # Reserve the master's IP so that it can later be transferred to another VM
  # without disrupting the kubelets. IPs are associated with regions, not zones,
  # so extract the region name, which is the same as the zone but with the final
  # dash and characters trailing the dash removed.
  local REGION=${ZONE%-*}
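  # (Illustrative, hypothetical value: a ZONE of "us-central1-b" yields the
  # REGION "us-central1".)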
  gcloud compute addresses create "${MASTER_NAME}-ip" \
    --project "${PROJECT}" \
    --addresses "${KUBE_MASTER_IP}" \
    --region "${REGION}"

  echo "Waiting for cluster initialization."
  echo
  echo "  This will continually check to see if the API for kubernetes is reachable."
  echo "  This might loop forever if there was some uncaught error during start"
  echo "  up."
  echo

  until curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" --max-time 5 \
    --fail --output /dev/null --silent "https://${KUBE_MASTER_IP}/api/v1beta1/pods"; do
    printf "."
    sleep 2
  done

  echo "Kubernetes cluster created."

  local kube_cert="kubecfg.crt"
  local kube_key="kubecfg.key"
  local ca_cert="kubernetes.ca.crt"
  # TODO use token instead of kube_auth
  local kube_auth="kubernetes_auth"

  local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
  local context="${PROJECT}_${INSTANCE_PREFIX}"
  local user="${context}-admin"
  local config_dir="${HOME}/.kube/${context}"

  # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's
  # config file. Distribute the same way the htpasswd is done.
  (
    mkdir -p "${config_dir}"
    umask 077
    gcloud compute ssh --project "${PROJECT}" --zone "$ZONE" "${MASTER_NAME}" --command "sudo cat /srv/kubernetes/kubecfg.crt" >"${config_dir}/${kube_cert}" 2>/dev/null
    gcloud compute ssh --project "${PROJECT}" --zone "$ZONE" "${MASTER_NAME}" --command "sudo cat /srv/kubernetes/kubecfg.key" >"${config_dir}/${kube_key}" 2>/dev/null
    gcloud compute ssh --project "${PROJECT}" --zone "$ZONE" "${MASTER_NAME}" --command "sudo cat /srv/kubernetes/ca.crt" >"${config_dir}/${ca_cert}" 2>/dev/null

    "${kubectl}" config set-cluster "${context}" --server="https://${KUBE_MASTER_IP}" --certificate-authority="${config_dir}/${ca_cert}" --global
    "${kubectl}" config set-credentials "${user}" --auth-path="${config_dir}/${kube_auth}" --global
    "${kubectl}" config set-context "${context}" --cluster="${context}" --user="${user}" --global
    "${kubectl}" config use-context "${context}" --global

    cat << EOF > "${config_dir}/${kube_auth}"
{
  "User": "$KUBE_USER",
  "Password": "$KUBE_PASSWORD",
  "CAFile": "${config_dir}/${ca_cert}",
  "CertFile": "${config_dir}/${kube_cert}",
  "KeyFile": "${config_dir}/${kube_key}"
}
EOF

    chmod 0600 "${config_dir}/${kube_auth}" "${config_dir}/$kube_cert" \
      "${config_dir}/${kube_key}" "${config_dir}/${ca_cert}"
    echo "Wrote ${config_dir}/${kube_auth}"
  )

  echo "Sanity checking cluster..."

  # Basic sanity checking
  local i
  local rc # Capture return code without exiting because of errexit bash option
  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    # Make sure docker is installed and working.
    local attempt=0
    while true; do
      echo -n Attempt "$(($attempt+1))" to check Docker on node "${MINION_NAMES[$i]}" ...
      local output=$(gcloud compute --project "${PROJECT}" ssh --zone "$ZONE" "${MINION_NAMES[$i]}" --command "sudo docker ps -a" 2>/dev/null)
      if [[ -z "${output}" ]]; then
        if (( attempt > 9 )); then
          echo
          echo -e "${color_red}Docker failed to install on node ${MINION_NAMES[$i]}. Your cluster is unlikely" >&2
          echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2
          echo -e "cluster. (sorry!)${color_norm}" >&2
          exit 1
        fi
      elif [[ "${output}" != *"kubernetes/pause"* ]]; then
        if (( attempt > 9 )); then
          echo
          echo -e "${color_red}Failed to observe kubernetes/pause on node ${MINION_NAMES[$i]}. Your cluster is unlikely" >&2
          echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2
          echo -e "cluster. (sorry!)${color_norm}" >&2
          exit 1
        fi
      else
        echo -e " ${color_green}[working]${color_norm}"
        break
      fi
      echo -e " ${color_yellow}[not working yet]${color_norm}"
      # Start Docker, in case it failed to start.
      gcloud compute --project "${PROJECT}" ssh --zone "$ZONE" "${MINION_NAMES[$i]}" \
        --command "sudo service docker start" 2>/dev/null || true
      attempt=$(($attempt+1))
      sleep 30
    done
  done

  echo
  echo -e "${color_green}Kubernetes cluster is running. The master is running at:"
  echo
  echo -e "${color_yellow}  https://${KUBE_MASTER_IP}"
  echo
  echo -e "${color_green}The user name and password to use is located in ${config_dir}/${kube_auth}.${color_norm}"
  echo
}

# Delete a kubernetes cluster. This is called from test-teardown.
#
# Assumed vars:
# MASTER_NAME
# NODE_INSTANCE_PREFIX
# ZONE
# This function tears down cluster resources 10 at a time to avoid issuing too many
# API calls and exceeding API quota. It is important to bring down the instances before bringing
# down the firewall rules and routes.
function kube-down {
  detect-project

  echo "Bringing down cluster"

  gcloud preview managed-instance-groups --zone "${ZONE}" delete \
    --project "${PROJECT}" \
    --quiet \
    "${NODE_INSTANCE_PREFIX}-group" || true

  gcloud compute instance-templates delete \
    --project "${PROJECT}" \
    --quiet \
    "${NODE_INSTANCE_PREFIX}-template" || true

  # First delete the master (if it exists).
  gcloud compute instances delete \
    --project "${PROJECT}" \
    --quiet \
    --delete-disks all \
    --zone "${ZONE}" \
    "${MASTER_NAME}" || true

  # Find out what minions are running.
  local -a minions
  minions=( $(gcloud compute instances list \
                --project "${PROJECT}" --zone "${ZONE}" \
                --regexp "${NODE_INSTANCE_PREFIX}-.+" \
                | awk 'NR >= 2 { print $1 }') )
  # If any minions are running, delete them in batches.
  while (( "${#minions[@]}" > 0 )); do
    echo Deleting nodes "${minions[*]::10}"
    gcloud compute instances delete \
      --project "${PROJECT}" \
      --quiet \
      --delete-disks boot \
      --zone "${ZONE}" \
      "${minions[@]::10}" || true
    minions=( "${minions[@]:10}" )
  done

  # Delete firewall rule for the master.
  gcloud compute firewall-rules delete \
    --project "${PROJECT}" \
    --quiet \
    "${MASTER_NAME}-https" || true

  # Delete firewall rule for minions.
  gcloud compute firewall-rules delete \
    --project "${PROJECT}" \
    --quiet \
    "${MINION_TAG}-all" || true

  # Delete routes.
  local -a routes
  routes=( $(gcloud compute routes list --project "${PROJECT}" \
              --regexp "${NODE_INSTANCE_PREFIX}-.+" | awk 'NR >= 2 { print $1 }') )
  while (( "${#routes[@]}" > 0 )); do
    echo Deleting routes "${routes[*]::10}"
    gcloud compute routes delete \
      --project "${PROJECT}" \
      --quiet \
      "${routes[@]::10}" || true
    routes=( "${routes[@]:10}" )
  done

  # Delete the master's reserved IP
  local REGION=${ZONE%-*}
  gcloud compute addresses delete \
    --project "${PROJECT}" \
    --region "${REGION}" \
    --quiet \
    "${MASTER_NAME}-ip" || true
}

# Update a kubernetes cluster with latest source
function kube-push {
  detect-project
  detect-master

  # Make sure we have the tar files staged on Google Storage
  find-release-tars
  upload-server-tars

  (
    echo "#! /bin/bash"
    echo "mkdir -p /var/cache/kubernetes-install"
    echo "cd /var/cache/kubernetes-install"
    echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'"
    echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
    grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/common.sh"
    grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/download-release.sh"
    echo "echo Executing configuration"
    echo "sudo salt '*' mine.update"
    echo "sudo salt --force-color '*' state.highstate"
  ) | gcloud compute ssh --project "${PROJECT}" --zone "$ZONE" "$KUBE_MASTER" --command "sudo bash"

  get-password

  echo
  echo "Kubernetes cluster is running. The master is running at:"
  echo
  echo "  https://${KUBE_MASTER_IP}"
  echo
  echo "The user name and password to use is located in ~/.kubernetes_auth."
  echo
}

# -----------------------------------------------------------------------------
# Cluster specific test helpers used from hack/e2e-test.sh

# Execute prior to running tests to build a release if required for env.
#
# Assumed Vars:
# KUBE_ROOT
function test-build-release {
  # Make a release
  "${KUBE_ROOT}/build/release.sh"
}

# Execute prior to running tests to initialize required structure. This is
# called from hack/e2e.go only when running -up (it is run after kube-up).
#
# Assumed vars:
# Variables from config.sh
function test-setup {
  # Detect the project into $PROJECT if it isn't set
  detect-project

  # Open up port 80 & 8080 so common containers on minions can be reached
  gcloud compute firewall-rules create \
    --project "${PROJECT}" \
    --target-tags "${MINION_TAG}" \
    --allow tcp:80 tcp:8080 \
    --network "${NETWORK}" \
    "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt"
}

# Execute after running tests to perform any required clean-up. This is called
# from hack/e2e.go
function test-teardown {
  detect-project

  echo "Shutting down test cluster in background."
  gcloud compute firewall-rules delete \
    --project "${PROJECT}" \
    --quiet \
    "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt" || true
  "${KUBE_ROOT}/cluster/kube-down.sh"
}

# SSH to a node by name ($1) and run a command ($2).
function ssh-to-node {
  local node="$1"
  local cmd="$2"
  for try in $(seq 1 5); do
    if gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" --zone="${ZONE}" "${node}" --command "${cmd}"; then
      break
    fi
  done
}

# Restart the kube-proxy on a node ($1)
function restart-kube-proxy {
  ssh-to-node "$1" "sudo /etc/init.d/kube-proxy restart"
}

# Restart the kube-apiserver on a node ($1)
function restart-apiserver {
  ssh-to-node "$1" "sudo /etc/init.d/kube-apiserver restart"
}

# Setup monitoring firewalls using heapster and InfluxDB
function setup-monitoring-firewall {
  if [[ "${ENABLE_CLUSTER_MONITORING}" != "true" ]]; then
    return
  fi

echo "Setting up firewalls to Heapster based cluster monitoring."
detect-project
gcloud compute firewall-rules create " ${ INSTANCE_PREFIX } -monitoring-heapster " --project " ${ PROJECT } " \
--allow tcp:80 tcp:8083 tcp:8086 --target-tags= " ${ MINION_TAG } " --network= " ${ NETWORK } "
2015-02-26 12:54:59 +00:00
echo
echo -e " ${ color_green } Grafana dashboard will be available at ${ color_yellow } https:// ${ KUBE_MASTER_IP } /api/v1beta1/proxy/services/monitoring-grafana/ ${ color_green } . Wait for the monitoring dashboard to be online. ${ color_norm } "
echo
2014-11-06 19:35:33 +00:00
}

function teardown-monitoring-firewall {
  if [[ "${ENABLE_CLUSTER_MONITORING}" != "true" ]]; then
    return
  fi

  detect-project
  gcloud compute firewall-rules delete -q "${INSTANCE_PREFIX}-monitoring-heapster" --project "${PROJECT}" || true
}

function setup-logging-firewall {
  # If logging with Fluentd to Elasticsearch is enabled then create pods
  # and services for Elasticsearch (for ingesting logs) and Kibana (for
  # viewing logs).
if [ [ " ${ ENABLE_NODE_LOGGING - } " != "true" ] ] || \
[ [ " ${ LOGGING_DESTINATION - } " != "elasticsearch" ] ] || \
[ [ " ${ ENABLE_CLUSTER_LOGGING - } " != "true" ] ] ; then
return
2015-01-07 23:02:35 +00:00
fi
2015-01-18 23:16:52 +00:00
detect-project
  gcloud compute firewall-rules create "${INSTANCE_PREFIX}-fluentd-elasticsearch-logging" --project "${PROJECT}" \
    --allow tcp:5601 tcp:9200 tcp:9300 --target-tags "${MINION_TAG}" --network="${NETWORK}"
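  # Optional (sketch, not part of the original flow): the freshly created rule
  # can be inspected manually with the describe subcommand, e.g.
  #   gcloud compute firewall-rules describe \
  #     "${INSTANCE_PREFIX}-fluentd-elasticsearch-logging" --project "${PROJECT}"
  # The commit message above deliberately avoids a describe-before-create
  # check, so this is only for hand verification.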
2015-02-03 16:27:02 +00:00
# This should be nearly instant once kube-addons gets a chance to
# run, and we already know we can hit the apiserver, but it's still
# worth checking.
echo "waiting for logging services to be created by the master."
local kubectl = " ${ KUBE_ROOT } /cluster/kubectl.sh "
for i in ` seq 1 10` ; do
if " ${ kubectl } " get services -l name = kibana-logging -o template -t { { range.items} } { { .id} } { { end} } | grep -q kibana-logging &&
" ${ kubectl } " get services -l name = elasticsearch-logging -o template -t { { range.items} } { { .id} } { { end} } | grep -q elasticsearch-logging; then
break
fi
sleep 10
done
2015-02-07 00:35:39 +00:00
local -r region = " ${ ZONE : 0 : ${# ZONE } -2 } "
2015-02-13 22:58:42 +00:00
  local -r es_ip=$(gcloud compute forwarding-rules --project "${PROJECT}" describe --region "${region}" "${INSTANCE_PREFIX}"-elasticsearch-logging | grep IPAddress | awk '{print $2}')
  local -r kibana_ip=$(gcloud compute forwarding-rules --project "${PROJECT}" describe --region "${region}" "${INSTANCE_PREFIX}"-kibana-logging | grep IPAddress | awk '{print $2}')
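  # Alternative sketch (assumes a gcloud release that supports --format
  # projections, which is not guaranteed for the version used here): ask
  # gcloud for just the field instead of piping through grep/awk, e.g.
  #   gcloud compute forwarding-rules describe "${INSTANCE_PREFIX}-elasticsearch-logging" \
  #     --project "${PROJECT}" --region "${region}" --format='value(IPAddress)'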
2015-02-03 16:27:02 +00:00
echo
echo -e " ${ color_green } Cluster logs are ingested into Elasticsearch running at ${ color_yellow } http:// ${ es_ip } :9200 "
echo -e " ${ color_green } Kibana logging dashboard will be available at ${ color_yellow } http:// ${ kibana_ip } :5601 ${ color_norm } "
echo
2015-01-07 23:02:35 +00:00
}
2015-01-18 23:16:52 +00:00
function teardown-logging-firewall {
if [ [ " ${ ENABLE_NODE_LOGGING - } " != "true" ] ] || \
[ [ " ${ LOGGING_DESTINATION - } " != "elasticsearch" ] ] || \
[ [ " ${ ENABLE_CLUSTER_LOGGING - } " != "true" ] ] ; then
return
2015-01-07 23:02:35 +00:00
fi
2015-01-18 23:16:52 +00:00
detect-project
  gcloud compute firewall-rules delete -q "${INSTANCE_PREFIX}-fluentd-elasticsearch-logging" --project "${PROJECT}" || true
2015-02-19 22:30:53 +00:00
  # Also delete the logging services, which will remove the associated
  # forwarding rules (TCP load balancers).
  local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
  "${kubectl}" delete services elasticsearch-logging || true
  "${kubectl}" delete services kibana-logging || true
2015-01-07 23:02:35 +00:00
}
2014-11-11 19:03:07 +00:00
# Perform preparations required to run e2e tests
function prepare-e2e() {
detect-project
}