#!/bin/bash

# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# A library of helper functions and constants for the local config.

# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/gce/${KUBE_CONFIG_FILE-"config-default.sh"}"
source "${KUBE_ROOT}/cluster/common.sh"

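# Prefix used to name the minion instances and their instance group.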
NODE_INSTANCE_PREFIX="${INSTANCE_PREFIX}-minion"

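# Whether verify-prereqs should prompt before updating gcloud components, and
# whether the component update should be skipped entirely.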
KUBE_PROMPT_FOR_UPDATE=y
KUBE_SKIP_UPDATE=${KUBE_SKIP_UPDATE-"n"}

# Verify prereqs
function verify-prereqs {
  local cmd
  for cmd in gcloud gsutil; do
    if ! which "${cmd}" >/dev/null; then
      local resp
      if [[ "${KUBE_PROMPT_FOR_UPDATE}" == "y" ]]; then
        echo "Can't find ${cmd} in PATH. Do you wish to install the Google Cloud SDK? [Y/n]"
        read resp
      else
        resp="y"
      fi
      if [[ "${resp}" != "n" && "${resp}" != "N" ]]; then
        curl https://sdk.cloud.google.com | bash
      fi
      if ! which "${cmd}" >/dev/null; then
        echo "Can't find ${cmd} in PATH, please fix and retry. The Google Cloud "
        echo "SDK can be downloaded from https://cloud.google.com/sdk/."
        exit 1
      fi
    fi
  done

  if [[ "${KUBE_SKIP_UPDATE}" == "y" ]]; then
    return
  fi

  # update and install components as needed
  if [[ "${KUBE_PROMPT_FOR_UPDATE}" != "y" ]]; then
    gcloud_prompt="-q"
  fi
  local sudo_prefix=""
  if [ ! -w "$(dirname "$(which gcloud)")" ]; then
    sudo_prefix="sudo"
  fi
  ${sudo_prefix} gcloud ${gcloud_prompt:-} components update preview || true
  ${sudo_prefix} gcloud ${gcloud_prompt:-} components update alpha || true
  ${sudo_prefix} gcloud ${gcloud_prompt:-} components update || true
}

# Create a temp dir that'll be deleted at the end of this bash session.
#
# Vars set:
#   KUBE_TEMP
function ensure-temp-dir {
  if [[ -z ${KUBE_TEMP-} ]]; then
    KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
    trap 'rm -rf "${KUBE_TEMP}"' EXIT
  fi
}

# Verify and find the various tar files that we are going to use on the server.
#
# Vars set:
#   SERVER_BINARY_TAR
#   SALT_TAR
function find-release-tars {
  SERVER_BINARY_TAR="${KUBE_ROOT}/server/kubernetes-server-linux-amd64.tar.gz"
  if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
    SERVER_BINARY_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
  fi
  if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
    echo "!!! Cannot find kubernetes-server-linux-amd64.tar.gz"
    exit 1
  fi

  SALT_TAR="${KUBE_ROOT}/server/kubernetes-salt.tar.gz"
  if [[ ! -f "$SALT_TAR" ]]; then
    SALT_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-salt.tar.gz"
  fi
  if [[ ! -f "$SALT_TAR" ]]; then
    echo "!!! Cannot find kubernetes-salt.tar.gz"
    exit 1
  fi
}

# Use the gcloud defaults to find the project. If it is already set in the
# environment then go with that.
#
# Vars set:
#   PROJECT
#   PROJECT_REPORTED
function detect-project () {
  if [[ -z "${PROJECT-}" ]]; then
    PROJECT=$(gcloud config list project | tail -n 1 | cut -f 3 -d ' ')
  fi

  if [[ -z "${PROJECT-}" ]]; then
    echo "Could not detect Google Cloud Platform project. Set the default project using " >&2
    echo "'gcloud config set project <PROJECT>'" >&2
    exit 1
  fi
  if [[ -z "${PROJECT_REPORTED-}" ]]; then
    echo "Project: ${PROJECT}" >&2
    echo "Zone: ${ZONE}" >&2
    PROJECT_REPORTED=true
  fi
}

# Take the local tar files and upload them to Google Storage. They will then be
# downloaded by the master as part of the start up script for the master.
#
# Assumed vars:
#   PROJECT
#   SERVER_BINARY_TAR
#   SALT_TAR
# Vars set:
#   SERVER_BINARY_TAR_URL
#   SALT_TAR_URL
function upload-server-tars() {
  SERVER_BINARY_TAR_URL=
  SALT_TAR_URL=

  local project_hash
  if which md5 > /dev/null 2>&1; then
    project_hash=$(md5 -q -s "$PROJECT")
  else
    project_hash=$(echo -n "$PROJECT" | md5sum | awk '{ print $1 }')
  fi
  # This requires 1 million projects before the probability of collision is 50%
  # that's probably good enough for now :P
  project_hash=${project_hash:0:10}

  local -r staging_bucket="gs://kubernetes-staging-${project_hash}"

  # Ensure the bucket is created
  if ! gsutil ls "$staging_bucket" > /dev/null 2>&1 ; then
    echo "Creating $staging_bucket"
    gsutil mb "${staging_bucket}"
  fi

  local -r staging_path="${staging_bucket}/devel"

  echo "+++ Staging server tars to Google Storage: ${staging_path}"
  local server_binary_gs_url="${staging_path}/${SERVER_BINARY_TAR##*/}"
  gsutil -q -h "Cache-Control:private, max-age=0" cp "${SERVER_BINARY_TAR}" "${server_binary_gs_url}"
  gsutil acl ch -g all:R "${server_binary_gs_url}" >/dev/null 2>&1
  local salt_gs_url="${staging_path}/${SALT_TAR##*/}"
  gsutil -q -h "Cache-Control:private, max-age=0" cp "${SALT_TAR}" "${salt_gs_url}"
  gsutil acl ch -g all:R "${salt_gs_url}" >/dev/null 2>&1

  # Convert from gs:// URL to an https:// URL
  SERVER_BINARY_TAR_URL="${server_binary_gs_url/gs:\/\//https://storage.googleapis.com/}"
  SALT_TAR_URL="${salt_gs_url/gs:\/\//https://storage.googleapis.com/}"
}

# Detect minions created in the minion group
#
# Assumed vars:
#   NODE_INSTANCE_PREFIX
# Vars set:
#   MINION_NAMES
function detect-minion-names {
  detect-project
  MINION_NAMES=($(gcloud preview --project "${PROJECT}" instance-groups \
    --zone "${ZONE}" instances --group "${NODE_INSTANCE_PREFIX}-group" list \
    | cut -d'/' -f11))
  echo "MINION_NAMES=${MINION_NAMES[*]}"
}

# Waits until the number of running nodes in the instance group is equal to NUM_MINIONS
#
# Assumed vars:
#   NODE_INSTANCE_PREFIX
#   NUM_MINIONS
function wait-for-minions-to-run {
  detect-project
  local running_minions=0
  while [[ "${NUM_MINIONS}" != "${running_minions}" ]]; do
    echo -e -n "${color_yellow}Waiting for minions to run. "
    echo -e "${running_minions} out of ${NUM_MINIONS} running. Retrying.${color_norm}"
    sleep 5
    running_minions=$(gcloud preview --project "${PROJECT}" instance-groups \
      --zone "${ZONE}" instances --group "${NODE_INSTANCE_PREFIX}-group" list \
      --running | wc -l | xargs)
  done
}

# Detect the information about the minions
#
# Assumed vars:
#   ZONE
# Vars set:
#   MINION_NAMES
#   KUBE_MINION_IP_ADDRESSES (array)
function detect-minions () {
  detect-project
  detect-minion-names
  KUBE_MINION_IP_ADDRESSES=()
  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    local minion_ip=$(gcloud compute instances describe --project "${PROJECT}" --zone "${ZONE}" \
      "${MINION_NAMES[$i]}" --fields networkInterfaces[0].accessConfigs[0].natIP \
      --format=text | awk '{ print $2 }')
    if [[ -z "${minion_ip-}" ]] ; then
      echo "Did not find ${MINION_NAMES[$i]}" >&2
    else
      echo "Found ${MINION_NAMES[$i]} at ${minion_ip}"
      KUBE_MINION_IP_ADDRESSES+=("${minion_ip}")
    fi
  done
  if [[ -z "${KUBE_MINION_IP_ADDRESSES-}" ]]; then
    echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'" >&2
    exit 1
  fi
}

# Detect the IP for the master
#
# Assumed vars:
#   MASTER_NAME
#   ZONE
# Vars set:
#   KUBE_MASTER
#   KUBE_MASTER_IP
function detect-master () {
  detect-project
  KUBE_MASTER=${MASTER_NAME}
  if [[ -z "${KUBE_MASTER_IP-}" ]]; then
    KUBE_MASTER_IP=$(gcloud compute instances describe --project "${PROJECT}" --zone "${ZONE}" \
      "${MASTER_NAME}" --fields networkInterfaces[0].accessConfigs[0].natIP \
      --format=text | awk '{ print $2 }')
  fi
  if [[ -z "${KUBE_MASTER_IP-}" ]]; then
    echo "Could not detect Kubernetes master node. Make sure you've launched a cluster with 'kube-up.sh'" >&2
    exit 1
  fi
  echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
}

# Ensure that we have a password created for validating to the master. Will
# read from kubeconfig for the current context if available.
#
# Assumed vars
#   KUBE_ROOT
#
# Vars set:
#   KUBE_USER
#   KUBE_PASSWORD
function get-password {
  get-kubeconfig-basicauth
  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
    KUBE_USER=admin
    KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
  fi
}

# Set MASTER_HTPASSWD
function set-master-htpasswd {
  python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
    -b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
  local htpasswd
  MASTER_HTPASSWD=$(cat "${KUBE_TEMP}/htpasswd")
}

# Generate authentication token for admin user. Will
# read from $HOME/.kubernetes_auth if available.
#
# Vars set:
#   KUBE_ADMIN_TOKEN
function get-admin-token {
  local file="$HOME/.kubernetes_auth"
  if [[ -r "$file" ]]; then
    KUBE_ADMIN_TOKEN=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["BearerToken"]')
    return
  fi
  KUBE_ADMIN_TOKEN=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(32))')
}

# Wait for background jobs to finish. Exit with
# an error status if any of the jobs failed.
function wait-for-jobs {
  local fail=0
  local job
  for job in $(jobs -p); do
    wait "${job}" || fail=$((fail + 1))
  done
  if (( fail != 0 )); then
    echo -e "${color_red}${fail} commands failed. Exiting.${color_norm}" >&2
    # Ignore failures for now.
    # exit 2
  fi
}

# Robustly try to create a firewall rule.
# $1: The name of firewall rule.
# $2: IP ranges.
# $3: Target tags for this firewall rule.
function create-firewall-rule {
  detect-project
  local attempt=0
  while true; do
    if ! gcloud compute firewall-rules create "$1" \
      --project "${PROJECT}" \
      --network "${NETWORK}" \
      --source-ranges "$2" \
      --target-tags "$3" \
      --allow tcp udp icmp esp ah sctp; then
      if (( attempt > 5 )); then
        echo -e "${color_red}Failed to create firewall rule $1 ${color_norm}"
        exit 2
      fi
      echo -e "${color_yellow}Attempt $(($attempt+1)) failed to create firewall rule $1. Retrying.${color_norm}"
      attempt=$(($attempt+1))
    else
      break
    fi
  done
}

# Robustly try to create a route.
# $1: The name of the route.
# $2: IP range.
function create-route {
  detect-project
  local attempt=0
  while true; do
    if ! gcloud compute routes create "$1" \
      --project "${PROJECT}" \
      --destination-range "$2" \
      --network "${NETWORK}" \
      --next-hop-instance "$1" \
      --next-hop-instance-zone "${ZONE}"; then
      if (( attempt > 5 )); then
        echo -e "${color_red}Failed to create route $1 ${color_norm}"
        exit 2
      fi
      echo -e "${color_yellow}Attempt $(($attempt+1)) failed to create route $1. Retrying.${color_norm}"
      attempt=$(($attempt+1))
    else
      break
    fi
  done
}

# Robustly try to create an instance template.
# $1: The name of the instance template.
# $2: The scopes flag.
# $3: The minion start script metadata from file.
# $4: The kube-env metadata.
# $5: Raw metadata
function create-node-template {
  detect-project
  local attempt=0
  while true; do
    if ! gcloud compute instance-templates create "$1" \
      --project "${PROJECT}" \
      --machine-type "${MINION_SIZE}" \
      --boot-disk-type "${MINION_DISK_TYPE}" \
      --boot-disk-size "${MINION_DISK_SIZE}" \
      --image-project="${IMAGE_PROJECT}" \
      --image "${IMAGE}" \
      --tags "${MINION_TAG}" \
      --network "${NETWORK}" \
      $2 \
      --can-ip-forward \
      --metadata-from-file "$3" "$4" \
      --metadata "$5"; then
      if (( attempt > 5 )); then
        echo -e "${color_red}Failed to create instance template $1 ${color_norm}"
        exit 2
      fi
      echo -e "${color_yellow}Attempt $(($attempt+1)) failed to create instance template $1. Retrying.${color_norm}"
      attempt=$(($attempt+1))
    else
      break
    fi
  done
}

# Robustly try to add metadata on an instance.
# $1: The name of the instance.
# $2...$n: The metadata key=value pairs to add.
function add-instance-metadata {
  local -r instance=$1
  shift 1
  local -r kvs=( "$@" )
  detect-project
  local attempt=0
  while true; do
    if ! gcloud compute instances add-metadata "${instance}" \
      --project "${PROJECT}" \
      --zone "${ZONE}" \
      --metadata "${kvs[@]}"; then
      if (( attempt > 5 )); then
        echo -e "${color_red}Failed to add instance metadata in ${instance} ${color_norm}"
        exit 2
      fi
      echo -e "${color_yellow}Attempt $(($attempt+1)) failed to add metadata in ${instance}. Retrying.${color_norm}"
      attempt=$(($attempt+1))
    else
      break
    fi
  done
}

# Robustly try to add metadata on an instance, from a file.
# $1: The name of the instance.
# $2...$n: The metadata key=file pairs to add.
function add-instance-metadata-from-file {
  local -r instance=$1
  shift 1
  local -r kvs=( "$@" )
  detect-project
  local attempt=0
  while true; do
    echo "${kvs[@]}"
    if ! gcloud compute instances add-metadata "${instance}" \
      --project "${PROJECT}" \
      --zone "${ZONE}" \
      --metadata-from-file "${kvs[@]}"; then
      if (( attempt > 5 )); then
        echo -e "${color_red}Failed to add instance metadata in ${instance} ${color_norm}"
        exit 2
      fi
      echo -e "${color_yellow}Attempt $(($attempt+1)) failed to add metadata in ${instance}. Retrying.${color_norm}"
      attempt=$(($attempt+1))
    else
      break
    fi
  done
}

# Quote something appropriate for a yaml string.
#
# TODO(zmerlynn): Note that this function doesn't so much "quote" as
# "strip out quotes", and we really should be using a YAML library for
# this, but PyYAML isn't shipped by default, and *rant rant rant ... SIGH*
function yaml-quote {
  echo "'$(echo "${@}" | sed -e "s/'/''/g")'"
}

# $1: if 'true', we're building a master yaml, else a node
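# $2: the file to write the resulting kube-env YAML to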
function build-kube-env {
  local master=$1
  local file=$2

  rm -f ${file}
  cat >$file <<EOF
ENV_TIMESTAMP: $(yaml-quote $(date -u +%Y-%m-%dT%T%z))
INSTANCE_PREFIX: $(yaml-quote ${INSTANCE_PREFIX})
NODE_INSTANCE_PREFIX: $(yaml-quote ${NODE_INSTANCE_PREFIX})
SERVER_BINARY_TAR_URL: $(yaml-quote ${SERVER_BINARY_TAR_URL})
SALT_TAR_URL: $(yaml-quote ${SALT_TAR_URL})
PORTAL_NET: $(yaml-quote ${PORTAL_NET})
ENABLE_CLUSTER_MONITORING: $(yaml-quote ${ENABLE_CLUSTER_MONITORING:-false})
ENABLE_NODE_MONITORING: $(yaml-quote ${ENABLE_NODE_MONITORING:-false})
ENABLE_CLUSTER_LOGGING: $(yaml-quote ${ENABLE_CLUSTER_LOGGING:-false})
ENABLE_NODE_LOGGING: $(yaml-quote ${ENABLE_NODE_LOGGING:-false})
LOGGING_DESTINATION: $(yaml-quote ${LOGGING_DESTINATION:-})
ELASTICSEARCH_LOGGING_REPLICAS: $(yaml-quote ${ELASTICSEARCH_LOGGING_REPLICAS:-})
ENABLE_CLUSTER_DNS: $(yaml-quote ${ENABLE_CLUSTER_DNS:-false})
DNS_REPLICAS: $(yaml-quote ${DNS_REPLICAS:-})
DNS_SERVER_IP: $(yaml-quote ${DNS_SERVER_IP:-})
DNS_DOMAIN: $(yaml-quote ${DNS_DOMAIN:-})
MASTER_HTPASSWD: $(yaml-quote ${MASTER_HTPASSWD})
ADMISSION_CONTROL: $(yaml-quote ${ADMISSION_CONTROL:-})
MASTER_IP_RANGE: $(yaml-quote ${MASTER_IP_RANGE})
EOF

  if [[ "${master}" != "true" ]]; then
    cat >>$file <<EOF
KUBERNETES_MASTER_NAME: $(yaml-quote ${MASTER_NAME})
ZONE: $(yaml-quote ${ZONE})
EXTRA_DOCKER_OPTS: $(yaml-quote ${EXTRA_DOCKER_OPTS})
ENABLE_DOCKER_REGISTRY_CACHE: $(yaml-quote ${ENABLE_DOCKER_REGISTRY_CACHE:-false})
EOF
  fi
}

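# Write the kube-env for the master to a file in KUBE_TEMP.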
function write-master-env {
  build-kube-env true "${KUBE_TEMP}/master-kube-env.yaml"
}

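# Write the kube-env for the nodes to a file in KUBE_TEMP.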
function write-node-env {
  build-kube-env false "${KUBE_TEMP}/node-kube-env.yaml"
}

# create-master-instance creates the master instance. If called with
#   an argument, the argument is used as the name to a reserved IP
#   address for the master. (In the case of upgrade/repair, we re-use
#   the same IP.)
#
# It requires a whole slew of assumed variables, partially due to
# the call to write-master-env. Listing them would be rather
# futile. Instead, we list the required calls to ensure any additional
# variables are set:
#   ensure-temp-dir
#   detect-project
#   get-password
#   set-master-htpasswd
#
function create-master-instance {
  local address_opt=""
  [[ -n ${1:-} ]] && address_opt="--address ${1}"

  write-master-env
  gcloud compute instances create "${MASTER_NAME}" \
    ${address_opt} \
    --project "${PROJECT}" \
    --zone "${ZONE}" \
    --machine-type "${MASTER_SIZE}" \
    --image-project="${IMAGE_PROJECT}" \
    --image "${IMAGE}" \
    --tags "${MASTER_TAG}" \
    --network "${NETWORK}" \
    --scopes "storage-ro" "compute-rw" \
    --can-ip-forward \
    --metadata-from-file \
      "startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh" \
      "kube-env=${KUBE_TEMP}/master-kube-env.yaml" \
    --disk name="${MASTER_NAME}-pd" device-name=master-pd mode=rw boot=no auto-delete=no
}

# Instantiate a kubernetes cluster
#
# Assumed vars
#   KUBE_ROOT
#   <Various vars set in config file>
function kube-up {
  ensure-temp-dir
  detect-project

  get-password
  set-master-htpasswd

  # Make sure we have the tar files staged on Google Storage
  find-release-tars
  upload-server-tars

  if ! gcloud compute networks --project "${PROJECT}" describe "${NETWORK}" &>/dev/null; then
    echo "Creating new network: ${NETWORK}"
    # The network needs to be created synchronously or we have a race. The
    # firewalls can be added concurrent with instance creation.
    gcloud compute networks create --project "${PROJECT}" "${NETWORK}" --range "10.240.0.0/16"
  fi

  if ! gcloud compute firewall-rules --project "${PROJECT}" describe "${NETWORK}-default-internal" &>/dev/null; then
    gcloud compute firewall-rules create "${NETWORK}-default-internal" \
      --project "${PROJECT}" \
      --network "${NETWORK}" \
      --source-ranges "10.0.0.0/8" \
      --allow "tcp:1-65535" "udp:1-65535" "icmp" &
  fi

  if ! gcloud compute firewall-rules describe --project "${PROJECT}" "${NETWORK}-default-ssh" &>/dev/null; then
    gcloud compute firewall-rules create "${NETWORK}-default-ssh" \
      --project "${PROJECT}" \
      --network "${NETWORK}" \
      --source-ranges "0.0.0.0/0" \
      --allow "tcp:22" &
  fi

  echo "Starting master and configuring firewalls"
  gcloud compute firewall-rules create "${MASTER_NAME}-https" \
    --project "${PROJECT}" \
    --network "${NETWORK}" \
    --target-tags "${MASTER_TAG}" \
    --allow tcp:443 &

  # We have to make sure the disk is created before creating the master VM, so
  # run this in the foreground.
  gcloud compute disks create "${MASTER_NAME}-pd" \
    --project "${PROJECT}" \
    --zone "${ZONE}" \
    --size "10GB"

  # Generate a bearer token for kubelets in this cluster. We push this
  # separately from the other cluster variables so that the client (this
  # computer) can forget it later. This should disappear with
  # https://github.com/GoogleCloudPlatform/kubernetes/issues/3168
  KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
  KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)

  # Reserve the master's IP so that it can later be transferred to another VM
  # without disrupting the kubelets. IPs are associated with regions, not zones,
  # so extract the region name, which is the same as the zone but with the final
  # dash and characters trailing the dash removed.
  local REGION=${ZONE%-*}
  MASTER_RESERVED_IP=$(gcloud compute addresses create "${MASTER_NAME}-ip" \
    --project "${PROJECT}" \
    --region "${REGION}" -q --format yaml | awk '/^address:/ { print $2 }')

  create-master-instance $MASTER_RESERVED_IP &

  # Create a single firewall rule for all minions.
  create-firewall-rule "${MINION_TAG}-all" "${CLUSTER_IP_RANGE}" "${MINION_TAG}" &

  # Report logging choice (if any).
  if [[ "${ENABLE_NODE_LOGGING-}" == "true" ]]; then
    echo "+++ Logging using Fluentd to ${LOGGING_DESTINATION:-unknown}"
    # For logging to GCP we need to enable some minion scopes.
    if [[ "${LOGGING_DESTINATION-}" == "gcp" ]]; then
      MINION_SCOPES+=('https://www.googleapis.com/auth/logging.write')
    fi
  fi

  # Wait for last batch of jobs
  wait-for-jobs
  add-instance-metadata "${MASTER_NAME}" "kubelet-token=${KUBELET_TOKEN}"
  add-instance-metadata "${MASTER_NAME}" "kube-proxy-token=${KUBE_PROXY_TOKEN}"

  echo "Creating minions."

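  # Build the scopes flag for the node template from MINION_SCOPES.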
  local -a scope_flags=()
  if (( "${#MINION_SCOPES[@]}" > 0 )); then
    scope_flags=("--scopes" "${MINION_SCOPES[@]}")
  else
    scope_flags=("--no-scopes")
  fi

  write-node-env
  create-node-template "${NODE_INSTANCE_PREFIX}-template" "${scope_flags[*]}" \
    "startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh" \
    "kube-env=${KUBE_TEMP}/node-kube-env.yaml" \
    "kubelet-token=${KUBELET_TOKEN}" \
    "kube-proxy-token=${KUBE_PROXY_TOKEN}"

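  # Create the managed instance group for the minions from the template above.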
gcloud preview managed-instance-groups --zone "${ZONE}" \
|
|
|
|
create "${NODE_INSTANCE_PREFIX}-group" \
|
|
|
|
--project "${PROJECT}" \
|
|
|
|
--base-instance-name "${NODE_INSTANCE_PREFIX}" \
|
|
|
|
--size "${NUM_MINIONS}" \
|
|
|
|
--template "${NODE_INSTANCE_PREFIX}-template" || true;
|
|
|
|
# TODO: this should be true when the above create managed-instance-group
|
|
|
|
# command returns, but currently it returns before the instances come up due
|
|
|
|
# to gcloud's deficiency.
|
|
|
|
wait-for-minions-to-run
|
|
|
|
detect-minion-names
|
|
|
|
|
|
|
|
# Create the routes and set IP ranges to instance metadata, 5 instances at a time.
|
|
|
|
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
|
|
|
|
create-route "${MINION_NAMES[$i]}" "${MINION_IP_RANGES[$i]}" &
|
|
|
|
add-instance-metadata "${MINION_NAMES[$i]}" "node-ip-range=${MINION_IP_RANGES[$i]}" &
|
2014-08-13 20:26:03 +00:00
|
|
|
|
2014-12-09 23:37:06 +00:00
|
|
|
if [ $i -ne 0 ] && [ $((i%5)) -eq 0 ]; then
|
2015-01-28 14:57:10 +00:00
|
|
|
echo Waiting for a batch of routes at $i...
|
2014-12-09 23:37:06 +00:00
|
|
|
wait-for-jobs
|
2014-11-25 18:32:27 +00:00
|
|
|
fi
|
2014-07-14 17:50:04 +00:00
|
|
|
|
|
|
|
done
|
2015-03-12 17:37:30 +00:00
|
|
|
create-route "${MASTER_NAME}" "${MASTER_IP_RANGE}"
|
Change GCE to use standalone Saltstack config:
Change provisioning to pass all variables to both master and node. Run
Salt in a masterless setup on all nodes ala
http://docs.saltstack.com/en/latest/topics/tutorials/quickstart.html,
which involves ensuring Salt daemon is NOT running after install. Kill
Salt master install. And fix push to actually work in this new flow.
As part of this, the GCE Salt config no longer has access to the Salt
mine, which is primarily obnoxious for two reasons: - The minions
can't use Salt to see the master: this is easily fixed by static
config. - The master can't see the list of all the minions: this is
fixed temporarily by static config in util.sh, but later, by other
means (see
https://github.com/GoogleCloudPlatform/kubernetes/issues/156, which
should eventually remove this direction).
As part of it, flatten all of cluster/gce/templates/* into
configure-vm.sh, using a single, separate piece of YAML to drive the
environment variables, rather than constantly rewriting the startup
script.
2015-03-02 22:38:58 +00:00
|
|
|
|
2014-12-09 23:37:06 +00:00
|
|
|
# Wait for last batch of jobs.
|
|
|
|
wait-for-jobs
|
2014-07-14 17:50:04 +00:00
|
|
|
|
2015-03-10 15:59:19 +00:00
|
|
|
detect-master
|
|
|
|
|
2014-07-14 17:50:04 +00:00
|
|
|
echo "Waiting for cluster initialization."
|
|
|
|
echo
|
|
|
|
echo " This will continually check to see if the API for kubernetes is reachable."
|
|
|
|
echo " This might loop forever if there was some uncaught error during start"
|
|
|
|
echo " up."
|
|
|
|
echo
|
|
|
|
|
2014-10-06 20:25:27 +00:00
|
|
|
until curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" --max-time 5 \
|
|
|
|
--fail --output /dev/null --silent "https://${KUBE_MASTER_IP}/api/v1beta1/pods"; do
|
2014-07-14 17:50:04 +00:00
|
|
|
printf "."
|
|
|
|
sleep 2
|
|
|
|
done
|
|
|
|
|
|
|
|
echo "Kubernetes cluster created."

  # TODO use token instead of basic auth
  export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
  export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
  export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
  export CONTEXT="${PROJECT}_${INSTANCE_PREFIX}"

  # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's
  # config file. Distribute the same way the htpasswd is done.
  (
    umask 077
    gcloud compute ssh --project "${PROJECT}" --zone "$ZONE" "${MASTER_NAME}" --command "sudo cat /srv/kubernetes/kubecfg.crt" >"${KUBE_CERT}" 2>/dev/null
    gcloud compute ssh --project "${PROJECT}" --zone "$ZONE" "${MASTER_NAME}" --command "sudo cat /srv/kubernetes/kubecfg.key" >"${KUBE_KEY}" 2>/dev/null
    gcloud compute ssh --project "${PROJECT}" --zone "$ZONE" "${MASTER_NAME}" --command "sudo cat /srv/kubernetes/ca.crt" >"${CA_CERT}" 2>/dev/null

    create-kubeconfig
  )

  echo "Sanity checking cluster..."

  # Basic sanity checking
  local i
  local rc # Capture return code without exiting because of errexit bash option
  local pause_pod="google_containers/pause"
  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    # Make sure docker is installed and working.
    local attempt=0
    while true; do
      echo -n "Attempt $(($attempt+1)) to check Docker and pause pod on node ${MINION_NAMES[$i]} ..."
      local output=$(gcloud compute --project "${PROJECT}" ssh --zone "$ZONE" "${MINION_NAMES[$i]}" --command "sudo docker ps -a" 2>/dev/null)
      if [[ -z "${output}" ]]; then
        if (( attempt > 9 )); then
          echo
          echo -e "${color_red}Docker failed to install on node ${MINION_NAMES[$i]}. Your cluster is unlikely" >&2
          echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2
          echo -e "cluster. (sorry!)${color_norm}" >&2
          exit 1
        fi
      elif [[ ! `echo "${output}" | grep "${pause_pod}"` ]]; then
        if (( attempt > 9 )); then
          echo
          echo -e "${color_red}Failed to observe ${pause_pod} on node ${MINION_NAMES[$i]}. Your cluster is unlikely" >&2
          echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2
          echo -e "cluster. (sorry!)${color_norm}" >&2
          exit 1
        fi
      else
        echo -e " ${color_green}[working]${color_norm}"
        break
      fi
      echo -e " ${color_yellow}[not working yet]${color_norm}"
      # Start Docker, in case it failed to start.
      gcloud compute --project "${PROJECT}" ssh --zone "$ZONE" "${MINION_NAMES[$i]}" \
        --command "sudo service docker start" 2>/dev/null || true
      attempt=$(($attempt+1))
      sleep 30
    done
  done

  echo
  echo -e "${color_green}Kubernetes cluster is running. The master is running at:"
  echo
  echo -e "${color_yellow}  https://${KUBE_MASTER_IP}"
  echo
  echo -e "${color_green}The user name and password to use is located in ${KUBECONFIG}.${color_norm}"
  echo

}
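
# Debugging sketch (illustrative, not called anywhere in this file): the same
# readiness probe the cluster bring-up loops on above can be run by hand
# against a cluster that never reports ready. It assumes detect-master and
# get-password have already populated KUBE_MASTER_IP, KUBE_USER and
# KUBE_PASSWORD.
#
#   curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" --max-time 5 \
#     --fail --silent "https://${KUBE_MASTER_IP}/api/v1beta1/pods" && echo "API reachable"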

# Delete a kubernetes cluster. This is called from test-teardown.
#
# Assumed vars:
#   MASTER_NAME
#   NODE_INSTANCE_PREFIX
#   ZONE
# This function tears down cluster resources 10 at a time to avoid issuing too
# many API calls and exceeding API quota. It is important to bring down the
# instances before bringing down the firewall rules and routes.
function kube-down {
  detect-project

  echo "Bringing down cluster"

  gcloud preview managed-instance-groups --zone "${ZONE}" delete \
    --project "${PROJECT}" \
    --quiet \
    "${NODE_INSTANCE_PREFIX}-group" || true

  gcloud compute instance-templates delete \
    --project "${PROJECT}" \
    --quiet \
    "${NODE_INSTANCE_PREFIX}-template" || true

  # First delete the master (if it exists).
  gcloud compute instances delete \
    --project "${PROJECT}" \
    --quiet \
    --delete-disks all \
    --zone "${ZONE}" \
    "${MASTER_NAME}" || true

  # Delete the master pd (possibly leaked by kube-up if master create failed)
  gcloud compute disks delete \
    --project "${PROJECT}" \
    --quiet \
    --zone "${ZONE}" \
    "${MASTER_NAME}"-pd || true

  # Find out what minions are running.
  local -a minions
  minions=( $(gcloud compute instances list \
                --project "${PROJECT}" --zone "${ZONE}" \
                --regexp "${NODE_INSTANCE_PREFIX}-.+" \
                | awk 'NR >= 2 { print $1 }') )
  # If any minions are running, delete them in batches.
  while (( "${#minions[@]}" > 0 )); do
    echo Deleting nodes "${minions[*]::10}"
    gcloud compute instances delete \
      --project "${PROJECT}" \
      --quiet \
      --delete-disks boot \
      --zone "${ZONE}" \
      "${minions[@]::10}" || true
    minions=( "${minions[@]:10}" )
  done

  # Delete firewall rule for the master.
  gcloud compute firewall-rules delete \
    --project "${PROJECT}" \
    --quiet \
    "${MASTER_NAME}-https" || true

  # Delete firewall rule for minions.
  gcloud compute firewall-rules delete \
    --project "${PROJECT}" \
    --quiet \
    "${MINION_TAG}-all" || true

  # Delete routes.
  local -a routes
  routes=( $(gcloud compute routes list --project "${PROJECT}" \
              --regexp "${NODE_INSTANCE_PREFIX}-.+" | awk 'NR >= 2 { print $1 }') )
  routes+=("${MASTER_NAME}")
  while (( "${#routes[@]}" > 0 )); do
    echo Deleting routes "${routes[*]::10}"
    gcloud compute routes delete \
      --project "${PROJECT}" \
      --quiet \
      "${routes[@]::10}" || true
    routes=( "${routes[@]:10}" )
  done

  # Delete the master's reserved IP
  local REGION=${ZONE%-*}
  gcloud compute addresses delete \
    --project "${PROJECT}" \
    --region "${REGION}" \
    --quiet \
    "${MASTER_NAME}-ip" || true

  export CONTEXT="${PROJECT}_${INSTANCE_PREFIX}"
  clear-kubeconfig
}
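
# Post-teardown check (illustrative sketch, not part of kube-down itself):
# re-running the same instance listing kube-down uses should come back empty
# once everything has been deleted.
#
#   gcloud compute instances list \
#     --project "${PROJECT}" --zone "${ZONE}" \
#     --regexp "${NODE_INSTANCE_PREFIX}-.+"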

# Update a kubernetes cluster with latest source
function kube-push {
  OUTPUT=${KUBE_ROOT}/_output/logs
  mkdir -p ${OUTPUT}

  ensure-temp-dir
  detect-project
  detect-master
  detect-minion-names
  get-password
  set-master-htpasswd

  # Make sure we have the tar files staged on Google Storage
  find-release-tars
  upload-server-tars

  echo "Updating master metadata ..."
  write-master-env
  add-instance-metadata-from-file "${KUBE_MASTER}" "kube-env=${KUBE_TEMP}/master-kube-env.yaml" "startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh"

  echo "Pushing to master (log at ${OUTPUT}/kube-push-${KUBE_MASTER}.log) ..."
  cat ${KUBE_ROOT}/cluster/gce/configure-vm.sh | gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" --zone "${ZONE}" "${KUBE_MASTER}" --command "sudo bash -s -- --push" &> ${OUTPUT}/kube-push-"${KUBE_MASTER}".log

  kube-update-nodes push

  # TODO(zmerlynn): Re-create instance-template with the new
  # node-kube-env. This isn't important until the node-ip-range issue
  # is solved (because that's blocking automatic dynamic nodes from
  # working). The node-kube-env has to be composed with the kube*-token
  # metadata. Ideally we would have
  # https://github.com/GoogleCloudPlatform/kubernetes/issues/3168
  # implemented before then, though, so avoiding this mess until then.

  echo
  echo "Kubernetes cluster is running. The master is running at:"
  echo
  echo "  https://${KUBE_MASTER_IP}"
  echo
  echo "The user name and password to use is located in ~/.kubernetes_auth."
  echo
}
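
# The per-host push logs land under ${KUBE_ROOT}/_output/logs; a simple way to
# follow the master push while it runs (sketch, file name taken from the echo
# in kube-push above):
#
#   tail -f "${KUBE_ROOT}/_output/logs/kube-push-${KUBE_MASTER}.log"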

# Push or upgrade nodes.
#
# TODO: This really needs to trampoline somehow to the configure-vm.sh
# from the .tar.gz that we're actually pushing onto the node, because
# that configuration shifts over versions. Right now, we're blasting
# the configure-vm from our version instead.
#
# Assumed vars:
#   KUBE_ROOT
#   MINION_NAMES
#   KUBE_TEMP
#   PROJECT
#   ZONE
function kube-update-nodes() {
  action=${1}

  OUTPUT=${KUBE_ROOT}/_output/logs
  mkdir -p ${OUTPUT}

  echo "Updating node metadata... "
  write-node-env
  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    add-instance-metadata-from-file "${MINION_NAMES[$i]}" "kube-env=${KUBE_TEMP}/node-kube-env.yaml" "startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh" &
  done
  wait-for-jobs
  echo "Done"

  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    echo "Starting ${action} on node (log at ${OUTPUT}/kube-${action}-${MINION_NAMES[$i]}.log) ..."
    cat ${KUBE_ROOT}/cluster/gce/configure-vm.sh | gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" --zone "${ZONE}" "${MINION_NAMES[$i]}" --command "sudo bash -s -- --push" &> ${OUTPUT}/kube-${action}-"${MINION_NAMES[$i]}".log &
  done

  echo -n "Waiting..."
  wait-for-jobs
  echo "Done"
}
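
# Example invocations: the first mirrors how kube-push drives this function;
# the second is an assumed spelling for an upgrade-style run, since ${action}
# only affects the per-node log file names.
#
#   kube-update-nodes push
#   kube-update-nodes upgrade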

# -----------------------------------------------------------------------------
# Cluster specific test helpers used from hack/e2e-test.sh

# Execute prior to running tests to build a release if required for env.
#
# Assumed Vars:
#   KUBE_ROOT
function test-build-release {
  # Make a release
  "${KUBE_ROOT}/build/release.sh"
}

# Execute prior to running tests to initialize required structure. This is
# called from hack/e2e.go only when running -up (it is run after kube-up).
#
# Assumed vars:
#   Variables from config.sh
function test-setup {
  # Detect the project into $PROJECT if it isn't set
  detect-project

  # Open up port 80 & 8080 so common containers on minions can be reached
  # TODO(roberthbailey): Remove this once we are no longer relying on hostPorts.
  gcloud compute firewall-rules create \
    --project "${PROJECT}" \
    --target-tags "${MINION_TAG}" \
    --allow tcp:80 tcp:8080 \
    --network "${NETWORK}" \
    "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt"
}
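
# To inspect the rule test-setup just created (illustrative; assumes the
# standard gcloud firewall-rules describe subcommand is available):
#
#   gcloud compute firewall-rules describe \
#     --project "${PROJECT}" "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt"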

# Execute after running tests to perform any required clean-up. This is called
# from hack/e2e.go
function test-teardown {
  detect-project
  echo "Shutting down test cluster in background."
  gcloud compute firewall-rules delete \
    --project "${PROJECT}" \
    --quiet \
    "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt" || true
  "${KUBE_ROOT}/cluster/kube-down.sh"
}

# SSH to a node by name ($1) and run a command ($2).
function ssh-to-node {
  local node="$1"
  local cmd="$2"
  # Loop until we can successfully ssh into the box
  for try in $(seq 1 5); do
    if gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" --zone="${ZONE}" "${node}" --command "echo test"; then
      break
    fi
    sleep 5
  done
  # Then actually try the command.
  gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" --zone="${ZONE}" "${node}" --command "${cmd}"
}
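
# Usage sketch: ssh-to-node is the building block for the restart helpers
# below; for example, to check Docker on the first node by hand:
#
#   ssh-to-node "${MINION_NAMES[0]}" "sudo docker ps"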

# Restart the kube-proxy on a node ($1)
function restart-kube-proxy {
  ssh-to-node "$1" "sudo /etc/init.d/kube-proxy restart"
}

# Restart the kube-apiserver on a node ($1)
function restart-apiserver {
  # Escape $1 so awk, not the local shell, expands it on the remote host.
  ssh-to-node "$1" "sudo docker kill \`sudo docker ps | grep /kube-apiserver | awk '{print \$1}'\`"
}
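
# Example (assumption: the apiserver container runs on the master, so the
# usual target is MASTER_NAME):
#
#   restart-apiserver "${MASTER_NAME}"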

# Perform preparations required to run e2e tests
function prepare-e2e() {
  detect-project
}