#!/usr/bin/env bash

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# A library of helper functions and constants for the local config.

# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.

readonly GCE_MAX_LOCAL_SSD=8

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/gce/${KUBE_CONFIG_FILE-"config-default.sh"}"
source "${KUBE_ROOT}/cluster/common.sh"
source "${KUBE_ROOT}/hack/lib/util.sh"

if [[ "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "ubuntu" || "${NODE_OS_DISTRIBUTION}" == "custom" ]]; then
  source "${KUBE_ROOT}/cluster/gce/${NODE_OS_DISTRIBUTION}/node-helper.sh"
else
  echo "Cannot operate on cluster using node os distro: ${NODE_OS_DISTRIBUTION}" >&2
  exit 1
fi

if [[ "${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" || "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]]; then
  source "${KUBE_ROOT}/cluster/gce/${MASTER_OS_DISTRIBUTION}/master-helper.sh"
else
  echo "Cannot operate on cluster using master os distro: ${MASTER_OS_DISTRIBUTION}" >&2
  exit 1
fi

if [[ ${NODE_LOCAL_SSDS:-} -ge 1 ]] && [[ -n ${NODE_LOCAL_SSDS_EXT:-} ]]; then
  echo -e "${color_red}Local SSD: Only one of NODE_LOCAL_SSDS and NODE_LOCAL_SSDS_EXT can be specified at once${color_norm}" >&2
  exit 2
fi

if [[ "${MASTER_OS_DISTRIBUTION}" == "gci" ]]; then
  DEFAULT_GCI_PROJECT=google-containers
  if [[ "${GCI_VERSION}" == "cos"* ]]; then
    DEFAULT_GCI_PROJECT=cos-cloud
  fi
  MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-${DEFAULT_GCI_PROJECT}}
  # If the master image is not set, we use the latest GCI image.
  # Otherwise, we respect whatever is set by the user.
  MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-${GCI_VERSION}}
fi

# Sets node image based on the specified os distro. Currently this function only
# supports gci and debian.
function set-node-image() {
  if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]]; then
    DEFAULT_GCI_PROJECT=google-containers
    if [[ "${GCI_VERSION}" == "cos"* ]]; then
      DEFAULT_GCI_PROJECT=cos-cloud
    fi

    # If the node image is not set, we use the latest GCI image.
    # Otherwise, we respect whatever is set by the user.
    NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}}
    NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-${DEFAULT_GCI_PROJECT}}
  fi
}

set-node-image

# Verify cluster autoscaler configuration.
if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
  if [[ -z $AUTOSCALER_MIN_NODES ]]; then
    echo "AUTOSCALER_MIN_NODES not set."
    exit 1
  fi
  if [[ -z $AUTOSCALER_MAX_NODES ]]; then
    echo "AUTOSCALER_MAX_NODES not set."
    exit 1
  fi
fi

NODE_INSTANCE_PREFIX=${NODE_INSTANCE_PREFIX:-"${INSTANCE_PREFIX}-minion"}

NODE_TAGS="${NODE_TAG}"

ALLOCATE_NODE_CIDRS=true
PREEXISTING_NETWORK=false
PREEXISTING_NETWORK_MODE=""

KUBE_PROMPT_FOR_UPDATE=${KUBE_PROMPT_FOR_UPDATE:-"n"}
# How long (in seconds) to wait for cluster initialization.
KUBE_CLUSTER_INITIALIZATION_TIMEOUT=${KUBE_CLUSTER_INITIALIZATION_TIMEOUT:-300}

function join_csv() {
  local IFS=','; echo "$*";
}

# This function returns the first string before the comma
function split_csv() {
  echo "$*" | cut -d',' -f1
}
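# For illustration only (not executed): join_csv joins its arguments with commas
# and split_csv returns everything before the first comma, e.g.
#   join_csv "https://a/tar.gz" "https://b/tar.gz"    # -> https://a/tar.gz,https://b/tar.gz
#   split_csv "https://a/tar.gz,https://b/tar.gz"     # -> https://a/tar.gz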

# Verify prereqs
function verify-prereqs() {
  local cmd

  # we use openssl to generate certs
  kube::util::test_openssl_installed

  # ensure a version supported by easyrsa is installed
  if [ "$(openssl version | cut -d' ' -f1)" == "LibreSSL" ]; then
    echo "LibreSSL is not supported. Please ensure openssl points to an OpenSSL binary"
    if [ "$(uname -s)" == "Darwin" ]; then
      echo 'On macOS we recommend using homebrew and adding "$(brew --prefix openssl)/bin" to your PATH'
    fi
    exit 1
  fi

  # we use gcloud to create the cluster, gsutil to stage binaries and data
  for cmd in gcloud gsutil; do
    if ! which "${cmd}" >/dev/null; then
      local resp="n"
      if [[ "${KUBE_PROMPT_FOR_UPDATE}" == "y" ]]; then
        echo "Can't find ${cmd} in PATH. Do you wish to install the Google Cloud SDK? [Y/n]"
        read resp
      fi
      if [[ "${resp}" != "n" && "${resp}" != "N" ]]; then
        curl https://sdk.cloud.google.com | bash
      fi
      if ! which "${cmd}" >/dev/null; then
        echo "Can't find ${cmd} in PATH, please fix and retry. The Google Cloud " >&2
        echo "SDK can be downloaded from https://cloud.google.com/sdk/." >&2
        exit 1
      fi
    fi
  done
  update-or-verify-gcloud
}

# Use the gcloud defaults to find the project. If it is already set in the
# environment then go with that.
#
# Vars set:
#   PROJECT
#   NETWORK_PROJECT
#   PROJECT_REPORTED
function detect-project() {
  if [[ -z "${PROJECT-}" ]]; then
    PROJECT=$(gcloud config list project --format 'value(core.project)')
  fi

  NETWORK_PROJECT=${NETWORK_PROJECT:-${PROJECT}}

  if [[ -z "${PROJECT-}" ]]; then
    echo "Could not detect Google Cloud Platform project. Set the default project using " >&2
    echo "'gcloud config set project <PROJECT>'" >&2
    exit 1
  fi
  if [[ -z "${PROJECT_REPORTED-}" ]]; then
    echo "Project: ${PROJECT}" >&2
    echo "Network Project: ${NETWORK_PROJECT}" >&2
    echo "Zone: ${ZONE}" >&2
    PROJECT_REPORTED=true
  fi
}

# Use gsutil to get the md5 hash for a particular tar
function gsutil_get_tar_md5() {
  # location_tar could be local or in the cloud
  # local tar_location example ./_output/release-tars/kubernetes-server-linux-amd64.tar.gz
  # cloud tar_location example gs://kubernetes-staging-PROJECT/kubernetes-devel/kubernetes-server-linux-amd64.tar.gz
  local -r tar_location=$1
  # parse the output and return the md5 hash
  # the sed command at the end removes whitespace
  local -r tar_md5=$(gsutil hash -h -m ${tar_location} 2>/dev/null | grep "Hash (md5):" | awk -F ':' '{print $2}' | sed 's/^[[:space:]]*//g')
  echo "${tar_md5}"
}
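# Illustration only (not executed): assuming `gsutil hash -h -m` reports a line of
# the form "Hash (md5): <hex digest>", a call such as
#   gsutil_get_tar_md5 ./_output/release-tars/kubernetes-server-linux-amd64.tar.gz
# echoes just the hex digest, with the label and surrounding whitespace stripped.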

# Copy a release tar and its accompanying hash.
function copy-to-staging() {
  local -r staging_path=$1
  local -r gs_url=$2
  local -r tar=$3
  local -r hash=$4
  local -r basename_tar=$(basename ${tar})

  # check whether this tar already exists and has the same hash
  # if it matches, then don't bother uploading it again

  # remote_tar_md5 checks the remote location for the existing tarball and its md5
  # staging_path example gs://kubernetes-staging-PROJECT/kubernetes-devel
  # basename_tar example kubernetes-server-linux-amd64.tar.gz
  local -r remote_tar_md5=$(gsutil_get_tar_md5 "${staging_path}/${basename_tar}")
  if [[ -n ${remote_tar_md5} ]]; then
    # local_tar_md5 computes the md5 hash of the local tarball
    # tar example ./_output/release-tars/kubernetes-server-linux-amd64.tar.gz
    local -r local_tar_md5=$(gsutil_get_tar_md5 "${tar}")
    if [[ "${remote_tar_md5}" == "${local_tar_md5}" ]]; then
      echo "+++ ${basename_tar} uploaded earlier, cloud and local file md5 match (md5 = ${local_tar_md5})"
      return 0
    fi
  fi

  echo "${hash}" > "${tar}.sha1"
  gsutil -m -q -h "Cache-Control:private, max-age=0" cp "${tar}" "${tar}.sha1" "${staging_path}"
  gsutil -m acl ch -g all:R "${gs_url}" "${gs_url}.sha1" >/dev/null 2>&1
  echo "+++ ${basename_tar} uploaded (sha1 = ${hash})"
}
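# Illustration only (not executed), with hypothetical paths: a call such as
#   copy-to-staging "gs://kubernetes-staging-PROJECT/kubernetes-devel" \
#     "gs://kubernetes-staging-PROJECT/kubernetes-devel/kubernetes-server-linux-amd64.tar.gz" \
#     ./_output/release-tars/kubernetes-server-linux-amd64.tar.gz "${SERVER_BINARY_TAR_HASH}"
# skips the upload when the remote md5 matches the local one, and otherwise uploads
# the tar plus a generated .sha1 file and marks both publicly readable (all:R).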

# Given the cluster zone, return the list of regional GCS release
# bucket suffixes for the release in preference order. GCS doesn't
# give us an API for this, so we hardcode it.
#
# Assumed vars:
#   RELEASE_REGION_FALLBACK
#   REGIONAL_KUBE_ADDONS
#   ZONE
# Vars set:
#   PREFERRED_REGION
function set-preferred-region() {
  case ${ZONE} in
    asia-*)
      PREFERRED_REGION=("asia" "us" "eu")
      ;;
    europe-*)
      PREFERRED_REGION=("eu" "us" "asia")
      ;;
    *)
      PREFERRED_REGION=("us" "eu" "asia")
      ;;
  esac

  if [[ "${RELEASE_REGION_FALLBACK}" != "true" ]]; then
PREFERRED_REGION=( "${PREFERRED_REGION[0]}" )
|
2015-05-01 17:50:18 +00:00
|
|
|
fi
|
|
|
|
}
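# Illustration only (not executed): with ZONE=europe-west1-b this sets
# PREFERRED_REGION=("eu" "us" "asia"); if RELEASE_REGION_FALLBACK is not "true",
# the list is then trimmed to just ("eu").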
|
2014-12-09 23:37:06 +00:00
|
|
|
|
2014-09-23 22:54:27 +00:00
|
|
|
# Take the local tar files and upload them to Google Storage. They will then be
|
|
|
|
# downloaded by the master as part of the start up script for the master.
|
|
|
|
#
|
|
|
|
# Assumed vars:
|
|
|
|
# PROJECT
|
|
|
|
# SERVER_BINARY_TAR
|
2015-12-08 22:32:23 +00:00
|
|
|
# KUBE_MANIFESTS_TAR
|
2016-03-01 00:23:54 +00:00
|
|
|
# ZONE
|
2014-09-23 22:54:27 +00:00
|
|
|
# Vars set:
|
|
|
|
# SERVER_BINARY_TAR_URL
|
2015-06-18 18:31:21 +00:00
|
|
|
# SERVER_BINARY_TAR_HASH
|
2015-12-08 22:32:23 +00:00
|
|
|
# KUBE_MANIFESTS_TAR_URL
|
|
|
|
# KUBE_MANIFESTS_TAR_HASH
|
2014-09-23 22:54:27 +00:00
|
|
|
function upload-server-tars() {
|
|
|
|
SERVER_BINARY_TAR_URL=
|
2015-06-18 18:31:21 +00:00
|
|
|
SERVER_BINARY_TAR_HASH=
|
2015-12-08 22:32:23 +00:00
|
|
|
KUBE_MANIFESTS_TAR_URL=
|
|
|
|
KUBE_MANIFESTS_TAR_HASH=
|
2014-09-23 22:54:27 +00:00
|
|
|
|
|
|
|
local project_hash
|
|
|
|
if which md5 > /dev/null 2>&1; then
|
|
|
|
project_hash=$(md5 -q -s "$PROJECT")
|
|
|
|
else
|
2014-11-12 07:04:01 +00:00
|
|
|
project_hash=$(echo -n "$PROJECT" | md5sum | awk '{ print $1 }')
|
2014-09-23 22:54:27 +00:00
|
|
|
fi
|
2015-05-01 17:50:18 +00:00
|
|
|
|
2015-04-09 04:51:50 +00:00
|
|
|
# This requires 1 million projects before the probability of collision is 50%
|
|
|
|
# that's probably good enough for now :P
|
|
|
|
project_hash=${project_hash:0:10}
|
2014-09-23 22:54:27 +00:00
|
|
|
|
2016-03-01 00:23:54 +00:00
|
|
|
set-preferred-region
if [[ "${ENABLE_DOCKER_REGISTRY_CACHE:-}" == "true" ]]; then
|
|
|
|
DOCKER_REGISTRY_MIRROR_URL="https://mirror.gcr.io"
|
|
|
|
fi
|
|
|
|
|
2016-03-01 00:23:54 +00:00
|
|
|
SERVER_BINARY_TAR_HASH=$(sha1sum-file "${SERVER_BINARY_TAR}")
|
2016-07-18 21:20:45 +00:00
|
|
|
if [[ -n "${KUBE_MANIFESTS_TAR:-}" ]]; then
|
|
|
|
KUBE_MANIFESTS_TAR_HASH=$(sha1sum-file "${KUBE_MANIFESTS_TAR}")
|
|
|
|
fi
|
2014-09-23 22:54:27 +00:00
|
|
|
|
2016-03-01 00:23:54 +00:00
|
|
|
local server_binary_tar_urls=()
|
|
|
|
local kube_manifest_tar_urls=()
|
2014-09-23 22:54:27 +00:00
|
|
|
|
2016-03-01 00:23:54 +00:00
|
|
|
for region in "${PREFERRED_REGION[@]}"; do
|
|
|
|
suffix="-${region}"
|
|
|
|
if [[ "${suffix}" == "-us" ]]; then
|
|
|
|
suffix=""
|
|
|
|
fi
|
|
|
|
local staging_bucket="gs://kubernetes-staging-${project_hash}${suffix}"
|
2015-05-01 17:50:18 +00:00
|
|
|
|
2016-03-01 00:23:54 +00:00
|
|
|
# Ensure the buckets are created
|
2016-10-19 00:32:56 +00:00
|
|
|
if ! gsutil ls "${staging_bucket}" >/dev/null; then
|
2016-03-01 00:23:54 +00:00
|
|
|
echo "Creating ${staging_bucket}"
|
|
|
|
gsutil mb -l "${region}" "${staging_bucket}"
|
|
|
|
fi
|
2014-11-08 00:16:45 +00:00
|
|
|
|
2016-03-01 00:23:54 +00:00
|
|
|
local staging_path="${staging_bucket}/${INSTANCE_PREFIX}-devel"
|
|
|
|
|
|
|
|
echo "+++ Staging server tars to Google Storage: ${staging_path}"
|
|
|
|
local server_binary_gs_url="${staging_path}/${SERVER_BINARY_TAR##*/}"
|
|
|
|
copy-to-staging "${staging_path}" "${server_binary_gs_url}" "${SERVER_BINARY_TAR}" "${SERVER_BINARY_TAR_HASH}"
|
2015-12-02 19:42:23 +00:00
|
|
|
|
2015-12-08 22:32:23 +00:00
|
|
|
# Convert from gs:// URL to an https:// URL
|
2016-03-01 00:23:54 +00:00
|
|
|
server_binary_tar_urls+=("${server_binary_gs_url/gs:\/\//https://storage.googleapis.com/}")
|
2016-07-18 21:20:45 +00:00
|
|
|
if [[ -n "${KUBE_MANIFESTS_TAR:-}" ]]; then
|
|
|
|
local kube_manifests_gs_url="${staging_path}/${KUBE_MANIFESTS_TAR##*/}"
|
|
|
|
copy-to-staging "${staging_path}" "${kube_manifests_gs_url}" "${KUBE_MANIFESTS_TAR}" "${KUBE_MANIFESTS_TAR_HASH}"
|
|
|
|
# Convert from gs:// URL to an https:// URL
|
|
|
|
kube_manifests_tar_urls+=("${kube_manifests_gs_url/gs:\/\//https://storage.googleapis.com/}")
|
|
|
|
fi
|
2016-03-01 00:23:54 +00:00
|
|
|
done
|
|
|
|
|
2016-06-23 18:22:04 +00:00
|
|
|
SERVER_BINARY_TAR_URL=$(join_csv "${server_binary_tar_urls[@]}")
|
2016-07-18 21:20:45 +00:00
|
|
|
if [[ -n "${KUBE_MANIFESTS_TAR:-}" ]]; then
|
|
|
|
KUBE_MANIFESTS_TAR_URL=$(join_csv "${kube_manifests_tar_urls[@]}")
|
|
|
|
fi
|
2014-09-23 22:54:27 +00:00
|
|
|
}
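# Illustration only (not executed), with a hypothetical project hash of 0123456789
# and INSTANCE_PREFIX=kubernetes: after this function runs, SERVER_BINARY_TAR_URL is
# a comma-separated list of https:// URLs in preference order, along the lines of
#   https://storage.googleapis.com/kubernetes-staging-0123456789/kubernetes-devel/kubernetes-server-linux-amd64.tar.gz,https://storage.googleapis.com/kubernetes-staging-0123456789-eu/kubernetes-devel/kubernetes-server-linux-amd64.tar.gz,...
# (the "us" bucket carries no suffix; "-eu" and "-asia" buckets serve as fallbacks).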
|
|
|
|
|
2015-01-28 14:57:10 +00:00
|
|
|
# Detect minions created in the minion group
|
2014-09-23 22:54:27 +00:00
|
|
|
#
|
|
|
|
# Assumed vars:
|
2015-01-28 14:57:10 +00:00
|
|
|
# NODE_INSTANCE_PREFIX
|
|
|
|
# Vars set:
|
2015-11-24 03:04:40 +00:00
|
|
|
# NODE_NAMES
|
2015-12-11 08:09:09 +00:00
|
|
|
# INSTANCE_GROUPS
|
2016-08-02 07:08:05 +00:00
|
|
|
function detect-node-names() {
|
2015-01-28 14:57:10 +00:00
|
|
|
detect-project
|
2015-12-11 08:09:09 +00:00
|
|
|
INSTANCE_GROUPS=()
|
2016-04-25 19:10:25 +00:00
|
|
|
INSTANCE_GROUPS+=($(gcloud compute instance-groups managed list \
|
2017-08-09 16:45:44 +00:00
|
|
|
--project "${PROJECT}" \
|
|
|
|
--filter "name ~ '${NODE_INSTANCE_PREFIX}-.+' AND zone:(${ZONE})" \
|
2017-08-28 19:24:24 +00:00
|
|
|
--format='value(name)' || true))
|
2015-12-11 08:09:09 +00:00
|
|
|
NODE_NAMES=()
|
|
|
|
if [[ -n "${INSTANCE_GROUPS[@]:-}" ]]; then
|
|
|
|
for group in "${INSTANCE_GROUPS[@]}"; do
|
|
|
|
NODE_NAMES+=($(gcloud compute instance-groups managed list-instances \
|
|
|
|
"${group}" --zone "${ZONE}" --project "${PROJECT}" \
|
2016-04-25 19:10:25 +00:00
|
|
|
--format='value(instance)'))
|
2015-12-11 08:09:09 +00:00
|
|
|
done
|
|
|
|
fi
|
2017-06-28 17:50:58 +00:00
|
|
|
# Add heapster node name to the list too (if it exists).
|
|
|
|
if [[ -n "${HEAPSTER_MACHINE_TYPE:-}" ]]; then
|
|
|
|
NODE_NAMES+=("${NODE_INSTANCE_PREFIX}-heapster")
|
|
|
|
fi
|
|
|
|
|
2016-04-27 00:41:15 +00:00
|
|
|
echo "INSTANCE_GROUPS=${INSTANCE_GROUPS[*]:-}" >&2
|
|
|
|
echo "NODE_NAMES=${NODE_NAMES[*]:-}" >&2
|
2015-01-28 14:57:10 +00:00
|
|
|
}
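# Illustration only (not executed), with hypothetical resource names: for
# NODE_INSTANCE_PREFIX=kubernetes-minion this might report
#   INSTANCE_GROUPS=kubernetes-minion-group
#   NODE_NAMES=kubernetes-minion-group-x0f1 kubernetes-minion-group-x2b3
# on stderr, plus a trailing "${NODE_INSTANCE_PREFIX}-heapster" entry when
# HEAPSTER_MACHINE_TYPE is set.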
|
|
|
|
|
|
|
|
# Detect the information about the minions
|
|
|
|
#
|
|
|
|
# Assumed vars:
|
2014-09-23 22:54:27 +00:00
|
|
|
# ZONE
|
|
|
|
# Vars set:
|
2015-11-24 03:04:40 +00:00
|
|
|
# NODE_NAMES
|
2015-11-24 03:00:46 +00:00
|
|
|
# KUBE_NODE_IP_ADDRESSES (array)
|
2016-08-02 07:08:05 +00:00
|
|
|
function detect-nodes() {
|
2014-12-09 23:07:54 +00:00
|
|
|
detect-project
|
2015-11-09 07:33:06 +00:00
|
|
|
detect-node-names
|
2015-11-24 03:00:46 +00:00
|
|
|
KUBE_NODE_IP_ADDRESSES=()
|
2015-11-24 03:04:40 +00:00
|
|
|
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
|
2015-12-11 08:09:09 +00:00
|
|
|
local node_ip=$(gcloud compute instances describe --project "${PROJECT}" --zone "${ZONE}" \
|
2016-05-04 19:32:13 +00:00
|
|
|
"${NODE_NAMES[$i]}" --format='value(networkInterfaces[0].accessConfigs[0].natIP)')
|
2015-12-11 08:09:09 +00:00
|
|
|
if [[ -z "${node_ip-}" ]] ; then
|
2015-11-24 03:04:40 +00:00
|
|
|
echo "Did not find ${NODE_NAMES[$i]}" >&2
|
2014-09-22 17:25:25 +00:00
|
|
|
else
|
2016-02-29 21:55:03 +00:00
|
|
|
echo "Found ${NODE_NAMES[$i]} at ${node_ip}"
|
|
|
|
KUBE_NODE_IP_ADDRESSES+=("${node_ip}")
|
2014-09-22 17:25:25 +00:00
|
|
|
fi
|
2014-07-14 17:50:04 +00:00
|
|
|
done
|
2015-11-24 03:00:46 +00:00
|
|
|
if [[ -z "${KUBE_NODE_IP_ADDRESSES-}" ]]; then
|
2014-10-06 20:25:27 +00:00
|
|
|
echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'" >&2
|
2014-07-14 17:50:04 +00:00
|
|
|
exit 1
|
|
|
|
fi
|
|
|
|
}
|
|
|
|
|
2014-09-23 22:54:27 +00:00
|
|
|
# Detect the IP for the master
|
|
|
|
#
|
|
|
|
# Assumed vars:
|
|
|
|
# MASTER_NAME
|
|
|
|
# ZONE
|
2016-07-20 15:25:25 +00:00
|
|
|
# REGION
|
2014-09-23 22:54:27 +00:00
|
|
|
# Vars set:
|
|
|
|
# KUBE_MASTER
|
|
|
|
# KUBE_MASTER_IP
|
2016-08-02 07:08:05 +00:00
|
|
|
function detect-master() {
|
2014-12-09 23:07:54 +00:00
|
|
|
detect-project
|
2014-07-14 17:50:04 +00:00
|
|
|
KUBE_MASTER=${MASTER_NAME}
|
2016-08-23 20:18:22 +00:00
|
|
|
echo "Trying to find master named '${MASTER_NAME}'" >&2
|
2014-10-06 20:25:27 +00:00
|
|
|
if [[ -z "${KUBE_MASTER_IP-}" ]]; then
|
2016-08-23 20:18:22 +00:00
|
|
|
local master_address_name="${MASTER_NAME}-ip"
|
|
|
|
echo "Looking for address '${master_address_name}'" >&2
|
2017-09-14 10:00:29 +00:00
|
|
|
if ! KUBE_MASTER_IP=$(gcloud compute addresses describe "${master_address_name}" \
|
|
|
|
--project "${PROJECT}" --region "${REGION}" -q --format='value(address)') || \
|
|
|
|
[[ -z "${KUBE_MASTER_IP-}" ]]; then
|
|
|
|
echo "Could not detect Kubernetes master node. Make sure you've launched a cluster with 'kube-up.sh'" >&2
|
|
|
|
exit 1
|
|
|
|
fi
|
2014-07-14 17:50:04 +00:00
|
|
|
fi
|
2016-08-23 20:18:22 +00:00
|
|
|
echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)" >&2
|
2014-07-14 17:50:04 +00:00
|
|
|
}
|
|
|
|
|
2018-02-15 08:54:36 +00:00
|
|
|
function load-or-gen-kube-bearertoken() {
|
|
|
|
if [[ ! -z "${KUBE_CONTEXT:-}" ]]; then
|
|
|
|
get-kubeconfig-bearertoken
|
|
|
|
fi
|
|
|
|
if [[ -z "${KUBE_BEARER_TOKEN:-}" ]]; then
|
|
|
|
gen-kube-bearertoken
|
|
|
|
fi
|
|
|
|
}
|
|
|
|
|
|
|
|
# Figure out which binaries to use on the server and ensure they are available.
|
|
|
|
# If KUBE_VERSION is specified use binaries specified by it, otherwise
|
|
|
|
# use local dev binaries.
|
|
|
|
#
|
|
|
|
# Assumed vars:
|
|
|
|
# KUBE_VERSION
|
|
|
|
# KUBE_RELEASE_VERSION_REGEX
|
|
|
|
# KUBE_CI_VERSION_REGEX
|
|
|
|
# Vars set:
|
|
|
|
# KUBE_TAR_HASH
|
|
|
|
# SERVER_BINARY_TAR_URL
|
|
|
|
# SERVER_BINARY_TAR_HASH
|
|
|
|
function tars_from_version() {
|
|
|
|
local sha1sum=""
|
|
|
|
if which sha1sum >/dev/null 2>&1; then
|
|
|
|
sha1sum="sha1sum"
|
|
|
|
else
|
|
|
|
sha1sum="shasum -a1"
|
|
|
|
fi
|
|
|
|
|
|
|
|
if [[ -z "${KUBE_VERSION-}" ]]; then
|
|
|
|
find-release-tars
|
|
|
|
upload-server-tars
|
|
|
|
elif [[ ${KUBE_VERSION} =~ ${KUBE_RELEASE_VERSION_REGEX} ]]; then
|
|
|
|
SERVER_BINARY_TAR_URL="https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/kubernetes-server-linux-amd64.tar.gz"
|
|
|
|
# TODO: Clean this up.
|
|
|
|
KUBE_MANIFESTS_TAR_URL="${SERVER_BINARY_TAR_URL/server-linux-amd64/manifests}"
|
|
|
|
KUBE_MANIFESTS_TAR_HASH=$(curl ${KUBE_MANIFESTS_TAR_URL} --silent --show-error | ${sha1sum} | awk '{print $1}')
|
|
|
|
elif [[ ${KUBE_VERSION} =~ ${KUBE_CI_VERSION_REGEX} ]]; then
|
|
|
|
SERVER_BINARY_TAR_URL="https://storage.googleapis.com/kubernetes-release-dev/ci/${KUBE_VERSION}/kubernetes-server-linux-amd64.tar.gz"
|
|
|
|
# TODO: Clean this up.
|
|
|
|
KUBE_MANIFESTS_TAR_URL="${SERVER_BINARY_TAR_URL/server-linux-amd64/manifests}"
|
|
|
|
KUBE_MANIFESTS_TAR_HASH=$(curl ${KUBE_MANIFESTS_TAR_URL} --silent --show-error | ${sha1sum} | awk '{print $1}')
|
|
|
|
else
|
|
|
|
echo "Version doesn't match regexp" >&2
|
|
|
|
exit 1
|
|
|
|
fi
|
|
|
|
if ! SERVER_BINARY_TAR_HASH=$(curl -Ss --fail "${SERVER_BINARY_TAR_URL}.sha1"); then
|
|
|
|
echo "Failure trying to curl release .sha1"
|
|
|
|
fi
|
|
|
|
|
|
|
|
if ! curl -Ss --head "${SERVER_BINARY_TAR_URL}" >&/dev/null; then
|
|
|
|
echo "Can't find release at ${SERVER_BINARY_TAR_URL}" >&2
|
|
|
|
exit 1
|
|
|
|
fi
|
|
|
|
}
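# Illustration only (not executed), with a hypothetical version: for
# KUBE_VERSION=v1.11.0 (a release version) this points SERVER_BINARY_TAR_URL at
#   https://storage.googleapis.com/kubernetes-release/release/v1.11.0/kubernetes-server-linux-amd64.tar.gz
# and derives KUBE_MANIFESTS_TAR_URL from it by substituting "manifests" for
# "server-linux-amd64" in that URL.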
|
|
|
|
|
2016-03-05 20:50:28 +00:00
|
|
|
# Reads kube-env metadata from master
|
|
|
|
#
|
|
|
|
# Assumed vars:
|
|
|
|
# KUBE_MASTER
|
|
|
|
# PROJECT
|
|
|
|
# ZONE
|
|
|
|
function get-master-env() {
|
|
|
|
# TODO(zmerlynn): Make this more reliable with retries.
|
|
|
|
gcloud compute --project ${PROJECT} ssh --zone ${ZONE} ${KUBE_MASTER} --command \
|
|
|
|
"curl --fail --silent -H 'Metadata-Flavor: Google' \
|
|
|
|
'http://metadata/computeMetadata/v1/instance/attributes/kube-env'" 2>/dev/null
|
2017-02-14 19:02:11 +00:00
|
|
|
gcloud compute --project ${PROJECT} ssh --zone ${ZONE} ${KUBE_MASTER} --command \
|
|
|
|
"curl --fail --silent -H 'Metadata-Flavor: Google' \
|
|
|
|
'http://metadata/computeMetadata/v1/instance/attributes/kube-master-certs'" 2>/dev/null
|
2016-03-05 20:50:28 +00:00
|
|
|
}
|
|
|
|
|
2018-02-15 08:54:36 +00:00
|
|
|
# Quote something appropriate for a yaml string.
|
|
|
|
#
|
|
|
|
# TODO(zmerlynn): Note that this function doesn't so much "quote" as
|
|
|
|
# "strip out quotes", and we really should be using a YAML library for
|
|
|
|
# this, but PyYAML isn't shipped by default, and *rant rant rant ... SIGH*
|
|
|
|
function yaml-quote {
|
|
|
|
echo "'$(echo "${@:-}" | sed -e "s/'/''/g")'"
|
|
|
|
}
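# Illustration only (not executed): yaml-quote wraps its arguments in single quotes
# and doubles any embedded single quotes, e.g.
#   yaml-quote "it's a test"   # -> 'it''s a test'
#   yaml-quote ""              # -> ''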
|
|
|
|
|
2018-02-15 13:51:46 +00:00
|
|
|
# Writes the cluster location into a temporary file.
|
|
|
|
# Assumed vars
|
|
|
|
# ZONE
|
|
|
|
function write-cluster-location {
|
|
|
|
cat >"${KUBE_TEMP}/cluster-location.txt" << EOF
|
|
|
|
${ZONE}
|
|
|
|
EOF
|
|
|
|
}
|
|
|
|
|
2018-02-15 08:54:36 +00:00
|
|
|
# Writes the cluster name into a temporary file.
|
|
|
|
# Assumed vars
|
|
|
|
# CLUSTER_NAME
|
|
|
|
function write-cluster-name {
|
|
|
|
cat >"${KUBE_TEMP}/cluster-name.txt" << EOF
|
|
|
|
${CLUSTER_NAME}
|
|
|
|
EOF
|
|
|
|
}
|
|
|
|
|
|
|
|
function write-master-env {
|
|
|
|
# If the user requested that the master be part of the cluster, set the
|
|
|
|
# environment variable to program the master kubelet to register itself.
|
|
|
|
if [[ "${REGISTER_MASTER_KUBELET:-}" == "true" && -z "${KUBELET_APISERVER:-}" ]]; then
|
|
|
|
KUBELET_APISERVER="${MASTER_NAME}"
|
|
|
|
fi
|
|
|
|
if [[ -z "${KUBERNETES_MASTER_NAME:-}" ]]; then
|
|
|
|
KUBERNETES_MASTER_NAME="${MASTER_NAME}"
|
|
|
|
fi
|
|
|
|
|
2018-02-15 08:34:46 +00:00
|
|
|
construct-kubelet-flags true
|
2018-02-15 08:54:36 +00:00
|
|
|
build-kube-env true "${KUBE_TEMP}/master-kube-env.yaml"
|
2018-04-04 23:11:10 +00:00
|
|
|
build-kubelet-config true "${KUBE_TEMP}/master-kubelet-config.yaml"
|
2018-02-15 08:54:36 +00:00
|
|
|
build-kube-master-certs "${KUBE_TEMP}/kube-master-certs.yaml"
|
|
|
|
}
|
|
|
|
|
|
|
|
function write-node-env {
|
|
|
|
if [[ -z "${KUBERNETES_MASTER_NAME:-}" ]]; then
|
|
|
|
KUBERNETES_MASTER_NAME="${MASTER_NAME}"
|
|
|
|
fi
|
|
|
|
|
2018-02-15 08:34:46 +00:00
|
|
|
construct-kubelet-flags false
|
2018-02-15 08:54:36 +00:00
|
|
|
build-kube-env false "${KUBE_TEMP}/node-kube-env.yaml"
|
2018-04-04 23:11:10 +00:00
|
|
|
build-kubelet-config false "${KUBE_TEMP}/node-kubelet-config.yaml"
|
2018-02-15 08:54:36 +00:00
|
|
|
}
|
|
|
|
|
2018-03-13 17:16:52 +00:00
|
|
|
function build-node-labels {
|
|
|
|
local master=$1
|
|
|
|
local node_labels=""
|
|
|
|
if [[ "${KUBE_PROXY_DAEMONSET:-}" == "true" && "${master}" != "true" ]]; then
|
|
|
|
# Add kube-proxy daemonset label to node to avoid situation during cluster
|
|
|
|
# upgrade/downgrade when there are two instances of kube-proxy running on a node.
|
|
|
|
node_labels="beta.kubernetes.io/kube-proxy-ds-ready=true"
|
|
|
|
fi
|
|
|
|
if [[ -n "${NODE_LABELS:-}" ]]; then
|
|
|
|
node_labels="${node_labels:+${node_labels},}${NODE_LABELS}"
|
|
|
|
fi
|
|
|
|
if [[ -n "${NON_MASTER_NODE_LABELS:-}" && "${master}" != "true" ]]; then
|
|
|
|
node_labels="${node_labels:+${node_labels},}${NON_MASTER_NODE_LABELS}"
|
|
|
|
fi
|
|
|
|
echo $node_labels
|
|
|
|
}
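# Illustration only (not executed), with hypothetical values: for a node
# (master=false) with KUBE_PROXY_DAEMONSET=true and NODE_LABELS=env=dev,
# `build-node-labels false` echoes
#   beta.kubernetes.io/kube-proxy-ds-ready=true,env=dev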
|
|
|
|
|
2018-04-04 23:11:10 +00:00
|
|
|
# yaml-map-string-stringarray converts the encoded structure to yaml format, and echoes the result
|
|
|
|
# under the provided name. If the encoded structure is empty, echoes nothing.
|
|
|
|
# 1: name to be output in yaml
|
|
|
|
# 2: encoded map-string-string (which may contain duplicate keys - resulting in map-string-stringarray)
|
|
|
|
# 3: key-value separator (defaults to ':')
|
|
|
|
# 4: item separator (defaults to ',')
|
|
|
|
function yaml-map-string-stringarray {
|
|
|
|
declare -r name="${1}"
|
|
|
|
declare -r encoded="${2}"
|
|
|
|
declare -r kv_sep="${3:-:}"
|
|
|
|
declare -r item_sep="${4:-,}"
|
|
|
|
|
|
|
|
declare -a pairs # indexed array
|
|
|
|
declare -A map # associative array
|
|
|
|
IFS="${item_sep}" read -ra pairs <<<"${encoded}" # split on item_sep
|
|
|
|
for pair in "${pairs[@]}"; do
|
|
|
|
declare key
|
|
|
|
declare value
|
|
|
|
IFS="${kv_sep}" read -r key value <<<"${pair}" # split on kv_sep
|
|
|
|
map[$key]="${map[$key]+${map[$key]}${item_sep}}${value}" # append values from duplicate keys
|
|
|
|
done
|
|
|
|
# only output if there is a non-empty map
|
|
|
|
if [[ ${#map[@]} -gt 0 ]]; then
|
|
|
|
echo "${name}:"
|
|
|
|
for k in "${!map[@]}"; do
|
|
|
|
echo " ${k}:"
|
|
|
|
declare -a values
|
|
|
|
IFS="${item_sep}" read -ra values <<<"${map[$k]}"
|
|
|
|
for val in "${values[@]}"; do
|
|
|
|
# declare across two lines so errexit can catch failures
|
|
|
|
declare v
|
|
|
|
v=$(yaml-quote "${val}")
|
|
|
|
echo " - ${v}"
|
|
|
|
done
|
|
|
|
done
|
|
|
|
fi
|
|
|
|
}
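# Illustration only (not executed), with a hypothetical header map: calling
#   yaml-map-string-stringarray 'staticPodURLHeader' 'Authorization:token1,Authorization:token2'
# groups the duplicate key and emits YAML along the lines of
#   staticPodURLHeader:
#     Authorization:
#       - 'token1'
#       - 'token2'
# (key order follows bash associative-array iteration, so it is not guaranteed).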
|
|
|
|
|
|
|
|
# yaml-map-string-string converts the encoded structure to yaml format, and echoes the result
|
|
|
|
# under the provided name. If the encoded structure is empty, echoes nothing.
|
|
|
|
# 1: name to be output in yaml
|
|
|
|
# 2: encoded map-string-string (no duplicate keys)
|
|
|
|
# 3: bool, whether to yaml-quote the value string in the output (defaults to true)
|
|
|
|
# 4: key-value separator (defaults to ':')
|
|
|
|
# 5: item separator (defaults to ',')
|
|
|
|
function yaml-map-string-string {
|
|
|
|
declare -r name="${1}"
|
|
|
|
declare -r encoded="${2}"
|
|
|
|
declare -r quote_val_string="${3:-true}"
|
|
|
|
declare -r kv_sep="${4:-:}"
|
|
|
|
declare -r item_sep="${5:-,}"
|
|
|
|
|
|
|
|
declare -a pairs # indexed array
|
|
|
|
declare -A map # associative array
|
|
|
|
IFS="${item_sep}" read -ra pairs <<<"${encoded}" # split on item_sep # TODO(mtaufen): try quoting this too
|
|
|
|
for pair in "${pairs[@]}"; do
|
|
|
|
declare key
|
|
|
|
declare value
|
|
|
|
IFS="${kv_sep}" read -r key value <<<"${pair}" # split on kv_sep
|
|
|
|
map[$key]="${value}" # add to associative array
|
|
|
|
done
|
|
|
|
# only output if there is a non-empty map
|
|
|
|
if [[ ${#map[@]} -gt 0 ]]; then
|
|
|
|
echo "${name}:"
|
|
|
|
for k in "${!map[@]}"; do
|
|
|
|
if [[ "${quote_val_string}" == "true" ]]; then
|
|
|
|
# declare across two lines so errexit can catch failures
|
|
|
|
declare v
|
|
|
|
v=$(yaml-quote "${map[$k]}")
|
|
|
|
echo " ${k}: ${v}"
|
|
|
|
else
|
|
|
|
echo " ${k}: ${map[$k]}"
|
|
|
|
fi
|
|
|
|
done
|
|
|
|
fi
|
|
|
|
}
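# Illustration only (not executed): this is what the evictionHard and featureGates
# handling in build-kubelet-config below relies on, e.g.
#   yaml-map-string-string 'evictionHard' 'memory.available<100Mi,nodefs.available<10%' true '<'
# emits YAML along the lines of
#   evictionHard:
#     memory.available: '100Mi'
#     nodefs.available: '10%'
# (again, key order is not guaranteed).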
|
|
|
|
|
2018-02-15 08:34:46 +00:00
|
|
|
# $1: if 'true', we're rendering flags for a master, else a node
|
|
|
|
function construct-kubelet-flags {
|
|
|
|
local master=$1
|
|
|
|
local flags="${KUBELET_TEST_LOG_LEVEL:-"--v=2"} ${KUBELET_TEST_ARGS:-}"
|
|
|
|
flags+=" --allow-privileged=true"
|
|
|
|
flags+=" --cloud-provider=gce"
|
|
|
|
# Keep in sync with CONTAINERIZED_MOUNTER_HOME in configure-helper.sh
|
|
|
|
flags+=" --experimental-mounter-path=/home/kubernetes/containerized_mounter/mounter"
|
|
|
|
flags+=" --experimental-check-node-capabilities-before-mount=true"
|
|
|
|
# Keep in sync with the mkdir command in configure-helper.sh (until the TODO is resolved)
|
|
|
|
flags+=" --cert-dir=/var/lib/kubelet/pki/"
|
2018-05-24 16:49:20 +00:00
|
|
|
# Configure the directory that the Kubelet should use to store dynamic config checkpoints
|
|
|
|
flags+=" --dynamic-config-dir=/var/lib/kubelet/dynamic-config"
|
|
|
|
|
2018-02-15 08:34:46 +00:00
|
|
|
|
|
|
|
if [[ "${master}" == "true" ]]; then
|
|
|
|
flags+=" ${MASTER_KUBELET_TEST_ARGS:-}"
|
|
|
|
if [[ "${REGISTER_MASTER_KUBELET:-false}" == "true" ]]; then
|
|
|
|
#TODO(mikedanese): allow static pods to start before creating a client
|
|
|
|
#flags+=" --bootstrap-kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig"
|
|
|
|
#flags+=" --kubeconfig=/var/lib/kubelet/kubeconfig"
|
|
|
|
flags+=" --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig"
|
|
|
|
flags+=" --register-schedulable=false"
|
|
|
|
fi
|
|
|
|
else # For nodes
|
|
|
|
flags+=" ${NODE_KUBELET_TEST_ARGS:-}"
|
|
|
|
flags+=" --bootstrap-kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig"
|
|
|
|
flags+=" --kubeconfig=/var/lib/kubelet/kubeconfig"
|
|
|
|
fi
|
|
|
|
# Network plugin
|
|
|
|
if [[ -n "${NETWORK_PROVIDER:-}" || -n "${NETWORK_POLICY_PROVIDER:-}" ]]; then
|
|
|
|
flags+=" --cni-bin-dir=/home/kubernetes/bin"
|
2018-06-04 02:41:47 +00:00
|
|
|
if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" || "${ENABLE_NETD:-}" == "true" ]]; then
|
2018-02-15 08:34:46 +00:00
|
|
|
# Calico uses CNI always.
|
|
|
|
# Note that network policy won't work for master node.
|
|
|
|
if [[ "${master}" == "true" ]]; then
|
|
|
|
flags+=" --network-plugin=${NETWORK_PROVIDER}"
|
|
|
|
else
|
|
|
|
flags+=" --network-plugin=cni"
|
|
|
|
fi
|
|
|
|
else
|
|
|
|
# Otherwise use the configured value.
|
2018-06-04 02:41:47 +00:00
|
|
|
flags+=" --network-plugin=${NETWORK_PROVIDER}"
|
|
|
|
|
2018-02-15 08:34:46 +00:00
|
|
|
fi
|
|
|
|
fi
|
|
|
|
if [[ -n "${NON_MASQUERADE_CIDR:-}" ]]; then
|
2018-06-04 02:41:47 +00:00
|
|
|
flags+=" --non-masquerade-cidr=${NON_MASQUERADE_CIDR}"
|
2018-02-15 08:34:46 +00:00
|
|
|
fi
|
|
|
|
flags+=" --volume-plugin-dir=${VOLUME_PLUGIN_DIR}"
|
2018-03-13 17:16:52 +00:00
|
|
|
local node_labels=$(build-node-labels ${master})
|
2018-02-15 08:34:46 +00:00
|
|
|
if [[ -n "${node_labels:-}" ]]; then
|
|
|
|
flags+=" --node-labels=${node_labels}"
|
|
|
|
fi
|
|
|
|
if [[ -n "${NODE_TAINTS:-}" ]]; then
|
|
|
|
flags+=" --register-with-taints=${NODE_TAINTS}"
|
|
|
|
fi
|
|
|
|
# TODO(mtaufen): ROTATE_CERTIFICATES seems unused; delete it?
|
|
|
|
if [[ -n "${ROTATE_CERTIFICATES:-}" ]]; then
|
|
|
|
flags+=" --rotate-certificates=true"
|
|
|
|
fi
|
|
|
|
if [[ -n "${CONTAINER_RUNTIME:-}" ]]; then
|
|
|
|
flags+=" --container-runtime=${CONTAINER_RUNTIME}"
|
|
|
|
fi
|
|
|
|
if [[ -n "${CONTAINER_RUNTIME_ENDPOINT:-}" ]]; then
|
|
|
|
flags+=" --container-runtime-endpoint=${CONTAINER_RUNTIME_ENDPOINT}"
|
|
|
|
fi
|
2018-04-25 00:50:07 +00:00
|
|
|
if [[ -n "${MAX_PODS_PER_NODE:-}" ]]; then
|
|
|
|
flags+=" --max-pods=${MAX_PODS_PER_NODE}"
|
|
|
|
fi
|
2018-02-15 08:34:46 +00:00
|
|
|
|
|
|
|
KUBELET_ARGS="${flags}"
|
|
|
|
}
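# Illustration only (not executed): for a node with default settings, KUBELET_ARGS
# ends up as a single space-separated flag string roughly of the form
#   --v=2 --allow-privileged=true --cloud-provider=gce ... --bootstrap-kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig --kubeconfig=/var/lib/kubelet/kubeconfig ...
# The exact contents depend on NETWORK_PROVIDER, node labels, taints, and the
# container runtime variables handled above.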
|
|
|
|
|
2018-04-04 23:11:10 +00:00
|
|
|
# $1: if 'true', we're rendering config for a master, else a node
|
|
|
|
function build-kubelet-config {
|
|
|
|
local master=$1
|
|
|
|
local file=$2
|
|
|
|
|
|
|
|
rm -f "${file}"
|
|
|
|
{
|
|
|
|
declare quoted_dns_server_ip
|
|
|
|
declare quoted_dns_domain
|
|
|
|
quoted_dns_server_ip=$(yaml-quote "${DNS_SERVER_IP}")
|
|
|
|
quoted_dns_domain=$(yaml-quote "${DNS_DOMAIN}")
|
|
|
|
cat <<EOF
|
|
|
|
kind: KubeletConfiguration
|
|
|
|
apiVersion: kubelet.config.k8s.io/v1beta1
|
|
|
|
cgroupRoot: /
|
|
|
|
clusterDNS:
|
|
|
|
- ${quoted_dns_server_ip}
|
|
|
|
clusterDomain: ${quoted_dns_domain}
|
|
|
|
staticPodPath: /etc/kubernetes/manifests
|
|
|
|
readOnlyPort: 10255
|
|
|
|
EOF
|
|
|
|
|
|
|
|
# --- begin master-specific config ---
|
|
|
|
if [[ "${master}" == "true" ]]; then
|
|
|
|
cat <<EOF
|
|
|
|
enableDebuggingHandlers: false
|
|
|
|
hairpinMode: none
|
|
|
|
authentication:
|
|
|
|
webhook:
|
|
|
|
enabled: false
|
|
|
|
anonymous:
|
|
|
|
enabled: true
|
|
|
|
authorization:
|
|
|
|
mode: AlwaysAllow
|
|
|
|
EOF
|
|
|
|
if [[ "${REGISTER_MASTER_KUBELET:-false}" == "false" ]]; then
|
|
|
|
# Note: Standalone mode is used by GKE
|
|
|
|
declare quoted_master_ip_range
|
|
|
|
quoted_master_ip_range=$(yaml-quote "${MASTER_IP_RANGE}")
|
|
|
|
cat <<EOF
|
|
|
|
podCidr: ${quoted_master_ip_range}
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
# --- end master-specific config ---
|
|
|
|
else
|
|
|
|
# --- begin node-specific config ---
|
|
|
|
# Keep authentication.x509.clientCAFile in sync with CA_CERT_BUNDLE_PATH in configure-helper.sh
|
|
|
|
cat <<EOF
|
|
|
|
enableDebuggingHandlers: true
|
|
|
|
authentication:
|
|
|
|
x509:
|
|
|
|
clientCAFile: /etc/srv/kubernetes/pki/ca-certificates.crt
|
|
|
|
EOF
|
|
|
|
if [[ "${HAIRPIN_MODE:-}" == "promiscuous-bridge" ]] || \
|
|
|
|
[[ "${HAIRPIN_MODE:-}" == "hairpin-veth" ]] || \
|
|
|
|
[[ "${HAIRPIN_MODE:-}" == "none" ]]; then
|
|
|
|
declare quoted_hairpin_mode
|
|
|
|
quoted_hairpin_mode=$(yaml-quote "${HAIRPIN_MODE}")
|
|
|
|
cat <<EOF
|
|
|
|
hairpinMode: ${quoted_hairpin_mode}
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
# --- end node-specific config ---
|
|
|
|
fi
|
|
|
|
|
|
|
|
# Note: ENABLE_MANIFEST_URL is used by GKE
|
|
|
|
if [[ "${ENABLE_MANIFEST_URL:-}" == "true" ]]; then
|
|
|
|
declare quoted_manifest_url
|
|
|
|
quoted_manifest_url=$(yaml-quote "${MANIFEST_URL}")
|
|
|
|
cat <<EOF
|
|
|
|
staticPodURL: ${quoted_manifest_url}
|
|
|
|
EOF
|
|
|
|
yaml-map-string-stringarray 'staticPodURLHeader' "${MANIFEST_URL_HEADER}"
|
|
|
|
fi
|
|
|
|
|
|
|
|
if [[ -n "${EVICTION_HARD:-}" ]]; then
|
|
|
|
yaml-map-string-string 'evictionHard' "${EVICTION_HARD}" true '<'
|
|
|
|
fi
|
|
|
|
|
|
|
|
if [[ -n "${FEATURE_GATES:-}" ]]; then
|
|
|
|
yaml-map-string-string 'featureGates' "${FEATURE_GATES}" false '='
|
|
|
|
fi
|
|
|
|
} > "${file}"
|
|
|
|
}
|
|
|
|
|
2018-02-15 08:54:36 +00:00
|
|
|
function build-kube-master-certs {
|
|
|
|
local file=$1
|
|
|
|
rm -f ${file}
|
|
|
|
cat >$file <<EOF
|
|
|
|
KUBEAPISERVER_CERT: $(yaml-quote ${KUBEAPISERVER_CERT_BASE64:-})
|
|
|
|
KUBEAPISERVER_KEY: $(yaml-quote ${KUBEAPISERVER_KEY_BASE64:-})
|
|
|
|
CA_KEY: $(yaml-quote ${CA_KEY_BASE64:-})
|
|
|
|
AGGREGATOR_CA_KEY: $(yaml-quote ${AGGREGATOR_CA_KEY_BASE64:-})
|
|
|
|
REQUESTHEADER_CA_CERT: $(yaml-quote ${REQUESTHEADER_CA_CERT_BASE64:-})
|
|
|
|
PROXY_CLIENT_CERT: $(yaml-quote ${PROXY_CLIENT_CERT_BASE64:-})
|
|
|
|
PROXY_CLIENT_KEY: $(yaml-quote ${PROXY_CLIENT_KEY_BASE64:-})
|
|
|
|
EOF
|
|
|
|
}
|
|
|
|
|
|
|
|
# $1: if 'true', we're building a master yaml, else a node
|
|
|
|
function build-kube-env {
|
|
|
|
local master=$1
|
|
|
|
local file=$2
|
|
|
|
|
|
|
|
local server_binary_tar_url=$SERVER_BINARY_TAR_URL
|
|
|
|
local kube_manifests_tar_url="${KUBE_MANIFESTS_TAR_URL:-}"
|
|
|
|
if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \
|
2018-03-15 20:39:56 +00:00
|
|
|
[[ "${master}" == "false" && ("${NODE_OS_DISTRIBUTION}" == "ubuntu" || "${NODE_OS_DISTRIBUTION}" == "custom") ]]; then
|
2018-02-15 08:54:36 +00:00
|
|
|
# TODO: Support fallback .tar.gz settings on Container Linux
|
|
|
|
server_binary_tar_url=$(split_csv "${SERVER_BINARY_TAR_URL}")
|
|
|
|
kube_manifests_tar_url=$(split_csv "${KUBE_MANIFESTS_TAR_URL}")
|
|
|
|
fi
|
|
|
|
|
|
|
|
rm -f ${file}
|
|
|
|
cat >$file <<EOF
|
|
|
|
CLUSTER_NAME: $(yaml-quote ${CLUSTER_NAME})
|
|
|
|
ENV_TIMESTAMP: $(yaml-quote $(date -u +%Y-%m-%dT%T%z))
|
|
|
|
INSTANCE_PREFIX: $(yaml-quote ${INSTANCE_PREFIX})
|
|
|
|
NODE_INSTANCE_PREFIX: $(yaml-quote ${NODE_INSTANCE_PREFIX})
|
|
|
|
NODE_TAGS: $(yaml-quote ${NODE_TAGS:-})
|
|
|
|
NODE_NETWORK: $(yaml-quote ${NETWORK:-})
|
|
|
|
NODE_SUBNETWORK: $(yaml-quote ${SUBNETWORK:-})
|
|
|
|
CLUSTER_IP_RANGE: $(yaml-quote ${CLUSTER_IP_RANGE:-10.244.0.0/16})
|
|
|
|
SERVER_BINARY_TAR_URL: $(yaml-quote ${server_binary_tar_url})
|
|
|
|
SERVER_BINARY_TAR_HASH: $(yaml-quote ${SERVER_BINARY_TAR_HASH})
|
|
|
|
PROJECT_ID: $(yaml-quote ${PROJECT})
|
|
|
|
NETWORK_PROJECT_ID: $(yaml-quote ${NETWORK_PROJECT})
|
|
|
|
SERVICE_CLUSTER_IP_RANGE: $(yaml-quote ${SERVICE_CLUSTER_IP_RANGE})
|
|
|
|
KUBERNETES_MASTER_NAME: $(yaml-quote ${KUBERNETES_MASTER_NAME})
|
|
|
|
ALLOCATE_NODE_CIDRS: $(yaml-quote ${ALLOCATE_NODE_CIDRS:-false})
|
|
|
|
ENABLE_CLUSTER_MONITORING: $(yaml-quote ${ENABLE_CLUSTER_MONITORING:-none})
|
2018-04-06 16:24:56 +00:00
|
|
|
ENABLE_PROMETHEUS_MONITORING: $(yaml-quote ${ENABLE_PROMETHEUS_MONITORING:-false})
|
2018-02-15 08:54:36 +00:00
|
|
|
ENABLE_METRICS_SERVER: $(yaml-quote ${ENABLE_METRICS_SERVER:-false})
|
|
|
|
ENABLE_METADATA_AGENT: $(yaml-quote ${ENABLE_METADATA_AGENT:-none})
|
2018-04-05 14:09:11 +00:00
|
|
|
METADATA_AGENT_CPU_REQUEST: $(yaml-quote ${METADATA_AGENT_CPU_REQUEST:-})
|
|
|
|
METADATA_AGENT_MEMORY_REQUEST: $(yaml-quote ${METADATA_AGENT_MEMORY_REQUEST:-})
|
|
|
|
METADATA_AGENT_CLUSTER_LEVEL_CPU_REQUEST: $(yaml-quote ${METADATA_AGENT_CLUSTER_LEVEL_CPU_REQUEST:-})
|
|
|
|
METADATA_AGENT_CLUSTER_LEVEL_MEMORY_REQUEST: $(yaml-quote ${METADATA_AGENT_CLUSTER_LEVEL_MEMORY_REQUEST:-})
|
2018-02-15 08:54:36 +00:00
|
|
|
DOCKER_REGISTRY_MIRROR_URL: $(yaml-quote ${DOCKER_REGISTRY_MIRROR_URL:-})
|
|
|
|
ENABLE_L7_LOADBALANCING: $(yaml-quote ${ENABLE_L7_LOADBALANCING:-none})
|
|
|
|
ENABLE_CLUSTER_LOGGING: $(yaml-quote ${ENABLE_CLUSTER_LOGGING:-false})
|
|
|
|
ENABLE_CLUSTER_UI: $(yaml-quote ${ENABLE_CLUSTER_UI:-false})
|
|
|
|
ENABLE_NODE_PROBLEM_DETECTOR: $(yaml-quote ${ENABLE_NODE_PROBLEM_DETECTOR:-none})
|
|
|
|
NODE_PROBLEM_DETECTOR_VERSION: $(yaml-quote ${NODE_PROBLEM_DETECTOR_VERSION:-})
|
|
|
|
NODE_PROBLEM_DETECTOR_TAR_HASH: $(yaml-quote ${NODE_PROBLEM_DETECTOR_TAR_HASH:-})
|
|
|
|
ENABLE_NODE_LOGGING: $(yaml-quote ${ENABLE_NODE_LOGGING:-false})
|
2018-06-01 02:18:49 +00:00
|
|
|
ENABLE_RESCHEDULER: $(yaml-quote ${ENABLE_RESCHEDULER:-false})
|
2018-02-15 08:54:36 +00:00
|
|
|
LOGGING_DESTINATION: $(yaml-quote ${LOGGING_DESTINATION:-})
|
|
|
|
ELASTICSEARCH_LOGGING_REPLICAS: $(yaml-quote ${ELASTICSEARCH_LOGGING_REPLICAS:-})
|
|
|
|
ENABLE_CLUSTER_DNS: $(yaml-quote ${ENABLE_CLUSTER_DNS:-false})
|
|
|
|
CLUSTER_DNS_CORE_DNS: $(yaml-quote ${CLUSTER_DNS_CORE_DNS:-false})
|
|
|
|
DNS_SERVER_IP: $(yaml-quote ${DNS_SERVER_IP:-})
|
|
|
|
DNS_DOMAIN: $(yaml-quote ${DNS_DOMAIN:-})
|
|
|
|
ENABLE_DNS_HORIZONTAL_AUTOSCALER: $(yaml-quote ${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false})
|
|
|
|
KUBE_PROXY_DAEMONSET: $(yaml-quote ${KUBE_PROXY_DAEMONSET:-false})
|
|
|
|
KUBE_PROXY_TOKEN: $(yaml-quote ${KUBE_PROXY_TOKEN:-})
|
|
|
|
KUBE_PROXY_MODE: $(yaml-quote ${KUBE_PROXY_MODE:-iptables})
|
|
|
|
NODE_PROBLEM_DETECTOR_TOKEN: $(yaml-quote ${NODE_PROBLEM_DETECTOR_TOKEN:-})
|
|
|
|
ADMISSION_CONTROL: $(yaml-quote ${ADMISSION_CONTROL:-})
|
|
|
|
ENABLE_POD_SECURITY_POLICY: $(yaml-quote ${ENABLE_POD_SECURITY_POLICY:-})
|
|
|
|
MASTER_IP_RANGE: $(yaml-quote ${MASTER_IP_RANGE})
|
|
|
|
RUNTIME_CONFIG: $(yaml-quote ${RUNTIME_CONFIG})
|
|
|
|
CA_CERT: $(yaml-quote ${CA_CERT_BASE64:-})
|
|
|
|
KUBELET_CERT: $(yaml-quote ${KUBELET_CERT_BASE64:-})
|
|
|
|
KUBELET_KEY: $(yaml-quote ${KUBELET_KEY_BASE64:-})
|
2018-06-04 02:41:47 +00:00
|
|
|
NETWORK_PROVIDER: $(yaml-quote ${NETWORK_PROVIDER:-})
|
2018-02-15 08:54:36 +00:00
|
|
|
NETWORK_POLICY_PROVIDER: $(yaml-quote ${NETWORK_POLICY_PROVIDER:-})
|
|
|
|
PREPULL_E2E_IMAGES: $(yaml-quote ${PREPULL_E2E_IMAGES:-})
|
|
|
|
HAIRPIN_MODE: $(yaml-quote ${HAIRPIN_MODE:-})
|
|
|
|
E2E_STORAGE_TEST_ENVIRONMENT: $(yaml-quote ${E2E_STORAGE_TEST_ENVIRONMENT:-})
|
|
|
|
KUBE_DOCKER_REGISTRY: $(yaml-quote ${KUBE_DOCKER_REGISTRY:-})
|
|
|
|
KUBE_ADDON_REGISTRY: $(yaml-quote ${KUBE_ADDON_REGISTRY:-})
|
|
|
|
MULTIZONE: $(yaml-quote ${MULTIZONE:-})
|
2018-06-04 02:41:47 +00:00
|
|
|
NON_MASQUERADE_CIDR: $(yaml-quote ${NON_MASQUERADE_CIDR:-})
|
2018-02-15 08:54:36 +00:00
|
|
|
ENABLE_DEFAULT_STORAGE_CLASS: $(yaml-quote ${ENABLE_DEFAULT_STORAGE_CLASS:-})
|
|
|
|
ENABLE_APISERVER_BASIC_AUDIT: $(yaml-quote ${ENABLE_APISERVER_BASIC_AUDIT:-})
|
|
|
|
ENABLE_APISERVER_ADVANCED_AUDIT: $(yaml-quote ${ENABLE_APISERVER_ADVANCED_AUDIT:-})
|
|
|
|
ENABLE_CACHE_MUTATION_DETECTOR: $(yaml-quote ${ENABLE_CACHE_MUTATION_DETECTOR:-false})
|
|
|
|
ENABLE_PATCH_CONVERSION_DETECTOR: $(yaml-quote ${ENABLE_PATCH_CONVERSION_DETECTOR:-false})
|
|
|
|
ADVANCED_AUDIT_POLICY: $(yaml-quote ${ADVANCED_AUDIT_POLICY:-})
|
|
|
|
ADVANCED_AUDIT_BACKEND: $(yaml-quote ${ADVANCED_AUDIT_BACKEND:-log})
|
2018-05-18 13:52:47 +00:00
|
|
|
ADVANCED_AUDIT_TRUNCATING_BACKEND: $(yaml-quote ${ADVANCED_AUDIT_TRUNCATING_BACKEND:-})
|
2018-03-08 13:05:33 +00:00
|
|
|
ADVANCED_AUDIT_LOG_MODE: $(yaml-quote ${ADVANCED_AUDIT_LOG_MODE:-})
|
|
|
|
ADVANCED_AUDIT_LOG_BUFFER_SIZE: $(yaml-quote ${ADVANCED_AUDIT_LOG_BUFFER_SIZE:-})
|
|
|
|
ADVANCED_AUDIT_LOG_MAX_BATCH_SIZE: $(yaml-quote ${ADVANCED_AUDIT_LOG_MAX_BATCH_SIZE:-})
|
|
|
|
ADVANCED_AUDIT_LOG_MAX_BATCH_WAIT: $(yaml-quote ${ADVANCED_AUDIT_LOG_MAX_BATCH_WAIT:-})
|
|
|
|
ADVANCED_AUDIT_LOG_THROTTLE_QPS: $(yaml-quote ${ADVANCED_AUDIT_LOG_THROTTLE_QPS:-})
|
|
|
|
ADVANCED_AUDIT_LOG_THROTTLE_BURST: $(yaml-quote ${ADVANCED_AUDIT_LOG_THROTTLE_BURST:-})
|
|
|
|
ADVANCED_AUDIT_LOG_INITIAL_BACKOFF: $(yaml-quote ${ADVANCED_AUDIT_LOG_INITIAL_BACKOFF:-})
|
|
|
|
ADVANCED_AUDIT_WEBHOOK_MODE: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_MODE:-})
|
2018-02-15 08:54:36 +00:00
|
|
|
ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE:-})
|
|
|
|
ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_SIZE: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_SIZE:-})
|
|
|
|
ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_WAIT: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_WAIT:-})
|
|
|
|
ADVANCED_AUDIT_WEBHOOK_THROTTLE_QPS: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_THROTTLE_QPS:-})
|
|
|
|
ADVANCED_AUDIT_WEBHOOK_THROTTLE_BURST: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_THROTTLE_BURST:-})
|
|
|
|
ADVANCED_AUDIT_WEBHOOK_INITIAL_BACKOFF: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_INITIAL_BACKOFF:-})
|
|
|
|
GCE_API_ENDPOINT: $(yaml-quote ${GCE_API_ENDPOINT:-})
|
|
|
|
GCE_GLBC_IMAGE: $(yaml-quote ${GCE_GLBC_IMAGE:-})
|
2018-03-22 19:02:55 +00:00
|
|
|
ENABLE_NODE_JOURNAL: $(yaml-quote ${ENABLE_NODE_JOURNAL:-false})
|
2018-02-15 08:54:36 +00:00
|
|
|
PROMETHEUS_TO_SD_ENDPOINT: $(yaml-quote ${PROMETHEUS_TO_SD_ENDPOINT:-})
|
|
|
|
PROMETHEUS_TO_SD_PREFIX: $(yaml-quote ${PROMETHEUS_TO_SD_PREFIX:-})
|
|
|
|
ENABLE_PROMETHEUS_TO_SD: $(yaml-quote ${ENABLE_PROMETHEUS_TO_SD:-false})
|
|
|
|
ENABLE_POD_PRIORITY: $(yaml-quote ${ENABLE_POD_PRIORITY:-})
|
|
|
|
CONTAINER_RUNTIME: $(yaml-quote ${CONTAINER_RUNTIME:-})
|
|
|
|
CONTAINER_RUNTIME_ENDPOINT: $(yaml-quote ${CONTAINER_RUNTIME_ENDPOINT:-})
|
|
|
|
CONTAINER_RUNTIME_NAME: $(yaml-quote ${CONTAINER_RUNTIME_NAME:-})
|
|
|
|
NODE_LOCAL_SSDS_EXT: $(yaml-quote ${NODE_LOCAL_SSDS_EXT:-})
|
|
|
|
LOAD_IMAGE_COMMAND: $(yaml-quote ${LOAD_IMAGE_COMMAND:-})
|
2018-01-10 15:22:46 +00:00
|
|
|
ZONE: $(yaml-quote ${ZONE})
|
2018-04-20 12:21:31 +00:00
|
|
|
REGION: $(yaml-quote ${REGION})
|
2018-02-15 08:34:46 +00:00
|
|
|
VOLUME_PLUGIN_DIR: $(yaml-quote ${VOLUME_PLUGIN_DIR})
|
|
|
|
KUBELET_ARGS: $(yaml-quote ${KUBELET_ARGS})
|
2018-04-04 23:11:10 +00:00
|
|
|
REQUIRE_METADATA_KUBELET_CONFIG_FILE: $(yaml-quote true)
|
2018-06-01 02:19:34 +00:00
|
|
|
ENABLE_NETD: $(yaml-quote ${ENABLE_NETD:-false})
|
2018-06-03 08:35:27 +00:00
|
|
|
CUSTOM_NETD_YAML: |
|
|
|
|
$(echo "${CUSTOM_NETD_YAML:-}" | sed -e "s/'/''/g")
|
2018-07-26 18:59:00 +00:00
|
|
|
CUSTOM_CALICO_NODE_DAEMONSET_YAML: |
|
|
|
|
$(echo "${CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}" | sed -e "s/'/''/g")
|
|
|
|
CUSTOM_TYPHA_DEPLOYMENT_YAML: |
|
|
|
|
$(echo "${CUSTOM_TYPHA_DEPLOYMENT_YAML:-}" | sed -e "s/'/''/g")
|
2018-02-15 08:54:36 +00:00
|
|
|
EOF
|
2018-03-02 19:30:13 +00:00
|
|
|
if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "gci" ]] || \
|
|
|
|
[[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "gci" ]] || \
|
|
|
|
[[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "cos" ]] || \
|
|
|
|
[[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "cos" ]]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
REMOUNT_VOLUME_PLUGIN_DIR: $(yaml-quote ${REMOUNT_VOLUME_PLUGIN_DIR:-true})
|
|
|
|
EOF
|
|
|
|
fi
|
2018-02-15 08:54:36 +00:00
|
|
|
if [ -n "${KUBE_APISERVER_REQUEST_TIMEOUT:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
KUBE_APISERVER_REQUEST_TIMEOUT: $(yaml-quote ${KUBE_APISERVER_REQUEST_TIMEOUT})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
TERMINATED_POD_GC_THRESHOLD: $(yaml-quote ${TERMINATED_POD_GC_THRESHOLD})
|
|
|
|
EOF
|
|
|
|
fi
|
2018-03-15 20:39:56 +00:00
|
|
|
if [[ "${master}" == "true" && ("${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" || "${MASTER_OS_DISTRIBUTION}" == "ubuntu") ]] || \
|
|
|
|
[[ "${master}" == "false" && ("${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" = "ubuntu" || "${NODE_OS_DISTRIBUTION}" = "custom") ]] ; then
|
2018-02-15 08:54:36 +00:00
|
|
|
cat >>$file <<EOF
|
|
|
|
KUBE_MANIFESTS_TAR_URL: $(yaml-quote ${kube_manifests_tar_url})
|
|
|
|
KUBE_MANIFESTS_TAR_HASH: $(yaml-quote ${KUBE_MANIFESTS_TAR_HASH})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [ -n "${TEST_CLUSTER:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
TEST_CLUSTER: $(yaml-quote ${TEST_CLUSTER})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [ -n "${DOCKER_TEST_LOG_LEVEL:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
DOCKER_TEST_LOG_LEVEL: $(yaml-quote ${DOCKER_TEST_LOG_LEVEL})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [ -n "${DOCKER_LOG_DRIVER:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
DOCKER_LOG_DRIVER: $(yaml-quote ${DOCKER_LOG_DRIVER})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [ -n "${DOCKER_LOG_MAX_SIZE:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
DOCKER_LOG_MAX_SIZE: $(yaml-quote ${DOCKER_LOG_MAX_SIZE})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [ -n "${DOCKER_LOG_MAX_FILE:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
DOCKER_LOG_MAX_FILE: $(yaml-quote ${DOCKER_LOG_MAX_FILE})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [ -n "${FEATURE_GATES:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
FEATURE_GATES: $(yaml-quote ${FEATURE_GATES})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [ -n "${PROVIDER_VARS:-}" ]; then
|
|
|
|
local var_name
|
|
|
|
local var_value
|
|
|
|
|
|
|
|
for var_name in ${PROVIDER_VARS}; do
|
|
|
|
eval "local var_value=\$(yaml-quote \${${var_name}})"
|
|
|
|
cat >>$file <<EOF
|
|
|
|
${var_name}: ${var_value}
|
|
|
|
EOF
|
|
|
|
done
|
|
|
|
fi
|
|
|
|
|
|
|
|
if [[ "${master}" == "true" ]]; then
|
|
|
|
# Master-only env vars.
|
|
|
|
cat >>$file <<EOF
|
|
|
|
KUBERNETES_MASTER: $(yaml-quote "true")
|
|
|
|
KUBE_USER: $(yaml-quote ${KUBE_USER})
|
|
|
|
KUBE_PASSWORD: $(yaml-quote ${KUBE_PASSWORD})
|
|
|
|
KUBE_BEARER_TOKEN: $(yaml-quote ${KUBE_BEARER_TOKEN})
|
|
|
|
MASTER_CERT: $(yaml-quote ${MASTER_CERT_BASE64:-})
|
|
|
|
MASTER_KEY: $(yaml-quote ${MASTER_KEY_BASE64:-})
|
|
|
|
KUBECFG_CERT: $(yaml-quote ${KUBECFG_CERT_BASE64:-})
|
|
|
|
KUBECFG_KEY: $(yaml-quote ${KUBECFG_KEY_BASE64:-})
|
|
|
|
KUBELET_APISERVER: $(yaml-quote ${KUBELET_APISERVER:-})
|
|
|
|
NUM_NODES: $(yaml-quote ${NUM_NODES})
|
|
|
|
STORAGE_BACKEND: $(yaml-quote ${STORAGE_BACKEND:-etcd3})
|
|
|
|
STORAGE_MEDIA_TYPE: $(yaml-quote ${STORAGE_MEDIA_TYPE:-})
|
|
|
|
ENABLE_GARBAGE_COLLECTOR: $(yaml-quote ${ENABLE_GARBAGE_COLLECTOR:-})
|
|
|
|
ENABLE_LEGACY_ABAC: $(yaml-quote ${ENABLE_LEGACY_ABAC:-})
|
|
|
|
MASTER_ADVERTISE_ADDRESS: $(yaml-quote ${MASTER_ADVERTISE_ADDRESS:-})
|
|
|
|
ETCD_CA_KEY: $(yaml-quote ${ETCD_CA_KEY_BASE64:-})
|
|
|
|
ETCD_CA_CERT: $(yaml-quote ${ETCD_CA_CERT_BASE64:-})
|
|
|
|
ETCD_PEER_KEY: $(yaml-quote ${ETCD_PEER_KEY_BASE64:-})
|
|
|
|
ETCD_PEER_CERT: $(yaml-quote ${ETCD_PEER_CERT_BASE64:-})
|
2018-02-28 18:21:04 +00:00
|
|
|
ENCRYPTION_PROVIDER_CONFIG: $(yaml-quote ${ENCRYPTION_PROVIDER_CONFIG:-})
|
2018-02-15 08:54:36 +00:00
|
|
|
EOF
|
2018-02-22 20:14:36 +00:00
|
|
|
if [[ "${ENABLE_TOKENREQUEST:-}" == "true" ]]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
SERVICEACCOUNT_ISSUER: $(yaml-quote ${SERVICEACCOUNT_ISSUER:-})
|
|
|
|
SERVICEACCOUNT_API_AUDIENCES: $(yaml-quote ${SERVICEACCOUNT_API_AUDIENCES:-})
|
|
|
|
EOF
|
|
|
|
fi
|
2018-02-15 08:54:36 +00:00
|
|
|
# KUBE_APISERVER_REQUEST_TIMEOUT_SEC (if set) controls the --request-timeout
|
|
|
|
# flag
|
|
|
|
if [ -n "${KUBE_APISERVER_REQUEST_TIMEOUT_SEC:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
KUBE_APISERVER_REQUEST_TIMEOUT_SEC: $(yaml-quote ${KUBE_APISERVER_REQUEST_TIMEOUT_SEC})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
# ETCD_IMAGE (if set) allows using a custom etcd image.
|
|
|
|
if [ -n "${ETCD_IMAGE:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
ETCD_IMAGE: $(yaml-quote ${ETCD_IMAGE})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
# ETCD_DOCKER_REPOSITORY (if set) allows using a custom etcd docker repository to pull the etcd image from.
|
|
|
|
if [ -n "${ETCD_DOCKER_REPOSITORY:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
ETCD_DOCKER_REPOSITORY: $(yaml-quote ${ETCD_DOCKER_REPOSITORY})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
# ETCD_VERSION (if set) allows using a custom version of etcd.
# The main use case is rolling back the etcd v3 API,
# where we need a 3.0.* image but are rolling back to 2.3.7.
|
|
|
|
if [ -n "${ETCD_VERSION:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
ETCD_VERSION: $(yaml-quote ${ETCD_VERSION})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [ -n "${ETCD_HOSTNAME:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
ETCD_HOSTNAME: $(yaml-quote ${ETCD_HOSTNAME})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [ -n "${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC: $(yaml-quote ${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [ -n "${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC: $(yaml-quote ${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [ -n "${ETCD_COMPACTION_INTERVAL_SEC:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
ETCD_COMPACTION_INTERVAL_SEC: $(yaml-quote ${ETCD_COMPACTION_INTERVAL_SEC})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [ -n "${ETCD_QUOTA_BACKEND_BYTES:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
ETCD_QUOTA_BACKEND_BYTES: $(yaml-quote ${ETCD_QUOTA_BACKEND_BYTES})
|
2018-05-17 08:48:13 +00:00
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [ -n "${ETCD_EXTRA_ARGS:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
ETCD_EXTRA_ARGS: $(yaml-quote ${ETCD_EXTRA_ARGS})
|
2018-02-15 08:54:36 +00:00
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [ -n "${APISERVER_TEST_ARGS:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
APISERVER_TEST_ARGS: $(yaml-quote ${APISERVER_TEST_ARGS})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [ -n "${CONTROLLER_MANAGER_TEST_ARGS:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
CONTROLLER_MANAGER_TEST_ARGS: $(yaml-quote ${CONTROLLER_MANAGER_TEST_ARGS})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [ -n "${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
CONTROLLER_MANAGER_TEST_LOG_LEVEL: $(yaml-quote ${CONTROLLER_MANAGER_TEST_LOG_LEVEL})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [ -n "${SCHEDULER_TEST_ARGS:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
SCHEDULER_TEST_ARGS: $(yaml-quote ${SCHEDULER_TEST_ARGS})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [ -n "${SCHEDULER_TEST_LOG_LEVEL:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
SCHEDULER_TEST_LOG_LEVEL: $(yaml-quote ${SCHEDULER_TEST_LOG_LEVEL})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [ -n "${INITIAL_ETCD_CLUSTER:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
INITIAL_ETCD_CLUSTER: $(yaml-quote ${INITIAL_ETCD_CLUSTER})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [ -n "${INITIAL_ETCD_CLUSTER_STATE:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
INITIAL_ETCD_CLUSTER_STATE: $(yaml-quote ${INITIAL_ETCD_CLUSTER_STATE})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [ -n "${CLUSTER_SIGNING_DURATION:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
CLUSTER_SIGNING_DURATION: $(yaml-quote ${CLUSTER_SIGNING_DURATION})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [[ "${NODE_ACCELERATORS:-}" == *"type=nvidia"* ]]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
ENABLE_NVIDIA_GPU_DEVICE_PLUGIN: $(yaml-quote "true")
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [ -n "${ADDON_MANAGER_LEADER_ELECTION:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
ADDON_MANAGER_LEADER_ELECTION: $(yaml-quote ${ADDON_MANAGER_LEADER_ELECTION})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
|
|
|
|
else
|
|
|
|
# Node-only env vars.
|
|
|
|
cat >>$file <<EOF
|
|
|
|
KUBERNETES_MASTER: $(yaml-quote "false")
|
|
|
|
EXTRA_DOCKER_OPTS: $(yaml-quote ${EXTRA_DOCKER_OPTS:-})
|
|
|
|
EOF
|
|
|
|
if [ -n "${KUBEPROXY_TEST_ARGS:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
KUBEPROXY_TEST_ARGS: $(yaml-quote ${KUBEPROXY_TEST_ARGS})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [ -n "${KUBEPROXY_TEST_LOG_LEVEL:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
KUBEPROXY_TEST_LOG_LEVEL: $(yaml-quote ${KUBEPROXY_TEST_LOG_LEVEL})
|
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
fi
|
|
|
|
if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
ENABLE_CLUSTER_AUTOSCALER: $(yaml-quote ${ENABLE_CLUSTER_AUTOSCALER})
|
|
|
|
AUTOSCALER_MIG_CONFIG: $(yaml-quote ${AUTOSCALER_MIG_CONFIG})
|
|
|
|
AUTOSCALER_EXPANDER_CONFIG: $(yaml-quote ${AUTOSCALER_EXPANDER_CONFIG})
|
|
|
|
EOF
|
2018-03-13 17:16:52 +00:00
|
|
|
if [[ "${master}" == "false" ]]; then
|
|
|
|
# TODO(kubernetes/autoscaler#718): AUTOSCALER_ENV_VARS is a hotfix for cluster autoscaler,
|
|
|
|
# which reads the kube-env to determine the shape of a node and was broken by #60020.
|
|
|
|
# This should be removed as soon as a more reliable source of information is available!
|
|
|
|
local node_labels=$(build-node-labels false)
|
|
|
|
local node_taints="${NODE_TAINTS:-}"
|
|
|
|
local autoscaler_env_vars="node_labels=${node_labels};node_taints=${node_taints}"
|
|
|
|
cat >>$file <<EOF
|
|
|
|
AUTOSCALER_ENV_VARS: $(yaml-quote ${autoscaler_env_vars})
|
|
|
|
EOF
|
|
|
|
fi
|
2018-02-15 08:54:36 +00:00
|
|
|
fi
|
|
|
|
if [ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
SCHEDULING_ALGORITHM_PROVIDER: $(yaml-quote ${SCHEDULING_ALGORITHM_PROVIDER})
|
2018-04-25 00:50:07 +00:00
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
if [ -n "${MAX_PODS_PER_NODE:-}" ]; then
|
|
|
|
cat >>$file <<EOF
|
|
|
|
MAX_PODS_PER_NODE: $(yaml-quote ${MAX_PODS_PER_NODE})
|
2018-02-15 08:54:36 +00:00
|
|
|
EOF
|
|
|
|
fi
|
|
|
|
}
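
# Illustration only (values are made up, and this assumes yaml-quote wraps its
# argument in single quotes): a fragment of the kube-env written above might
# look like the following for a hypothetical configuration:
#
#   ETCD_IMAGE: '3.2.24'
#   ETCD_VERSION: '3.2.24'
#   ENABLE_CLUSTER_AUTOSCALER: 'true'
#   MAX_PODS_PER_NODE: '110'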

function sha1sum-file() {
  if which sha1sum >/dev/null 2>&1; then
    sha1sum "$1" | awk '{ print $1 }'
  else
    shasum -a1 "$1" | awk '{ print $1 }'
  fi
}
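
# Example usage (the path is hypothetical); sha1sum-file prints only the digest
# and falls back to shasum on systems (e.g. macOS) that lack sha1sum:
#   tar_hash=$(sha1sum-file "${KUBE_TEMP}/kubernetes-server-linux-amd64.tar.gz")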

# Create certificate pairs for the cluster.
# $1: The public IP for the master.
#
# These are used for static cert distribution (e.g. static clustering) at
# cluster creation time. This will be obsoleted once we implement dynamic
# clustering.
#
# The following certificate pairs are created:
#
#  - ca (the cluster's certificate authority)
#  - server
#  - kubelet
#  - kubecfg (for kubectl)
#
# TODO(roberthbailey): Replace easyrsa with a simple Go program to generate
# the certs that we need.
#
# Assumed vars
#   KUBE_TEMP
#   MASTER_NAME
#
# Vars set:
#   CERT_DIR
#   CA_CERT_BASE64
#   MASTER_CERT_BASE64
#   MASTER_KEY_BASE64
#   KUBELET_CERT_BASE64
#   KUBELET_KEY_BASE64
#   KUBECFG_CERT_BASE64
#   KUBECFG_KEY_BASE64
function create-certs {
  local -r primary_cn="${1}"

  # Determine extra certificate names for master
  local octets=($(echo "${SERVICE_CLUSTER_IP_RANGE}" | sed -e 's|/.*||' -e 's/\./ /g'))
  ((octets[3]+=1))
  local -r service_ip=$(echo "${octets[*]}" | sed 's/ /./g')
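  # For illustration (values assumed, not read from any config): with
  # SERVICE_CLUSTER_IP_RANGE=10.0.0.0/16 the sed strips the "/16" suffix, the
  # octets become (10 0 0 0), the last octet is incremented, and service_ip
  # resolves to 10.0.0.1 - the first address in the service range.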
  local sans=""
  for extra in $@; do
    if [[ -n "${extra}" ]]; then
      sans="${sans}IP:${extra},"
    fi
  done
  sans="${sans}IP:${service_ip},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${DNS_DOMAIN},DNS:${MASTER_NAME}"

  echo "Generating certs for alternate-names: ${sans}"

  setup-easyrsa
  PRIMARY_CN="${primary_cn}" SANS="${sans}" generate-certs
  AGGREGATOR_PRIMARY_CN="${primary_cn}" AGGREGATOR_SANS="${sans}" generate-aggregator-certs

  # By default, linux wraps base64 output every 76 cols, so we use 'tr -d' to remove whitespaces.
  # Note 'base64 -w0' doesn't work on Mac OS X, which has different flags.
  CA_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/ca.key" | base64 | tr -d '\r\n')
  CA_CERT_BASE64=$(cat "${CERT_DIR}/pki/ca.crt" | base64 | tr -d '\r\n')
  MASTER_CERT_BASE64=$(cat "${CERT_DIR}/pki/issued/${MASTER_NAME}.crt" | base64 | tr -d '\r\n')
  MASTER_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/${MASTER_NAME}.key" | base64 | tr -d '\r\n')
  KUBELET_CERT_BASE64=$(cat "${CERT_DIR}/pki/issued/kubelet.crt" | base64 | tr -d '\r\n')
  KUBELET_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/kubelet.key" | base64 | tr -d '\r\n')
  KUBECFG_CERT_BASE64=$(cat "${CERT_DIR}/pki/issued/kubecfg.crt" | base64 | tr -d '\r\n')
  KUBECFG_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/kubecfg.key" | base64 | tr -d '\r\n')
  KUBEAPISERVER_CERT_BASE64=$(cat "${CERT_DIR}/pki/issued/kube-apiserver.crt" | base64 | tr -d '\r\n')
  KUBEAPISERVER_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/kube-apiserver.key" | base64 | tr -d '\r\n')

  # Setting up an additional directory (beyond pki) as it is the simplest way to
  # ensure we get a different CA pair to sign the proxy-client certs, and whose
  # public key we can send to the user-apiserver to validate communication.
  AGGREGATOR_CA_KEY_BASE64=$(cat "${AGGREGATOR_CERT_DIR}/pki/private/ca.key" | base64 | tr -d '\r\n')
  REQUESTHEADER_CA_CERT_BASE64=$(cat "${AGGREGATOR_CERT_DIR}/pki/ca.crt" | base64 | tr -d '\r\n')
  PROXY_CLIENT_CERT_BASE64=$(cat "${AGGREGATOR_CERT_DIR}/pki/issued/proxy-client.crt" | base64 | tr -d '\r\n')
  PROXY_CLIENT_KEY_BASE64=$(cat "${AGGREGATOR_CERT_DIR}/pki/private/proxy-client.key" | base64 | tr -d '\r\n')
}

# Set up easy-rsa directory structure.
#
# Assumed vars
#   KUBE_TEMP
#
# Vars set:
#   CERT_DIR
#   AGGREGATOR_CERT_DIR
function setup-easyrsa {
  local -r cert_create_debug_output=$(mktemp "${KUBE_TEMP}/cert_create_debug_output.XXX")
  # Note: This was heavily cribbed from make-ca-cert.sh
  (set -x
    cd "${KUBE_TEMP}"
    curl -L -O --connect-timeout 20 --retry 6 --retry-delay 2 https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz
    tar xzf easy-rsa.tar.gz
    mkdir easy-rsa-master/kubelet
    cp -r easy-rsa-master/easyrsa3/* easy-rsa-master/kubelet
    mkdir easy-rsa-master/aggregator
    cp -r easy-rsa-master/easyrsa3/* easy-rsa-master/aggregator) &>${cert_create_debug_output} || true
  CERT_DIR="${KUBE_TEMP}/easy-rsa-master/easyrsa3"
  AGGREGATOR_CERT_DIR="${KUBE_TEMP}/easy-rsa-master/aggregator"
  if [ ! -x "${CERT_DIR}/easyrsa" -o ! -x "${AGGREGATOR_CERT_DIR}/easyrsa" ]; then
    # TODO(roberthbailey,porridge): add better error handling here,
    # see https://github.com/kubernetes/kubernetes/issues/55229
    cat "${cert_create_debug_output}" >&2
    echo "=== Failed to setup easy-rsa: Aborting ===" >&2
    exit 2
  fi
}

# Runs the easy RSA commands to generate certificate files.
# The generated files are in ${CERT_DIR}
#
# Assumed vars
#   KUBE_TEMP
#   MASTER_NAME
#   CERT_DIR
#   PRIMARY_CN: Primary canonical name
#   SANS: Subject alternate names
#
function generate-certs {
  local -r cert_create_debug_output=$(mktemp "${KUBE_TEMP}/cert_create_debug_output.XXX")
  # Note: This was heavily cribbed from make-ca-cert.sh
  (set -x
    cd "${CERT_DIR}"
    ./easyrsa init-pki
    # this puts the cert into pki/ca.crt and the key into pki/private/ca.key
    ./easyrsa --batch "--req-cn=${PRIMARY_CN}@$(date +%s)" build-ca nopass
    ./easyrsa --subject-alt-name="${SANS}" build-server-full "${MASTER_NAME}" nopass
    ./easyrsa build-client-full kube-apiserver nopass

    kube::util::ensure-cfssl "${KUBE_TEMP}/cfssl"

    # make the config for the signer
    echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment","client auth"]}}}' > "ca-config.json"
    # create the kubelet client cert with the correct groups
    echo '{"CN":"kubelet","names":[{"O":"system:nodes"}],"hosts":[""],"key":{"algo":"rsa","size":2048}}' | "${CFSSL_BIN}" gencert -ca=pki/ca.crt -ca-key=pki/private/ca.key -config=ca-config.json - | "${CFSSLJSON_BIN}" -bare kubelet
    mv "kubelet-key.pem" "pki/private/kubelet.key"
    mv "kubelet.pem" "pki/issued/kubelet.crt"
    rm -f "kubelet.csr"

    # Make a superuser client cert with subject "O=system:masters, CN=kubecfg"
    ./easyrsa --dn-mode=org \
      --req-cn=kubecfg --req-org=system:masters \
      --req-c= --req-st= --req-city= --req-email= --req-ou= \
      build-client-full kubecfg nopass) &>${cert_create_debug_output} || true
  local output_file_missing=0
  local output_file
  for output_file in \
    "${CERT_DIR}/pki/private/ca.key" \
    "${CERT_DIR}/pki/ca.crt" \
    "${CERT_DIR}/pki/issued/${MASTER_NAME}.crt" \
    "${CERT_DIR}/pki/private/${MASTER_NAME}.key" \
    "${CERT_DIR}/pki/issued/kubelet.crt" \
    "${CERT_DIR}/pki/private/kubelet.key" \
    "${CERT_DIR}/pki/issued/kubecfg.crt" \
    "${CERT_DIR}/pki/private/kubecfg.key" \
    "${CERT_DIR}/pki/issued/kube-apiserver.crt" \
    "${CERT_DIR}/pki/private/kube-apiserver.key"
  do
    if [[ ! -s "${output_file}" ]]; then
      echo "Expected file ${output_file} not created" >&2
      output_file_missing=1
    fi
  done
  if (( $output_file_missing )); then
    # TODO(roberthbailey,porridge): add better error handling here,
    # see https://github.com/kubernetes/kubernetes/issues/55229
    cat "${cert_create_debug_output}" >&2
    echo "=== Failed to generate master certificates: Aborting ===" >&2
    exit 2
  fi
}

# Runs the easy RSA commands to generate aggregator certificate files.
# The generated files are in ${AGGREGATOR_CERT_DIR}
#
# Assumed vars
#   KUBE_TEMP
#   AGGREGATOR_MASTER_NAME
#   AGGREGATOR_CERT_DIR
#   AGGREGATOR_PRIMARY_CN: Primary canonical name
#   AGGREGATOR_SANS: Subject alternate names
#
function generate-aggregator-certs {
  local -r cert_create_debug_output=$(mktemp "${KUBE_TEMP}/cert_create_debug_output.XXX")
  # Note: This was heavily cribbed from make-ca-cert.sh
  (set -x
    cd "${KUBE_TEMP}/easy-rsa-master/aggregator"
    ./easyrsa init-pki
    # this puts the cert into pki/ca.crt and the key into pki/private/ca.key
    ./easyrsa --batch "--req-cn=${AGGREGATOR_PRIMARY_CN}@$(date +%s)" build-ca nopass
    ./easyrsa --subject-alt-name="${AGGREGATOR_SANS}" build-server-full "${AGGREGATOR_MASTER_NAME}" nopass
    ./easyrsa build-client-full aggregator-apiserver nopass

    kube::util::ensure-cfssl "${KUBE_TEMP}/cfssl"

    # make the config for the signer
    echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment","client auth"]}}}' > "ca-config.json"
    # create the aggregator client cert with the correct groups
    echo '{"CN":"aggregator","hosts":[""],"key":{"algo":"rsa","size":2048}}' | "${CFSSL_BIN}" gencert -ca=pki/ca.crt -ca-key=pki/private/ca.key -config=ca-config.json - | "${CFSSLJSON_BIN}" -bare proxy-client
    mv "proxy-client-key.pem" "pki/private/proxy-client.key"
    mv "proxy-client.pem" "pki/issued/proxy-client.crt"
    rm -f "proxy-client.csr"

    # Make a client cert with subject "O=system:aggregator, CN=proxy-clientcfg"
    ./easyrsa --dn-mode=org \
      --req-cn=proxy-clientcfg --req-org=system:aggregator \
      --req-c= --req-st= --req-city= --req-email= --req-ou= \
      build-client-full proxy-clientcfg nopass) &>${cert_create_debug_output} || true
  local output_file_missing=0
  local output_file
  for output_file in \
    "${AGGREGATOR_CERT_DIR}/pki/private/ca.key" \
    "${AGGREGATOR_CERT_DIR}/pki/ca.crt" \
    "${AGGREGATOR_CERT_DIR}/pki/issued/proxy-client.crt" \
    "${AGGREGATOR_CERT_DIR}/pki/private/proxy-client.key"
  do
    if [[ ! -s "${output_file}" ]]; then
      echo "Expected file ${output_file} not created" >&2
      output_file_missing=1
    fi
  done
  if (( $output_file_missing )); then
    # TODO(roberthbailey,porridge): add better error handling here,
    # see https://github.com/kubernetes/kubernetes/issues/55229
    cat "${cert_create_debug_output}" >&2
    echo "=== Failed to generate aggregator certificates: Aborting ===" >&2
    exit 2
  fi
}

#
# Using provided master env, extracts value from provided key.
#
# Args:
# $1 master env (kube-env of master; result of calling get-master-env)
# $2 env key to use
function get-env-val() {
  local match=`(echo "${1}" | grep -E "^${2}:") || echo ""`
  if [[ -z ${match} ]]; then
    echo ""
  fi
  echo ${match} | cut -d : -f 2 | cut -d \' -f 2
}
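
# Example usage (the kube-env contents are hypothetical): given a dump stored
# in ${master_env} containing a line such as
#   KUBE_PROXY_TOKEN: 'abc123'
# the call below prints abc123, i.e. the value between the single quotes on the
# matching line:
#   get-env-val "${master_env}" "KUBE_PROXY_TOKEN"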

# Load the master env by calling get-master-env, and extract important values
function parse-master-env() {
  # Get required master env vars
  local master_env=$(get-master-env)
  KUBE_PROXY_TOKEN=$(get-env-val "${master_env}" "KUBE_PROXY_TOKEN")
  NODE_PROBLEM_DETECTOR_TOKEN=$(get-env-val "${master_env}" "NODE_PROBLEM_DETECTOR_TOKEN")
  CA_CERT_BASE64=$(get-env-val "${master_env}" "CA_CERT")
  CA_KEY_BASE64=$(get-env-val "${master_env}" "CA_KEY")
  KUBEAPISERVER_CERT_BASE64=$(get-env-val "${master_env}" "KUBEAPISERVER_CERT")
  KUBEAPISERVER_KEY_BASE64=$(get-env-val "${master_env}" "KUBEAPISERVER_KEY")
  EXTRA_DOCKER_OPTS=$(get-env-val "${master_env}" "EXTRA_DOCKER_OPTS")
  KUBELET_CERT_BASE64=$(get-env-val "${master_env}" "KUBELET_CERT")
  KUBELET_KEY_BASE64=$(get-env-val "${master_env}" "KUBELET_KEY")
  MASTER_CERT_BASE64=$(get-env-val "${master_env}" "MASTER_CERT")
  MASTER_KEY_BASE64=$(get-env-val "${master_env}" "MASTER_KEY")
  AGGREGATOR_CA_KEY_BASE64=$(get-env-val "${master_env}" "AGGREGATOR_CA_KEY")
  REQUESTHEADER_CA_CERT_BASE64=$(get-env-val "${master_env}" "REQUESTHEADER_CA_CERT")
  PROXY_CLIENT_CERT_BASE64=$(get-env-val "${master_env}" "PROXY_CLIENT_CERT")
  PROXY_CLIENT_KEY_BASE64=$(get-env-val "${master_env}" "PROXY_CLIENT_KEY")
  ENABLE_LEGACY_ABAC=$(get-env-val "${master_env}" "ENABLE_LEGACY_ABAC")
}

# Update or verify required gcloud components are installed
# at minimum required version.
# Assumed vars
#   KUBE_PROMPT_FOR_UPDATE
function update-or-verify-gcloud() {
  local sudo_prefix=""
  if [ ! -w $(dirname `which gcloud`) ]; then
    sudo_prefix="sudo"
  fi
  # update and install components as needed
  if [[ "${KUBE_PROMPT_FOR_UPDATE}" == "y" ]]; then
    ${sudo_prefix} gcloud ${gcloud_prompt:-} components install alpha
    ${sudo_prefix} gcloud ${gcloud_prompt:-} components install beta
    ${sudo_prefix} gcloud ${gcloud_prompt:-} components update
  else
    local version=$(gcloud version --format=json)
    python -c'
import json,sys
from distutils import version

minVersion = version.LooseVersion("1.3.0")
required = [ "alpha", "beta", "core" ]
data = json.loads(sys.argv[1])
rel = data.get("Google Cloud SDK")
if "CL @" in rel:
  print("Using dev version of gcloud: %s" %rel)
  exit(0)
if rel != "HEAD" and version.LooseVersion(rel) < minVersion:
  print("gcloud version out of date ( < %s )" % minVersion)
  exit(1)
missing = []
for c in required:
  if not data.get(c):
    missing += [c]
if missing:
  for c in missing:
    print ("missing required gcloud component \"{0}\"".format(c))
  exit(1)
' """${version}"""
  fi
}

# Robustly try to create a static ip.
# $1: The name of the ip to create
# $2: The name of the region to create the ip in.
function create-static-ip() {
  detect-project
  local attempt=0
  local REGION="$2"
  while true; do
    if gcloud compute addresses create "$1" \
      --project "${PROJECT}" \
      --region "${REGION}" -q > /dev/null; then
      # successful operation - wait until it's visible
      start="$(date +%s)"
      while true; do
        now="$(date +%s)"
        # Timeout set to 15 minutes
        if [[ $((now - start)) -gt 900 ]]; then
          echo "Timeout while waiting for master IP visibility"
          exit 2
        fi
        if gcloud compute addresses describe "$1" --project "${PROJECT}" --region "${REGION}" >/dev/null 2>&1; then
          break
        fi
        echo "Master IP not visible yet. Waiting..."
        sleep 5
      done
      break
    fi

    if gcloud compute addresses describe "$1" \
      --project "${PROJECT}" \
      --region "${REGION}" >/dev/null 2>&1; then
      # it exists - postcondition satisfied
      break
    fi

    if (( attempt > 4 )); then
      echo -e "${color_red}Failed to create static ip $1 ${color_norm}" >&2
      exit 2
    fi
    attempt=$(($attempt+1))
    echo -e "${color_yellow}Attempt $attempt failed to create static ip $1. Retrying.${color_norm}" >&2
    sleep $(($attempt * 5))
  done
}

# Robustly try to create a firewall rule.
# $1: The name of firewall rule.
# $2: IP ranges.
# $3: Target tags for this firewall rule.
function create-firewall-rule() {
  detect-project
  local attempt=0
  while true; do
    if ! gcloud compute firewall-rules create "$1" \
      --project "${NETWORK_PROJECT}" \
      --network "${NETWORK}" \
      --source-ranges "$2" \
      --target-tags "$3" \
      --allow tcp,udp,icmp,esp,ah,sctp; then
      if (( attempt > 4 )); then
        echo -e "${color_red}Failed to create firewall rule $1 ${color_norm}" >&2
        exit 2
      fi
      echo -e "${color_yellow}Attempt $(($attempt+1)) failed to create firewall rule $1. Retrying.${color_norm}" >&2
      attempt=$(($attempt+1))
      sleep $(($attempt * 5))
    else
      break
    fi
  done
}

# Format the string argument for gcloud network.
function make-gcloud-network-argument() {
  local network_project="$1"
  local region="$2"
  local network="$3"
  local subnet="$4"
  local address="$5"          # optional
  local enable_ip_alias="$6"  # optional
  local alias_size="$7"       # optional

  local networkURL="projects/${network_project}/global/networks/${network}"
  local subnetURL="projects/${network_project}/regions/${region}/subnetworks/${subnet:-}"

  local ret=""

  if [[ "${enable_ip_alias}" == 'true' ]]; then
    ret="--network-interface"
    ret="${ret} network=${networkURL}"
    # If address is omitted, instance will not receive an external IP.
    ret="${ret},address=${address:-}"
    ret="${ret},subnet=${subnetURL}"
    ret="${ret},aliases=pods-default:${alias_size}"
    ret="${ret} --no-can-ip-forward"
  else
    if [[ -n ${subnet:-} ]]; then
      ret="${ret} --subnet ${subnetURL}"
    else
      ret="${ret} --network ${networkURL}"
    fi

    ret="${ret} --can-ip-forward"
    if [[ -n ${address:-} ]]; then
      ret="${ret} --address ${address}"
    fi
  fi

  echo "${ret}"
}
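
# Illustration only (project, region and subnet names are made up): with
# ENABLE_IP_ALIASES=true this prints a single --network-interface argument like
#   --network-interface network=projects/my-proj/global/networks/default,address=,subnet=projects/my-proj/regions/us-central1/subnetworks/my-subnet,aliases=pods-default:/24 --no-can-ip-forward
# and otherwise a plain "--subnet ... --can-ip-forward" (or "--network ...")
# argument string for the caller to splice into a gcloud command line.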

# $1: version (required)
function get-template-name-from-version() {
  # trim template name to pass gce name validation
  echo "${NODE_INSTANCE_PREFIX}-template-${1}" | cut -c 1-63 | sed 's/[\.\+]/-/g;s/-*$//g'
}
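
# For example (the prefix is hypothetical): with NODE_INSTANCE_PREFIX=e2e-minion,
# get-template-name-from-version "v1.10.0" prints "e2e-minion-template-v1-10-0";
# the name is truncated to 63 characters, dots and plus signs become dashes, and
# trailing dashes are stripped to satisfy GCE name validation.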

# validates the NODE_LOCAL_SSDS_EXT variable
function validate-node-local-ssds-ext(){
  ssdopts="${1}"

  if [[ -z "${ssdopts[0]}" || -z "${ssdopts[1]}" || -z "${ssdopts[2]}" ]]; then
    echo -e "${color_red}Local SSD: NODE_LOCAL_SSDS_EXT is malformed, found ${ssdopts[0]-_},${ssdopts[1]-_},${ssdopts[2]-_} ${color_norm}" >&2
    exit 2
  fi
  if [[ "${ssdopts[1]}" != "scsi" && "${ssdopts[1]}" != "nvme" ]]; then
    echo -e "${color_red}Local SSD: Interface must be scsi or nvme, found: ${ssdopts[1]} ${color_norm}" >&2
    exit 2
  fi
  if [[ "${ssdopts[2]}" != "fs" && "${ssdopts[2]}" != "block" ]]; then
    echo -e "${color_red}Local SSD: Filesystem type must be fs or block, found: ${ssdopts[2]} ${color_norm}" >&2
    exit 2
  fi
  local_ssd_ext_count=$((local_ssd_ext_count+ssdopts[0]))
  if [[ "${local_ssd_ext_count}" -gt "${GCE_MAX_LOCAL_SSD}" || "${local_ssd_ext_count}" -lt 1 ]]; then
    echo -e "${color_red}Local SSD: Total number of local ssds must range from 1 to 8, found: ${local_ssd_ext_count} ${color_norm}" >&2
    exit 2
  fi
}
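
# Example value (illustrative only): NODE_LOCAL_SSDS_EXT="1,scsi,fs;2,nvme,block"
# requests one SCSI SSD exposed with a filesystem plus two NVMe SSDs exposed as
# block devices; each "count,interface,format" group is checked by the function
# above, and the total across groups may not exceed GCE_MAX_LOCAL_SSD.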

# Robustly try to create an instance template.
# $1: The name of the instance template.
# $2: The scopes flag.
# $3: String of comma-separated metadata entries (must all be from a file).
function create-node-template() {
  detect-project
  detect-subnetworks
  local template_name="$1"

  # First, ensure the template doesn't exist.
  # TODO(zmerlynn): To make this really robust, we need to parse the output and
  #                 add retries. Just relying on a non-zero exit code doesn't
  #                 distinguish an ephemeral failed call from a "not-exists".
  if gcloud compute instance-templates describe "$template_name" --project "${PROJECT}" &>/dev/null; then
    echo "Instance template ${1} already exists; deleting." >&2
    if ! gcloud compute instance-templates delete "$template_name" --project "${PROJECT}" --quiet &>/dev/null; then
      echo -e "${color_yellow}Failed to delete existing instance template${color_norm}" >&2
      exit 2
    fi
  fi

  local gcloud="gcloud"

  local accelerator_args=""
  # VMs with Accelerators cannot be live migrated.
  # More details here - https://cloud.google.com/compute/docs/gpus/add-gpus#create-new-gpu-instance
  if [[ ! -z "${NODE_ACCELERATORS}" ]]; then
    accelerator_args="--maintenance-policy TERMINATE --restart-on-failure --accelerator ${NODE_ACCELERATORS}"
    gcloud="gcloud beta"
  fi

  if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
    gcloud="gcloud beta"
  fi

  local preemptible_minions=""
  if [[ "${PREEMPTIBLE_NODE}" == "true" ]]; then
    preemptible_minions="--preemptible --maintenance-policy TERMINATE"
  fi

  local local_ssds=""
  local_ssd_ext_count=0
  if [[ ! -z ${NODE_LOCAL_SSDS_EXT:-} ]]; then
    IFS=";" read -r -a ssdgroups <<< "${NODE_LOCAL_SSDS_EXT:-}"
    for ssdgroup in "${ssdgroups[@]}"
    do
      IFS="," read -r -a ssdopts <<< "${ssdgroup}"
      validate-node-local-ssds-ext "${ssdopts}"
      for i in $(seq ${ssdopts[0]}); do
        local_ssds="$local_ssds--local-ssd=interface=${ssdopts[1]} "
      done
    done
  fi

  if [[ ! -z ${NODE_LOCAL_SSDS+x} ]]; then
    # The NODE_LOCAL_SSDS check below fixes issue #49171
    # Some versions of seq will count down from 1 if "seq 0" is specified
    if [[ ${NODE_LOCAL_SSDS} -ge 1 ]]; then
      for i in $(seq ${NODE_LOCAL_SSDS}); do
        local_ssds="$local_ssds--local-ssd=interface=SCSI "
      done
    fi
  fi

  local network=$(make-gcloud-network-argument \
    "${NETWORK_PROJECT}" \
    "${REGION}" \
    "${NETWORK}" \
    "${SUBNETWORK:-}" \
    "" \
    "${ENABLE_IP_ALIASES:-}" \
    "${IP_ALIAS_SIZE:-}")

  local attempt=1
  while true; do
    echo "Attempt ${attempt} to create ${1}" >&2
    if ! ${gcloud} compute instance-templates create \
      "$template_name" \
      --project "${PROJECT}" \
      --machine-type "${NODE_SIZE}" \
      --boot-disk-type "${NODE_DISK_TYPE}" \
      --boot-disk-size "${NODE_DISK_SIZE}" \
      --image-project="${NODE_IMAGE_PROJECT}" \
      --image "${NODE_IMAGE}" \
      --service-account "${NODE_SERVICE_ACCOUNT}" \
      --tags "${NODE_TAG}" \
      ${accelerator_args} \
      ${local_ssds} \
      --region "${REGION}" \
      ${network} \
      ${preemptible_minions} \
      $2 \
      --metadata-from-file $3 >&2; then
      if (( attempt > 5 )); then
        echo -e "${color_red}Failed to create instance template $template_name ${color_norm}" >&2
        exit 2
      fi
      echo -e "${color_yellow}Attempt ${attempt} failed to create instance template $template_name. Retrying.${color_norm}" >&2
      attempt=$(($attempt+1))
      sleep $(($attempt * 5))

      # In case the previous attempt failed with something like a
      # Backend Error and left the entry laying around, delete it
      # before we try again.
      gcloud compute instance-templates delete "$template_name" --project "${PROJECT}" &>/dev/null || true
    else
      break
    fi
  done
}
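
# Illustrative call (the template name, scopes and file names are hypothetical):
#   create-node-template "${NODE_INSTANCE_PREFIX}-template" \
#     "--scopes storage-ro,compute-rw,monitoring,logging-write" \
#     "kube-env=${KUBE_TEMP}/node-kube-env.yaml,user-data=${KUBE_TEMP}/node-user-data"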

# Instantiate a kubernetes cluster
#
# Assumed vars
#   KUBE_ROOT
#   <Various vars set in config file>
function kube-up() {
  kube::util::ensure-temp-dir
  detect-project

  load-or-gen-kube-basicauth
  load-or-gen-kube-bearertoken

  # Make sure we have the tar files staged on Google Storage
  find-release-tars
  upload-server-tars

  # Ensure that the environment variables specifying the number of MIGs to create are set.
  set_num_migs

  if [[ ${KUBE_USE_EXISTING_MASTER:-} == "true" ]]; then
    detect-master
    parse-master-env
    create-subnetworks
    detect-subnetworks
    create-nodes
  elif [[ ${KUBE_REPLICATE_EXISTING_MASTER:-} == "true" ]]; then
    if [[ "${MASTER_OS_DISTRIBUTION}" != "gci" && "${MASTER_OS_DISTRIBUTION}" != "ubuntu" ]]; then
      echo "Master replication supported only for gci and ubuntu"
      return 1
    fi
    create-loadbalancer
    # If replication of master fails, we need to ensure that the replica is removed from etcd clusters.
    if ! replicate-master; then
      remove-replica-from-etcd 2379 || true
      remove-replica-from-etcd 4002 || true
    fi
  else
    check-existing
    create-network
    create-subnetworks
    detect-subnetworks
    write-cluster-location
    write-cluster-name
    create-autoscaler-config
    create-master
    create-nodes-firewall
    create-nodes-template
    create-nodes
    check-cluster
  fi
}

function check-existing() {
  local running_in_terminal=false
  # May be false if tty is not allocated (for example with ssh -T).
  if [[ -t 1 ]]; then
    running_in_terminal=true
  fi

  if [[ ${running_in_terminal} == "true" || ${KUBE_UP_AUTOMATIC_CLEANUP} == "true" ]]; then
    if ! check-resources; then
      local run_kube_down="n"
      echo "${KUBE_RESOURCE_FOUND} found." >&2
      # Get user input only if running in terminal.
      if [[ ${running_in_terminal} == "true" && ${KUBE_UP_AUTOMATIC_CLEANUP} == "false" ]]; then
        read -p "Would you like to shut down the old cluster (call kube-down)? [y/N] " run_kube_down
      fi
      if [[ ${run_kube_down} == "y" || ${run_kube_down} == "Y" || ${KUBE_UP_AUTOMATIC_CLEANUP} == "true" ]]; then
        echo "... calling kube-down" >&2
        kube-down
      fi
    fi
  fi
}

function check-network-mode() {
  local mode="$(gcloud compute networks list --filter="name=('${NETWORK}')" --project ${NETWORK_PROJECT} --format='value(x_gcloud_subnet_mode)' || true)"
  # The deprecated field uses lower case. Convert to upper case for consistency.
  echo "$(echo $mode | tr [a-z] [A-Z])"
}
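
# Example output (illustrative): "AUTO" for an auto-mode network, "CUSTOM" for a
# custom-mode network, and "LEGACY" for a legacy network without subnets.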

function create-network() {
  if ! gcloud compute networks --project "${NETWORK_PROJECT}" describe "${NETWORK}" &>/dev/null; then
    # The network needs to be created synchronously or we have a race. The
    # firewalls can be added concurrent with instance creation.
    local network_mode="auto"
    if [[ "${CREATE_CUSTOM_NETWORK:-}" == "true" ]]; then
      network_mode="custom"
    fi
    echo "Creating new ${network_mode} network: ${NETWORK}"
    gcloud compute networks create --project "${NETWORK_PROJECT}" "${NETWORK}" --subnet-mode="${network_mode}"
  else
    PREEXISTING_NETWORK=true
    PREEXISTING_NETWORK_MODE="$(check-network-mode)"
    echo "Found existing network ${NETWORK} in ${PREEXISTING_NETWORK_MODE} mode."
  fi

  if ! gcloud compute firewall-rules --project "${NETWORK_PROJECT}" describe "${CLUSTER_NAME}-default-internal-master" &>/dev/null; then
    gcloud compute firewall-rules create "${CLUSTER_NAME}-default-internal-master" \
      --project "${NETWORK_PROJECT}" \
      --network "${NETWORK}" \
      --source-ranges "10.0.0.0/8" \
      --allow "tcp:1-2379,tcp:2382-65535,udp:1-65535,icmp" \
      --target-tags "${MASTER_TAG}"&
  fi

  if ! gcloud compute firewall-rules --project "${NETWORK_PROJECT}" describe "${CLUSTER_NAME}-default-internal-node" &>/dev/null; then
    gcloud compute firewall-rules create "${CLUSTER_NAME}-default-internal-node" \
      --project "${NETWORK_PROJECT}" \
      --network "${NETWORK}" \
      --source-ranges "10.0.0.0/8" \
      --allow "tcp:1-65535,udp:1-65535,icmp" \
      --target-tags "${NODE_TAG}"&
  fi

  if ! gcloud compute firewall-rules describe --project "${NETWORK_PROJECT}" "${NETWORK}-default-ssh" &>/dev/null; then
    gcloud compute firewall-rules create "${NETWORK}-default-ssh" \
      --project "${NETWORK_PROJECT}" \
      --network "${NETWORK}" \
      --source-ranges "0.0.0.0/0" \
      --allow "tcp:22" &
  fi
}

function expand-default-subnetwork() {
  gcloud compute networks update "${NETWORK}" \
    --switch-to-custom-subnet-mode \
    --project "${NETWORK_PROJECT}" \
    --quiet || true
  gcloud compute networks subnets expand-ip-range "${NETWORK}" \
    --region="${REGION}" \
    --project "${NETWORK_PROJECT}" \
    --prefix-length=19 \
    --quiet
}

function create-subnetworks() {
  case ${ENABLE_IP_ALIASES} in
    true) echo "IP aliases are enabled. Creating subnetworks.";;
    false)
      echo "IP aliases are disabled."
      if [[ "${ENABLE_BIG_CLUSTER_SUBNETS}" = "true" ]]; then
        if [[ "${PREEXISTING_NETWORK}" != "true" ]]; then
          expand-default-subnetwork
        else
          echo "${color_yellow}Using pre-existing network ${NETWORK}, subnets won't be expanded to /19!${color_norm}"
        fi
      elif [[ "${CREATE_CUSTOM_NETWORK:-}" == "true" && "${PREEXISTING_NETWORK}" != "true" ]]; then
        gcloud compute networks subnets create "${SUBNETWORK}" --project "${NETWORK_PROJECT}" --region "${REGION}" --network "${NETWORK}" --range "${NODE_IP_RANGE}"
      fi
      return;;
    *) echo "${color_red}Invalid argument to ENABLE_IP_ALIASES${color_norm}"
       exit 1;;
  esac

  # Look for the alias subnet, it must exist and have a secondary
  # range configured.
  local subnet=$(gcloud beta compute networks subnets describe \
    --project "${NETWORK_PROJECT}" \
    --region ${REGION} \
    ${IP_ALIAS_SUBNETWORK} 2>/dev/null)
  if [[ -z ${subnet} ]]; then
    echo "Creating subnet ${NETWORK}:${IP_ALIAS_SUBNETWORK}"
    gcloud beta compute networks subnets create \
      ${IP_ALIAS_SUBNETWORK} \
      --description "Automatically generated subnet for ${INSTANCE_PREFIX} cluster. This will be removed on cluster teardown." \
      --project "${NETWORK_PROJECT}" \
      --network ${NETWORK} \
      --region ${REGION} \
      --range ${NODE_IP_RANGE} \
      --secondary-range "pods-default=${CLUSTER_IP_RANGE}" \
      --secondary-range "services-default=${SERVICE_CLUSTER_IP_RANGE}"
    echo "Created subnetwork ${IP_ALIAS_SUBNETWORK}"
  else
    if ! echo ${subnet} | grep --quiet secondaryIpRanges; then
      echo "${color_red}Subnet ${IP_ALIAS_SUBNETWORK} does not have a secondary range${color_norm}"
      exit 1
    fi
  fi
}

# detect-subnetworks sets the SUBNETWORK var if not already set
# Assumed vars:
#   NETWORK
#   REGION
#   NETWORK_PROJECT
#
# Optional vars:
#   SUBNETWORK
#   IP_ALIAS_SUBNETWORK
function detect-subnetworks() {
  if [[ -n ${SUBNETWORK:-} ]]; then
    echo "Using subnet ${SUBNETWORK}"
    return 0
  fi

  if [[ -n ${IP_ALIAS_SUBNETWORK:-} ]]; then
    SUBNETWORK=${IP_ALIAS_SUBNETWORK}
    echo "Using IP Alias subnet ${SUBNETWORK}"
    return 0
  fi

  SUBNETWORK=$(gcloud beta compute networks subnets list \
    --network=${NETWORK} \
    --regions=${REGION} \
    --project=${NETWORK_PROJECT} \
    --limit=1 \
    --format='value(name)' 2>/dev/null)

  if [[ -n ${SUBNETWORK:-} ]]; then
    echo "Found subnet for region ${REGION} in network ${NETWORK}: ${SUBNETWORK}"
    return 0
  fi

  echo "${color_red}Could not find subnetwork with region ${REGION}, network ${NETWORK}, and project ${NETWORK_PROJECT}"
}

function delete-all-firewall-rules() {
  if fws=$(gcloud compute firewall-rules list --project "${NETWORK_PROJECT}" --filter="network=${NETWORK}" --format="value(name)"); then
    echo "Deleting firewall rules remaining in network ${NETWORK}: ${fws}"
    delete-firewall-rules "$fws"
  else
    echo "Failed to list firewall rules from the network ${NETWORK}"
  fi
}

function delete-firewall-rules() {
  for fw in $@; do
    if [[ -n $(gcloud compute firewall-rules --project "${NETWORK_PROJECT}" describe "${fw}" --format='value(name)' 2>/dev/null || true) ]]; then
      gcloud compute firewall-rules delete --project "${NETWORK_PROJECT}" --quiet "${fw}" &
    fi
  done
  kube::util::wait-for-jobs || {
    echo -e "${color_red}Failed to delete firewall rules.${color_norm}" >&2
  }
}

function delete-network() {
  if [[ -n $(gcloud compute networks --project "${NETWORK_PROJECT}" describe "${NETWORK}" --format='value(name)' 2>/dev/null || true) ]]; then
    if ! gcloud compute networks delete --project "${NETWORK_PROJECT}" --quiet "${NETWORK}"; then
      echo "Failed to delete network '${NETWORK}'. Listing firewall-rules:"
      gcloud compute firewall-rules --project "${NETWORK_PROJECT}" list --filter="network=${NETWORK}"
      return 1
    fi
  fi
}

function delete-subnetworks() {
  # If running in a custom-mode network we need to delete subnets manually.
  mode="$(check-network-mode)"
  if [[ "${mode}" == "CUSTOM" ]]; then
    if [[ "${ENABLE_BIG_CLUSTER_SUBNETS}" = "true" ]]; then
      echo "Deleting default subnets..."
      # This value should be kept in sync with number of regions.
      local parallelism=9
      gcloud compute networks subnets list --network="${NETWORK}" --project "${NETWORK_PROJECT}" --format='value(region.basename())' | \
        xargs -i -P ${parallelism} gcloud --quiet compute networks subnets delete "${NETWORK}" --project "${NETWORK_PROJECT}" --region="{}" || true
    elif [[ "${CREATE_CUSTOM_NETWORK:-}" == "true" ]]; then
      echo "Deleting custom subnet..."
      gcloud --quiet compute networks subnets delete "${SUBNETWORK}" --project "${NETWORK_PROJECT}" --region="${REGION}" || true
    fi
    return
  fi

  # If we reached here, it means we're not using a custom network.
  # So the only thing we need to check is if IP-aliases was turned
  # on and we created a subnet for it. If so, we should delete it.
  if [[ ${ENABLE_IP_ALIASES:-} == "true" ]]; then
    # Only delete the subnet if we created it (i.e it's not pre-existing).
    if [[ -z "${KUBE_GCE_IP_ALIAS_SUBNETWORK:-}" ]]; then
      echo "Removing auto-created subnet ${NETWORK}:${IP_ALIAS_SUBNETWORK}"
      if [[ -n $(gcloud beta compute networks subnets describe \
            --project "${NETWORK_PROJECT}" \
            --region ${REGION} \
            ${IP_ALIAS_SUBNETWORK} 2>/dev/null) ]]; then
        gcloud beta --quiet compute networks subnets delete \
          --project "${NETWORK_PROJECT}" \
          --region ${REGION} \
          ${IP_ALIAS_SUBNETWORK}
      fi
    fi
  fi
}

# Generates SSL certificates for the etcd cluster using the cfssl program.
#
# Assumed vars:
#   KUBE_TEMP: temporary directory
#   NUM_NODES: #nodes in the cluster
#
# Args:
#  $1: host name
#  $2: CA certificate
#  $3: CA key
#
# If CA cert/key is empty, the function will also generate certs for CA.
#
# Vars set:
#   ETCD_CA_KEY_BASE64
#   ETCD_CA_CERT_BASE64
#   ETCD_PEER_KEY_BASE64
#   ETCD_PEER_CERT_BASE64
#
function create-etcd-certs {
  local host=${1}
  local ca_cert=${2:-}
  local ca_key=${3:-}

  GEN_ETCD_CA_CERT="${ca_cert}" GEN_ETCD_CA_KEY="${ca_key}" \
    generate-etcd-cert "${KUBE_TEMP}/cfssl" "${host}" "peer" "peer"

  pushd "${KUBE_TEMP}/cfssl"
  ETCD_CA_KEY_BASE64=$(cat "ca-key.pem" | base64 | tr -d '\r\n')
  ETCD_CA_CERT_BASE64=$(cat "ca.pem" | gzip | base64 | tr -d '\r\n')
  ETCD_PEER_KEY_BASE64=$(cat "peer-key.pem" | base64 | tr -d '\r\n')
  ETCD_PEER_CERT_BASE64=$(cat "peer.pem" | gzip | base64 | tr -d '\r\n')
  popd
}

function create-master() {
  echo "Starting master and configuring firewalls"
  gcloud compute firewall-rules create "${MASTER_NAME}-https" \
    --project "${NETWORK_PROJECT}" \
    --network "${NETWORK}" \
    --target-tags "${MASTER_TAG}" \
    --allow tcp:443 &

  # We have to make sure the disk is created before creating the master VM, so
  # run this in the foreground.
  gcloud compute disks create "${MASTER_NAME}-pd" \
    --project "${PROJECT}" \
    --zone "${ZONE}" \
    --type "${MASTER_DISK_TYPE}" \
    --size "${MASTER_DISK_SIZE}"

  # Create rule for accessing and securing etcd servers.
  if ! gcloud compute firewall-rules --project "${NETWORK_PROJECT}" describe "${MASTER_NAME}-etcd" &>/dev/null; then
    gcloud compute firewall-rules create "${MASTER_NAME}-etcd" \
      --project "${NETWORK_PROJECT}" \
      --network "${NETWORK}" \
      --source-tags "${MASTER_TAG}" \
      --allow "tcp:2380,tcp:2381" \
      --target-tags "${MASTER_TAG}" &
  fi

  # Generate a bearer token for this cluster. We push this separately
  # from the other cluster variables so that the client (this
  # computer) can forget it later. This should disappear with
  # http://issue.k8s.io/3168
  KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
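  # (For reference: the pipeline above reads 128 random bytes, base64-encodes
  # them, strips the "=+/" characters, and keeps the first 32 characters,
  # yielding an alphanumeric bearer token.)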
  if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
    NODE_PROBLEM_DETECTOR_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
  fi

  # Reserve the master's IP so that it can later be transferred to another VM
  # without disrupting the kubelets.
  create-static-ip "${MASTER_NAME}-ip" "${REGION}"
  MASTER_RESERVED_IP=$(gcloud compute addresses describe "${MASTER_NAME}-ip" \
    --project "${PROJECT}" --region "${REGION}" -q --format='value(address)')

  if [[ "${REGISTER_MASTER_KUBELET:-}" == "true" ]]; then
    KUBELET_APISERVER="${MASTER_RESERVED_IP}"
  fi

  KUBERNETES_MASTER_NAME="${MASTER_RESERVED_IP}"
  MASTER_ADVERTISE_ADDRESS="${MASTER_RESERVED_IP}"

  create-certs "${MASTER_RESERVED_IP}"
  create-etcd-certs ${MASTER_NAME}

  if [[ "${NUM_NODES}" -ge "50" ]]; then
    # We block on master creation for large clusters to avoid doing too much
    # unnecessary work in case master start-up fails (like creation of nodes).
    create-master-instance "${MASTER_RESERVED_IP}"
  else
    create-master-instance "${MASTER_RESERVED_IP}" &
  fi
}

# Adds master replica to etcd cluster.
#
# Assumed vars:
#   REPLICA_NAME
#   PROJECT
#   EXISTING_MASTER_NAME
#   EXISTING_MASTER_ZONE
#
# $1: etcd client port
# $2: etcd internal port
# returns the result of ssh command which adds replica
function add-replica-to-etcd() {
  local -r client_port="${1}"
  local -r internal_port="${2}"
  gcloud compute ssh "${EXISTING_MASTER_NAME}" \
    --project "${PROJECT}" \
    --zone "${EXISTING_MASTER_ZONE}" \
    --command \
      "curl localhost:${client_port}/v2/members -XPOST -H \"Content-Type: application/json\" -d '{\"peerURLs\":[\"https://${REPLICA_NAME}:${internal_port}\"]}' -s"
  return $?
}

# Sets EXISTING_MASTER_NAME and EXISTING_MASTER_ZONE variables.
#
# Assumed vars:
#   PROJECT
#
# NOTE: Must be in sync with get-replica-name-regexp
function set-existing-master() {
  local existing_master=$(gcloud compute instances list \
    --project "${PROJECT}" \
    --filter "name ~ '$(get-replica-name-regexp)'" \
    --format "value(name,zone)" | head -n1)
  EXISTING_MASTER_NAME="$(echo "${existing_master}" | cut -f1)"
  EXISTING_MASTER_ZONE="$(echo "${existing_master}" | cut -f2)"
}

function replicate-master() {
  set-replica-name
  set-existing-master

  echo "Experimental: replicating existing master ${EXISTING_MASTER_ZONE}/${EXISTING_MASTER_NAME} as ${ZONE}/${REPLICA_NAME}"

  # Before we do anything else, we should configure etcd to expect more replicas.
  if ! add-replica-to-etcd 2379 2380; then
    echo "Failed to add master replica to etcd cluster."
    return 1
  fi
  if ! add-replica-to-etcd 4002 2381; then
    echo "Failed to add master replica to etcd events cluster."
    return 1
  fi

  # We have to make sure the disk is created before creating the master VM, so
  # run this in the foreground.
  gcloud compute disks create "${REPLICA_NAME}-pd" \
    --project "${PROJECT}" \
    --zone "${ZONE}" \
    --type "${MASTER_DISK_TYPE}" \
    --size "${MASTER_DISK_SIZE}"

  local existing_master_replicas="$(get-all-replica-names)"
  replicate-master-instance "${EXISTING_MASTER_ZONE}" "${EXISTING_MASTER_NAME}" "${existing_master_replicas}"

  # Add new replica to the load balancer.
  gcloud compute target-pools add-instances "${MASTER_NAME}" \
    --project "${PROJECT}" \
    --zone "${ZONE}" \
    --instances "${REPLICA_NAME}"
}

# Detaches the old and attaches a new external IP to a VM.
#
# Arguments:
#   $1 - VM name
#   $2 - VM zone
#   $3 - external static IP; if empty will use an ephemeral IP address.
function attach-external-ip() {
  local NAME=${1}
  local ZONE=${2}
  local IP_ADDR=${3:-}
  local ACCESS_CONFIG_NAME=$(gcloud compute instances describe "${NAME}" \
    --project "${PROJECT}" --zone "${ZONE}" \
    --format="value(networkInterfaces[0].accessConfigs[0].name)")
  gcloud compute instances delete-access-config "${NAME}" \
    --project "${PROJECT}" --zone "${ZONE}" \
    --access-config-name "${ACCESS_CONFIG_NAME}"
  if [[ -z ${IP_ADDR} ]]; then
    gcloud compute instances add-access-config "${NAME}" \
      --project "${PROJECT}" --zone "${ZONE}" \
      --access-config-name "${ACCESS_CONFIG_NAME}"
  else
    gcloud compute instances add-access-config "${NAME}" \
      --project "${PROJECT}" --zone "${ZONE}" \
      --access-config-name "${ACCESS_CONFIG_NAME}" \
      --address "${IP_ADDR}"
  fi
}
|
|
|
|
|
|
|
|
# Creates a load balancer in front of the apiserver if it doesn't exist already. Assumes there's only one
|
|
|
|
# existing master replica.
|
|
|
|
#
|
|
|
|
# Assumes:
|
|
|
|
# PROJECT
|
|
|
|
# MASTER_NAME
|
|
|
|
# ZONE
|
2016-07-20 15:25:25 +00:00
|
|
|
# REGION
|
2016-07-20 14:37:31 +00:00
|
|
|
function create-loadbalancer() {
|
|
|
|
detect-master
|
|
|
|
|
|
|
|
# Step 0: Return early if LB is already configured.
|
|
|
|
if gcloud compute forwarding-rules describe "${MASTER_NAME}" \
|
|
|
|
--project "${PROJECT}" --region ${REGION} > /dev/null 2>&1; then
|
|
|
|
echo "Load balancer already exists"
|
|
|
|
return
|
|
|
|
fi
|
2016-11-22 13:27:55 +00:00
|
|
|
|
|
|
|
local EXISTING_MASTER_NAME="$(get-all-replica-names)"
|
|
|
|
local EXISTING_MASTER_ZONE=$(gcloud compute instances list "${EXISTING_MASTER_NAME}" \
|
2016-07-20 14:37:31 +00:00
|
|
|
--project "${PROJECT}" --format="value(zone)")
|
2016-11-22 13:27:55 +00:00
|
|
|
|
2016-07-20 14:37:31 +00:00
|
|
|
echo "Creating load balancer in front of an already existing master in ${EXISTING_MASTER_ZONE}"
|
|
|
|
|
|
|
|
# Step 1: Detach master IP address and attach ephemeral address to the existing master
|
2016-11-22 13:27:55 +00:00
|
|
|
attach-external-ip "${EXISTING_MASTER_NAME}" "${EXISTING_MASTER_ZONE}"
|
2016-07-20 14:37:31 +00:00
|
|
|
|
|
|
|
# Step 2: Create target pool.
|
2016-12-01 11:59:41 +00:00
|
|
|
gcloud compute target-pools create "${MASTER_NAME}" --project "${PROJECT}" --region "${REGION}"
|
2016-07-20 14:37:31 +00:00
|
|
|
# TODO: We should also add master instances with suffixes
|
2016-12-01 11:59:41 +00:00
|
|
|
gcloud compute target-pools add-instances "${MASTER_NAME}" --instances "${EXISTING_MASTER_NAME}" --project "${PROJECT}" --zone "${EXISTING_MASTER_ZONE}"
|
2016-07-20 14:37:31 +00:00
|
|
|
|
|
|
|
# Step 3: Create forwarding rule.
|
|
|
|
# TODO: This step can take up to 20 min. We need to speed this up...
|
|
|
|
gcloud compute forwarding-rules create "${MASTER_NAME}" \
|
|
|
|
--project "${PROJECT}" --region ${REGION} \
|
|
|
|
--target-pool "${MASTER_NAME}" --address="${KUBE_MASTER_IP}" --ports=443
|
|
|
|
|
|
|
|
echo -n "Waiting for the load balancer configuration to propagate..."
|
2016-07-20 15:25:25 +00:00
|
|
|
local counter=0
|
|
|
|
until curl -k -m1 "https://${KUBE_MASTER_IP}" &> /dev/null; do
|
|
|
|
counter=$((counter+1))
|
|
|
|
echo -n .
|
|
|
|
if [[ ${counter} -ge 1800 ]]; then
|
|
|
|
echo -e "${color_red}TIMEOUT${color_norm}" >&2
|
|
|
|
echo -e "${color_red}Load balancer failed to initialize within ${counter} seconds.${color_norm}" >&2
|
|
|
|
exit 2
|
|
|
|
fi
|
|
|
|
done
|
2016-07-20 14:37:31 +00:00
|
|
|
echo "DONE"
|
|
|
|
}
|
|
|
|
|
2015-11-29 19:38:03 +00:00
|
|
|
function create-nodes-firewall() {
|
2014-12-16 18:22:29 +00:00
|
|
|
# Create a single firewall rule for all minions.
|
2015-11-24 03:06:00 +00:00
|
|
|
create-firewall-rule "${NODE_TAG}-all" "${CLUSTER_IP_RANGE}" "${NODE_TAG}" &
|
2014-12-09 23:37:06 +00:00
|
|
|
|
2015-02-23 21:57:09 +00:00
|
|
|
# Report logging choice (if any).
|
|
|
|
if [[ "${ENABLE_NODE_LOGGING-}" == "true" ]]; then
|
|
|
|
echo "+++ Logging using Fluentd to ${LOGGING_DESTINATION:-unknown}"
|
|
|
|
fi
|
|
|
|
|
2015-03-02 22:38:58 +00:00
|
|
|
# Wait for last batch of jobs
|
2016-01-11 23:23:21 +00:00
|
|
|
kube::util::wait-for-jobs || {
|
2016-01-22 22:42:32 +00:00
|
|
|
echo -e "${color_red}Some commands failed.${color_norm}" >&2
|
2016-01-11 23:23:21 +00:00
|
|
|
}
|
2015-11-29 19:38:03 +00:00
|
|
|
}
|
2015-03-02 22:38:58 +00:00
|
|
|
|
2017-06-28 17:50:58 +00:00
|
|
|
function get-scope-flags() {
|
2015-06-29 23:47:36 +00:00
|
|
|
local scope_flags=
|
2017-06-22 07:59:07 +00:00
|
|
|
if [[ -n "${NODE_SCOPES}" ]]; then
|
2015-11-24 03:05:07 +00:00
|
|
|
scope_flags="--scopes ${NODE_SCOPES}"
|
2014-12-09 23:37:06 +00:00
|
|
|
else
|
2015-06-29 23:47:36 +00:00
|
|
|
scope_flags="--no-scopes"
|
2014-12-09 23:37:06 +00:00
|
|
|
fi
|
2017-06-28 17:50:58 +00:00
|
|
|
echo "${scope_flags}"
|
|
|
|
}
|
|
|
|
|
|
|
|
function create-nodes-template() {
|
|
|
|
echo "Creating nodes."
|
|
|
|
|
|
|
|
local scope_flags=$(get-scope-flags)
|
2015-01-28 14:57:10 +00:00
|
|
|
|
2015-03-02 22:38:58 +00:00
|
|
|
write-node-env
|
2015-09-28 23:22:13 +00:00
|
|
|
|
|
|
|
local template_name="${NODE_INSTANCE_PREFIX}-template"
|
|
|
|
create-node-instance-template "${template_name}"
|
2015-11-29 19:38:03 +00:00
|
|
|
}
|
|
|
|
|
2016-02-08 09:21:04 +00:00
|
|
|
# Assumes:
|
|
|
|
# - MAX_INSTANCES_PER_MIG
|
|
|
|
# - NUM_NODES
|
2016-02-26 00:00:16 +00:00
|
|
|
# exports:
|
2016-02-08 09:21:04 +00:00
|
|
|
# - NUM_MIGS
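# Worked example (illustrative numbers): NUM_NODES=2500 with the default
# MAX_INSTANCES_PER_MIG=1000 gives NUM_MIGS=(2500+999)/1000=3.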
|
|
|
|
function set_num_migs() {
|
2016-03-14 11:19:51 +00:00
|
|
|
local defaulted_max_instances_per_mig=${MAX_INSTANCES_PER_MIG:-1000}
|
2015-12-11 08:09:09 +00:00
|
|
|
|
|
|
|
if [[ ${defaulted_max_instances_per_mig} -le "0" ]]; then
|
2016-03-14 11:19:51 +00:00
|
|
|
echo "MAX_INSTANCES_PER_MIG cannot be negative. Assuming default 1000"
|
|
|
|
defaulted_max_instances_per_mig=1000
|
2015-12-11 08:09:09 +00:00
|
|
|
fi
|
2016-02-08 09:21:04 +00:00
|
|
|
export NUM_MIGS=$(((${NUM_NODES} + ${defaulted_max_instances_per_mig} - 1) / ${defaulted_max_instances_per_mig}))
|
|
|
|
}
|
|
|
|
|
|
|
|
# Assumes:
|
|
|
|
# - NUM_MIGS
|
|
|
|
# - NODE_INSTANCE_PREFIX
|
|
|
|
# - NUM_NODES
|
|
|
|
# - PROJECT
|
|
|
|
# - ZONE
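# Illustrative spread (hypothetical numbers): with 2500 nodes remaining and
# NUM_MIGS=3, the loop below creates groups of 833, 833 and 834 instances;
# only the last group keeps the unsuffixed "${NODE_INSTANCE_PREFIX}-group" name.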
|
|
|
|
function create-nodes() {
|
|
|
|
local template_name="${NODE_INSTANCE_PREFIX}-template"
|
|
|
|
|
2017-06-28 17:50:58 +00:00
|
|
|
if [[ -z "${HEAPSTER_MACHINE_TYPE:-}" ]]; then
|
|
|
|
local -r nodes="${NUM_NODES}"
|
|
|
|
else
|
2018-06-21 09:00:18 +00:00
|
|
|
echo "Creating a special node for heapster with machine-type ${HEAPSTER_MACHINE_TYPE}"
|
|
|
|
create-heapster-node
|
2017-06-28 17:50:58 +00:00
|
|
|
local -r nodes=$(( NUM_NODES - 1 ))
|
|
|
|
fi
|
|
|
|
|
|
|
|
local instances_left=${nodes}
|
2015-12-11 08:09:09 +00:00
|
|
|
|
|
|
|
#TODO: parallelize this loop to speed up the process
|
2016-05-09 14:23:00 +00:00
|
|
|
for ((i=1; i<=${NUM_MIGS}; i++)); do
|
|
|
|
local group_name="${NODE_INSTANCE_PREFIX}-group-$i"
|
|
|
|
if [[ $i == ${NUM_MIGS} ]]; then
|
|
|
|
# TODO: We don't add a suffix for the last group to keep backward compatibility when there's only one MIG.
|
|
|
|
# We should change it at some point, but note #18545 when changing this.
|
|
|
|
group_name="${NODE_INSTANCE_PREFIX}-group"
|
|
|
|
fi
|
|
|
|
# Spread the remaining number of nodes evenly
|
|
|
|
this_mig_size=$((${instances_left} / (${NUM_MIGS}-${i}+1)))
|
|
|
|
instances_left=$((instances_left-${this_mig_size}))
|
2016-06-23 18:22:04 +00:00
|
|
|
|
2015-12-11 08:09:09 +00:00
|
|
|
gcloud compute instance-groups managed \
|
2016-05-09 14:23:00 +00:00
|
|
|
create "${group_name}" \
|
2015-12-11 08:09:09 +00:00
|
|
|
--project "${PROJECT}" \
|
|
|
|
--zone "${ZONE}" \
|
2016-05-23 12:36:08 +00:00
|
|
|
--base-instance-name "${group_name}" \
|
2016-05-09 14:23:00 +00:00
|
|
|
--size "${this_mig_size}" \
|
2015-12-11 08:09:09 +00:00
|
|
|
--template "$template_name" || true;
|
|
|
|
gcloud compute instance-groups managed wait-until-stable \
|
2016-05-09 14:23:00 +00:00
|
|
|
"${group_name}" \
|
2016-04-13 19:40:34 +00:00
|
|
|
--zone "${ZONE}" \
|
|
|
|
--project "${PROJECT}" || true;
|
2015-12-11 08:09:09 +00:00
|
|
|
done
|
2017-06-28 17:50:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
# Assumes:
|
|
|
|
# - NODE_INSTANCE_PREFIX
|
|
|
|
# - PROJECT
|
2017-09-04 16:55:56 +00:00
|
|
|
# - NETWORK_PROJECT
|
|
|
|
# - REGION
|
2017-06-28 17:50:58 +00:00
|
|
|
# - ZONE
|
|
|
|
# - HEAPSTER_MACHINE_TYPE
|
|
|
|
# - NODE_DISK_TYPE
|
|
|
|
# - NODE_DISK_SIZE
|
|
|
|
# - NODE_IMAGE_PROJECT
|
|
|
|
# - NODE_IMAGE
|
2017-09-21 22:07:47 +00:00
|
|
|
# - NODE_SERVICE_ACCOUNT
|
2017-06-28 17:50:58 +00:00
|
|
|
# - NODE_TAG
|
|
|
|
# - NETWORK
|
|
|
|
# - ENABLE_IP_ALIASES
|
2017-09-04 16:55:56 +00:00
|
|
|
# - SUBNETWORK
|
2017-06-28 17:50:58 +00:00
|
|
|
# - IP_ALIAS_SIZE
|
|
|
|
function create-heapster-node() {
|
2017-09-05 12:45:26 +00:00
|
|
|
local gcloud="gcloud"
|
|
|
|
|
|
|
|
if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
|
|
|
|
gcloud="gcloud beta"
|
|
|
|
fi
|
|
|
|
|
2017-06-28 17:50:58 +00:00
|
|
|
local network=$(make-gcloud-network-argument \
|
2017-09-04 16:55:56 +00:00
|
|
|
"${NETWORK_PROJECT}" \
|
|
|
|
"${REGION}" \
|
2017-09-19 14:05:41 +00:00
|
|
|
"${NETWORK}" \
|
2017-09-06 18:59:47 +00:00
|
|
|
"${SUBNETWORK:-}" \
|
2017-09-04 16:55:56 +00:00
|
|
|
"" \
|
2017-06-28 17:50:58 +00:00
|
|
|
"${ENABLE_IP_ALIASES:-}" \
|
|
|
|
"${IP_ALIAS_SIZE:-}")
|
|
|
|
|
2017-09-05 12:45:26 +00:00
|
|
|
${gcloud} compute instances \
|
2017-06-28 17:50:58 +00:00
|
|
|
create "${NODE_INSTANCE_PREFIX}-heapster" \
|
|
|
|
--project "${PROJECT}" \
|
|
|
|
--zone "${ZONE}" \
|
|
|
|
--machine-type="${HEAPSTER_MACHINE_TYPE}" \
|
|
|
|
--boot-disk-type "${NODE_DISK_TYPE}" \
|
|
|
|
--boot-disk-size "${NODE_DISK_SIZE}" \
|
|
|
|
--image-project="${NODE_IMAGE_PROJECT}" \
|
|
|
|
--image "${NODE_IMAGE}" \
|
2017-09-21 22:07:47 +00:00
|
|
|
--service-account "${NODE_SERVICE_ACCOUNT}" \
|
2017-06-28 17:50:58 +00:00
|
|
|
--tags "${NODE_TAG}" \
|
|
|
|
${network} \
|
|
|
|
$(get-scope-flags) \
|
|
|
|
--metadata-from-file "$(get-node-instance-metadata)"
|
2016-05-09 14:23:00 +00:00
|
|
|
}
|
2015-12-11 08:09:09 +00:00
|
|
|
|
2016-05-09 14:23:00 +00:00
|
|
|
# Assumes:
|
|
|
|
# - NUM_MIGS
|
|
|
|
# - NODE_INSTANCE_PREFIX
|
|
|
|
# - PROJECT
|
|
|
|
# - ZONE
|
|
|
|
# - AUTOSCALER_MAX_NODES
|
|
|
|
# - AUTOSCALER_MIN_NODES
|
|
|
|
# Exports
|
|
|
|
# - AUTOSCALER_MIG_CONFIG
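# Worked example (illustrative numbers): AUTOSCALER_MIN_NODES=1,
# AUTOSCALER_MAX_NODES=10 and NUM_MIGS=2 produce
#   --nodes=0:5:<mig-url-of-group-1> --nodes=1:5:<mig-url-of-group>
# (the even split below assigns the remainders to the later groups).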
|
|
|
|
function create-cluster-autoscaler-mig-config() {
|
|
|
|
|
|
|
|
# The minimum number of nodes for the cluster autoscaler cannot be
|
2016-06-23 18:22:04 +00:00
|
|
|
# negative.
|
2017-06-14 05:36:18 +00:00
|
|
|
if [[ ${AUTOSCALER_MIN_NODES} -lt 0 ]]; then
|
|
|
|
echo "AUTOSCALER_MIN_NODES must be greater or equal 0"
|
2016-05-31 17:41:54 +00:00
|
|
|
exit 2
|
2016-05-09 14:23:00 +00:00
|
|
|
fi
|
|
|
|
|
|
|
|
# Each MIG must be allowed at least one node, so the max number of nodes
|
2016-06-23 18:22:04 +00:00
|
|
|
# must be greater than or equal to the number of MIGs.
|
2017-05-19 12:50:55 +00:00
|
|
|
if [[ ${AUTOSCALER_MAX_NODES} -lt ${NUM_MIGS} ]]; then
|
2016-05-09 14:23:00 +00:00
|
|
|
echo "AUTOSCALER_MAX_NODES must be greater or equal ${NUM_MIGS}"
|
2016-05-31 17:41:54 +00:00
|
|
|
exit 2
|
2016-05-09 14:23:00 +00:00
|
|
|
fi
|
|
|
|
|
2016-06-23 18:22:04 +00:00
|
|
|
# The code assumes that the migs were created with create-nodes
|
2016-05-09 14:23:00 +00:00
|
|
|
# function which tries to evenly spread nodes across the migs.
|
|
|
|
AUTOSCALER_MIG_CONFIG=""
|
|
|
|
|
|
|
|
local left_min=${AUTOSCALER_MIN_NODES}
|
|
|
|
local left_max=${AUTOSCALER_MAX_NODES}
|
|
|
|
|
|
|
|
for ((i=1; i<=${NUM_MIGS}; i++)); do
|
|
|
|
local group_name="${NODE_INSTANCE_PREFIX}-group-$i"
|
|
|
|
if [[ $i == ${NUM_MIGS} ]]; then
|
|
|
|
# TODO: We don't add a suffix for the last group to keep backward compatibility when there's only one MIG.
|
|
|
|
# We should change it at some point, but note #18545 when changing this.
|
|
|
|
group_name="${NODE_INSTANCE_PREFIX}-group"
|
|
|
|
fi
|
|
|
|
|
|
|
|
this_mig_min=$((${left_min}/(${NUM_MIGS}-${i}+1)))
|
|
|
|
this_mig_max=$((${left_max}/(${NUM_MIGS}-${i}+1)))
|
|
|
|
left_min=$((left_min-$this_mig_min))
|
|
|
|
left_max=$((left_max-$this_mig_max))
|
|
|
|
|
|
|
|
local mig_url="https://www.googleapis.com/compute/v1/projects/${PROJECT}/zones/${ZONE}/instanceGroups/${group_name}"
|
|
|
|
AUTOSCALER_MIG_CONFIG="${AUTOSCALER_MIG_CONFIG} --nodes=${this_mig_min}:${this_mig_max}:${mig_url}"
|
|
|
|
done
|
2016-05-30 15:07:54 +00:00
|
|
|
|
2016-06-07 19:42:56 +00:00
|
|
|
AUTOSCALER_MIG_CONFIG="${AUTOSCALER_MIG_CONFIG} --scale-down-enabled=${AUTOSCALER_ENABLE_SCALE_DOWN}"
|
2015-11-29 19:38:03 +00:00
|
|
|
}
|
2015-12-11 08:09:09 +00:00
|
|
|
|
2016-02-08 09:21:04 +00:00
|
|
|
# Assumes:
|
|
|
|
# - NUM_MIGS
|
|
|
|
# - NODE_INSTANCE_PREFIX
|
|
|
|
# - PROJECT
|
|
|
|
# - ZONE
|
2016-06-07 20:10:17 +00:00
|
|
|
# - ENABLE_CLUSTER_AUTOSCALER
|
2016-02-08 09:21:04 +00:00
|
|
|
# - AUTOSCALER_MAX_NODES
|
|
|
|
# - AUTOSCALER_MIN_NODES
|
2016-05-09 14:23:00 +00:00
|
|
|
function create-autoscaler-config() {
|
|
|
|
# Create autoscaler for nodes configuration if requested
|
2016-06-07 20:10:17 +00:00
|
|
|
if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
|
2016-05-09 14:23:00 +00:00
|
|
|
create-cluster-autoscaler-mig-config
|
2017-06-16 07:54:22 +00:00
|
|
|
echo "Using autoscaler config: ${AUTOSCALER_MIG_CONFIG} ${AUTOSCALER_EXPANDER_CONFIG}"
|
2015-07-08 14:48:33 +00:00
|
|
|
fi
|
2015-11-29 19:38:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
function check-cluster() {
|
|
|
|
detect-node-names
|
|
|
|
detect-master
|
2015-07-08 14:48:33 +00:00
|
|
|
|
2015-10-07 01:51:27 +00:00
|
|
|
echo "Waiting up to ${KUBE_CLUSTER_INITIALIZATION_TIMEOUT} seconds for cluster initialization."
|
2014-07-14 17:50:04 +00:00
|
|
|
echo
|
|
|
|
echo " This will continually check to see if the API for kubernetes is reachable."
|
2015-10-07 01:51:27 +00:00
|
|
|
echo " This may time out if there was some uncaught error during start up."
|
2014-07-14 17:50:04 +00:00
|
|
|
echo
|
|
|
|
|
2015-05-28 02:53:24 +00:00
|
|
|
# curl in mavericks is borked.
|
|
|
|
secure=""
|
2015-11-03 04:32:42 +00:00
|
|
|
if which sw_vers >& /dev/null; then
|
2015-05-28 02:53:24 +00:00
|
|
|
if [[ $(sw_vers | grep ProductVersion | awk '{print $2}') = "10.9."* ]]; then
|
|
|
|
secure="--insecure"
|
|
|
|
fi
|
|
|
|
fi
|
|
|
|
|
2015-10-07 01:51:27 +00:00
|
|
|
local start_time=$(date +%s)
|
2017-11-14 10:05:17 +00:00
|
|
|
local curl_out=$(mktemp)
|
|
|
|
kube::util::trap_add "rm -f ${curl_out}" EXIT
|
2015-05-11 18:43:44 +00:00
|
|
|
until curl --cacert "${CERT_DIR}/pki/ca.crt" \
|
|
|
|
-H "Authorization: Bearer ${KUBE_BEARER_TOKEN}" \
|
2015-05-28 02:53:24 +00:00
|
|
|
${secure} \
|
2017-11-14 10:05:17 +00:00
|
|
|
--max-time 5 --fail \
|
2017-12-06 14:17:30 +00:00
|
|
|
"https://${KUBE_MASTER_IP}/api/v1/pods?limit=100" > "${curl_out}" 2>&1; do
|
2015-10-07 01:51:27 +00:00
|
|
|
local elapsed=$(($(date +%s) - ${start_time}))
|
|
|
|
if [[ ${elapsed} -gt ${KUBE_CLUSTER_INITIALIZATION_TIMEOUT} ]]; then
|
2015-10-07 18:19:32 +00:00
|
|
|
echo -e "${color_red}Cluster failed to initialize within ${KUBE_CLUSTER_INITIALIZATION_TIMEOUT} seconds.${color_norm}" >&2
|
2017-11-14 10:05:17 +00:00
|
|
|
echo "Last output from querying API server follows:" >&2
|
|
|
|
echo "-----------------------------------------------------" >&2
|
|
|
|
cat "${curl_out}" >&2
|
|
|
|
echo "-----------------------------------------------------" >&2
|
2015-10-07 01:51:27 +00:00
|
|
|
exit 2
|
|
|
|
fi
|
2014-07-14 17:50:04 +00:00
|
|
|
printf "."
|
|
|
|
sleep 2
|
|
|
|
done
|
|
|
|
|
|
|
|
echo "Kubernetes cluster created."
|
2014-08-06 16:57:00 +00:00
|
|
|
|
2015-05-11 18:43:44 +00:00
|
|
|
export KUBE_CERT="${CERT_DIR}/pki/issued/kubecfg.crt"
|
|
|
|
export KUBE_KEY="${CERT_DIR}/pki/private/kubecfg.key"
|
|
|
|
export CA_CERT="${CERT_DIR}/pki/ca.crt"
|
2015-03-06 22:49:25 +00:00
|
|
|
export CONTEXT="${PROJECT}_${INSTANCE_PREFIX}"
|
2015-02-02 21:49:03 +00:00
|
|
|
(
|
|
|
|
umask 077
|
2016-06-08 00:30:15 +00:00
|
|
|
|
|
|
|
# Update the user's kubeconfig to include credentials for this apiserver.
|
2015-03-06 22:49:25 +00:00
|
|
|
create-kubeconfig
|
2014-09-23 22:54:27 +00:00
|
|
|
)
|
2014-12-09 23:37:06 +00:00
|
|
|
|
2015-08-22 01:47:31 +00:00
|
|
|
# ensures KUBECONFIG is set
|
|
|
|
get-kubeconfig-basicauth
|
2014-12-09 23:37:06 +00:00
|
|
|
echo
|
|
|
|
echo -e "${color_green}Kubernetes cluster is running. The master is running at:"
|
|
|
|
echo
|
|
|
|
echo -e "${color_yellow} https://${KUBE_MASTER_IP}"
|
|
|
|
echo
|
2015-03-06 22:49:25 +00:00
|
|
|
echo -e "${color_green}The user name and password to use is located in ${KUBECONFIG}.${color_norm}"
|
2014-12-09 23:37:06 +00:00
|
|
|
echo
|
|
|
|
|
2014-07-14 17:50:04 +00:00
|
|
|
}
|
|
|
|
|
2016-08-02 07:08:05 +00:00
|
|
|
# Removes master replica from etcd cluster.
|
|
|
|
#
|
|
|
|
# Assumed vars:
|
|
|
|
# REPLICA_NAME
|
|
|
|
# PROJECT
|
|
|
|
# EXISTING_MASTER_NAME
|
|
|
|
# EXISTING_MASTER_ZONE
|
|
|
|
#
|
|
|
|
# $1: etcd client port
|
|
|
|
# returns the result of the ssh command which removes the replica
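# Example (ports as used by kube-down in this file):
#   remove-replica-from-etcd 2379   # main etcd cluster
#   remove-replica-from-etcd 4002   # etcd events cluster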
|
|
|
|
function remove-replica-from-etcd() {
|
|
|
|
local -r port="${1}"
|
2016-10-19 00:32:56 +00:00
|
|
|
[[ -n "${EXISTING_MASTER_NAME}" ]] || return
|
2016-08-02 07:08:05 +00:00
|
|
|
gcloud compute ssh "${EXISTING_MASTER_NAME}" \
|
|
|
|
--project "${PROJECT}" \
|
|
|
|
--zone "${EXISTING_MASTER_ZONE}" \
|
|
|
|
--command \
|
2016-09-28 13:25:18 +00:00
|
|
|
"curl -s localhost:${port}/v2/members/\$(curl -s localhost:${port}/v2/members -XGET | sed 's/{\\\"id/\n/g' | grep ${REPLICA_NAME}\\\" | cut -f 3 -d \\\") -XDELETE -L 2>/dev/null"
|
|
|
|
local -r res=$?
|
|
|
|
echo "Removing etcd replica, name: ${REPLICA_NAME}, port: ${port}, result: ${res}"
|
|
|
|
return "${res}"
|
2016-08-02 07:08:05 +00:00
|
|
|
}
|
|
|
|
|
2014-12-09 23:07:54 +00:00
|
|
|
# Delete a kubernetes cluster. This is called from test-teardown.
|
2014-12-09 00:52:43 +00:00
|
|
|
#
|
|
|
|
# Assumed vars:
|
|
|
|
# MASTER_NAME
|
2015-01-28 14:57:10 +00:00
|
|
|
# NODE_INSTANCE_PREFIX
|
2014-12-09 00:52:43 +00:00
|
|
|
# ZONE
|
|
|
|
# This function tears down cluster resources in batches to avoid issuing too many
|
|
|
|
# API calls and exceeding API quota. It is important to bring down the instances before bringing
|
|
|
|
# down the firewall rules and routes.
|
2016-08-02 07:08:05 +00:00
|
|
|
function kube-down() {
|
2016-06-13 21:01:59 +00:00
|
|
|
local -r batch=200
|
|
|
|
|
2014-07-14 17:50:04 +00:00
|
|
|
detect-project
|
2015-12-11 08:09:09 +00:00
|
|
|
detect-node-names # For INSTANCE_GROUPS
|
2014-07-14 17:50:04 +00:00
|
|
|
|
|
|
|
echo "Bringing down cluster"
|
2015-05-12 23:12:15 +00:00
|
|
|
set +e # Do not stop on error
|
2014-12-09 00:52:43 +00:00
|
|
|
|
2016-09-16 14:51:52 +00:00
|
|
|
if [[ "${KUBE_DELETE_NODES:-}" != "false" ]]; then
|
|
|
|
# Get the name of the managed instance group template before we delete the
|
|
|
|
# managed instance group. (The name of the managed instance group template may
|
|
|
|
# change during a cluster upgrade.)
|
|
|
|
local templates=$(get-template "${PROJECT}")
|
|
|
|
|
|
|
|
for group in ${INSTANCE_GROUPS[@]:-}; do
|
|
|
|
if gcloud compute instance-groups managed describe "${group}" --project "${PROJECT}" --zone "${ZONE}" &>/dev/null; then
|
|
|
|
gcloud compute instance-groups managed delete \
|
|
|
|
--project "${PROJECT}" \
|
|
|
|
--quiet \
|
|
|
|
--zone "${ZONE}" \
|
|
|
|
"${group}" &
|
|
|
|
fi
|
|
|
|
done
|
2016-06-13 21:01:59 +00:00
|
|
|
|
2016-09-16 14:51:52 +00:00
|
|
|
# Wait for last batch of jobs
|
|
|
|
kube::util::wait-for-jobs || {
|
|
|
|
echo -e "Failed to delete instance group(s)." >&2
|
|
|
|
}
|
|
|
|
|
|
|
|
for template in ${templates[@]:-}; do
|
|
|
|
if gcloud compute instance-templates describe --project "${PROJECT}" "${template}" &>/dev/null; then
|
|
|
|
gcloud compute instance-templates delete \
|
|
|
|
--project "${PROJECT}" \
|
|
|
|
--quiet \
|
|
|
|
"${template}"
|
|
|
|
fi
|
|
|
|
done
|
2017-06-28 17:50:58 +00:00
|
|
|
|
|
|
|
# Delete the special heapster node (if it exists).
|
|
|
|
if [[ -n "${HEAPSTER_MACHINE_TYPE:-}" ]]; then
|
|
|
|
local -r heapster_machine_name="${NODE_INSTANCE_PREFIX}-heapster"
|
|
|
|
if gcloud compute instances describe "${heapster_machine_name}" --zone "${ZONE}" --project "${PROJECT}" &>/dev/null; then
|
|
|
|
# Now we can safely delete the VM.
|
|
|
|
gcloud compute instances delete \
|
|
|
|
--project "${PROJECT}" \
|
|
|
|
--quiet \
|
|
|
|
--delete-disks all \
|
|
|
|
--zone "${ZONE}" \
|
|
|
|
"${heapster_machine_name}"
|
|
|
|
fi
|
|
|
|
fi
|
2016-09-16 14:51:52 +00:00
|
|
|
fi
|
2015-01-28 14:57:10 +00:00
|
|
|
|
2016-11-23 14:41:54 +00:00
|
|
|
local -r REPLICA_NAME="${KUBE_REPLICA_NAME:-$(get-replica-name)}"
|
2016-08-02 07:08:05 +00:00
|
|
|
|
|
|
|
set-existing-master
|
|
|
|
|
|
|
|
# Un-register the master replica from etcd and events etcd.
|
2016-09-07 14:10:55 +00:00
|
|
|
remove-replica-from-etcd 2379
|
2016-08-02 07:08:05 +00:00
|
|
|
remove-replica-from-etcd 4002
|
|
|
|
|
|
|
|
# Delete the master replica (if it exists).
|
|
|
|
if gcloud compute instances describe "${REPLICA_NAME}" --zone "${ZONE}" --project "${PROJECT}" &>/dev/null; then
|
|
|
|
# If there is a load balancer in front of apiservers we need to first update its configuration.
|
|
|
|
if gcloud compute target-pools describe "${MASTER_NAME}" --region "${REGION}" --project "${PROJECT}" &>/dev/null; then
|
|
|
|
gcloud compute target-pools remove-instances "${MASTER_NAME}" \
|
|
|
|
--project "${PROJECT}" \
|
|
|
|
--zone "${ZONE}" \
|
|
|
|
--instances "${REPLICA_NAME}"
|
|
|
|
fi
|
|
|
|
# Now we can safely delete the VM.
|
2015-05-29 18:46:10 +00:00
|
|
|
gcloud compute instances delete \
|
|
|
|
--project "${PROJECT}" \
|
|
|
|
--quiet \
|
|
|
|
--delete-disks all \
|
|
|
|
--zone "${ZONE}" \
|
2016-08-02 07:08:05 +00:00
|
|
|
"${REPLICA_NAME}"
|
2015-05-29 18:46:10 +00:00
|
|
|
fi
|
2015-03-02 22:38:58 +00:00
|
|
|
|
2016-08-02 07:08:05 +00:00
|
|
|
# Delete the master replica pd (possibly leaked by kube-up if master create failed).
|
2016-09-14 10:00:46 +00:00
|
|
|
# TODO(jszczepkowski): remove also possibly leaked replicas' pds
|
2016-09-19 19:00:08 +00:00
|
|
|
local -r replica_pd="${REPLICA_NAME:-${MASTER_NAME}}-pd"
|
|
|
|
if gcloud compute disks describe "${replica_pd}" --zone "${ZONE}" --project "${PROJECT}" &>/dev/null; then
|
2015-05-29 18:46:10 +00:00
|
|
|
gcloud compute disks delete \
|
|
|
|
--project "${PROJECT}" \
|
|
|
|
--quiet \
|
|
|
|
--zone "${ZONE}" \
|
2016-09-19 19:00:08 +00:00
|
|
|
"${replica_pd}"
|
2015-05-29 18:46:10 +00:00
|
|
|
fi
|
2015-03-02 22:38:58 +00:00
|
|
|
|
2016-07-20 14:37:31 +00:00
|
|
|
# Check if there are any remaining master replicas.
|
|
|
|
local REMAINING_MASTER_COUNT=$(gcloud compute instances list \
|
|
|
|
--project "${PROJECT}" \
|
2017-08-09 16:45:44 +00:00
|
|
|
--filter="name ~ '$(get-replica-name-regexp)'" \
|
2016-07-20 14:37:31 +00:00
|
|
|
--format "value(zone)" | wc -l)
|
|
|
|
|
|
|
|
# In the replicated scenario, if there's only a single master left, we should also delete the load balancer in front of it.
|
2016-11-29 17:50:31 +00:00
|
|
|
if [[ "${REMAINING_MASTER_COUNT}" -eq 1 ]]; then
|
2016-07-20 14:37:31 +00:00
|
|
|
if gcloud compute forwarding-rules describe "${MASTER_NAME}" --region "${REGION}" --project "${PROJECT}" &>/dev/null; then
|
|
|
|
detect-master
|
2016-10-11 12:52:51 +00:00
|
|
|
local REMAINING_REPLICA_NAME="$(get-all-replica-names)"
|
2016-09-28 13:25:18 +00:00
|
|
|
local REMAINING_REPLICA_ZONE=$(gcloud compute instances list "${REMAINING_REPLICA_NAME}" \
|
2016-07-20 14:37:31 +00:00
|
|
|
--project "${PROJECT}" --format="value(zone)")
|
|
|
|
gcloud compute forwarding-rules delete \
|
|
|
|
--project "${PROJECT}" \
|
|
|
|
--region "${REGION}" \
|
|
|
|
--quiet \
|
|
|
|
"${MASTER_NAME}"
|
2016-09-28 13:25:18 +00:00
|
|
|
attach-external-ip "${REMAINING_REPLICA_NAME}" "${REMAINING_REPLICA_ZONE}" "${KUBE_MASTER_IP}"
|
2016-07-20 14:37:31 +00:00
|
|
|
gcloud compute target-pools delete \
|
|
|
|
--project "${PROJECT}" \
|
|
|
|
--region "${REGION}" \
|
|
|
|
--quiet \
|
|
|
|
"${MASTER_NAME}"
|
|
|
|
fi
|
|
|
|
fi
|
|
|
|
|
|
|
|
# If there are no more remaining master replicas, we should delete all remaining network resources.
|
2016-11-29 17:50:31 +00:00
|
|
|
if [[ "${REMAINING_MASTER_COUNT}" -eq 0 ]]; then
|
2016-10-11 23:50:30 +00:00
|
|
|
# Delete firewall rule for the master, etcd servers, and nodes.
|
|
|
|
delete-firewall-rules "${MASTER_NAME}-https" "${MASTER_NAME}-etcd" "${NODE_TAG}-all"
|
2016-07-20 14:37:31 +00:00
|
|
|
# Delete the master's reserved IP
|
|
|
|
if gcloud compute addresses describe "${MASTER_NAME}-ip" --region "${REGION}" --project "${PROJECT}" &>/dev/null; then
|
|
|
|
gcloud compute addresses delete \
|
|
|
|
--project "${PROJECT}" \
|
|
|
|
--region "${REGION}" \
|
|
|
|
--quiet \
|
|
|
|
"${MASTER_NAME}-ip"
|
|
|
|
fi
|
|
|
|
fi
|
|
|
|
|
2016-09-16 14:51:52 +00:00
|
|
|
if [[ "${KUBE_DELETE_NODES:-}" != "false" ]]; then
|
|
|
|
# Find out what minions are running.
|
|
|
|
local -a minions
|
|
|
|
minions=( $(gcloud compute instances list \
|
2017-08-09 16:45:44 +00:00
|
|
|
--project "${PROJECT}" \
|
|
|
|
--filter="name ~ '${NODE_INSTANCE_PREFIX}-.+' AND zone:(${ZONE})" \
|
2016-09-16 14:51:52 +00:00
|
|
|
--format='value(name)') )
|
|
|
|
# If any minions are running, delete them in batches.
|
|
|
|
while (( "${#minions[@]}" > 0 )); do
|
|
|
|
echo Deleting nodes "${minions[*]::${batch}}"
|
|
|
|
gcloud compute instances delete \
|
|
|
|
--project "${PROJECT}" \
|
|
|
|
--quiet \
|
|
|
|
--delete-disks boot \
|
|
|
|
--zone "${ZONE}" \
|
|
|
|
"${minions[@]::${batch}}"
|
|
|
|
minions=( "${minions[@]:${batch}}" )
|
|
|
|
done
|
|
|
|
fi
|
2014-08-13 20:26:03 +00:00
|
|
|
|
2016-09-28 13:25:18 +00:00
|
|
|
# If there are no more remaining master replicas: delete routes, pd for influxdb and update kubeconfig
|
2016-11-29 17:50:31 +00:00
|
|
|
if [[ "${REMAINING_MASTER_COUNT}" -eq 0 ]]; then
|
2016-09-28 13:25:18 +00:00
|
|
|
# Delete routes.
|
|
|
|
local -a routes
|
|
|
|
# Clean up all routes w/ names like "<cluster-name>-<node-GUID>"
|
|
|
|
# e.g. "kubernetes-12345678-90ab-cdef-1234-567890abcdef". The name is
|
|
|
|
# determined by the node controller on the master.
|
|
|
|
# Note that this is currently a noop, as synchronously deleting the node MIG
|
|
|
|
# first allows the master to cleanup routes itself.
|
|
|
|
local TRUNCATED_PREFIX="${INSTANCE_PREFIX:0:26}"
|
2017-09-04 16:55:56 +00:00
|
|
|
routes=( $(gcloud compute routes list --project "${NETWORK_PROJECT}" \
|
2017-08-09 16:45:44 +00:00
|
|
|
--filter="name ~ '${TRUNCATED_PREFIX}-.{8}-.{4}-.{4}-.{4}-.{12}'" \
|
2016-09-28 13:25:18 +00:00
|
|
|
--format='value(name)') )
|
|
|
|
while (( "${#routes[@]}" > 0 )); do
|
|
|
|
echo Deleting routes "${routes[*]::${batch}}"
|
|
|
|
gcloud compute routes delete \
|
2017-09-04 16:55:56 +00:00
|
|
|
--project "${NETWORK_PROJECT}" \
|
2016-09-28 13:25:18 +00:00
|
|
|
--quiet \
|
|
|
|
"${routes[@]::${batch}}"
|
|
|
|
routes=( "${routes[@]:${batch}}" )
|
|
|
|
done
|
2014-07-14 17:50:04 +00:00
|
|
|
|
2016-09-28 13:25:18 +00:00
|
|
|
# Delete persistent disk for influx-db.
|
|
|
|
if gcloud compute disks describe "${INSTANCE_PREFIX}"-influxdb-pd --zone "${ZONE}" --project "${PROJECT}" &>/dev/null; then
|
|
|
|
gcloud compute disks delete \
|
|
|
|
--project "${PROJECT}" \
|
|
|
|
--quiet \
|
|
|
|
--zone "${ZONE}" \
|
|
|
|
"${INSTANCE_PREFIX}"-influxdb-pd
|
|
|
|
fi
|
2016-06-21 07:43:36 +00:00
|
|
|
|
2016-09-28 13:25:18 +00:00
|
|
|
# Delete all remaining firewall rules and network.
|
|
|
|
delete-firewall-rules \
|
2016-12-13 19:21:14 +00:00
|
|
|
"${CLUSTER_NAME}-default-internal-master" \
|
|
|
|
"${CLUSTER_NAME}-default-internal-node" \
|
2016-09-28 13:25:18 +00:00
|
|
|
"${NETWORK}-default-ssh" \
|
|
|
|
"${NETWORK}-default-internal" # Pre-1.5 clusters
|
2017-02-27 08:31:13 +00:00
|
|
|
|
2016-09-28 13:25:18 +00:00
|
|
|
if [[ "${KUBE_DELETE_NETWORK}" == "true" ]]; then
|
2017-10-10 00:35:43 +00:00
|
|
|
# Delete all remaining firewall rules in the network.
|
|
|
|
delete-all-firewall-rules || true
|
2017-06-14 11:23:41 +00:00
|
|
|
delete-subnetworks || true
|
2017-10-10 00:35:43 +00:00
|
|
|
delete-network || true # might fail if there are leaked resources that reference the network
|
2016-09-28 13:25:18 +00:00
|
|
|
fi
|
2016-10-11 23:50:30 +00:00
|
|
|
|
2016-09-28 13:25:18 +00:00
|
|
|
# If there are no more remaining master replicas, we should update kubeconfig.
|
2016-07-20 15:25:25 +00:00
|
|
|
export CONTEXT="${PROJECT}_${INSTANCE_PREFIX}"
|
|
|
|
clear-kubeconfig
|
2016-10-11 12:52:51 +00:00
|
|
|
else
|
|
|
|
# If some master replicas remain: cluster has been changed, we need to re-validate it.
|
|
|
|
echo "... calling validate-cluster" >&2
|
|
|
|
# Override errexit
|
|
|
|
(validate-cluster) && validate_result="$?" || validate_result="$?"
|
|
|
|
|
|
|
|
# We have two different failure modes from validate cluster:
|
|
|
|
# - 1: fatal error - cluster won't be working correctly
|
|
|
|
# - 2: weak error - something went wrong, but cluster probably will be working correctly
|
|
|
|
# We just print an error message in case 2).
|
2016-11-29 17:50:31 +00:00
|
|
|
if [[ "${validate_result}" -eq 1 ]]; then
|
2016-10-11 12:52:51 +00:00
|
|
|
exit 1
|
2016-11-29 17:50:31 +00:00
|
|
|
elif [[ "${validate_result}" -eq 2 ]]; then
|
2016-10-11 12:52:51 +00:00
|
|
|
echo "...ignoring non-fatal errors in validate-cluster" >&2
|
|
|
|
fi
|
2016-07-20 15:25:25 +00:00
|
|
|
fi
|
2015-05-12 23:12:15 +00:00
|
|
|
set -e
|
2014-07-14 17:50:04 +00:00
|
|
|
}
|
|
|
|
|
2016-08-02 07:08:05 +00:00
|
|
|
# Prints name of one of the master replicas in the current zone. It will be either
|
|
|
|
# just MASTER_NAME or MASTER_NAME with a suffix for a replica (see get-replica-name-regexp).
|
|
|
|
#
|
|
|
|
# Assumed vars:
|
|
|
|
# PROJECT
|
|
|
|
# ZONE
|
|
|
|
# MASTER_NAME
|
|
|
|
#
|
|
|
|
# NOTE: Must be in sync with get-replica-name-regexp and set-replica-name.
|
|
|
|
function get-replica-name() {
|
|
|
|
echo $(gcloud compute instances list \
|
|
|
|
--project "${PROJECT}" \
|
2017-08-09 16:45:44 +00:00
|
|
|
--filter="name ~ '$(get-replica-name-regexp)' AND zone:(${ZONE})" \
|
2016-08-02 07:08:05 +00:00
|
|
|
--format "value(name)" | head -n1)
|
|
|
|
}
|
|
|
|
|
|
|
|
# Prints comma-separated names of all of the master replicas in all zones.
|
|
|
|
#
|
|
|
|
# Assumed vars:
|
|
|
|
# PROJECT
|
|
|
|
# MASTER_NAME
|
|
|
|
#
|
|
|
|
# NOTE: Must be in sync with get-replica-name-regexp and set-replica-name.
|
|
|
|
function get-all-replica-names() {
|
|
|
|
echo $(gcloud compute instances list \
|
|
|
|
--project "${PROJECT}" \
|
2017-08-09 16:45:44 +00:00
|
|
|
--filter="name ~ '$(get-replica-name-regexp)'" \
|
2016-08-02 07:08:05 +00:00
|
|
|
--format "value(name)" | tr "\n" "," | sed 's/,$//')
|
|
|
|
}
|
|
|
|
|
2016-10-11 12:52:51 +00:00
|
|
|
# Prints the number of all of the master replicas in all zones.
|
|
|
|
#
|
|
|
|
# Assumed vars:
|
|
|
|
# MASTER_NAME
|
|
|
|
function get-master-replicas-count() {
|
|
|
|
detect-project
|
|
|
|
local num_masters=$(gcloud compute instances list \
|
|
|
|
--project "${PROJECT}" \
|
2017-08-09 16:45:44 +00:00
|
|
|
--filter="name ~ '$(get-replica-name-regexp)'" \
|
2016-10-11 12:52:51 +00:00
|
|
|
--format "value(zone)" | wc -l)
|
|
|
|
echo -n "${num_masters}"
|
|
|
|
}
|
|
|
|
|
2016-08-02 07:08:05 +00:00
|
|
|
# Prints regexp for full master machine name. In a cluster with replicated master,
|
|
|
|
# VM names may either be MASTER_NAME or MASTER_NAME with a suffix for a replica.
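# Example (assuming MASTER_NAME=kubernetes-master): both "kubernetes-master"
# and "kubernetes-master-q3f" match the regexp echoed below.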
|
|
|
|
function get-replica-name-regexp() {
|
2018-01-20 02:44:52 +00:00
|
|
|
echo "^${MASTER_NAME}(-...)?"
|
2016-08-02 07:08:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
# Sets REPLICA_NAME to a unique name for a master replica that will match
|
|
|
|
# expected regexp (see get-replica-name-regexp).
|
|
|
|
#
|
|
|
|
# Assumed vars:
|
|
|
|
# PROJECT
|
|
|
|
# ZONE
|
|
|
|
# MASTER_NAME
|
|
|
|
#
|
|
|
|
# Sets:
|
|
|
|
# REPLICA_NAME
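# Illustrative result (suffix is hypothetical): REPLICA_NAME=kubernetes-master-q3f,
# where the 3-character suffix comes from an md5 of the current date.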
|
|
|
|
function set-replica-name() {
|
|
|
|
local instances=$(gcloud compute instances list \
|
|
|
|
--project "${PROJECT}" \
|
2017-08-09 16:45:44 +00:00
|
|
|
--filter="name ~ '$(get-replica-name-regexp)'" \
|
2016-08-02 07:08:05 +00:00
|
|
|
--format "value(name)")
|
|
|
|
|
|
|
|
suffix=""
|
|
|
|
while echo "${instances}" | grep "${suffix}" &>/dev/null; do
|
|
|
|
suffix="$(date | md5sum | head -c3)"
|
|
|
|
done
|
|
|
|
REPLICA_NAME="${MASTER_NAME}-${suffix}"
|
|
|
|
}
|
|
|
|
|
2016-02-22 12:05:34 +00:00
|
|
|
# Gets the instance template for the given NODE_INSTANCE_PREFIX. It echoes the template name so that the function
|
2015-07-09 19:01:06 +00:00
|
|
|
# output can be used.
|
2016-02-22 12:05:34 +00:00
|
|
|
# Assumed vars:
|
|
|
|
# NODE_INSTANCE_PREFIX
|
2015-07-09 19:01:06 +00:00
|
|
|
#
|
|
|
|
# $1: project
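# Example (as used by kube-down in this file):
#   local templates=$(get-template "${PROJECT}")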
|
2016-08-02 07:08:05 +00:00
|
|
|
function get-template() {
|
2017-09-07 21:39:38 +00:00
|
|
|
gcloud compute instance-templates list \
|
|
|
|
--filter="name ~ '${NODE_INSTANCE_PREFIX}-template(-(${KUBE_RELEASE_VERSION_DASHED_REGEX}|${KUBE_CI_VERSION_DASHED_REGEX}))?'" \
|
2016-05-04 21:10:00 +00:00
|
|
|
--project="${1}" --format='value(name)'
|
2015-07-09 19:01:06 +00:00
|
|
|
}
|
|
|
|
|
2015-06-15 16:21:27 +00:00
|
|
|
# Checks if there are any existing resources related to the kubernetes cluster.
|
|
|
|
#
|
|
|
|
# Assumed vars:
|
|
|
|
# MASTER_NAME
|
|
|
|
# NODE_INSTANCE_PREFIX
|
|
|
|
# ZONE
|
2016-07-20 15:25:25 +00:00
|
|
|
# REGION
|
2015-06-15 16:21:27 +00:00
|
|
|
# Vars set:
|
|
|
|
# KUBE_RESOURCE_FOUND
|
2016-08-02 07:08:05 +00:00
|
|
|
function check-resources() {
|
2015-06-15 16:21:27 +00:00
|
|
|
detect-project
|
2015-12-11 08:09:09 +00:00
|
|
|
detect-node-names
|
2015-06-15 16:21:27 +00:00
|
|
|
|
|
|
|
echo "Looking for already existing resources"
|
|
|
|
KUBE_RESOURCE_FOUND=""
|
|
|
|
|
2015-12-11 08:09:09 +00:00
|
|
|
if [[ -n "${INSTANCE_GROUPS[@]:-}" ]]; then
|
|
|
|
KUBE_RESOURCE_FOUND="Managed instance groups ${INSTANCE_GROUPS[@]}"
|
2015-06-15 16:21:27 +00:00
|
|
|
return 1
|
|
|
|
fi
|
|
|
|
|
2015-06-17 12:59:12 +00:00
|
|
|
if gcloud compute instance-templates describe --project "${PROJECT}" "${NODE_INSTANCE_PREFIX}-template" &>/dev/null; then
|
2015-06-15 16:21:27 +00:00
|
|
|
KUBE_RESOURCE_FOUND="Instance template ${NODE_INSTANCE_PREFIX}-template"
|
|
|
|
return 1
|
|
|
|
fi
|
|
|
|
|
2015-06-17 12:59:12 +00:00
|
|
|
if gcloud compute instances describe --project "${PROJECT}" "${MASTER_NAME}" --zone "${ZONE}" &>/dev/null; then
|
2015-06-15 16:21:27 +00:00
|
|
|
KUBE_RESOURCE_FOUND="Kubernetes master ${MASTER_NAME}"
|
|
|
|
return 1
|
|
|
|
fi
|
|
|
|
|
2015-06-17 12:59:12 +00:00
|
|
|
if gcloud compute disks describe --project "${PROJECT}" "${MASTER_NAME}"-pd --zone "${ZONE}" &>/dev/null; then
|
2015-06-15 16:21:27 +00:00
|
|
|
KUBE_RESOURCE_FOUND="Persistent disk ${MASTER_NAME}-pd"
|
|
|
|
return 1
|
|
|
|
fi
|
2015-07-27 18:50:31 +00:00
|
|
|
|
2015-06-15 16:21:27 +00:00
|
|
|
# Find out what minions are running.
|
|
|
|
local -a minions
|
|
|
|
minions=( $(gcloud compute instances list \
|
2017-08-09 16:45:44 +00:00
|
|
|
--project "${PROJECT}" \
|
|
|
|
--filter="name ~ '${NODE_INSTANCE_PREFIX}-.+' AND zone:(${ZONE})" \
|
2016-05-04 21:10:00 +00:00
|
|
|
--format='value(name)') )
|
2015-06-15 16:21:27 +00:00
|
|
|
if (( "${#minions[@]}" > 0 )); then
|
|
|
|
KUBE_RESOURCE_FOUND="${#minions[@]} matching matching ${NODE_INSTANCE_PREFIX}-.+"
|
|
|
|
return 1
|
|
|
|
fi
|
|
|
|
|
2017-09-04 16:55:56 +00:00
|
|
|
if gcloud compute firewall-rules describe --project "${NETWORK_PROJECT}" "${MASTER_NAME}-https" &>/dev/null; then
|
2015-06-19 15:17:06 +00:00
|
|
|
KUBE_RESOURCE_FOUND="Firewall rules for ${MASTER_NAME}-https"
|
2015-06-15 16:21:27 +00:00
|
|
|
return 1
|
|
|
|
fi
|
|
|
|
|
2017-09-04 16:55:56 +00:00
|
|
|
if gcloud compute firewall-rules describe --project "${NETWORK_PROJECT}" "${NODE_TAG}-all" &>/dev/null; then
|
2015-06-19 15:17:06 +00:00
|
|
|
KUBE_RESOURCE_FOUND="Firewall rules for ${MASTER_NAME}-all"
|
2015-06-15 16:21:27 +00:00
|
|
|
return 1
|
|
|
|
fi
|
|
|
|
|
|
|
|
local -a routes
|
2017-09-04 16:55:56 +00:00
|
|
|
routes=( $(gcloud compute routes list --project "${NETWORK_PROJECT}" \
|
2017-08-09 16:45:44 +00:00
|
|
|
--filter="name ~ '${INSTANCE_PREFIX}-minion-.{4}'" --format='value(name)') )
|
2015-06-15 16:21:27 +00:00
|
|
|
if (( "${#routes[@]}" > 0 )); then
|
|
|
|
KUBE_RESOURCE_FOUND="${#routes[@]} routes matching ${INSTANCE_PREFIX}-minion-.{4}"
|
|
|
|
return 1
|
|
|
|
fi
|
|
|
|
|
2015-06-17 12:59:12 +00:00
|
|
|
if gcloud compute addresses describe --project "${PROJECT}" "${MASTER_NAME}-ip" --region "${REGION}" &>/dev/null; then
|
2015-06-15 16:21:27 +00:00
|
|
|
KUBE_RESOURCE_FOUND="Master's reserved IP"
|
|
|
|
return 1
|
|
|
|
fi
|
|
|
|
|
|
|
|
# No resources found.
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2015-06-01 15:59:12 +00:00
|
|
|
# Prepare to push new binaries to kubernetes cluster
|
|
|
|
# $1 - whether to prepare the push for nodes as well
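# Example (illustrative; mirrors the argument handling below):
#   prepare-push true    # also rebuilds the node instance template
#   prepare-push         # prepares the push for the master only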
|
|
|
|
function prepare-push() {
|
2016-06-23 18:22:04 +00:00
|
|
|
local node="${1-}"
|
2016-12-29 23:10:06 +00:00
|
|
|
#TODO(dawnchen): figure out how to upgrade a Container Linux node
|
2016-06-23 18:22:04 +00:00
|
|
|
if [[ "${node}" == "true" && "${NODE_OS_DISTRIBUTION}" != "debian" ]]; then
|
|
|
|
echo "Updating nodes in a kubernetes cluster with ${NODE_OS_DISTRIBUTION} is not supported yet." >&2
|
|
|
|
exit 1
|
|
|
|
fi
|
|
|
|
if [[ "${node}" != "true" && "${MASTER_OS_DISTRIBUTION}" != "debian" ]]; then
|
|
|
|
echo "Updating the master in a kubernetes cluster with ${MASTER_OS_DISTRIBUTION} is not supported yet." >&2
|
2015-06-01 15:59:12 +00:00
|
|
|
exit 1
|
2015-04-28 15:50:43 +00:00
|
|
|
fi
|
|
|
|
|
2015-03-02 22:38:58 +00:00
|
|
|
OUTPUT=${KUBE_ROOT}/_output/logs
|
|
|
|
mkdir -p ${OUTPUT}
|
|
|
|
|
2017-03-11 06:18:38 +00:00
|
|
|
kube::util::ensure-temp-dir
|
2014-09-23 22:54:27 +00:00
|
|
|
detect-project
|
2014-07-14 17:50:04 +00:00
|
|
|
detect-master
|
2015-11-09 07:33:06 +00:00
|
|
|
detect-node-names
|
2015-08-22 01:47:31 +00:00
|
|
|
get-kubeconfig-basicauth
|
|
|
|
get-kubeconfig-bearertoken
|
2014-07-14 17:50:04 +00:00
|
|
|
|
2014-09-23 22:54:27 +00:00
|
|
|
# Make sure we have the tar files staged on Google Storage
|
2015-06-01 15:59:12 +00:00
|
|
|
tars_from_version
|
|
|
|
|
|
|
|
# Prepare node env vars and update MIG template
|
2016-06-23 18:22:04 +00:00
|
|
|
if [[ "${node}" == "true" ]]; then
|
2015-06-01 15:59:12 +00:00
|
|
|
write-node-env
|
|
|
|
|
2017-06-28 17:50:58 +00:00
|
|
|
local scope_flags=$(get-scope-flags)
|
2015-06-01 15:59:12 +00:00
|
|
|
|
|
|
|
# Ugly hack: Since it is not possible to delete instance-template that is currently
|
|
|
|
# being used, create a temp one, then delete the old one and recreate it once again.
|
2015-09-28 23:22:13 +00:00
|
|
|
local tmp_template_name="${NODE_INSTANCE_PREFIX}-template-tmp"
|
|
|
|
create-node-instance-template $tmp_template_name
|
2015-06-01 15:59:12 +00:00
|
|
|
|
2015-09-28 23:22:13 +00:00
|
|
|
local template_name="${NODE_INSTANCE_PREFIX}-template"
|
2015-12-18 10:12:07 +00:00
|
|
|
for group in ${INSTANCE_GROUPS[@]:-}; do
|
2015-12-11 08:09:09 +00:00
|
|
|
gcloud compute instance-groups managed \
|
|
|
|
set-instance-template "${group}" \
|
|
|
|
--template "$tmp_template_name" \
|
|
|
|
--zone "${ZONE}" \
|
|
|
|
--project "${PROJECT}" || true;
|
|
|
|
done
|
2015-06-01 15:59:12 +00:00
|
|
|
|
|
|
|
gcloud compute instance-templates delete \
|
|
|
|
--project "${PROJECT}" \
|
|
|
|
--quiet \
|
2015-09-28 23:22:13 +00:00
|
|
|
"$template_name" || true
|
2015-06-01 15:59:12 +00:00
|
|
|
|
2015-09-28 23:22:13 +00:00
|
|
|
create-node-instance-template "$template_name"
|
2015-06-01 15:59:12 +00:00
|
|
|
|
2015-12-18 10:12:07 +00:00
|
|
|
for group in ${INSTANCE_GROUPS[@]:-}; do
|
2015-12-11 08:09:09 +00:00
|
|
|
gcloud compute instance-groups managed \
|
|
|
|
set-instance-template "${group}" \
|
|
|
|
--template "$template_name" \
|
|
|
|
--zone "${ZONE}" \
|
|
|
|
--project "${PROJECT}" || true;
|
|
|
|
done
|
2014-09-23 22:54:27 +00:00
|
|
|
|
2015-06-01 15:59:12 +00:00
|
|
|
gcloud compute instance-templates delete \
|
|
|
|
--project "${PROJECT}" \
|
|
|
|
--quiet \
|
2015-09-28 23:22:13 +00:00
|
|
|
"$tmp_template_name" || true
|
2015-06-01 15:59:12 +00:00
|
|
|
fi
|
|
|
|
}
|
2014-09-23 22:54:27 +00:00
|
|
|
|
|
|
|
# -----------------------------------------------------------------------------
|
2016-02-16 22:54:50 +00:00
|
|
|
# Cluster specific test helpers used from hack/e2e.go
|
2014-09-23 22:54:27 +00:00
|
|
|
|
|
|
|
# Execute prior to running tests to build a release if required for env.
|
|
|
|
#
|
|
|
|
# Assumed Vars:
|
2014-10-03 21:58:49 +00:00
|
|
|
# KUBE_ROOT
|
2016-08-02 07:08:05 +00:00
|
|
|
function test-build-release() {
|
2014-07-14 17:50:04 +00:00
|
|
|
# Make a release
|
2016-12-14 00:03:06 +00:00
|
|
|
"${KUBE_ROOT}/build/release.sh"
|
2014-07-14 17:50:04 +00:00
|
|
|
}
|
|
|
|
|
2014-09-23 22:54:27 +00:00
|
|
|
# Execute prior to running tests to initialize required structure. This is
|
2016-02-18 00:49:07 +00:00
|
|
|
# called from hack/e2e.go only when running -up.
|
2014-09-23 22:54:27 +00:00
|
|
|
#
|
|
|
|
# Assumed vars:
|
|
|
|
# Variables from config.sh
|
2016-08-02 07:08:05 +00:00
|
|
|
function test-setup() {
|
2014-07-14 17:50:04 +00:00
|
|
|
# Detect the project into $PROJECT if it isn't set
|
|
|
|
detect-project
|
|
|
|
|
2016-10-11 12:52:51 +00:00
|
|
|
if [[ ${MULTIZONE:-} == "true" && -n ${E2E_ZONES:-} ]]; then
|
2017-07-24 10:10:22 +00:00
|
|
|
for KUBE_GCE_ZONE in ${E2E_ZONES}; do
|
2016-02-18 00:49:07 +00:00
|
|
|
KUBE_GCE_ZONE="${KUBE_GCE_ZONE}" KUBE_USE_EXISTING_MASTER="${KUBE_USE_EXISTING_MASTER:-}" "${KUBE_ROOT}/cluster/kube-up.sh"
|
|
|
|
KUBE_USE_EXISTING_MASTER="true" # For subsequent zones we use the existing master
|
|
|
|
done
|
|
|
|
else
|
|
|
|
"${KUBE_ROOT}/cluster/kube-up.sh"
|
|
|
|
fi
|
|
|
|
|
2014-10-23 00:49:40 +00:00
|
|
|
# Open up port 80 & 8080 so common containers on minions can be reached
|
2015-03-02 18:15:34 +00:00
|
|
|
# TODO(roberthbailey): Remove this once we are no longer relying on hostPorts.
|
2015-06-11 09:39:22 +00:00
|
|
|
local start=`date +%s`
|
2014-11-25 18:32:27 +00:00
|
|
|
gcloud compute firewall-rules create \
|
2017-09-04 16:55:56 +00:00
|
|
|
--project "${NETWORK_PROJECT}" \
|
2015-11-24 03:06:00 +00:00
|
|
|
--target-tags "${NODE_TAG}" \
|
2015-05-25 07:46:24 +00:00
|
|
|
--allow tcp:80,tcp:8080 \
|
2014-10-23 00:49:40 +00:00
|
|
|
--network "${NETWORK}" \
|
2015-11-24 03:06:00 +00:00
|
|
|
"${NODE_TAG}-${INSTANCE_PREFIX}-http-alt" 2> /dev/null || true
|
2015-06-11 09:39:22 +00:00
|
|
|
# As there is no simple way to wait longer for this operation we need to manually
|
|
|
|
# wait some additional time (20 minutes altogether).
|
2017-09-04 16:55:56 +00:00
|
|
|
while ! gcloud compute firewall-rules describe --project "${NETWORK_PROJECT}" "${NODE_TAG}-${INSTANCE_PREFIX}-http-alt" 2> /dev/null; do
|
2016-02-01 20:30:45 +00:00
|
|
|
if [[ $(($start + 1200)) -lt `date +%s` ]]; then
|
2017-09-04 16:55:56 +00:00
|
|
|
echo -e "${color_red}Failed to create firewall ${NODE_TAG}-${INSTANCE_PREFIX}-http-alt in ${NETWORK_PROJECT}" >&2
|
2016-02-01 20:30:45 +00:00
|
|
|
exit 1
|
|
|
|
fi
|
|
|
|
sleep 5
|
2015-06-11 09:39:22 +00:00
|
|
|
done
|
2015-05-23 02:39:40 +00:00
|
|
|
|
|
|
|
# Open up the NodePort range
|
|
|
|
# TODO(justinsb): Move to main setup, if we decide we want to do this by default.
|
2015-06-11 09:39:22 +00:00
|
|
|
start=`date +%s`
|
2015-05-23 02:39:40 +00:00
|
|
|
gcloud compute firewall-rules create \
|
2017-09-04 16:55:56 +00:00
|
|
|
--project "${NETWORK_PROJECT}" \
|
2015-11-24 03:06:00 +00:00
|
|
|
--target-tags "${NODE_TAG}" \
|
2015-05-23 02:39:40 +00:00
|
|
|
--allow tcp:30000-32767,udp:30000-32767 \
|
|
|
|
--network "${NETWORK}" \
|
2015-11-24 03:06:00 +00:00
|
|
|
"${NODE_TAG}-${INSTANCE_PREFIX}-nodeports" 2> /dev/null || true
|
2015-06-11 09:39:22 +00:00
|
|
|
# As there is no simple way to wait longer for this operation we need to manually
|
|
|
|
# wait some additional time (20 minutes altogether).
|
2017-09-04 16:55:56 +00:00
|
|
|
while ! gcloud compute firewall-rules describe --project "${NETWORK_PROJECT}" "${NODE_TAG}-${INSTANCE_PREFIX}-nodeports" 2> /dev/null; do
|
2016-02-01 20:30:45 +00:00
|
|
|
if [[ $(($start + 1200)) -lt `date +%s` ]]; then
|
|
|
|
echo -e "${color_red}Failed to create firewall ${NODE_TAG}-${INSTANCE_PREFIX}-nodeports in ${PROJECT}" >&2
|
|
|
|
exit 1
|
|
|
|
fi
|
|
|
|
sleep 5
|
2015-06-11 09:39:22 +00:00
|
|
|
done
|
2014-07-14 17:50:04 +00:00
|
|
|
}
|
|
|
|
|
2014-12-09 23:07:54 +00:00
|
|
|
# Execute after running tests to perform any required clean-up. This is called
|
|
|
|
# from hack/e2e.go
|
2016-08-02 07:08:05 +00:00
|
|
|
function test-teardown() {
|
2014-12-09 23:07:54 +00:00
|
|
|
detect-project
|
2014-07-14 17:50:04 +00:00
|
|
|
echo "Shutting down test cluster in background."
|
2016-10-11 23:50:30 +00:00
|
|
|
delete-firewall-rules \
|
|
|
|
"${NODE_TAG}-${INSTANCE_PREFIX}-http-alt" \
|
|
|
|
"${NODE_TAG}-${INSTANCE_PREFIX}-nodeports"
|
2016-10-11 12:52:51 +00:00
|
|
|
if [[ ${MULTIZONE:-} == "true" && -n ${E2E_ZONES:-} ]]; then
|
2017-07-24 10:10:22 +00:00
|
|
|
local zones=( ${E2E_ZONES} )
|
|
|
|
# tear them down in reverse order, finally tearing down the master too.
|
|
|
|
for ((zone_num=${#zones[@]}-1; zone_num>0; zone_num--)); do
|
|
|
|
KUBE_GCE_ZONE="${zones[zone_num]}" KUBE_USE_EXISTING_MASTER="true" "${KUBE_ROOT}/cluster/kube-down.sh"
|
|
|
|
done
|
|
|
|
KUBE_GCE_ZONE="${zones[0]}" KUBE_USE_EXISTING_MASTER="false" "${KUBE_ROOT}/cluster/kube-down.sh"
|
2016-01-26 02:36:40 +00:00
|
|
|
else
|
2017-07-24 10:10:22 +00:00
|
|
|
"${KUBE_ROOT}/cluster/kube-down.sh"
|
2016-01-26 02:36:40 +00:00
|
|
|
fi
|
2014-07-14 17:50:04 +00:00
|
|
|
}
|
2014-10-10 05:38:00 +00:00
|
|
|
|
|
|
|
# SSH to a node by name ($1) and run a command ($2).
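# Example (node name and command are hypothetical):
#   ssh-to-node "kubernetes-minion-group-abcd" "uptime"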
|
2016-08-02 07:08:05 +00:00
|
|
|
function ssh-to-node() {
|
2014-10-10 05:38:00 +00:00
|
|
|
local node="$1"
|
|
|
|
local cmd="$2"
|
2015-04-21 22:27:38 +00:00
|
|
|
# Loop until we can successfully ssh into the box
|
2016-06-03 17:42:38 +00:00
|
|
|
for try in {1..5}; do
|
2016-03-07 21:29:04 +00:00
|
|
|
if gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --ssh-flag="-o ConnectTimeout=30" --project "${PROJECT}" --zone="${ZONE}" "${node}" --command "echo test > /dev/null"; then
|
2015-01-29 23:50:46 +00:00
|
|
|
break
|
|
|
|
fi
|
2015-04-21 22:27:38 +00:00
|
|
|
sleep 5
|
2015-01-29 23:50:46 +00:00
|
|
|
done
|
2015-04-21 22:27:38 +00:00
|
|
|
# Then actually try the command.
|
2016-03-07 21:29:04 +00:00
|
|
|
gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --ssh-flag="-o ConnectTimeout=30" --project "${PROJECT}" --zone="${ZONE}" "${node}" --command "${cmd}"
|
2014-10-10 05:38:00 +00:00
|
|
|
}
|
|
|
|
|
2014-11-11 19:03:07 +00:00
|
|
|
# Perform preparations required to run e2e tests
|
|
|
|
function prepare-e2e() {
|
|
|
|
detect-project
|
|
|
|
}
|