mirror of https://github.com/k3s-io/k3s
Move code only used by gce out of common.sh and into gce/util.sh.
parent
4b147e0361
commit
49cb1024b7
|
@ -264,15 +264,6 @@ function load-or-gen-kube-basicauth() {
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
function load-or-gen-kube-bearertoken() {
|
|
||||||
if [[ ! -z "${KUBE_CONTEXT:-}" ]]; then
|
|
||||||
get-kubeconfig-bearertoken
|
|
||||||
fi
|
|
||||||
if [[ -z "${KUBE_BEARER_TOKEN:-}" ]]; then
|
|
||||||
gen-kube-bearertoken
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Sets KUBE_VERSION variable to the proper version number (e.g. "v1.0.6",
|
# Sets KUBE_VERSION variable to the proper version number (e.g. "v1.0.6",
|
||||||
# "v1.2.0-alpha.1.881+376438b69c7612") or a version' publication of the form
|
# "v1.2.0-alpha.1.881+376438b69c7612") or a version' publication of the form
|
||||||
# <path>/<version> (e.g. "release/stable",' "ci/latest-1").
|
# <path>/<version> (e.g. "release/stable",' "ci/latest-1").
|
||||||
|
@ -297,53 +288,6 @@ function set_binary_version() {
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
# Figure out which binary use on the server and assure it is available.
|
|
||||||
# If KUBE_VERSION is specified use binaries specified by it, otherwise
|
|
||||||
# use local dev binaries.
|
|
||||||
#
|
|
||||||
# Assumed vars:
|
|
||||||
# KUBE_VERSION
|
|
||||||
# KUBE_RELEASE_VERSION_REGEX
|
|
||||||
# KUBE_CI_VERSION_REGEX
|
|
||||||
# Vars set:
|
|
||||||
# KUBE_TAR_HASH
|
|
||||||
# SERVER_BINARY_TAR_URL
|
|
||||||
# SERVER_BINARY_TAR_HASH
|
|
||||||
function tars_from_version() {
|
|
||||||
local sha1sum=""
|
|
||||||
if which sha1sum >/dev/null 2>&1; then
|
|
||||||
sha1sum="sha1sum"
|
|
||||||
else
|
|
||||||
sha1sum="shasum -a1"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -z "${KUBE_VERSION-}" ]]; then
|
|
||||||
find-release-tars
|
|
||||||
upload-server-tars
|
|
||||||
elif [[ ${KUBE_VERSION} =~ ${KUBE_RELEASE_VERSION_REGEX} ]]; then
|
|
||||||
SERVER_BINARY_TAR_URL="https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/kubernetes-server-linux-amd64.tar.gz"
|
|
||||||
# TODO: Clean this up.
|
|
||||||
KUBE_MANIFESTS_TAR_URL="${SERVER_BINARY_TAR_URL/server-linux-amd64/manifests}"
|
|
||||||
KUBE_MANIFESTS_TAR_HASH=$(curl ${KUBE_MANIFESTS_TAR_URL} --silent --show-error | ${sha1sum} | awk '{print $1}')
|
|
||||||
elif [[ ${KUBE_VERSION} =~ ${KUBE_CI_VERSION_REGEX} ]]; then
|
|
||||||
SERVER_BINARY_TAR_URL="https://storage.googleapis.com/kubernetes-release-dev/ci/${KUBE_VERSION}/kubernetes-server-linux-amd64.tar.gz"
|
|
||||||
# TODO: Clean this up.
|
|
||||||
KUBE_MANIFESTS_TAR_URL="${SERVER_BINARY_TAR_URL/server-linux-amd64/manifests}"
|
|
||||||
KUBE_MANIFESTS_TAR_HASH=$(curl ${KUBE_MANIFESTS_TAR_URL} --silent --show-error | ${sha1sum} | awk '{print $1}')
|
|
||||||
else
|
|
||||||
echo "Version doesn't match regexp" >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
if ! SERVER_BINARY_TAR_HASH=$(curl -Ss --fail "${SERVER_BINARY_TAR_URL}.sha1"); then
|
|
||||||
echo "Failure trying to curl release .sha1"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if ! curl -Ss --head "${SERVER_BINARY_TAR_URL}" >&/dev/null; then
|
|
||||||
echo "Can't find release at ${SERVER_BINARY_TAR_URL}" >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Search for the specified tarball in the various known output locations,
|
# Search for the specified tarball in the various known output locations,
|
||||||
# echoing the location if found.
|
# echoing the location if found.
|
||||||
#
|
#
|
||||||
|
@ -386,703 +330,6 @@ function find-release-tars() {
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
# Discover the git version of the current build package
|
|
||||||
#
|
|
||||||
# Assumed vars:
|
|
||||||
# KUBE_ROOT
|
|
||||||
# Vars set:
|
|
||||||
# KUBE_GIT_VERSION
|
|
||||||
function find-release-version() {
|
|
||||||
KUBE_GIT_VERSION=""
|
|
||||||
if [[ -f "${KUBE_ROOT}/version" ]]; then
|
|
||||||
KUBE_GIT_VERSION="$(cat ${KUBE_ROOT}/version)"
|
|
||||||
fi
|
|
||||||
if [[ -f "${KUBE_ROOT}/_output/release-stage/full/kubernetes/version" ]]; then
|
|
||||||
KUBE_GIT_VERSION="$(cat ${KUBE_ROOT}/_output/release-stage/full/kubernetes/version)"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -z "${KUBE_GIT_VERSION}" ]]; then
|
|
||||||
echo "!!! Cannot find release version"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Quote something appropriate for a yaml string.
|
|
||||||
#
|
|
||||||
# TODO(zmerlynn): Note that this function doesn't so much "quote" as
|
|
||||||
# "strip out quotes", and we really should be using a YAML library for
|
|
||||||
# this, but PyYAML isn't shipped by default, and *rant rant rant ... SIGH*
|
|
||||||
function yaml-quote {
|
|
||||||
echo "'$(echo "${@:-}" | sed -e "s/'/''/g")'"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Builds the RUNTIME_CONFIG var from other feature enable options (such as
|
|
||||||
# features in alpha)
|
|
||||||
function build-runtime-config() {
|
|
||||||
# There is nothing to do here for now. Just using this function as a placeholder.
|
|
||||||
:
|
|
||||||
}
|
|
||||||
|
|
||||||
# Writes the cluster name into a temporary file.
|
|
||||||
# Assumed vars
|
|
||||||
# CLUSTER_NAME
|
|
||||||
function write-cluster-name {
|
|
||||||
cat >"${KUBE_TEMP}/cluster-name.txt" << EOF
|
|
||||||
${CLUSTER_NAME}
|
|
||||||
EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
function write-master-env {
|
|
||||||
# If the user requested that the master be part of the cluster, set the
|
|
||||||
# environment variable to program the master kubelet to register itself.
|
|
||||||
if [[ "${REGISTER_MASTER_KUBELET:-}" == "true" && -z "${KUBELET_APISERVER:-}" ]]; then
|
|
||||||
KUBELET_APISERVER="${MASTER_NAME}"
|
|
||||||
fi
|
|
||||||
if [[ -z "${KUBERNETES_MASTER_NAME:-}" ]]; then
|
|
||||||
KUBERNETES_MASTER_NAME="${MASTER_NAME}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
build-kube-env true "${KUBE_TEMP}/master-kube-env.yaml"
|
|
||||||
build-kube-master-certs "${KUBE_TEMP}/kube-master-certs.yaml"
|
|
||||||
}
|
|
||||||
|
|
||||||
function write-node-env {
|
|
||||||
if [[ -z "${KUBERNETES_MASTER_NAME:-}" ]]; then
|
|
||||||
KUBERNETES_MASTER_NAME="${MASTER_NAME}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
build-kube-env false "${KUBE_TEMP}/node-kube-env.yaml"
|
|
||||||
}
|
|
||||||
|
|
||||||
function build-kube-master-certs {
|
|
||||||
local file=$1
|
|
||||||
rm -f ${file}
|
|
||||||
cat >$file <<EOF
|
|
||||||
KUBEAPISERVER_CERT: $(yaml-quote ${KUBEAPISERVER_CERT_BASE64:-})
|
|
||||||
KUBEAPISERVER_KEY: $(yaml-quote ${KUBEAPISERVER_KEY_BASE64:-})
|
|
||||||
CA_KEY: $(yaml-quote ${CA_KEY_BASE64:-})
|
|
||||||
AGGREGATOR_CA_KEY: $(yaml-quote ${AGGREGATOR_CA_KEY_BASE64:-})
|
|
||||||
REQUESTHEADER_CA_CERT: $(yaml-quote ${REQUESTHEADER_CA_CERT_BASE64:-})
|
|
||||||
PROXY_CLIENT_CERT: $(yaml-quote ${PROXY_CLIENT_CERT_BASE64:-})
|
|
||||||
PROXY_CLIENT_KEY: $(yaml-quote ${PROXY_CLIENT_KEY_BASE64:-})
|
|
||||||
EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
# $1: if 'true', we're building a master yaml, else a node
|
|
||||||
function build-kube-env {
|
|
||||||
local master=$1
|
|
||||||
local file=$2
|
|
||||||
|
|
||||||
local server_binary_tar_url=$SERVER_BINARY_TAR_URL
|
|
||||||
local kube_manifests_tar_url="${KUBE_MANIFESTS_TAR_URL:-}"
|
|
||||||
if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \
|
|
||||||
[[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]] ; then
|
|
||||||
# TODO: Support fallback .tar.gz settings on Container Linux
|
|
||||||
server_binary_tar_url=$(split_csv "${SERVER_BINARY_TAR_URL}")
|
|
||||||
kube_manifests_tar_url=$(split_csv "${KUBE_MANIFESTS_TAR_URL}")
|
|
||||||
fi
|
|
||||||
|
|
||||||
build-runtime-config
|
|
||||||
|
|
||||||
rm -f ${file}
|
|
||||||
cat >$file <<EOF
|
|
||||||
CLUSTER_NAME: $(yaml-quote ${CLUSTER_NAME})
|
|
||||||
ENV_TIMESTAMP: $(yaml-quote $(date -u +%Y-%m-%dT%T%z))
|
|
||||||
INSTANCE_PREFIX: $(yaml-quote ${INSTANCE_PREFIX})
|
|
||||||
NODE_INSTANCE_PREFIX: $(yaml-quote ${NODE_INSTANCE_PREFIX})
|
|
||||||
NODE_TAGS: $(yaml-quote ${NODE_TAGS:-})
|
|
||||||
NODE_NETWORK: $(yaml-quote ${NETWORK:-})
|
|
||||||
NODE_SUBNETWORK: $(yaml-quote ${SUBNETWORK:-})
|
|
||||||
CLUSTER_IP_RANGE: $(yaml-quote ${CLUSTER_IP_RANGE:-10.244.0.0/16})
|
|
||||||
SERVER_BINARY_TAR_URL: $(yaml-quote ${server_binary_tar_url})
|
|
||||||
SERVER_BINARY_TAR_HASH: $(yaml-quote ${SERVER_BINARY_TAR_HASH})
|
|
||||||
PROJECT_ID: $(yaml-quote ${PROJECT})
|
|
||||||
NETWORK_PROJECT_ID: $(yaml-quote ${NETWORK_PROJECT})
|
|
||||||
SERVICE_CLUSTER_IP_RANGE: $(yaml-quote ${SERVICE_CLUSTER_IP_RANGE})
|
|
||||||
KUBERNETES_MASTER_NAME: $(yaml-quote ${KUBERNETES_MASTER_NAME})
|
|
||||||
ALLOCATE_NODE_CIDRS: $(yaml-quote ${ALLOCATE_NODE_CIDRS:-false})
|
|
||||||
ENABLE_CLUSTER_MONITORING: $(yaml-quote ${ENABLE_CLUSTER_MONITORING:-none})
|
|
||||||
ENABLE_METRICS_SERVER: $(yaml-quote ${ENABLE_METRICS_SERVER:-false})
|
|
||||||
ENABLE_METADATA_AGENT: $(yaml-quote ${ENABLE_METADATA_AGENT:-none})
|
|
||||||
METADATA_AGENT_VERSION: $(yaml-quote ${METADATA_AGENT_VERSION:-})
|
|
||||||
DOCKER_REGISTRY_MIRROR_URL: $(yaml-quote ${DOCKER_REGISTRY_MIRROR_URL:-})
|
|
||||||
ENABLE_L7_LOADBALANCING: $(yaml-quote ${ENABLE_L7_LOADBALANCING:-none})
|
|
||||||
ENABLE_CLUSTER_LOGGING: $(yaml-quote ${ENABLE_CLUSTER_LOGGING:-false})
|
|
||||||
ENABLE_CLUSTER_UI: $(yaml-quote ${ENABLE_CLUSTER_UI:-false})
|
|
||||||
ENABLE_NODE_PROBLEM_DETECTOR: $(yaml-quote ${ENABLE_NODE_PROBLEM_DETECTOR:-none})
|
|
||||||
NODE_PROBLEM_DETECTOR_VERSION: $(yaml-quote ${NODE_PROBLEM_DETECTOR_VERSION:-})
|
|
||||||
NODE_PROBLEM_DETECTOR_TAR_HASH: $(yaml-quote ${NODE_PROBLEM_DETECTOR_TAR_HASH:-})
|
|
||||||
ENABLE_NODE_LOGGING: $(yaml-quote ${ENABLE_NODE_LOGGING:-false})
|
|
||||||
ENABLE_RESCHEDULER: $(yaml-quote ${ENABLE_RESCHEDULER:-false})
|
|
||||||
LOGGING_DESTINATION: $(yaml-quote ${LOGGING_DESTINATION:-})
|
|
||||||
ELASTICSEARCH_LOGGING_REPLICAS: $(yaml-quote ${ELASTICSEARCH_LOGGING_REPLICAS:-})
|
|
||||||
ENABLE_CLUSTER_DNS: $(yaml-quote ${ENABLE_CLUSTER_DNS:-false})
|
|
||||||
CLUSTER_DNS_CORE_DNS: $(yaml-quote ${CLUSTER_DNS_CORE_DNS:-false})
|
|
||||||
DNS_SERVER_IP: $(yaml-quote ${DNS_SERVER_IP:-})
|
|
||||||
DNS_DOMAIN: $(yaml-quote ${DNS_DOMAIN:-})
|
|
||||||
ENABLE_DNS_HORIZONTAL_AUTOSCALER: $(yaml-quote ${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false})
|
|
||||||
KUBE_PROXY_DAEMONSET: $(yaml-quote ${KUBE_PROXY_DAEMONSET:-false})
|
|
||||||
KUBE_PROXY_TOKEN: $(yaml-quote ${KUBE_PROXY_TOKEN:-})
|
|
||||||
KUBE_PROXY_MODE: $(yaml-quote ${KUBE_PROXY_MODE:-iptables})
|
|
||||||
NODE_PROBLEM_DETECTOR_TOKEN: $(yaml-quote ${NODE_PROBLEM_DETECTOR_TOKEN:-})
|
|
||||||
ADMISSION_CONTROL: $(yaml-quote ${ADMISSION_CONTROL:-})
|
|
||||||
ENABLE_POD_SECURITY_POLICY: $(yaml-quote ${ENABLE_POD_SECURITY_POLICY:-})
|
|
||||||
MASTER_IP_RANGE: $(yaml-quote ${MASTER_IP_RANGE})
|
|
||||||
RUNTIME_CONFIG: $(yaml-quote ${RUNTIME_CONFIG})
|
|
||||||
CA_CERT: $(yaml-quote ${CA_CERT_BASE64:-})
|
|
||||||
KUBELET_CERT: $(yaml-quote ${KUBELET_CERT_BASE64:-})
|
|
||||||
KUBELET_KEY: $(yaml-quote ${KUBELET_KEY_BASE64:-})
|
|
||||||
NETWORK_PROVIDER: $(yaml-quote ${NETWORK_PROVIDER:-})
|
|
||||||
NETWORK_POLICY_PROVIDER: $(yaml-quote ${NETWORK_POLICY_PROVIDER:-})
|
|
||||||
PREPULL_E2E_IMAGES: $(yaml-quote ${PREPULL_E2E_IMAGES:-})
|
|
||||||
HAIRPIN_MODE: $(yaml-quote ${HAIRPIN_MODE:-})
|
|
||||||
E2E_STORAGE_TEST_ENVIRONMENT: $(yaml-quote ${E2E_STORAGE_TEST_ENVIRONMENT:-})
|
|
||||||
KUBE_DOCKER_REGISTRY: $(yaml-quote ${KUBE_DOCKER_REGISTRY:-})
|
|
||||||
KUBE_ADDON_REGISTRY: $(yaml-quote ${KUBE_ADDON_REGISTRY:-})
|
|
||||||
MULTIZONE: $(yaml-quote ${MULTIZONE:-})
|
|
||||||
NON_MASQUERADE_CIDR: $(yaml-quote ${NON_MASQUERADE_CIDR:-})
|
|
||||||
ENABLE_DEFAULT_STORAGE_CLASS: $(yaml-quote ${ENABLE_DEFAULT_STORAGE_CLASS:-})
|
|
||||||
ENABLE_APISERVER_BASIC_AUDIT: $(yaml-quote ${ENABLE_APISERVER_BASIC_AUDIT:-})
|
|
||||||
ENABLE_APISERVER_ADVANCED_AUDIT: $(yaml-quote ${ENABLE_APISERVER_ADVANCED_AUDIT:-})
|
|
||||||
ENABLE_CACHE_MUTATION_DETECTOR: $(yaml-quote ${ENABLE_CACHE_MUTATION_DETECTOR:-false})
|
|
||||||
ENABLE_PATCH_CONVERSION_DETECTOR: $(yaml-quote ${ENABLE_PATCH_CONVERSION_DETECTOR:-false})
|
|
||||||
ADVANCED_AUDIT_POLICY: $(yaml-quote ${ADVANCED_AUDIT_POLICY:-})
|
|
||||||
ADVANCED_AUDIT_BACKEND: $(yaml-quote ${ADVANCED_AUDIT_BACKEND:-log})
|
|
||||||
ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE:-})
|
|
||||||
ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_SIZE: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_SIZE:-})
|
|
||||||
ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_WAIT: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_WAIT:-})
|
|
||||||
ADVANCED_AUDIT_WEBHOOK_THROTTLE_QPS: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_THROTTLE_QPS:-})
|
|
||||||
ADVANCED_AUDIT_WEBHOOK_THROTTLE_BURST: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_THROTTLE_BURST:-})
|
|
||||||
ADVANCED_AUDIT_WEBHOOK_INITIAL_BACKOFF: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_INITIAL_BACKOFF:-})
|
|
||||||
GCE_API_ENDPOINT: $(yaml-quote ${GCE_API_ENDPOINT:-})
|
|
||||||
GCE_GLBC_IMAGE: $(yaml-quote ${GCE_GLBC_IMAGE:-})
|
|
||||||
PROMETHEUS_TO_SD_ENDPOINT: $(yaml-quote ${PROMETHEUS_TO_SD_ENDPOINT:-})
|
|
||||||
PROMETHEUS_TO_SD_PREFIX: $(yaml-quote ${PROMETHEUS_TO_SD_PREFIX:-})
|
|
||||||
ENABLE_PROMETHEUS_TO_SD: $(yaml-quote ${ENABLE_PROMETHEUS_TO_SD:-false})
|
|
||||||
ENABLE_POD_PRIORITY: $(yaml-quote ${ENABLE_POD_PRIORITY:-})
|
|
||||||
CONTAINER_RUNTIME: $(yaml-quote ${CONTAINER_RUNTIME:-})
|
|
||||||
CONTAINER_RUNTIME_ENDPOINT: $(yaml-quote ${CONTAINER_RUNTIME_ENDPOINT:-})
|
|
||||||
CONTAINER_RUNTIME_NAME: $(yaml-quote ${CONTAINER_RUNTIME_NAME:-})
|
|
||||||
NODE_LOCAL_SSDS_EXT: $(yaml-quote ${NODE_LOCAL_SSDS_EXT:-})
|
|
||||||
LOAD_IMAGE_COMMAND: $(yaml-quote ${LOAD_IMAGE_COMMAND:-})
|
|
||||||
EOF
|
|
||||||
if [ -n "${KUBELET_PORT:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
KUBELET_PORT: $(yaml-quote ${KUBELET_PORT})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${KUBE_APISERVER_REQUEST_TIMEOUT:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
KUBE_APISERVER_REQUEST_TIMEOUT: $(yaml-quote ${KUBE_APISERVER_REQUEST_TIMEOUT})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
TERMINATED_POD_GC_THRESHOLD: $(yaml-quote ${TERMINATED_POD_GC_THRESHOLD})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [[ "${master}" == "true" && ("${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci") || "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \
|
|
||||||
[[ "${master}" == "false" && ("${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci") || "${NODE_OS_DISTRIBUTION}" = "ubuntu" ]] ; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
KUBE_MANIFESTS_TAR_URL: $(yaml-quote ${kube_manifests_tar_url})
|
|
||||||
KUBE_MANIFESTS_TAR_HASH: $(yaml-quote ${KUBE_MANIFESTS_TAR_HASH})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${TEST_CLUSTER:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
TEST_CLUSTER: $(yaml-quote ${TEST_CLUSTER})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${KUBELET_TEST_ARGS:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
KUBELET_TEST_ARGS: $(yaml-quote ${KUBELET_TEST_ARGS})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${NODE_KUBELET_TEST_ARGS:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
NODE_KUBELET_TEST_ARGS: $(yaml-quote ${NODE_KUBELET_TEST_ARGS})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${MASTER_KUBELET_TEST_ARGS:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
MASTER_KUBELET_TEST_ARGS: $(yaml-quote ${MASTER_KUBELET_TEST_ARGS})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${KUBELET_TEST_LOG_LEVEL:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
KUBELET_TEST_LOG_LEVEL: $(yaml-quote ${KUBELET_TEST_LOG_LEVEL})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${DOCKER_TEST_LOG_LEVEL:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
DOCKER_TEST_LOG_LEVEL: $(yaml-quote ${DOCKER_TEST_LOG_LEVEL})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${DOCKER_LOG_DRIVER:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
DOCKER_LOG_DRIVER: $(yaml-quote ${DOCKER_LOG_DRIVER})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${DOCKER_LOG_MAX_SIZE:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
DOCKER_LOG_MAX_SIZE: $(yaml-quote ${DOCKER_LOG_MAX_SIZE})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${DOCKER_LOG_MAX_FILE:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
DOCKER_LOG_MAX_FILE: $(yaml-quote ${DOCKER_LOG_MAX_FILE})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${ENABLE_CUSTOM_METRICS:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
ENABLE_CUSTOM_METRICS: $(yaml-quote ${ENABLE_CUSTOM_METRICS})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${FEATURE_GATES:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
FEATURE_GATES: $(yaml-quote ${FEATURE_GATES})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${ROTATE_CERTIFICATES:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
ROTATE_CERTIFICATES: $(yaml-quote ${ROTATE_CERTIFICATES})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "gci" ]] ||
|
|
||||||
[[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "gci" ]]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
VOLUME_PLUGIN_DIR: $(yaml-quote ${VOLUME_PLUGIN_DIR:-/etc/srv/kubernetes/kubelet-plugins/volume/exec})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -n "${PROVIDER_VARS:-}" ]; then
|
|
||||||
local var_name
|
|
||||||
local var_value
|
|
||||||
|
|
||||||
for var_name in ${PROVIDER_VARS}; do
|
|
||||||
eval "local var_value=\$(yaml-quote \${${var_name}})"
|
|
||||||
cat >>$file <<EOF
|
|
||||||
${var_name}: ${var_value}
|
|
||||||
EOF
|
|
||||||
done
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ "${master}" == "true" ]]; then
|
|
||||||
# Master-only env vars.
|
|
||||||
cat >>$file <<EOF
|
|
||||||
KUBERNETES_MASTER: $(yaml-quote "true")
|
|
||||||
KUBE_USER: $(yaml-quote ${KUBE_USER})
|
|
||||||
KUBE_PASSWORD: $(yaml-quote ${KUBE_PASSWORD})
|
|
||||||
KUBE_BEARER_TOKEN: $(yaml-quote ${KUBE_BEARER_TOKEN})
|
|
||||||
MASTER_CERT: $(yaml-quote ${MASTER_CERT_BASE64:-})
|
|
||||||
MASTER_KEY: $(yaml-quote ${MASTER_KEY_BASE64:-})
|
|
||||||
KUBECFG_CERT: $(yaml-quote ${KUBECFG_CERT_BASE64:-})
|
|
||||||
KUBECFG_KEY: $(yaml-quote ${KUBECFG_KEY_BASE64:-})
|
|
||||||
KUBELET_APISERVER: $(yaml-quote ${KUBELET_APISERVER:-})
|
|
||||||
ENABLE_MANIFEST_URL: $(yaml-quote ${ENABLE_MANIFEST_URL:-false})
|
|
||||||
MANIFEST_URL: $(yaml-quote ${MANIFEST_URL:-})
|
|
||||||
MANIFEST_URL_HEADER: $(yaml-quote ${MANIFEST_URL_HEADER:-})
|
|
||||||
NUM_NODES: $(yaml-quote ${NUM_NODES})
|
|
||||||
STORAGE_BACKEND: $(yaml-quote ${STORAGE_BACKEND:-etcd3})
|
|
||||||
STORAGE_MEDIA_TYPE: $(yaml-quote ${STORAGE_MEDIA_TYPE:-})
|
|
||||||
ENABLE_GARBAGE_COLLECTOR: $(yaml-quote ${ENABLE_GARBAGE_COLLECTOR:-})
|
|
||||||
ENABLE_LEGACY_ABAC: $(yaml-quote ${ENABLE_LEGACY_ABAC:-})
|
|
||||||
MASTER_ADVERTISE_ADDRESS: $(yaml-quote ${MASTER_ADVERTISE_ADDRESS:-})
|
|
||||||
ETCD_CA_KEY: $(yaml-quote ${ETCD_CA_KEY_BASE64:-})
|
|
||||||
ETCD_CA_CERT: $(yaml-quote ${ETCD_CA_CERT_BASE64:-})
|
|
||||||
ETCD_PEER_KEY: $(yaml-quote ${ETCD_PEER_KEY_BASE64:-})
|
|
||||||
ETCD_PEER_CERT: $(yaml-quote ${ETCD_PEER_CERT_BASE64:-})
|
|
||||||
EOF
|
|
||||||
# KUBE_APISERVER_REQUEST_TIMEOUT_SEC (if set) controls the --request-timeout
|
|
||||||
# flag
|
|
||||||
if [ -n "${KUBE_APISERVER_REQUEST_TIMEOUT_SEC:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
KUBE_APISERVER_REQUEST_TIMEOUT_SEC: $(yaml-quote ${KUBE_APISERVER_REQUEST_TIMEOUT_SEC})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
# ETCD_IMAGE (if set) allows to use a custom etcd image.
|
|
||||||
if [ -n "${ETCD_IMAGE:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
ETCD_IMAGE: $(yaml-quote ${ETCD_IMAGE})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
# ETCD_DOCKER_REPOSITORY (if set) allows to use a custom etcd docker repository to pull the etcd image from.
|
|
||||||
if [ -n "${ETCD_DOCKER_REPOSITORY:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
ETCD_DOCKER_REPOSITORY: $(yaml-quote ${ETCD_DOCKER_REPOSITORY})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
# ETCD_VERSION (if set) allows you to use custom version of etcd.
|
|
||||||
# The main purpose of using it may be rollback of etcd v3 API,
|
|
||||||
# where we need 3.0.* image, but are rolling back to 2.3.7.
|
|
||||||
if [ -n "${ETCD_VERSION:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
ETCD_VERSION: $(yaml-quote ${ETCD_VERSION})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${ETCD_HOSTNAME:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
ETCD_HOSTNAME: $(yaml-quote ${ETCD_HOSTNAME})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC: $(yaml-quote ${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC: $(yaml-quote ${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${ETCD_COMPACTION_INTERVAL_SEC:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
ETCD_COMPACTION_INTERVAL_SEC: $(yaml-quote ${ETCD_COMPACTION_INTERVAL_SEC})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${ETCD_QUOTA_BACKEND_BYTES:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
ETCD_QUOTA_BACKEND_BYTES: $(yaml-quote ${ETCD_QUOTA_BACKEND_BYTES})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${APISERVER_TEST_ARGS:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
APISERVER_TEST_ARGS: $(yaml-quote ${APISERVER_TEST_ARGS})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${CONTROLLER_MANAGER_TEST_ARGS:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
CONTROLLER_MANAGER_TEST_ARGS: $(yaml-quote ${CONTROLLER_MANAGER_TEST_ARGS})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
CONTROLLER_MANAGER_TEST_LOG_LEVEL: $(yaml-quote ${CONTROLLER_MANAGER_TEST_LOG_LEVEL})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${SCHEDULER_TEST_ARGS:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
SCHEDULER_TEST_ARGS: $(yaml-quote ${SCHEDULER_TEST_ARGS})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${SCHEDULER_TEST_LOG_LEVEL:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
SCHEDULER_TEST_LOG_LEVEL: $(yaml-quote ${SCHEDULER_TEST_LOG_LEVEL})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${INITIAL_ETCD_CLUSTER:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
INITIAL_ETCD_CLUSTER: $(yaml-quote ${INITIAL_ETCD_CLUSTER})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${INITIAL_ETCD_CLUSTER_STATE:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
INITIAL_ETCD_CLUSTER_STATE: $(yaml-quote ${INITIAL_ETCD_CLUSTER_STATE})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${ETCD_QUORUM_READ:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
ETCD_QUORUM_READ: $(yaml-quote ${ETCD_QUORUM_READ})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${CLUSTER_SIGNING_DURATION:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
CLUSTER_SIGNING_DURATION: $(yaml-quote ${CLUSTER_SIGNING_DURATION})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [[ "${NODE_ACCELERATORS:-}" == *"type=nvidia"* ]]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
ENABLE_NVIDIA_GPU_DEVICE_PLUGIN: $(yaml-quote "true")
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${ADDON_MANAGER_LEADER_ELECTION:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
ADDON_MANAGER_LEADER_ELECTION: $(yaml-quote ${ADDON_MANAGER_LEADER_ELECTION})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
|
|
||||||
else
|
|
||||||
# Node-only env vars.
|
|
||||||
cat >>$file <<EOF
|
|
||||||
KUBERNETES_MASTER: $(yaml-quote "false")
|
|
||||||
ZONE: $(yaml-quote ${ZONE})
|
|
||||||
EXTRA_DOCKER_OPTS: $(yaml-quote ${EXTRA_DOCKER_OPTS:-})
|
|
||||||
EOF
|
|
||||||
if [ -n "${KUBEPROXY_TEST_ARGS:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
KUBEPROXY_TEST_ARGS: $(yaml-quote ${KUBEPROXY_TEST_ARGS})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${KUBEPROXY_TEST_LOG_LEVEL:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
KUBEPROXY_TEST_LOG_LEVEL: $(yaml-quote ${KUBEPROXY_TEST_LOG_LEVEL})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
if [ -n "${NODE_LABELS:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
NODE_LABELS: $(yaml-quote ${NODE_LABELS})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${NON_MASTER_NODE_LABELS:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
NON_MASTER_NODE_LABELS: $(yaml-quote ${NON_MASTER_NODE_LABELS})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${EVICTION_HARD:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
EVICTION_HARD: $(yaml-quote ${EVICTION_HARD})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
ENABLE_CLUSTER_AUTOSCALER: $(yaml-quote ${ENABLE_CLUSTER_AUTOSCALER})
|
|
||||||
AUTOSCALER_MIG_CONFIG: $(yaml-quote ${AUTOSCALER_MIG_CONFIG})
|
|
||||||
AUTOSCALER_EXPANDER_CONFIG: $(yaml-quote ${AUTOSCALER_EXPANDER_CONFIG})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
if [ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]; then
|
|
||||||
cat >>$file <<EOF
|
|
||||||
SCHEDULING_ALGORITHM_PROVIDER: $(yaml-quote ${SCHEDULING_ALGORITHM_PROVIDER})
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
function sha1sum-file() {
|
|
||||||
if which sha1sum >/dev/null 2>&1; then
|
|
||||||
sha1sum "$1" | awk '{ print $1 }'
|
|
||||||
else
|
|
||||||
shasum -a1 "$1" | awk '{ print $1 }'
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Create certificate pairs for the cluster.
|
|
||||||
# $1: The public IP for the master.
|
|
||||||
#
|
|
||||||
# These are used for static cert distribution (e.g. static clustering) at
|
|
||||||
# cluster creation time. This will be obsoleted once we implement dynamic
|
|
||||||
# clustering.
|
|
||||||
#
|
|
||||||
# The following certificate pairs are created:
|
|
||||||
#
|
|
||||||
# - ca (the cluster's certificate authority)
|
|
||||||
# - server
|
|
||||||
# - kubelet
|
|
||||||
# - kubecfg (for kubectl)
|
|
||||||
#
|
|
||||||
# TODO(roberthbailey): Replace easyrsa with a simple Go program to generate
|
|
||||||
# the certs that we need.
|
|
||||||
#
|
|
||||||
# Assumed vars
|
|
||||||
# KUBE_TEMP
|
|
||||||
# MASTER_NAME
|
|
||||||
#
|
|
||||||
# Vars set:
|
|
||||||
# CERT_DIR
|
|
||||||
# CA_CERT_BASE64
|
|
||||||
# MASTER_CERT_BASE64
|
|
||||||
# MASTER_KEY_BASE64
|
|
||||||
# KUBELET_CERT_BASE64
|
|
||||||
# KUBELET_KEY_BASE64
|
|
||||||
# KUBECFG_CERT_BASE64
|
|
||||||
# KUBECFG_KEY_BASE64
|
|
||||||
function create-certs {
|
|
||||||
local -r primary_cn="${1}"
|
|
||||||
|
|
||||||
# Determine extra certificate names for master
|
|
||||||
local octets=($(echo "${SERVICE_CLUSTER_IP_RANGE}" | sed -e 's|/.*||' -e 's/\./ /g'))
|
|
||||||
((octets[3]+=1))
|
|
||||||
local -r service_ip=$(echo "${octets[*]}" | sed 's/ /./g')
|
|
||||||
local sans=""
|
|
||||||
for extra in $@; do
|
|
||||||
if [[ -n "${extra}" ]]; then
|
|
||||||
sans="${sans}IP:${extra},"
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
sans="${sans}IP:${service_ip},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${DNS_DOMAIN},DNS:${MASTER_NAME}"
|
|
||||||
|
|
||||||
echo "Generating certs for alternate-names: ${sans}"
|
|
||||||
|
|
||||||
setup-easyrsa
|
|
||||||
PRIMARY_CN="${primary_cn}" SANS="${sans}" generate-certs
|
|
||||||
AGGREGATOR_PRIMARY_CN="${primary_cn}" AGGREGATOR_SANS="${sans}" generate-aggregator-certs
|
|
||||||
|
|
||||||
# By default, linux wraps base64 output every 76 cols, so we use 'tr -d' to remove whitespaces.
|
|
||||||
# Note 'base64 -w0' doesn't work on Mac OS X, which has different flags.
|
|
||||||
CA_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/ca.key" | base64 | tr -d '\r\n')
|
|
||||||
CA_CERT_BASE64=$(cat "${CERT_DIR}/pki/ca.crt" | base64 | tr -d '\r\n')
|
|
||||||
MASTER_CERT_BASE64=$(cat "${CERT_DIR}/pki/issued/${MASTER_NAME}.crt" | base64 | tr -d '\r\n')
|
|
||||||
MASTER_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/${MASTER_NAME}.key" | base64 | tr -d '\r\n')
|
|
||||||
KUBELET_CERT_BASE64=$(cat "${CERT_DIR}/pki/issued/kubelet.crt" | base64 | tr -d '\r\n')
|
|
||||||
KUBELET_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/kubelet.key" | base64 | tr -d '\r\n')
|
|
||||||
KUBECFG_CERT_BASE64=$(cat "${CERT_DIR}/pki/issued/kubecfg.crt" | base64 | tr -d '\r\n')
|
|
||||||
KUBECFG_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/kubecfg.key" | base64 | tr -d '\r\n')
|
|
||||||
KUBEAPISERVER_CERT_BASE64=$(cat "${CERT_DIR}/pki/issued/kube-apiserver.crt" | base64 | tr -d '\r\n')
|
|
||||||
KUBEAPISERVER_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/kube-apiserver.key" | base64 | tr -d '\r\n')
|
|
||||||
|
|
||||||
# Setting up an addition directory (beyond pki) as it is the simplest way to
|
|
||||||
# ensure we get a different CA pair to sign the proxy-client certs and which
|
|
||||||
# we can send CA public key to the user-apiserver to validate communication.
|
|
||||||
AGGREGATOR_CA_KEY_BASE64=$(cat "${AGGREGATOR_CERT_DIR}/pki/private/ca.key" | base64 | tr -d '\r\n')
|
|
||||||
REQUESTHEADER_CA_CERT_BASE64=$(cat "${AGGREGATOR_CERT_DIR}/pki/ca.crt" | base64 | tr -d '\r\n')
|
|
||||||
PROXY_CLIENT_CERT_BASE64=$(cat "${AGGREGATOR_CERT_DIR}/pki/issued/proxy-client.crt" | base64 | tr -d '\r\n')
|
|
||||||
PROXY_CLIENT_KEY_BASE64=$(cat "${AGGREGATOR_CERT_DIR}/pki/private/proxy-client.key" | base64 | tr -d '\r\n')
|
|
||||||
}
|
|
||||||
|
|
||||||
# Set up easy-rsa directory structure.
|
|
||||||
#
|
|
||||||
# Assumed vars
|
|
||||||
# KUBE_TEMP
|
|
||||||
#
|
|
||||||
# Vars set:
|
|
||||||
# CERT_DIR
|
|
||||||
# AGGREGATOR_CERT_DIR
|
|
||||||
function setup-easyrsa {
|
|
||||||
local -r cert_create_debug_output=$(mktemp "${KUBE_TEMP}/cert_create_debug_output.XXX")
|
|
||||||
# Note: This was heavily cribbed from make-ca-cert.sh
|
|
||||||
(set -x
|
|
||||||
cd "${KUBE_TEMP}"
|
|
||||||
curl -L -O --connect-timeout 20 --retry 6 --retry-delay 2 https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz
|
|
||||||
tar xzf easy-rsa.tar.gz
|
|
||||||
mkdir easy-rsa-master/kubelet
|
|
||||||
cp -r easy-rsa-master/easyrsa3/* easy-rsa-master/kubelet
|
|
||||||
mkdir easy-rsa-master/aggregator
|
|
||||||
cp -r easy-rsa-master/easyrsa3/* easy-rsa-master/aggregator) &>${cert_create_debug_output} || true
|
|
||||||
CERT_DIR="${KUBE_TEMP}/easy-rsa-master/easyrsa3"
|
|
||||||
AGGREGATOR_CERT_DIR="${KUBE_TEMP}/easy-rsa-master/aggregator"
|
|
||||||
if [ ! -x "${CERT_DIR}/easyrsa" -o ! -x "${AGGREGATOR_CERT_DIR}/easyrsa" ]; then
|
|
||||||
# TODO(roberthbailey,porridge): add better error handling here,
|
|
||||||
# see https://github.com/kubernetes/kubernetes/issues/55229
|
|
||||||
cat "${cert_create_debug_output}" >&2
|
|
||||||
echo "=== Failed to setup easy-rsa: Aborting ===" >&2
|
|
||||||
exit 2
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Runs the easy RSA commands to generate certificate files.
|
|
||||||
# The generated files are IN ${CERT_DIR}
|
|
||||||
#
|
|
||||||
# Assumed vars
|
|
||||||
# KUBE_TEMP
|
|
||||||
# MASTER_NAME
|
|
||||||
# CERT_DIR
|
|
||||||
# PRIMARY_CN: Primary canonical name
|
|
||||||
# SANS: Subject alternate names
|
|
||||||
#
|
|
||||||
#
|
|
||||||
# Runs the easy RSA commands to generate certificate files.
# The generated files are in ${CERT_DIR}.
#
# Assumed vars
#   KUBE_TEMP
#   MASTER_NAME
#   CERT_DIR
#   PRIMARY_CN: Primary canonical name
#   SANS: Subject alternate names
function generate-certs {
  local -r cert_create_debug_output=$(mktemp "${KUBE_TEMP}/cert_create_debug_output.XXX")
  # Note: This was heavily cribbed from make-ca-cert.sh
  # All generation runs traced inside one subshell; success is judged solely
  # by the presence of the expected output files below.
  (set -x
    cd "${CERT_DIR}"
    ./easyrsa init-pki
    # this puts the cert into pki/ca.crt and the key into pki/private/ca.key
    ./easyrsa --batch "--req-cn=${PRIMARY_CN}@$(date +%s)" build-ca nopass
    ./easyrsa --subject-alt-name="${SANS}" build-server-full "${MASTER_NAME}" nopass
    ./easyrsa build-client-full kube-apiserver nopass

    kube::util::ensure-cfssl "${KUBE_TEMP}/cfssl"

    # make the config for the signer
    echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment","client auth"]}}}' > "ca-config.json"
    # create the kubelet client cert with the correct groups
    echo '{"CN":"kubelet","names":[{"O":"system:nodes"}],"hosts":[""],"key":{"algo":"rsa","size":2048}}' | "${CFSSL_BIN}" gencert -ca=pki/ca.crt -ca-key=pki/private/ca.key -config=ca-config.json - | "${CFSSLJSON_BIN}" -bare kubelet
    # Move the cfssl outputs into the easyrsa pki layout the callers expect.
    mv "kubelet-key.pem" "pki/private/kubelet.key"
    mv "kubelet.pem" "pki/issued/kubelet.crt"
    rm -f "kubelet.csr"

    # Make a superuser client cert with subject "O=system:masters, CN=kubecfg"
    ./easyrsa --dn-mode=org \
      --req-cn=kubecfg --req-org=system:masters \
      --req-c= --req-st= --req-city= --req-email= --req-ou= \
      build-client-full kubecfg nopass) &>"${cert_create_debug_output}" || true
  local output_file_missing=0
  local output_file
  for output_file in \
    "${CERT_DIR}/pki/private/ca.key" \
    "${CERT_DIR}/pki/ca.crt" \
    "${CERT_DIR}/pki/issued/${MASTER_NAME}.crt" \
    "${CERT_DIR}/pki/private/${MASTER_NAME}.key" \
    "${CERT_DIR}/pki/issued/kubelet.crt" \
    "${CERT_DIR}/pki/private/kubelet.key" \
    "${CERT_DIR}/pki/issued/kubecfg.crt" \
    "${CERT_DIR}/pki/private/kubecfg.key" \
    "${CERT_DIR}/pki/issued/kube-apiserver.crt" \
    "${CERT_DIR}/pki/private/kube-apiserver.key"
  do
    # -s: the file must exist AND be non-empty.
    if [[ ! -s "${output_file}" ]]; then
      echo "Expected file ${output_file} not created" >&2
      output_file_missing=1
    fi
  done
  if (( output_file_missing )); then
    # TODO(roberthbailey,porridge): add better error handling here,
    # see https://github.com/kubernetes/kubernetes/issues/55229
    cat "${cert_create_debug_output}" >&2
    echo "=== Failed to generate master certificates: Aborting ===" >&2
    exit 2
  fi
}
|
|
||||||
|
|
||||||
# Runs the easy RSA commands to generate aggregator certificate files.
|
|
||||||
# The generated files are in ${AGGREGATOR_CERT_DIR}
|
|
||||||
#
|
|
||||||
# Assumed vars
|
|
||||||
# KUBE_TEMP
|
|
||||||
# AGGREGATOR_MASTER_NAME
|
|
||||||
# AGGREGATOR_CERT_DIR
|
|
||||||
# AGGREGATOR_PRIMARY_CN: Primary canonical name
|
|
||||||
# AGGREGATOR_SANS: Subject alternate names
|
|
||||||
#
|
|
||||||
#
|
|
||||||
# Runs the easy RSA commands to generate aggregator certificate files.
# The generated files are in ${AGGREGATOR_CERT_DIR}.
#
# Assumed vars
#   KUBE_TEMP
#   AGGREGATOR_MASTER_NAME
#   AGGREGATOR_CERT_DIR
#   AGGREGATOR_PRIMARY_CN: Primary canonical name
#   AGGREGATOR_SANS: Subject alternate names
function generate-aggregator-certs {
  local -r cert_create_debug_output=$(mktemp "${KUBE_TEMP}/cert_create_debug_output.XXX")
  # Note: This was heavily cribbed from make-ca-cert.sh
  # As in generate-certs: trace everything, validate via output files.
  (set -x
    cd "${KUBE_TEMP}/easy-rsa-master/aggregator"
    ./easyrsa init-pki
    # this puts the cert into pki/ca.crt and the key into pki/private/ca.key
    ./easyrsa --batch "--req-cn=${AGGREGATOR_PRIMARY_CN}@$(date +%s)" build-ca nopass
    ./easyrsa --subject-alt-name="${AGGREGATOR_SANS}" build-server-full "${AGGREGATOR_MASTER_NAME}" nopass
    ./easyrsa build-client-full aggregator-apiserver nopass

    kube::util::ensure-cfssl "${KUBE_TEMP}/cfssl"

    # make the config for the signer
    echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment","client auth"]}}}' > "ca-config.json"
    # create the aggregator client cert with the correct groups
    echo '{"CN":"aggregator","hosts":[""],"key":{"algo":"rsa","size":2048}}' | "${CFSSL_BIN}" gencert -ca=pki/ca.crt -ca-key=pki/private/ca.key -config=ca-config.json - | "${CFSSLJSON_BIN}" -bare proxy-client
    mv "proxy-client-key.pem" "pki/private/proxy-client.key"
    mv "proxy-client.pem" "pki/issued/proxy-client.crt"
    rm -f "proxy-client.csr"

    # Make a superuser client cert with subject "O=system:masters, CN=kubecfg"
    ./easyrsa --dn-mode=org \
      --req-cn=proxy-clientcfg --req-org=system:aggregator \
      --req-c= --req-st= --req-city= --req-email= --req-ou= \
      build-client-full proxy-clientcfg nopass) &>"${cert_create_debug_output}" || true
  local output_file_missing=0
  local output_file
  for output_file in \
    "${AGGREGATOR_CERT_DIR}/pki/private/ca.key" \
    "${AGGREGATOR_CERT_DIR}/pki/ca.crt" \
    "${AGGREGATOR_CERT_DIR}/pki/issued/proxy-client.crt" \
    "${AGGREGATOR_CERT_DIR}/pki/private/proxy-client.key"
  do
    # -s: the file must exist AND be non-empty.
    if [[ ! -s "${output_file}" ]]; then
      echo "Expected file ${output_file} not created" >&2
      output_file_missing=1
    fi
  done
  if (( output_file_missing )); then
    # TODO(roberthbailey,porridge): add better error handling here,
    # see https://github.com/kubernetes/kubernetes/issues/55229
    cat "${cert_create_debug_output}" >&2
    echo "=== Failed to generate aggregator certificates: Aborting ===" >&2
    exit 2
  fi
}
|
|
||||||
|
|
||||||
# Run the cfssl command to generates certificate files for etcd service, the
|
# Run the cfssl command to generates certificate files for etcd service, the
|
||||||
# certificate files will save in $1 directory.
|
# certificate files will save in $1 directory.
|
||||||
#
|
#
|
||||||
|
@ -1206,81 +453,6 @@ EOF
|
||||||
popd
|
popd
|
||||||
}
|
}
|
||||||
|
|
||||||
#
|
|
||||||
# Using provided master env, extracts value from provided key.
|
|
||||||
#
|
|
||||||
# Args:
|
|
||||||
# $1 master env (kube-env of master; result of calling get-master-env)
|
|
||||||
# $2 env key to use
|
|
||||||
# Using provided master env, extracts value from provided key.
#
# Args:
#   $1 master env (kube-env of master; result of calling get-master-env)
#   $2 env key to use
#
# Outputs the value (empty string if the key is absent) on stdout.
# NOTE(review): values containing a ':' or a literal single quote are
# truncated by the cut pipeline — pre-existing limitation, kept as-is.
function get-env-val() {
  local match
  # $() replaces the old backticks; '|| true' keeps a no-match grep from
  # tripping 'set -e' (grep exits 1 when nothing matches).
  match=$(echo "${1}" | grep -E "^${2}:" || true)
  if [[ -z "${match}" ]]; then
    echo ""
    return
  fi
  # Line format is "KEY: 'value'": take the field after the colon, then the
  # text between the single quotes.
  echo "${match}" | cut -d : -f 2 | cut -d \' -f 2
}
|
|
||||||
|
|
||||||
# Load the master env by calling get-master-env, and extract important values
|
|
||||||
function parse-master-env() {
  # Get required master env vars
  # NOTE(review): 'local v=$(cmd)' masks get-master-env's exit status; left
  # unchanged to preserve behavior.
  local master_env=$(get-master-env)
  # Each assignment below publishes a global consumed elsewhere (kube-up,
  # push scripts); the *_BASE64 names hold base64-encoded PEM material.
  KUBE_PROXY_TOKEN=$(get-env-val "${master_env}" "KUBE_PROXY_TOKEN")
  NODE_PROBLEM_DETECTOR_TOKEN=$(get-env-val "${master_env}" "NODE_PROBLEM_DETECTOR_TOKEN")
  CA_CERT_BASE64=$(get-env-val "${master_env}" "CA_CERT")
  CA_KEY_BASE64=$(get-env-val "${master_env}" "CA_KEY")
  KUBEAPISERVER_CERT_BASE64=$(get-env-val "${master_env}" "KUBEAPISERVER_CERT")
  KUBEAPISERVER_KEY_BASE64=$(get-env-val "${master_env}" "KUBEAPISERVER_KEY")
  EXTRA_DOCKER_OPTS=$(get-env-val "${master_env}" "EXTRA_DOCKER_OPTS")
  KUBELET_CERT_BASE64=$(get-env-val "${master_env}" "KUBELET_CERT")
  KUBELET_KEY_BASE64=$(get-env-val "${master_env}" "KUBELET_KEY")
  MASTER_CERT_BASE64=$(get-env-val "${master_env}" "MASTER_CERT")
  MASTER_KEY_BASE64=$(get-env-val "${master_env}" "MASTER_KEY")
  AGGREGATOR_CA_KEY_BASE64=$(get-env-val "${master_env}" "AGGREGATOR_CA_KEY")
  REQUESTHEADER_CA_CERT_BASE64=$(get-env-val "${master_env}" "REQUESTHEADER_CA_CERT")
  PROXY_CLIENT_CERT_BASE64=$(get-env-val "${master_env}" "PROXY_CLIENT_CERT")
  PROXY_CLIENT_KEY_BASE64=$(get-env-val "${master_env}" "PROXY_CLIENT_KEY")
  ENABLE_LEGACY_ABAC=$(get-env-val "${master_env}" "ENABLE_LEGACY_ABAC")
}
|
|
||||||
|
|
||||||
# Update or verify required gcloud components are installed
|
|
||||||
# at minimum required version.
|
|
||||||
# Assumed vars
|
|
||||||
# KUBE_PROMPT_FOR_UPDATE
|
|
||||||
# Update or verify required gcloud components are installed
# at minimum required version.
#
# Assumed vars
#   KUBE_PROMPT_FOR_UPDATE
function update-or-verify-gcloud() {
  local sudo_prefix=""
  # Escalate only when the gcloud install directory is not writable.
  # 'command -v' replaces the non-portable 'which'; everything is quoted.
  if [[ ! -w "$(dirname "$(command -v gcloud)")" ]]; then
    sudo_prefix="sudo"
  fi
  # update and install components as needed
  if [[ "${KUBE_PROMPT_FOR_UPDATE}" == "y" ]]; then
    ${sudo_prefix} gcloud ${gcloud_prompt:-} components install alpha
    ${sudo_prefix} gcloud ${gcloud_prompt:-} components install beta
    ${sudo_prefix} gcloud ${gcloud_prompt:-} components update
  else
    # Declaration split from assignment so a gcloud failure is not masked
    # by 'local' always succeeding.
    local version
    version=$(gcloud version --format=json)
    # Verify SDK version and presence of alpha/beta/core components by
    # parsing the JSON version report in Python.
    python -c'
import json,sys
from distutils import version

minVersion = version.LooseVersion("1.3.0")
required = [ "alpha", "beta", "core" ]
data = json.loads(sys.argv[1])
rel = data.get("Google Cloud SDK")
if rel != "HEAD" and version.LooseVersion(rel) < minVersion:
  print("gcloud version out of date ( < %s )" % minVersion)
  exit(1)
missing = []
for c in required:
  if not data.get(c):
    missing += [c]
if missing:
  for c in missing:
    print ("missing required gcloud component \"{0}\"".format(c))
  exit(1)
' "${version}"
  fi
}
|
|
||||||
|
|
||||||
# Check whether required client and server binaries exist, prompting to download
|
# Check whether required client and server binaries exist, prompting to download
|
||||||
# if missing.
|
# if missing.
|
||||||
# If KUBERNETES_SKIP_CONFIRM is set to y, we'll automatically download binaries
|
# If KUBERNETES_SKIP_CONFIRM is set to y, we'll automatically download binaries
|
||||||
|
|
|
@ -391,6 +391,62 @@ function detect-master() {
|
||||||
echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)" >&2
|
echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)" >&2
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Ensure KUBE_BEARER_TOKEN is populated: reuse the token from an existing
# kubeconfig context when one was named, otherwise generate a fresh one.
function load-or-gen-kube-bearertoken() {
  # A named context means credentials may already exist in kubeconfig.
  if [[ -n "${KUBE_CONTEXT:-}" ]]; then
    get-kubeconfig-bearertoken
  fi
  # Still empty (no context, or the context had no token): generate one.
  if [[ -z "${KUBE_BEARER_TOKEN:-}" ]]; then
    gen-kube-bearertoken
  fi
}
|
||||||
|
|
||||||
|
# Figure out which binary use on the server and assure it is available.
|
||||||
|
# If KUBE_VERSION is specified use binaries specified by it, otherwise
|
||||||
|
# use local dev binaries.
|
||||||
|
#
|
||||||
|
# Assumed vars:
|
||||||
|
# KUBE_VERSION
|
||||||
|
# KUBE_RELEASE_VERSION_REGEX
|
||||||
|
# KUBE_CI_VERSION_REGEX
|
||||||
|
# Vars set:
|
||||||
|
# KUBE_TAR_HASH
|
||||||
|
# SERVER_BINARY_TAR_URL
|
||||||
|
# SERVER_BINARY_TAR_HASH
|
||||||
|
# Figure out which binary use on the server and assure it is available.
# If KUBE_VERSION is specified use binaries specified by it, otherwise
# use local dev binaries.
#
# Assumed vars:
#   KUBE_VERSION
#   KUBE_RELEASE_VERSION_REGEX
#   KUBE_CI_VERSION_REGEX
# Vars set:
#   KUBE_TAR_HASH
#   SERVER_BINARY_TAR_URL
#   SERVER_BINARY_TAR_HASH
function tars_from_version() {
  # Pick whichever SHA-1 tool exists (GNU sha1sum vs BSD/macOS shasum).
  # 'command -v' replaces the non-portable 'which'.
  local sha1sum=""
  if command -v sha1sum >/dev/null 2>&1; then
    sha1sum="sha1sum"
  else
    sha1sum="shasum -a1"
  fi

  if [[ -z "${KUBE_VERSION-}" ]]; then
    # No version pinned: build/upload tars from the local dev tree.
    find-release-tars
    upload-server-tars
  elif [[ ${KUBE_VERSION} =~ ${KUBE_RELEASE_VERSION_REGEX} ]]; then
    SERVER_BINARY_TAR_URL="https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/kubernetes-server-linux-amd64.tar.gz"
    # TODO: Clean this up.
    KUBE_MANIFESTS_TAR_URL="${SERVER_BINARY_TAR_URL/server-linux-amd64/manifests}"
    # NB: ${sha1sum} is intentionally unquoted — it may be "shasum -a1".
    KUBE_MANIFESTS_TAR_HASH=$(curl "${KUBE_MANIFESTS_TAR_URL}" --silent --show-error | ${sha1sum} | awk '{print $1}')
  elif [[ ${KUBE_VERSION} =~ ${KUBE_CI_VERSION_REGEX} ]]; then
    SERVER_BINARY_TAR_URL="https://storage.googleapis.com/kubernetes-release-dev/ci/${KUBE_VERSION}/kubernetes-server-linux-amd64.tar.gz"
    # TODO: Clean this up.
    KUBE_MANIFESTS_TAR_URL="${SERVER_BINARY_TAR_URL/server-linux-amd64/manifests}"
    KUBE_MANIFESTS_TAR_HASH=$(curl "${KUBE_MANIFESTS_TAR_URL}" --silent --show-error | ${sha1sum} | awk '{print $1}')
  else
    echo "Version doesn't match regexp" >&2
    exit 1
  fi
  # Best-effort: a missing .sha1 is reported (to stderr) but not fatal.
  if ! SERVER_BINARY_TAR_HASH=$(curl -Ss --fail "${SERVER_BINARY_TAR_URL}.sha1"); then
    echo "Failure trying to curl release .sha1" >&2
  fi

  # The tarball itself, however, must exist.
  if ! curl -Ss --head "${SERVER_BINARY_TAR_URL}" >&/dev/null; then
    echo "Can't find release at ${SERVER_BINARY_TAR_URL}" >&2
    exit 1
  fi
}
|
||||||
|
|
||||||
# Reads kube-env metadata from master
|
# Reads kube-env metadata from master
|
||||||
#
|
#
|
||||||
# Assumed vars:
|
# Assumed vars:
|
||||||
|
@ -407,6 +463,748 @@ function get-master-env() {
|
||||||
'http://metadata/computeMetadata/v1/instance/attributes/kube-master-certs'" 2>/dev/null
|
'http://metadata/computeMetadata/v1/instance/attributes/kube-master-certs'" 2>/dev/null
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Quote something appropriate for a yaml string.
|
||||||
|
#
|
||||||
|
# TODO(zmerlynn): Note that this function doesn't so much "quote" as
|
||||||
|
# "strip out quotes", and we really should be using a YAML library for
|
||||||
|
# this, but PyYAML isn't shipped by default, and *rant rant rant ... SIGH*
|
||||||
|
function yaml-quote {
  # Join all arguments into one string, double every embedded single quote
  # (YAML single-quoted-scalar escaping), and wrap the result in quotes.
  local raw q="'"
  raw="$(echo "${@:-}")"
  printf "'%s'\n" "${raw//${q}/${q}${q}}"
}
|
||||||
|
|
||||||
|
# Writes the cluster name into a temporary file.
|
||||||
|
# Assumed vars
|
||||||
|
# CLUSTER_NAME
|
||||||
|
# Writes the cluster name (plus trailing newline) into a temporary file.
# Assumed vars
#   CLUSTER_NAME
#   KUBE_TEMP
function write-cluster-name {
  printf '%s\n' "${CLUSTER_NAME}" >"${KUBE_TEMP}/cluster-name.txt"
}
|
||||||
|
|
||||||
|
# Produce the master's kube-env and kube-master-certs YAML files under
# KUBE_TEMP, defaulting a couple of master-identity globals first.
function write-master-env {
  # If the user requested that the master be part of the cluster, point the
  # master kubelet at the master itself so it registers, unless the caller
  # already chose an apiserver.
  if [[ "${REGISTER_MASTER_KUBELET:-}" == "true" ]] && [[ -z "${KUBELET_APISERVER:-}" ]]; then
    KUBELET_APISERVER="${MASTER_NAME}"
  fi
  # Default the advertised master name when unset or empty.
  KUBERNETES_MASTER_NAME="${KUBERNETES_MASTER_NAME:-${MASTER_NAME}}"

  build-kube-env true "${KUBE_TEMP}/master-kube-env.yaml"
  build-kube-master-certs "${KUBE_TEMP}/kube-master-certs.yaml"
}
|
||||||
|
|
||||||
|
# Produce the node kube-env YAML file under KUBE_TEMP.
function write-node-env {
  # Default the advertised master name when unset or empty.
  KUBERNETES_MASTER_NAME="${KUBERNETES_MASTER_NAME:-${MASTER_NAME}}"

  build-kube-env false "${KUBE_TEMP}/node-kube-env.yaml"
}
|
||||||
|
|
||||||
|
# Write the kube-master-certs YAML file ($1) from the *_BASE64 globals.
# All values are optional; absent ones are emitted as empty YAML strings.
function build-kube-master-certs {
  local file=$1
  # ${file} is now quoted so paths containing spaces work.
  rm -f "${file}"
  cat >"${file}" <<EOF
KUBEAPISERVER_CERT: $(yaml-quote ${KUBEAPISERVER_CERT_BASE64:-})
KUBEAPISERVER_KEY: $(yaml-quote ${KUBEAPISERVER_KEY_BASE64:-})
CA_KEY: $(yaml-quote ${CA_KEY_BASE64:-})
AGGREGATOR_CA_KEY: $(yaml-quote ${AGGREGATOR_CA_KEY_BASE64:-})
REQUESTHEADER_CA_CERT: $(yaml-quote ${REQUESTHEADER_CA_CERT_BASE64:-})
PROXY_CLIENT_CERT: $(yaml-quote ${PROXY_CLIENT_CERT_BASE64:-})
PROXY_CLIENT_KEY: $(yaml-quote ${PROXY_CLIENT_KEY_BASE64:-})
EOF
}
|
||||||
|
|
||||||
|
# $1: if 'true', we're building a master yaml, else a node
|
||||||
|
function build-kube-env {
  # $1: "true" => master yaml, anything else => node yaml.
  # $2: output file path (overwritten).
  local master=$1
  local file=$2

  local server_binary_tar_url=$SERVER_BINARY_TAR_URL
  local kube_manifests_tar_url="${KUBE_MANIFESTS_TAR_URL:-}"
  # NOTE(review): split_csv presumably rewrites a comma-separated URL list
  # for Ubuntu images — confirm against its definition elsewhere.
  if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \
     [[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]] ; then
    # TODO: Support fallback .tar.gz settings on Container Linux
    server_binary_tar_url=$(split_csv "${SERVER_BINARY_TAR_URL}")
    kube_manifests_tar_url=$(split_csv "${KUBE_MANIFESTS_TAR_URL}")
  fi

  # Unconditional settings, emitted for both masters and nodes.
  rm -f ${file}
  cat >$file <<EOF
CLUSTER_NAME: $(yaml-quote ${CLUSTER_NAME})
ENV_TIMESTAMP: $(yaml-quote $(date -u +%Y-%m-%dT%T%z))
INSTANCE_PREFIX: $(yaml-quote ${INSTANCE_PREFIX})
NODE_INSTANCE_PREFIX: $(yaml-quote ${NODE_INSTANCE_PREFIX})
NODE_TAGS: $(yaml-quote ${NODE_TAGS:-})
NODE_NETWORK: $(yaml-quote ${NETWORK:-})
NODE_SUBNETWORK: $(yaml-quote ${SUBNETWORK:-})
CLUSTER_IP_RANGE: $(yaml-quote ${CLUSTER_IP_RANGE:-10.244.0.0/16})
SERVER_BINARY_TAR_URL: $(yaml-quote ${server_binary_tar_url})
SERVER_BINARY_TAR_HASH: $(yaml-quote ${SERVER_BINARY_TAR_HASH})
PROJECT_ID: $(yaml-quote ${PROJECT})
NETWORK_PROJECT_ID: $(yaml-quote ${NETWORK_PROJECT})
SERVICE_CLUSTER_IP_RANGE: $(yaml-quote ${SERVICE_CLUSTER_IP_RANGE})
KUBERNETES_MASTER_NAME: $(yaml-quote ${KUBERNETES_MASTER_NAME})
ALLOCATE_NODE_CIDRS: $(yaml-quote ${ALLOCATE_NODE_CIDRS:-false})
ENABLE_CLUSTER_MONITORING: $(yaml-quote ${ENABLE_CLUSTER_MONITORING:-none})
ENABLE_METRICS_SERVER: $(yaml-quote ${ENABLE_METRICS_SERVER:-false})
ENABLE_METADATA_AGENT: $(yaml-quote ${ENABLE_METADATA_AGENT:-none})
METADATA_AGENT_VERSION: $(yaml-quote ${METADATA_AGENT_VERSION:-})
DOCKER_REGISTRY_MIRROR_URL: $(yaml-quote ${DOCKER_REGISTRY_MIRROR_URL:-})
ENABLE_L7_LOADBALANCING: $(yaml-quote ${ENABLE_L7_LOADBALANCING:-none})
ENABLE_CLUSTER_LOGGING: $(yaml-quote ${ENABLE_CLUSTER_LOGGING:-false})
ENABLE_CLUSTER_UI: $(yaml-quote ${ENABLE_CLUSTER_UI:-false})
ENABLE_NODE_PROBLEM_DETECTOR: $(yaml-quote ${ENABLE_NODE_PROBLEM_DETECTOR:-none})
NODE_PROBLEM_DETECTOR_VERSION: $(yaml-quote ${NODE_PROBLEM_DETECTOR_VERSION:-})
NODE_PROBLEM_DETECTOR_TAR_HASH: $(yaml-quote ${NODE_PROBLEM_DETECTOR_TAR_HASH:-})
ENABLE_NODE_LOGGING: $(yaml-quote ${ENABLE_NODE_LOGGING:-false})
ENABLE_RESCHEDULER: $(yaml-quote ${ENABLE_RESCHEDULER:-false})
LOGGING_DESTINATION: $(yaml-quote ${LOGGING_DESTINATION:-})
ELASTICSEARCH_LOGGING_REPLICAS: $(yaml-quote ${ELASTICSEARCH_LOGGING_REPLICAS:-})
ENABLE_CLUSTER_DNS: $(yaml-quote ${ENABLE_CLUSTER_DNS:-false})
CLUSTER_DNS_CORE_DNS: $(yaml-quote ${CLUSTER_DNS_CORE_DNS:-false})
DNS_SERVER_IP: $(yaml-quote ${DNS_SERVER_IP:-})
DNS_DOMAIN: $(yaml-quote ${DNS_DOMAIN:-})
ENABLE_DNS_HORIZONTAL_AUTOSCALER: $(yaml-quote ${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false})
KUBE_PROXY_DAEMONSET: $(yaml-quote ${KUBE_PROXY_DAEMONSET:-false})
KUBE_PROXY_TOKEN: $(yaml-quote ${KUBE_PROXY_TOKEN:-})
KUBE_PROXY_MODE: $(yaml-quote ${KUBE_PROXY_MODE:-iptables})
NODE_PROBLEM_DETECTOR_TOKEN: $(yaml-quote ${NODE_PROBLEM_DETECTOR_TOKEN:-})
ADMISSION_CONTROL: $(yaml-quote ${ADMISSION_CONTROL:-})
ENABLE_POD_SECURITY_POLICY: $(yaml-quote ${ENABLE_POD_SECURITY_POLICY:-})
MASTER_IP_RANGE: $(yaml-quote ${MASTER_IP_RANGE})
RUNTIME_CONFIG: $(yaml-quote ${RUNTIME_CONFIG})
CA_CERT: $(yaml-quote ${CA_CERT_BASE64:-})
KUBELET_CERT: $(yaml-quote ${KUBELET_CERT_BASE64:-})
KUBELET_KEY: $(yaml-quote ${KUBELET_KEY_BASE64:-})
NETWORK_PROVIDER: $(yaml-quote ${NETWORK_PROVIDER:-})
NETWORK_POLICY_PROVIDER: $(yaml-quote ${NETWORK_POLICY_PROVIDER:-})
PREPULL_E2E_IMAGES: $(yaml-quote ${PREPULL_E2E_IMAGES:-})
HAIRPIN_MODE: $(yaml-quote ${HAIRPIN_MODE:-})
E2E_STORAGE_TEST_ENVIRONMENT: $(yaml-quote ${E2E_STORAGE_TEST_ENVIRONMENT:-})
KUBE_DOCKER_REGISTRY: $(yaml-quote ${KUBE_DOCKER_REGISTRY:-})
KUBE_ADDON_REGISTRY: $(yaml-quote ${KUBE_ADDON_REGISTRY:-})
MULTIZONE: $(yaml-quote ${MULTIZONE:-})
NON_MASQUERADE_CIDR: $(yaml-quote ${NON_MASQUERADE_CIDR:-})
ENABLE_DEFAULT_STORAGE_CLASS: $(yaml-quote ${ENABLE_DEFAULT_STORAGE_CLASS:-})
ENABLE_APISERVER_BASIC_AUDIT: $(yaml-quote ${ENABLE_APISERVER_BASIC_AUDIT:-})
ENABLE_APISERVER_ADVANCED_AUDIT: $(yaml-quote ${ENABLE_APISERVER_ADVANCED_AUDIT:-})
ENABLE_CACHE_MUTATION_DETECTOR: $(yaml-quote ${ENABLE_CACHE_MUTATION_DETECTOR:-false})
ENABLE_PATCH_CONVERSION_DETECTOR: $(yaml-quote ${ENABLE_PATCH_CONVERSION_DETECTOR:-false})
ADVANCED_AUDIT_POLICY: $(yaml-quote ${ADVANCED_AUDIT_POLICY:-})
ADVANCED_AUDIT_BACKEND: $(yaml-quote ${ADVANCED_AUDIT_BACKEND:-log})
ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE:-})
ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_SIZE: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_SIZE:-})
ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_WAIT: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_WAIT:-})
ADVANCED_AUDIT_WEBHOOK_THROTTLE_QPS: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_THROTTLE_QPS:-})
ADVANCED_AUDIT_WEBHOOK_THROTTLE_BURST: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_THROTTLE_BURST:-})
ADVANCED_AUDIT_WEBHOOK_INITIAL_BACKOFF: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_INITIAL_BACKOFF:-})
GCE_API_ENDPOINT: $(yaml-quote ${GCE_API_ENDPOINT:-})
GCE_GLBC_IMAGE: $(yaml-quote ${GCE_GLBC_IMAGE:-})
PROMETHEUS_TO_SD_ENDPOINT: $(yaml-quote ${PROMETHEUS_TO_SD_ENDPOINT:-})
PROMETHEUS_TO_SD_PREFIX: $(yaml-quote ${PROMETHEUS_TO_SD_PREFIX:-})
ENABLE_PROMETHEUS_TO_SD: $(yaml-quote ${ENABLE_PROMETHEUS_TO_SD:-false})
ENABLE_POD_PRIORITY: $(yaml-quote ${ENABLE_POD_PRIORITY:-})
CONTAINER_RUNTIME: $(yaml-quote ${CONTAINER_RUNTIME:-})
CONTAINER_RUNTIME_ENDPOINT: $(yaml-quote ${CONTAINER_RUNTIME_ENDPOINT:-})
CONTAINER_RUNTIME_NAME: $(yaml-quote ${CONTAINER_RUNTIME_NAME:-})
NODE_LOCAL_SSDS_EXT: $(yaml-quote ${NODE_LOCAL_SSDS_EXT:-})
LOAD_IMAGE_COMMAND: $(yaml-quote ${LOAD_IMAGE_COMMAND:-})
EOF
  # Optional settings: each is appended only when the corresponding global
  # is non-empty, so the startup scripts can distinguish "unset" from "".
  if [ -n "${KUBELET_PORT:-}" ]; then
    cat >>$file <<EOF
KUBELET_PORT: $(yaml-quote ${KUBELET_PORT})
EOF
  fi
  if [ -n "${KUBE_APISERVER_REQUEST_TIMEOUT:-}" ]; then
    cat >>$file <<EOF
KUBE_APISERVER_REQUEST_TIMEOUT: $(yaml-quote ${KUBE_APISERVER_REQUEST_TIMEOUT})
EOF
  fi
  if [ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]; then
    cat >>$file <<EOF
TERMINATED_POD_GC_THRESHOLD: $(yaml-quote ${TERMINATED_POD_GC_THRESHOLD})
EOF
  fi
  # Manifests tar is only used by trusty/gci/ubuntu images.
  if [[ "${master}" == "true" && ("${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci") || "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \
     [[ "${master}" == "false" && ("${NODE_OS_DISTRIBUTION}" == "trusty" || "${NODE_OS_DISTRIBUTION}" == "gci") || "${NODE_OS_DISTRIBUTION}" = "ubuntu" ]] ; then
    cat >>$file <<EOF
KUBE_MANIFESTS_TAR_URL: $(yaml-quote ${kube_manifests_tar_url})
KUBE_MANIFESTS_TAR_HASH: $(yaml-quote ${KUBE_MANIFESTS_TAR_HASH})
EOF
  fi
  if [ -n "${TEST_CLUSTER:-}" ]; then
    cat >>$file <<EOF
TEST_CLUSTER: $(yaml-quote ${TEST_CLUSTER})
EOF
  fi
  if [ -n "${KUBELET_TEST_ARGS:-}" ]; then
    cat >>$file <<EOF
KUBELET_TEST_ARGS: $(yaml-quote ${KUBELET_TEST_ARGS})
EOF
  fi
  if [ -n "${NODE_KUBELET_TEST_ARGS:-}" ]; then
    cat >>$file <<EOF
NODE_KUBELET_TEST_ARGS: $(yaml-quote ${NODE_KUBELET_TEST_ARGS})
EOF
  fi
  if [ -n "${MASTER_KUBELET_TEST_ARGS:-}" ]; then
    cat >>$file <<EOF
MASTER_KUBELET_TEST_ARGS: $(yaml-quote ${MASTER_KUBELET_TEST_ARGS})
EOF
  fi
  if [ -n "${KUBELET_TEST_LOG_LEVEL:-}" ]; then
    cat >>$file <<EOF
KUBELET_TEST_LOG_LEVEL: $(yaml-quote ${KUBELET_TEST_LOG_LEVEL})
EOF
  fi
  if [ -n "${DOCKER_TEST_LOG_LEVEL:-}" ]; then
    cat >>$file <<EOF
DOCKER_TEST_LOG_LEVEL: $(yaml-quote ${DOCKER_TEST_LOG_LEVEL})
EOF
  fi
  if [ -n "${DOCKER_LOG_DRIVER:-}" ]; then
    cat >>$file <<EOF
DOCKER_LOG_DRIVER: $(yaml-quote ${DOCKER_LOG_DRIVER})
EOF
  fi
  if [ -n "${DOCKER_LOG_MAX_SIZE:-}" ]; then
    cat >>$file <<EOF
DOCKER_LOG_MAX_SIZE: $(yaml-quote ${DOCKER_LOG_MAX_SIZE})
EOF
  fi
  if [ -n "${DOCKER_LOG_MAX_FILE:-}" ]; then
    cat >>$file <<EOF
DOCKER_LOG_MAX_FILE: $(yaml-quote ${DOCKER_LOG_MAX_FILE})
EOF
  fi
  if [ -n "${ENABLE_CUSTOM_METRICS:-}" ]; then
    cat >>$file <<EOF
ENABLE_CUSTOM_METRICS: $(yaml-quote ${ENABLE_CUSTOM_METRICS})
EOF
  fi
  if [ -n "${FEATURE_GATES:-}" ]; then
    cat >>$file <<EOF
FEATURE_GATES: $(yaml-quote ${FEATURE_GATES})
EOF
  fi
  if [ -n "${ROTATE_CERTIFICATES:-}" ]; then
    cat >>$file <<EOF
ROTATE_CERTIFICATES: $(yaml-quote ${ROTATE_CERTIFICATES})
EOF
  fi
  if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "gci" ]] ||
     [[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "gci" ]]; then
    cat >>$file <<EOF
VOLUME_PLUGIN_DIR: $(yaml-quote ${VOLUME_PLUGIN_DIR:-/etc/srv/kubernetes/kubelet-plugins/volume/exec})
EOF
  fi

  # PROVIDER_VARS names additional globals to forward verbatim into the env.
  # NOTE(review): eval trusts PROVIDER_VARS to contain only identifiers.
  if [ -n "${PROVIDER_VARS:-}" ]; then
    local var_name
    local var_value

    for var_name in ${PROVIDER_VARS}; do
      eval "local var_value=\$(yaml-quote \${${var_name}})"
      cat >>$file <<EOF
${var_name}: ${var_value}
EOF
    done
  fi

  if [[ "${master}" == "true" ]]; then
    # Master-only env vars.
    cat >>$file <<EOF
KUBERNETES_MASTER: $(yaml-quote "true")
KUBE_USER: $(yaml-quote ${KUBE_USER})
KUBE_PASSWORD: $(yaml-quote ${KUBE_PASSWORD})
KUBE_BEARER_TOKEN: $(yaml-quote ${KUBE_BEARER_TOKEN})
MASTER_CERT: $(yaml-quote ${MASTER_CERT_BASE64:-})
MASTER_KEY: $(yaml-quote ${MASTER_KEY_BASE64:-})
KUBECFG_CERT: $(yaml-quote ${KUBECFG_CERT_BASE64:-})
KUBECFG_KEY: $(yaml-quote ${KUBECFG_KEY_BASE64:-})
KUBELET_APISERVER: $(yaml-quote ${KUBELET_APISERVER:-})
ENABLE_MANIFEST_URL: $(yaml-quote ${ENABLE_MANIFEST_URL:-false})
MANIFEST_URL: $(yaml-quote ${MANIFEST_URL:-})
MANIFEST_URL_HEADER: $(yaml-quote ${MANIFEST_URL_HEADER:-})
NUM_NODES: $(yaml-quote ${NUM_NODES})
STORAGE_BACKEND: $(yaml-quote ${STORAGE_BACKEND:-etcd3})
STORAGE_MEDIA_TYPE: $(yaml-quote ${STORAGE_MEDIA_TYPE:-})
ENABLE_GARBAGE_COLLECTOR: $(yaml-quote ${ENABLE_GARBAGE_COLLECTOR:-})
ENABLE_LEGACY_ABAC: $(yaml-quote ${ENABLE_LEGACY_ABAC:-})
MASTER_ADVERTISE_ADDRESS: $(yaml-quote ${MASTER_ADVERTISE_ADDRESS:-})
ETCD_CA_KEY: $(yaml-quote ${ETCD_CA_KEY_BASE64:-})
ETCD_CA_CERT: $(yaml-quote ${ETCD_CA_CERT_BASE64:-})
ETCD_PEER_KEY: $(yaml-quote ${ETCD_PEER_KEY_BASE64:-})
ETCD_PEER_CERT: $(yaml-quote ${ETCD_PEER_CERT_BASE64:-})
EOF
    # KUBE_APISERVER_REQUEST_TIMEOUT_SEC (if set) controls the --request-timeout
    # flag
    if [ -n "${KUBE_APISERVER_REQUEST_TIMEOUT_SEC:-}" ]; then
      cat >>$file <<EOF
KUBE_APISERVER_REQUEST_TIMEOUT_SEC: $(yaml-quote ${KUBE_APISERVER_REQUEST_TIMEOUT_SEC})
EOF
    fi
    # ETCD_IMAGE (if set) allows to use a custom etcd image.
    if [ -n "${ETCD_IMAGE:-}" ]; then
      cat >>$file <<EOF
ETCD_IMAGE: $(yaml-quote ${ETCD_IMAGE})
EOF
    fi
    # ETCD_DOCKER_REPOSITORY (if set) allows to use a custom etcd docker repository to pull the etcd image from.
    if [ -n "${ETCD_DOCKER_REPOSITORY:-}" ]; then
      cat >>$file <<EOF
ETCD_DOCKER_REPOSITORY: $(yaml-quote ${ETCD_DOCKER_REPOSITORY})
EOF
    fi
    # ETCD_VERSION (if set) allows you to use custom version of etcd.
    # The main purpose of using it may be rollback of etcd v3 API,
    # where we need 3.0.* image, but are rolling back to 2.3.7.
    if [ -n "${ETCD_VERSION:-}" ]; then
      cat >>$file <<EOF
ETCD_VERSION: $(yaml-quote ${ETCD_VERSION})
EOF
    fi
    if [ -n "${ETCD_HOSTNAME:-}" ]; then
      cat >>$file <<EOF
ETCD_HOSTNAME: $(yaml-quote ${ETCD_HOSTNAME})
EOF
    fi
    if [ -n "${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-}" ]; then
      cat >>$file <<EOF
ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC: $(yaml-quote ${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC})
EOF
    fi
    if [ -n "${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC:-}" ]; then
      cat >>$file <<EOF
KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC: $(yaml-quote ${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC})
EOF
    fi
    if [ -n "${ETCD_COMPACTION_INTERVAL_SEC:-}" ]; then
      cat >>$file <<EOF
ETCD_COMPACTION_INTERVAL_SEC: $(yaml-quote ${ETCD_COMPACTION_INTERVAL_SEC})
EOF
    fi
    if [ -n "${ETCD_QUOTA_BACKEND_BYTES:-}" ]; then
      cat >>$file <<EOF
ETCD_QUOTA_BACKEND_BYTES: $(yaml-quote ${ETCD_QUOTA_BACKEND_BYTES})
EOF
    fi
    if [ -n "${APISERVER_TEST_ARGS:-}" ]; then
      cat >>$file <<EOF
APISERVER_TEST_ARGS: $(yaml-quote ${APISERVER_TEST_ARGS})
EOF
    fi
    if [ -n "${CONTROLLER_MANAGER_TEST_ARGS:-}" ]; then
      cat >>$file <<EOF
CONTROLLER_MANAGER_TEST_ARGS: $(yaml-quote ${CONTROLLER_MANAGER_TEST_ARGS})
EOF
    fi
    if [ -n "${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-}" ]; then
      cat >>$file <<EOF
CONTROLLER_MANAGER_TEST_LOG_LEVEL: $(yaml-quote ${CONTROLLER_MANAGER_TEST_LOG_LEVEL})
EOF
    fi
    if [ -n "${SCHEDULER_TEST_ARGS:-}" ]; then
      cat >>$file <<EOF
SCHEDULER_TEST_ARGS: $(yaml-quote ${SCHEDULER_TEST_ARGS})
EOF
    fi
    if [ -n "${SCHEDULER_TEST_LOG_LEVEL:-}" ]; then
      cat >>$file <<EOF
SCHEDULER_TEST_LOG_LEVEL: $(yaml-quote ${SCHEDULER_TEST_LOG_LEVEL})
EOF
    fi
    if [ -n "${INITIAL_ETCD_CLUSTER:-}" ]; then
      cat >>$file <<EOF
INITIAL_ETCD_CLUSTER: $(yaml-quote ${INITIAL_ETCD_CLUSTER})
EOF
    fi
    if [ -n "${INITIAL_ETCD_CLUSTER_STATE:-}" ]; then
      cat >>$file <<EOF
INITIAL_ETCD_CLUSTER_STATE: $(yaml-quote ${INITIAL_ETCD_CLUSTER_STATE})
EOF
    fi
    if [ -n "${ETCD_QUORUM_READ:-}" ]; then
      cat >>$file <<EOF
ETCD_QUORUM_READ: $(yaml-quote ${ETCD_QUORUM_READ})
EOF
    fi
    if [ -n "${CLUSTER_SIGNING_DURATION:-}" ]; then
      cat >>$file <<EOF
CLUSTER_SIGNING_DURATION: $(yaml-quote ${CLUSTER_SIGNING_DURATION})
EOF
    fi
    # Enable the GPU device plugin when any nvidia accelerator is requested.
    if [[ "${NODE_ACCELERATORS:-}" == *"type=nvidia"* ]]; then
      cat >>$file <<EOF
ENABLE_NVIDIA_GPU_DEVICE_PLUGIN: $(yaml-quote "true")
EOF
    fi
    if [ -n "${ADDON_MANAGER_LEADER_ELECTION:-}" ]; then
      cat >>$file <<EOF
ADDON_MANAGER_LEADER_ELECTION: $(yaml-quote ${ADDON_MANAGER_LEADER_ELECTION})
EOF
    fi

  else
    # Node-only env vars.
    cat >>$file <<EOF
KUBERNETES_MASTER: $(yaml-quote "false")
ZONE: $(yaml-quote ${ZONE})
EXTRA_DOCKER_OPTS: $(yaml-quote ${EXTRA_DOCKER_OPTS:-})
EOF
    if [ -n "${KUBEPROXY_TEST_ARGS:-}" ]; then
      cat >>$file <<EOF
KUBEPROXY_TEST_ARGS: $(yaml-quote ${KUBEPROXY_TEST_ARGS})
EOF
    fi
    if [ -n "${KUBEPROXY_TEST_LOG_LEVEL:-}" ]; then
      cat >>$file <<EOF
KUBEPROXY_TEST_LOG_LEVEL: $(yaml-quote ${KUBEPROXY_TEST_LOG_LEVEL})
EOF
    fi
  fi
  # Settings below apply to both masters and nodes again.
  if [ -n "${NODE_LABELS:-}" ]; then
    cat >>$file <<EOF
NODE_LABELS: $(yaml-quote ${NODE_LABELS})
EOF
  fi
  if [ -n "${NON_MASTER_NODE_LABELS:-}" ]; then
    cat >>$file <<EOF
NON_MASTER_NODE_LABELS: $(yaml-quote ${NON_MASTER_NODE_LABELS})
EOF
  fi
  if [ -n "${EVICTION_HARD:-}" ]; then
    cat >>$file <<EOF
EVICTION_HARD: $(yaml-quote ${EVICTION_HARD})
EOF
  fi
  # NOTE(review): ENABLE_CLUSTER_AUTOSCALER is referenced without a :-
  # default; under 'set -u' it must be set by the caller.
  if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
    cat >>$file <<EOF
ENABLE_CLUSTER_AUTOSCALER: $(yaml-quote ${ENABLE_CLUSTER_AUTOSCALER})
AUTOSCALER_MIG_CONFIG: $(yaml-quote ${AUTOSCALER_MIG_CONFIG})
AUTOSCALER_EXPANDER_CONFIG: $(yaml-quote ${AUTOSCALER_EXPANDER_CONFIG})
EOF
  fi
  if [ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]; then
    cat >>$file <<EOF
SCHEDULING_ALGORITHM_PROVIDER: $(yaml-quote ${SCHEDULING_ALGORITHM_PROVIDER})
EOF
  fi
}
|
||||||
|
|
||||||
|
# Print the SHA-1 digest (hex) of the file given as $1 to stdout.
#
# Prefers GNU coreutils 'sha1sum'; falls back to BSD/macOS 'shasum -a1'.
# Uses 'command -v' rather than 'which' (POSIX, builtin, no extra process).
function sha1sum-file() {
  if command -v sha1sum >/dev/null 2>&1; then
    sha1sum "$1" | awk '{ print $1 }'
  else
    shasum -a1 "$1" | awk '{ print $1 }'
  fi
}
|
||||||
|
|
||||||
|
# Create certificate pairs for the cluster.
# $1: The public IP for the master.
# Remaining args: extra IPs to add as subject-alternate names.
#
# These are used for static cert distribution (e.g. static clustering) at
# cluster creation time. This will be obsoleted once we implement dynamic
# clustering.
#
# The following certificate pairs are created:
#
#  - ca (the cluster's certificate authority)
#  - server
#  - kubelet
#  - kubecfg (for kubectl)
#
# TODO(roberthbailey): Replace easyrsa with a simple Go program to generate
# the certs that we need.
#
# Assumed vars
#   KUBE_TEMP
#   MASTER_NAME
#   SERVICE_CLUSTER_IP_RANGE
#   DNS_DOMAIN
#
# Vars set:
#   CERT_DIR
#   CA_CERT_BASE64
#   MASTER_CERT_BASE64
#   MASTER_KEY_BASE64
#   KUBELET_CERT_BASE64
#   KUBELET_KEY_BASE64
#   KUBECFG_CERT_BASE64
#   KUBECFG_KEY_BASE64
function create-certs {
  local -r primary_cn="${1}"

  # Determine extra certificate names for master: the first usable address of
  # the service CIDR is the in-cluster "kubernetes" service IP.
  local octets=($(echo "${SERVICE_CLUSTER_IP_RANGE}" | sed -e 's|/.*||' -e 's/\./ /g'))
  ((octets[3]+=1))
  local -r service_ip=$(echo "${octets[*]}" | sed 's/ /./g')
  local sans=""
  # "$@" (quoted) iterates each extra IP as its own word; the unquoted form
  # would re-split and glob the arguments.
  for extra in "$@"; do
    if [[ -n "${extra}" ]]; then
      sans="${sans}IP:${extra},"
    fi
  done
  sans="${sans}IP:${service_ip},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${DNS_DOMAIN},DNS:${MASTER_NAME}"

  echo "Generating certs for alternate-names: ${sans}"

  setup-easyrsa
  PRIMARY_CN="${primary_cn}" SANS="${sans}" generate-certs
  AGGREGATOR_PRIMARY_CN="${primary_cn}" AGGREGATOR_SANS="${sans}" generate-aggregator-certs

  # By default, linux wraps base64 output every 76 cols, so we use 'tr -d' to remove whitespaces.
  # Note 'base64 -w0' doesn't work on Mac OS X, which has different flags.
  CA_KEY_BASE64=$(base64 <"${CERT_DIR}/pki/private/ca.key" | tr -d '\r\n')
  CA_CERT_BASE64=$(base64 <"${CERT_DIR}/pki/ca.crt" | tr -d '\r\n')
  MASTER_CERT_BASE64=$(base64 <"${CERT_DIR}/pki/issued/${MASTER_NAME}.crt" | tr -d '\r\n')
  MASTER_KEY_BASE64=$(base64 <"${CERT_DIR}/pki/private/${MASTER_NAME}.key" | tr -d '\r\n')
  KUBELET_CERT_BASE64=$(base64 <"${CERT_DIR}/pki/issued/kubelet.crt" | tr -d '\r\n')
  KUBELET_KEY_BASE64=$(base64 <"${CERT_DIR}/pki/private/kubelet.key" | tr -d '\r\n')
  KUBECFG_CERT_BASE64=$(base64 <"${CERT_DIR}/pki/issued/kubecfg.crt" | tr -d '\r\n')
  KUBECFG_KEY_BASE64=$(base64 <"${CERT_DIR}/pki/private/kubecfg.key" | tr -d '\r\n')
  KUBEAPISERVER_CERT_BASE64=$(base64 <"${CERT_DIR}/pki/issued/kube-apiserver.crt" | tr -d '\r\n')
  KUBEAPISERVER_KEY_BASE64=$(base64 <"${CERT_DIR}/pki/private/kube-apiserver.key" | tr -d '\r\n')

  # Setting up an addition directory (beyond pki) as it is the simplest way to
  # ensure we get a different CA pair to sign the proxy-client certs and which
  # we can send CA public key to the user-apiserver to validate communication.
  AGGREGATOR_CA_KEY_BASE64=$(base64 <"${AGGREGATOR_CERT_DIR}/pki/private/ca.key" | tr -d '\r\n')
  REQUESTHEADER_CA_CERT_BASE64=$(base64 <"${AGGREGATOR_CERT_DIR}/pki/ca.crt" | tr -d '\r\n')
  PROXY_CLIENT_CERT_BASE64=$(base64 <"${AGGREGATOR_CERT_DIR}/pki/issued/proxy-client.crt" | tr -d '\r\n')
  PROXY_CLIENT_KEY_BASE64=$(base64 <"${AGGREGATOR_CERT_DIR}/pki/private/proxy-client.key" | tr -d '\r\n')
}
|
||||||
|
|
||||||
|
# Set up easy-rsa directory structure by downloading and unpacking the
# easy-rsa tarball into KUBE_TEMP, then cloning the easyrsa3 tree for the
# kubelet and aggregator CAs.
#
# Assumed vars
#   KUBE_TEMP
#
# Vars set:
#   CERT_DIR
#   AGGREGATOR_CERT_DIR
function setup-easyrsa {
  local -r cert_create_debug_output=$(mktemp "${KUBE_TEMP}/cert_create_debug_output.XXX")
  # Note: This was heavily cribbed from make-ca-cert.sh
  # The subshell's output is captured to a debug file and errors are deferred:
  # failure is detected below by checking for the expected easyrsa binaries.
  (set -x
    cd "${KUBE_TEMP}"
    curl -L -O --connect-timeout 20 --retry 6 --retry-delay 2 https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz
    tar xzf easy-rsa.tar.gz
    mkdir easy-rsa-master/kubelet
    cp -r easy-rsa-master/easyrsa3/* easy-rsa-master/kubelet
    mkdir easy-rsa-master/aggregator
    cp -r easy-rsa-master/easyrsa3/* easy-rsa-master/aggregator) &>"${cert_create_debug_output}" || true
  CERT_DIR="${KUBE_TEMP}/easy-rsa-master/easyrsa3"
  AGGREGATOR_CERT_DIR="${KUBE_TEMP}/easy-rsa-master/aggregator"
  # [[ ... || ... ]] replaces the deprecated, ambiguous '[ ... -o ... ]' form.
  if [[ ! -x "${CERT_DIR}/easyrsa" || ! -x "${AGGREGATOR_CERT_DIR}/easyrsa" ]]; then
    # TODO(roberthbailey,porridge): add better error handling here,
    # see https://github.com/kubernetes/kubernetes/issues/55229
    cat "${cert_create_debug_output}" >&2
    echo "=== Failed to setup easy-rsa: Aborting ===" >&2
    exit 2
  fi
}
|
||||||
|
|
||||||
|
# Runs the easy RSA commands to generate certificate files.
# The generated files are IN ${CERT_DIR}
#
# Assumed vars
#   KUBE_TEMP
#   MASTER_NAME
#   CERT_DIR
#   PRIMARY_CN: Primary canonical name
#   SANS: Subject alternate names
#
function generate-certs {
  local -r cert_create_debug_output=$(mktemp "${KUBE_TEMP}/cert_create_debug_output.XXX")
  # Note: This was heavily cribbed from make-ca-cert.sh
  # Errors in the subshell are deferred; success is verified below by
  # checking that every expected output file exists and is non-empty.
  (set -x
    cd "${CERT_DIR}"
    ./easyrsa init-pki
    # this puts the cert into pki/ca.crt and the key into pki/private/ca.key
    ./easyrsa --batch "--req-cn=${PRIMARY_CN}@$(date +%s)" build-ca nopass
    ./easyrsa --subject-alt-name="${SANS}" build-server-full "${MASTER_NAME}" nopass
    ./easyrsa build-client-full kube-apiserver nopass

    kube::util::ensure-cfssl "${KUBE_TEMP}/cfssl"

    # make the config for the signer
    echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment","client auth"]}}}' > "ca-config.json"
    # create the kubelet client cert with the correct groups
    echo '{"CN":"kubelet","names":[{"O":"system:nodes"}],"hosts":[""],"key":{"algo":"rsa","size":2048}}' | "${CFSSL_BIN}" gencert -ca=pki/ca.crt -ca-key=pki/private/ca.key -config=ca-config.json - | "${CFSSLJSON_BIN}" -bare kubelet
    mv "kubelet-key.pem" "pki/private/kubelet.key"
    mv "kubelet.pem" "pki/issued/kubelet.crt"
    rm -f "kubelet.csr"

    # Make a superuser client cert with subject "O=system:masters, CN=kubecfg"
    ./easyrsa --dn-mode=org \
      --req-cn=kubecfg --req-org=system:masters \
      --req-c= --req-st= --req-city= --req-email= --req-ou= \
      build-client-full kubecfg nopass) &>"${cert_create_debug_output}" || true
  local output_file_missing=0
  local output_file
  for output_file in \
    "${CERT_DIR}/pki/private/ca.key" \
    "${CERT_DIR}/pki/ca.crt" \
    "${CERT_DIR}/pki/issued/${MASTER_NAME}.crt" \
    "${CERT_DIR}/pki/private/${MASTER_NAME}.key" \
    "${CERT_DIR}/pki/issued/kubelet.crt" \
    "${CERT_DIR}/pki/private/kubelet.key" \
    "${CERT_DIR}/pki/issued/kubecfg.crt" \
    "${CERT_DIR}/pki/private/kubecfg.key" \
    "${CERT_DIR}/pki/issued/kube-apiserver.crt" \
    "${CERT_DIR}/pki/private/kube-apiserver.key"
  do
    if [[ ! -s "${output_file}" ]]; then
      echo "Expected file ${output_file} not created" >&2
      output_file_missing=1
    fi
  done
  # Inside (( )) the '$' is unnecessary; bare name avoids empty-expansion issues.
  if (( output_file_missing )); then
    # TODO(roberthbailey,porridge): add better error handling here,
    # see https://github.com/kubernetes/kubernetes/issues/55229
    cat "${cert_create_debug_output}" >&2
    echo "=== Failed to generate master certificates: Aborting ===" >&2
    exit 2
  fi
}
|
||||||
|
|
||||||
|
# Runs the easy RSA commands to generate aggregator certificate files.
# The generated files are in ${AGGREGATOR_CERT_DIR}
#
# Assumed vars
#   KUBE_TEMP
#   AGGREGATOR_MASTER_NAME
#   AGGREGATOR_CERT_DIR
#   AGGREGATOR_PRIMARY_CN: Primary canonical name
#   AGGREGATOR_SANS: Subject alternate names
#
function generate-aggregator-certs {
  local -r cert_create_debug_output=$(mktemp "${KUBE_TEMP}/cert_create_debug_output.XXX")
  # Note: This was heavily cribbed from make-ca-cert.sh
  # Errors in the subshell are deferred; success is verified below by
  # checking that every expected output file exists and is non-empty.
  (set -x
    cd "${KUBE_TEMP}/easy-rsa-master/aggregator"
    ./easyrsa init-pki
    # this puts the cert into pki/ca.crt and the key into pki/private/ca.key
    ./easyrsa --batch "--req-cn=${AGGREGATOR_PRIMARY_CN}@$(date +%s)" build-ca nopass
    ./easyrsa --subject-alt-name="${AGGREGATOR_SANS}" build-server-full "${AGGREGATOR_MASTER_NAME}" nopass
    ./easyrsa build-client-full aggregator-apiserver nopass

    kube::util::ensure-cfssl "${KUBE_TEMP}/cfssl"

    # make the config for the signer
    echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment","client auth"]}}}' > "ca-config.json"
    # create the aggregator client cert with the correct groups
    echo '{"CN":"aggregator","hosts":[""],"key":{"algo":"rsa","size":2048}}' | "${CFSSL_BIN}" gencert -ca=pki/ca.crt -ca-key=pki/private/ca.key -config=ca-config.json - | "${CFSSLJSON_BIN}" -bare proxy-client
    mv "proxy-client-key.pem" "pki/private/proxy-client.key"
    mv "proxy-client.pem" "pki/issued/proxy-client.crt"
    rm -f "proxy-client.csr"

    # Make a superuser client cert with subject "O=system:masters, CN=kubecfg"
    ./easyrsa --dn-mode=org \
      --req-cn=proxy-clientcfg --req-org=system:aggregator \
      --req-c= --req-st= --req-city= --req-email= --req-ou= \
      build-client-full proxy-clientcfg nopass) &>"${cert_create_debug_output}" || true
  local output_file_missing=0
  local output_file
  for output_file in \
    "${AGGREGATOR_CERT_DIR}/pki/private/ca.key" \
    "${AGGREGATOR_CERT_DIR}/pki/ca.crt" \
    "${AGGREGATOR_CERT_DIR}/pki/issued/proxy-client.crt" \
    "${AGGREGATOR_CERT_DIR}/pki/private/proxy-client.key"
  do
    if [[ ! -s "${output_file}" ]]; then
      echo "Expected file ${output_file} not created" >&2
      output_file_missing=1
    fi
  done
  # Inside (( )) the '$' is unnecessary; bare name avoids empty-expansion issues.
  if (( output_file_missing )); then
    # TODO(roberthbailey,porridge): add better error handling here,
    # see https://github.com/kubernetes/kubernetes/issues/55229
    cat "${cert_create_debug_output}" >&2
    echo "=== Failed to generate aggregator certificates: Aborting ===" >&2
    exit 2
  fi
}
|
||||||
|
|
||||||
|
#
# Using provided master env, extracts value from provided key.
#
# Args:
#   $1 master env (kube-env of master; result of calling get-master-env)
#   $2 env key to use
#
# Outputs the value (text between the first pair of single quotes on the
# matching "KEY: 'value'" line), or an empty line when the key is absent.
function get-env-val() {
  local match
  # grep exits non-zero when nothing matches; the '|| echo ""' keeps this
  # safe under 'set -e'. $( ) replaces the legacy backtick form.
  match=$( (echo "${1}" | grep -E "^${2}:") || echo "" )
  if [[ -z "${match}" ]]; then
    # Return immediately: the original fell through and emitted a second
    # blank line from the pipeline below.
    echo ""
    return
  fi
  echo "${match}" | cut -d : -f 2 | cut -d \' -f 2
}
|
||||||
|
|
||||||
|
# Load the master env by calling get-master-env, and extract important values.
#
# Vars set (all via get-env-val against the fetched kube-env):
#   KUBE_PROXY_TOKEN, NODE_PROBLEM_DETECTOR_TOKEN, CA_CERT_BASE64,
#   CA_KEY_BASE64, KUBEAPISERVER_CERT_BASE64, KUBEAPISERVER_KEY_BASE64,
#   EXTRA_DOCKER_OPTS, KUBELET_CERT_BASE64, KUBELET_KEY_BASE64,
#   MASTER_CERT_BASE64, MASTER_KEY_BASE64, AGGREGATOR_CA_KEY_BASE64,
#   REQUESTHEADER_CA_CERT_BASE64, PROXY_CLIENT_CERT_BASE64,
#   PROXY_CLIENT_KEY_BASE64, ENABLE_LEGACY_ABAC
function parse-master-env() {
  # Get required master env vars. Declaration is split from assignment so a
  # failure of get-master-env is not masked by 'local' (ShellCheck SC2155).
  local master_env
  master_env=$(get-master-env)
  KUBE_PROXY_TOKEN=$(get-env-val "${master_env}" "KUBE_PROXY_TOKEN")
  NODE_PROBLEM_DETECTOR_TOKEN=$(get-env-val "${master_env}" "NODE_PROBLEM_DETECTOR_TOKEN")
  CA_CERT_BASE64=$(get-env-val "${master_env}" "CA_CERT")
  CA_KEY_BASE64=$(get-env-val "${master_env}" "CA_KEY")
  KUBEAPISERVER_CERT_BASE64=$(get-env-val "${master_env}" "KUBEAPISERVER_CERT")
  KUBEAPISERVER_KEY_BASE64=$(get-env-val "${master_env}" "KUBEAPISERVER_KEY")
  EXTRA_DOCKER_OPTS=$(get-env-val "${master_env}" "EXTRA_DOCKER_OPTS")
  KUBELET_CERT_BASE64=$(get-env-val "${master_env}" "KUBELET_CERT")
  KUBELET_KEY_BASE64=$(get-env-val "${master_env}" "KUBELET_KEY")
  MASTER_CERT_BASE64=$(get-env-val "${master_env}" "MASTER_CERT")
  MASTER_KEY_BASE64=$(get-env-val "${master_env}" "MASTER_KEY")
  AGGREGATOR_CA_KEY_BASE64=$(get-env-val "${master_env}" "AGGREGATOR_CA_KEY")
  REQUESTHEADER_CA_CERT_BASE64=$(get-env-val "${master_env}" "REQUESTHEADER_CA_CERT")
  PROXY_CLIENT_CERT_BASE64=$(get-env-val "${master_env}" "PROXY_CLIENT_CERT")
  PROXY_CLIENT_KEY_BASE64=$(get-env-val "${master_env}" "PROXY_CLIENT_KEY")
  ENABLE_LEGACY_ABAC=$(get-env-val "${master_env}" "ENABLE_LEGACY_ABAC")
}
|
||||||
|
|
||||||
|
# Update or verify required gcloud components are installed
# at minimum required version.
# Assumed vars
#   KUBE_PROMPT_FOR_UPDATE
function update-or-verify-gcloud() {
  local sudo_prefix=""
  # 'command -v' is the POSIX replacement for 'which'; path is quoted so a
  # gcloud install dir containing spaces does not break the writability test.
  if [ ! -w "$(dirname "$(command -v gcloud)")" ]; then
    sudo_prefix="sudo"
  fi
  # update and install components as needed
  if [[ "${KUBE_PROMPT_FOR_UPDATE}" == "y" ]]; then
    ${sudo_prefix} gcloud ${gcloud_prompt:-} components install alpha
    ${sudo_prefix} gcloud ${gcloud_prompt:-} components install beta
    ${sudo_prefix} gcloud ${gcloud_prompt:-} components update
  else
    local version
    version=$(gcloud version --format=json)
    python -c'
import json,sys
from distutils import version

minVersion = version.LooseVersion("1.3.0")
required = [ "alpha", "beta", "core" ]
data = json.loads(sys.argv[1])
rel = data.get("Google Cloud SDK")
if rel != "HEAD" and version.LooseVersion(rel) < minVersion:
  print("gcloud version out of date ( < %s )" % minVersion)
  exit(1)
missing = []
for c in required:
  if not data.get(c):
    missing += [c]
if missing:
  for c in missing:
    print ("missing required gcloud component \"{0}\"".format(c))
  exit(1)
' "${version}"
  fi
}
|
||||||
|
|
||||||
# Robustly try to create a static ip.
|
# Robustly try to create a static ip.
|
||||||
# $1: The name of the ip to create
|
# $1: The name of the ip to create
|
||||||
# $2: The name of the region to create the ip in.
|
# $2: The name of the region to create the ip in.
|
||||||
|
|
Loading…
Reference in New Issue