#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the local config.
# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/azure/${KUBE_CONFIG_FILE-"config-default.sh"}"
function azure_call {
local -a params=()
local param
# the '... in "$@"' is implicit on a for, so doesn't need to be stated.
for param; do
params+=("${param}")
done
local rc=0
local stderr
local count=0
while [[ ${count} -lt 10 ]]; do
stderr=$(azure "${params[@]}" 2>&1 >&3)
rc=$?
if [[ ${rc} -eq 0 ]]; then
break
fi
if [[ "${stderr}" != *"getaddrinfo ENOTFOUND"* ]]; then
break
fi
count=$((count + 1))
done 3>&1
if [[ "${rc}" -ne 0 ]]; then
echo "${stderr}" >&2
return "${rc}"
fi
}
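# json_val: extract a field from a JSON document on stdin by appending a
# Python subscript expression, e.g. (as used for the storage key below):
#   azure_call storage account keys list $AZ_STG --json | json_val '["primaryKey"]'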
function json_val () {
python -c 'import json,sys;obj=json.load(sys.stdin);print obj'$1'';
}
# Verify prereqs
function verify-prereqs {
if [[ -z "$(which azure)" ]]; then
echo "Couldn't find azure in PATH"
echo " please install with 'npm install azure-cli'"
exit 1
fi
if [[ -z "$(azure_call account list | grep true)" ]]; then
echo "Default azure account not set"
echo " please set with 'azure account set'"
exit 1
fi
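# Derive a short, stable hash of the default account so that the storage
# account, cloud service and container names are unique to this account.
# macOS ships 'md5'; Linux ships 'md5sum'.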
account=$(azure_call account list | grep true)
if which md5 > /dev/null 2>&1; then
AZ_HSH=$(md5 -q -s "$account")
else
AZ_HSH=$(echo -n "$account" | md5sum)
fi
AZ_HSH=${AZ_HSH:0:7}
AZ_STG=kube$AZ_HSH
echo "==> AZ_STG: $AZ_STG"
AZ_CS="$AZ_CS_PREFIX-$AZ_HSH"
echo "==> AZ_CS: $AZ_CS"
CONTAINER=kube-$TAG
echo "==> CONTAINER: $CONTAINER"
}
# Create a temp dir that'll be deleted at the end of this bash session.
#
# Vars set:
# KUBE_TEMP
function ensure-temp-dir {
if [[ -z ${KUBE_TEMP-} ]]; then
KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
trap 'rm -rf "${KUBE_TEMP}"' EXIT
fi
}
# Verify and find the various tar files that we are going to use on the server.
#
# Vars set:
# SERVER_BINARY_TAR
# SALT_TAR
function find-release-tars {
SERVER_BINARY_TAR="${KUBE_ROOT}/server/kubernetes-server-linux-amd64.tar.gz"
if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
SERVER_BINARY_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
fi
if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
echo "!!! Cannot find kubernetes-server-linux-amd64.tar.gz"
exit 1
fi
SALT_TAR="${KUBE_ROOT}/server/kubernetes-salt.tar.gz"
if [[ ! -f "$SALT_TAR" ]]; then
SALT_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-salt.tar.gz"
fi
if [[ ! -f "$SALT_TAR" ]]; then
echo "!!! Cannot find kubernetes-salt.tar.gz"
exit 1
fi
}
# Take the local tar files and upload them to Azure Storage. They will then be
# downloaded by the master as part of the start up script for the master.
#
# Assumed vars:
# SERVER_BINARY_TAR
# SALT_TAR
# Vars set:
# SERVER_BINARY_TAR_URL
# SALT_TAR_URL
function upload-server-tars() {
SERVER_BINARY_TAR_URL=
SALT_TAR_URL=
echo "==> SERVER_BINARY_TAR: $SERVER_BINARY_TAR"
echo "==> SALT_TAR: $SALT_TAR"
echo "+++ Staging server tars to Azure Storage: $AZ_STG"
local server_binary_url="${SERVER_BINARY_TAR##*/}"
local salt_url="${SALT_TAR##*/}"
SERVER_BINARY_TAR_URL="https://${AZ_STG}.blob.core.windows.net/$CONTAINER/$server_binary_url"
SALT_TAR_URL="https://${AZ_STG}.blob.core.windows.net/$CONTAINER/$salt_url"
echo "==> SERVER_BINARY_TAR_URL: $SERVER_BINARY_TAR_URL"
echo "==> SALT_TAR_URL: $SALT_TAR_URL"
echo "--> Checking storage exists..."
if [[ -z "$(azure_call storage account show $AZ_STG 2>/dev/null | \
grep data)" ]]; then
echo "--> Creating storage..."
azure_call storage account create -l "$AZ_LOCATION" $AZ_STG
fi
echo "--> Getting storage key..."
stg_key=$(azure_call storage account keys list $AZ_STG --json | \
json_val '["primaryKey"]')
echo "--> Checking storage container exists..."
if [[ -z "$(azure_call storage container show -a $AZ_STG -k "$stg_key" \
$CONTAINER 2>/dev/null | grep data)" ]]; then
echo "--> Creating storage container..."
azure_call storage container create \
-a $AZ_STG \
-k "$stg_key" \
-p Blob \
$CONTAINER
fi
echo "--> Checking server binary exists in the container..."
if [[ -n "$(azure_call storage blob show -a $AZ_STG -k "$stg_key" \
$CONTAINER $server_binary_url 2>/dev/null | grep data)" ]]; then
echo "--> Deleting server binary in the container..."
azure_call storage blob delete \
-a $AZ_STG \
-k "$stg_key" \
$CONTAINER \
$server_binary_url
fi
echo "--> Uploading server binary to the container..."
azure_call storage blob upload \
-a $AZ_STG \
-k "$stg_key" \
$SERVER_BINARY_TAR \
$CONTAINER \
$server_binary_url
echo "--> Checking salt data exists in the container..."
if [[ -n "$(azure_call storage blob show -a $AZ_STG -k "$stg_key" \
$CONTAINER $salt_url 2>/dev/null | grep data)" ]]; then
echo "--> Deleting salt data in the container..."
azure_call storage blob delete \
-a $AZ_STG \
-k "$stg_key" \
$CONTAINER \
$salt_url
fi
echo "--> Uploading salt data to the container..."
azure_call storage blob upload \
-a $AZ_STG \
-k "$stg_key" \
$SALT_TAR \
$CONTAINER \
$salt_url
}
# Detect the information about the minions
#
# Assumed vars:
# MINION_NAMES
# NUM_MINIONS
# AZ_SSH_KEY
# Vars set:
# MINION_NAMES (rewritten with each minion's fully qualified hostname)
function detect-minions () {
if [[ -z "$AZ_CS" ]]; then
verify-prereqs
fi
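# All minions share the cloud service's public address; each is reachable on
# its own SSH endpoint (22001..2200N), so read back its hostname over SSH.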
ssh_ports=($(eval echo "2200{1..$NUM_MINIONS}"))
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
MINION_NAMES[$i]=$(ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net hostname -f)
done
}
# Detect the IP for the master
#
# Assumed vars:
# MASTER_NAME
# AZ_CS
# Vars set:
# KUBE_MASTER
# KUBE_MASTER_IP
function detect-master () {
if [[ -z "$AZ_CS" ]]; then
verify-prereqs
fi
KUBE_MASTER=${MASTER_NAME}
KUBE_MASTER_IP="${AZ_CS}.cloudapp.net"
echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
}
# Ensure that we have a password created for authenticating to the master.
# Will read from $HOME/.kubernetes_auth if available.
#
# Vars set:
# KUBE_USER
# KUBE_PASSWORD
function get-password {
local file="$HOME/.kubernetes_auth"
if [[ -r "$file" ]]; then
KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]')
KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]')
return
fi
KUBE_USER=admin
KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
# TODO: remove this code; in every use case seen so far it is overwritten at
# cluster creation time.
cat << EOF > "$file"
{
"User": "$KUBE_USER",
"Password": "$KUBE_PASSWORD"
}
EOF
chmod 0600 "$file"
}
# Generate authentication token for admin user. Will
# read from $HOME/.kubernetes_auth if available.
#
# Vars set:
# KUBE_ADMIN_TOKEN
function get-admin-token {
local file="$HOME/.kubernetes_auth"
if [[ -r "$file" ]]; then
KUBE_ADMIN_TOKEN=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["BearerToken"]')
return
fi
KUBE_ADMIN_TOKEN=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(32))')
}
# Instantiate a kubernetes cluster
#
# Assumed vars
# KUBE_ROOT
# <Various vars set in config file>
function kube-up {
# Make sure we have the tar files staged on Azure Storage
find-release-tars
upload-server-tars
ensure-temp-dir
get-password
python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
-b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
local htpasswd
htpasswd=$(cat "${KUBE_TEMP}/htpasswd")
# Generate openvpn certs
echo "--> Generating openvpn certs"
echo 01 > ${KUBE_TEMP}/ca.srl
openssl genrsa -out ${KUBE_TEMP}/ca.key
openssl req -new -x509 -days 1095 \
-key ${KUBE_TEMP}/ca.key \
-out ${KUBE_TEMP}/ca.crt \
-subj "/CN=openvpn-ca"
openssl genrsa -out ${KUBE_TEMP}/server.key
openssl req -new \
-key ${KUBE_TEMP}/server.key \
-out ${KUBE_TEMP}/server.csr \
-subj "/CN=server"
openssl x509 -req -days 1095 \
-in ${KUBE_TEMP}/server.csr \
-CA ${KUBE_TEMP}/ca.crt \
-CAkey ${KUBE_TEMP}/ca.key \
-CAserial ${KUBE_TEMP}/ca.srl \
-out ${KUBE_TEMP}/server.crt
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
openssl genrsa -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.key
openssl req -new \
-key ${KUBE_TEMP}/${MINION_NAMES[$i]}.key \
-out ${KUBE_TEMP}/${MINION_NAMES[$i]}.csr \
-subj "/CN=${MINION_NAMES[$i]}"
openssl x509 -req -days 1095 \
-in ${KUBE_TEMP}/${MINION_NAMES[$i]}.csr \
-CA ${KUBE_TEMP}/ca.crt \
-CAkey ${KUBE_TEMP}/ca.key \
-CAserial ${KUBE_TEMP}/ca.srl \
-out ${KUBE_TEMP}/${MINION_NAMES[$i]}.crt
done
# Build up start up script for master
echo "--> Building up start up script for master"
(
echo "#!/bin/bash"
echo "CA_CRT=\"$(cat ${KUBE_TEMP}/ca.crt)\""
echo "SERVER_CRT=\"$(cat ${KUBE_TEMP}/server.crt)\""
echo "SERVER_KEY=\"$(cat ${KUBE_TEMP}/server.key)\""
echo "mkdir -p /var/cache/kubernetes-install"
echo "cd /var/cache/kubernetes-install"
echo "readonly MASTER_NAME='${MASTER_NAME}'"
echo "readonly INSTANCE_PREFIX='${INSTANCE_PREFIX}'"
echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-minion'"
echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'"
echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
echo "readonly MASTER_HTPASSWD='${htpasswd}'"
echo "readonly PORTAL_NET='${PORTAL_NET}'"
grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/common.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/create-dynamic-salt-files.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/download-release.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/salt-master.sh"
) > "${KUBE_TEMP}/master-start.sh"
if [[ ! -f $AZ_SSH_KEY ]]; then
ssh-keygen -f $AZ_SSH_KEY -N ''
fi
if [[ ! -f $AZ_SSH_CERT ]]; then
openssl req -new -x509 -days 1095 -key $AZ_SSH_KEY -out $AZ_SSH_CERT \
-subj "/CN=azure-ssh-key"
fi
if [[ -z "$(azure_call network vnet show "$AZ_VNET" 2>/dev/null | grep data)" ]]; then
echo error create vnet $AZ_VNET with subnet $AZ_SUBNET
2014-11-17 07:56:15 +00:00
exit 1
fi
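# Create the master inside the configured vnet/subnet, attach the SSH cert,
# expose SSH on public port 22000, and pass master-start.sh to the VM as its
# provisioning script.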
echo "--> Starting VM"
azure_call vm create \
-w "$AZ_VNET" \
-n $MASTER_NAME \
-l "$AZ_LOCATION" \
-t $AZ_SSH_CERT \
-e 22000 -P \
-d ${KUBE_TEMP}/master-start.sh \
-b $AZ_SUBNET \
$AZ_CS $AZ_IMAGE $USER
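# Each minion joins the same cloud service and gets its own public SSH
# endpoint from the 22001..2200N range computed here.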
ssh_ports=($(eval echo "2200{1..$NUM_MINIONS}"))
# Build up start up script for minions
echo "--> Building up start up script for minions"
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
(
echo "#!/bin/bash"
echo "MASTER_NAME='${MASTER_NAME}'"
echo "CA_CRT=\"$(cat ${KUBE_TEMP}/ca.crt)\""
echo "CLIENT_CRT=\"$(cat ${KUBE_TEMP}/${MINION_NAMES[$i]}.crt)\""
echo "CLIENT_KEY=\"$(cat ${KUBE_TEMP}/${MINION_NAMES[$i]}.key)\""
echo "MINION_IP_RANGE='${MINION_IP_RANGES[$i]}'"
grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/common.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/salt-minion.sh"
) > "${KUBE_TEMP}/minion-start-${i}.sh"
echo "--> Starting VM"
azure_call vm create \
-c -w "$AZ_VNET" \
-n ${MINION_NAMES[$i]} \
-l "$AZ_LOCATION" \
-t $AZ_SSH_CERT \
-e ${ssh_ports[$i]} -P \
-d ${KUBE_TEMP}/minion-start-${i}.sh \
-b $AZ_SUBNET \
$AZ_CS $AZ_IMAGE $USER
done
echo "--> Creating endpoint"
azure_call vm endpoint create $MASTER_NAME 443
detect-master > /dev/null
echo "==> KUBE_MASTER_IP: ${KUBE_MASTER_IP}"
echo "Waiting for cluster initialization."
echo
echo " This will continually check to see if the API for kubernetes is reachable."
echo " This might loop forever if there was some uncaught error during start"
echo " up."
echo
until curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" --max-time 5 \
--fail --output /dev/null --silent "https://${KUBE_MASTER_IP}/api/v1beta1/pods"; do
printf "."
sleep 2
done
printf "\n"
echo "Kubernetes cluster created."
local kube_cert=".kubecfg.crt"
local kube_key=".kubecfg.key"
local ca_cert=".kubernetes.ca.crt"
# TODO: generate ADMIN (and KUBELET) tokens and put those in the master's
# config file. Distribute the same way the htpasswd is done.
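# Copy the credentials generated on the master to the local machine and write
# ~/.kubernetes_auth; umask 077 keeps the key material readable only by the
# current user.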
(umask 077
ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null
ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null
ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null
cat << EOF > ~/.kubernetes_auth
{
"User": "$KUBE_USER",
"Password": "$KUBE_PASSWORD",
"CAFile": "$HOME/$ca_cert",
"CertFile": "$HOME/$kube_cert",
"KeyFile": "$HOME/$kube_key"
}
EOF
chmod 0600 ~/.kubernetes_auth "${HOME}/${kube_cert}" \
"${HOME}/${kube_key}" "${HOME}/${ca_cert}"
)
# Wait for salt on the minions
sleep 30
echo "Sanity checking cluster..."
# Basic sanity checking
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
# Make sure docker is installed
echo "--> Making sure docker is installed on ${MINION_NAMES[$i]}."
ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} \
$AZ_CS.cloudapp.net which docker > /dev/null || {
echo "Docker failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely" >&2
echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2
echo "cluster. (sorry!)" >&2
exit 1
}
done
echo
echo "Kubernetes cluster is running. The master is running at:"
echo
echo " https://${KUBE_MASTER_IP}"
echo
echo "The user name and password to use is located in ~/.kubernetes_auth."
echo
}
# Delete a kubernetes cluster
function kube-down {
echo "Bringing down cluster"
set +e
azure_call vm delete $MASTER_NAME -b -q
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
azure_call vm delete ${MINION_NAMES[$i]} -b -q
done
wait
}
# Update a kubernetes cluster with latest source
#function kube-push {
# detect-project
# detect-master
# Make sure we have the tar files staged on Azure Storage
# find-release-tars
# upload-server-tars
# (
# echo "#! /bin/bash"
# echo "mkdir -p /var/cache/kubernetes-install"
# echo "cd /var/cache/kubernetes-install"
# echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'"
# echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
# grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/common.sh"
# grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/download-release.sh"
# echo "echo Executing configuration"
# echo "sudo salt '*' mine.update"
# echo "sudo salt --force-color '*' state.highstate"
# ) | gcutil ssh --project "$PROJECT" --zone "$ZONE" "$KUBE_MASTER" sudo bash
# get-password
# echo
# echo "Kubernetes cluster is running. The master is running at:"
# echo
# echo " https://${KUBE_MASTER_IP}"
# echo
# echo "The user name and password to use is located in ~/.kubernetes_auth."
# echo
#}
# -----------------------------------------------------------------------------
# Cluster specific test helpers used from hack/e2e-test.sh
# Execute prior to running tests to build a release if required for env.
#
# Assumed Vars:
# KUBE_ROOT
function test-build-release {
# Make a release
"${KUBE_ROOT}/build/release.sh"
}
# SSH to a node by name ($1) and run a command ($2).
function ssh-to-node {
local node="$1"
local cmd="$2"
ssh -o LogLevel=quiet "${node}" "${cmd}"
}
# Restart the kube-proxy on a node ($1)
function restart-kube-proxy {
ssh-to-node "$1" "sudo /etc/init.d/kube-proxy restart"
}
# Restart the kube-apiserver on the master ($1)
function restart-apiserver {
ssh-to-node "$1" "sudo /etc/init.d/kube-apiserver restart"
}
# Setup monitoring using heapster and InfluxDB
function setup-monitoring-firewall {
echo "not implemented" >/dev/null
}
function teardown-monitoring-firewall {
echo "not implemented" >/dev/null
}
function setup-logging-firewall {
echo "TODO: setup logging"
}
function teardown-logging-firewall {
echo "TODO: teardown logging"
}