#!/bin/bash

# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# A library of helper functions for deploying on Rackspace.

# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "$(dirname "${BASH_SOURCE}")/${KUBE_CONFIG_FILE-config-default.sh}"
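
# Example (config-test.sh is a hypothetical file name): an alternate config
# can be selected per-invocation via KUBE_CONFIG_FILE:
#   KUBE_CONFIG_FILE=config-test.sh cluster/kube-up.sh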

verify-prereqs() {
  # Make sure that prerequisites are installed.
  for x in nova swiftly; do
    if [[ -z "$(which "$x")" ]]; then
      echo "cluster/rackspace/util.sh: Can't find $x in PATH, please fix and retry."
      exit 1
    fi
  done

  if [[ -z "${OS_AUTH_URL-}" ]]; then
    echo "cluster/rackspace/util.sh: OS_AUTH_URL not set."
    echo -e "\texport OS_AUTH_URL=https://identity.api.rackspacecloud.com/v2.0/"
    return 1
  fi

  if [[ -z "${OS_USERNAME-}" ]]; then
    echo "cluster/rackspace/util.sh: OS_USERNAME not set."
    echo -e "\texport OS_USERNAME=myusername"
    return 1
  fi

  if [[ -z "${OS_PASSWORD-}" ]]; then
    echo "cluster/rackspace/util.sh: OS_PASSWORD not set."
    echo -e "\texport OS_PASSWORD=myapikey"
    return 1
  fi
}

# Ensure that we have a password created for authenticating to the master.
# Will read from $HOME/.kubernetes_auth if available.
#
# Vars set:
#   KUBE_USER
#   KUBE_PASSWORD
get-password() {
  local file="$HOME/.kubernetes_auth"
  if [[ -r "$file" ]]; then
    KUBE_USER=$(python -c 'import json,sys;print(json.load(sys.stdin)["User"])' < "$file")
    KUBE_PASSWORD=$(python -c 'import json,sys;print(json.load(sys.stdin)["Password"])' < "$file")
    return
  fi
  KUBE_USER=admin
  KUBE_PASSWORD=$(python -c 'import string,random; print("".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16)))')

  # Store password for reuse.
  cat << EOF > "$file"
{
  "User": "$KUBE_USER",
  "Password": "$KUBE_PASSWORD"
}
EOF
  chmod 0600 "$file"
}
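
# A minimal usage sketch (this mirrors how kube-up below consumes the vars):
#   get-password
#   curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" \
#     "https://${KUBE_MASTER_IP}/api/v1beta1/pods"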

# Generate an SSH key for this deployment if needed and upload it to Rackspace.
rax-ssh-key() {
  if [[ ! -f "${HOME}/.ssh/${SSH_KEY_NAME}" ]]; then
    echo "cluster/rackspace/util.sh: Generating SSH KEY ${HOME}/.ssh/${SSH_KEY_NAME}"
    ssh-keygen -f "${HOME}/.ssh/${SSH_KEY_NAME}" -N '' > /dev/null
  fi

  if ! nova keypair-list | grep -q "${SSH_KEY_NAME}"; then
    echo "cluster/rackspace/util.sh: Uploading key to Rackspace:"
    echo -e "\tnova keypair-add ${SSH_KEY_NAME} --pub-key ${HOME}/.ssh/${SSH_KEY_NAME}.pub"
    nova keypair-add ${SSH_KEY_NAME} --pub-key ${HOME}/.ssh/${SSH_KEY_NAME}.pub > /dev/null 2>&1
  else
    echo "cluster/rackspace/util.sh: SSH key ${SSH_KEY_NAME}.pub already uploaded"
  fi
}

# Locate the server tarball, preferring a packaged release and falling back to
# a developer build under _output/release-tars.
find-release-tars() {
  SERVER_BINARY_TAR="${KUBE_ROOT}/server/kubernetes-server-linux-amd64.tar.gz"
  RELEASE_DIR="${KUBE_ROOT}/server/"
  if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
    SERVER_BINARY_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
    RELEASE_DIR="${KUBE_ROOT}/_output/release-tars/"
  fi
  if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
    echo "!!! Cannot find kubernetes-server-linux-amd64.tar.gz"
    exit 1
  fi
}
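
# Note (hedged): the _output/release-tars fallback matches where a developer
# build of this era drops tarballs (e.g. via build/release.sh); build a
# release first if find-release-tars exits with the error above.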

rackspace-set-vars() {
  CLOUDFILES_CONTAINER="kubernetes-releases-${OS_USERNAME}"
  CONTAINER_PREFIX=${CONTAINER_PREFIX-devel/}
  find-release-tars
}
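
# Example of the resulting naming: with OS_USERNAME=alice and the default
# prefix, releases land in the "kubernetes-releases-alice" container under
# "devel/".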

# Retrieve a temp URL from Cloud Files to make the release object publicly
# accessible for a limited time.
find-object-url() {
  rackspace-set-vars

  KUBE_TAR=${CLOUDFILES_CONTAINER}/${CONTAINER_PREFIX}/kubernetes-server-linux-amd64.tar.gz

  RELEASE_TMP_URL=$(swiftly -A ${OS_AUTH_URL} -U ${OS_USERNAME} -K ${OS_PASSWORD} tempurl GET ${KUBE_TAR})
  echo "cluster/rackspace/util.sh: Object temp URL:"
  echo -e "\t${RELEASE_TMP_URL}"
}
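
# Hypothetical sanity check (standard curl flags; not part of the original
# flow): confirm the temp URL is fetchable before booting servers, e.g.:
#   curl --fail --silent --show-error --head "${RELEASE_TMP_URL}" > /dev/null \
#     || { echo "Release tarball unreachable via temp URL"; return 1; }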

ensure_dev_container() {
  SWIFTLY_CMD="swiftly -A ${OS_AUTH_URL} -U ${OS_USERNAME} -K ${OS_PASSWORD}"

  if ! ${SWIFTLY_CMD} get ${CLOUDFILES_CONTAINER} > /dev/null 2>&1 ; then
    echo "cluster/rackspace/util.sh: Container doesn't exist. Creating container ${CLOUDFILES_CONTAINER}"
    ${SWIFTLY_CMD} put ${CLOUDFILES_CONTAINER} > /dev/null 2>&1
  fi
}

# Copy kubernetes-server-linux-amd64.tar.gz to the Cloud Files object store.
# Relies on SWIFTLY_CMD and RELEASE_DIR being set by ensure_dev_container and
# find-release-tars, respectively.
copy_dev_tarballs() {
  echo "cluster/rackspace/util.sh: Uploading to Cloud Files"
  ${SWIFTLY_CMD} put -i ${RELEASE_DIR}/kubernetes-server-linux-amd64.tar.gz \
    ${CLOUDFILES_CONTAINER}/${CONTAINER_PREFIX}/kubernetes-server-linux-amd64.tar.gz > /dev/null 2>&1

  echo "Release pushed."
}
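
# Hypothetical spot check: list the container (same swiftly invocation as in
# ensure_dev_container) to confirm the upload landed:
#   ${SWIFTLY_CMD} get ${CLOUDFILES_CONTAINER} | grep kubernetes-server-linux-amd64.tar.gz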

rax-boot-master() {
  DISCOVERY_URL=$(curl -sS https://discovery.etcd.io/new)
  DISCOVERY_ID=$(echo "${DISCOVERY_URL}" | cut -f 4 -d /)
  echo "cluster/rackspace/util.sh: etcd discovery URL: ${DISCOVERY_URL}"

  # Copy the cloud-config template to KUBE_TEMP, substituting cluster values
  # with sed.
  sed -e "s|DISCOVERY_ID|${DISCOVERY_ID}|" \
      -e "s|CLOUD_FILES_URL|${RELEASE_TMP_URL//&/\\&}|" \
      -e "s|KUBE_USER|${KUBE_USER}|" \
      -e "s|KUBE_PASSWORD|${KUBE_PASSWORD}|" \
      -e "s|PORTAL_NET|${PORTAL_NET}|" \
      -e "s|OS_AUTH_URL|${OS_AUTH_URL}|" \
      -e "s|OS_USERNAME|${OS_USERNAME}|" \
      -e "s|OS_PASSWORD|${OS_PASSWORD}|" \
      -e "s|OS_TENANT_NAME|${OS_TENANT_NAME}|" \
      -e "s|OS_REGION_NAME|${OS_REGION_NAME}|" \
      $(dirname $0)/rackspace/cloud-config/master-cloud-config.yaml > $KUBE_TEMP/master-cloud-config.yaml

  MASTER_BOOT_CMD="nova boot \
    --key-name ${SSH_KEY_NAME} \
    --flavor ${KUBE_MASTER_FLAVOR} \
    --image ${KUBE_IMAGE} \
    --meta ${MASTER_TAG} \
    --meta ETCD=${DISCOVERY_ID} \
    --user-data ${KUBE_TEMP}/master-cloud-config.yaml \
    --config-drive true \
    --nic net-id=${NETWORK_UUID} \
    ${MASTER_NAME}"

  echo "cluster/rackspace/util.sh: Booting ${MASTER_NAME} with the following command:"
  echo -e "\t$MASTER_BOOT_CMD"
  $MASTER_BOOT_CMD
}

rax-boot-minions() {
  cp $(dirname $0)/rackspace/cloud-config/minion-cloud-config.yaml \
    ${KUBE_TEMP}/minion-cloud-config.yaml

  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    sed -e "s|DISCOVERY_ID|${DISCOVERY_ID}|" \
        -e "s|INDEX|$((i + 1))|g" \
        -e "s|CLOUD_FILES_URL|${RELEASE_TMP_URL//&/\\&}|" \
        -e "s|ENABLE_NODE_MONITORING|${ENABLE_NODE_MONITORING:-false}|" \
        -e "s|ENABLE_NODE_LOGGING|${ENABLE_NODE_LOGGING:-false}|" \
        -e "s|LOGGING_DESTINATION|${LOGGING_DESTINATION:-}|" \
        -e "s|ENABLE_CLUSTER_DNS|${ENABLE_CLUSTER_DNS:-false}|" \
        -e "s|DNS_SERVER_IP|${DNS_SERVER_IP:-}|" \
        -e "s|DNS_DOMAIN|${DNS_DOMAIN:-}|" \
        $(dirname $0)/rackspace/cloud-config/minion-cloud-config.yaml > $KUBE_TEMP/minion-cloud-config-$((i + 1)).yaml

    MINION_BOOT_CMD="nova boot \
      --key-name ${SSH_KEY_NAME} \
      --flavor ${KUBE_MINION_FLAVOR} \
      --image ${KUBE_IMAGE} \
      --meta ${MINION_TAG} \
      --user-data ${KUBE_TEMP}/minion-cloud-config-$((i + 1)).yaml \
      --config-drive true \
      --nic net-id=${NETWORK_UUID} \
      ${MINION_NAMES[$i]}"

    echo "cluster/rackspace/util.sh: Booting ${MINION_NAMES[$i]} with the following command:"
    echo -e "\t$MINION_BOOT_CMD"
    $MINION_BOOT_CMD
  done
}

rax-nova-network() {
  if ! nova network-list | grep -q "${NOVA_NETWORK_LABEL}"; then
    SAFE_CIDR=$(echo $NOVA_NETWORK_CIDR | tr -d '\\')
    NETWORK_CREATE_CMD="nova network-create $NOVA_NETWORK_LABEL $SAFE_CIDR"

    echo "cluster/rackspace/util.sh: Creating cloud network with the following command:"
    echo -e "\t${NETWORK_CREATE_CMD}"

    $NETWORK_CREATE_CMD
  else
    echo "cluster/rackspace/util.sh: Using existing cloud network $NOVA_NETWORK_LABEL"
  fi
}

detect-minions() {
  KUBE_MINION_IP_ADDRESSES=()
  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    local minion_ip=$(nova show --minimal ${MINION_NAMES[$i]} \
      | grep accessIPv4 | awk '{print $4}')
    echo "cluster/rackspace/util.sh: Found ${MINION_NAMES[$i]} at ${minion_ip}"
    KUBE_MINION_IP_ADDRESSES+=("${minion_ip}")
  done
  if [[ -z "${KUBE_MINION_IP_ADDRESSES[0]:-}" ]]; then
    echo "cluster/rackspace/util.sh: Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'"
    exit 1
  fi
}

detect-master() {
  KUBE_MASTER=${MASTER_NAME}

  echo "Waiting for ${MASTER_NAME} IP Address."
  echo
  echo "  This will continually check to see if the master node has an IP address."
  echo

  KUBE_MASTER_IP=$(nova show $KUBE_MASTER --minimal | grep accessIPv4 | awk '{print $4}')

  while [[ -z "${KUBE_MASTER_IP-}" ]]; do
    KUBE_MASTER_IP=$(nova show $KUBE_MASTER --minimal | grep accessIPv4 | awk '{print $4}')
    printf "."
    sleep 2
  done

  echo "${KUBE_MASTER} IP Address is ${KUBE_MASTER_IP}"
}

# $1 should be the network you would like to get an IP address for.
detect-master-nova-net() {
  KUBE_MASTER=${MASTER_NAME}

  MASTER_IP=$(nova show $KUBE_MASTER --minimal | grep $1 | awk '{print $5}')
}
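
# Example: fetch the master's address on the cluster's own cloud network
# (label defined in the config file sourced above):
#   detect-master-nova-net ${NOVA_NETWORK_LABEL}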

kube-up() {
  SCRIPT_DIR=$(CDPATH="" cd $(dirname $0); pwd)

  rackspace-set-vars
  ensure_dev_container
  copy_dev_tarballs

  # Find the release to use. Generally it will be passed when doing a 'prod'
  # install and will default to the release/config.sh version when doing a
  # developer up.
  find-object-url

  # Create a temp directory to hold scripts that will be uploaded to master/minions.
  KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
  trap "rm -rf ${KUBE_TEMP}" EXIT

  get-password
  python $(dirname $0)/../third_party/htpasswd/htpasswd.py -b -c ${KUBE_TEMP}/htpasswd $KUBE_USER $KUBE_PASSWORD
  HTPASSWD=$(cat ${KUBE_TEMP}/htpasswd)

  rax-nova-network
  NETWORK_UUID=$(nova network-list | grep -i ${NOVA_NETWORK_LABEL} | awk '{print $2}')

  # Create and upload ssh key if necessary.
  rax-ssh-key

  echo "cluster/rackspace/util.sh: Starting Cloud Servers"
  rax-boot-master

  rax-boot-minions

  FAIL=0
  for job in $(jobs -p); do
    wait $job || let "FAIL+=1"
  done
  if (( $FAIL != 0 )); then
    echo "${FAIL} commands failed. Exiting."
    exit 2
  fi

  detect-master

  echo "Waiting for cluster initialization."
  echo
  echo "  This will continually check to see if the API for kubernetes is reachable."
  echo "  This might loop forever if there was some uncaught error during start"
  echo "  up."
  echo

  # This will fail until the apiserver salt is updated.
  until curl --insecure --user ${KUBE_USER}:${KUBE_PASSWORD} --max-time 5 \
      --fail --output /dev/null --silent https://${KUBE_MASTER_IP}/api/v1beta1/pods; do
    printf "."
    sleep 2
  done

  echo "Kubernetes cluster created."

  # Don't bail on errors; we want to be able to print some info.
  set +e

  detect-minions

  echo "All minions may not be online yet, this is okay."
  echo
  echo "Kubernetes cluster is running. The master is running at:"
  echo
  echo "  https://${KUBE_MASTER_IP}"
  echo
  echo "The user name and password to use are located in ~/.kubernetes_auth."
  echo
  echo "Security note: The server above uses a self-signed certificate. This is"
  echo "  subject to \"Man in the middle\" type attacks."
  echo
}
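
# Typical entry point (assuming the provider wiring of this era of the tree):
#   KUBERNETES_PROVIDER=rackspace cluster/kube-up.sh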

function setup-logging-firewall {
  echo "TODO: setup logging"
}

function teardown-logging-firewall {
  echo "TODO: teardown logging"
}

# Perform preparations required to run e2e tests.
function prepare-e2e() {
  echo "Rackspace doesn't need special preparations for e2e tests"
}