#!/bin/bash

# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# A library of helper functions that each provider hosting Kubernetes must implement to use cluster/kube-*.sh scripts.
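#
# Illustrative only: these functions are not called directly; the cluster/kube-*.sh
# entry points source this file when the Vagrant provider is selected, e.g.
#   KUBERNETES_PROVIDER=vagrant cluster/kube-up.sh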

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/vagrant/${KUBE_CONFIG_FILE-"config-default.sh"}"

function detect-master () {
  KUBE_MASTER_IP=$MASTER_IP
  echo "KUBE_MASTER_IP: ${KUBE_MASTER_IP}" 1>&2
}

# Get minion IP addresses and store in KUBE_MINION_IP_ADDRESSES[]
function detect-minions {
  echo "Minions already detected" 1>&2
  KUBE_MINION_IP_ADDRESSES=("${MINION_IPS[@]}")
}

# Verify prereqs on the host machine. Also exports USING_KUBE_SCRIPTS=true so
# that our Vagrantfile doesn't error out.
function verify-prereqs {
  for x in vagrant virtualbox; do
    if ! which "$x" >/dev/null; then
      echo "Can't find $x in PATH, please fix and retry."
      exit 1
    fi
  done

  # Set VAGRANT_CWD to KUBE_ROOT so that we find the right Vagrantfile no
  # matter what directory the tools are called from.
  export VAGRANT_CWD="${KUBE_ROOT}"

  export USING_KUBE_SCRIPTS=true
}

# Create a temp dir that'll be deleted at the end of this bash session.
#
# Vars set:
#   KUBE_TEMP
function ensure-temp-dir {
  if [[ -z ${KUBE_TEMP-} ]]; then
    export KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
    trap 'rm -rf "${KUBE_TEMP}"' EXIT
  fi
}

# Create a set of provision scripts for the master and each of the minions
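#
# Each generated script is self-contained: a header of shell variable assignments
# followed by the matching provision script with its comment lines stripped.
# Illustrative excerpt of the resulting ${KUBE_TEMP}/master-start.sh (values assumed):
#   #! /bin/bash
#   KUBE_ROOT=/vagrant
#   MASTER_IP='10.245.1.2'
#   ...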
function create-provision-scripts {
  ensure-temp-dir

  (
    echo "#! /bin/bash"
    echo "KUBE_ROOT=/vagrant"
    echo "MASTER_NAME='${INSTANCE_PREFIX}-master'"
    echo "MASTER_IP='${MASTER_IP}'"
    echo "MINION_NAMES=(${MINION_NAMES[@]})"
    echo "MINION_IPS=(${MINION_IPS[@]})"
    echo "NODE_IP='${MASTER_IP}'"
    echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'"
    echo "CONTAINER_NETMASK='${MASTER_CONTAINER_NETMASK}'"
    echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'"
    echo "CONTAINER_ADDR='${MASTER_CONTAINER_ADDR}'"
    echo "MINION_CONTAINER_NETMASKS='${MINION_CONTAINER_NETMASKS[@]}'"
    echo "MINION_CONTAINER_SUBNETS=(${MINION_CONTAINER_SUBNETS[@]})"
    echo "PORTAL_NET='${PORTAL_NET}'"
    echo "MASTER_USER='${MASTER_USER}'"
    echo "MASTER_PASSWD='${MASTER_PASSWD}'"
    echo "ENABLE_NODE_MONITORING='${ENABLE_NODE_MONITORING:-false}'"
    echo "ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'"
    echo "LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'"
    echo "ENABLE_CLUSTER_DNS='${ENABLE_CLUSTER_DNS:-false}'"
    echo "DNS_SERVER_IP='${DNS_SERVER_IP:-}'"
    echo "DNS_DOMAIN='${DNS_DOMAIN:-}'"
    echo "DNS_REPLICAS='${DNS_REPLICAS:-}'"
    echo "RUNTIME_CONFIG='${RUNTIME_CONFIG:-}'"
    grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-master.sh"
    grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-network.sh"
  ) > "${KUBE_TEMP}/master-start.sh"

  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    (
      echo "#! /bin/bash"
      echo "MASTER_NAME='${MASTER_NAME}'"
      echo "MASTER_IP='${MASTER_IP}'"
      echo "MINION_NAMES=(${MINION_NAMES[@]})"
      echo "MINION_IPS=(${MINION_IPS[@]})"
      echo "MINION_IP='${MINION_IPS[$i]}'"
      echo "MINION_ID='$i'"
      echo "NODE_IP='${MINION_IPS[$i]}'"
      echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'"
      echo "CONTAINER_ADDR='${MINION_CONTAINER_ADDRS[$i]}'"
      echo "CONTAINER_NETMASK='${MINION_CONTAINER_NETMASKS[$i]}'"
      echo "MINION_CONTAINER_SUBNETS=(${MINION_CONTAINER_SUBNETS[@]})"
      echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'"
      echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS-}'"
      grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-minion.sh"
      grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-network.sh"
    ) > "${KUBE_TEMP}/minion-start-${i}.sh"
  done
}

function verify-cluster {
  echo "Each machine instance has been created/updated."
  echo "  Now waiting for the Salt provisioning process to complete on each machine."
  echo "  This can take some time based on your network, disk, and CPU speed."
  echo "  It is possible for an error to occur during Salt provisioning of the cluster, in which case this check could loop forever."

  # verify master has all required daemons
  echo "Validating master"
  local machine="master"
  local -a required_daemon=("salt-master" "salt-minion" "kube-apiserver" "nginx" "kube-controller-manager" "kube-scheduler")
  local validated="1"
  until [[ "$validated" == "0" ]]; do
    validated="0"
    local daemon
    for daemon in "${required_daemon[@]}"; do
      vagrant ssh "$machine" -c "which '${daemon}'" >/dev/null 2>&1 || {
        printf "."
        validated="1"
        sleep 2
      }
    done
  done

  # verify each minion has all required daemons
  local i
  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    echo "Validating ${VAGRANT_MINION_NAMES[$i]}"
    local machine=${VAGRANT_MINION_NAMES[$i]}
    local -a required_daemon=("salt-minion" "kubelet" "docker")
    local validated="1"
    until [[ "$validated" == "0" ]]; do
      validated="0"
      local daemon
      for daemon in "${required_daemon[@]}"; do
        vagrant ssh "$machine" -c "which $daemon" >/dev/null 2>&1 || {
          printf "."
          validated="1"
          sleep 2
        }
      done
    done
  done

  echo
  echo "Waiting for each minion to be registered with cloud provider"
  for (( i=0; i<${#MINION_IPS[@]}; i++)); do
    local machine="${MINION_IPS[$i]}"
    local count="0"
    until [[ "$count" == "1" ]]; do
      local minions
      minions=$("${KUBE_ROOT}/cluster/kubectl.sh" --kubeconfig="${HOME}/.kubernetes_vagrant_kubeconfig" get minions -o template -t '{{range.items}}{{.id}}:{{end}}')
      count=$(echo $minions | grep -c "${MINION_IPS[i]}") || {
        printf "."
        sleep 2
        count="0"
      }
    done
  done

  (
    echo
    echo "Kubernetes cluster is running. The master is running at:"
    echo
    echo "  https://${MASTER_IP}"
    echo
    echo "The user name and password to use are located in ~/.kubernetes_vagrant_auth."
    echo
  )
}

# Instantiate a kubernetes cluster
function kube-up {
  get-password
  create-provision-scripts

  vagrant up

  local kube_cert=".kubecfg.vagrant.crt"
  local kube_key=".kubecfg.vagrant.key"
  local ca_cert=".kubernetes.vagrant.ca.crt"

  (umask 077
   vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null
   vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null
   vagrant ssh master -- sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null

   cat <<EOF >"${HOME}/.kubernetes_vagrant_auth"
{
  "User": "$KUBE_USER",
  "Password": "$KUBE_PASSWORD",
  "CAFile": "$HOME/$ca_cert",
  "CertFile": "$HOME/$kube_cert",
  "KeyFile": "$HOME/$kube_key"
}
EOF

   cat <<EOF >"${HOME}/.kubernetes_vagrant_kubeconfig"
apiVersion: v1
clusters:
- cluster:
    server: https://${MASTER_IP}:443
  name: vagrant
contexts:
- context:
    cluster: vagrant
    namespace: default
    user: vagrant
  name: vagrant
current-context: "vagrant"
kind: Config
preferences: {}
users:
- name: vagrant
  user:
    auth-path: ${HOME}/.kubernetes_vagrant_auth
EOF

   chmod 0600 ~/.kubernetes_vagrant_auth "${HOME}/${kube_cert}" \
     "${HOME}/${kube_key}" "${HOME}/${ca_cert}"
  )

  verify-cluster
}
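
# Illustrative only: once kube-up completes, the generated kubeconfig can be handed
# to kubectl explicitly, e.g.
#   cluster/kubectl.sh --kubeconfig="${HOME}/.kubernetes_vagrant_kubeconfig" get minions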

# Delete a kubernetes cluster
function kube-down {
  vagrant destroy -f
}

# Update a kubernetes cluster with latest source
function kube-push {
  get-password
  create-provision-scripts
  vagrant provision
}

# Execute prior to running tests to build a release if required for this environment
function test-build-release {
  # Make a release
  "${KUBE_ROOT}/build/release.sh"
}

# Execute prior to running tests to initialize required structure
function test-setup {
  echo "Vagrant test setup complete" 1>&2
}

# Execute after running tests to perform any required clean-up
function test-teardown {
  echo "Vagrant ignores tear-down" 1>&2
}

# Set the {user} and {password} environment values required to interact with the provider
function get-password {
  export KUBE_USER=vagrant
  export KUBE_PASSWORD=vagrant
  echo "Using credentials: $KUBE_USER:$KUBE_PASSWORD" 1>&2
}

# Find the minion name based on the IP address
function find-vagrant-name-by-ip {
  local ip="$1"
  local ip_pattern="${MINION_IP_BASE}(.*)"

  # This is subtle. We map 10.245.2.2 -> minion-1. We do this by matching a
  # regexp and using the capture to construct the name.
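  # For example, assuming the default MINION_IP_BASE of "10.245.2.", an input of
  # "10.245.2.3" captures "3" and this echoes "minion-2".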
  [[ $ip =~ $ip_pattern ]] || {
    return 1
  }

  echo "minion-$((${BASH_REMATCH[1]} - 1))"
}

# Find the vagrant machine name based on the host name of the minion
function find-vagrant-name-by-minion-name {
  local ip="$1"
  if [[ "$ip" == "${INSTANCE_PREFIX}-master" ]]; then
    echo "master"
    return $?
  fi
  local ip_pattern="${INSTANCE_PREFIX}-minion-(.*)"

  [[ $ip =~ $ip_pattern ]] || {
    return 1
  }

  echo "minion-${BASH_REMATCH[1]}"
}

# SSH to a node by name or IP ($1) and run a command ($2).
function ssh-to-node {
  local node="$1"
  local cmd="$2"
  local machine

  machine=$(find-vagrant-name-by-ip $node) || true
  [[ -n ${machine-} ]] || machine=$(find-vagrant-name-by-minion-name $node) || true
  [[ -n ${machine-} ]] || {
    echo "Cannot find machine to ssh to: $1"
    return 1
  }

  vagrant ssh "${machine}" -c "${cmd}"
}
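
# Illustrative usage, assuming the default minion IP range:
#   ssh-to-node "10.245.2.2" "sudo systemctl status kubelet"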

# Restart the kube-proxy on a node ($1)
function restart-kube-proxy {
  ssh-to-node "$1" "sudo systemctl restart kube-proxy"
}

# Restart the apiserver
function restart-apiserver {
  ssh-to-node "$1" "sudo systemctl restart kube-apiserver"
}

function setup-monitoring-firewall {
  echo "TODO" 1>&2
}

function teardown-monitoring-firewall {
  echo "TODO" 1>&2
}

# Perform preparations required to run e2e tests
function prepare-e2e() {
  echo "Vagrant doesn't need special preparations for e2e tests" 1>&2
}

function setup-logging-firewall {
  echo "TODO: setup logging"
}

function teardown-logging-firewall {
  echo "TODO: teardown logging"
}