#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# A library of helper functions and constants for the local config.

# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/gke/${KUBE_CONFIG_FILE:-config-default.sh}"

# Perform preparations required to run e2e tests
#
# Assumed vars:
#   GCLOUD
function prepare-e2e() {
  echo "... in prepare-e2e()" >&2

  # Ensure GCLOUD is set to some gcloud binary.
  if [[ -z "${GCLOUD:-}" ]]; then
    echo "GCLOUD environment variable is not set. It should be your gcloud binary. " >&2
    echo "A sane default is probably \$ export GCLOUD=gcloud" >&2
    exit 1
  fi
}

# Use the gcloud defaults to find the project. If it is already set in the
# environment then go with that.
#
# Assumed vars:
#   GCLOUD
# Vars set:
#   PROJECT
function detect-project() {
  echo "... in detect-project()" >&2
  if [[ -z "${PROJECT:-}" ]]; then
    export PROJECT=$("${GCLOUD}" config list project | tail -n 1 | cut -f 3 -d ' ')
  fi

  if [[ -z "${PROJECT:-}" ]]; then
    echo "Could not detect Google Cloud Platform project. Set the default project using " >&2
    echo "'gcloud config set project <PROJECT>'" >&2
    exit 1
  fi
  echo "Project: ${PROJECT}" >&2
}
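
# Usage sketch (illustrative project ID, not from this repo): pre-setting
# PROJECT skips detection entirely:
#   export PROJECT=my-gcp-project
#   detect-project   # prints "Project: my-gcp-project" to stderr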

# Execute prior to running tests to build a release if required for env.
function test-build-release() {
  echo "... in test-build-release()" >&2
  # We currently use the Kubernetes version that GKE supports (not testing
  # bleeding-edge builds).
}

# Verify needed binaries exist.
function verify-prereqs() {
  echo "... in verify-prereqs()" >&2
  "${GCLOUD}" preview --help >/dev/null || {
    echo "Either the GCLOUD environment variable is wrong, or the 'preview' component" >&2
    echo "is not installed. (Fix with 'gcloud components update preview')" >&2
    # Fail verification so callers don't proceed with a broken gcloud setup.
    return 1
  }
}

# Instantiate a kubernetes cluster
#
# Assumed vars:
#   GCLOUD
#   CLUSTER_NAME
#   ZONE
#   CLUSTER_API_VERSION (optional)
#   NUM_MINIONS
#   NETWORK
#   NETWORK_RANGE
#   FIREWALL_SSH
function kube-up() {
  echo "... in kube-up()" >&2
  detect-project >&2

  # Make the specified network if we need to.
  if ! "${GCLOUD}" compute networks describe "${NETWORK}" --project="${PROJECT}" &>/dev/null; then
    echo "Creating new network: ${NETWORK}" >&2
    "${GCLOUD}" compute networks create "${NETWORK}" --project="${PROJECT}" --range "${NETWORK_RANGE}"
  fi

  # Allow SSH on all nodes in the network. This doesn't actually check whether
  # such a rule exists, only whether we've created this exact rule.
  if ! "${GCLOUD}" compute firewall-rules describe "${FIREWALL_SSH}" --project="${PROJECT}" &>/dev/null; then
    echo "Creating new firewall for SSH: ${FIREWALL_SSH}" >&2
    "${GCLOUD}" compute firewall-rules create "${FIREWALL_SSH}" \
      --allow="tcp:22" \
      --network="${NETWORK}" \
      --project="${PROJECT}" \
      --source-ranges="0.0.0.0/0"
  fi

  # Bring up the cluster.
"${GCLOUD}" preview container clusters create "${CLUSTER_NAME}" \
--zone="${ZONE}" \
--project="${PROJECT}" \
--cluster-api-version="${CLUSTER_API_VERSION:-}" \
--num-nodes="${NUM_MINIONS}" \
--network="${NETWORK}"
}
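
# Usage sketch (illustrative values, not from this repo): kube-up expects the
# cluster settings to be exported beforehand, typically via config-default.sh:
#   export CLUSTER_NAME=e2e-test ZONE=us-central1-f NUM_MINIONS=2
#   export NETWORK=e2e-test NETWORK_RANGE=10.240.0.0/16 FIREWALL_SSH=e2e-test-ssh
#   kube-up
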
# Called during cluster/kube-up.sh
function setup-monitoring-firewall() {
  echo "... in setup-monitoring-firewall()" >&2
  # TODO(mbforbes): This isn't currently supported in GKE.
}

# Execute prior to running tests to initialize required structure. This is
# called from hack/e2e-go only when running -up (it is run after kube-up, so
# the cluster already exists at this point).
#
# Assumed vars:
#   CLUSTER_NAME
#   GCLOUD
# Vars set:
#   MINION_TAG
function test-setup() {
  echo "... in test-setup()" >&2
  # Detect the project into $PROJECT if it isn't set
  detect-project >&2

  # At this point, CLUSTER_NAME should have been used, so its value is final.
  MINION_TAG="k8s-${CLUSTER_NAME}-node"

  # Open up ports 80 & 8080 so common containers on minions can be reached.
  # TODO(mbforbes): Is adding ${USER} necessary, and sufficient, to avoid
  # collisions here?
  "${GCLOUD}" compute firewall-rules create \
    "${MINION_TAG}-${USER}-http-alt" \
    --allow tcp:80 tcp:8080 \
    --project "${PROJECT}" \
    --target-tags "${MINION_TAG}" \
    --network="${NETWORK}"
}
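
# For example (illustrative values): with CLUSTER_NAME=e2e-test and USER=jenkins,
# this creates a firewall rule named "k8s-e2e-test-node-jenkins-http-alt" targeting
# instances tagged "k8s-e2e-test-node"; test-teardown deletes that same rule.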

# Ensure that we have a password created for authenticating to the master.
#
# Assumed vars:
#   ZONE
#   CLUSTER_NAME
# Vars set:
#   KUBE_USER
#   KUBE_PASSWORD
function get-password() {
  echo "... in get-password()" >&2
  detect-project >&2
  KUBE_USER=$("${GCLOUD}" preview container clusters describe \
    --project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" \
    | grep user | cut -f 4 -d ' ')
  KUBE_PASSWORD=$("${GCLOUD}" preview container clusters describe \
    --project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" \
    | grep password | cut -f 4 -d ' ')
}
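
# Usage sketch (assumes detect-master has also run; the API path is illustrative):
#   get-password
#   curl -k -u "${KUBE_USER}:${KUBE_PASSWORD}" "https://${KUBE_MASTER_IP}/api/v1beta1/pods"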

# Detect the instance name and IP for the master
#
# Assumed vars:
#   ZONE
#   CLUSTER_NAME
# Vars set:
#   KUBE_MASTER
#   KUBE_MASTER_IP
function detect-master() {
  echo "... in detect-master()" >&2
  detect-project >&2
  KUBE_MASTER="k8s-${CLUSTER_NAME}-master"
  KUBE_MASTER_IP=$("${GCLOUD}" preview container clusters describe \
    --project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" \
    | grep endpoint | cut -f 2 -d ' ')
}

# Assumed vars:
#   NUM_MINIONS
#   CLUSTER_NAME
# Vars set:
#   (none)
function detect-minions() {
  echo "... in detect-minions()" >&2
}

# SSH to a node by name ($1) and run a command ($2).
#
# Assumed vars:
#   GCLOUD
#   ZONE
function ssh-to-node() {
  echo "... in ssh-to-node()" >&2
  detect-project >&2
  local node="$1"
  local cmd="$2"
  "${GCLOUD}" compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" \
    --zone="${ZONE}" "${node}" --command "${cmd}"
}
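
# Usage sketch (hypothetical node name):
#   ssh-to-node "k8s-${CLUSTER_NAME}-node-1" "sudo docker ps"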

# Restart the kube-proxy on a node ($1)
function restart-kube-proxy() {
  echo "... in restart-kube-proxy()" >&2
  ssh-to-node "$1" "sudo /etc/init.d/kube-proxy restart"
}

# Restart the kube-apiserver on the master ($1)
function restart-apiserver() {
  echo "... in restart-apiserver()" >&2
  ssh-to-node "$1" "sudo /etc/init.d/kube-apiserver restart"
}

# Execute after running tests to perform any required clean-up. This is called
# from hack/e2e-test.sh. This calls kube-down, so the cluster still exists when
# this is called.
#
# Assumed vars:
#   CLUSTER_NAME
#   GCLOUD
#   KUBE_ROOT
function test-teardown() {
  echo "... in test-teardown()" >&2
  detect-project >&2

  # At this point, CLUSTER_NAME should have been used, so its value is final.
  MINION_TAG="k8s-${CLUSTER_NAME}-node"

  # First, remove anything we did with test-setup (currently, the firewall).
  # NOTE: Keep in sync with name above in test-setup.
  "${GCLOUD}" compute firewall-rules delete "${MINION_TAG}-${USER}-http-alt" \
    --project="${PROJECT}" || true

  # Then actually turn down the cluster.
  "${KUBE_ROOT}/cluster/kube-down.sh"
}

# Tears down monitoring.
function teardown-monitoring-firewall() {
  echo "... in teardown-monitoring-firewall()" >&2
  # TODO(mbforbes): This isn't currently supported in GKE.
}

# Actually take down the cluster. This is called from test-teardown.
#
# Assumed vars:
#   GCLOUD
#   ZONE
#   CLUSTER_NAME
function kube-down() {
  echo "... in kube-down()" >&2
  detect-project >&2
  "${GCLOUD}" preview container clusters delete --project="${PROJECT}" \
    --zone="${ZONE}" "${CLUSTER_NAME}"
}
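
# Usage sketch (assumption: the gke provider is selected the same way as in the
# sibling cluster scripts):
#   KUBERNETES_PROVIDER=gke cluster/kube-down.sh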

# Sets up the logging firewall. Not yet implemented for GKE.
function setup-logging-firewall() {
  echo "TODO: setup logging"
}

# Tears down the logging firewall. Not yet implemented for GKE.
function teardown-logging-firewall() {
  echo "TODO: teardown logging"
}