GKE deployment: Kill cluster/gke

kubernetes/test-infra#3983 migrated the remaining GKE jobs away from the
bash deployment (cluster/gke).

Fixes kubernetes/test-infra#3307
Zach Loafman 2017-08-08 13:47:16 -07:00
parent c9d142d73d
commit 0f12159ccd
5 changed files with 0 additions and 659 deletions


@@ -1,52 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script should be sourced as part of config-test or config-default.
# Specifically, the following environment variables are assumed:
# - CLUSTER_NAME (the name of the cluster)
if [ ! -z "${REGION:-}" ] && [ ! -z "${ZONE:-}" ]; then
echo "Only one of REGION and ZONE can be set." >&2
exit 1
fi
if [ -z "${REGION:-}" ]; then
ZONE="${ZONE:-us-central1-f}"
fi
NUM_NODES="${NUM_NODES:-3}"
ADDITIONAL_ZONES="${ADDITIONAL_ZONES:-}"
CLUSTER_API_VERSION="${CLUSTER_API_VERSION:-}"
NETWORK="${NETWORK:-default}"
FIREWALL_SSH="${FIREWALL_SSH:-${NETWORK}-allow-ssh}"
GCLOUD="${GCLOUD:-gcloud}"
CMD_GROUP="${CMD_GROUP:-}"
GCLOUD_CONFIG_DIR="${GCLOUD_CONFIG_DIR:-${HOME}/.config/gcloud/kubernetes}"
MACHINE_TYPE="${MACHINE_TYPE:-n1-standard-2}"
IMAGE_TYPE="${IMAGE_TYPE:-}"
if [[ "${FEDERATION:-}" == true ]]; then
NODE_SCOPES="${NODE_SCOPES:-compute-rw,storage-ro,https://www.googleapis.com/auth/ndev.clouddns.readwrite}"
else
NODE_SCOPES="${NODE_SCOPES:-compute-rw,storage-ro}"
fi
# WARNING: any new vars added here must correspond to options that can be
# passed to `gcloud {CMD_GROUP} container clusters create`, or they will
# have no effect. If you change/add a var used to toggle a value in
# cluster/gce/configure-vm.sh, please ping someone on GKE.
# This is a hack, but I keep setting this when I run commands manually, and
# then things grossly fail during normal runs because cluster/kubecfg.sh and
# cluster/kubectl.sh both use this if it's set.
unset KUBERNETES_MASTER
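# Illustrative sketch (assumed values): with the defaults above and
# CLUSTER_NAME=dev, the create call assembled later in cluster/gke/util.sh
# expands to roughly:
#   gcloud container clusters create dev \
#     --project=my-project --zone=us-central1-f \
#     --scopes=compute-rw,storage-ro \
#     --num-nodes=3 --network=default --machine-type=n1-standard-2 --quiet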


@@ -1,56 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The following are default-specific settings.
CLUSTER_NAME="${CLUSTER_NAME:-${USER}-gke}"
NETWORK=${KUBE_GKE_NETWORK:-default}
# For ease of maintenance, extract any pieces that do not vary between default
# and test into a common config.
source $(dirname "${BASH_SOURCE}")/config-common.sh
# Optional: Install node logging
ENABLE_NODE_LOGGING=false
LOGGING_DESTINATION=gcp # options: elasticsearch, gcp
# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING=false
ELASTICSEARCH_LOGGING_REPLICAS=1
# Optional: Deploy a L7 loadbalancer controller to fulfill Ingress requests:
# glbc - GCE L7 Load Balancer Controller
ENABLE_L7_LOADBALANCING="${KUBE_ENABLE_L7_LOADBALANCING:-glbc}"
# Optional: Cluster monitoring to setup as part of the cluster bring up:
# none - No cluster monitoring setup
# influxdb - Heapster, InfluxDB, and Grafana
# google - Heapster, Google Cloud Monitoring, and Google Cloud Logging
# standalone - Heapster only. Metrics available via Heapster REST API.
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-standalone}"
KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-false}
# Indicates if the values (i.e. KUBE_USER and KUBE_PASSWORD for basic
# authentication) in metadata should be treated as canonical, and therefore disk
# copies ought to be recreated/clobbered.
METADATA_CLOBBERS_CONFIG=true
# Fluentd requirements
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-300Mi}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-100m}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-200Mi}"
# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST"


@@ -1,28 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The following are test-specific settings.
CLUSTER_NAME="${CLUSTER_NAME:-${USER}-gke-e2e}"
NETWORK=${KUBE_GKE_NETWORK:-e2e}
NODE_TAG="k8s-${CLUSTER_NAME}-node"
IMAGE_TYPE="${KUBE_GKE_IMAGE_TYPE:-container_vm}"
ENABLE_KUBERNETES_ALPHA="${KUBE_GKE_ENABLE_KUBERNETES_ALPHA:-}"
KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-true}
# For ease of maintenance, extract any pieces that do not vary between default
# and test into a common config.
source $(dirname "${BASH_SOURCE}")/config-common.sh


@@ -1,65 +0,0 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
echo "This is NOT a production-ready tool.\n\
IT'S A HACKY, BEST-EFFORT WAY TO \"STOP\" CREATION OF THE GKE CLUSTER."
read -n 1 -p "Are you sure you want to proceed (y/N)?: " decision
echo ""
if [[ "${decision}" != "y" ]]; then
echo "Aborting..."
exit 0
fi
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
if [ -f "${KUBE_ROOT}/cluster/env.sh" ]; then
source "${KUBE_ROOT}/cluster/env.sh"
fi
source "${KUBE_ROOT}/cluster/gke/util.sh"
STAGING_ENDPOINT="CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER=https://staging-container.sandbox.googleapis.com/"
detect-project
cluster=$(gcloud container operations list "--project=${PROJECT}" | grep "CREATE_CLUSTER" | grep "RUNNING" || true)
if [ -z "${cluster}" ]; then
echo "Couldn't find any cluster being created in production environment. Trying staging..."
cluster=$(env ${STAGING_ENDPOINT} gcloud container operations list "--project=${PROJECT}" | grep "CREATE_CLUSTER" | grep "RUNNING" || true)
fi
if [ -z "${cluster}" ]; then
echo "No cluster creation in progress found. Aborting."
exit 0
fi
zone=$(echo "${cluster}" | tr -s "[:blank:]" | cut -f3 -d" ")
cluster_name=$(echo "${cluster}" | tr -s "[:blank:]" | cut -f4 -d" ")
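# Field positions assume the tabular output of `gcloud container operations
# list`; an illustrative row (assumed values):
#   NAME           TYPE            ZONE           TARGET  STATUS_MESSAGE  STATUS
#   operation-123  CREATE_CLUSTER  us-central1-f  my-e2e                  RUNNING
# After `tr -s "[:blank:]"`, field 3 is the zone and field 4 the cluster name.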
gcloud="gcloud"
if [ "${zone}" == "us-east1-a" ]; then
gcloud="env ${STAGING_ENDPOINT} gcloud"
fi
migs=$(${gcloud} compute instance-groups managed list --project=${PROJECT} --zones=${zone} | grep "gke-${cluster_name}" | cut -f1 -d" ")
echo "Managed instance groups for cluster ${cluster_name}: ${migs}"
for mig in ${migs}; do
echo "Resizing ${mig}..."
${gcloud} compute instance-groups managed resize --project="${PROJECT}" --zone="${zone}" "${mig}" --size=1
done
echo "All managed instance groups resized to 1. Cluster creation operation should end soon, and you will be be able to delete the cluster."


@@ -1,458 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the local config.
# Uses the config file specified in $KUBE_CONFIG_FILE, or defaults to config-default.sh
KUBE_PROMPT_FOR_UPDATE=${KUBE_PROMPT_FOR_UPDATE:-"n"}
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/gke/${KUBE_CONFIG_FILE:-config-default.sh}"
source "${KUBE_ROOT}/cluster/common.sh"
source "${KUBE_ROOT}/hack/lib/util.sh"
function with-retry() {
local retry_limit=$1
local cmd=("${@:2}")
local retry_count=0
local rc=0
until [[ ${retry_count} -ge ${retry_limit} ]]; do
((retry_count+=1))
"${cmd[@]}" && rc=0 || rc=$?
if [[ ${rc} == 0 ]]; then
return 0
fi
sleep 3
done
echo "Failed to execute '${cmd[@]}' for $retry_limit times." >&2
return ${rc}
}
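# Example usage (illustrative arguments): retry a flaky call up to 3 times,
# sleeping 3s between attempts; the last non-zero exit code is propagated:
#   with-retry 3 "${GCLOUD}" compute networks create "${NETWORK}" --project="${PROJECT}"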
# Perform preparations required to run e2e tests
#
# Assumed vars:
# GCLOUD
function prepare-e2e() {
echo "... in gke:prepare-e2e()" >&2
# Ensure GCLOUD is set to some gcloud binary.
if [[ -z "${GCLOUD:-}" ]]; then
echo "GCLOUD environment variable is not set. It should be your gcloud binary. " >&2
echo "A sane default is probably \$ export GCLOUD=gcloud" >&2
exit 1
fi
}
# Use the gcloud defaults to find the project. If it is already set in the
# environment then go with that.
#
# Assumed vars:
# GCLOUD
# Vars set:
# PROJECT
# SCOPE_ARGS
function detect-project() {
echo "... in gke:detect-project()" >&2
if [[ -z "${PROJECT:-}" ]]; then
export PROJECT=$("${GCLOUD}" config list project --format 'value(core.project)')
echo "... Using project: ${PROJECT}" >&2
fi
if [[ -z "${PROJECT:-}" ]]; then
echo "Could not detect Google Cloud Platform project. Set the default project using " >&2
echo "'gcloud config set project <PROJECT>'" >&2
exit 1
fi
SCOPE_ARGS=(
"--project=${PROJECT}"
)
if [[ ! -z "${ZONE:-}" ]]; then
SCOPE_ARGS+=("--zone=${ZONE}")
fi
if [[ ! -z "${REGION:-}" ]]; then
SCOPE_ARGS+=("--region=${REGION}")
fi
}
# Execute prior to running tests to build a release if required for env.
#
# Assumed Vars:
# KUBE_ROOT
function test-build-release() {
echo "... in gke:test-build-release()" >&2
"${KUBE_ROOT}/build/release.sh"
}
# Verify needed binaries exist.
function verify-prereqs() {
echo "... in gke:verify-prereqs()" >&2
if ! which gcloud >/dev/null; then
local resp
if [[ "${KUBE_PROMPT_FOR_UPDATE}" == "y" ]]; then
echo "Can't find gcloud in PATH. Do you wish to install the Google Cloud SDK? [Y/n]"
read resp
fi
if [[ "${resp}" != "n" && "${resp}" != "N" ]]; then
curl https://sdk.cloud.google.com | bash
fi
if ! which gcloud >/dev/null; then
echo "Can't find gcloud in PATH, please fix and retry. The Google Cloud "
echo "SDK can be downloaded from https://cloud.google.com/sdk/."
exit 1
fi
fi
update-or-verify-gcloud
}
# Validate a kubernetes cluster
function validate-cluster {
# If we've spread nodes across multiple zones, override the NUM_NODES variable
# before calling into the generic validate-cluster logic.
local EXPECTED_NUM_NODES="${NUM_NODES}"
if [ ! -z "${REGION:-}" ]; then
(( EXPECTED_NUM_NODES *= 3 ))
fi
for zone in $(echo "${ADDITIONAL_ZONES}" | sed "s/,/ /g")
do
(( EXPECTED_NUM_NODES += NUM_NODES ))
done
NUM_NODES=${EXPECTED_NUM_NODES} bash -c "${KUBE_ROOT}/cluster/validate-cluster.sh"
}
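# Worked example (illustrative): with NUM_NODES=3, no REGION, and
# ADDITIONAL_ZONES="us-central1-b,us-central1-c", the loop adds NUM_NODES once
# per extra zone: 3 + 3 + 3 = 9, so validate-cluster.sh runs with NUM_NODES=9.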
# Instantiate a kubernetes cluster
#
# Assumed vars:
# GCLOUD
# CLUSTER_NAME
# ZONE (optional)
# REGION (optional)
# CLUSTER_API_VERSION (optional)
# NUM_NODES
# ADDITIONAL_ZONES (optional)
# NODE_SCOPES
# MACHINE_TYPE
# HEAPSTER_MACHINE_TYPE (optional)
# CLUSTER_IP_RANGE (optional)
# GKE_CREATE_FLAGS (optional, space-delimited)
# ENABLE_KUBERNETES_ALPHA (optional)
function kube-up() {
echo "... in gke:kube-up()" >&2
detect-project >&2
# Make the specified network if we need to.
if ! "${GCLOUD}" compute networks --project "${PROJECT}" describe "${NETWORK}" &>/dev/null; then
echo "Creating new network: ${NETWORK}" >&2
with-retry 3 "${GCLOUD}" compute networks create "${NETWORK}" --project="${PROJECT}" --mode=auto
else
echo "... Using network: ${NETWORK}" >&2
fi
# Allow SSH on all nodes in the network. This doesn't actually check whether
# such a rule exists, only whether we've created this exact rule.
if ! "${GCLOUD}" compute firewall-rules --project "${PROJECT}" describe "${FIREWALL_SSH}" &>/dev/null; then
echo "Creating new firewall for SSH: ${FIREWALL_SSH}" >&2
with-retry 3 "${GCLOUD}" compute firewall-rules create "${FIREWALL_SSH}" \
--allow="tcp:22" \
--network="${NETWORK}" \
--project="${PROJECT}" \
--source-ranges="0.0.0.0/0"
else
echo "... Using firewall-rule: ${FIREWALL_SSH}" >&2
fi
local shared_args=(
${SCOPE_ARGS[@]}
"--scopes=${NODE_SCOPES}"
)
if [[ ! -z "${IMAGE_TYPE:-}" ]]; then
shared_args+=("--image-type=${IMAGE_TYPE}")
fi
if [[ -z "${HEAPSTER_MACHINE_TYPE:-}" ]]; then
local -r nodes="${NUM_NODES}"
else
local -r nodes=$(( NUM_NODES - 1 ))
fi
local create_args=(
${shared_args[@]}
"--num-nodes=${nodes}"
"--network=${NETWORK}"
"--cluster-version=${CLUSTER_API_VERSION}"
"--machine-type=${MACHINE_TYPE}"
"--quiet"
)
if [[ ! -z "${ENABLE_KUBERNETES_ALPHA:-}" ]]; then
create_args+=("--enable-kubernetes-alpha")
fi
if [[ ! -z "${ADDITIONAL_ZONES:-}" ]]; then
create_args+=("--additional-zones=${ADDITIONAL_ZONES}")
fi
if [[ ! -z "${CLUSTER_IP_RANGE:-}" ]]; then
create_args+=("--cluster-ipv4-cidr=${CLUSTER_IP_RANGE}")
fi
if [[ ! -z "${ENABLE_LEGACY_ABAC:-}" ]]; then
if [[ "${ENABLE_LEGACY_ABAC:-}" == "true" ]]; then
create_args+=("--enable-legacy-authorization")
else
create_args+=("--no-enable-legacy-authorization")
fi
fi
create_args+=( ${GKE_CREATE_FLAGS:-} )
# Bring up the cluster.
"${GCLOUD}" ${CMD_GROUP:-} container clusters create "${CLUSTER_NAME}" "${create_args[@]}"
create-kubeconfig-for-federation
if [[ ! -z "${HEAPSTER_MACHINE_TYPE:-}" ]]; then
"${GCLOUD}" ${CMD_GROUP:-} container node-pools create "heapster-pool" --cluster "${CLUSTER_NAME}" --num-nodes=1 --machine-type="${HEAPSTER_MACHINE_TYPE}" "${shared_args[@]}"
fi
}
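# Illustrative note (assumed values): with NUM_NODES=3 and
# HEAPSTER_MACHINE_TYPE=n1-standard-8, the default pool is created with
# --num-nodes=2 and a separate one-node "heapster-pool" is added afterwards,
# keeping the node total at NUM_NODES.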
# Execute prior to running tests to initialize required structure. This is
# called from hack/e2e.go only when running -up (it is run after kube-up, so
# the cluster already exists at this point).
#
# Assumed vars:
# CLUSTER_NAME
# GCLOUD
# ZONE
# Vars set:
# NODE_TAG
function test-setup() {
echo "... in gke:test-setup()" >&2
# Detect the project into $PROJECT if it isn't set
detect-project >&2
"${KUBE_ROOT}/cluster/kube-up.sh"
detect-nodes >&2
# At this point, CLUSTER_NAME should have been used, so its value is final.
NODE_TAG=$($GCLOUD compute instances list ${NODE_NAMES[0]} --project="${PROJECT}" --format='value(tags.items)' | grep -o "gke-${CLUSTER_NAME}-.\{8\}-node")
OLD_NODE_TAG="k8s-${CLUSTER_NAME}-node"
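# Example (illustrative): for CLUSTER_NAME=jenkins-e2e, the grep above matches
# a generated tag of the form "gke-jenkins-e2e-1a2b3c4d-node" (eight generated
# characters between the cluster name and "-node").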
# Open up port 80 & 8080 so common containers on minions can be reached.
with-retry 3 "${GCLOUD}" compute firewall-rules create \
"${CLUSTER_NAME}-http-alt" \
--allow tcp:80,tcp:8080 \
--project "${PROJECT}" \
--target-tags "${NODE_TAG},${OLD_NODE_TAG}" \
--network="${NETWORK}" &
with-retry 3 "${GCLOUD}" compute firewall-rules create \
"${CLUSTER_NAME}-nodeports" \
--allow tcp:30000-32767,udp:30000-32767 \
--project "${PROJECT}" \
--target-tags "${NODE_TAG},${OLD_NODE_TAG}" \
--network="${NETWORK}" &
# Wait for firewall rules.
kube::util::wait-for-jobs || {
echo "... gke:test-setup(): Could not create firewall" >&2
return 1
}
}
# Detect the IP for the master. Note that on GKE, we don't know the name of the
# master, so KUBE_MASTER is not set.
#
# Assumed vars:
# ZONE
# CLUSTER_NAME
# Vars set:
# KUBE_MASTER_IP
function detect-master() {
echo "... in gke:detect-master()" >&2
detect-project >&2
KUBE_MASTER_IP=$("${GCLOUD}" ${CMD_GROUP:-} container clusters describe \
${SCOPE_ARGS[@]} --format='value(endpoint)' \
"${CLUSTER_NAME}")
}
# Assumed vars:
# none
# Vars set:
# NODE_NAMES
function detect-nodes() {
echo "... in gke:detect-nodes()" >&2
detect-node-names
}
# Detect minions created in the minion group
#
# Note that for zonal clusters this will only select nodes in the same zone as the
# cluster, meaning that it won't include all nodes in a multi-zone cluster.
# For regional clusters, this will select nodes only from an arbitrarily chosen node instance group.
#
# Assumed vars:
# GCLOUD
# PROJECT
# ZONE (optional)
# REGION (optional)
# CLUSTER_NAME
# Vars set:
# NODE_NAMES
function detect-node-names {
echo "... in gke:detect-node-names()" >&2
detect-project
detect-node-instance-groups
NODE_NAMES=()
for group in "${NODE_INSTANCE_GROUPS[@]:-}"; do
# We can't simply use --zone "${ZONE}" as ZONE may not be set (e.g. when REGION is set).
local igm_zone=$(gcloud compute instance-groups managed list "${group}" --format='value(zone)')
NODE_NAMES+=($(gcloud compute instance-groups managed list-instances \
"${group}" --zone "${igm_zone}" \
--project "${PROJECT}" --format='value(instance)'))
done
echo "NODE_NAMES=${NODE_NAMES[*]:-}"
}
# Detect instance group name generated by gke.
#
# Note that for zonal clusters the NODE_INSTANCE_GROUPS var will only have instance groups in the
# same zone as the cluster, meaning that it won't include all groups in a
# multi-zone cluster.
# For regional clusters, NODE_INSTANCE_GROUPS is set to an arbitrarily chosen node instance group.
# The ALL_INSTANCE_GROUP_URLS will contain all the instance group URLs,
# which include multi-zone groups.
#
# Assumed vars:
# GCLOUD
# SCOPE_ARGS
# ZONE (optional)
# REGION (optional)
# CLUSTER_NAME
# Vars set:
# NODE_INSTANCE_GROUPS
# ALL_INSTANCE_GROUP_URLS
function detect-node-instance-groups {
echo "... in gke:detect-node-instance-groups()" >&2
local urls=$("${GCLOUD}" ${CMD_GROUP:-} container clusters describe \
${SCOPE_ARGS[@]} --format='value(instanceGroupUrls)' "${CLUSTER_NAME}")
urls=(${urls//;/ })
ALL_INSTANCE_GROUP_URLS=${urls[*]}
NODE_INSTANCE_GROUPS=()
if [[ ! -z "${ZONE:-}" ]]; then
for url in "${urls[@]:-}"; do
local igm_zone=$(expr ${url} : '.*/zones/\([a-z0-9-]*\)/')
if [[ "${igm_zone}" == "${ZONE}" ]]; then
NODE_INSTANCE_GROUPS+=("${url##*/}")
fi
done
fi
if [[ ! -z "${REGION:-}" ]]; then
NODE_INSTANCE_GROUPS+=("${urls[0]}")
fi
}
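# Example (illustrative URL): for
#   https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-f/instanceGroupManagers/gke-dev-default-pool-1a2b3c4d-grp
# the expr call above extracts "us-central1-f" as the zone, and ${url##*/}
# yields "gke-dev-default-pool-1a2b3c4d-grp".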
# SSH to a node by name ($1) and run a command ($2).
#
# Assumed vars:
# GCLOUD
# ZONE
function ssh-to-node() {
echo "... in gke:ssh-to-node()" >&2
detect-project >&2
local node="$1"
local cmd="$2"
# Loop until we can successfully ssh into the box
for try in {1..5}; do
if gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --ssh-flag="-o ConnectTimeout=30" --project "${PROJECT}" --zone="${ZONE}" "${node}" --command "echo test > /dev/null"; then
break
fi
sleep 5
done
# Then actually try the command.
gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --ssh-flag="-o ConnectTimeout=30" --project "${PROJECT}" --zone="${ZONE}" "${node}" --command "${cmd}"
}
# Execute after running tests to perform any required clean-up. This is called
# from hack/e2e.go. This calls kube-down, so the cluster still exists when this
# is called.
#
# Assumed vars:
# CLUSTER_NAME
# GCLOUD
# KUBE_ROOT
# ZONE
function test-teardown() {
echo "... in gke:test-teardown()" >&2
detect-project >&2
# Tear down the cluster first.
"${KUBE_ROOT}/cluster/kube-down.sh" || true
# Then remove the firewall rules. We do it in this order because the
# time to delete a firewall is actually dependent on the number of
# instances, but we can safely delete the cluster before the firewall.
#
# NOTE: Keep in sync with names above in test-setup.
for fw in "${CLUSTER_NAME}-http-alt" "${CLUSTER_NAME}-nodeports" "${FIREWALL_SSH}"; do
if [[ -n $("${GCLOUD}" compute firewall-rules --project "${PROJECT}" describe "${fw}" --format='value(name)' 2>/dev/null || true) ]]; then
with-retry 3 "${GCLOUD}" compute firewall-rules delete "${fw}" --project="${PROJECT}" --quiet &
fi
done
# Wait for firewall rule teardown.
kube::util::wait-for-jobs || true
# It's unfortunate that the $FIREWALL_SSH rule and network are created in
# kube-up, but we can only really delete them in test-teardown. So much for
# symmetry.
if [[ "${KUBE_DELETE_NETWORK}" == "true" ]]; then
if [[ -n $("${GCLOUD}" compute networks --project "${PROJECT}" describe "${NETWORK}" --format='value(name)' 2>/dev/null || true) ]]; then
if ! with-retry 3 "${GCLOUD}" compute networks delete --project "${PROJECT}" --quiet "${NETWORK}"; then
echo "Failed to delete network '${NETWORK}'. Listing firewall-rules:"
"${GCLOUD}" compute firewall-rules --project "${PROJECT}" list --filter="network=${NETWORK}"
fi
fi
fi
}
# Actually take down the cluster. This is called from test-teardown.
#
# Assumed vars:
# GCLOUD
# SCOPE_ARGS
# ZONE (optional)
# REGION (optional)
# CLUSTER_NAME
function kube-down() {
echo "... in gke:kube-down()" >&2
detect-project >&2
if "${GCLOUD}" ${CMD_GROUP:-} container clusters describe ${SCOPE_ARGS[@]} "${CLUSTER_NAME}" --quiet &>/dev/null; then
with-retry 3 "${GCLOUD}" ${CMD_GROUP:-} container clusters delete ${SCOPE_ARGS[@]} \
"${CLUSTER_NAME}" --quiet
fi
}