diff --git a/cluster/common.sh b/cluster/common.sh
index 6fd3d4b661..4e9789dcf1 100755
--- a/cluster/common.sh
+++ b/cluster/common.sh
@@ -159,15 +159,6 @@ function clear-kubeconfig() {
   echo "Cleared config for ${CONTEXT} from ${KUBECONFIG}"
 }
 
-function tear_down_alive_resources() {
-  local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
-  "${kubectl}" delete deployments --all || true
-  "${kubectl}" delete rc --all || true
-  "${kubectl}" delete pods --all || true
-  "${kubectl}" delete svc --all || true
-  "${kubectl}" delete pvc --all || true
-}
-
 # Gets username, password for the current-context in kubeconfig, if they exist.
 # Assumed vars:
 #   KUBECONFIG  # if unset, defaults to global
@@ -253,17 +244,6 @@ function gen-kube-bearertoken() {
   KUBE_BEARER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
 }
 
-# Generate uid
-# This function only works on systems with python. It generates a time based
-# UID instead of a UUID because GCE has a name length limit.
-#
-# Vars set:
-#   KUBE_UID
-function gen-uid {
-  KUBE_UID=$(python -c 'import uuid; print(uuid.uuid1().fields[0])')
-}
-
-
 function load-or-gen-kube-basicauth() {
   if [[ ! -z "${KUBE_CONTEXT:-}" ]]; then
     get-kubeconfig-basicauth
@@ -293,28 +273,6 @@ function load-or-gen-kube-bearertoken() {
   fi
 }
 
-# Get the master IP for the current-context in kubeconfig if one exists.
-#
-# Assumed vars:
-#   KUBECONFIG  # if unset, defaults to global
-#   KUBE_CONTEXT  # if unset, defaults to current-context
-#
-# Vars set:
-#   KUBE_MASTER_URL
-#
-# KUBE_MASTER_URL will be empty if no current-context is set, or the
-# current-context user does not exist or contain a server entry.
-function detect-master-from-kubeconfig() {
-  export KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG}
-
-  local cc=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.current-context}")
-  if [[ ! -z "${KUBE_CONTEXT:-}" ]]; then
-    cc="${KUBE_CONTEXT}"
-  fi
-  local cluster=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.contexts[?(@.name == \"${cc}\")].context.cluster}")
-  KUBE_MASTER_URL=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.clusters[?(@.name == \"${cluster}\")].cluster.server}")
-}
-
 # Sets KUBE_VERSION variable to the proper version number (e.g. "v1.0.6",
 # "v1.2.0-alpha.1.881+376438b69c7612") or a version publication of the form
 # <path>/<version> (e.g. "release/stable", "ci/latest-1").
@@ -569,7 +527,6 @@ function build-kube-env {
   fi
 
   build-runtime-config
-  gen-uid
 
   rm -f ${file}
   cat >$file <<EOF
diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh
--- a/cluster/gce/util.sh
+++ b/cluster/gce/util.sh
-# Robustly try to add metadata on an instance.
-# $1: The name of the instance.
-# $2...$n: The metadata key=value pairs to add.
-function add-instance-metadata() {
-  local -r instance=$1
-  shift 1
-  local -r kvs=( "$@" )
-  detect-project
-  local attempt=0
-  while true; do
-    if ! gcloud compute instances add-metadata "${instance}" \
-      --project "${PROJECT}" \
-      --zone "${ZONE}" \
-      --metadata "${kvs[@]}"; then
-        if (( attempt > 5 )); then
-          echo -e "${color_red}Failed to add instance metadata in ${instance} ${color_norm}" >&2
-          exit 2
-        fi
-        echo -e "${color_yellow}Attempt $(($attempt+1)) failed to add metadata in ${instance}. Retrying.${color_norm}" >&2
-        attempt=$(($attempt+1))
-        sleep $((5 * $attempt))
-    else
-      break
-    fi
-  done
-}
-
-# Robustly try to add metadata on an instance, from a file.
-# $1: The name of the instance.
-# $2...$n: The metadata key=file pairs to add.
-function add-instance-metadata-from-file() {
-  local -r instance=$1
-  shift 1
-  local -r kvs=( "$@" )
-  detect-project
-  local attempt=0
-  while true; do
-    echo "${kvs[@]}"
-    if ! gcloud compute instances add-metadata "${instance}" \
-      --project "${PROJECT}" \
-      --zone "${ZONE}" \
-      --metadata-from-file "$(join_csv ${kvs[@]})"; then
-        if (( attempt > 5 )); then
-          echo -e "${color_red}Failed to add instance metadata in ${instance} ${color_norm}" >&2
-          exit 2
-        fi
-        echo -e "${color_yellow}Attempt $(($attempt+1)) failed to add metadata in ${instance}. Retrying.${color_norm}" >&2
-        attempt=$(($attempt+1))
-        sleep $(($attempt * 5))
-    else
-      break
-    fi
-  done
-}
-
 # Instantiate a kubernetes cluster
 #
 # Assumed vars
diff --git a/hack/ginkgo-e2e.sh b/hack/ginkgo-e2e.sh
index dd1e0a1ab3..bc75401861 100755
--- a/hack/ginkgo-e2e.sh
+++ b/hack/ginkgo-e2e.sh
@@ -45,6 +45,17 @@ export KUBECTL KUBE_CONFIG_FILE
 
 source "${KUBE_ROOT}/cluster/kube-util.sh"
 
+function detect-master-from-kubeconfig() {
+  export KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG}
+
+  local cc=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.current-context}")
+  if [[ ! -z "${KUBE_CONTEXT:-}" ]]; then
+    cc="${KUBE_CONTEXT}"
+  fi
+  local cluster=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.contexts[?(@.name == \"${cc}\")].context.cluster}")
+  KUBE_MASTER_URL=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.clusters[?(@.name == \"${cluster}\")].cluster.server}")
+}
+
 # ---- Do cloud-provider-specific setup
 if [[ -n "${KUBERNETES_CONFORMANCE_TEST:-}" ]]; then
   echo "Conformance test: not doing test setup."