mirror of https://github.com/k3s-io/k3s
Merge pull request #41672 from mikedanese/delete-azure
Automatic merge from submit-queue (batch tested with PRs 41672, 42084, 42233, 42165, 42273). Remove the Azure kube-up scripts; they haven't been touched in more than 7 months. @colemickens, I'm going to send out an email about this.

```release-note
Remove Azure kube-up as the Azure community has focused efforts elsewhere.
```
commit 7c3398c9b5
@@ -1,2 +0,0 @@
_deployments
config-real.sh
@@ -1,60 +0,0 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

INSTANCE_PREFIX=kubernetes
AZ_LOCATION='West US'
TAG=testing
AZ_CS_PREFIX=kube
AZ_VNET=${AZ_VNET:-MyVnet}
AZ_SUBNET=${AZ_SUBNET:-Subnet-1}
AZ_IMAGE=b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04_1-LTS-amd64-server-20140927-en-us-30GB
AZ_CS="" # is set in azure-legacy/util.sh verify-prereqs

AZ_SSH_KEY=$HOME/.ssh/azure_rsa
AZ_SSH_CERT=$HOME/.ssh/azure.pem

NUM_MINIONS=${NUM_MINIONS:-4}
MASTER_SIZE='Medium'
MINION_SIZE='Medium'

MASTER_NAME="${INSTANCE_PREFIX}-master"
MASTER_TAG="${INSTANCE_PREFIX}-master"
MINION_TAG="${INSTANCE_PREFIX}-minion"
MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}}))
MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24"))
MINION_SCOPES=""

SERVICE_CLUSTER_IP_RANGE="10.244.244.0/16"  # formerly PORTAL_NET

# Optional: Install node logging
ENABLE_NODE_LOGGING=false
LOGGING_DESTINATION=elasticsearch # options: elasticsearch, gcp

# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING=false
ELASTICSEARCH_LOGGING_REPLICAS=1

# Optional: Cluster monitoring to setup as part of the cluster bring up:
#   none     - No cluster monitoring setup
#   influxdb - Heapster, InfluxDB, and Grafana
#   google   - Heapster, Google Cloud Monitoring, and Google Cloud Logging
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"

# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"

# Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds
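The config above is only read when the legacy Azure provider is selected; every value can be overridden from the environment before bringing a cluster up. A minimal sketch, assuming `KUBERNETES_PROVIDER=azure-legacy` is how `kube-up.sh` selects this directory; the vnet and subnet names are placeholders:

```bash
# Hypothetical overrides for the defaults above (vnet/subnet names are examples).
export KUBERNETES_PROVIDER=azure-legacy   # assumption: selects cluster/azure-legacy
export AZ_VNET=my-kube-vnet               # must already exist; see the vnet check in util.sh kube-up
export AZ_SUBNET=my-kube-subnet
export NUM_MINIONS=2                      # overrides the default of 4
cluster/kube-up.sh
```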
@@ -1,58 +0,0 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Retry a download until we get it.
#
# $1 is the URL to download
download-or-bust() {
  local -r url="$1"
  local -r file="${url##*/}"
  rm -f "$file"
  until [[ -e "${file}" ]]; do
    curl --ipv4 -Lo "$file" --connect-timeout 20 --retry 6 --retry-delay 10 "$url"
    md5sum "$file"
  done
}

# Install salt from GCS.  See README.md for instructions on how to update these
# debs.
#
# $1 If set to --master, also install the master
install-salt() {
  apt-get update

  mkdir -p /var/cache/salt-install
  cd /var/cache/salt-install

  TARS=(
    libzmq3_3.2.3+dfsg-1~bpo70~dst+1_amd64.deb
    python-zmq_13.1.0-1~bpo70~dst+1_amd64.deb
    salt-common_2014.1.13+ds-1~bpo70+1_all.deb
    salt-minion_2014.1.13+ds-1~bpo70+1_all.deb
  )
  if [[ ${1-} == '--master' ]]; then
    TARS+=(salt-master_2014.1.13+ds-1~bpo70+1_all.deb)
  fi
  URL_BASE="https://storage.googleapis.com/kubernetes-release/salt"

  for tar in "${TARS[@]}"; do
    download-or-bust "${URL_BASE}/${tar}"
    dpkg -i "${tar}"
  done

  # This will install any of the unmet dependencies from above.
  apt-get install -f -y
}
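download-or-bust keeps re-running curl until the target file exists locally, then prints its md5sum so the transfer can be eyeballed in the provisioning log. A usage sketch with one of the salt debs listed above (URL assembled from URL_BASE; not part of the original template):

```bash
# Assumes this template has been sourced so download-or-bust is defined.
source cluster/azure-legacy/templates/common.sh
download-or-bust "https://storage.googleapis.com/kubernetes-release/salt/salt-common_2014.1.13+ds-1~bpo70+1_all.deb"
```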
@@ -1,30 +0,0 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Create the overlay files for the salt tree.  We create these in a separate
# place so that we can blow away the rest of the salt configs on a kube-push and
# re-apply these.

mkdir -p /srv/salt-overlay/pillar
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
node_instance_prefix: $NODE_INSTANCE_PREFIX
service_cluster_ip_range: $SERVICE_CLUSTER_IP_RANGE
admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
EOF

mkdir -p /srv/salt-overlay/salt/nginx
echo $MASTER_HTPASSWD > /srv/salt-overlay/salt/nginx/htpasswd
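For illustration only: with the defaults from config-default.sh (instance prefix `kubernetes`, service range `10.244.244.0/16`) the heredoc above would leave a pillar file roughly like the comments below; the node_instance_prefix value is whatever the master start-up script exported.

```bash
cat /srv/salt-overlay/pillar/cluster-params.sls
# instance_prefix: 'kubernetes'
# node_instance_prefix: kubernetes-minion
# service_cluster_ip_range: 10.244.244.0/16
# admission_control: 'NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds'
```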
@@ -1,77 +0,0 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Create the kubeconfig files for kubelet and kube-proxy on the minions.
# KUBE_USER and KUBE_PASSWORD are required.

function create-salt-kubelet-auth() {
  local -r kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/kubeconfig"
  mkdir -p /srv/salt-overlay/salt/kubelet
  (umask 077;
    cat > "${kubelet_kubeconfig_file}" <<EOF
apiVersion: v1
clusters:
- cluster:
    insecure-skip-tls-verify: true
    server: https://${KUBE_MASTER_IP}
  name: azure_kubernetes
contexts:
- context:
    cluster: azure_kubernetes
    user: kubelet
  name: azure_kubernetes
current-context: azure_kubernetes
kind: Config
preferences: {}
users:
- name: kubelet
  user:
    password: ${KUBE_PASSWORD}
    username: ${KUBE_USER}
EOF
  )
}

function create-salt-kube-proxy-auth() {
  local -r kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig"
  mkdir -p /srv/salt-overlay/salt/kube-proxy
  (umask 077;
    cat > "${kube_proxy_kubeconfig_file}" <<EOF
apiVersion: v1
clusters:
- cluster:
    insecure-skip-tls-verify: true
    server: https://${KUBE_MASTER_IP}
  name: azure_kubernetes
contexts:
- context:
    cluster: azure_kubernetes
    user: kube-proxy
  name: azure_kubernetes
current-context: azure_kubernetes
kind: Config
preferences: {}
users:
- name: kube-proxy
  user:
    password: ${KUBE_PASSWORD}
    username: ${KUBE_USER}
EOF
  )
}

create-salt-kubelet-auth
create-salt-kube-proxy-auth
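A quick way to sanity-check the generated files (not part of the original template; assumes kubectl is available on the node):

```bash
# Print the kubelet kubeconfig back in normalized form; a parse error here means
# the heredoc above was rendered with bad values.
kubectl --kubeconfig=/srv/salt-overlay/salt/kubelet/kubeconfig config view --minify
```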
@@ -1,35 +0,0 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Download and install release

# This script assumes that the environment variable MASTER_RELEASE_TAR contains
# the release tar to download and unpack.  It is meant to be pushed to the
# master and run.


echo "Downloading binary release tar ($SERVER_BINARY_TAR_URL)"
download-or-bust "$SERVER_BINARY_TAR_URL"

echo "Downloading binary release tar ($SALT_TAR_URL)"
download-or-bust "$SALT_TAR_URL"

echo "Unpacking Salt tree"
rm -rf kubernetes
tar xzf "${SALT_TAR_URL##*/}"

echo "Running release install script"
sudo kubernetes/saltbase/install.sh "${SERVER_BINARY_TAR_URL##*/}"
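The two URLs are expected to be exported by the master start-up script that util.sh assembles; the values below are placeholders in the shape that upload-server-tars produces (storage account `kube<hash>`, container `kube-testing`):

```bash
# Placeholder values for a dry run of this template on a scratch machine.
export SERVER_BINARY_TAR_URL="https://kube1234567.blob.core.windows.net/kube-testing/kubernetes-server-linux-amd64.tar.gz"
export SALT_TAR_URL="https://kube1234567.blob.core.windows.net/kube-testing/kubernetes-salt.tar.gz"
```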
@@ -1,92 +0,0 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Prepopulate the name of the Master
mkdir -p /etc/salt/minion.d
echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf

cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
  roles:
    - kubernetes-master
  cloud: azure-legacy
EOF


# Helper that sets a salt grain in grains.conf, if the upper-cased key is a non-empty env
function env_to_salt {
  local key=$1
  local env_key=`echo $key | tr '[:lower:]' '[:upper:]'`
  local value=${!env_key}
  if [[ -n "${value}" ]]; then
    # Note this is yaml, so indentation matters
    cat <<EOF >>/etc/salt/minion.d/grains.conf
  ${key}: '$(echo "${value}" | sed -e "s/'/''/g")'
EOF
  fi
}

env_to_salt docker_opts
env_to_salt docker_root
env_to_salt kubelet_root
env_to_salt master_extra_sans
env_to_salt runtime_config


# Auto accept all keys from minions that try to join
mkdir -p /etc/salt/master.d
cat <<EOF >/etc/salt/master.d/auto-accept.conf
auto_accept: True
EOF

cat <<EOF >/etc/salt/master.d/reactor.conf
# React to new minions starting by running highstate on them.
reactor:
  - 'salt/minion/*/start':
    - /srv/reactor/highstate-new.sls
EOF

mkdir -p /srv/salt/nginx
echo $MASTER_HTPASSWD > /srv/salt/nginx/htpasswd

mkdir -p /etc/openvpn
umask=$(umask)
umask 0066
echo "$CA_CRT" > /etc/openvpn/ca.crt
echo "$SERVER_CRT" > /etc/openvpn/server.crt
echo "$SERVER_KEY" > /etc/openvpn/server.key
umask $umask

cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
log_level: debug
log_level_logfile: debug
EOF

cat <<EOF >/etc/salt/master.d/log-level-debug.conf
log_level: debug
log_level_logfile: debug
EOF

echo "Sleeping 150 seconds to wait for minions to come up"
sleep 150

install-salt --master

# Wait a few minutes and trigger another Salt run to better recover from
# any transient errors.
echo "Sleeping 180"
sleep 180
salt-call state.highstate || true
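For illustration, after the env_to_salt calls above the master's grains file would look roughly like the comments below (the docker_opts value is a hypothetical example; that key only appears if DOCKER_OPTS was set in the start-up environment):

```bash
cat /etc/salt/minion.d/grains.conf
# grains:
#   roles:
#     - kubernetes-master
#   cloud: azure-legacy
#   docker_opts: '--log-level=warn'
```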
@@ -1,75 +0,0 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

mkdir -p /etc/openvpn
umask=$(umask)
umask 0066
echo "$CA_CRT" > /etc/openvpn/ca.crt
echo "$CLIENT_CRT" > /etc/openvpn/client.crt
echo "$CLIENT_KEY" > /etc/openvpn/client.key
umask $umask

# Prepopulate the name of the Master
mkdir -p /etc/salt/minion.d
echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf

cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
log_level: debug
log_level_logfile: debug
EOF

hostnamef=$(uname -n)
apt-get install -y ipcalc
netmask=$(ipcalc $MINION_IP_RANGE | grep Netmask | awk '{ print $2 }')
network=$(ipcalc $MINION_IP_RANGE | grep Address | awk '{ print $2 }')
cbrstring="$network $netmask"

# Our minions will have a pool role to distinguish them from the master.
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
  roles:
    - kubernetes-pool
  cbr-cidr: $MINION_IP_RANGE
  cloud: azure-legacy
  hostnamef: $hostnamef
  cbr-string: $cbrstring
EOF

if [[ -n "${DOCKER_OPTS}" ]]; then
  cat <<EOF >>/etc/salt/minion.d/grains.conf
  docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")'
EOF
fi

if [[ -n "${DOCKER_ROOT}" ]]; then
  cat <<EOF >>/etc/salt/minion.d/grains.conf
  docker_root: '$(echo "$DOCKER_ROOT" | sed -e "s/'/''/g")'
EOF
fi

if [[ -n "${KUBELET_ROOT}" ]]; then
  cat <<EOF >>/etc/salt/minion.d/grains.conf
  kubelet_root: '$(echo "$KUBELET_ROOT" | sed -e "s/'/''/g")'
EOF
fi

install-salt

# Wait a few minutes and trigger another Salt run to better recover from
# any transient errors.
echo "Sleeping 180"
sleep 180
salt-call state.highstate || true
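A worked example of the ipcalc parsing above, assuming the Debian ipcalc output format that the greps rely on: for MINION_IP_RANGE=10.244.1.0/24 the extracted values are network 10.244.1.0 and netmask 255.255.255.0, so cbrstring becomes "10.244.1.0 255.255.255.0".

```bash
# Reproduces the two pipelines from the template for a sample minion range.
ipcalc 10.244.1.0/24 | grep Netmask | awk '{ print $2 }'   # 255.255.255.0
ipcalc 10.244.1.0/24 | grep Address | awk '{ print $2 }'   # 10.244.1.0
```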
@@ -1,540 +0,0 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# A library of helper functions and constants for the local config.

# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.

set -e

SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do
  DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
  SOURCE="$(readlink "$SOURCE")"
  [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/azure-legacy/${KUBE_CONFIG_FILE-"config-default.sh"}"
source "${KUBE_ROOT}/cluster/common.sh"


function prepare-e2e() {
  # (e2e script runs detect-project; I don't think we need to do anything here)
  # Note: we can't print anything here, or else the test tools will break with the extra output
  return
}

function azure_call {
  local -a params=()
  local param
  # the '... in "$@"' is implicit on a for, so doesn't need to be stated.
  for param; do
    params+=("${param}")
  done
  local rc=0
  local stderr
  local count=0
  while [[ count -lt 10 ]]; do
    stderr=$(azure "${params[@]}" 2>&1 >&3) && break
    rc=$?
    if [[ "${stderr}" != *"getaddrinfo ENOTFOUND"* ]]; then
      break
    fi
    count=$(($count + 1))
  done 3>&1
  if [[ "${rc}" -ne 0 ]]; then
    echo "${stderr}" >&2
    return "${rc}"
  fi
}

function json_val () {
  python -c 'import json,sys;obj=json.load(sys.stdin);print obj'$1'';
}

# Verify prereqs
function verify-prereqs {
  if [[ -z "$(which azure)" ]]; then
    echo "Couldn't find azure in PATH"
    echo "  please install with 'npm install azure-cli'"
    exit 1
  fi

  if [[ -z "$(azure_call account list | grep true)" ]]; then
    echo "Default azure account not set"
    echo "  please set with 'azure account set'"
    exit 1
  fi

  account=$(azure_call account list | grep true)
  if which md5 > /dev/null 2>&1; then
    AZ_HSH=$(md5 -q -s "$account")
  else
    AZ_HSH=$(echo -n "$account" | md5sum)
  fi

  AZ_HSH=${AZ_HSH:0:7}
  AZ_STG=kube$AZ_HSH
  echo "==> AZ_STG: $AZ_STG"

  AZ_CS="$AZ_CS_PREFIX-$AZ_HSH"
  echo "==> AZ_CS: $AZ_CS"

  CONTAINER=kube-$TAG
  echo "==> CONTAINER: $CONTAINER"
}

# Take the local tar files and upload them to Azure Storage.  They will then be
# downloaded by the master as part of the start up script for the master.
#
# Assumed vars:
#   SERVER_BINARY_TAR
#   SALT_TAR
# Vars set:
#   SERVER_BINARY_TAR_URL
#   SALT_TAR_URL
function upload-server-tars() {
  SERVER_BINARY_TAR_URL=
  SALT_TAR_URL=

  echo "==> SERVER_BINARY_TAR: $SERVER_BINARY_TAR"
  echo "==> SALT_TAR: $SALT_TAR"

  echo "+++ Staging server tars to Azure Storage: $AZ_STG"
  local server_binary_url="${SERVER_BINARY_TAR##*/}"
  local salt_url="${SALT_TAR##*/}"

  SERVER_BINARY_TAR_URL="https://${AZ_STG}.blob.core.windows.net/$CONTAINER/$server_binary_url"
  SALT_TAR_URL="https://${AZ_STG}.blob.core.windows.net/$CONTAINER/$salt_url"

  echo "==> SERVER_BINARY_TAR_URL: $SERVER_BINARY_TAR_URL"
  echo "==> SALT_TAR_URL: $SALT_TAR_URL"

  echo "--> Checking storage exists..."
  if [[ -z "$(azure_call storage account show $AZ_STG 2>/dev/null | \
    grep data)" ]]; then
    echo "--> Creating storage..."
    azure_call storage account create -l "$AZ_LOCATION" $AZ_STG --type LRS
  fi

  echo "--> Getting storage key..."
  stg_key=$(azure_call storage account keys list $AZ_STG --json | \
    json_val '["primaryKey"]')

  echo "--> Checking storage container exists..."
  if [[ -z "$(azure_call storage container show -a $AZ_STG -k "$stg_key" \
    $CONTAINER 2>/dev/null | grep data)" ]]; then
    echo "--> Creating storage container..."
    azure_call storage container create \
      -a $AZ_STG \
      -k "$stg_key" \
      -p Blob \
      $CONTAINER
  fi

  echo "--> Checking server binary exists in the container..."
  if [[ -n "$(azure_call storage blob show -a $AZ_STG -k "$stg_key" \
    $CONTAINER $server_binary_url 2>/dev/null | grep data)" ]]; then
    echo "--> Deleting server binary in the container..."
    azure_call storage blob delete \
      -a $AZ_STG \
      -k "$stg_key" \
      $CONTAINER \
      $server_binary_url
  fi

  echo "--> Uploading server binary to the container..."
  azure_call storage blob upload \
    -a $AZ_STG \
    -k "$stg_key" \
    $SERVER_BINARY_TAR \
    $CONTAINER \
    $server_binary_url

  echo "--> Checking salt data exists in the container..."
  if [[ -n "$(azure_call storage blob show -a $AZ_STG -k "$stg_key" \
    $CONTAINER $salt_url 2>/dev/null | grep data)" ]]; then
    echo "--> Deleting salt data in the container..."
    azure_call storage blob delete \
      -a $AZ_STG \
      -k "$stg_key" \
      $CONTAINER \
      $salt_url
  fi

  echo "--> Uploading salt data to the container..."
  azure_call storage blob upload \
    -a $AZ_STG \
    -k "$stg_key" \
    $SALT_TAR \
    $CONTAINER \
    $salt_url
}

# Detect the information about the minions
#
# Assumed vars:
#   MINION_NAMES
#   ZONE
# Vars set:
#
function detect-minions () {
  if [[ -z "$AZ_CS" ]]; then
    verify-prereqs-local
  fi
  ssh_ports=($(eval echo "2200{1..$NUM_MINIONS}"))
  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    MINION_NAMES[$i]=$(ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net hostname -f)
  done
}

# Detect the IP for the master
#
# Assumed vars:
#   MASTER_NAME
#   ZONE
# Vars set:
#   KUBE_MASTER
#   KUBE_MASTER_IP
function detect-master () {
  if [[ -z "$AZ_CS" ]]; then
    verify-prereqs-local
  fi

  KUBE_MASTER=${MASTER_NAME}
  KUBE_MASTER_IP="${AZ_CS}.cloudapp.net"
  echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
}

# Instantiate a kubernetes cluster
#
# Assumed vars
#   KUBE_ROOT
#   <Various vars set in config file>
function kube-up {
  # Make sure we have the tar files staged on Azure Storage
  find-release-tars
  upload-server-tars

  ensure-temp-dir

  gen-kube-basicauth
  python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
    -b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
  local htpasswd
  htpasswd=$(cat "${KUBE_TEMP}/htpasswd")

  # Generate openvpn certs
  echo "--> Generating openvpn certs"
  echo 01 > ${KUBE_TEMP}/ca.srl
  openssl genrsa -out ${KUBE_TEMP}/ca.key
  openssl req -new -x509 -days 1095 \
    -key ${KUBE_TEMP}/ca.key \
    -out ${KUBE_TEMP}/ca.crt \
    -subj "/CN=openvpn-ca"
  openssl genrsa -out ${KUBE_TEMP}/server.key
  openssl req -new \
    -key ${KUBE_TEMP}/server.key \
    -out ${KUBE_TEMP}/server.csr \
    -subj "/CN=server"
  openssl x509 -req -days 1095 \
    -in ${KUBE_TEMP}/server.csr \
    -CA ${KUBE_TEMP}/ca.crt \
    -CAkey ${KUBE_TEMP}/ca.key \
    -CAserial ${KUBE_TEMP}/ca.srl \
    -out ${KUBE_TEMP}/server.crt
  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    openssl genrsa -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.key
    openssl req -new \
      -key ${KUBE_TEMP}/${MINION_NAMES[$i]}.key \
      -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.csr \
      -subj "/CN=${MINION_NAMES[$i]}"
    openssl x509 -req -days 1095 \
      -in ${KUBE_TEMP}/${MINION_NAMES[$i]}.csr \
      -CA ${KUBE_TEMP}/ca.crt \
      -CAkey ${KUBE_TEMP}/ca.key \
      -CAserial ${KUBE_TEMP}/ca.srl \
      -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.crt
  done

  KUBE_MASTER_IP="${AZ_CS}.cloudapp.net"

  # Build up start up script for master
  echo "--> Building up start up script for master"
  (
    echo "#!/bin/bash"
    echo "CA_CRT=\"$(cat ${KUBE_TEMP}/ca.crt)\""
    echo "SERVER_CRT=\"$(cat ${KUBE_TEMP}/server.crt)\""
    echo "SERVER_KEY=\"$(cat ${KUBE_TEMP}/server.key)\""
    echo "mkdir -p /var/cache/kubernetes-install"
    echo "cd /var/cache/kubernetes-install"
    echo "readonly MASTER_NAME='${MASTER_NAME}'"
    echo "readonly INSTANCE_PREFIX='${INSTANCE_PREFIX}'"
    echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-minion'"
    echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'"
    echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
    echo "readonly MASTER_HTPASSWD='${htpasswd}'"
    echo "readonly SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
    echo "readonly ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'"
    echo "readonly KUBE_USER='${KUBE_USER}'"
    echo "readonly KUBE_PASSWORD='${KUBE_PASSWORD}'"
    echo "readonly KUBE_MASTER_IP='${KUBE_MASTER_IP}'"
    grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/common.sh"
    grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/create-dynamic-salt-files.sh"
    grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/create-kubeconfig.sh"
    grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/download-release.sh"
    grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/salt-master.sh"
  ) > "${KUBE_TEMP}/master-start.sh"

  if [[ ! -f $AZ_SSH_KEY ]]; then
    ssh-keygen -f $AZ_SSH_KEY -N ''
  fi

  if [[ ! -f $AZ_SSH_CERT ]]; then
    openssl req -new -x509 -days 1095 -key $AZ_SSH_KEY -out $AZ_SSH_CERT \
      -subj "/CN=azure-ssh-key"
  fi

  if [[ -z "$(azure_call network vnet show "$AZ_VNET" 2>/dev/null | grep data)" ]]; then
    echo "Error: vnet $AZ_VNET with subnet $AZ_SUBNET does not exist; please create it first"
    exit 1
  fi

  echo "--> Starting VM"
  azure_call vm create \
    -z "$MASTER_SIZE" \
    -w "$AZ_VNET" \
    -n $MASTER_NAME \
    -l "$AZ_LOCATION" \
    -t $AZ_SSH_CERT \
    -e 22000 -P \
    -d ${KUBE_TEMP}/master-start.sh \
    -b $AZ_SUBNET \
    $AZ_CS $AZ_IMAGE $USER

  ssh_ports=($(eval echo "2200{1..$NUM_MINIONS}"))

  # Build up start up script for minions
  echo "--> Building up start up script for minions"
  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    (
      echo "#!/bin/bash"
      echo "MASTER_NAME='${MASTER_NAME}'"
      echo "CA_CRT=\"$(cat ${KUBE_TEMP}/ca.crt)\""
      echo "CLIENT_CRT=\"$(cat ${KUBE_TEMP}/${MINION_NAMES[$i]}.crt)\""
      echo "CLIENT_KEY=\"$(cat ${KUBE_TEMP}/${MINION_NAMES[$i]}.key)\""
      echo "MINION_IP_RANGE='${MINION_IP_RANGES[$i]}'"
      echo "readonly KUBE_USER='${KUBE_USER}'"
      echo "readonly KUBE_PASSWORD='${KUBE_PASSWORD}'"
      echo "readonly KUBE_MASTER_IP='${KUBE_MASTER_IP}'"
      grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/common.sh"
      grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/create-kubeconfig.sh"
      grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/salt-minion.sh"
    ) > "${KUBE_TEMP}/minion-start-${i}.sh"

    echo "--> Starting VM"
    azure_call vm create \
      -z "$MINION_SIZE" \
      -c -w "$AZ_VNET" \
      -n ${MINION_NAMES[$i]} \
      -l "$AZ_LOCATION" \
      -t $AZ_SSH_CERT \
      -e ${ssh_ports[$i]} -P \
      -d ${KUBE_TEMP}/minion-start-${i}.sh \
      -b $AZ_SUBNET \
      $AZ_CS $AZ_IMAGE $USER
  done

  echo "--> Creating endpoint"
  azure_call vm endpoint create $MASTER_NAME 443

  detect-master > /dev/null

  echo "==> KUBE_MASTER_IP: ${KUBE_MASTER_IP}"

  echo "Waiting for cluster initialization."
  echo
  echo "  This will continually check to see if the API for kubernetes is reachable."
  echo "  This might loop forever if there was some uncaught error during start"
  echo "  up."
  echo

  until curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" --max-time 5 \
    --fail --output /dev/null --silent "https://${KUBE_MASTER_IP}/healthz"; do
    printf "."
    sleep 2
  done

  printf "\n"
  echo "Kubernetes cluster created."

  export CONTEXT="azure_${INSTANCE_PREFIX}"
  create-kubeconfig
  export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
  export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
  export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"

  # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's
  # config file.  Distribute the same way the htpasswd is done.
  (umask 077
    ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
      sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>/dev/null
    ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
      sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null
    ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
      sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null
  )

  echo "Sanity checking cluster..."
  echo
  echo "  This will continually check the minions to ensure docker is"
  echo "  installed.  This is usually a good indicator that salt has"
  echo "  successfully provisioned.  This might loop forever if there was"
  echo "  some uncaught error during start up."
  echo
  # Basic sanity checking
  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    # Make sure docker is installed
    echo "--> Making sure docker is installed on ${MINION_NAMES[$i]}."
    until ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} \
      $AZ_CS.cloudapp.net which docker > /dev/null 2>&1; do
      printf "."
      sleep 2
    done
  done

  sleep 60
  KUBECONFIG_NAME="kubeconfig"
  KUBECONFIG="${HOME}/.kube/config"
  echo "Distributing kubeconfig for kubelet to master kubelet"
  scp -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -P 22000 ${KUBECONFIG} \
    $AZ_CS.cloudapp.net:${KUBECONFIG_NAME}
  ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
    sudo cp ${KUBECONFIG_NAME} /var/lib/kubelet/${KUBECONFIG_NAME}
  ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
    sudo service kubelet restart

  echo "Distributing kubeconfig for kubelet to all minions"
  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    scp -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -P ${ssh_ports[$i]} ${KUBECONFIG} \
      $AZ_CS.cloudapp.net:${KUBECONFIG_NAME}
    ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net \
      sudo cp ${KUBECONFIG_NAME} /var/lib/kubelet/${KUBECONFIG_NAME}
    ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net \
      sudo cp ${KUBECONFIG_NAME} /var/lib/kube-proxy/${KUBECONFIG_NAME}
    ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net \
      sudo service kubelet restart
    ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net \
      sudo killall kube-proxy
  done

  # ensures KUBECONFIG is set
  get-kubeconfig-basicauth
  echo
  echo "Kubernetes cluster is running.  The master is running at:"
  echo
  echo "  https://${KUBE_MASTER_IP}"
  echo
  echo "The user name and password to use is located in ${KUBECONFIG}."
  echo
}

# Delete a kubernetes cluster
function kube-down {
  echo "Bringing down cluster"

  set +e
  azure_call vm delete $MASTER_NAME -b -q
  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    azure_call vm delete ${MINION_NAMES[$i]} -b -q
  done

  wait
}

# Update a kubernetes cluster with latest source
#function kube-push {
#  detect-project
#  detect-master

#  # Make sure we have the tar files staged on Azure Storage
#  find-release-tars
#  upload-server-tars

#  (
#    echo "#! /bin/bash"
#    echo "mkdir -p /var/cache/kubernetes-install"
#    echo "cd /var/cache/kubernetes-install"
#    echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'"
#    echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
#    grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/common.sh"
#    grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/download-release.sh"
#    echo "echo Executing configuration"
#    echo "sudo salt '*' mine.update"
#    echo "sudo salt --force-color '*' state.highstate"
#  ) | gcutil ssh --project "$PROJECT" --zone "$ZONE" "$KUBE_MASTER" sudo bash

#  get-kubeconfig-basicauth

#  echo
#  echo "Kubernetes cluster is running.  The master is running at:"
#  echo
#  echo "  https://${KUBE_MASTER_IP}"
#  echo
#  echo "The user name and password to use is located in ${KUBECONFIG:-$DEFAULT_KUBECONFIG}."
#  echo

#}

# -----------------------------------------------------------------------------
# Cluster specific test helpers

# Execute prior to running tests to build a release if required for env.
#
# Assumed Vars:
#   KUBE_ROOT
function test-build-release {
  # Make a release
  "${KUBE_ROOT}/build/release.sh"
}

# SSH to a node by name ($1) and run a command ($2).
function ssh-to-node {
  local node="$1"
  local cmd="$2"
  ssh --ssh_arg "-o LogLevel=quiet" "${node}" "${cmd}"
}

# Restart the kube-proxy on a node ($1)
function restart-kube-proxy {
  ssh-to-node "$1" "sudo /etc/init.d/kube-proxy restart"
}

# Restart the apiserver on the master ($1)
function restart-apiserver {
  ssh-to-node "$1" "sudo /etc/init.d/kube-apiserver restart"
}

function test-setup {
  "${KUBE_ROOT}/cluster/kube-up.sh"
}

function test-teardown {
  "${KUBE_ROOT}/cluster/kube-down.sh"
}
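azure_call wraps the classic xplat "azure" CLI: it retries a command up to 10 times when stderr contains the transient DNS error "getaddrinfo ENOTFOUND" and otherwise returns the original exit code with stderr passed through. A usage sketch with the same subcommand verify-prereqs relies on:

```bash
# Source the library, then run any azure subcommand through the retry wrapper.
source cluster/azure-legacy/util.sh
azure_call account list
```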
@@ -1,2 +0,0 @@
_deployments
config-real.sh
@@ -1,41 +0,0 @@
#!/usr/bin/env bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Azure location to deploy to. (Storage account, resource group, resources)
# Must be specified in the compact form. ("westus" is ok, "West US" is not)
AZURE_LOCATION="${AZURE_LOCATION:-"westus"}"

# An identifier for the deployment. It can be left blank and an identifier
# will be generated from the date/time.
AZURE_DEPLOY_ID="${AZURE_DEPLOY_ID:-"kube-$(date +"%Y%m%d-%H%M%S")"}"

AZURE_MASTER_SIZE="${AZURE_MASTER_SIZE:-"Standard_A1"}"
AZURE_NODE_SIZE="${AZURE_NODE_SIZE:-"Standard_A1"}"

# Username of the admin account created on the VMs
AZURE_USERNAME="${AZURE_USERNAME:-"kube"}"

# Initial number of worker nodes to provision
NUM_NODES=${NUM_NODES:-3}

# The target Azure subscription ID
# This should be a GUID.
AZURE_SUBSCRIPTION_ID="${AZURE_SUBSCRIPTION_ID:-}"

# The authentication mechanism to use. The default "device" is recommended as
# it requires the least ahead-of-time setup.
# This should be one of: { "device", "client_secret" }
AZURE_AUTH_METHOD="${AZURE_AUTH_METHOD:-"device"}"
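A minimal sketch of driving the azkube-based deployment with these defaults, assuming KUBERNETES_PROVIDER=azure selects this directory and using a placeholder subscription GUID:

```bash
export KUBERNETES_PROVIDER=azure                                    # assumption
export AZURE_SUBSCRIPTION_ID="00000000-0000-0000-0000-000000000000" # placeholder GUID
export AZURE_AUTH_METHOD=device   # interactive device login, the default
export NUM_NODES=3
cluster/kube-up.sh
```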
@@ -1,288 +0,0 @@
#!/usr/bin/env bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# A library of helper functions and constants for the local config.

# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.

set -e

SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do
  DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
  SOURCE="$(readlink "$SOURCE")"
  [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

KUBE_ROOT="${DIR}/../.."
KUBE_CONFIG_FILE="${KUBE_CONFIG_FILE:-"${DIR}/config-default.sh"}"
source "${KUBE_CONFIG_FILE}"
source "${KUBE_ROOT}/cluster/common.sh"

AZKUBE_VERSION="v0.0.5"
REGISTER_MASTER_KUBELET="true"

function verify-prereqs() {
  required_binaries=("docker" "jq")

  for rb in "${required_binaries[@]}"; do
    if ! which "$rb" > /dev/null 2>&1; then
      echo "Couldn't find ${rb} in PATH"
      exit 1
    fi
  done

  if ! "${KUBE_ROOT}/cluster/kubectl.sh" >/dev/null 2>&1 ; then
    echo "kubectl is unavailable. Ensure ${KUBE_ROOT}/cluster/kubectl.sh runs with a successful exit."
    exit 1
  fi
}

function azure-ensure-config() {
  if [[ -z "${AZURE_SUBSCRIPTION_ID:-}" ]]; then
    echo "AZURE_SUBSCRIPTION_ID must be set"
    exit 1
  fi

  export AZURE_OUTPUT_RELDIR="_deployments/${AZURE_DEPLOY_ID}"
  export AZURE_OUTPUT_DIR="${DIR}/${AZURE_OUTPUT_RELDIR}"
  mkdir -p "${AZURE_OUTPUT_DIR}"

  case "${AZURE_AUTH_METHOD:-}" in
    "client_secret")
      if [[ -z "${AZURE_CLIENT_ID}" ]]; then
        echo "AZURE_CLIENT_ID must be set"
        exit 1
      fi
      if [[ -z "${AZURE_CLIENT_SECRET}" ]]; then
        echo "AZURE_CLIENT_SECRET must be set"
        exit 1
      fi
      ;;
    "device" | "")
      echo "AZURE_AUTH_METHOD not set or set to \"device\"; using device auth."
      echo "This will be interactive. (export AZURE_AUTH_METHOD=client_secret to avoid the prompt)"
      export AZURE_AUTH_METHOD="device"
      ;;
    *)
      echo "AZURE_AUTH_METHOD is an unsupported value: \"${AZURE_AUTH_METHOD}\""
      exit 1
      ;;
  esac
}

function repo-contains-image() {
  registry="$1"
  repo="$2"
  image="$3"
  version="$4"

  prefix="${registry}"
  if [[ "${prefix}" == "docker.io" ]]; then
    prefix="registry.hub.docker.com/v2/repositories"
    tags_json=$(curl "https://registry.hub.docker.com/v2/repositories/${repo}/${image}/tags/${version}/" 2>/dev/null)
    tags_found="$(echo "${tags_json}" | jq ".v2?")"
  elif [[ "${prefix}" == "gcr.io" ]]; then
    tags_json=$(curl "https://gcr.io/v2/${repo}/${image}/tags/list" 2>/dev/null)
    tags_found="$(echo "${tags_json}" | jq ".tags | indices([\"${version}\"]) | any")"
  fi

  if [[ "${tags_found}" == "true" ]]; then
    return 0
  fi

  return 1
}

function ensure-hyperkube() {
  hyperkube="hyperkube-amd64"
  official_image_tag="gcr.io/google_containers/${hyperkube}:${KUBE_GIT_VERSION}"

  if repo-contains-image "gcr.io" "google_containers" "${hyperkube}" "${KUBE_GIT_VERSION}" ; then
    echo "${hyperkube}:${KUBE_GIT_VERSION} was found in the gcr.io/google_containers repository"
    export AZURE_HYPERKUBE_SPEC="${official_image_tag}"
    return 0
  fi

  echo "${hyperkube}:${KUBE_GIT_VERSION} was not found in the gcr.io/google_containers repository"
  if [[ -z "${AZURE_DOCKER_REGISTRY:-}" || -z "${AZURE_DOCKER_REPO:-}" ]]; then
    echo "AZURE_DOCKER_REGISTRY and AZURE_DOCKER_REPO must be set in order to push ${hyperkube}:${KUBE_GIT_VERSION}"
    return 1
  fi

  # check if it is already in the user owned docker hub
  local user_image_tag="${AZURE_DOCKER_REGISTRY}/${AZURE_DOCKER_REPO}/${hyperkube}:${KUBE_GIT_VERSION}"
  if repo-contains-image "${AZURE_DOCKER_REGISTRY}" "${AZURE_DOCKER_REPO}" "${hyperkube}" "${KUBE_GIT_VERSION}" ; then
    echo "${image}:${version} was found in ${repo} (success)"
    export AZURE_HYPERKUBE_SPEC="${user_image_tag}"
    return 0
  fi

  # should these steps tell them to just immediately tag it with the final user-specified repo?
  # for now just stick with the assumption that `make release` will eventually tag a hyperkube image on gcr.io
  # and then the existing code can re-tag that for the user's repo and then push
  if ! docker inspect "${user_image_tag}" ; then
    if ! docker inspect "${official_image_tag}" ; then
      REGISTRY="gcr.io/google_containers" \
        VERSION="${KUBE_GIT_VERSION}" \
        make -C "${KUBE_ROOT}/cluster/images/hyperkube" build
    fi

    docker tag "${official_image_tag}" "${user_image_tag}"
  fi

  docker push "${user_image_tag}"

  echo "${image}:${version} was pushed to ${repo}"
  export AZURE_HYPERKUBE_SPEC="${user_image_tag}"
}

function deploy-kube-system() {
  kubectl create -f - <<EOF
apiVersion: v1
kind: Namespace
metadata:
  name: kube-system
EOF
}

function get-common-params() {
  declare -ag AZKUBE_AUTH_PARAMS
  declare -ag AZKUBE_DOCKER_PARAMS
  declare -ag AZKUBE_RESOURCE_GROUP_PARAM

  case "${AZURE_AUTH_METHOD}" in
    "client_secret")
      AZKUBE_AUTH_PARAMS+=("--client-id=${AZURE_CLIENT_ID}" "--client-secret=${AZURE_CLIENT_SECRET}")
      ;;
    "device")
      AZKUBE_AUTH_PARAMS=()
      ;;
  esac

  if [[ ! -z "${AZURE_HTTPS_PROXY:-}" ]]; then
    AZKUBE_DOCKER_PARAMS+=("--net=host" "--env=https_proxy=${AZURE_HTTPS_PROXY}")
  fi

  if [[ ! -z "${AZURE_RESOURCE_GROUP:-}" ]]; then
    echo "Forcing use of resource group ${AZURE_RESOURCE_GROUP}"
    AZKUBE_RESOURCE_GROUP_PARAM+=("--resource-group=${AZURE_RESOURCE_GROUP}")
  fi
}

function azure-deploy() {
  get-common-params

  docker run -it \
    --user "$(id -u)" \
    "${AZKUBE_DOCKER_PARAMS[@]:+${AZKUBE_DOCKER_PARAMS[@]}}" \
    -v "$HOME/.azkube:/.azkube" -v "/tmp:/tmp" \
    -v "${AZURE_OUTPUT_DIR}:/opt/azkube/${AZURE_OUTPUT_RELDIR}" \
    "colemickens/azkube:${AZKUBE_VERSION}" /opt/azkube/azkube deploy \
      --kubernetes-hyperkube-spec="${AZURE_HYPERKUBE_SPEC}" \
      --deployment-name="${AZURE_DEPLOY_ID}" \
      --location="${AZURE_LOCATION}" \
      "${AZKUBE_RESOURCE_GROUP_PARAM[@]:+${AZKUBE_RESOURCE_GROUP_PARAM[@]}}" \
      --subscription-id="${AZURE_SUBSCRIPTION_ID}" \
      --auth-method="${AZURE_AUTH_METHOD}" "${AZKUBE_AUTH_PARAMS[@]:+${AZKUBE_AUTH_PARAMS[@]}}" \
      --master-size="${AZURE_MASTER_SIZE}" \
      --node-size="${AZURE_NODE_SIZE}" \
      --node-count="${NUM_NODES}" \
      --username="${AZURE_USERNAME}" \
      --output-directory="/opt/azkube/${AZURE_OUTPUT_RELDIR}" \
      --no-cloud-provider \
      "${AZURE_AZKUBE_ARGS[@]:+${AZURE_AZKUBE_ARGS[@]}}"
}

function kube-up {
  date_start="$(date)"
  startdate="$(date +%s)"
  echo "++> AZURE KUBE-UP STARTED: $(date)"

  verify-prereqs
  azure-ensure-config

  if [[ -z "${AZURE_HYPERKUBE_SPEC:-}" ]]; then
    find-release-version
    export KUBE_GIT_VERSION="${KUBE_GIT_VERSION//+/-}"

    # this will export AZURE_HYPERKUBE_SPEC based on whether an official image was found
    # or if it was uploaded to the user specified docker repository.
    if ! ensure-hyperkube; then
      echo "Failed to ensure hyperkube was available. Exiting."
      return 1
    fi
  else
    echo "Using user specified AZURE_HYPERKUBE_SPEC: ${AZURE_HYPERKUBE_SPEC}"
    echo "Note: The existence of this is not verified! (It might only be pullable from your DC)"
  fi

  azure-deploy

  kubectl config set-cluster "${AZURE_DEPLOY_ID}" --server="https://${AZURE_DEPLOY_ID}.${AZURE_LOCATION}.cloudapp.azure.com:6443" --certificate-authority="${AZURE_OUTPUT_DIR}/ca.crt" --api-version="v1"
  kubectl config set-credentials "${AZURE_DEPLOY_ID}_user" --client-certificate="${AZURE_OUTPUT_DIR}/client.crt" --client-key="${AZURE_OUTPUT_DIR}/client.key"
  kubectl config set-context "${AZURE_DEPLOY_ID}" --cluster="${AZURE_DEPLOY_ID}" --user="${AZURE_DEPLOY_ID}_user"
  kubectl config use-context "${AZURE_DEPLOY_ID}"

  deploy-kube-system

  enddate="$(date +%s)"
  duration="$(( enddate - startdate ))"

  echo "++> AZURE KUBE-UP FINISHED: $(date) (duration: ${duration} seconds)"
}

function kube-down {
  verify-prereqs

  # required
  if [[ -z "${AZURE_SUBSCRIPTION_ID:-}" ]]; then
    echo "AZURE_SUBSCRIPTION_ID must be set"
    exit 1
  fi
  if [[ -z "${AZURE_DEPLOY_ID:-}" ]]; then
    echo "AZURE_DEPLOY_ID must be set. This selects the deployment (and resource group) to delete."
    return 1
  fi

  # optional
  declare -a destroy_params
  declare -a docker_params
  if [[ ${AZURE_DOWN_SKIP_CONFIRM:-} == "true" ]]; then
    destroy_params+=("--skip-confirm")
  fi
  if [[ ! -z "${AZURE_HTTPS_PROXY:-}" ]]; then
    docker_params+=("--net=host" "--env=https_proxy=${AZURE_HTTPS_PROXY}")
  fi

  docker run -it \
    --user "$(id -u)" \
    -v "$HOME/.azkube:/.azkube" -v "/tmp:/tmp" \
    "${docker_params[@]:+${docker_params[@]}}" \
    "colemickens/azkube:${AZKUBE_VERSION}" /opt/azkube/azkube destroy \
      --deployment-name="${AZURE_DEPLOY_ID}" \
      --subscription-id="${AZURE_SUBSCRIPTION_ID}" \
      --auth-method="${AZURE_AUTH_METHOD}" "${AZKUBE_AUTH_PARAMS[@]:+${AZKUBE_AUTH_PARAMS[@]}}" \
      "${destroy_params[@]:+${destroy_params[@]}}" \
      "${AZURE_AZKUBE_ARGS[@]:+${AZURE_AZKUBE_ARGS[@]}}"
}
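Teardown mirrors kube-up: kube-down only needs the subscription ID and the deployment ID (which names the resource group to destroy), and confirmation can be skipped via AZURE_DOWN_SKIP_CONFIRM. A sketch with placeholder values in the format config-default.sh generates:

```bash
export KUBERNETES_PROVIDER=azure                                    # assumption
export AZURE_SUBSCRIPTION_ID="00000000-0000-0000-0000-000000000000" # placeholder GUID
export AZURE_DEPLOY_ID="kube-20170220-153000"                       # placeholder deployment ID
export AZURE_DOWN_SKIP_CONFIRM=true
cluster/kube-down.sh
```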