#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions that each provider hosting Kubernetes
# must implement to use cluster/kube-*.sh scripts.
set -e
SSH_OPTS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=ERROR"
MASTER=""
MASTER_IP=""
NODE_IPS=""
# Assumed Vars:
# KUBE_ROOT
function test-build-release() {
# Make a release
"${KUBE_ROOT}/build/release.sh"
}
# Set the necessary k8s and etcd configuration information from user input
function setClusterInfo() {
# Reset NODE_IPS on every call. NODE_IPS is a global variable that is built
# up by concatenating one nodeIP after another, so if setClusterInfo were
# called repeatedly without resetting it, you would end up with duplicates
# such as NODE_IPS=192.168.0.2,192.168.0.3,192.168.0.2,192.168.0.3,
# which is obviously wrong.
NODE_IPS=""
local ii=0
for i in $nodes; do
nodeIP=${i#*@}
if [[ "${roles[${ii}]}" == "ai" ]]; then
MASTER_IP=$nodeIP
MASTER=$i
NODE_IPS="$nodeIP"
elif [[ "${roles[${ii}]}" == "a" ]]; then
MASTER_IP=$nodeIP
MASTER=$i
elif [[ "${roles[${ii}]}" == "i" ]]; then
if [[ -z "${NODE_IPS}" ]];then
NODE_IPS="$nodeIP"
else
NODE_IPS="$NODE_IPS,$nodeIP"
fi
else
echo "unsupported role for ${i}. please check"
exit 1
fi
((ii=ii+1))
done
}
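
# Illustrative example (variable names come from config-default.sh, the
# values here are hypothetical): given
#   export nodes="vcap@192.168.0.2 vcap@192.168.0.3"
#   export roles=("ai" "i")
# setClusterInfo sets MASTER="vcap@192.168.0.2", MASTER_IP="192.168.0.2",
# and NODE_IPS="192.168.0.2,192.168.0.3".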
# Verify ssh prereqs
function verify-prereqs() {
local rc
rc=0
ssh-add -L 1> /dev/null 2> /dev/null || rc="$?"
# "Could not open a connection to your authentication agent."
if [[ "${rc}" -eq 2 ]]; then
eval "$(ssh-agent)" > /dev/null
trap-add "kill ${SSH_AGENT_PID}" EXIT
fi
rc=0
ssh-add -L 1> /dev/null 2> /dev/null || rc="$?"
# "The agent has no identities."
if [[ "${rc}" -eq 1 ]]; then
# Try adding one of the default identities, with or without passphrase.
ssh-add || true
fi
# Expect at least one identity to be available.
if ! ssh-add -L 1> /dev/null 2> /dev/null; then
echo "Could not find or add an SSH identity."
echo "Please start ssh-agent, add your identity, and retry."
exit 1
fi
}
# Install handler for signal trap
function trap-add() {
local handler="$1"
local signal="${2-EXIT}"
local cur
cur="$(eval "sh -c 'echo \$3' -- $(trap -p ${signal})")"
if [[ -n "${cur}" ]]; then
handler="${cur}; ${handler}"
fi
trap "${handler}" ${signal}
}
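
# Illustrative usage: handlers accumulate instead of overwriting each other, so
#   trap-add 'echo first' EXIT
#   trap-add 'echo second' EXIT
# runs "echo first; echo second" when the shell exits.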
function verify-cluster() {
local ii=0
for i in ${nodes}
do
if [ "${roles[${ii}]}" == "a" ]; then
verify-master
elif [ "${roles[${ii}]}" == "i" ]; then
verify-node "$i"
elif [ "${roles[${ii}]}" == "ai" ]; then
verify-master
verify-node "$i"
else
echo "unsupported role for ${i}. please check"
exit 1
fi
((ii=ii+1))
done
}
function verify-master() {
# verify master has all required daemons
echo -n "Validating master"
local -a required_daemon=("kube-apiserver" "kube-controller-manager" "kube-scheduler")
local validated="1"
local try_count=1
local max_try_count=30
until [[ "$validated" == "0" ]]; do
validated="0"
local daemon
for daemon in "${required_daemon[@]}"; do
ssh $SSH_OPTS "$MASTER" "pgrep -f '${daemon}'" >/dev/null 2>&1 || {
echo -n "."
validated="1"
((try_count=try_count+1))
if [[ ${try_count} -gt ${max_try_count} ]]; then
echo -e "\nWarning: Process '${daemon}' failed to run on ${MASTER}, please check.\n"
exit 1
fi
sleep 2
}
done
done
echo
}
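
# Note: try_count advances on every failed pgrep and each retry sleeps 2s, so
# with max_try_count=30 the master has roughly a minute to bring up all three
# daemons before validation gives up.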
function verify-node() {
# verify node has all required daemons
echo -n "Validating ${1}"
local -a required_daemon=("kube-proxy" "kubelet" "docker")
local validated="1"
local try_count=1
local max_try_count=30
until [[ "$validated" == "0" ]]; do
validated="0"
local daemon
for daemon in "${required_daemon[@]}"; do
ssh $SSH_OPTS "$1" "pgrep -f '${daemon}'" >/dev/null 2>&1 || {
echo -n "."
validated="1"
((try_count=try_count+1))
if [[ ${try_count} -gt ${max_try_count} ]]; then
echo -e "\nWarning: Process '${daemon}' failed to run on ${1}, please check.\n"
exit 1
fi
sleep 2
}
done
done
echo
}
function create-etcd-opts() {
cat <<EOF > ~/kube/default/etcd
ETCD_OPTS="\
 -name infra\
 -listen-client-urls http://127.0.0.1:4001,http://${1}:4001\
 -advertise-client-urls http://${1}:4001"
EOF
}
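
# For example, create-etcd-opts '192.168.0.2' (a hypothetical master IP) would
# write ~/kube/default/etcd containing the single line:
#   ETCD_OPTS="-name infra -listen-client-urls http://127.0.0.1:4001,http://192.168.0.2:4001 -advertise-client-urls http://192.168.0.2:4001"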
function create-kube-apiserver-opts() {
cat <<EOF > ~/kube/default/kube-apiserver
KUBE_APISERVER_OPTS="\
 --insecure-bind-address=0.0.0.0\
 --insecure-port=8080\
 --etcd-servers=http://127.0.0.1:4001\
 --logtostderr=true\
 --service-cluster-ip-range=${1}\
 --admission-control=${2}\
 --service-node-port-range=${3}\
 --advertise-address=${4}\
 --client-ca-file=/srv/kubernetes/ca.crt\
 --tls-cert-file=/srv/kubernetes/server.cert\
 --tls-private-key-file=/srv/kubernetes/server.key"
EOF
}
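
# For example (hypothetical values):
#   create-kube-apiserver-opts '192.168.3.0/24' 'NamespaceLifecycle,LimitRanger' '30000-32767' '192.168.0.2'
# i.e. the service cluster IP range, the admission-control list, the service
# node port range, and the master's advertise address, in that order.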
function create-kube-controller-manager-opts() {
cat <<EOF > ~/kube/default/kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="\
 --master=127.0.0.1:8080\
 --root-ca-file=/srv/kubernetes/ca.crt\
 --service-account-private-key-file=/srv/kubernetes/server.key\
 --logtostderr=true"
EOF
}
function create-kube-scheduler-opts() {
cat <<EOF > ~/kube/default/kube-scheduler
KUBE_SCHEDULER_OPTS="\
 --logtostderr=true\
 --master=127.0.0.1:8080"
EOF
}
function create-kubelet-opts() {
cat <<EOF > ~/kube/default/kubelet
KUBELET_OPTS="\
--hostname-override=${1} \
--api-servers=http://${2}:8080 \
--logtostderr=true \
--cluster-dns=${3} \
--cluster-domain=${4} \
--config=${5}"
EOF
}
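
# For example (hypothetical values):
#   create-kubelet-opts '192.168.0.3' '192.168.0.2' '192.168.3.10' 'cluster.local' ''
# i.e. the node's hostname override, the master IP, the cluster DNS server IP,
# the cluster DNS domain, and an optional kubelet manifest --config path.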
function create-kube-proxy-opts() {
cat <<EOF > ~/kube/default/kube-proxy
KUBE_PROXY_OPTS="\
--hostname-override=${1} \
--master=http://${2}:8080 \
--logtostderr=true"
EOF
}
function create-flanneld-opts() {
cat <<EOF > ~/kube/default/flanneld
FLANNEL_OPTS="--etcd-endpoints=http://${1}:4001 \
--ip-masq \
--iface=${2}"
EOF
}
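
# For example, create-flanneld-opts '192.168.0.2' 'eth0' (hypothetical values)
# points flanneld at etcd on 192.168.0.2:4001 and at interface eth0; callers
# below pass an IP address as the second argument, which flannel's --iface
# flag also accepts.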
# Detect the IP for the master
#
# Assumed vars:
# KUBE_CONFIG_FILE
# nodes
# Vars set:
# KUBE_MASTER
# KUBE_MASTER_IP
function detect-master() {
source "${KUBE_CONFIG_FILE}"
setClusterInfo
export KUBE_MASTER="${MASTER}"
export KUBE_MASTER_IP="${MASTER_IP}"
echo "Using master ${MASTER_IP}"
}
# Detect the information about the nodes
#
# Assumed vars:
# nodes
# Vars set:
# KUBE_NODE_IP_ADDRESSES (array)
function detect-nodes() {
source "${KUBE_CONFIG_FILE}"
KUBE_NODE_IP_ADDRESSES=()
setClusterInfo
local ii=0
for i in ${nodes}
do
if [ "${roles[${ii}]}" == "i" ] || [ "${roles[${ii}]}" == "ai" ]; then
KUBE_NODE_IP_ADDRESSES+=("${i#*@}")
fi
((ii=ii+1))
done
if [[ -z "${KUBE_NODE_IP_ADDRESSES[@]}" ]]; then
echo "Could not detect Kubernetes nodes. \
Make sure you've launched a cluster with 'kube-up.sh'" >&2
exit 1
fi
}
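
# Illustrative result: with nodes="vcap@192.168.0.2 vcap@192.168.0.3" and
# roles ("ai" "i") (hypothetical values), KUBE_NODE_IP_ADDRESSES ends up as
# (192.168.0.2 192.168.0.3).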
# Instantiate a kubernetes cluster on ubuntu
function kube-up() {
export KUBE_CONFIG_FILE=${KUBE_CONFIG_FILE:-${KUBE_ROOT}/cluster/ubuntu/config-default.sh}
source "${KUBE_CONFIG_FILE}"
# download the tarball release
"${KUBE_ROOT}/cluster/ubuntu/download-release.sh"
setClusterInfo
local ii=0
for i in ${nodes}
do
{
if [ "${roles[${ii}]}" == "a" ]; then
provision-master
elif [ "${roles[${ii}]}" == "ai" ]; then
provision-masterandnode
elif [ "${roles[${ii}]}" == "i" ]; then
provision-node "$i"
else
echo "unsupported role for ${i}. Please check."
exit 1
fi
}
((ii=ii+1))
done
wait
export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl"
verify-cluster
detect-master
export CONTEXT="ubuntu"
export KUBE_SERVER="http://${KUBE_MASTER_IP}:8080"
source "${KUBE_ROOT}/cluster/common.sh"
# set kubernetes user and password
load-or-gen-kube-basicauth
create-kubeconfig
}
function provision-master() {
echo -e "\nDeploying master on machine ${MASTER_IP}"
ssh $SSH_OPTS "$MASTER" "mkdir -p ~/kube/default"
# copy the binaries and scripts to the ~/kube directory on the master
scp -r $SSH_OPTS \
saltbase/salt/generate-cert/make-ca-cert.sh \
ubuntu/reconfDocker.sh \
"${KUBE_CONFIG_FILE}" \
ubuntu/util.sh \
ubuntu/master/* \
ubuntu/binaries/master/ \
"${MASTER}:~/kube"
EXTRA_SANS=(
IP:$MASTER_IP
IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1
DNS:kubernetes
DNS:kubernetes.default
DNS:kubernetes.default.svc
DNS:kubernetes.default.svc.cluster.local
)
EXTRA_SANS=$(echo "${EXTRA_SANS[@]}" | tr ' ' ,)
BASH_DEBUG_FLAGS=""
if [[ "$DEBUG" == "true" ]] ; then
BASH_DEBUG_FLAGS="set -x"
fi
# remote login to MASTER and configure the k8s master
ssh $SSH_OPTS -t "${MASTER}" "
set +e
${BASH_DEBUG_FLAGS}
source ~/kube/util.sh
setClusterInfo
create-etcd-opts '${MASTER_IP}'
create-kube-apiserver-opts \
'${SERVICE_CLUSTER_IP_RANGE}' \
'${ADMISSION_CONTROL}' \
'${SERVICE_NODE_PORT_RANGE}' \
'${MASTER_IP}'
create-kube-controller-manager-opts '${NODE_IPS}'
create-kube-scheduler-opts
create-flanneld-opts '127.0.0.1' '${MASTER_IP}'
sudo -E -p '[sudo] password to start master: ' -- /bin/bash -ce '
${BASH_DEBUG_FLAGS}
cp ~/kube/default/* /etc/default/
cp ~/kube/init_conf/* /etc/init/
cp ~/kube/init_scripts/* /etc/init.d/
groupadd -f -r kube-cert
${PROXY_SETTING} ~/kube/make-ca-cert.sh \"${MASTER_IP}\" \"${EXTRA_SANS}\"
mkdir -p /opt/bin/
cp ~/kube/master/* /opt/bin/
service etcd start
FLANNEL_NET=\"${FLANNEL_NET}\" KUBE_CONFIG_FILE=\"${KUBE_CONFIG_FILE}\" ~/kube/reconfDocker.sh a
'" || {
echo "Deploying master on machine ${MASTER_IP} failed"
exit 1
}
}
function provision-node() {
echo -e "\nDeploying node on machine ${1#*@}"
ssh $SSH_OPTS "$1" "mkdir -p ~/kube/default"
# copy the binaries and scripts to the ~/kube directory on the node
scp -r $SSH_OPTS \
"${KUBE_CONFIG_FILE}" \
ubuntu/util.sh \
ubuntu/reconfDocker.sh \
ubuntu/minion/* \
ubuntu/binaries/minion \
"${1}:~/kube"
BASH_DEBUG_FLAGS=""
if [[ "$DEBUG" == "true" ]] ; then
BASH_DEBUG_FLAGS="set -x"
fi
# remote login to node and configure the k8s node
ssh $SSH_OPTS -t "$1" "
set +e
${BASH_DEBUG_FLAGS}
source ~/kube/util.sh
setClusterInfo
create-kubelet-opts \
'${1#*@}' \
'${MASTER_IP}' \
'${DNS_SERVER_IP}' \
'${DNS_DOMAIN}' \
'${KUBELET_CONFIG}'
create-kube-proxy-opts \
'${1#*@}' \
'${MASTER_IP}'
create-flanneld-opts '${MASTER_IP}' '${1#*@}'
sudo -E -p '[sudo] password to start node: ' -- /bin/bash -ce '
${BASH_DEBUG_FLAGS}
cp ~/kube/default/* /etc/default/
cp ~/kube/init_conf/* /etc/init/
cp ~/kube/init_scripts/* /etc/init.d/
mkdir -p /opt/bin/
cp ~/kube/minion/* /opt/bin
service flanneld start
KUBE_CONFIG_FILE=\"${KUBE_CONFIG_FILE}\" ~/kube/reconfDocker.sh i
'" || {
echo "Deploying node on machine ${1#*@} failed"
exit 1
}
}
function provision-masterandnode() {
echo -e "\nDeploying master and node on machine ${MASTER_IP}"
ssh $SSH_OPTS "$MASTER" "mkdir -p ~/kube/default"
# copy the binaries and scripts to the ~/kube directory on the master
# scp order matters
scp -r $SSH_OPTS \
saltbase/salt/generate-cert/make-ca-cert.sh \
"${KUBE_CONFIG_FILE}" \
ubuntu/util.sh \
ubuntu/minion/* \
ubuntu/master/* \
ubuntu/reconfDocker.sh \
ubuntu/binaries/master/ \
ubuntu/binaries/minion \
"${MASTER}:~/kube"
EXTRA_SANS=(
IP:${MASTER_IP}
IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1
DNS:kubernetes
DNS:kubernetes.default
DNS:kubernetes.default.svc
DNS:kubernetes.default.svc.cluster.local
)
EXTRA_SANS=$(echo "${EXTRA_SANS[@]}" | tr ' ' ,)
BASH_DEBUG_FLAGS=""
if [[ "$DEBUG" == "true" ]] ; then
BASH_DEBUG_FLAGS="set -x"
fi
# remote login to the master/node and configure k8s
ssh $SSH_OPTS -t "$MASTER" "
set +e
${BASH_DEBUG_FLAGS}
source ~/kube/util.sh
setClusterInfo
create-etcd-opts '${MASTER_IP}'
create-kube-apiserver-opts \
'${SERVICE_CLUSTER_IP_RANGE}' \
'${ADMISSION_CONTROL}' \
'${SERVICE_NODE_PORT_RANGE}' \
'${MASTER_IP}'
create-kube-controller-manager-opts '${NODE_IPS}'
create-kube-scheduler-opts
create-kubelet-opts \
'${MASTER_IP}' \
'${MASTER_IP}' \
'${DNS_SERVER_IP}' \
'${DNS_DOMAIN}' \
'${KUBELET_CONFIG}'
create-kube-proxy-opts \
'${MASTER_IP}' \
'${MASTER_IP}'
create-flanneld-opts '127.0.0.1' '${MASTER_IP}'
sudo -E -p '[sudo] password to start master: ' -- /bin/bash -ce '
${BASH_DEBUG_FLAGS}
cp ~/kube/default/* /etc/default/
cp ~/kube/init_conf/* /etc/init/
cp ~/kube/init_scripts/* /etc/init.d/
groupadd -f -r kube-cert
${PROXY_SETTING} ~/kube/make-ca-cert.sh \"${MASTER_IP}\" \"${EXTRA_SANS}\"
mkdir -p /opt/bin/
cp ~/kube/master/* /opt/bin/
cp ~/kube/minion/* /opt/bin/
service etcd start
FLANNEL_NET=\"${FLANNEL_NET}\" KUBE_CONFIG_FILE=\"${KUBE_CONFIG_FILE}\" ~/kube/reconfDocker.sh ai
'" || {
echo "Deploying master and node on machine ${MASTER_IP} failed"
exit 1
}
}
# check whether kubelet has torn down all of the pods
function check-pods-torn-down() {
local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
local attempt=0
while [[ -n "$("${kubectl}" get pods | tail -n +2)" ]]; do
if (( attempt > 120 )); then
echo "timeout waiting for pods to be torn down" >> ~/kube/err.log
break
fi
echo "waiting for pods to be torn down"
attempt=$((attempt+1))
sleep 5
done
}
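
# Note: 120 attempts at a 5-second poll interval gives pods roughly ten
# minutes to terminate before the loop above gives up.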
# Delete a kubernetes cluster
function kube-down() {
export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl"
export KUBE_CONFIG_FILE=${KUBE_CONFIG_FILE:-${KUBE_ROOT}/cluster/ubuntu/config-default.sh}
source "${KUBE_CONFIG_FILE}"
source "${KUBE_ROOT}/cluster/common.sh"
tear_down_alive_resources
check-pods-torn-down
local ii=0
for i in ${nodes}; do
if [[ "${roles[${ii}]}" == "ai" || "${roles[${ii}]}" == "a" ]]; then
echo "Cleaning on master ${i#*@}"
ssh $SSH_OPTS -t "$i" "
pgrep etcd && \
sudo -p '[sudo] password to stop master: ' -- /bin/bash -c '
service etcd stop
rm -rf \
/opt/bin/etcd* \
/etc/init/etcd.conf \
/etc/init.d/etcd \
/etc/default/etcd
rm -rf /infra*
rm -rf /srv/kubernetes
'
" || echo "Cleaning on master ${i#*@} failed"
if [[ "${roles[${ii}]}" == "ai" ]]; then
ssh $SSH_OPTS -t "$i" "sudo rm -rf /var/lib/kubelet"
fi
elif [[ "${roles[${ii}]}" == "i" ]]; then
echo "Cleaning on node ${i#*@}"
ssh $SSH_OPTS -t "$i" "
pgrep flanneld && \
sudo -p '[sudo] password to stop node: ' -- /bin/bash -c '
service flanneld stop
rm -rf /var/lib/kubelet
'
" || echo "Cleaning on node ${i#*@} failed"
else
echo "unsupported role for ${i}"
fi
ssh $SSH_OPTS -t "$i" "sudo -- /bin/bash -c '
rm -f \
/opt/bin/kube* \
/opt/bin/flanneld \
/etc/init/kube* \
/etc/init/flanneld.conf \
/etc/init.d/kube* \
/etc/init.d/flanneld \
/etc/default/kube* \
/etc/default/flanneld
rm -rf ~/kube
rm -f /run/flannel/subnet.env
'" || echo "cleaning legacy files on ${i#*@} failed"
((ii=ii+1))
done
}
# Perform common upgrade setup tasks
function prepare-push() {
# Use local binaries for kube-push
if [[ -z "${KUBE_VERSION}" ]]; then
echo "Use local binaries for kube-push"
if [[ ! -d "${KUBE_ROOT}/cluster/ubuntu/binaries" ]]; then
echo "No local binaries.Please check"
exit 1
else
echo "Please make sure all the required local binaries are prepared ahead"
sleep 3
fi
else
# Run download-release.sh to get the required release
export KUBE_VERSION
"${KUBE_ROOT}/cluster/ubuntu/download-release.sh"
fi
}
# Update a kubernetes master with the expected release
function push-master() {
export KUBE_CONFIG_FILE=${KUBE_CONFIG_FILE:-${KUBE_ROOT}/cluster/ubuntu/config-default.sh}
source "${KUBE_CONFIG_FILE}"
if [[ ! -f "${KUBE_ROOT}/cluster/ubuntu/binaries/master/kube-apiserver" ]]; then
echo "There is no required release of kubernetes, please check first"
exit 1
fi
export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl"
setClusterInfo
local ii=0
for i in ${nodes}; do
if [[ "${roles[${ii}]}" == "a" || "${roles[${ii}]}" == "ai" ]]; then
echo "Cleaning master ${i#*@}"
ssh $SSH_OPTS -t "$i" "
pgrep etcd && sudo -p '[sudo] password to stop all processes: ' -- /bin/bash -c '
service etcd stop
sleep 3
rm -rf \
/etc/init/etcd.conf \
/etc/init/kube* \
/etc/init/flanneld.conf \
/etc/init.d/etcd \
/etc/init.d/kube* \
/etc/init.d/flanneld \
/etc/default/etcd \
/etc/default/kube* \
/etc/default/flanneld
rm -f \
/opt/bin/etcd* \
/opt/bin/kube* \
/opt/bin/flanneld
rm -f /run/flannel/subnet.env
rm -rf ~/kube
'" || echo "Cleaning master ${i#*@} failed"
fi
if [[ "${roles[${ii}]}" == "a" ]]; then
provision-master
elif [[ "${roles[${ii}]}" == "ai" ]]; then
provision-masterandnode
elif [[ "${roles[${ii}]}" == "i" ]]; then
((ii=ii+1))
continue
else
echo "unsupported role for ${i}, please check"
exit 1
fi
((ii=ii+1))
done
verify-cluster
}
# Update a kubernetes node with the expected release
function push-node() {
export KUBE_CONFIG_FILE=${KUBE_CONFIG_FILE:-${KUBE_ROOT}/cluster/ubuntu/config-default.sh}
source "${KUBE_CONFIG_FILE}"
if [[ ! -f "${KUBE_ROOT}/cluster/ubuntu/binaries/minion/kubelet" ]]; then
echo "There is no required release of kubernetes, please check first"
exit 1
fi
export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl"
setClusterInfo
local node_ip=${1}
local ii=0
local existing=false
for i in ${nodes}; do
if [[ "${roles[${ii}]}" == "i" && ${i#*@} == "$node_ip" ]]; then
2015-09-09 01:19:09 +00:00
echo "Cleaning node ${i#*@}"
ssh $SSH_OPTS -t "$i" "
sudo -p '[sudo] password to stop all processes: ' -- /bin/bash -c '
service flanneld stop
rm -f /opt/bin/kube* \
/opt/bin/flanneld
rm -rf \
/etc/init/kube* \
/etc/init/flanneld.conf \
/etc/init.d/kube* \
/etc/init.d/flanneld \
/etc/default/kube* \
/etc/default/flanneld
rm -f /run/flannel/subnet.env
rm -rf ~/kube
'" || echo "Cleaning node ${i#*@} failed"
provision-node "$i"
existing=true
elif [[ "${roles[${ii}]}" == "a" || "${roles[${ii}]}" == "ai" ]] && [[ ${i#*@} == "$node_ip" ]]; then
echo "${i} is a master node, please try ./kube-push -m instead"
existing=true
elif [[ "${roles[${ii}]}" == "i" || "${roles[${ii}]}" == "a" || "${roles[${ii}]}" == "ai" ]]; then
((ii=ii+1))
continue
else
echo "unsupported role for ${i}, please check"
exit 1
fi
((ii=ii+1))
done
if [[ "${existing}" == false ]]; then
echo "node ${node_ip} does not exist"
else
verify-cluster
fi
}
# Update a kubernetes cluster with expected source
function kube-push() {
prepare-push
export KUBE_CONFIG_FILE=${KUBE_CONFIG_FILE:-${KUBE_ROOT}/cluster/ubuntu/config-default.sh}
source "${KUBE_CONFIG_FILE}"
if [[ ! -f "${KUBE_ROOT}/cluster/ubuntu/binaries/master/kube-apiserver" ]]; then
echo "There is no required release of kubernetes, please check first"
exit 1
fi
export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl"
# stop all the kubernetes processes & etcd
local ii=0
for i in ${nodes}; do
if [[ "${roles[${ii}]}" == "ai" || "${roles[${ii}]}" == "a" ]]; then
echo "Cleaning on master ${i#*@}"
ssh $SSH_OPTS -t "$i" "
pgrep etcd && \
sudo -p '[sudo] password to stop master: ' -- /bin/bash -c '
service etcd stop
rm -rf \
/opt/bin/etcd* \
/etc/init/etcd.conf \
/etc/init.d/etcd \
/etc/default/etcd
'" || echo "Cleaning on master ${i#*@} failed"
elif [[ "${roles[${ii}]}" == "i" ]]; then
echo "Cleaning on node ${i#*@}"
ssh $SSH_OPTS -t "$i" "
pgrep flanneld && \
sudo -p '[sudo] password to stop node: ' -- /bin/bash -c '
service flanneld stop
'" || echo "Cleaning on node ${i#*@} failed"
else
echo "unsupported role for ${i}"
fi
ssh $SSH_OPTS -t "$i" "sudo -- /bin/bash -c '
rm -f \
/opt/bin/kube* \
/opt/bin/flanneld
rm -rf \
/etc/init/kube* \
/etc/init/flanneld.conf \
/etc/init.d/kube* \
/etc/init.d/flanneld \
/etc/default/kube* \
/etc/default/flanneld
rm -f /run/flannel/subnet.env
rm -rf ~/kube
'" || echo "Cleaning legacy files on ${i#*@} failed"
((ii=ii+1))
done
# provision all machines, including master & nodes
setClusterInfo
local ii=0
for i in ${nodes}; do
if [[ "${roles[${ii}]}" == "a" ]]; then
provision-master
elif [[ "${roles[${ii}]}" == "i" ]]; then
provision-node "$i"
elif [[ "${roles[${ii}]}" == "ai" ]]; then
provision-masterandnode
else
echo "unsupported role for ${i}. please check"
exit 1
fi
((ii=ii+1))
done
verify-cluster
}
# Perform preparations required to run e2e tests
function prepare-e2e() {
echo "Ubuntu doesn't need special preparations for e2e tests" 1>&2
}