#!/bin/bash

# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions that each provider hosting Kubernetes
# must implement to use cluster/kube-*.sh scripts.
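
# Note: most of the functions below assume that the variables "nodes" (a list of
# ssh destinations such as user@ip) and "roles" (an array with one entry per node:
# "a" for master, "i" for node, "ai" for both) have been set, normally by sourcing
# cluster/ubuntu/config-default.sh via KUBE_CONFIG_FILE.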

set -e

SSH_OPTS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=ERROR"

MASTER=""
MASTER_IP=""
NODE_IPS=""

# Assumed Vars:
#   KUBE_ROOT
function test-build-release() {
  # Make a release
  "${KUBE_ROOT}/build/release.sh"
}

# From user input set the necessary k8s and etcd configuration information
function setClusterInfo() {
  # Initialize NODE_IPS in setClusterInfo function
  # NODE_IPS is defined as a global variable, and is concatenated with other nodeIP
  # When setClusterInfo is called many times, this could cause potential problems
  # Such as, you will have NODE_IPS=192.168.0.2,192.168.0.3,192.168.0.2,192.168.0.3,
  # which is obviously wrong.
  NODE_IPS=""

  local ii=0
  for i in $nodes; do
    nodeIP=${i#*@}

    if [[ "${roles[${ii}]}" == "ai" ]]; then
      MASTER_IP=$nodeIP
      MASTER=$i
      NODE_IPS="$nodeIP"
    elif [[ "${roles[${ii}]}" == "a" ]]; then
      MASTER_IP=$nodeIP
      MASTER=$i
    elif [[ "${roles[${ii}]}" == "i" ]]; then
      if [[ -z "${NODE_IPS}" ]]; then
        NODE_IPS="$nodeIP"
      else
        NODE_IPS="$NODE_IPS,$nodeIP"
      fi
    else
      echo "unsupported role for ${i}. please check"
      exit 1
    fi

    ((ii=ii+1))
  done

}
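
# Example (hypothetical values): with nodes="vcap@10.10.103.250 vcap@10.10.103.162"
# and roles=("ai" "i"), setClusterInfo sets MASTER=vcap@10.10.103.250,
# MASTER_IP=10.10.103.250 and NODE_IPS=10.10.103.250,10.10.103.162.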

# Sanity check on $CNI_PLUGIN_CONF and $CNI_PLUGIN_EXES
function check-CNI-config() {
  if [ -z "$CNI_PLUGIN_CONF" ] && [ -n "$CNI_PLUGIN_EXES" ]; then
    echo "Warning: CNI_PLUGIN_CONF is empty but CNI_PLUGIN_EXES is not (it is $CNI_PLUGIN_EXES); Flannel will be used" >&2
  elif [ -n "$CNI_PLUGIN_CONF" ] && [ -z "$CNI_PLUGIN_EXES" ]; then
    echo "Warning: CNI_PLUGIN_EXES is empty but CNI_PLUGIN_CONF is not (it is $CNI_PLUGIN_CONF); Flannel will be used" >&2
  elif [ -n "$CNI_PLUGIN_CONF" ] && [ -n "$CNI_PLUGIN_EXES" ]; then
    local problems=0
    if ! [ -r "$CNI_PLUGIN_CONF" ]; then
      echo "ERROR: CNI_PLUGIN_CONF is set to $CNI_PLUGIN_CONF but that is not a readable existing file!" >&2
      let problems=1
    fi
    local ii=0
    for exe in $CNI_PLUGIN_EXES; do
      if ! [ -x "$exe" ]; then
        echo "ERROR: CNI_PLUGIN_EXES[$ii], which is $exe, is not an existing executable file!" >&2
        let problems=problems+1
      fi
      let ii=ii+1
    done
    if (( problems > 0 )); then
      exit 1
    fi
  fi
}

# Verify ssh prereqs
function verify-prereqs() {
  local rc

  rc=0
  ssh-add -L 1> /dev/null 2> /dev/null || rc="$?"
  # "Could not open a connection to your authentication agent."
  if [[ "${rc}" -eq 2 ]]; then
    eval "$(ssh-agent)" > /dev/null
    trap-add "kill ${SSH_AGENT_PID}" EXIT
  fi

  rc=0
  ssh-add -L 1> /dev/null 2> /dev/null || rc="$?"
  # "The agent has no identities."
  if [[ "${rc}" -eq 1 ]]; then
    # Try adding one of the default identities, with or without passphrase.
    ssh-add || true
  fi

  # Expect at least one identity to be available.
  if ! ssh-add -L 1> /dev/null 2> /dev/null; then
    echo "Could not find or add an SSH identity."
    echo "Please start ssh-agent, add your identity, and retry."
    exit 1
  fi
}

# Install handler for signal trap
function trap-add() {
  local handler="$1"
  local signal="${2-EXIT}"
  local cur

  cur="$(eval "sh -c 'echo \$3' -- $(trap -p ${signal})")"
  if [[ -n "${cur}" ]]; then
    handler="${cur}; ${handler}"
  fi

  trap "${handler}" ${signal}
}
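
# Walk every configured machine and verify the daemons required by its role,
# using verify-master and/or verify-node below.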
function verify-cluster() {
  local ii=0

  for i in ${nodes}
  do
    if [ "${roles[${ii}]}" == "a" ]; then
      verify-master
    elif [ "${roles[${ii}]}" == "i" ]; then
      verify-node "$i"
    elif [ "${roles[${ii}]}" == "ai" ]; then
      verify-master
      verify-node "$i"
    else
      echo "unsupported role for ${i}. please check"
      exit 1
    fi

    ((ii=ii+1))
  done
}

function verify-master() {
  # verify master has all required daemons
  echo -n "Validating master"
  local -a required_daemon=("kube-apiserver" "kube-controller-manager" "kube-scheduler")
  local validated="1"
  local try_count=1
  local max_try_count=30
  until [[ "$validated" == "0" ]]; do
    validated="0"
    local daemon
    for daemon in "${required_daemon[@]}"; do
      ssh $SSH_OPTS "$MASTER" "pgrep -f '${daemon}'" >/dev/null 2>&1 || {
        echo -n "."
        validated="1"
        ((try_count=try_count+1))
        if [[ ${try_count} -gt ${max_try_count} ]]; then
          echo -e "\nWarning: Process '${daemon}' failed to run on ${MASTER}, please check.\n"
          exit 1
        fi
        sleep 2
      }
    done
  done
  echo
}

function verify-node() {
  # verify node has all required daemons
  echo -n "Validating ${1}"
  local -a required_daemon=("kube-proxy" "kubelet" "docker")
  local validated="1"
  local try_count=1
  local max_try_count=30
  until [[ "$validated" == "0" ]]; do
    validated="0"
    local daemon
    for daemon in "${required_daemon[@]}"; do
      ssh $SSH_OPTS "$1" "pgrep -f '${daemon}'" >/dev/null 2>&1 || {
        echo -n "."
        validated="1"
        ((try_count=try_count+1))
        if [[ ${try_count} -gt ${max_try_count} ]]; then
          echo -e "\nWarning: Process '${daemon}' failed to run on ${1}, please check.\n"
          exit 1
        fi
        sleep 2
      }
    done
  done
  echo
}

# Create ~/kube/default/etcd with proper contents.
# $1: The one IP address where the etcd leader listens.
function create-etcd-opts() {
  cat <<EOF > ~/kube/default/etcd
ETCD_OPTS="\
 -name infra\
 -listen-client-urls http://127.0.0.1:4001,http://${1}:4001\
 -advertise-client-urls http://${1}:4001"
EOF
}
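
# For example (hypothetical IP), "create-etcd-opts 10.10.103.250" writes an
# ETCD_OPTS line into ~/kube/default/etcd that makes etcd listen on
# 127.0.0.1:4001 and 10.10.103.250:4001. The other create-*-opts helpers below
# follow the same pattern for their respective daemons.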

# Create ~/kube/default/kube-apiserver with proper contents.
# $1: CIDR block for service addresses.
# $2: Admission Controllers to invoke in the API server.
# $3: A port range to reserve for services with NodePort visibility.
# $4: The IP address on which to advertise the apiserver to members of the cluster.
function create-kube-apiserver-opts() {
  cat <<EOF > ~/kube/default/kube-apiserver
KUBE_APISERVER_OPTS="\
 --insecure-bind-address=0.0.0.0\
 --insecure-port=8080\
 --etcd-servers=http://127.0.0.1:4001\
 --logtostderr=true\
 --service-cluster-ip-range=${1}\
 --admission-control=${2}\
 --service-node-port-range=${3}\
 --advertise-address=${4}\
 --client-ca-file=/srv/kubernetes/ca.crt\
 --tls-cert-file=/srv/kubernetes/server.cert\
 --tls-private-key-file=/srv/kubernetes/server.key"
EOF
}

# Create ~/kube/default/kube-controller-manager with proper contents.
function create-kube-controller-manager-opts() {
  cat <<EOF > ~/kube/default/kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="\
 --master=127.0.0.1:8080\
 --root-ca-file=/srv/kubernetes/ca.crt\
 --service-account-private-key-file=/srv/kubernetes/server.key\
 --logtostderr=true"
EOF
}

# Create ~/kube/default/kube-scheduler with proper contents.
function create-kube-scheduler-opts() {
  cat <<EOF > ~/kube/default/kube-scheduler
KUBE_SCHEDULER_OPTS="\
 --logtostderr=true\
 --master=127.0.0.1:8080"
EOF
}

# Create ~/kube/default/kubelet with proper contents.
# $1: The hostname or IP address by which the kubelet will identify itself.
# $2: The one hostname or IP address at which the API server is reached (insecurely).
# $3: If non-empty then the DNS server IP to configure in each pod.
# $4: If non-empty then added to each pod's domain search list.
# $5: Pathname of the kubelet config file or directory.
# $6: If empty then flannel is used otherwise CNI is used.
function create-kubelet-opts() {
  if [ -n "$6" ] ; then
      cni_opts=" --network-plugin=cni --network-plugin-dir=/etc/cni/net.d"
  else
      cni_opts=""
  fi
  cat <<EOF > ~/kube/default/kubelet
KUBELET_OPTS="\
 --hostname-override=${1} \
 --api-servers=http://${2}:8080 \
 --logtostderr=true \
 --cluster-dns=${3} \
 --cluster-domain=${4} \
 --config=${5} \
 $cni_opts"
EOF
}

# Create ~/kube/default/kube-proxy with proper contents.
# $1: The hostname or IP address by which the node is identified.
# $2: The one hostname or IP address at which the API server is reached (insecurely).
# $3: Any extra flags to pass to kube-proxy (may be empty).
function create-kube-proxy-opts() {
  cat <<EOF > ~/kube/default/kube-proxy
KUBE_PROXY_OPTS="\
 --hostname-override=${1} \
 --master=http://${2}:8080 \
 --logtostderr=true \
 ${3}"
EOF
}

# Create ~/kube/default/flanneld with proper contents.
# $1: The one hostname or IP address at which the etcd leader listens.
# $2: The interface (name or IP address) passed to flanneld's --iface flag.
function create-flanneld-opts() {
  cat <<EOF > ~/kube/default/flanneld
FLANNEL_OPTS="--etcd-endpoints=http://${1}:4001 \
 --ip-masq \
 --iface=${2}"
EOF
}
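
# The files written by the create-*-opts functions above are generated under
# ~/kube/default on each machine and later copied to /etc/default/ (see
# provision-master, provision-node and provision-masterandnode), where the
# upstart jobs installed from ~/kube/init_conf read them.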

# Detect the IP for the master
#
# Assumed vars:
#   MASTER_NAME
# Vars set:
#   KUBE_MASTER_IP
function detect-master() {
  source "${KUBE_CONFIG_FILE}"
  setClusterInfo
  export KUBE_MASTER="${MASTER}"
  export KUBE_MASTER_IP="${MASTER_IP}"
  echo "Using master ${MASTER_IP}"
}

# Detect the information about the nodes
#
# Assumed vars:
#   nodes
# Vars set:
#   KUBE_NODE_IP_ADDRESSES (array)
function detect-nodes() {
  source "${KUBE_CONFIG_FILE}"

  KUBE_NODE_IP_ADDRESSES=()
  setClusterInfo

  local ii=0
  for i in ${nodes}
  do
    if [ "${roles[${ii}]}" == "i" ] || [ "${roles[${ii}]}" == "ai" ]; then
      KUBE_NODE_IP_ADDRESSES+=("${i#*@}")
    fi

    ((ii=ii+1))
  done

  if [[ -z "${KUBE_NODE_IP_ADDRESSES[@]}" ]]; then
    echo "Could not detect Kubernetes nodes.\
 Make sure you've launched a cluster with 'kube-up.sh'" >&2
    exit 1
  fi
}

# Instantiate a kubernetes cluster on ubuntu
function kube-up() {
  export KUBE_CONFIG_FILE=${KUBE_CONFIG_FILE:-${KUBE_ROOT}/cluster/ubuntu/config-default.sh}
  source "${KUBE_CONFIG_FILE}"

  # downloading tarball release
  "${KUBE_ROOT}/cluster/ubuntu/download-release.sh"

  # Fetch the hacked easyrsa that make-ca-cert.sh will use
  curl -L -O https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz > /dev/null 2>&1

  if ! check-CNI-config; then
    return
  fi

  setClusterInfo
  local ii=0
  for i in ${nodes}
  do
    {
      if [ "${roles[${ii}]}" == "a" ]; then
        provision-master
      elif [ "${roles[${ii}]}" == "ai" ]; then
        provision-masterandnode
      elif [ "${roles[${ii}]}" == "i" ]; then
        provision-node "$i"
      else
        echo "unsupported role for ${i}. Please check"
        exit 1
      fi
    }

    ((ii=ii+1))
  done
  wait

  export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl"
  verify-cluster
  detect-master
  export CONTEXT="ubuntu"
  export KUBE_SERVER="http://${KUBE_MASTER_IP}:8080"

  source "${KUBE_ROOT}/cluster/common.sh"

  # set kubernetes user and password
  load-or-gen-kube-basicauth

  create-kubeconfig
}
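
# kube-up is normally not run directly; it is invoked through the generic
# cluster/kube-up.sh entry point with KUBERNETES_PROVIDER=ubuntu, which sources
# this file first.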

function provision-master() {

  echo -e "\nDeploying master on machine ${MASTER_IP}"

  ssh $SSH_OPTS "$MASTER" "mkdir -p ~/kube/default"

  # copy the binaries and scripts to the ~/kube directory on the master
  scp -r $SSH_OPTS \
    saltbase/salt/generate-cert/make-ca-cert.sh \
    easy-rsa.tar.gz \
    ubuntu/reconfDocker.sh \
    "${KUBE_CONFIG_FILE}" \
    ubuntu/util.sh \
    ubuntu/master/* \
    ubuntu/binaries/master/ \
    "${MASTER}:~/kube"

  if [ -z "$CNI_PLUGIN_CONF" ] || [ -z "$CNI_PLUGIN_EXES" ]; then
    # Flannel is being used: copy the flannel binaries and scripts, set reconf flag
    scp -r $SSH_OPTS ubuntu/master-flannel/* "${MASTER}:~/kube"
    NEED_RECONFIG_DOCKER=true
  else
    # CNI is being used: set reconf flag
    NEED_RECONFIG_DOCKER=false
  fi

  EXTRA_SANS=(
    IP:$MASTER_IP
    IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1
    DNS:kubernetes
    DNS:kubernetes.default
    DNS:kubernetes.default.svc
    DNS:kubernetes.default.svc.cluster.local
  )

  EXTRA_SANS=$(echo "${EXTRA_SANS[@]}" | tr ' ' ,)

  BASH_DEBUG_FLAGS=""
  if [[ "$DEBUG" == "true" ]] ; then
    BASH_DEBUG_FLAGS="set -x"
  fi

  # remote login to MASTER and configure k8s master
  ssh $SSH_OPTS -t "${MASTER}" "
    set +e
    ${BASH_DEBUG_FLAGS}
    source ~/kube/util.sh

    setClusterInfo
    create-etcd-opts '${MASTER_IP}'
    create-kube-apiserver-opts \
      '${SERVICE_CLUSTER_IP_RANGE}' \
      '${ADMISSION_CONTROL}' \
      '${SERVICE_NODE_PORT_RANGE}' \
      '${MASTER_IP}'
    create-kube-controller-manager-opts '${NODE_IPS}'
    create-kube-scheduler-opts
    create-flanneld-opts '127.0.0.1' '${MASTER_IP}'
    FLANNEL_OTHER_NET_CONFIG='${FLANNEL_OTHER_NET_CONFIG}' sudo -E -p '[sudo] password to start master: ' -- /bin/bash -ce '
      ${BASH_DEBUG_FLAGS}

      cp ~/kube/default/* /etc/default/
      cp ~/kube/init_conf/* /etc/init/
      cp ~/kube/init_scripts/* /etc/init.d/

      groupadd -f -r kube-cert
      ${PROXY_SETTING} DEBUG='${DEBUG}' ~/kube/make-ca-cert.sh \"${MASTER_IP}\" \"${EXTRA_SANS}\"
      mkdir -p /opt/bin/
      cp ~/kube/master/* /opt/bin/
      service etcd start
      if ${NEED_RECONFIG_DOCKER}; then FLANNEL_NET=\"${FLANNEL_NET}\" KUBE_CONFIG_FILE=\"${KUBE_CONFIG_FILE}\" DOCKER_OPTS=\"${DOCKER_OPTS}\" ~/kube/reconfDocker.sh a; fi
      '" || {
      echo "Deploying master on machine ${MASTER_IP} failed"
      exit 1
    }
}

function provision-node() {

  echo -e "\nDeploying node on machine ${1#*@}"

  ssh $SSH_OPTS $1 "mkdir -p ~/kube/default"

  # copy the binaries and scripts to the ~/kube directory on the node
  scp -r $SSH_OPTS \
    "${KUBE_CONFIG_FILE}" \
    ubuntu/util.sh \
    ubuntu/reconfDocker.sh \
    ubuntu/minion/* \
    ubuntu/binaries/minion \
    "${1}:~/kube"

  if [ -z "$CNI_PLUGIN_CONF" ] || [ -z "$CNI_PLUGIN_EXES" ]; then
    # Prep for Flannel use: copy the flannel binaries and scripts, set reconf flag
    scp -r $SSH_OPTS ubuntu/minion-flannel/* "${1}:~/kube"
    SERVICE_STARTS="service flanneld start"
    NEED_RECONFIG_DOCKER=true
    CNI_PLUGIN_CONF=''

  else
    # Prep for CNI use: copy the CNI config and binaries, adjust upstart config, set reconf flag
    ssh $SSH_OPTS "${1}" "rm -rf tmp-cni; mkdir -p tmp-cni/exes tmp-cni/conf"
    scp    $SSH_OPTS "$CNI_PLUGIN_CONF" "${1}:tmp-cni/conf/"
    scp -p $SSH_OPTS  $CNI_PLUGIN_EXES  "${1}:tmp-cni/exes/"
    ssh $SSH_OPTS -t "${1}" '
      sudo -p "[sudo] password to prep node %h: " -- /bin/bash -ce "
        mkdir -p /opt/cni/bin /etc/cni/net.d
        cp ~$(id -un)/tmp-cni/conf/* /etc/cni/net.d/
        cp --preserve=mode ~$(id -un)/tmp-cni/exes/* /opt/cni/bin/
        '"sed -i.bak -e 's/start on started flanneld/start on started ${CNI_KUBELET_TRIGGER}/' -e 's/stop on stopping flanneld/stop on stopping ${CNI_KUBELET_TRIGGER}/' "'~$(id -un)/kube/init_conf/kubelet.conf
        '"sed -i.bak -e 's/start on started flanneld/start on started networking/' -e 's/stop on stopping flanneld/stop on stopping networking/' "'~$(id -un)/kube/init_conf/kube-proxy.conf
        "'
    SERVICE_STARTS='service kubelet start
                    service kube-proxy start'
    NEED_RECONFIG_DOCKER=false
  fi

  BASH_DEBUG_FLAGS=""
  if [[ "$DEBUG" == "true" ]] ; then
    BASH_DEBUG_FLAGS="set -x"
  fi

  # remote login to node and configure k8s node
  ssh $SSH_OPTS -t "$1" "
    set +e
    ${BASH_DEBUG_FLAGS}
    source ~/kube/util.sh

    setClusterInfo
    create-kubelet-opts \
      '${1#*@}' \
      '${MASTER_IP}' \
      '${DNS_SERVER_IP}' \
      '${DNS_DOMAIN}' \
      '${KUBELET_CONFIG}' \
      '${CNI_PLUGIN_CONF}'
    create-kube-proxy-opts \
      '${1#*@}' \
      '${MASTER_IP}' \
      '${KUBE_PROXY_EXTRA_OPTS}'
    create-flanneld-opts '${MASTER_IP}' '${1#*@}'

    sudo -E -p '[sudo] password to start node: ' -- /bin/bash -ce '
      ${BASH_DEBUG_FLAGS}

      cp ~/kube/default/* /etc/default/
      cp ~/kube/init_conf/* /etc/init/
      cp ~/kube/init_scripts/* /etc/init.d/

      mkdir -p /opt/bin/
      cp ~/kube/minion/* /opt/bin
      ${SERVICE_STARTS}
      if ${NEED_RECONFIG_DOCKER}; then KUBE_CONFIG_FILE=\"${KUBE_CONFIG_FILE}\" DOCKER_OPTS=\"${DOCKER_OPTS}\" ~/kube/reconfDocker.sh i; fi
      '" || {
      echo "Deploying node on machine ${1#*@} failed"
      exit 1
    }
}

function provision-masterandnode() {

  echo -e "\nDeploying master and node on machine ${MASTER_IP}"

  ssh $SSH_OPTS $MASTER "mkdir -p ~/kube/default"

  # copy the binaries and scripts to the ~/kube directory on the master
  # scp order matters
  scp -r $SSH_OPTS \
    saltbase/salt/generate-cert/make-ca-cert.sh \
    easy-rsa.tar.gz \
    "${KUBE_CONFIG_FILE}" \
    ubuntu/util.sh \
    ubuntu/minion/* \
    ubuntu/master/* \
    ubuntu/reconfDocker.sh \
    ubuntu/binaries/master/ \
    ubuntu/binaries/minion \
    "${MASTER}:~/kube"

  if [ -z "$CNI_PLUGIN_CONF" ] || [ -z "$CNI_PLUGIN_EXES" ]; then
    # Prep for Flannel use: copy the flannel binaries and scripts, set reconf flag
    scp -r $SSH_OPTS ubuntu/minion-flannel/* ubuntu/master-flannel/* "${MASTER}:~/kube"
    NEED_RECONFIG_DOCKER=true
    CNI_PLUGIN_CONF=''

  else
    # Prep for CNI use: copy the CNI config and binaries, adjust upstart config, set reconf flag
    ssh $SSH_OPTS "${MASTER}" "rm -rf tmp-cni; mkdir -p tmp-cni/exes tmp-cni/conf"
    scp    $SSH_OPTS "$CNI_PLUGIN_CONF" "${MASTER}:tmp-cni/conf/"
    scp -p $SSH_OPTS  $CNI_PLUGIN_EXES  "${MASTER}:tmp-cni/exes/"
    ssh $SSH_OPTS -t "${MASTER}" '
      sudo -p "[sudo] password to prep master %h: " -- /bin/bash -ce "
        mkdir -p /opt/cni/bin /etc/cni/net.d
        cp ~$(id -un)/tmp-cni/conf/* /etc/cni/net.d/
        cp --preserve=mode ~$(id -un)/tmp-cni/exes/* /opt/cni/bin/
        '"sed -i.bak -e 's/start on started flanneld/start on started etcd/' -e 's/stop on stopping flanneld/stop on stopping etcd/' "'~$(id -un)/kube/init_conf/kube*.conf
        "'
    NEED_RECONFIG_DOCKER=false
  fi

  EXTRA_SANS=(
    IP:${MASTER_IP}
    IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1
    DNS:kubernetes
    DNS:kubernetes.default
    DNS:kubernetes.default.svc
    DNS:kubernetes.default.svc.cluster.local
  )

  EXTRA_SANS=$(echo "${EXTRA_SANS[@]}" | tr ' ' ,)

  BASH_DEBUG_FLAGS=""
  if [[ "$DEBUG" == "true" ]] ; then
    BASH_DEBUG_FLAGS="set -x"
  fi

  # remote login to the master/node and configure k8s
  ssh $SSH_OPTS -t "$MASTER" "
    set +e
    ${BASH_DEBUG_FLAGS}
    source ~/kube/util.sh

    setClusterInfo
    create-etcd-opts '${MASTER_IP}'
    create-kube-apiserver-opts \
      '${SERVICE_CLUSTER_IP_RANGE}' \
      '${ADMISSION_CONTROL}' \
      '${SERVICE_NODE_PORT_RANGE}' \
      '${MASTER_IP}'
    create-kube-controller-manager-opts '${NODE_IPS}'
    create-kube-scheduler-opts
    create-kubelet-opts \
      '${MASTER_IP}' \
      '${MASTER_IP}' \
      '${DNS_SERVER_IP}' \
      '${DNS_DOMAIN}' \
      '${KUBELET_CONFIG}' \
      '${CNI_PLUGIN_CONF}'
    create-kube-proxy-opts \
      '${MASTER_IP}' \
      '${MASTER_IP}' \
      '${KUBE_PROXY_EXTRA_OPTS}'
    create-flanneld-opts '127.0.0.1' '${MASTER_IP}'

    FLANNEL_OTHER_NET_CONFIG='${FLANNEL_OTHER_NET_CONFIG}' sudo -E -p '[sudo] password to start master: ' -- /bin/bash -ce '
      ${BASH_DEBUG_FLAGS}

      cp ~/kube/default/* /etc/default/
      cp ~/kube/init_conf/* /etc/init/
      cp ~/kube/init_scripts/* /etc/init.d/

      groupadd -f -r kube-cert
      ${PROXY_SETTING} DEBUG='${DEBUG}' ~/kube/make-ca-cert.sh \"${MASTER_IP}\" \"${EXTRA_SANS}\"
      mkdir -p /opt/bin/
      cp ~/kube/master/* /opt/bin/
      cp ~/kube/minion/* /opt/bin/
      service etcd start
      if ${NEED_RECONFIG_DOCKER}; then FLANNEL_NET=\"${FLANNEL_NET}\" KUBE_CONFIG_FILE=\"${KUBE_CONFIG_FILE}\" DOCKER_OPTS=\"${DOCKER_OPTS}\" ~/kube/reconfDocker.sh ai; fi
      '" || {
      echo "Deploying master and node on machine ${MASTER_IP} failed"
      exit 1
    }
}

# check whether kubelet has torn down all of the pods
function check-pods-torn-down() {
  local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
  local attempt=0
  while [[ ! -z "$("${kubectl}" get pods --show-all --all-namespaces | tail -n +2)" ]]; do
    if (( attempt > 120 )); then
      echo "timeout waiting for tearing down pods" >> ~/kube/err.log
    fi
    echo "waiting for tearing down pods"
    attempt=$((attempt+1))
    sleep 5
  done
}

# Delete a kubernetes cluster
function kube-down() {
  export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl"
  export KUBE_CONFIG_FILE=${KUBE_CONFIG_FILE:-${KUBE_ROOT}/cluster/ubuntu/config-default.sh}
  source "${KUBE_CONFIG_FILE}"

  source "${KUBE_ROOT}/cluster/common.sh"

  tear_down_alive_resources
  check-pods-torn-down

  local ii=0
  for i in ${nodes}; do
    if [[ "${roles[${ii}]}" == "ai" || "${roles[${ii}]}" == "a" ]]; then
      echo "Cleaning on master ${i#*@}"
      ssh $SSH_OPTS -t "$i" "
        pgrep etcd && \
        sudo -p '[sudo] password to stop master: ' -- /bin/bash -c '
          service etcd stop

          rm -rf \
            /opt/bin/etcd* \
            /etc/init/etcd.conf \
            /etc/init.d/etcd \
            /etc/default/etcd

          rm -rf /infra*
          rm -rf /srv/kubernetes
          '
      " || echo "Cleaning on master ${i#*@} failed"

      if [[ "${roles[${ii}]}" == "ai" ]]; then
        ssh $SSH_OPTS -t "$i" "sudo rm -rf /var/lib/kubelet"
      fi

    elif [[ "${roles[${ii}]}" == "i" ]]; then
      echo "Cleaning on node ${i#*@}"
      ssh $SSH_OPTS -t "$i" "
        pgrep flanneld && \
        sudo -p '[sudo] password to stop node: ' -- /bin/bash -c '
          service flanneld stop
          rm -rf /var/lib/kubelet
          '
        " || echo "Cleaning on node ${i#*@} failed"
    else
      echo "unsupported role for ${i}"
    fi

    ssh $SSH_OPTS -t "$i" "sudo -- /bin/bash -c '
      rm -f \
        /opt/bin/kube* \
        /opt/bin/flanneld \
        /etc/init/kube* \
        /etc/init/flanneld.conf \
        /etc/init.d/kube* \
        /etc/init.d/flanneld \
        /etc/default/kube* \
        /etc/default/flanneld

      rm -rf ~/kube
      rm -f /run/flannel/subnet.env
    '" || echo "cleaning legacy files on ${i#*@} failed"
    ((ii=ii+1))
  done
}

# Perform common upgrade setup tasks
function prepare-push() {
  # Use local binaries for kube-push
  if [[ -z "${KUBE_VERSION}" ]]; then
    echo "Use local binaries for kube-push"
    if [[ ! -d "${KUBE_ROOT}/cluster/ubuntu/binaries" ]]; then
      echo "No local binaries. Please check"
      exit 1
    else
      echo "Please make sure all the required local binaries are prepared ahead"
      sleep 3
    fi
  else
    # Run download-release.sh to get the required release
    export KUBE_VERSION
    "${KUBE_ROOT}/cluster/ubuntu/download-release.sh"
  fi
}

# Update a kubernetes master with expected release
function push-master() {
  export KUBE_CONFIG_FILE=${KUBE_CONFIG_FILE:-${KUBE_ROOT}/cluster/ubuntu/config-default.sh}
  source "${KUBE_CONFIG_FILE}"

  if [[ ! -f "${KUBE_ROOT}/cluster/ubuntu/binaries/master/kube-apiserver" ]]; then
    echo "There is no required release of kubernetes, please check first"
    exit 1
  fi

  export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl"

  setClusterInfo

  local ii=0
  for i in ${nodes}; do
    if [[ "${roles[${ii}]}" == "a" || "${roles[${ii}]}" == "ai" ]]; then
      echo "Cleaning master ${i#*@}"
      ssh $SSH_OPTS -t "$i" "
        pgrep etcd && sudo -p '[sudo] stop the all process: ' -- /bin/bash -c '
          service etcd stop
          sleep 3
          rm -rf \
            /etc/init/etcd.conf \
            /etc/init/kube* \
            /etc/init/flanneld.conf \
            /etc/init.d/etcd \
            /etc/init.d/kube* \
            /etc/init.d/flanneld \
            /etc/default/etcd \
            /etc/default/kube* \
            /etc/default/flanneld
          rm -f \
            /opt/bin/etcd* \
            /opt/bin/kube* \
            /opt/bin/flanneld
          rm -f /run/flannel/subnet.env
          rm -rf ~/kube
        '" || echo "Cleaning master ${i#*@} failed"
    fi

    if [[ "${roles[${ii}]}" == "a" ]]; then
      provision-master
    elif [[ "${roles[${ii}]}" == "ai" ]]; then
      provision-masterandnode
    elif [[ "${roles[${ii}]}" == "i" ]]; then
      ((ii=ii+1))
      continue
    else
      echo "unsupported role for ${i}, please check"
      exit 1
    fi

    ((ii=ii+1))
  done
  verify-cluster
}

# Update a kubernetes node with expected release
function push-node() {
  export KUBE_CONFIG_FILE=${KUBE_CONFIG_FILE:-${KUBE_ROOT}/cluster/ubuntu/config-default.sh}
  source "${KUBE_CONFIG_FILE}"

  if [[ ! -f "${KUBE_ROOT}/cluster/ubuntu/binaries/minion/kubelet" ]]; then
    echo "There is no required release of kubernetes, please check first"
    exit 1
  fi

  export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl"

  setClusterInfo

  local node_ip=${1}
  local ii=0
  local existing=false

  for i in ${nodes}; do
    if [[ "${roles[${ii}]}" == "i" && ${i#*@} == "$node_ip" ]]; then
      echo "Cleaning node ${i#*@}"
      ssh $SSH_OPTS -t "$i" "
        sudo -p '[sudo] stop the all process: ' -- /bin/bash -c '
          service flanneld stop

          rm -f /opt/bin/kube* \
            /opt/bin/flanneld

          rm -rf \
            /etc/init/kube* \
            /etc/init/flanneld.conf \
            /etc/init.d/kube* \
            /etc/init.d/flanneld \
            /etc/default/kube* \
            /etc/default/flanneld

          rm -f /run/flannel/subnet.env

          rm -rf ~/kube
        '" || echo "Cleaning node ${i#*@} failed"

      provision-node "$i"
      existing=true
    elif [[ "${roles[${ii}]}" == "a" || "${roles[${ii}]}" == "ai" ]] && [[ ${i#*@} == "$node_ip" ]]; then
      echo "${i} is master node, please try ./kube-push -m instead"
      existing=true
    elif [[ "${roles[${ii}]}" == "i" || "${roles[${ii}]}" == "a" || "${roles[${ii}]}" == "ai" ]]; then
      ((ii=ii+1))
      continue
    else
      echo "unsupported role for ${i}, please check"
      exit 1
    fi

    ((ii=ii+1))
  done

  if [[ "${existing}" == false ]]; then
    echo "node ${node_ip} does not exist"
  else
    verify-cluster
  fi

}

# Update a kubernetes cluster with expected source
function kube-push() {
  prepare-push
  export KUBE_CONFIG_FILE=${KUBE_CONFIG_FILE:-${KUBE_ROOT}/cluster/ubuntu/config-default.sh}
  source "${KUBE_CONFIG_FILE}"

  if [[ ! -f "${KUBE_ROOT}/cluster/ubuntu/binaries/master/kube-apiserver" ]]; then
    echo "There is no required release of kubernetes, please check first"
    exit 1
  fi

  export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl"

  # stop all the kube processes & etcd
  local ii=0
  for i in ${nodes}; do
    if [[ "${roles[${ii}]}" == "ai" || "${roles[${ii}]}" == "a" ]]; then
      echo "Cleaning on master ${i#*@}"
      ssh $SSH_OPTS -t "$i" "
        pgrep etcd && \
        sudo -p '[sudo] password to stop master: ' -- /bin/bash -c '
          service etcd stop
          rm -rf \
            /opt/bin/etcd* \
            /etc/init/etcd.conf \
            /etc/init.d/etcd \
            /etc/default/etcd
        '" || echo "Cleaning on master ${i#*@} failed"
    elif [[ "${roles[${ii}]}" == "i" ]]; then
      echo "Cleaning on node ${i#*@}"
      ssh $SSH_OPTS -t $i "
        pgrep flanneld && \
        sudo -p '[sudo] password to stop node: ' -- /bin/bash -c '
          service flanneld stop
        '" || echo "Cleaning on node ${i#*@} failed"
    else
      echo "unsupported role for ${i}"
    fi

    ssh $SSH_OPTS -t "$i" "sudo -- /bin/bash -c '
      rm -f \
        /opt/bin/kube* \
        /opt/bin/flanneld

      rm -rf \
        /etc/init/kube* \
        /etc/init/flanneld.conf \
        /etc/init.d/kube* \
        /etc/init.d/flanneld \
        /etc/default/kube* \
        /etc/default/flanneld

      rm -f /run/flannel/subnet.env
      rm -rf ~/kube
    '" || echo "Cleaning legacy files on ${i#*@} failed"
    ((ii=ii+1))
  done

  # provision all nodes, including master & nodes
  setClusterInfo

  local ii=0
  for i in ${nodes}; do
    if [[ "${roles[${ii}]}" == "a" ]]; then
      provision-master
    elif [[ "${roles[${ii}]}" == "i" ]]; then
      provision-node "$i"
    elif [[ "${roles[${ii}]}" == "ai" ]]; then
      provision-masterandnode
    else
      echo "unsupported role for ${i}. please check"
      exit 1
    fi

    ((ii=ii+1))
  done
  verify-cluster
}

# Perform preparations required to run e2e tests
function prepare-e2e() {
  echo "Ubuntu doesn't need special preparations for e2e tests" 1>&2
}