Merge pull request #44344 from mikedanese/ubuntu-delete

Automatic merge from submit-queue (batch tested with PRs 44343, 44344)

delete ubuntu kube-up

```release-note
Remove deprecated ubuntu kube-up deployment.
```
Kubernetes Submit Queue 2017-04-14 23:03:57 -07:00 committed by GitHub
commit b22a298009
24 changed files with 0 additions and 2461 deletions

View File

@@ -1,2 +0,0 @@
binaries
kubedns*

View File

@@ -1,139 +0,0 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Contains configuration values for the Ubuntu cluster
# Define all your cluster nodes; the MASTER node comes first
# and entries are separated by blank spaces, like <user_1@ip_1> <user_2@ip_2> <user_3@ip_3>
export nodes=${nodes:-"vcap@10.10.103.250 vcap@10.10.103.162 vcap@10.10.103.223"}
# Define the role of each node: a (master), i (minion), or ai (both master and minion).
# Roles must be listed in the same order as the nodes.
roles=${roles:-"ai i i"}
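# For example, with the defaults above, vcap@10.10.103.250 acts as both
# master and node ("ai") while the other two machines are nodes only ("i").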
# It is practically impossible to set an array as an environment variable
# from a script, so assume the variable is a string and convert it to an array.
export roles_array=($roles)
# Define the number of nodes
export NUM_NODES=${NUM_NODES:-3}
# Define the IP range used for service cluster IPs.
# According to RFC 1918 (ref: https://tools.ietf.org/html/rfc1918), choose a private IP range here.
export SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-192.168.3.0/24} # formerly PORTAL_NET
# Define the IP range used for the flannel overlay network; it should not conflict with SERVICE_CLUSTER_IP_RANGE above.
# The Ubuntu scripting supports two ways of networking: Flannel and
# CNI. To use CNI: (1) put a CNI configuration file, whose basename
# is the configured network type plus ".conf", somewhere on the driver
# machine (the one running `kube-up.sh`) and set CNI_PLUGIN_CONF to a
# pathname of that file, (2) put one or more executable binaries on
# the driver machine and set CNI_PLUGIN_EXES to a space-separated list
# of their pathnames, and (3) set CNI_KUBELET_TRIGGER to identify an
# appropriate service on which to trigger the start and stop of the
# kubelet on non-master machines. For (1) and (2) the pathnames may
# be relative, in which case they are relative to kubernetes/cluster.
# If either of CNI_PLUGIN_CONF or CNI_PLUGIN_EXES is undefined or has
# a zero length value then Flannel will be used instead of CNI.
export CNI_PLUGIN_CONF CNI_PLUGIN_EXES CNI_KUBELET_TRIGGER
CNI_PLUGIN_CONF=${CNI_PLUGIN_CONF:-""}
CNI_PLUGIN_EXES=${CNI_PLUGIN_EXES:-""}
CNI_KUBELET_TRIGGER=${CNI_KUBELET_TRIGGER:-networking}
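# Example (hypothetical paths, assuming the standard CNI bridge plugin is
# what you want to run):
#   CNI_PLUGIN_CONF=ubuntu/bridge.conf
#   CNI_PLUGIN_EXES="/opt/cni-dist/bridge /opt/cni-dist/host-local"
#   CNI_KUBELET_TRIGGER=networking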
# Flannel networking is used if CNI networking is not. The following
# variable defines the CIDR block from which cluster addresses are
# drawn.
export FLANNEL_NET=${FLANNEL_NET:-172.16.0.0/16}
# If Flannel networking is used then the following variable can be
# used to customize the Flannel backend. The variable's value should
# be a JSON object. An empty string means to use the default, which
# is `{"Type": "vxlan"}`. See
# https://github.com/coreos/flannel#configuration for details on
# configuring Flannel.
export FLANNEL_BACKEND
FLANNEL_BACKEND=''
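# For example, to use the host-gw backend instead of the default vxlan:
# FLANNEL_BACKEND='{"Type": "host-gw"}'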
# Optionally add other contents to the Flannel configuration JSON
# object normally stored in etcd as /coreos.com/network/config. Use
# JSON syntax suitable for insertion into a JSON object constructor
# after other field name:value pairs. For example:
# FLANNEL_OTHER_NET_CONFIG=', "SubnetMin": "172.16.10.0", "SubnetMax": "172.16.90.0"'
export FLANNEL_OTHER_NET_CONFIG
FLANNEL_OTHER_NET_CONFIG=${FLANNEL_OTHER_NET_CONFIG:-""}
# Admission controllers to invoke prior to persisting objects in the
# cluster. If ResourceQuota is included, keep it at the end of the
# list to prevent incrementing quota usage prematurely. The list
# below is what http://kubernetes.io/docs/admin/admission-controllers/
# recommends for releases >= 1.4.0; see that doc for the recommended
# settings for earlier releases.
export ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,ResourceQuota
# Path to the kubelet pod manifest file or directory of files
export KUBELET_POD_MANIFEST_PATH=${KUBELET_POD_MANIFEST_PATH:-""}
# A port range to reserve for services with NodePort visibility
SERVICE_NODE_PORT_RANGE=${SERVICE_NODE_PORT_RANGE:-"30000-32767"}
# Optional: Enable node logging.
ENABLE_NODE_LOGGING=false
LOGGING_DESTINATION=${LOGGING_DESTINATION:-elasticsearch}
# Optional: When set to true, Elasticsearch and Kibana will be set up as part of cluster bring-up.
ENABLE_CLUSTER_LOGGING=false
ELASTICSEARCH_LOGGING_REPLICAS=${ELASTICSEARCH_LOGGING_REPLICAS:-1}
# Optional: When set to true, Heapster, InfluxDB, and Grafana will be set up as part of cluster bring-up.
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-true}"
# Extra options to set on the Docker command line. This is useful for setting
# --insecure-registry for local registries.
DOCKER_OPTS=${DOCKER_OPTS:-""}
# Extra options to set on the kube-proxy command line. This is useful
# for selecting the iptables proxy-mode, for example.
KUBE_PROXY_EXTRA_OPTS=${KUBE_PROXY_EXTRA_OPTS:-""}
# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
# DNS_SERVER_IP must be an IP in SERVICE_CLUSTER_IP_RANGE
DNS_SERVER_IP=${DNS_SERVER_IP:-"192.168.3.10"}
DNS_DOMAIN=${DNS_DOMAIN:-"cluster.local"}
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}"
# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
# Optional: Enable setting flags for kube-apiserver to turn on behavior that is under active development
#RUNTIME_CONFIG=""
# Optional: Add an http or https proxy when downloading easy-rsa.
# Set proxy environment variables separated by a blank space, like "http_proxy=http://10.x.x.x:8080 https_proxy=https://10.x.x.x:8443"
PROXY_SETTING=${PROXY_SETTING:-""}
# Optional: Allows kubelet/kube-api to be run in privileged mode
ALLOW_PRIVILEGED=${ALLOW_PRIVILEGED:-"false"}
DEBUG=${DEBUG:-"false"}
# SSH_OPTS: use this to configure the SSH port and connection options
SSH_OPTS="-oPort=22 -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=ERROR"
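# For example, if your nodes listen on a non-standard SSH port, change
# -oPort=22 above accordingly (e.g. -oPort=2222).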

View File

@@ -1,19 +0,0 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Contains configuration values for interacting with the Ubuntu cluster in test mode
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/ubuntu/config-default.sh"

View File

@@ -1,93 +0,0 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# deploy the add-on services after the cluster is available
set -e
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "config-default.sh"
KUBECTL="${KUBE_ROOT}/cluster/kubectl.sh"
export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl"
export KUBE_CONFIG_FILE=${KUBE_CONFIG_FILE:-${KUBE_ROOT}/cluster/ubuntu/config-default.sh}
function init {
echo "Creating kube-system namespace..."
# use kubectl to create kube-system namespace
NAMESPACE=$(${KUBECTL} get namespaces | grep kube-system | cat)
if [ ! "$NAMESPACE" ]; then
${KUBECTL} create -f namespace.yaml
echo "The namespace 'kube-system' is successfully created."
else
echo "The namespace 'kube-system' is already there. Skipping."
fi
echo
}
function deploy_dns {
echo "Deploying DNS on Kubernetes"
sed -e "s/\\\$DNS_DOMAIN/${DNS_DOMAIN}/g" "${KUBE_ROOT}/cluster/addons/dns/kubedns-controller.yaml.sed" > kubedns-controller.yaml
sed -e "s/\\\$DNS_SERVER_IP/${DNS_SERVER_IP}/g" "${KUBE_ROOT}/cluster/addons/dns/kubedns-svc.yaml.sed" > kubedns-svc.yaml
cp "${KUBE_ROOT}/cluster/addons/dns/kubedns-sa.yaml" kubedns-sa.yaml
cp "${KUBE_ROOT}/cluster/addons/dns/kubedns-cm.yaml" kubedns-cm.yaml
KUBEDNS=$(${KUBECTL} get services --namespace=kube-system | grep kube-dns | cat)
if [ ! "$KUBEDNS" ]; then
# use kubectl to create kubedns controller and service
${KUBECTL} --namespace=kube-system create -f kubedns-sa.yaml
${KUBECTL} --namespace=kube-system create -f kubedns-cm.yaml
${KUBECTL} --namespace=kube-system create -f kubedns-controller.yaml
${KUBECTL} --namespace=kube-system create -f kubedns-svc.yaml
echo "Kube-dns controller and service are successfully deployed."
else
echo "Kube-dns controller and service are already deployed. Skipping."
fi
echo
}
function deploy_dashboard {
if ${KUBECTL} get deployment -l k8s-app=kubernetes-dashboard --namespace=kube-system | grep kubernetes-dashboard-v &> /dev/null; then
echo "Kubernetes Dashboard controller already exists"
else
echo "Creating Kubernetes Dashboard controller"
${KUBECTL} create -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-controller.yaml
fi
if ${KUBECTL} get service/kubernetes-dashboard --namespace=kube-system &> /dev/null; then
echo "Kubernetes Dashboard service already exists"
else
echo "Creating Kubernetes Dashboard service"
${KUBECTL} create -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-service.yaml
fi
echo
}
init
if [ "${ENABLE_CLUSTER_DNS}" == true ]; then
deploy_dns
fi
if [ "${ENABLE_CLUSTER_UI}" == true ]; then
deploy_dashboard
fi

View File

@@ -1,98 +0,0 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Download the etcd, flannel, and K8s binaries automatically and store them in the binaries directory
# Run as root only
# author @resouer @WIZARD-CXY
set -e
function cleanup {
# cleanup work
rm -rf flannel* kubernetes* etcd* binaries out
}
trap cleanup SIGHUP SIGINT SIGTERM
pushd $(dirname $0)
mkdir -p binaries/master
mkdir -p binaries/minion
mkdir -p out
# flannel
FLANNEL_VERSION=${FLANNEL_VERSION:-"0.5.5"}
echo "Prepare flannel ${FLANNEL_VERSION} release ..."
grep -q "^${FLANNEL_VERSION}\$" binaries/.flannel 2>/dev/null || {
( curl --fail -L https://github.com/coreos/flannel/releases/download/v${FLANNEL_VERSION}/flannel-${FLANNEL_VERSION}-linux-amd64.tar.gz -o flannel.tar.gz &&
tar xzf flannel.tar.gz flannel-${FLANNEL_VERSION}/flanneld -O > out/flanneld
) ||
( curl --fail -L https://github.com/coreos/flannel/releases/download/v${FLANNEL_VERSION}/flannel-v${FLANNEL_VERSION}-linux-amd64.tar.gz -o flannel.tar.gz &&
tar xzf flannel.tar.gz flanneld -O > out/flanneld
)
chmod 0755 out/flanneld
cp out/flanneld binaries/master
cp out/flanneld binaries/minion
echo ${FLANNEL_VERSION} > binaries/.flannel
}
# etcd
ETCD_VERSION=${ETCD_VERSION:-"3.0.17"}
ETCD="etcd-v${ETCD_VERSION}-linux-amd64"
echo "Prepare etcd ${ETCD_VERSION} release ..."
grep -q "^${ETCD_VERSION}\$" binaries/.etcd 2>/dev/null || {
curl -L https://github.com/coreos/etcd/releases/download/v${ETCD_VERSION}/${ETCD}.tar.gz -o etcd.tar.gz
tar xzf etcd.tar.gz
cp ${ETCD}/etcd ${ETCD}/etcdctl binaries/master
echo ${ETCD_VERSION} > binaries/.etcd
}
function get_latest_version_number {
# TODO(#33726): switch to dl.k8s.io
local -r latest_url="https://storage.googleapis.com/kubernetes-release/release/stable.txt"
if [[ $(which wget) ]]; then
wget -qO- ${latest_url}
elif [[ $(which curl) ]]; then
curl -Ss ${latest_url}
else
echo "Couldn't find curl or wget. Bailing out." >&2
exit 4
fi
}
if [ -z "$KUBE_VERSION" ]; then
KUBE_VERSION=$(get_latest_version_number | sed 's/^v//')
fi
# k8s
echo "Prepare kubernetes ${KUBE_VERSION} release ..."
grep -q "^${KUBE_VERSION}\$" binaries/.kubernetes 2>/dev/null || {
# TODO(#33726): switch to dl.k8s.io
curl -L https://storage.googleapis.com/kubernetes-release/release/v${KUBE_VERSION}/kubernetes-client-linux-amd64.tar.gz -o kubernetes-client-linux-amd64.tar.gz
curl -L https://storage.googleapis.com/kubernetes-release/release/v${KUBE_VERSION}/kubernetes-server-linux-amd64.tar.gz -o kubernetes-server-linux-amd64.tar.gz
tar xzf kubernetes-client-linux-amd64.tar.gz
tar xzf kubernetes-server-linux-amd64.tar.gz
cp kubernetes/client/bin/kubectl binaries/
cp kubernetes/server/bin/kube-apiserver \
kubernetes/server/bin/kube-controller-manager \
kubernetes/server/bin/kube-scheduler binaries/master
cp kubernetes/server/bin/kubelet \
kubernetes/server/bin/kube-proxy binaries/minion
echo ${KUBE_VERSION} > binaries/.kubernetes
}
rm -rf flannel* kubernetes* etcd* out
echo "Done! All your binaries locate in kubernetes/cluster/ubuntu/binaries directory"
popd

View File

@@ -1,29 +0,0 @@
description "Flannel service"
author "@chenxingyu"
respawn
# start in conjunction with etcd
start on started etcd
stop on stopping etcd
pre-start script
FLANNEL=/opt/bin/$UPSTART_JOB
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
if [ -f $FLANNEL ]; then
exit 0
fi
exit 22
end script
script
# modify these in /etc/default/$UPSTART_JOB (/etc/default/flanneld)
FLANNEL=/opt/bin/$UPSTART_JOB
FLANNEL_OPTS=""
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
exec "$FLANNEL" $FLANNEL_OPTS
end script

View File

@@ -1,99 +0,0 @@
#!/bin/sh
set -e
### BEGIN INIT INFO
# Provides: flannel
# Required-Start: $etcd
# Required-Stop:
# Should-Start:
# Should-Stop:
# Default-Start:
# Default-Stop:
# Short-Description: Start flannel networking service
# Description:
# https://github.com/coreos/flannel
### END INIT INFO
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/opt/bin:
BASE=$(basename $0)
# modify these in /etc/default/$BASE (/etc/default/flannel)
FLANNEL=/opt/bin/$BASE
# This is the pid file managed by flanneld itself
FLANNEL_PIDFILE=/var/run/$BASE.pid
FLANNEL_LOGFILE=/var/log/$BASE.log
FLANNEL_OPTS=""
FLANNEL_DESC="Flannel"
# Get lsb functions
. /lib/lsb/init-functions
if [ -f /etc/default/$BASE ]; then
. /etc/default/$BASE
fi
# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it)
if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then
log_failure_msg "$FLANNEL_DESC is managed via upstart, try using service $BASE $1"
exit 1
fi
# Check flanneld is present
if [ ! -x $FLANNEL ]; then
log_failure_msg "$FLANNEL not present or not executable"
exit 1
fi
fail_unless_root() {
if [ "$(id -u)" != '0' ]; then
log_failure_msg "$FLANNEL_DESC must be run as root"
exit 1
fi
}
FLANNEL_START="start-stop-daemon \
--start \
--background \
--quiet \
--exec $FLANNEL \
--make-pidfile --pidfile $FLANNEL_PIDFILE \
-- $FLANNEL_OPTS \
>> $FLANNEL_LOGFILE 2>&1"
FLANNEL_STOP="start-stop-daemon \
--stop \
--pidfile $FLANNEL_PIDFILE"
case "$1" in
start)
fail_unless_root
log_begin_msg "Starting $FLANNEL_DESC: $BASE"
$FLANNEL_START
log_end_msg $?
;;
stop)
fail_unless_root
log_begin_msg "Stopping $FLANNEL_DESC: $BASE"
$FLANNEL_STOP
log_end_msg $?
;;
restart | force-reload)
fail_unless_root
log_begin_msg "Stopping $FLANNEL_DESC: $BASE"
$FLANNEL_STOP
$FLANNEL_START
log_end_msg $?
;;
status)
status_of_proc -p "$FLANNEL_DESC" "$FLANNEL" "$FLANNEL_DESC"
;;
*)
echo "Usage: $0 {start|stop|restart|status}"
exit 1
;;
esac

View File

@@ -1,34 +0,0 @@
description "Etcd service"
author "@jainvipin"
start on (net-device-up
and local-filesystems
and runlevel [2345])
respawn
# set max open files
limit nofile 2048 4096
pre-start script
# see also https://github.com/jainvipin/kubernetes-ubuntu-start
ETCD=/opt/bin/$UPSTART_JOB
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
if [ -f $ETCD ]; then
exit 0
fi
echo "$ETCD binary not found, exiting"
exit 22
end script
script
# modify these in /etc/default/$UPSTART_JOB (/etc/default/etcd)
ETCD=/opt/bin/$UPSTART_JOB
ETCD_OPTS=""
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
exec "$ETCD" $ETCD_OPTS
end script

View File

@@ -1,30 +0,0 @@
description "Kube-Apiserver service"
author "@jainvipin"
respawn
# start in conjunction with etcd
start on started etcd
stop on stopping etcd
pre-start script
# see also https://github.com/jainvipin/kubernetes-start
KUBE_APISERVER=/opt/bin/$UPSTART_JOB
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
if [ -f $KUBE_APISERVER ]; then
exit 0
fi
exit 22
end script
script
# modify these in /etc/default/$UPSTART_JOB (/etc/default/kube-apiserver)
KUBE_APISERVER=/opt/bin/$UPSTART_JOB
KUBE_APISERVER_OPTS=""
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
exec "$KUBE_APISERVER" $KUBE_APISERVER_OPTS
end script

View File

@@ -1,30 +0,0 @@
description "Kube-Controller-Manager service"
author "@jainvipin"
respawn
# start in conjunction with etcd
start on started etcd
stop on stopping etcd
pre-start script
# see also https://github.com/jainvipin/kubernetes-ubuntu-start
KUBE_CONTROLLER_MANAGER=/opt/bin/$UPSTART_JOB
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
if [ -f $KUBE_CONTROLLER_MANAGER ]; then
exit 0
fi
exit 22
end script
script
# modify these in /etc/default/$UPSTART_JOB (/etc/default/kube-controller-manager)
KUBE_CONTROLLER_MANAGER=/opt/bin/$UPSTART_JOB
KUBE_CONTROLLER_MANAGER_OPTS=""
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
exec "$KUBE_CONTROLLER_MANAGER" $KUBE_CONTROLLER_MANAGER_OPTS
end script

View File

@@ -1,30 +0,0 @@
description "Kube-Scheduler service"
author "@jainvipin"
respawn
# start in conjunction with etcd
start on started etcd
stop on stopping etcd
pre-start script
# see also https://github.com/jainvipin/kubernetes-start
KUBE_SCHEDULER=/opt/bin/$UPSTART_JOB
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
if [ -f $KUBE_SCHEDULER ]; then
exit 0
fi
exit 22
end script
script
# modify these in /etc/default/$UPSTART_JOB (/etc/default/kube-scheduler)
KUBE_SCHEDULER=/opt/bin/$UPSTART_JOB
KUBE_SCHEDULER_OPTS=""
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
exec "$KUBE_SCHEDULER" $KUBE_SCHEDULER_OPTS
end script

View File

@@ -1,100 +0,0 @@
#!/bin/sh
set -e
### BEGIN INIT INFO
# Provides: etcd
# Required-Start: $docker
# Required-Stop:
# Should-Start:
# Should-Stop:
# Default-Start:
# Default-Stop:
# Short-Description: Start distributed key/value pair service
# Description:
# http://www.github.com/coreos/etcd
### END INIT INFO
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/opt/bin:
BASE=$(basename $0)
# modify these in /etc/default/$BASE (/etc/default/etcd)
ETCD=/opt/bin/$BASE
# This is the pid file managed by etcd itself
ETCD_PIDFILE=/var/run/$BASE.pid
ETCD_LOGFILE=/var/log/$BASE.log
ETCD_OPTS=""
ETCD_DESC="Etcd"
# Get lsb functions
. /lib/lsb/init-functions
if [ -f /etc/default/$BASE ]; then
. /etc/default/$BASE
fi
# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it)
if false && [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then
log_failure_msg "$ETCD_DESC is managed via upstart, try using service $BASE $1"
exit 1
fi
# Check etcd is present
if [ ! -x $ETCD ]; then
log_failure_msg "$ETCD not present or not executable"
exit 1
fi
fail_unless_root() {
if [ "$(id -u)" != '0' ]; then
log_failure_msg "$ETCD_DESC must be run as root"
exit 1
fi
}
ETCD_START="start-stop-daemon \
--start \
--background \
--quiet \
--exec $ETCD \
--make-pidfile \
--pidfile $ETCD_PIDFILE \
-- $ETCD_OPTS \
>> $ETCD_LOGFILE 2>&1"
ETCD_STOP="start-stop-daemon \
--stop \
--pidfile $ETCD_PIDFILE"
case "$1" in
start)
fail_unless_root
log_begin_msg "Starting $ETCD_DESC: $BASE"
$ETCD_START
log_end_msg $?
;;
stop)
fail_unless_root
log_begin_msg "Stopping $ETCD_DESC: $BASE"
$ETCD_STOP
log_end_msg $?
;;
restart | force-reload)
fail_unless_root
log_begin_msg "Restarting $ETCD_DESC: $BASE"
$ETCD_STOP
$ETCD_START
log_end_msg $?
;;
status)
status_of_proc -p "$ETCD_PIDFILE" "$ETCD" "$ETCD_DESC"
;;
*)
echo "Usage: $0 {start|stop|restart|status}"
exit 1
;;
esac

View File

@@ -1,99 +0,0 @@
#!/bin/sh
set -e
### BEGIN INIT INFO
# Provides: kube-apiserver
# Required-Start: $etcd
# Required-Stop:
# Should-Start:
# Should-Stop:
# Default-Start:
# Default-Stop:
# Short-Description: Start kube-apiserver service
# Description:
# http://www.github.com/GoogleCloudPlatform/Kubernetes
### END INIT INFO
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/opt/bin:
BASE=$(basename $0)
# modify these in /etc/default/$BASE (/etc/default/kube-apiserver)
KUBE_APISERVER=/opt/bin/$BASE
# This is the pid file managed by kube-apiserver itself
KUBE_APISERVER_PIDFILE=/var/run/$BASE.pid
KUBE_APISERVER_LOGFILE=/var/log/$BASE.log
KUBE_APISERVER_OPTS=""
KUBE_APISERVER_DESC="Kube-Apiserver"
# Get lsb functions
. /lib/lsb/init-functions
if [ -f /etc/default/$BASE ]; then
. /etc/default/$BASE
fi
# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it)
if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then
log_failure_msg "$KUBE_APISERVER_DESC is managed via upstart, try using service $BASE $1"
exit 1
fi
# Check kube-apiserver is present
if [ ! -x $KUBE_APISERVER ]; then
log_failure_msg "$KUBE_APISERVER not present or not executable"
exit 1
fi
fail_unless_root() {
if [ "$(id -u)" != '0' ]; then
log_failure_msg "$KUBE_APISERVER_DESC must be run as root"
exit 1
fi
}
KUBE_APISERVER_START="start-stop-daemon \
--start \
--background \
--quiet \
--exec $KUBE_APISERVER \
--make-pidfile --pidfile $KUBE_APISERVER_PIDFILE \
-- $KUBE_APISERVER_OPTS \
>> $KUBE_APISERVER_LOGFILE 2>&1"
KUBE_APISERVER_STOP="start-stop-daemon \
--stop \
--pidfile $KUBE_APISERVER_PIDFILE"
case "$1" in
start)
fail_unless_root
log_begin_msg "Starting $KUBE_APISERVER_DESC: $BASE"
$KUBE_APISERVER_START
log_end_msg $?
;;
stop)
fail_unless_root
log_begin_msg "Stopping $KUBE_APISERVER_DESC: $BASE"
$KUBE_APISERVER_STOP
log_end_msg $?
;;
restart | force-reload)
fail_unless_root
log_begin_msg "Stopping $KUBE_APISERVER_DESC: $BASE"
$KUBE_APISERVER_STOP
$KUBE_APISERVER_START
log_end_msg $?
;;
status)
status_of_proc -p "$KUBE_APISERVER_PIDFILE" "$KUBE_APISERVER" "$KUBE_APISERVER_DESC"
;;
*)
echo "Usage: $0 {start|stop|restart|status}"
exit 1
;;
esac

View File

@@ -1,99 +0,0 @@
#!/bin/sh
set -e
### BEGIN INIT INFO
# Provides: kube-controller-manager
# Required-Start: $etcd
# Required-Stop:
# Should-Start:
# Should-Stop:
# Default-Start:
# Default-Stop:
# Short-Description: Start kube-controller-manager service
# Description:
# http://www.github.com/GoogleCloudPlatform/Kubernetes
### END INIT INFO
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/opt/bin:
BASE=$(basename $0)
# modify these in /etc/default/$BASE (/etc/default/kube-controller-manager)
KUBE_CONTROLLER_MANAGER=/opt/bin/$BASE
# This is the pid file managed by kube-controller-manager itself
KUBE_CONTROLLER_MANAGER_PIDFILE=/var/run/$BASE.pid
KUBE_CONTROLLER_MANAGER_LOGFILE=/var/log/$BASE.log
KUBE_CONTROLLER_MANAGER_OPTS=""
KUBE_CONTROLLER_MANAGER_DESC="Kube-Controller-Manager"
# Get lsb functions
. /lib/lsb/init-functions
if [ -f /etc/default/$BASE ]; then
. /etc/default/$BASE
fi
# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it)
if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then
log_failure_msg "$KUBE_CONTROLLER_MANAGER_DESC is managed via upstart, try using service $BASE $1"
exit 1
fi
# Check kube-controller-manager is present
if [ ! -x $KUBE_CONTROLLER_MANAGER ]; then
log_failure_msg "$KUBE_CONTROLLER_MANAGER not present or not executable"
exit 1
fi
fail_unless_root() {
if [ "$(id -u)" != '0' ]; then
log_failure_msg "$KUBE_CONTROLLER_MANAGER_DESC must be run as root"
exit 1
fi
}
KUBE_CONTROLLER_MANAGER_START="start-stop-daemon
--start --background \
--quiet \
--exec $KUBE_CONTROLLER_MANAGER \
--make-pidfile \
--pidfile $KUBE_CONTROLLER_MANAGER_PIDFILE \
-- $KUBE_CONTROLLER_MANAGER_OPTS \
>> $KUBE_CONTROLLER_MANAGER_LOGFILE 2>&1"
KUBE_CONTROLLER_MANAGER_STOP="start-stop-daemon \
--stop \
--pidfile $KUBE_CONTROLLER_MANAGER_PIDFILE"
case "$1" in
start)
fail_unless_root
log_begin_msg "Starting $KUBE_CONTROLLER_MANAGER_DESC: $BASE"
$KUBE_CONTROLLER_MANAGER_START
log_end_msg $?
;;
stop)
fail_unless_root
log_begin_msg "Stopping $KUBE_CONTROLLER_MANAGER_DESC: $BASE"
$KUBE_CONTROLLER_MANAGER_STOP
log_end_msg $?
;;
restart | force-reload)
fail_unless_root
log_begin_msg "Restarting $KUBE_CONTROLLER_MANAGER_DESC: $BASE"
$KUBE_CONTROLLER_MANAGER_STOP
$KUBE_CONTROLLER_MANAGER_START
log_end_msg $?
;;
status)
status_of_proc -p "$KUBE_CONTROLLER_MANAGER_PIDFILE" "$KUBE_CONTROLLER_MANAGER" "$KUBE_CONTROLLER_MANAGER_DESC"
;;
*)
echo "Usage: $0 {start|stop|restart|status}"
exit 1
;;
esac

View File

@@ -1,99 +0,0 @@
#!/bin/sh
set -e
### BEGIN INIT INFO
# Provides: kube-scheduler
# Required-Start: $etcd
# Required-Stop:
# Should-Start:
# Should-Stop:
# Default-Start:
# Default-Stop:
# Short-Description: Start kube-scheduler service
# Description:
# http://www.github.com/GoogleCloudPlatform/Kubernetes
### END INIT INFO
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/opt/bin:
BASE=$(basename $0)
# modify these in /etc/default/$BASE (/etc/default/kube-scheduler)
KUBE_SCHEDULER=/opt/bin/$BASE
# This is the pid file managed by kube-scheduler itself
KUBE_SCHEDULER_PIDFILE=/var/run/$BASE.pid
KUBE_SCHEDULER_LOGFILE=/var/log/$BASE.log
KUBE_SCHEDULER_OPTS=""
KUBE_SCHEDULER_DESC="Kube-Scheduler"
# Get lsb functions
. /lib/lsb/init-functions
if [ -f /etc/default/$BASE ]; then
. /etc/default/$BASE
fi
# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it)
if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then
log_failure_msg "$KUBE_SCHEDULER_DESC is managed via upstart, try using service $BASE $1"
exit 1
fi
# Check kube-scheduler is present
if [ ! -x $KUBE_SCHEDULER ]; then
log_failure_msg "$KUBE_SCHEDULER not present or not executable"
exit 1
fi
fail_unless_root() {
if [ "$(id -u)" != '0' ]; then
log_failure_msg "$KUBE_SCHEDULER_DESC must be run as root"
exit 1
fi
}
KUBE_SCHEDULER_START="start-stop-daemon \
--start \
--background \
--quiet \
--exec $KUBE_SCHEDULER \
--make-pidfile --pidfile $KUBE_SCHEDULER_PIDFILE \
-- $KUBE_SCHEDULER_OPTS \
>> $KUBE_SCHEDULER_LOGFILE 2>&1"
KUBE_SCHEDULER_STOP="start-stop-daemon \
--stop \
--pidfile $KUBE_SCHEDULER_PIDFILE"
case "$1" in
start)
fail_unless_root
log_begin_msg "Starting $KUBE_SCHEDULER_DESC: $BASE"
$KUBE_SCHEDULER_START
log_end_msg $?
;;
stop)
fail_unless_root
log_begin_msg "Stopping $KUBE_SCHEDULER_DESC: $BASE"
$KUBE_SCHEDULER_STOP
log_end_msg $?
;;
restart | force-reload)
fail_unless_root
log_begin_msg "Restarting $KUBE_SCHEDULER_DESC: $BASE"
$KUBE_SCHEDULER_STOP
$KUBE_SCHEDULER_START
log_end_msg $?
;;
status)
status_of_proc -p "$KUBE_SCHEDULER_PIDFILE" "$KUBE_SCHEDULER" "$KUBE_SCHEDULER_DESC"
;;
*)
echo "Usage: $0 {start|stop|restart|status}"
exit 1
;;
esac

View File

@@ -1,28 +0,0 @@
description "Flannel service"
author "@chenxingyu"
respawn
start on (net-device-up
and local-filesystems
and runlevel [2345])
pre-start script
FLANNEL=/opt/bin/$UPSTART_JOB
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
if [ -f $FLANNEL ]; then
exit 0
fi
exit 22
end script
script
# modify these in /etc/default/$UPSTART_JOB (/etc/default/flanneld)
FLANNEL=/opt/bin/$UPSTART_JOB
FLANNEL_OPTS=""
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
exec "$FLANNEL" $FLANNEL_OPTS
end script

View File

@@ -1,99 +0,0 @@
#!/bin/sh
set -e
### BEGIN INIT INFO
# Provides: flannel
# Required-Start:
# Required-Stop:
# Should-Start:
# Should-Stop:
# Default-Start:
# Default-Stop:
# Short-Description: Start flannel networking service
# Description:
# https://github.com/coreos/flannel
### END INIT INFO
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/opt/bin:
BASE=$(basename $0)
# modify these in /etc/default/$BASE (/etc/default/flannel)
FLANNEL=/opt/bin/$BASE
# This is the pid file managed by flanneld itself
FLANNEL_PIDFILE=/var/run/$BASE.pid
FLANNEL_LOGFILE=/var/log/$BASE.log
FLANNEL_OPTS=""
FLANNEL_DESC="Flannel"
# Get lsb functions
. /lib/lsb/init-functions
if [ -f /etc/default/$BASE ]; then
. /etc/default/$BASE
fi
# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it)
if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then
log_failure_msg "$FLANNEL_DESC is managed via upstart, try using service $BASE $1"
exit 1
fi
# Check flanneld is present
if [ ! -x $FLANNEL ]; then
log_failure_msg "$FLANNEL not present or not executable"
exit 1
fi
fail_unless_root() {
if [ "$(id -u)" != '0' ]; then
log_failure_msg "$FLANNEL_DESC must be run as root"
exit 1
fi
}
FLANNEL_START="start-stop-daemon \
--start \
--background \
--quiet \
--exec $FLANNEL \
--make-pidfile --pidfile $FLANNEL_PIDFILE \
-- $FLANNEL_OPTS \
>> $FLANNEL_LOGFILE 2>&1"
FLANNEL_STOP="start-stop-daemon \
--stop \
--pidfile $FLANNEL_PIDFILE"
case "$1" in
start)
fail_unless_root
log_begin_msg "Starting $FLANNEL_DESC: $BASE"
$FLANNEL_START
log_end_msg $?
;;
stop)
fail_unless_root
log_begin_msg "Stopping $FLANNEL_DESC: $BASE"
$FLANNEL_STOP
log_end_msg $?
;;
restart | force-reload)
fail_unless_root
log_begin_msg "Stopping $FLANNEL_DESC: $BASE"
$FLANNEL_STOP
$FLANNEL_START
log_end_msg $?
;;
status)
status_of_proc -p "$FLANNEL_DESC" "$FLANNEL" "$FLANNEL_DESC"
;;
*)
echo "Usage: $0 {start|stop|restart|status}"
exit 1
;;
esac

View File

@@ -1,32 +0,0 @@
description "Kube-Proxy service"
author "@jainvipin"
respawn
# start in conjunction with flanneld
start on started flanneld
stop on stopping flanneld
limit nofile 65536 65536
pre-start script
# see also https://github.com/jainvipin/kubernetes-start
KUBE_PROXY=/opt/bin/$UPSTART_JOB
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
if [ -f $KUBE_PROXY ]; then
exit 0
fi
exit 22
end script
script
# modify these in /etc/default/$UPSTART_JOB (/etc/default/kube-proxy)
KUBE_PROXY=/opt/bin/$UPSTART_JOB
KUBE_PROXY_OPTS=""
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
exec "$KUBE_PROXY" $KUBE_PROXY_OPTS
end script

View File

@@ -1,30 +0,0 @@
description "Kubelet service"
author "@jainvipin"
respawn
# start in conjunction with flanneld
start on started flanneld
stop on stopping flanneld
pre-start script
# see also https://github.com/jainvipin/kubernetes-ubuntu-start
KUBELET=/opt/bin/$UPSTART_JOB
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
if [ -f $KUBELET ]; then
exit 0
fi
exit 22
end script
script
# modify these in /etc/default/$UPSTART_JOB (/etc/default/kubelet)
KUBELET=/opt/bin/$UPSTART_JOB
KUBELET_OPTS=""
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
exec "$KUBELET" $KUBELET_OPTS
end script

View File

@@ -1,99 +0,0 @@
#!/bin/sh
set -e
### BEGIN INIT INFO
# Provides: kube-proxy
# Required-Start: $flannel
# Required-Stop:
# Should-Start:
# Should-Stop:
# Default-Start:
# Default-Stop:
# Short-Description: Start kube-proxy service
# Description:
# http://www.github.com/GoogleCloudPlatform/Kubernetes
### END INIT INFO
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/opt/bin:
BASE=$(basename $0)
# modify these in /etc/default/$BASE (/etc/default/kube-proxy)
KUBE_PROXY=/opt/bin/$BASE
# This is the pid file managed by kube-proxy itself
KUBE_PROXY_PIDFILE=/var/run/$BASE.pid
KUBE_PROXY_LOGFILE=/var/log/$BASE.log
KUBE_PROXY_OPTS=""
KUBE_PROXY_DESC="Kube-Proxy"
# Get lsb functions
. /lib/lsb/init-functions
if [ -f /etc/default/$BASE ]; then
. /etc/default/$BASE
fi
# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it)
if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then
log_failure_msg "$KUBE_PROXY_DESC is managed via upstart, try using service $BASE $1"
exit 1
fi
# Check kube-proxy is present
if [ ! -x $KUBE_PROXY ]; then
log_failure_msg "$KUBE_PROXY not present or not executable"
exit 1
fi
fail_unless_root() {
if [ "$(id -u)" != '0' ]; then
log_failure_msg "$KUBE_PROXY_DESC must be run as root"
exit 1
fi
}
KUBE_PROXY_START="start-stop-daemon \
--start \
--background \
--quiet \
--exec $KUBE_PROXY \
--make-pidfile --pidfile $KUBE_PROXY_PIDFILE \
-- $KUBE_PROXY_OPTS \
>> $KUBE_PROXY_LOGFILE 2>&1"
KUBE_PROXY_STOP="start-stop-daemon \
--stop \
--pidfile $KUBE_PROXY_PIDFILE"
case "$1" in
start)
fail_unless_root
log_begin_msg "Starting $KUBE_PROXY_DESC: $BASE"
$KUBE_PROXY_START
log_end_msg $?
;;
stop)
fail_unless_root
log_begin_msg "Stopping $KUBE_PROXY_DESC: $BASE"
$KUBE_PROXY_STOP
log_end_msg $?
;;
restart | force-reload)
fail_unless_root
log_begin_msg "Stopping $KUBE_PROXY_DESC: $BASE"
$KUBE_PROXY_STOP
$KUBE_PROXY_START
log_end_msg $?
;;
status)
status_of_proc -p "$KUBE_PROXY_PIDFILE" "$KUBE_PROXY" "$KUBE_PROXY_DESC"
;;
*)
echo "Usage: $0 {start|stop|restart|status}"
exit 1
;;
esac

View File

@@ -1,99 +0,0 @@
#!/bin/sh
set -e
### BEGIN INIT INFO
# Provides: kubelet
# Required-Start: $flannel
# Required-Stop:
# Should-Start:
# Should-Stop:
# Default-Start:
# Default-Stop:
# Short-Description: Start kubelet service
# Description:
# http://www.github.com/GoogleCloudPlatform/Kubernetes
### END INIT INFO
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/opt/bin:
BASE=$(basename $0)
# modify these in /etc/default/$BASE (/etc/default/kubelet)
KUBELET=/opt/bin/$BASE
# This is the pid file managed by kubelet itself
KUBELET_PIDFILE=/var/run/$BASE.pid
KUBELET_LOGFILE=/var/log/$BASE.log
KUBELET_OPTS=""
KUBELET_DESC="Kubelet"
# Get lsb functions
. /lib/lsb/init-functions
if [ -f /etc/default/$BASE ]; then
. /etc/default/$BASE
fi
# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it)
if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then
log_failure_msg "$KUBELET_DESC is managed via upstart, try using service $BASE $1"
exit 1
fi
# Check kubelet is present
if [ ! -x $KUBELET ]; then
log_failure_msg "$KUBELET not present or not executable"
exit 1
fi
fail_unless_root() {
if [ "$(id -u)" != '0' ]; then
log_failure_msg "$KUBELET_DESC must be run as root"
exit 1
fi
}
KUBELET_START="start-stop-daemon \
--start \
--background \
--quiet \
--exec $KUBELET \
--make-pidfile --pidfile $KUBELET_PIDFILE \
-- $KUBELET_OPTS \
>> $KUBELET_LOGFILE 2>&1"
KUBELET_STOP="start-stop-daemon \
--stop \
--pidfile $KUBELET_PIDFILE"
case "$1" in
start)
fail_unless_root
log_begin_msg "Starting $KUBELET_DESC: $BASE"
$KUBELET_START
log_end_msg $?
;;
stop)
fail_unless_root
log_begin_msg "Stopping $KUBELET_DESC: $BASE"
$KUBELET_STOP
log_end_msg $?
;;
restart | force-reload)
fail_unless_root
log_begin_msg "Stopping $KUBELET_DESC: $BASE"
$KUBELET_STOP
$KUBELET_START
log_end_msg $?
;;
status)
status_of_proc -p "$KUBELET_PIDFILE" "$KUBELET" "$KUBELET_DESC"
;;
*)
echo "Usage: $0 {start|stop|restart|status}"
exit 1
;;
esac

View File

@@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: kube-system

View File

@@ -1,81 +0,0 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# reconfigure docker network settings
source "$HOME/kube/${KUBE_CONFIG_FILE##*/}"
if [[ -n "$DEBUG" ]] && [[ "$DEBUG" != false ]] && [[ "$DEBUG" != FALSE ]]; then
set -x
fi
if [[ "$(id -u)" != "0" ]]; then
echo >&2 "Please run as root"
exit 1
fi
function config_etcd {
attempt=0
while true; do
if /opt/bin/etcdctl get /coreos.com/network/config; then
break
else
# give up after roughly 30 minutes (600 attempts x 3s)
if (( attempt > 600 )); then
echo "timeout waiting for /coreos.com/network/config" >> ~/kube/err.log
exit 2
fi
/opt/bin/etcdctl mk /coreos.com/network/config "{\"Network\":\"${FLANNEL_NET}\", \"Backend\": ${FLANNEL_BACKEND:-"{\"Type\": \"vxlan\"}"}${FLANNEL_OTHER_NET_CONFIG}}"
attempt=$((attempt+1))
sleep 3
fi
done
}
function restart_docker {
attempt=0
while [[ ! -f /run/flannel/subnet.env ]]; do
if (( attempt > 200 )); then
echo "timeout waiting for /run/flannel/subnet.env" >> ~/kube/err.log
exit 2
fi
attempt=$((attempt+1))
sleep 3
done
sudo ip link set dev docker0 down
sudo brctl delbr docker0
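# /run/flannel/subnet.env is written by flanneld and provides the
# FLANNEL_SUBNET and FLANNEL_MTU values used below, e.g. (illustrative
# values): FLANNEL_SUBNET=172.16.56.1/24, FLANNEL_MTU=1472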
source /run/flannel/subnet.env
source /etc/default/docker
echo DOCKER_OPTS=\" -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock \
--bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU}\" > /etc/default/docker
sudo service docker restart
}
if [[ $1 == "i" ]]; then
restart_docker
elif [[ $1 == "ai" ]]; then
config_etcd
restart_docker
elif [[ $1 == "a" ]]; then
config_etcd
else
echo "Another argument is required."
exit 1
fi

View File

@@ -1,989 +0,0 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions that each provider hosting Kubernetes
# must implement to use cluster/kube-*.sh scripts.
set -e
SSH_OPTS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=ERROR -C"
source "${KUBE_ROOT}/cluster/common.sh"
MASTER=""
MASTER_IP=""
NODE_IPS=""
# Assumed Vars:
# KUBE_ROOT
function test-build-release() {
# Make a release
"${KUBE_ROOT}/build/release.sh"
}
# From user input set the necessary k8s and etcd configuration information
function setClusterInfo() {
# Initialize NODE_IPS in the setClusterInfo function.
# NODE_IPS is a global variable that is appended to for each node; if
# setClusterInfo were called multiple times without resetting it, you
# would end up with NODE_IPS=192.168.0.2,192.168.0.3,192.168.0.2,192.168.0.3,
# which is obviously wrong.
NODE_IPS=""
local ii=0
for i in $nodes; do
nodeIP=${i#*@}
if [[ "${roles_array[${ii}]}" == "ai" ]]; then
MASTER_IP=$nodeIP
MASTER=$i
NODE_IPS="$nodeIP"
elif [[ "${roles_array[${ii}]}" == "a" ]]; then
MASTER_IP=$nodeIP
MASTER=$i
elif [[ "${roles_array[${ii}]}" == "i" ]]; then
if [[ -z "${NODE_IPS}" ]];then
NODE_IPS="$nodeIP"
else
NODE_IPS="$NODE_IPS,$nodeIP"
fi
else
echo "unsupported role for ${i}. Please check"
exit 1
fi
((ii=ii+1))
done
}
# Sanity check on $CNI_PLUGIN_CONF and $CNI_PLUGIN_EXES
function check-CNI-config() {
if [ -z "$CNI_PLUGIN_CONF" ] && [ -n "$CNI_PLUGIN_EXES" ]; then
echo "Warning: CNI_PLUGIN_CONF is empty but CNI_PLUGIN_EXES is not (it is $CNI_PLUGIN_EXES); Flannel will be used" >& 2
elif [ -n "$CNI_PLUGIN_CONF" ] && [ -z "$CNI_PLUGIN_EXES" ]; then
echo "Warning: CNI_PLUGIN_EXES is empty but CNI_PLUGIN_CONF is not (it is $CNI_PLUGIN_CONF); Flannel will be used" & 2
elif [ -n "$CNI_PLUGIN_CONF" ] && [ -n "$CNI_PLUGIN_EXES" ]; then
local problems=0
if ! [ -r "$CNI_PLUGIN_CONF" ]; then
echo "ERROR: CNI_PLUGIN_CONF is set to $CNI_PLUGIN_CONF but that is not a readable existing file!" >& 2
let problems=1
fi
local ii=0
for exe in $CNI_PLUGIN_EXES; do
if ! [ -x "$exe" ]; then
echo "ERROR: CNI_PLUGIN_EXES[$ii], which is $exe, is not an existing executable file!" >& 2
let problems=problems+1
fi
let ii=ii+1
done
if (( problems > 0 )); then
exit 1
fi
fi
}
# Verify ssh prereqs
function verify-prereqs() {
local rc
rc=0
ssh-add -L 1> /dev/null 2> /dev/null || rc="$?"
# "Could not open a connection to your authentication agent."
if [[ "${rc}" -eq 2 ]]; then
eval "$(ssh-agent)" > /dev/null
trap-add "kill ${SSH_AGENT_PID}" EXIT
fi
rc=0
ssh-add -L 1> /dev/null 2> /dev/null || rc="$?"
# "The agent has no identities."
if [[ "${rc}" -eq 1 ]]; then
# Try adding one of the default identities, with or without passphrase.
ssh-add || true
fi
# Expect at least one identity to be available.
if ! ssh-add -L 1> /dev/null 2> /dev/null; then
echo "Could not find or add an SSH identity."
echo "Please start ssh-agent, add your identity, and retry."
exit 1
fi
}
# Check if /tmp is mounted noexec
function check-tmp-noexec() {
if ssh $SSH_OPTS "$MASTER" "grep '/tmp' /proc/mounts | grep -q 'noexec'" >/dev/null 2>&1; then
echo "/tmp is mounted noexec on $MASTER_IP, deploying master failed"
exit 1
fi
}
# Install handler for signal trap
function trap-add() {
local handler="$1"
local signal="${2-EXIT}"
local cur
cur="$(eval "sh -c 'echo \$3' -- $(trap -p ${signal})")"
if [[ -n "${cur}" ]]; then
handler="${cur}; ${handler}"
fi
trap "${handler}" ${signal}
}
function verify-cluster() {
local ii=0
for i in ${nodes}
do
if [ "${roles_array[${ii}]}" == "a" ]; then
verify-master
elif [ "${roles_array[${ii}]}" == "i" ]; then
verify-node "$i"
elif [ "${roles_array[${ii}]}" == "ai" ]; then
verify-master
verify-node "$i"
else
echo "unsupported role for ${i}. Please check"
exit 1
fi
((ii=ii+1))
done
}
function verify-master() {
# verify master has all required daemons
echo -n "Validating master"
local -a required_daemon=("kube-apiserver" "kube-controller-manager" "kube-scheduler")
local validated="1"
local try_count=1
local max_try_count=30
until [[ "$validated" == "0" ]]; do
validated="0"
local daemon
for daemon in "${required_daemon[@]}"; do
ssh $SSH_OPTS "$MASTER" "pgrep -f '${daemon}'" >/dev/null 2>&1 || {
echo -n "."
validated="1"
((try_count=try_count+1))
if [[ ${try_count} -gt ${max_try_count} ]]; then
echo -e "\nWarning: Process '${daemon}' failed to run on ${MASTER}, please check.\n"
exit 1
fi
sleep 2
}
done
done
echo
}
function verify-node() {
# verify node has all required daemons
echo -n "Validating ${1}"
local -a required_daemon=("kube-proxy" "kubelet" "docker")
local validated="1"
local try_count=1
local max_try_count=30
until [[ "$validated" == "0" ]]; do
validated="0"
local daemon
for daemon in "${required_daemon[@]}"; do
ssh $SSH_OPTS "$1" "pgrep -f '${daemon}'" >/dev/null 2>&1 || {
echo -n "."
validated="1"
((try_count=try_count+1))
if [[ ${try_count} -gt ${max_try_count} ]]; then
echo -e "\nWarning: Process '${daemon}' failed to run on ${1}, please check.\n"
exit 1
fi
sleep 2
}
done
done
echo
}
# Create ~/kube/default/etcd with proper contents.
# $1: The one IP address where the etcd leader listens.
function create-etcd-opts() {
cat <<EOF > ~/kube/default/etcd
ETCD_OPTS="\
-name infra\
--listen-client-urls=http://127.0.0.1:4001,http://${1}:4001\
-advertise-client-urls http://${1}:4001"
EOF
}
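# For example, `create-etcd-opts 10.10.103.250` writes an ETCD_OPTS line that
# makes etcd listen on http://127.0.0.1:4001 and http://10.10.103.250:4001.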
# Create ~/kube/default/kube-apiserver with proper contents.
# $1: CIDR block for service addresses.
# $2: Admission Controllers to invoke in the API server.
# $3: A port range to reserve for services with NodePort visibility.
# $4: The IP address on which to advertise the apiserver to members of the cluster.
# $5: Tells kube-api to run in privileged mode
function create-kube-apiserver-opts() {
cat <<EOF > ~/kube/default/kube-apiserver
KUBE_APISERVER_OPTS="\
--insecure-bind-address=0.0.0.0\
--insecure-port=8080\
--etcd-servers=http://127.0.0.1:4001\
--logtostderr=true\
--service-cluster-ip-range=${1}\
--admission-control=${2}\
--service-node-port-range=${3}\
--advertise-address=${4}\
--allow-privileged=${5}\
--client-ca-file=/srv/kubernetes/ca.crt\
--tls-cert-file=/srv/kubernetes/server.cert\
--tls-private-key-file=/srv/kubernetes/server.key"
EOF
}
# Create ~/kube/default/kube-controller-manager with proper contents.
function create-kube-controller-manager-opts() {
cat <<EOF > ~/kube/default/kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="\
--master=127.0.0.1:8080\
--root-ca-file=/srv/kubernetes/ca.crt\
--service-account-private-key-file=/srv/kubernetes/server.key\
--logtostderr=true"
EOF
}
# Create ~/kube/default/kube-scheduler with proper contents.
function create-kube-scheduler-opts() {
cat <<EOF > ~/kube/default/kube-scheduler
KUBE_SCHEDULER_OPTS="\
--logtostderr=true\
--master=127.0.0.1:8080"
EOF
}
# Create ~/kube/default/kubelet with proper contents.
# $1: The hostname or IP address by which the kubelet will identify itself.
# $2: The one hostname or IP address at which the API server is reached (insecurely).
# $3: If non-empty then the DNS server IP to configure in each pod.
# $4: If non-empty then added to each pod's domain search list.
# $5: Pathname of the kubelet config file or directory.
# $6: Whether or not we run kubelet in privileged mode
# $7: If empty then flannel is used otherwise CNI is used.
function create-kubelet-opts() {
if [ -n "$7" ] ; then
cni_opts=" --network-plugin=cni --network-plugin-dir=/etc/cni/net.d"
else
cni_opts=""
fi
cat <<EOF > ~/kube/default/kubelet
KUBELET_OPTS="\
--hostname-override=${1} \
--api-servers=http://${2}:8080 \
--logtostderr=true \
--cluster-dns=${3} \
--cluster-domain=${4} \
--pod-manifest-path=${5} \
--allow-privileged=${6}
$cni_opts"
EOF
}
# Create ~/kube/default/kube-proxy with proper contents.
# $1: The hostname or IP address by which the node is identified.
# $2: The one hostname or IP address at which the API server is reached (insecurely).
# $3: Extra options to pass to kube-proxy (e.g. KUBE_PROXY_EXTRA_OPTS).
function create-kube-proxy-opts() {
cat <<EOF > ~/kube/default/kube-proxy
KUBE_PROXY_OPTS="\
--hostname-override=${1} \
--master=http://${2}:8080 \
--logtostderr=true \
${3}"
EOF
}
# Create ~/kube/default/flanneld with proper contents.
# $1: The one hostname or IP address at which the etcd leader listens.
# $2: The IP address or network interface for the local Flannel daemon to use
function create-flanneld-opts() {
cat <<EOF > ~/kube/default/flanneld
FLANNEL_OPTS="--etcd-endpoints=http://${1}:4001 \
--ip-masq \
--iface=${2}"
EOF
}
# Detect the IP for the master
#
# Assumed vars:
# MASTER_NAME
# Vars set:
# KUBE_MASTER_IP
function detect-master() {
source "${KUBE_CONFIG_FILE}"
setClusterInfo
export KUBE_MASTER="${MASTER}"
export KUBE_MASTER_IP="${MASTER_IP}"
echo "Using master ${MASTER_IP}"
}
# Detect the information about the nodes
#
# Assumed vars:
# nodes
# Vars set:
# KUBE_NODE_IP_ADDRESSES (array)
function detect-nodes() {
source "${KUBE_CONFIG_FILE}"
KUBE_NODE_IP_ADDRESSES=()
setClusterInfo
local ii=0
for i in ${nodes}
do
if [ "${roles_array[${ii}]}" == "i" ] || [ "${roles_array[${ii}]}" == "ai" ]; then
KUBE_NODE_IP_ADDRESSES+=("${i#*@}")
fi
((ii=ii+1))
done
if [[ -z "${KUBE_NODE_IP_ADDRESSES[@]}" ]]; then
echo "Could not detect Kubernetes node nodes.\
Make sure you've launched a cluster with 'kube-up.sh'" >&2
exit 1
fi
}
# Instantiate a kubernetes cluster on ubuntu
function kube-up() {
export KUBE_CONFIG_FILE=${KUBE_CONFIG_FILE:-${KUBE_ROOT}/cluster/ubuntu/config-default.sh}
source "${KUBE_CONFIG_FILE}"
# downloading tarball release
"${KUBE_ROOT}/cluster/ubuntu/download-release.sh"
# Fetch the hacked easyrsa that make-ca-cert.sh will use
curl -L -O https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz > /dev/null 2>&1
if ! check-CNI-config; then
return
fi
setClusterInfo
local ii=0
for i in ${nodes}
do
{
if [ "${roles_array[${ii}]}" == "a" ]; then
provision-master
elif [ "${roles_array[${ii}]}" == "ai" ]; then
provision-masterandnode
elif [ "${roles_array[${ii}]}" == "i" ]; then
provision-node "$i"
else
echo "unsupported role for ${i}. Please check"
exit 1
fi
}
((ii=ii+1))
done
wait
export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl"
verify-cluster
detect-master
export CONTEXT="ubuntu"
export KUBE_SERVER="http://${KUBE_MASTER_IP}:8080"
# set kubernetes user and password
load-or-gen-kube-basicauth
create-kubeconfig
}
function provision-master() {
echo -e "\nDeploying master on machine ${MASTER_IP}"
check-tmp-noexec
ssh $SSH_OPTS "$MASTER" "mkdir -p ~/kube/default"
# copy the binaries and scripts to the ~/kube directory on the master
scp -r $SSH_OPTS \
saltbase/salt/generate-cert/make-ca-cert.sh \
easy-rsa.tar.gz \
ubuntu/reconfDocker.sh \
"${KUBE_CONFIG_FILE}" \
ubuntu/util.sh \
ubuntu/master/* \
ubuntu/binaries/master/ \
"${MASTER}:~/kube"
if [ -z "$CNI_PLUGIN_CONF" ] || [ -z "$CNI_PLUGIN_EXES" ]; then
# Flannel is being used: copy the flannel binaries and scripts, set reconf flag
scp -r $SSH_OPTS ubuntu/master-flannel/* "${MASTER}:~/kube"
NEED_RECONFIG_DOCKER=true
else
# CNI is being used: set reconf flag
NEED_RECONFIG_DOCKER=false
fi
EXTRA_SANS=(
IP:$MASTER_IP
IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1
DNS:kubernetes
DNS:kubernetes.default
DNS:kubernetes.default.svc
DNS:kubernetes.default.svc.cluster.local
)
EXTRA_SANS=$(echo "${EXTRA_SANS[@]}" | tr ' ' ,)
BASH_DEBUG_FLAGS=""
if [[ "$DEBUG" == "true" ]] ; then
BASH_DEBUG_FLAGS="set -x"
fi
# remote login to MASTER and configure the k8s master
ssh $SSH_OPTS -t "${MASTER}" "
set +e
${BASH_DEBUG_FLAGS}
source ~/kube/util.sh
setClusterInfo
create-etcd-opts '${MASTER_IP}'
create-kube-apiserver-opts \
'${SERVICE_CLUSTER_IP_RANGE}' \
'${ADMISSION_CONTROL}' \
'${SERVICE_NODE_PORT_RANGE}' \
'${MASTER_IP}' \
'${ALLOW_PRIVILEGED}'
create-kube-controller-manager-opts
create-kube-scheduler-opts
create-flanneld-opts '127.0.0.1' '${MASTER_IP}'
FLANNEL_BACKEND='${FLANNEL_BACKEND}' FLANNEL_OTHER_NET_CONFIG='${FLANNEL_OTHER_NET_CONFIG}' sudo -E -p '[sudo] password to start master: ' -- /bin/bash -ce '
${BASH_DEBUG_FLAGS}
cp ~/kube/default/* /etc/default/
cp ~/kube/init_conf/* /etc/init/
cp ~/kube/init_scripts/* /etc/init.d/
groupadd -f -r kube-cert
${PROXY_SETTING} DEBUG='${DEBUG}' ~/kube/make-ca-cert.sh \"${MASTER_IP}\" \"${EXTRA_SANS}\"
mkdir -p /opt/bin/
cp ~/kube/master/* /opt/bin/
service etcd start
if ${NEED_RECONFIG_DOCKER}; then FLANNEL_NET=\"${FLANNEL_NET}\" KUBE_CONFIG_FILE=\"${KUBE_CONFIG_FILE}\" DOCKER_OPTS=\"${DOCKER_OPTS}\" DEBUG=\"$DEBUG\" ~/kube/reconfDocker.sh a; fi
'" || {
echo "Deploying master on machine ${MASTER_IP} failed"
exit 1
}
}
function provision-node() {
echo -e "\nDeploying node on machine ${1#*@}"
ssh $SSH_OPTS $1 "mkdir -p ~/kube/default"
# copy the binaries and scripts to the ~/kube directory on the node
scp -r $SSH_OPTS \
"${KUBE_CONFIG_FILE}" \
ubuntu/util.sh \
ubuntu/reconfDocker.sh \
ubuntu/minion/* \
ubuntu/binaries/minion \
"${1}:~/kube"
if [ -z "$CNI_PLUGIN_CONF" ] || [ -z "$CNI_PLUGIN_EXES" ]; then
# Prep for Flannel use: copy the flannel binaries and scripts, set reconf flag
scp -r $SSH_OPTS ubuntu/minion-flannel/* "${1}:~/kube"
SERVICE_STARTS="service flanneld start"
NEED_RECONFIG_DOCKER=true
CNI_PLUGIN_CONF=''
else
# Prep for CNI use: copy the CNI config and binaries, adjust upstart config, set reconf flag
ssh $SSH_OPTS "${1}" "rm -rf tmp-cni; mkdir -p tmp-cni/exes tmp-cni/conf"
scp $SSH_OPTS "$CNI_PLUGIN_CONF" "${1}:tmp-cni/conf/"
scp -p $SSH_OPTS $CNI_PLUGIN_EXES "${1}:tmp-cni/exes/"
ssh $SSH_OPTS -t "${1}" '
sudo -p "[sudo] password to prep node %h: " -- /bin/bash -ce "
mkdir -p /opt/cni/bin /etc/cni/net.d
cp ~$(id -un)/tmp-cni/conf/* /etc/cni/net.d/
cp --preserve=mode ~$(id -un)/tmp-cni/exes/* /opt/cni/bin/
'"sed -i.bak -e 's/start on started flanneld/start on started ${CNI_KUBELET_TRIGGER}/' -e 's/stop on stopping flanneld/stop on stopping ${CNI_KUBELET_TRIGGER}/' "'~$(id -un)/kube/init_conf/kubelet.conf
'"sed -i.bak -e 's/start on started flanneld/start on started networking/' -e 's/stop on stopping flanneld/stop on stopping networking/' "'~$(id -un)/kube/init_conf/kube-proxy.conf
"'
SERVICE_STARTS='service kubelet start
service kube-proxy start'
NEED_RECONFIG_DOCKER=false
fi
BASH_DEBUG_FLAGS=""
if [[ "$DEBUG" == "true" ]] ; then
BASH_DEBUG_FLAGS="set -x"
fi
# remote login to node and configure k8s node
ssh $SSH_OPTS -t "$1" "
set +e
${BASH_DEBUG_FLAGS}
source ~/kube/util.sh
setClusterInfo
create-kubelet-opts \
'${1#*@}' \
'${MASTER_IP}' \
'${DNS_SERVER_IP}' \
'${DNS_DOMAIN}' \
'${KUBELET_POD_MANIFEST_PATH}' \
'${ALLOW_PRIVILEGED}' \
'${CNI_PLUGIN_CONF}'
create-kube-proxy-opts \
'${1#*@}' \
'${MASTER_IP}' \
'${KUBE_PROXY_EXTRA_OPTS}'
create-flanneld-opts '${MASTER_IP}' '${1#*@}'
sudo -E -p '[sudo] password to start node: ' -- /bin/bash -ce '
${BASH_DEBUG_FLAGS}
cp ~/kube/default/* /etc/default/
cp ~/kube/init_conf/* /etc/init/
cp ~/kube/init_scripts/* /etc/init.d/
mkdir -p /opt/bin/
cp ~/kube/minion/* /opt/bin
${SERVICE_STARTS}
if ${NEED_RECONFIG_DOCKER}; then KUBE_CONFIG_FILE=\"${KUBE_CONFIG_FILE}\" DOCKER_OPTS=\"${DOCKER_OPTS}\" DEBUG=\"$DEBUG\" ~/kube/reconfDocker.sh i; fi
'" || {
echo "Deploying node on machine ${1#*@} failed"
exit 1
}
}
function provision-masterandnode() {
echo -e "\nDeploying master and node on machine ${MASTER_IP}"
ssh $SSH_OPTS $MASTER "mkdir -p ~/kube/default"
# copy the binaries and scripts to the ~/kube directory on the master
# scp order matters
scp -r $SSH_OPTS \
saltbase/salt/generate-cert/make-ca-cert.sh \
easy-rsa.tar.gz \
"${KUBE_CONFIG_FILE}" \
ubuntu/util.sh \
ubuntu/minion/* \
ubuntu/master/* \
ubuntu/reconfDocker.sh \
ubuntu/binaries/master/ \
ubuntu/binaries/minion \
"${MASTER}:~/kube"
if [ -z "$CNI_PLUGIN_CONF" ] || [ -z "$CNI_PLUGIN_EXES" ]; then
# Prep for Flannel use: copy the flannel binaries and scripts, set reconf flag
scp -r $SSH_OPTS ubuntu/minion-flannel/* ubuntu/master-flannel/* "${MASTER}:~/kube"
NEED_RECONFIG_DOCKER=true
CNI_PLUGIN_CONF=''
else
# Prep for CNI use: copy the CNI config and binaries, adjust upstart config, set reconf flag
ssh $SSH_OPTS "${MASTER}" "rm -rf tmp-cni; mkdir -p tmp-cni/exes tmp-cni/conf"
scp $SSH_OPTS "$CNI_PLUGIN_CONF" "${MASTER}:tmp-cni/conf/"
scp -p $SSH_OPTS $CNI_PLUGIN_EXES "${MASTER}:tmp-cni/exes/"
ssh $SSH_OPTS -t "${MASTER}" '
sudo -p "[sudo] password to prep master %h: " -- /bin/bash -ce "
mkdir -p /opt/cni/bin /etc/cni/net.d
cp ~$(id -un)/tmp-cni/conf/* /etc/cni/net.d/
cp --preserve=mode ~$(id -un)/tmp-cni/exes/* /opt/cni/bin/
'"sed -i.bak -e 's/start on started flanneld/start on started etcd/' -e 's/stop on stopping flanneld/stop on stopping etcd/' "'~$(id -un)/kube/init_conf/kube*.conf
"'
NEED_RECONFIG_DOCKER=false
fi
EXTRA_SANS=(
IP:${MASTER_IP}
IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1
DNS:kubernetes
DNS:kubernetes.default
DNS:kubernetes.default.svc
DNS:kubernetes.default.svc.cluster.local
)
EXTRA_SANS=$(echo "${EXTRA_SANS[@]}" | tr ' ' ,)
BASH_DEBUG_FLAGS=""
if [[ "$DEBUG" == "true" ]] ; then
BASH_DEBUG_FLAGS="set -x"
fi
# remote login to the master/node and configure k8s
ssh $SSH_OPTS -t "$MASTER" "
set +e
${BASH_DEBUG_FLAGS}
source ~/kube/util.sh
setClusterInfo
create-etcd-opts '${MASTER_IP}'
create-kube-apiserver-opts \
'${SERVICE_CLUSTER_IP_RANGE}' \
'${ADMISSION_CONTROL}' \
'${SERVICE_NODE_PORT_RANGE}' \
'${MASTER_IP}' \
'${ALLOW_PRIVILEGED}'
create-kube-controller-manager-opts
create-kube-scheduler-opts
create-kubelet-opts \
'${MASTER_IP}' \
'${MASTER_IP}' \
'${DNS_SERVER_IP}' \
'${DNS_DOMAIN}' \
'${KUBELET_POD_MANIFEST_PATH}' \
'${ALLOW_PRIVILEGED}' \
'${CNI_PLUGIN_CONF}'
create-kube-proxy-opts \
'${MASTER_IP}' \
'${MASTER_IP}' \
'${KUBE_PROXY_EXTRA_OPTS}'
create-flanneld-opts '127.0.0.1' '${MASTER_IP}'
FLANNEL_BACKEND='${FLANNEL_BACKEND}' FLANNEL_OTHER_NET_CONFIG='${FLANNEL_OTHER_NET_CONFIG}' sudo -E -p '[sudo] password to start master: ' -- /bin/bash -ce '
${BASH_DEBUG_FLAGS}
cp ~/kube/default/* /etc/default/
cp ~/kube/init_conf/* /etc/init/
cp ~/kube/init_scripts/* /etc/init.d/
groupadd -f -r kube-cert
${PROXY_SETTING} DEBUG='${DEBUG}' ~/kube/make-ca-cert.sh \"${MASTER_IP}\" \"${EXTRA_SANS}\"
mkdir -p /opt/bin/
cp ~/kube/master/* /opt/bin/
cp ~/kube/minion/* /opt/bin/
service etcd start
if ${NEED_RECONFIG_DOCKER}; then FLANNEL_NET=\"${FLANNEL_NET}\" KUBE_CONFIG_FILE=\"${KUBE_CONFIG_FILE}\" DOCKER_OPTS=\"${DOCKER_OPTS}\" DEBUG=\"$DEBUG\" ~/kube/reconfDocker.sh ai; fi
'" || {
echo "Deploying master and node on machine ${MASTER_IP} failed"
exit 1
}
}
# check whether kubelet has torn down all of the pods
function check-pods-torn-down() {
local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
local attempt=0
# poll every 5 seconds, giving up after 120 attempts (~10 minutes)
while [[ -n "$("${kubectl}" get pods --show-all --all-namespaces | tail -n +2)" ]]; do
if (( attempt > 120 )); then
echo "timeout waiting for tearing down pods" >> ~/kube/err.log
break
fi
echo "waiting for tearing down pods"
attempt=$((attempt+1))
sleep 5
done
}
# Delete a kubernetes cluster
function kube-down() {
export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl"
export KUBE_CONFIG_FILE=${KUBE_CONFIG_FILE:-${KUBE_ROOT}/cluster/ubuntu/config-default.sh}
source "${KUBE_CONFIG_FILE}"
tear_down_alive_resources
check-pods-torn-down
local ii=0
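# Each entry in ${nodes} has the form user@ip; the ${i#*@} expansion strips
# the "user@" prefix, e.g. "foo@10.1.2.3" -> "10.1.2.3".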
for i in ${nodes}; do
if [[ "${roles_array[${ii}]}" == "ai" || "${roles_array[${ii}]}" == "a" ]]; then
echo "Cleaning on master ${i#*@}"
ssh $SSH_OPTS -t "$i" "
pgrep etcd && \
sudo -p '[sudo] password to stop master: ' -- /bin/bash -c '
service etcd stop
rm -rf \
/opt/bin/etcd* \
/etc/init/etcd.conf \
/etc/init.d/etcd \
/etc/default/etcd
rm -rf /infra*
rm -rf /srv/kubernetes
'
" || echo "Cleaning on master ${i#*@} failed"
if [[ "${roles_array[${ii}]}" == "ai" ]]; then
ssh $SSH_OPTS -t "$i" "sudo rm -rf /var/lib/kubelet"
fi
elif [[ "${roles_array[${ii}]}" == "i" ]]; then
echo "Cleaning on node ${i#*@}"
ssh $SSH_OPTS -t "$i" "
pgrep flanneld && \
sudo -p '[sudo] password to stop node: ' -- /bin/bash -c '
service flanneld stop
rm -rf /var/lib/kubelet
'
" || echo "Cleaning on node ${i#*@} failed"
else
echo "unsupported role for ${i}"
fi
ssh $SSH_OPTS -t "$i" "sudo -- /bin/bash -c '
rm -f \
/opt/bin/kube* \
/opt/bin/flanneld \
/etc/init/kube* \
/etc/init/flanneld.conf \
/etc/init.d/kube* \
/etc/init.d/flanneld \
/etc/default/kube* \
/etc/default/flanneld
rm -rf ~/kube
rm -f /run/flannel/subnet.env
'" || echo "cleaning legacy files on ${i#*@} failed"
((ii=ii+1))
done
}
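# Sketch of typical usage (not exercised here): kube-down is normally invoked
# through the provider-agnostic entry point, e.g.
#   KUBERNETES_PROVIDER=ubuntu ./cluster/kube-down.sh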
# Perform common upgrade setup tasks
function prepare-push() {
# Use local binaries for kube-push
if [[ -z "${KUBE_VERSION}" ]]; then
echo "Use local binaries for kube-push"
if [[ ! -d "${KUBE_ROOT}/cluster/ubuntu/binaries" ]]; then
echo "No local binaries.Please check"
exit 1
else
echo "Please make sure all the required local binaries are prepared ahead"
sleep 3
fi
else
# Run download-release.sh to get the required release
export KUBE_VERSION
"${KUBE_ROOT}/cluster/ubuntu/download-release.sh"
fi
}
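# Sketch of the two supported flows (assuming the standard kube-push entry
# point): leave KUBE_VERSION unset to push locally built binaries, or set it
# to download a release first, e.g. (hypothetical version)
#   KUBE_VERSION=1.2.0 ./cluster/kube-push.sh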
# Update a kubernetes master to the expected release
function push-master() {
export KUBE_CONFIG_FILE=${KUBE_CONFIG_FILE:-${KUBE_ROOT}/cluster/ubuntu/config-default.sh}
source "${KUBE_CONFIG_FILE}"
if [[ ! -f "${KUBE_ROOT}/cluster/ubuntu/binaries/master/kube-apiserver" ]]; then
echo "There is no required release of kubernetes, please check first"
exit 1
fi
export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl"
setClusterInfo
local ii=0
for i in ${nodes}; do
if [[ "${roles_array[${ii}]}" == "a" || "${roles_array[${ii}]}" == "ai" ]]; then
echo "Cleaning master ${i#*@}"
ssh $SSH_OPTS -t "$i" "
pgrep etcd && sudo -p '[sudo] password to stop all processes: ' -- /bin/bash -c '
service etcd stop
sleep 3
rm -rf \
/etc/init/etcd.conf \
/etc/init/kube* \
/etc/init/flanneld.conf \
/etc/init.d/etcd \
/etc/init.d/kube* \
/etc/init.d/flanneld \
/etc/default/etcd \
/etc/default/kube* \
/etc/default/flanneld
rm -f \
/opt/bin/etcd* \
/opt/bin/kube* \
/opt/bin/flanneld
rm -f /run/flannel/subnet.env
rm -rf ~/kube
'" || echo "Cleaning master ${i#*@} failed"
fi
if [[ "${roles_array[${ii}]}" == "a" ]]; then
provision-master
elif [[ "${roles_array[${ii}]}" == "ai" ]]; then
provision-masterandnode
elif [[ "${roles_array[${ii}]}" == "i" ]]; then
((ii=ii+1))
continue
else
echo "unsupported role for ${i}, please check"
exit 1
fi
((ii=ii+1))
done
verify-cluster
}
# Update a kubernetes node to the expected release
function push-node() {
export KUBE_CONFIG_FILE=${KUBE_CONFIG_FILE:-${KUBE_ROOT}/cluster/ubuntu/config-default.sh}
source "${KUBE_CONFIG_FILE}"
if [[ ! -f "${KUBE_ROOT}/cluster/ubuntu/binaries/minion/kubelet" ]]; then
echo "There is no required release of kubernetes, please check first"
exit 1
fi
export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl"
setClusterInfo
local node_ip=${1}
local ii=0
local existing=false
for i in ${nodes}; do
if [[ "${roles_array[${ii}]}" == "i" && ${i#*@} == "$node_ip" ]]; then
echo "Cleaning node ${i#*@}"
ssh $SSH_OPTS -t "$i" "
sudo -p '[sudo] password to stop all processes: ' -- /bin/bash -c '
service flanneld stop
rm -f /opt/bin/kube* \
/opt/bin/flanneld
rm -rf \
/etc/init/kube* \
/etc/init/flanneld.conf \
/etc/init.d/kube* \
/etc/init.d/flanneld \
/etc/default/kube* \
/etc/default/flanneld
rm -f /run/flannel/subnet.env
rm -rf ~/kube
'" || echo "Cleaning node ${i#*@} failed"
provision-node "$i"
existing=true
elif [[ "${roles_array[${ii}]}" == "a" || "${roles_array[${ii}]}" == "ai" ]] && [[ ${i#*@} == "$node_ip" ]]; then
echo "${i} is master node, please try ./kube-push -m instead"
existing=true
elif [[ "${roles_array[${ii}]}" == "i" || "${roles_array[${ii}]}" == "a" || "${roles_array[${ii}]}" == "ai" ]]; then
((ii=ii+1))
continue
else
echo "unsupported role for ${i}, please check"
exit 1
fi
((ii=ii+1))
done
if [[ "${existing}" == false ]]; then
echo "node ${node_ip} does not exist"
else
verify-cluster
fi
}
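# push-node expects the bare node IP as its first argument, e.g.
# (hypothetical address) push-node 10.1.2.3; entries whose IP does not match
# are skipped, so an unknown IP falls through to the "does not exist" branch.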
# Update a kubernetes cluster to the expected release or local source
function kube-push() {
prepare-push
export KUBE_CONFIG_FILE=${KUBE_CONFIG_FILE:-${KUBE_ROOT}/cluster/ubuntu/config-default.sh}
source "${KUBE_CONFIG_FILE}"
if [[ ! -f "${KUBE_ROOT}/cluster/ubuntu/binaries/master/kube-apiserver" ]]; then
echo "There is no required release of kubernetes, please check first"
exit 1
fi
export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl"
# stop all the kube processes & etcd
local ii=0
for i in ${nodes}; do
if [[ "${roles_array[${ii}]}" == "ai" || "${roles_array[${ii}]}" == "a" ]]; then
echo "Cleaning on master ${i#*@}"
ssh $SSH_OPTS -t "$i" "
pgrep etcd && \
sudo -p '[sudo] password to stop master: ' -- /bin/bash -c '
service etcd stop
rm -rf \
/opt/bin/etcd* \
/etc/init/etcd.conf \
/etc/init.d/etcd \
/etc/default/etcd
'" || echo "Cleaning on master ${i#*@} failed"
elif [[ "${roles_array[${ii}]}" == "i" ]]; then
echo "Cleaning on node ${i#*@}"
ssh $SSH_OPTS -t $i "
pgrep flanneld && \
sudo -p '[sudo] password to stop node: ' -- /bin/bash -c '
service flanneld stop
'" || echo "Cleaning on node ${i#*@} failed"
else
echo "unsupported role for ${i}"
fi
ssh $SSH_OPTS -t "$i" "sudo -- /bin/bash -c '
rm -f \
/opt/bin/kube* \
/opt/bin/flanneld
rm -rf \
/etc/init/kube* \
/etc/init/flanneld.conf \
/etc/init.d/kube* \
/etc/init.d/flanneld \
/etc/default/kube* \
/etc/default/flanneld
rm -f /run/flannel/subnet.env
rm -rf ~/kube
'" || echo "Cleaning legacy files on ${i#*@} failed"
((ii=ii+1))
done
# provision all machines, including the master & the nodes
setClusterInfo
local ii=0
for i in ${nodes}; do
if [[ "${roles_array[${ii}]}" == "a" ]]; then
provision-master
elif [[ "${roles_array[${ii}]}" == "i" ]]; then
provision-node "$i"
elif [[ "${roles_array[${ii}]}" == "ai" ]]; then
provision-masterandnode
else
echo "unsupported role for ${i}. Please check"
exit 1
fi
((ii=ii+1))
done
verify-cluster
}
# Perform preparations required to run e2e tests
function prepare-e2e() {
echo "Ubuntu doesn't need special preparations for e2e tests" 1>&2
}