mirror of https://github.com/k3s-io/k3s
Merge pull request #39462 from Shawyeok/master
Automatic merge from submit-queue. Adds the ability to quickly create an HA cluster with the kube-up.sh CentOS provider. This makes the `kube-up.sh` `centos` provider support quickly creating an HA cluster; as noted in [#39430](https://github.com/kubernetes/kubernetes/issues/39430), this is more flexible than `kops` or `kubeadm` for some people in a restricted network region. I'm new to k8s development, so if this pull request needs changes, please let me know. ```release-note Added support for creating HA clusters for CentOS using kube-up.sh. ```
commit
75c6990966
|
@ -1,4 +1,5 @@
|
|||
binaries
|
||||
ca-cert
|
||||
|
||||
master/bin/etcd
|
||||
master/bin/etcdctl
|
||||
|
|
|
@ -14,24 +14,90 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
readonly root=$(dirname "${BASH_SOURCE}")
|
||||
|
||||
## Contains configuration values for the CentOS cluster
|
||||
# The user should have sudo privilege
|
||||
export MASTER=${MASTER:-"centos@172.10.0.11"}
|
||||
export MASTER_IP=${MASTER#*@}
|
||||
|
||||
# Define all your master nodes,
|
||||
# And separated with blank space like <user_1@ip_1> <user_2@ip_2> <user_3@ip_3>.
|
||||
# The user should have sudo privilege
|
||||
export MASTERS="${MASTERS:-$MASTER}"
|
||||
|
||||
# length-of <arg0>
|
||||
# Get the length of specific arg0, could be a space-separate string or array.
|
||||
# length-of <arg0>
# Print the number of whitespace-separated words in $1, which may be a
# space-separated string or an expanded array ("${arr[*]}").
function length-of() {
  # Let word-splitting populate an array and count its elements instead of
  # a manual `let ++len` loop: `let` is a deprecated idiom and arithmetic
  # commands can interact badly with `set -e` in edge cases.
  local -a parts=($1)
  echo "${#parts[@]}"
}
|
||||
# Number of nodes in your cluster.
|
||||
export NUM_MASTERS="${NUM_MASTERS:-$(length-of "$MASTERS")}"
|
||||
|
||||
# Get default master advertise address: first master node.
|
||||
# Print the default master advertise address: the IP portion of the first
# entry in $MASTERS (entries have the form <user>@<ip>).
#
# Assumed vars:
#   MASTERS - space-separated list of <user>@<ip> master entries
function default-advertise-address() {
  # Re-split MASTERS into the positional parameters and keep the first one.
  set -- ${MASTERS}
  # Strip everything up to and including the '@' to leave just the IP.
  echo "${1#*@}"
}
|
||||
|
||||
# Define advertise address of masters, could be a load balancer address.
|
||||
# If not provided, the default is ip of first master node.
|
||||
export MASTER_ADVERTISE_ADDRESS="${MASTER_ADVERTISE_ADDRESS:-$(default-advertise-address)}"
|
||||
export MASTER_ADVERTISE_IP="${MASTER_ADVERTISE_IP:-$(getent hosts "${MASTER_ADVERTISE_ADDRESS}" | awk '{print $1; exit}')}"
|
||||
|
||||
# Define all your minion nodes,
|
||||
# And separated with blank space like <user_1@ip_1> <user_2@ip_2> <user_3@ip_3>.
|
||||
# The user should have sudo privilege
|
||||
export NODES=${NODES:-"centos@172.10.0.12 centos@172.10.0.13"}
|
||||
export NODES="${NODES:-"centos@172.10.0.12 centos@172.10.0.13"}"
|
||||
|
||||
# Number of nodes in your cluster.
|
||||
export NUM_NODES=${NUM_NODES:-2}
|
||||
export NUM_NODES="${NUM_NODES:-$(length-of "$NODES")}"
|
||||
|
||||
# Should be removed when NUM_NODES is deprecated in validate-cluster.sh
|
||||
export NUM_NODES=${NUM_NODES}
|
||||
export NUM_NODES="${NUM_NODES}"
|
||||
|
||||
# By default, the cluster will use the etcd installed on master.
|
||||
export ETCD_SERVERS=${ETCD_SERVERS:-"http://$MASTER_IP:2379"}
|
||||
# Build the comma-separated list of etcd client endpoints, one per master:
#   http://<ip1>:2379,http://<ip2>:2379,...
#
# Assumed vars:
#   MASTERS - space-separated list of <user>@<ip> master entries
function concat-etcd-servers() {
  local etcd_servers=""
  local master master_ip
  for master in ${MASTERS}; do
    master_ip="${master#*@}"
    # Use the same `+=` accumulation style as concat-etcd-initial-cluster
    # for consistency: append a comma only between entries.
    if [ -n "$etcd_servers" ]; then
      etcd_servers+=","
    fi
    etcd_servers+="http://${master_ip}:2379"
  done

  echo "$etcd_servers"
}
|
||||
export ETCD_SERVERS="$(concat-etcd-servers)"
|
||||
|
||||
# By default, etcd cluster will use runtime configuration
|
||||
# https://coreos.com/etcd/docs/latest/v2/runtime-configuration.html
|
||||
# Get etc initial cluster and store in ETCD_INITIAL_CLUSTER
|
||||
# Build the etcd initial-cluster value: each master becomes a named peer
# entry "infraN=http://<ip>:2380", joined with commas, e.g.
#   infra0=http://<ip1>:2380,infra1=http://<ip2>:2380
#
# Assumed vars:
#   MASTERS - space-separated list of <user>@<ip> master entries
function concat-etcd-initial-cluster() {
  local cluster=""
  local index=0
  local node peer_ip
  for node in ${MASTERS}; do
    peer_ip="${node#*@}"
    # Separate entries with a comma (skipped before the first entry).
    if [ -n "$cluster" ]; then
      cluster+=","
    fi
    cluster+="infra${index}=http://${peer_ip}:2380"
    index=$((index + 1))
  done

  echo "$cluster"
}
|
||||
export ETCD_INITIAL_CLUSTER="$(concat-etcd-initial-cluster)"
|
||||
|
||||
export CERT_DIR="${CERT_DIR:-$(cd "${root}/ca-cert" && pwd)}"
|
||||
|
||||
# define the IP range used for service cluster IPs.
|
||||
# according to rfc 1918 ref: https://tools.ietf.org/html/rfc1918 choose a private ip range here.
|
||||
|
@ -55,8 +121,10 @@ export ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQ
|
|||
|
||||
# Extra options to set on the Docker command line.
|
||||
# This is useful for setting --insecure-registry for local registries.
|
||||
export DOCKER_OPTS=${DOCKER_OPTS:-""}
|
||||
export DOCKER_OPTS=${DOCKER_OPTS:-""}
|
||||
|
||||
|
||||
# Timeouts for process checking on master and minion
|
||||
export PROCESS_CHECK_TIMEOUT=${PROCESS_CHECK_TIMEOUT:-180} # seconds.
|
||||
|
||||
unset -f default-advertise-address concat-etcd-servers length-of concat-etcd-initial-cluster
|
||||
|
|
|
@ -32,7 +32,7 @@ KUBE_LOG_LEVEL="--v=4"
|
|||
KUBE_ETCD_SERVERS="--etcd-servers=${ETCD_SERVERS}"
|
||||
|
||||
# --insecure-bind-address=127.0.0.1: The IP address on which to serve the --insecure-port.
|
||||
KUBE_API_ADDRESS="--insecure-bind-address=${MASTER_ADDRESS}"
|
||||
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
|
||||
|
||||
# --insecure-port=8080: The port on which to serve unsecured, unauthenticated access.
|
||||
KUBE_API_PORT="--insecure-port=8080"
|
||||
|
|
|
@ -29,13 +29,17 @@ KUBE_CONTROLLER_MANAGER_ROOT_CA_FILE="--root-ca-file=/srv/kubernetes/ca.crt"
|
|||
# --service-account-private-key-file="": Filename containing a PEM-encoded private
|
||||
# RSA key used to sign service account tokens.
|
||||
KUBE_CONTROLLER_MANAGER_SERVICE_ACCOUNT_PRIVATE_KEY_FILE="--service-account-private-key-file=/srv/kubernetes/server.key"
|
||||
|
||||
# --leader-elect
|
||||
KUBE_LEADER_ELECT="--leader-elect"
|
||||
EOF
|
||||
|
||||
KUBE_CONTROLLER_MANAGER_OPTS=" \${KUBE_LOGTOSTDERR} \\
|
||||
\${KUBE_LOG_LEVEL} \\
|
||||
\${KUBE_MASTER} \\
|
||||
\${KUBE_CONTROLLER_MANAGER_ROOT_CA_FILE} \\
|
||||
\${KUBE_CONTROLLER_MANAGER_SERVICE_ACCOUNT_PRIVATE_KEY_FILE}"
|
||||
\${KUBE_CONTROLLER_MANAGER_SERVICE_ACCOUNT_PRIVATE_KEY_FILE}\\
|
||||
\${KUBE_LEADER_ELECT}"
|
||||
|
||||
cat <<EOF >/usr/lib/systemd/system/kube-controller-manager.service
|
||||
[Unit]
|
||||
|
|
|
@ -17,30 +17,34 @@
|
|||
## Create etcd.conf, etcd.service, and start etcd service.
|
||||
|
||||
|
||||
etcd_data_dir=/var/lib/etcd/
|
||||
etcd_data_dir=/var/lib/etcd
|
||||
mkdir -p ${etcd_data_dir}
|
||||
|
||||
ETCD_NAME=${1:-"default"}
|
||||
ETCD_LISTEN_IP=${2:-"0.0.0.0"}
|
||||
ETCD_INITIAL_CLUSTER=${3:-}
|
||||
|
||||
cat <<EOF >/opt/kubernetes/cfg/etcd.conf
|
||||
# [member]
|
||||
ETCD_NAME=default
|
||||
ETCD_NAME="${ETCD_NAME}"
|
||||
ETCD_DATA_DIR="${etcd_data_dir}/default.etcd"
|
||||
#ETCD_SNAPSHOT_COUNTER="10000"
|
||||
#ETCD_HEARTBEAT_INTERVAL="100"
|
||||
#ETCD_ELECTION_TIMEOUT="1000"
|
||||
#ETCD_LISTEN_PEER_URLS="http://localhost:2380,http://localhost:7001"
|
||||
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
|
||||
ETCD_LISTEN_PEER_URLS="http://${ETCD_LISTEN_IP}:2380"
|
||||
ETCD_LISTEN_CLIENT_URLS="http://${ETCD_LISTEN_IP}:2379,http://127.0.0.1:2379"
|
||||
#ETCD_MAX_SNAPSHOTS="5"
|
||||
#ETCD_MAX_WALS="5"
|
||||
#ETCD_CORS=""
|
||||
#
|
||||
#[cluster]
|
||||
#ETCD_INITIAL_ADVERTISE_PEER_URLS="http://localhost:2380,http://localhost:7001"
|
||||
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://${ETCD_LISTEN_IP}:2380"
|
||||
# if you use different ETCD_NAME (e.g. test),
|
||||
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
|
||||
#ETCD_INITIAL_CLUSTER="default=http://localhost:2380,default=http://localhost:7001"
|
||||
#ETCD_INITIAL_CLUSTER_STATE="new"
|
||||
#ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
|
||||
ETCD_ADVERTISE_CLIENT_URLS="http://localhost:2379"
|
||||
ETCD_INITIAL_CLUSTER="${ETCD_INITIAL_CLUSTER}"
|
||||
ETCD_INITIAL_CLUSTER_STATE="new"
|
||||
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
|
||||
ETCD_ADVERTISE_CLIENT_URLS="http://${ETCD_LISTEN_IP}:2379"
|
||||
#ETCD_DISCOVERY=""
|
||||
#ETCD_DISCOVERY_SRV=""
|
||||
#ETCD_DISCOVERY_FALLBACK="proxy"
|
||||
|
|
|
@ -0,0 +1,23 @@
|
|||
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

## Switch this node's etcd from bootstrap mode to a member of an existing
## cluster, then restart the service so the new setting takes effect.

# Flip initial-cluster-state from "new" to "existing" in the generated config.
sed -i 's/ETCD_INITIAL_CLUSTER_STATE="new"/ETCD_INITIAL_CLUSTER_STATE="existing"/' /opt/kubernetes/cfg/etcd.conf

# Reload unit files, make etcd start on boot, and restart it now.
systemctl daemon-reload
systemctl enable etcd
systemctl restart etcd
|
|
@ -29,6 +29,9 @@ KUBE_LOG_LEVEL="--v=4"
|
|||
|
||||
KUBE_MASTER="--master=${MASTER_ADDRESS}:8080"
|
||||
|
||||
# --leader-elect
|
||||
KUBE_LEADER_ELECT="--leader-elect"
|
||||
|
||||
# Add your own!
|
||||
KUBE_SCHEDULER_ARGS=""
|
||||
|
||||
|
@ -37,6 +40,7 @@ EOF
|
|||
KUBE_SCHEDULER_OPTS=" \${KUBE_LOGTOSTDERR} \\
|
||||
\${KUBE_LOG_LEVEL} \\
|
||||
\${KUBE_MASTER} \\
|
||||
\${KUBE_LEADER_ELECT} \\
|
||||
\${KUBE_SCHEDULER_ARGS}"
|
||||
|
||||
cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
|
||||
|
|
|
@ -35,12 +35,16 @@ KUBECTL_PATH=${KUBE_ROOT}/cluster/centos/binaries/kubectl
|
|||
KUBE_TEMP="~/kube_temp"
|
||||
|
||||
|
||||
# Must ensure that the following ENV vars are set
|
||||
function detect-master() {
|
||||
KUBE_MASTER=$MASTER
|
||||
KUBE_MASTER_IP=${MASTER#*@}
|
||||
echo "KUBE_MASTER_IP: ${KUBE_MASTER_IP}" 1>&2
|
||||
echo "KUBE_MASTER: ${MASTER}" 1>&2
|
||||
# Get master IP addresses and store in KUBE_MASTER_IP_ADDRESSES[]
|
||||
# Must ensure that the following ENV vars are set:
|
||||
# MASTERS
|
||||
# Populate KUBE_MASTER_IP_ADDRESSES[] with the IP portion of every entry
# in $MASTERS, and report both lists on stderr.
#
# Assumed vars:
#   MASTERS - space-separated list of <user>@<ip> master entries
function detect-masters() {
  KUBE_MASTER_IP_ADDRESSES=()
  local entry
  for entry in ${MASTERS}; do
    # Strip the "<user>@" prefix, keeping only the IP.
    KUBE_MASTER_IP_ADDRESSES+=("${entry#*@}")
  done
  # Diagnostics go to stderr so stdout stays clean for callers.
  echo "KUBE_MASTERS: ${MASTERS}" 1>&2
  echo "KUBE_MASTER_IP_ADDRESSES: [${KUBE_MASTER_IP_ADDRESSES[*]}]" 1>&2
}
|
||||
|
||||
# Get node IP addresses and store in KUBE_NODE_IP_ADDRESSES[]
|
||||
|
@ -99,7 +103,9 @@ function validate-cluster() {
|
|||
set +e
|
||||
"${KUBE_ROOT}/cluster/validate-cluster.sh"
|
||||
if [[ "$?" -ne "0" ]]; then
|
||||
troubleshoot-master
|
||||
for master in ${MASTERS}; do
|
||||
troubleshoot-master ${master}
|
||||
done
|
||||
for node in ${NODES}; do
|
||||
troubleshoot-node ${node}
|
||||
done
|
||||
|
@ -110,17 +116,27 @@ function validate-cluster() {
|
|||
|
||||
# Instantiate a kubernetes cluster
|
||||
function kube-up() {
|
||||
provision-master
|
||||
make-ca-cert
|
||||
|
||||
for node in ${NODES}; do
|
||||
provision-node ${node}
|
||||
local num_infra=0
|
||||
for master in ${MASTERS}; do
|
||||
provision-master "${master}" "infra${num_infra}"
|
||||
let ++num_infra
|
||||
done
|
||||
|
||||
detect-master
|
||||
for master in ${MASTERS}; do
|
||||
post-provision-master "${master}"
|
||||
done
|
||||
|
||||
for node in ${NODES}; do
|
||||
provision-node "${node}"
|
||||
done
|
||||
|
||||
detect-masters
|
||||
|
||||
# set CONTEXT and KUBE_SERVER values for create-kubeconfig() and get-password()
|
||||
export CONTEXT="centos"
|
||||
export KUBE_SERVER="http://${KUBE_MASTER_IP}:8080"
|
||||
export KUBE_SERVER="http://${MASTER_ADVERTISE_ADDRESS}:8080"
|
||||
source "${KUBE_ROOT}/cluster/common.sh"
|
||||
|
||||
# set kubernetes user and password
|
||||
|
@ -130,7 +146,10 @@ function kube-up() {
|
|||
|
||||
# Delete a kubernetes cluster
|
||||
function kube-down() {
|
||||
tear-down-master
|
||||
for master in ${MASTERS}; do
|
||||
tear-down-master ${master}
|
||||
done
|
||||
|
||||
for node in ${NODES}; do
|
||||
tear-down-node ${node}
|
||||
done
|
||||
|
@ -138,14 +157,14 @@ function kube-down() {
|
|||
|
||||
function troubleshoot-master() {
|
||||
# Troubleshooting on master if all required daemons are active.
|
||||
echo "[INFO] Troubleshooting on master ${MASTER}"
|
||||
echo "[INFO] Troubleshooting on master $1"
|
||||
local -a required_daemon=("kube-apiserver" "kube-controller-manager" "kube-scheduler")
|
||||
local daemon
|
||||
local daemon_status
|
||||
printf "%-24s %-10s \n" "PROCESS" "STATUS"
|
||||
for daemon in "${required_daemon[@]}"; do
|
||||
local rc=0
|
||||
kube-ssh "${MASTER}" "sudo systemctl is-active ${daemon}" >/dev/null 2>&1 || rc="$?"
|
||||
kube-ssh "${1}" "sudo systemctl is-active ${daemon}" >/dev/null 2>&1 || rc="$?"
|
||||
if [[ "${rc}" -ne "0" ]]; then
|
||||
daemon_status="inactive"
|
||||
else
|
||||
|
@ -178,19 +197,19 @@ function troubleshoot-node() {
|
|||
|
||||
# Clean up on master
|
||||
function tear-down-master() {
|
||||
echo "[INFO] tear-down-master on ${MASTER}"
|
||||
echo "[INFO] tear-down-master on $1"
|
||||
for service_name in etcd kube-apiserver kube-controller-manager kube-scheduler ; do
|
||||
service_file="/usr/lib/systemd/system/${service_name}.service"
|
||||
kube-ssh "$MASTER" " \
|
||||
kube-ssh "$1" " \
|
||||
if [[ -f $service_file ]]; then \
|
||||
sudo systemctl stop $service_name; \
|
||||
sudo systemctl disable $service_name; \
|
||||
sudo rm -f $service_file; \
|
||||
fi"
|
||||
done
|
||||
kube-ssh "${MASTER}" "sudo rm -rf /opt/kubernetes"
|
||||
kube-ssh "${MASTER}" "sudo rm -rf ${KUBE_TEMP}"
|
||||
kube-ssh "${MASTER}" "sudo rm -rf /var/lib/etcd"
|
||||
kube-ssh "${1}" "sudo rm -rf /opt/kubernetes"
|
||||
kube-ssh "${1}" "sudo rm -rf ${KUBE_TEMP}"
|
||||
kube-ssh "${1}" "sudo rm -rf /var/lib/etcd"
|
||||
}
|
||||
|
||||
# Clean up on node
|
||||
|
@ -210,46 +229,69 @@ echo "[INFO] tear-down-node on $1"
|
|||
kube-ssh "$1" "sudo rm -rf ${KUBE_TEMP}"
|
||||
}
|
||||
|
||||
# Generate the CA certificates for k8s components
|
||||
function make-ca-cert() {
|
||||
echo "[INFO] make-ca-cert"
|
||||
bash "${ROOT}/../saltbase/salt/generate-cert/make-ca-cert.sh" "${MASTER_ADVERTISE_IP}" "IP:${MASTER_ADVERTISE_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local"
|
||||
}
|
||||
|
||||
# Provision master
|
||||
#
|
||||
# Assumed vars:
|
||||
# MASTER
|
||||
# $1 (master)
|
||||
# KUBE_TEMP
|
||||
# ETCD_SERVERS
|
||||
# ETCD_INITIAL_CLUSTER
|
||||
# SERVICE_CLUSTER_IP_RANGE
|
||||
# MASTER_ADVERTISE_ADDRESS
|
||||
function provision-master() {
|
||||
echo "[INFO] Provision master on ${MASTER}"
|
||||
local master_ip=${MASTER#*@}
|
||||
ensure-setup-dir ${MASTER}
|
||||
echo "[INFO] Provision master on $1"
|
||||
local master="$1"
|
||||
local master_ip="${master#*@}"
|
||||
local etcd_name="$2"
|
||||
ensure-setup-dir "${master}"
|
||||
|
||||
# scp -r ${SSH_OPTS} master config-default.sh copy-files.sh util.sh "${MASTER}:${KUBE_TEMP}"
|
||||
kube-scp ${MASTER} "${ROOT}/../saltbase/salt/generate-cert/make-ca-cert.sh ${ROOT}/binaries/master ${ROOT}/master ${ROOT}/config-default.sh ${ROOT}/util.sh" "${KUBE_TEMP}"
|
||||
kube-ssh "${MASTER}" " \
|
||||
rm -rf /opt/kubernetes/bin; \
|
||||
kube-scp "${master}" "${ROOT}/ca-cert ${ROOT}/binaries/master ${ROOT}/master ${ROOT}/config-default.sh ${ROOT}/util.sh" "${KUBE_TEMP}"
|
||||
kube-ssh "${master}" " \
|
||||
sudo rm -rf /opt/kubernetes/bin; \
|
||||
sudo cp -r ${KUBE_TEMP}/master/bin /opt/kubernetes; \
|
||||
sudo mkdir -p /srv/kubernetes; sudo cp -f ${KUBE_TEMP}/ca-cert/* /srv/kubernetes; \
|
||||
sudo chmod -R +x /opt/kubernetes/bin; \
|
||||
sudo ln -s /opt/kubernetes/bin/* /usr/local/bin/; \
|
||||
sudo bash ${KUBE_TEMP}/make-ca-cert.sh ${master_ip} IP:${master_ip},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local; \
|
||||
sudo bash ${KUBE_TEMP}/master/scripts/etcd.sh; \
|
||||
sudo ln -sf /opt/kubernetes/bin/* /usr/local/bin/; \
|
||||
sudo bash ${KUBE_TEMP}/master/scripts/etcd.sh ${etcd_name} ${master_ip} ${ETCD_INITIAL_CLUSTER}; \
|
||||
sudo bash ${KUBE_TEMP}/master/scripts/apiserver.sh ${master_ip} ${ETCD_SERVERS} ${SERVICE_CLUSTER_IP_RANGE} ${ADMISSION_CONTROL}; \
|
||||
sudo bash ${KUBE_TEMP}/master/scripts/controller-manager.sh ${master_ip}; \
|
||||
sudo bash ${KUBE_TEMP}/master/scripts/flannel.sh ${ETCD_SERVERS} ${FLANNEL_NET}; \
|
||||
sudo bash ${KUBE_TEMP}/master/scripts/scheduler.sh ${master_ip}"
|
||||
sudo bash ${KUBE_TEMP}/master/scripts/controller-manager.sh ${MASTER_ADVERTISE_ADDRESS}; \
|
||||
sudo bash ${KUBE_TEMP}/master/scripts/scheduler.sh ${MASTER_ADVERTISE_ADDRESS}"
|
||||
}
|
||||
|
||||
# Post-provision master, run after all masters were provisioned
|
||||
#
|
||||
# Assumed vars:
|
||||
# $1 (master)
|
||||
# KUBE_TEMP
|
||||
# ETCD_SERVERS
|
||||
# FLANNEL_NET
|
||||
# Post-provision one master; must run only after ALL masters have been
# provisioned (flannel needs the etcd endpoints, and post-etcd.sh switches
# the node's etcd initial-cluster-state from "new" to "existing").
#
# Assumed vars:
#   $1 (master, as <user>@<ip>)
#   KUBE_TEMP
#   ETCD_SERVERS
#   FLANNEL_NET
function post-provision-master() {
  local target="$1"
  echo "[INFO] Post provision master on ${target}"
  kube-ssh "${target}" " \
    sudo bash ${KUBE_TEMP}/master/scripts/flannel.sh ${ETCD_SERVERS} ${FLANNEL_NET}; \
    sudo bash ${KUBE_TEMP}/master/scripts/post-etcd.sh"
}
|
||||
|
||||
# Provision node
|
||||
#
|
||||
# Assumed vars:
|
||||
# $1 (node)
|
||||
# MASTER
|
||||
# KUBE_TEMP
|
||||
# ETCD_SERVERS
|
||||
# FLANNEL_NET
|
||||
# MASTER_ADVERTISE_ADDRESS
|
||||
# DOCKER_OPTS
|
||||
# DNS_SERVER_IP
|
||||
# DNS_DOMAIN
|
||||
function provision-node() {
|
||||
echo "[INFO] Provision node on $1"
|
||||
local master_ip=${MASTER#*@}
|
||||
local node=$1
|
||||
local node_ip=${node#*@}
|
||||
local dns_ip=${DNS_SERVER_IP#*@}
|
||||
|
@ -264,8 +306,8 @@ function provision-node() {
|
|||
sudo ln -s /opt/kubernetes/bin/* /usr/local/bin/; \
|
||||
sudo bash ${KUBE_TEMP}/node/scripts/flannel.sh ${ETCD_SERVERS} ${FLANNEL_NET}; \
|
||||
sudo bash ${KUBE_TEMP}/node/scripts/docker.sh \"${DOCKER_OPTS}\"; \
|
||||
sudo bash ${KUBE_TEMP}/node/scripts/kubelet.sh ${master_ip} ${node_ip} ${dns_ip} ${dns_domain}; \
|
||||
sudo bash ${KUBE_TEMP}/node/scripts/proxy.sh ${master_ip}"
|
||||
sudo bash ${KUBE_TEMP}/node/scripts/kubelet.sh ${MASTER_ADVERTISE_ADDRESS} ${node_ip} ${dns_ip} ${dns_domain}; \
|
||||
sudo bash ${KUBE_TEMP}/node/scripts/proxy.sh ${MASTER_ADVERTISE_ADDRESS}"
|
||||
}
|
||||
|
||||
# Create dirs that'll be used during setup on target machine.
|
||||
|
|
|
@ -5,6 +5,8 @@ cluster/aws/templates/configure-vm-aws.sh: api_servers: '${API_SERVERS}'
|
|||
cluster/aws/templates/configure-vm-aws.sh: env-to-grains "hostname_override"
|
||||
cluster/aws/templates/configure-vm-aws.sh: env-to-grains "runtime_config"
|
||||
cluster/aws/templates/configure-vm-aws.sh: kubelet_api_servers: '${KUBELET_APISERVER}'
|
||||
cluster/centos/config-default.sh: etcd_servers="${prefix}http://${master_ip}:2379"
|
||||
cluster/centos/config-default.sh: local etcd_servers=""
|
||||
cluster/centos/util.sh: local node_ip=${node#*@}
|
||||
cluster/gce/configure-vm.sh: advertise_address: '${EXTERNAL_IP}'
|
||||
cluster/gce/configure-vm.sh: api_servers: '${KUBERNETES_MASTER_NAME}'
|
||||
|
|
Loading…
Reference in New Issue