GCE: Allow for reuse of master

This is for internal use at the moment, for testing Ubernetes Lite, but
arguably makes the code a little cleaner.

Also rename KUBE_SHARE_MASTER -> KUBE_USE_EXISTING_MASTER
Justin Santa Barbara 2015-11-29 14:38:03 -05:00
parent ad827c6b62
commit 2958ea253a
3 changed files with 41 additions and 9 deletions
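
For context: the intended workflow is to run kube-up once to create the cluster, then run it again with the renamed flag to add nodes that attach to the existing master. A minimal sketch, assuming the standard cluster/kube-up.sh entry point (any zone or instance-type overrides you would combine with it are illustrative, not part of this commit):

    # First run: creates the master and the initial nodes as usual.
    cluster/kube-up.sh

    # Later runs: detect and reuse the existing master, creating only nodes,
    # e.g. with a different instance type or in a different subnet/AZ.
    KUBE_USE_EXISTING_MASTER=true cluster/kube-up.sh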


@@ -20,7 +20,7 @@
 # The intent is to allow experimentation/advanced functionality before we
 # are ready to commit to supporting it.
 # Experimental functionality:
-#   KUBE_SHARE_MASTER=true
+#   KUBE_USE_EXISTING_MASTER=true
 #     Detect and reuse an existing master; useful if you want to
 #     create more nodes, perhaps with a different instance type or in
 #     a different subnet/AZ
@@ -808,8 +808,8 @@ function kube-up {
   # HTTPS to the master is allowed (for API access)
   authorize-security-group-ingress "${MASTER_SG_ID}" "--protocol tcp --port 443 --cidr 0.0.0.0/0"
-  # KUBE_SHARE_MASTER is used to add minions to an existing master
-  if [[ "${KUBE_SHARE_MASTER:-}" == "true" ]]; then
+  # KUBE_USE_EXISTING_MASTER is used to add minions to an existing master
+  if [[ "${KUBE_USE_EXISTING_MASTER:-}" == "true" ]]; then
     # Detect existing master
     detect-master


@@ -575,6 +575,22 @@ function kube-up {
   find-release-tars
   upload-server-tars
+  if [[ ${KUBE_USE_EXISTING_MASTER:-} == "true" ]]; then
+    create-nodes
+    create-autoscaler
+  else
+    check-existing
+    create-network
+    create-master
+    create-nodes-firewall
+    create-nodes-template
+    create-nodes
+    create-autoscaler
+    check-cluster
+  fi
+}
+
+function check-existing() {
   local running_in_terminal=false
   # May be false if tty is not allocated (for example with ssh -T).
   if [ -t 1 ]; then
@@ -595,7 +611,9 @@ function kube-up {
       fi
     fi
   fi
+}
+
+function create-network() {
   if ! gcloud compute networks --project "${PROJECT}" describe "${NETWORK}" &>/dev/null; then
     echo "Creating new network: ${NETWORK}"
     # The network needs to be created synchronously or we have a race. The
@@ -618,7 +636,9 @@ function kube-up {
       --source-ranges "0.0.0.0/0" \
       --allow "tcp:22" &
   fi
+}
+
+function create-master() {
   echo "Starting master and configuring firewalls"
   gcloud compute firewall-rules create "${MASTER_NAME}-https" \
     --project "${PROJECT}" \
@@ -663,7 +683,9 @@ function kube-up {
   create-certs "${MASTER_RESERVED_IP}"
   create-master-instance "${MASTER_RESERVED_IP}" &
+}
+
+function create-nodes-firewall() {
   # Create a single firewall rule for all minions.
   create-firewall-rule "${NODE_TAG}-all" "${CLUSTER_IP_RANGE}" "${NODE_TAG}" &
@@ -676,7 +698,9 @@ function kube-up {
   kube::util::wait-for-jobs || {
     echo -e "${color_red}${fail} commands failed.${color_norm}" >&2
   }
+}
+
+function create-nodes-template() {
   echo "Creating minions."
   # TODO(zmerlynn): Refactor setting scope flags.
@@ -690,8 +714,12 @@ function kube-up {
   write-node-env
   local template_name="${NODE_INSTANCE_PREFIX}-template"
   create-node-instance-template $template_name
+}
+
+function create-nodes() {
+  local template_name="${NODE_INSTANCE_PREFIX}-template"
   local defaulted_max_instances_per_mig=${MAX_INSTANCES_PER_MIG:-500}
@@ -731,10 +759,9 @@ function kube-up {
       "${NODE_INSTANCE_PREFIX}-group" \
       --zone "${ZONE}" \
       --project "${PROJECT}" || true;
+}
-  detect-node-names
-  detect-master
+
+function create-autoscaler() {
   # Create autoscaler for nodes if requested
   if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
     METRICS=""
@@ -764,6 +791,11 @@ function kube-up {
     gcloud compute instance-groups managed set-autoscaling "${NODE_INSTANCE_PREFIX}-group" --zone "${ZONE}" --project "${PROJECT}" \
       --min-num-replicas "${last_min_instances}" --max-num-replicas "${last_max_instances}" ${METRICS} || true
   fi
+}
+
+function check-cluster() {
+  detect-node-names
+  detect-master
   echo "Waiting up to ${KUBE_CLUSTER_INITIALIZATION_TIMEOUT} seconds for cluster initialization."
   echo
@@ -845,7 +877,7 @@ function kube-down {
   fi
   # Get the name of the managed instance group template before we delete the
-  # managed instange group. (The name of the managed instnace group template may
+  # managed instance group. (The name of the managed instance group template may
   # change during a cluster upgrade.)
   local template=$(get-template "${PROJECT}" "${ZONE}" "${NODE_INSTANCE_PREFIX}-group")


@@ -71,7 +71,7 @@ make multiple zones better supported.
 For the initial implementation, kube-up must be run multiple times, once for
 each zone. The first kube-up will take place as normal, but then for each
 additional zone the user must run kube-up again, specifying
-`KUBE_SHARE_MASTER=true` and `KUBE_SUBNET_CIDR=172.20.x.0/24`. This will then
+`KUBE_USE_EXISTING_MASTER=true` and `KUBE_SUBNET_CIDR=172.20.x.0/24`. This will then
 create additional nodes in a different zone, but will register them with the
 existing master.
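
Concretely, the per-zone invocations would look something like this sketch (the KUBE_SUBNET_CIDR values follow the 172.20.x.0/24 pattern above; how the target zone itself is selected is provider-specific and omitted here):

    # Zone 1: normal kube-up, creates the master and the first set of nodes.
    KUBE_SUBNET_CIDR=172.20.1.0/24 cluster/kube-up.sh

    # Each additional zone: reuse the master, add nodes in a fresh subnet.
    KUBE_USE_EXISTING_MASTER=true KUBE_SUBNET_CIDR=172.20.2.0/24 cluster/kube-up.sh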