mirror of https://github.com/k3s-io/k3s
Implemented KUBE_DELETE_NODES flag in the kube-down script. It prevents removal of nodes when shutting down an HA master replica.
parent 8f4c0bbcb7
commit 58c8992590
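As the diff below shows, the flag defaults to true, and only the literal string "false" disables node deletion. A minimal usage sketch, assuming the upstream cluster/kube-down.sh entry point that sources these GCE scripts (the invocation is illustrative, not taken from this commit):

  # Tear down this HA master replica while keeping the cluster's nodes,
  # instance groups, and instance templates intact:
  KUBE_DELETE_NODES=false ./cluster/kube-down.sh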
@@ -34,6 +34,7 @@ NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
 REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true}
 PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
 PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false}
+KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true}
 
 MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
 NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
@@ -35,6 +35,7 @@ REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true}
 KUBE_APISERVER_REQUEST_TIMEOUT=300
 PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
 PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false}
+KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true}
 
 MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
 NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
@@ -1127,34 +1127,36 @@ function kube-down() {
   echo "Bringing down cluster"
   set +e  # Do not stop on error
 
-  # Get the name of the managed instance group template before we delete the
-  # managed instance group. (The name of the managed instance group template may
-  # change during a cluster upgrade.)
-  local templates=$(get-template "${PROJECT}")
+  if [[ "${KUBE_DELETE_NODES:-}" != "false" ]]; then
+    # Get the name of the managed instance group template before we delete the
+    # managed instance group. (The name of the managed instance group template may
+    # change during a cluster upgrade.)
+    local templates=$(get-template "${PROJECT}")
 
-  for group in ${INSTANCE_GROUPS[@]:-}; do
-    if gcloud compute instance-groups managed describe "${group}" --project "${PROJECT}" --zone "${ZONE}" &>/dev/null; then
-      gcloud compute instance-groups managed delete \
-        --project "${PROJECT}" \
-        --quiet \
-        --zone "${ZONE}" \
-        "${group}" &
-    fi
-  done
+    for group in ${INSTANCE_GROUPS[@]:-}; do
+      if gcloud compute instance-groups managed describe "${group}" --project "${PROJECT}" --zone "${ZONE}" &>/dev/null; then
+        gcloud compute instance-groups managed delete \
+          --project "${PROJECT}" \
+          --quiet \
+          --zone "${ZONE}" \
+          "${group}" &
+      fi
+    done
 
-  # Wait for last batch of jobs
-  kube::util::wait-for-jobs || {
-    echo -e "Failed to delete instance group(s)." >&2
-  }
+    # Wait for last batch of jobs
+    kube::util::wait-for-jobs || {
+      echo -e "Failed to delete instance group(s)." >&2
+    }
 
-  for template in ${templates[@]:-}; do
-    if gcloud compute instance-templates describe --project "${PROJECT}" "${template}" &>/dev/null; then
-      gcloud compute instance-templates delete \
-        --project "${PROJECT}" \
-        --quiet \
-        "${template}"
-    fi
-  done
+    for template in ${templates[@]:-}; do
+      if gcloud compute instance-templates describe --project "${PROJECT}" "${template}" &>/dev/null; then
+        gcloud compute instance-templates delete \
+          --project "${PROJECT}" \
+          --quiet \
+          "${template}"
+      fi
+    done
+  fi
 
   local -r REPLICA_NAME="$(get-replica-name)"
@@ -1254,23 +1256,25 @@ function kube-down() {
     fi
   fi
 
-  # Find out what minions are running.
-  local -a minions
-  minions=( $(gcloud compute instances list \
-                --project "${PROJECT}" --zones "${ZONE}" \
-                --regexp "${NODE_INSTANCE_PREFIX}-.+" \
-                --format='value(name)') )
-  # If any minions are running, delete them in batches.
-  while (( "${#minions[@]}" > 0 )); do
-    echo Deleting nodes "${minions[*]::${batch}}"
-    gcloud compute instances delete \
-      --project "${PROJECT}" \
-      --quiet \
-      --delete-disks boot \
-      --zone "${ZONE}" \
-      "${minions[@]::${batch}}"
-    minions=( "${minions[@]:${batch}}" )
-  done
+  if [[ "${KUBE_DELETE_NODES:-}" != "false" ]]; then
+    # Find out what minions are running.
+    local -a minions
+    minions=( $(gcloud compute instances list \
+                  --project "${PROJECT}" --zones "${ZONE}" \
+                  --regexp "${NODE_INSTANCE_PREFIX}-.+" \
+                  --format='value(name)') )
+    # If any minions are running, delete them in batches.
+    while (( "${#minions[@]}" > 0 )); do
+      echo Deleting nodes "${minions[*]::${batch}}"
+      gcloud compute instances delete \
+        --project "${PROJECT}" \
+        --quiet \
+        --delete-disks boot \
+        --zone "${ZONE}" \
+        "${minions[@]::${batch}}"
+      minions=( "${minions[@]:${batch}}" )
+    done
+  fi
 
   # Delete routes.
   local -a routes
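For reference, the batched deletion above relies on bash array slicing: "${arr[@]::N}" expands to the first N elements and "${arr[@]:N}" to the remainder. A standalone sketch of that loop (the node names and batch size here are illustrative):

  batch=2
  minions=( node-1 node-2 node-3 node-4 node-5 )
  while (( "${#minions[@]}" > 0 )); do
    echo "Deleting nodes: ${minions[*]::${batch}}"   # first ${batch} names
    minions=( "${minions[@]:${batch}}" )             # drop them, repeat
  done
  # Prints the five names in batches of 2, 2, and 1.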