mirror of https://github.com/k3s-io/k3s
commit 2775b9e0de

cluster/gce/configure-vm.sh
@@ -134,7 +134,9 @@ install-salt() {
   URL_BASE="https://storage.googleapis.com/kubernetes-release/salt"
 
   for deb in "${DEBS[@]}"; do
-    download-or-bust "${URL_BASE}/${deb}"
+    if [ ! -e "${deb}" ]; then
+      download-or-bust "${URL_BASE}/${deb}"
+    fi
   done
 
   # Based on
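This guard makes install-salt safe to re-run on a reboot or push: the .deb filenames carry version information, so a file already on disk can be trusted and the fetch skipped. download-or-bust is defined elsewhere in this script; purely as an illustration of the retry-until-success contract its name implies (the body below is an assumption, not the real implementation):

    # Hypothetical sketch of download-or-bust; the real one lives elsewhere
    # in configure-vm.sh. Retry the fetch until curl succeeds.
    download-or-bust() {
      local -r url="$1"
      until curl --fail --silent --remote-name "${url}"; do
        echo "Failed to download ${url}; retrying." >&2
        sleep 5
      done
    }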
@@ -152,7 +154,7 @@ EOF
 
   for deb in "${DEBS[@]}"; do
     echo "== Installing ${deb}, ignore dependency complaints (will fix later) =="
-    dpkg --force-depends -i "${deb}"
+    dpkg --skip-same-version --force-depends -i "${deb}"
   done
 
   # This will install any of the unmet dependencies from above.
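Adding --skip-same-version makes the install loop idempotent as well: dpkg becomes a no-op when the exact version of the package is already installed, so a push that re-runs this script does not churn packages. For example (the .deb name here is hypothetical):

    dpkg --skip-same-version --force-depends -i salt-minion_2014.1.13_all.deb  # first run: installs
    dpkg --skip-same-version --force-depends -i salt-minion_2014.1.13_all.deb  # second run: skipped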
@@ -172,6 +174,7 @@ EOF
 stop-salt-minion() {
   # This ensures it on next reboot
   echo manual > /etc/init/salt-minion.override
+  update-rc.d salt-minion disable
 
   if service salt-minion status >/dev/null; then
     echo "salt-minion started in defiance of runlevel policy, aborting startup." >&2
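The two lines cover both init mechanisms the image might use. Annotated (same commands as in the hunk; the comments are editorial):

    echo manual > /etc/init/salt-minion.override  # upstart: job may only be started manually
    update-rc.d salt-minion disable               # sysvinit: rewrite rc?.d links so there is no autostart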
@@ -205,18 +208,21 @@ mount-master-pd() {
   mkdir -p /mnt/master-pd/srv/kubernetes
   # Contains the cluster's initial config parameters and auth tokens
   mkdir -p /mnt/master-pd/srv/salt-overlay
-  ln -s /mnt/master-pd/var/etcd /var/etcd
-  ln -s /mnt/master-pd/srv/kubernetes /srv/kubernetes
-  ln -s /mnt/master-pd/srv/salt-overlay /srv/salt-overlay
+
+  ln -s -f /mnt/master-pd/var/etcd /var/etcd
+  ln -s -f /mnt/master-pd/srv/kubernetes /srv/kubernetes
+  ln -s -f /mnt/master-pd/srv/salt-overlay /srv/salt-overlay
 
   # This is a bit of a hack to get around the fact that salt has to run after the
   # PD and mounted directory are already set up. We can't give ownership of the
   # directory to etcd until the etcd user and group exist, but they don't exist
   # until salt runs if we don't create them here. We could alternatively make the
   # permissions on the directory more permissive, but this seems less bad.
-  useradd -s /sbin/nologin -d /var/etcd etcd
-  chown etcd /mnt/master-pd/var/etcd
-  chgrp etcd /mnt/master-pd/var/etcd
+  if ! id etcd &>/dev/null; then
+    useradd -s /sbin/nologin -d /var/etcd etcd
+  fi
+  chown -R etcd /mnt/master-pd/var/etcd
+  chgrp -R etcd /mnt/master-pd/var/etcd
 }
 
 # Create the overlay files for the salt tree. We create these in a separate
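Switching to ln -s -f and guarding useradd behind id lets mount-master-pd run repeatedly: re-creating an existing symlink and re-adding an existing user were the two steps that previously failed on a second invocation, and -R makes the ownership fix cover files etcd has since written. One subtlety worth flagging: with GNU ln, -f on a destination that already resolves to a directory creates the link inside that directory rather than replacing it; -T (or -n) avoids that. A stricter hedged variant (safe-link is an invented name):

    # Hypothetical helper, assuming GNU coreutils ln: -T treats the
    # destination as the link name itself, so a pre-existing real directory
    # fails loudly instead of silently gaining a nested symlink.
    safe-link() {
      ln -s -f -T "$1" "$2"
    }
    safe-link /mnt/master-pd/var/etcd /var/etcd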
@@ -282,6 +288,14 @@ function create-salt-auth() {
 }
 
 function download-release() {
+  # TODO(zmerlynn): We should optimize for the reboot case here, but
+  # unlike the .debs, we don't have version information in the
+  # filenames here, nor do the URLs even provide useful information in
+  # the dev environment case (because they're just a project
+  # bucket). We should probably push a hash into the kube-env, and
+  # store it when we download, and then when it's different infer that
+  # a push occurred (otherwise it's a simple reboot).
+
   echo "Downloading binary release tar ($SERVER_BINARY_TAR_URL)"
   download-or-bust "$SERVER_BINARY_TAR_URL"
 
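The TODO describes a scheme the code does not implement yet. A hedged sketch of what it might look like (SERVER_BINARY_TAR_HASH and the cache path are invented names; kube-env would carry the expected hash, and the hash stored at download time tells a push apart from a plain reboot):

    # Hypothetical realization of the TODO's hash scheme.
    if [[ "${SERVER_BINARY_TAR_HASH:-}" == "$(cat /var/cache/kube-server.sha1 2>/dev/null)" ]]; then
      echo "Same release as the last download; skipping (plain reboot)"
    else
      download-or-bust "${SERVER_BINARY_TAR_URL}"
      echo "${SERVER_BINARY_TAR_HASH}" > /var/cache/kube-server.sha1
    fi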
cluster/gce/util.sh

@@ -390,20 +390,23 @@ function create-node-template {
 
 # Robustly try to add metadata on an instance.
 # $1: The name of the instace.
-# $2: The metadata key=value pair to add.
+# $2...$n: The metadata key=value pairs to add.
 function add-instance-metadata {
+  local -r instance=$1
+  shift 1
+  local -r kvs=( "$@" )
+
   detect-project
   local attempt=0
   while true; do
-    if ! gcloud compute instances add-metadata "$1" \
+    if ! gcloud compute instances add-metadata "${instance}" \
       --project "${PROJECT}" \
       --zone "${ZONE}" \
-      --metadata "$2"; then
+      --metadata "${kvs[@]}"; then
        if (( attempt > 5 )); then
-          echo -e "${color_red}Failed to add instance metadata in $1 ${color_norm}"
+          echo -e "${color_red}Failed to add instance metadata in ${instance} ${color_norm}"
          exit 2
        fi
-        echo -e "${color_yellow}Attempt $(($attempt+1)) failed to add metadata in $1. Retrying.${color_norm}"
+        echo -e "${color_yellow}Attempt $(($attempt+1)) failed to add metadata in ${instance}. Retrying.${color_norm}"
        attempt=$(($attempt+1))
     else
       break
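The rewrite generalizes the fixed $1/$2 signature to an instance name plus any number of key=value pairs. Capturing "$@" in an array after the shift and expanding it as "${kvs[@]}" keeps each pair a single word even when a value contains spaces. The pattern in isolation (show-args is an invented name):

    # Demonstration of the shift-then-array idiom used above.
    show-args() {
      local -r first=$1
      shift 1
      local -r rest=( "$@" )
      printf 'first=%s\n' "${first}"
      printf 'pair=%s\n' "${rest[@]}"   # the format is reused per element
    }
    show-args my-instance "a=1" "b=two words"
    # first=my-instance
    # pair=a=1
    # pair=b=two words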
@@ -412,21 +415,25 @@ function add-instance-metadata {
 }
 
 # Robustly try to add metadata on an instance, from a file.
-# $1: The name of the instace.
-# $2: The metadata key=file pair to add.
+# $1: The name of the instance.
+# $2...$n: The metadata key=file pairs to add.
 function add-instance-metadata-from-file {
+  local -r instance=$1
+  shift 1
+  local -r kvs=( "$@" )
   detect-project
   local attempt=0
   while true; do
-    if ! gcloud compute instances add-metadata "$1" \
+    echo "${kvs[@]}"
+    if ! gcloud compute instances add-metadata "${instance}" \
       --project "${PROJECT}" \
       --zone "${ZONE}" \
-      --metadata-from-file "$2"; then
+      --metadata-from-file "${kvs[@]}"; then
        if (( attempt > 5 )); then
-          echo -e "${color_red}Failed to add instance metadata in $1 ${color_norm}"
+          echo -e "${color_red}Failed to add instance metadata in ${instance} ${color_norm}"
          exit 2
        fi
-        echo -e "${color_yellow}Attempt $(($attempt+1)) failed to add metadata in $1. Retrying.${color_norm}"
+        echo -e "${color_yellow}Attempt $(($attempt+1)) failed to add metadata in ${instance}. Retrying.${color_norm}"
        attempt=$(($attempt+1))
     else
       break
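Same generalization as add-instance-metadata, with one oddity: the added echo "${kvs[@]}" inside the retry loop prints the key=file pairs on every attempt and reads like a leftover debug trace, since the plain-metadata variant above has no counterpart. The callers below use the new variadic form like so (taken from the kube-push hunk that follows):

    # Two key=file pairs become two metadata entries whose values are the
    # contents of the named files.
    add-instance-metadata-from-file "${KUBE_MASTER}" \
      "kube-env=${KUBE_TEMP}/master-kube-env.yaml" \
      "startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh"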
@@ -851,8 +858,10 @@ function kube-push {
   find-release-tars
   upload-server-tars
 
   echo "Updating master metadata ..."
   write-master-env
-  add-instance-metadata-from-file "${KUBE_MASTER}" "kube-env=${KUBE_TEMP}/master-kube-env.yaml"
+  add-instance-metadata-from-file "${KUBE_MASTER}" "kube-env=${KUBE_TEMP}/master-kube-env.yaml" "startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh"
+
+  echo "Pushing to master (log at ${OUTPUT}/kube-push-${KUBE_MASTER}.log) ..."
+  cat ${KUBE_ROOT}/cluster/gce/configure-vm.sh | gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" --zone "${ZONE}" "${KUBE_MASTER}" --command "sudo bash -s -- --push" &> ${OUTPUT}/kube-push-"${KUBE_MASTER}".log
 
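kube-push now does two things: it updates the startup-script metadata so new VMs and reboots pick up the current configure-vm.sh, and it streams the local copy of that script to the running master over ssh. bash -s reads the script from stdin, and the arguments after -- become its positional parameters, which is how --push reaches the script's argument handling. The idiom in miniature:

    # Stdin-plus-args: inside the piped script, $1 is "--push".
    echo 'echo "mode: $1"' | bash -s -- --push
    # prints: mode: --push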
@@ -897,7 +906,7 @@ function kube-update-nodes() {
   echo "Updating node metadata... "
   write-node-env
   for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
-    add-instance-metadata-from-file "${MINION_NAMES[$i]}" "kube-env=${KUBE_TEMP}/node-kube-env.yaml" &
+    add-instance-metadata-from-file "${MINION_NAMES[$i]}" "kube-env=${KUBE_TEMP}/node-kube-env.yaml" "startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh" &
   done
   wait-for-jobs
   echo "Done"
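Each node's metadata update is backgrounded with &, so the pushes run in parallel across nodes, and wait-for-jobs (defined elsewhere in the cluster scripts) collects them. Purely as an illustration of the collect step, assuming the intent is to surface any failed job (the body is an assumption, not the real implementation):

    # Hypothetical sketch of wait-for-jobs: wait on every background job
    # and fail if any of them failed.
    wait-for-jobs() {
      local fail=0
      local job
      for job in $(jobs -p); do
        wait "${job}" || fail=$((fail + 1))
      done
      if (( fail != 0 )); then
        echo "${fail} background job(s) failed" >&2
        return 1
      fi
    }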