kube-up: Finish refactor of AWS to reuse GCE script

We override key functions in the GCE script with AWS equivalents.
Justin Santa Barbara 2016-02-04 09:36:30 -05:00
parent 9e1d764524
commit fd5c44c72e
12 changed files with 299 additions and 593 deletions
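
The refactor leans on bash's last-definition-wins rule for functions: the GCE script is emitted first and the AWS overrides are concatenated after it, so any function defined in both ends up with its AWS body. A minimal sketch of the mechanism, with hypothetical names:

# base.sh -- the GCE side, emitted first
greet() { echo "hello from GCE"; }
# overrides.sh -- the AWS side, concatenated second; this definition wins
greet() { echo "hello from AWS"; }
greet   # prints: hello from AWS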

cluster/aws/templates/common.sh

@@ -1,93 +0,0 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
echo "== Refreshing package database =="
until apt-get update; do
echo "== apt-get update failed, retrying =="
sleep 5
done
function apt-get-install {
# Forcibly install packages (options borrowed from Salt logs).
until apt-get -q -y -o DPkg::Options::=--force-confold -o DPkg::Options::=--force-confdef install $@; do
echo "== install of packages $@ failed, retrying =="
sleep 5
done
}
apt-get-install curl
# Retry a download until we get it.
#
# $1 is the URL to download
download-or-bust() {
local -r url="$1"
local -r file="${url##*/}"
rm -f "$file"
until [[ -e "${1##*/}" ]]; do
echo "Downloading file ($1)"
curl --ipv4 -Lo "$file" --connect-timeout 20 --retry 6 --retry-delay 10 "$1"
md5sum "$file"
done
}
# Install salt from GCS. See README.md for instructions on how to update these
# debs.
install-salt() {
local salt_mode="$1"
if dpkg -s salt-minion &>/dev/null; then
echo "== SaltStack already installed, skipping install step =="
return
fi
mkdir -p /var/cache/salt-install
cd /var/cache/salt-install
DEBS=(
libzmq3_3.2.3+dfsg-1~bpo70~dst+1_amd64.deb
python-zmq_13.1.0-1~bpo70~dst+1_amd64.deb
salt-common_2014.1.13+ds-1~bpo70+1_all.deb
)
if [[ "${salt_mode}" == "master" ]]; then
DEBS+=( salt-master_2014.1.13+ds-1~bpo70+1_all.deb )
fi
DEBS+=( salt-minion_2014.1.13+ds-1~bpo70+1_all.deb )
URL_BASE="https://storage.googleapis.com/kubernetes-release/salt"
for deb in "${DEBS[@]}"; do
if [ ! -e "${deb}" ]; then
download-or-bust "${URL_BASE}/${deb}"
fi
done
for deb in "${DEBS[@]}"; do
echo "== Installing ${deb}, ignore dependency complaints (will fix later) =="
dpkg --skip-same-version --force-depends -i "${deb}"
done
# This will install any of the unmet dependencies from above.
echo "== Installing unmet dependencies =="
until apt-get install -f -y; do
echo "== apt-get install failed, retrying =="
sleep 5
done
# Log a timestamp
echo "== Finished installing Salt =="
}
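
download-or-bust derives the local filename by stripping everything up to the last slash of the URL, then loops until that file exists on disk. Usage is a single call, e.g. with one of the Salt debs above:

download-or-bust "${URL_BASE}/salt-common_2014.1.13+ds-1~bpo70+1_all.deb"
# retries until salt-common_2014.1.13+ds-1~bpo70+1_all.deb exists in the current directory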

cluster/aws/templates/configure-vm-aws.sh

@@ -0,0 +1,125 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Note: these functions override functions in the GCE configure-vm script
# We include the GCE script first, and this one second.
ensure-basic-networking() {
:
}
ensure-packages() {
apt-get-install curl
# For reading kube_env.yaml
apt-get-install python-yaml
# TODO: Where to get safe_format_and_mount?
mkdir -p /usr/share/google
cd /usr/share/google
download-or-bust https://raw.githubusercontent.com/GoogleCloudPlatform/compute-image-packages/82b75f314528b90485d5239ab5d5495cc22d775f/google-startup-scripts/usr/share/google/safe_format_and_mount
chmod +x safe_format_and_mount
}
set-kube-env() {
local kube_env_yaml="${INSTALL_DIR}/kube_env.yaml"
# kube-env has all the environment variables we care about, in a flat yaml format
eval "$(python -c '
import pipes,sys,yaml
for k,v in yaml.load(sys.stdin).iteritems():
print("""readonly {var}={value}""".format(var = k, value = pipes.quote(str(v))))
print("""export {var}""".format(var = k))
' < """${kube_env_yaml}""")"
}
remove-docker-artifacts() {
:
}
# Finds the master PD device
find-master-pd() {
echo "Waiting for master pd to be attached"
attempt=0
while true; do
echo Attempt "$(($attempt+1))" to check for /dev/xvdb
if [[ -e /dev/xvdb ]]; then
echo "Found /dev/xvdb"
MASTER_PD_DEVICE="/dev/xvdb"
break
fi
attempt=$(($attempt+1))
sleep 1
done
}
fix-apt-sources() {
:
}
salt-master-role() {
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
  roles:
    - kubernetes-master
  cloud: aws
EOF
# If the kubelet on the master is enabled, give it the same CIDR range
# as a generic node.
if [[ ! -z "${KUBELET_APISERVER:-}" ]] && [[ ! -z "${KUBELET_CERT:-}" ]] && [[ ! -z "${KUBELET_KEY:-}" ]]; then
cat <<EOF >>/etc/salt/minion.d/grains.conf
  kubelet_api_servers: '${KUBELET_APISERVER}'
  cbr-cidr: 10.123.45.0/30
EOF
else
# If the kubelet is running disconnected from a master, give it a fixed
# CIDR range.
cat <<EOF >>/etc/salt/minion.d/grains.conf
  cbr-cidr: ${MASTER_IP_RANGE}
EOF
fi
env-to-grains "runtime_config"
}
salt-node-role() {
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
  roles:
    - kubernetes-pool
  cbr-cidr: 10.123.45.0/30
  cloud: aws
  api_servers: '${API_SERVERS}'
EOF
# We set hostname_override to the full EC2 private DNS name.
# We'd like to use the EC2 instance-id, but currently the kubelet health check
# assumes the name is resolvable, although that check should be going away entirely (#7092)
if [[ -z "${HOSTNAME_OVERRIDE:-}" ]]; then
HOSTNAME_OVERRIDE=`curl --silent http://169.254.169.254/2007-01-19/meta-data/local-hostname`
fi
env-to-grains "hostname_override"
}
function run-user-script() {
# TODO(justinsb): Support user scripts on AWS
# AWS doesn't have as rich a metadata service as GCE does
# Maybe specify an env var that is the path to a script?
:
}
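
In set-kube-env above, the Python one-liner flattens kube_env.yaml into readonly, exported shell variables. For a hypothetical kube_env.yaml of:

KUBERNETES_MASTER: 'true'
SALT_MASTER: '172.20.0.9'

the eval would execute the generated statements:

readonly KUBERNETES_MASTER=true
export KUBERNETES_MASTER
readonly SALT_MASTER=172.20.0.9
export SALT_MASTER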

cluster/aws/templates/create-dynamic-salt-files.sh

@@ -1,121 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Create the overlay files for the salt tree. We create these in a separate
# place so that we can blow away the rest of the salt configs on a kube-push and
# re-apply these.
mkdir -p /srv/salt-overlay/pillar
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")'
cluster_cidr: '$(echo "$CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
allocate_node_cidrs: '$(echo "$ALLOCATE_NODE_CIDRS" | sed -e "s/'/''/g")'
service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'
enable_cluster_ui: '$(echo "$ENABLE_CLUSTER_UI" | sed -e "s/'/''/g")'
enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
dns_replicas: '$(echo "$DNS_REPLICAS" | sed -e "s/'/''/g")'
dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
network_provider: '$(echo "$NETWORK_PROVIDER")'
opencontrail_tag: '$(echo "$OPENCONTRAIL_TAG")'
opencontrail_kubernetes_tag: '$(echo "$OPENCONTRAIL_KUBERNETES_TAG")'
opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET")'
num_nodes: $(echo "${NUM_NODES}")
e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")'
EOF
readonly BASIC_AUTH_FILE="/srv/salt-overlay/salt/kube-apiserver/basic_auth.csv"
if [ ! -e "${BASIC_AUTH_FILE}" ]; then
mkdir -p /srv/salt-overlay/salt/kube-apiserver
(umask 077;
echo "${KUBE_PASSWORD},${KUBE_USER},admin" > "${BASIC_AUTH_FILE}")
fi
# Generate and distribute a shared secret (bearer token) to
# apiserver and the nodes so that kubelet and kube-proxy can
# authenticate to apiserver.
kubelet_token=$KUBELET_TOKEN
kube_proxy_token=$KUBE_PROXY_TOKEN
# Make a list of tokens and usernames to be pushed to the apiserver
mkdir -p /srv/salt-overlay/salt/kube-apiserver
readonly KNOWN_TOKENS_FILE="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
(umask u=rw,go= ; echo "$kubelet_token,kubelet,kubelet" > $KNOWN_TOKENS_FILE ;
echo "$kube_proxy_token,kube_proxy,kube_proxy" >> $KNOWN_TOKENS_FILE)
mkdir -p /srv/salt-overlay/salt/kubelet
kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
(umask u=rw,go= ; echo "{\"BearerToken\": \"$kubelet_token\", \"Insecure\": true }" > $kubelet_auth_file)
mkdir -p /srv/salt-overlay/salt/kube-proxy
kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig"
cat > "${kube_proxy_kubeconfig_file}" <<EOF
apiVersion: v1
kind: Config
users:
- name: kube-proxy
  user:
    token: ${kube_proxy_token}
clusters:
- name: local
  cluster:
    insecure-skip-tls-verify: true
contexts:
- context:
    cluster: local
    user: kube-proxy
  name: service-account-context
current-context: service-account-context
EOF
mkdir -p /srv/salt-overlay/salt/kubelet
kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/kubeconfig"
cat > "${kubelet_kubeconfig_file}" <<EOF
apiVersion: v1
kind: Config
users:
- name: kubelet
  user:
    token: ${kubelet_token}
clusters:
- name: local
  cluster:
    insecure-skip-tls-verify: true
contexts:
- context:
    cluster: local
    user: kubelet
  name: service-account-context
current-context: service-account-context
EOF
# Generate tokens for other "service accounts". Append to known_tokens.
#
# NB: If this list ever changes, this script actually has to
# change to detect the existence of this file, kill any deleted
# old tokens and add any new tokens (to handle the upgrade case).
service_accounts=("system:scheduler" "system:controller_manager" "system:logging" "system:monitoring" "system:dns")
for account in "${service_accounts[@]}"; do
token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
echo "${token},${account},${account}" >> "${KNOWN_TOKENS_FILE}"
done
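
The sed -e "s/'/''/g" applied to each pillar value above doubles any embedded single quote, which is exactly how a single-quoted YAML scalar escapes a literal quote. A quick illustration with a made-up value:

$ echo "it's fine" | sed -e "s/'/''/g"
it''s fine
# emitted as:  some_key: 'it''s fine'  -- which YAML parses back to: it's fine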

cluster/aws/templates/download-release.sh

@@ -1,34 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Download and install release
# This script assumes that the environment variable MASTER_RELEASE_TAR contains
# the release tar to download and unpack. It is meant to be pushed to the
# master and run.
echo "Downloading binary release tar ($SERVER_BINARY_TAR_URL)"
download-or-bust "$SERVER_BINARY_TAR_URL"
echo "Downloading binary release tar ($SALT_TAR_URL)"
download-or-bust "$SALT_TAR_URL"
echo "Unpacking Salt tree"
rm -rf kubernetes
tar xzf "${SALT_TAR_URL##*/}"
echo "Running release install script"
sudo kubernetes/saltbase/install.sh "${SERVER_BINARY_TAR_URL##*/}"

cluster/aws/templates/extract-kube-env.sh

@@ -1,27 +0,0 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apt-get-install python-yaml
# kube-env has all the environment variables we care about, in a flat yaml format
eval "$(python -c '
import pipes,sys,yaml
for k,v in yaml.load(sys.stdin).iteritems():
print("""readonly {var}={value}""".format(var = k, value = pipes.quote(str(v))))
print("""export {var}""".format(var = k))
' < kube-env.yaml)"

cluster/aws/templates/format-disks.sh

@@ -16,6 +16,8 @@
# Discover all the ephemeral disks
function ensure-local-disks() {
block_devices=()
ephemeral_devices=$(curl --silent http://169.254.169.254/2014-11-05/meta-data/block-device-mapping/ | grep ephemeral)
@@ -119,7 +121,7 @@ else
# 80% goes to the docker thin-pool; we want to leave some space for host-volumes
lvcreate -l 80%VG --thinpool docker-thinpool vg-ephemeral
DOCKER_OPTS="${DOCKER_OPTS} --storage-opt dm.thinpooldev=/dev/mapper/vg--ephemeral-docker--thinpool"
DOCKER_OPTS="${DOCKER_OPTS:-} --storage-opt dm.thinpooldev=/dev/mapper/vg--ephemeral-docker--thinpool"
# Note that we don't move docker; docker goes direct to the thinpool
# Remaining space (20%) is for kubernetes data
@@ -159,7 +161,7 @@ fi
if [[ ${docker_storage} == "btrfs" ]]; then
DOCKER_OPTS="${DOCKER_OPTS} -s btrfs"
DOCKER_OPTS="${DOCKER_OPTS:-} -s btrfs"
elif [[ ${docker_storage} == "aufs-nolvm" || ${docker_storage} == "aufs" ]]; then
# Install aufs kernel module
# Fix issue #14162 with extra-virtual
@@ -168,9 +170,9 @@ elif [[ ${docker_storage} == "aufs-nolvm" || ${docker_storage} == "aufs" ]]; then
# Install aufs tools
apt-get-install aufs-tools
DOCKER_OPTS="${DOCKER_OPTS} -s aufs"
DOCKER_OPTS="${DOCKER_OPTS:-} -s aufs"
elif [[ ${docker_storage} == "devicemapper" ]]; then
DOCKER_OPTS="${DOCKER_OPTS} -s devicemapper"
DOCKER_OPTS="${DOCKER_OPTS:-} -s devicemapper"
else
echo "Ignoring unknown DOCKER_STORAGE: ${docker_storage}"
fi
@@ -183,7 +185,7 @@ if [[ -n "${move_docker}" ]]; then
mkdir -p ${move_docker}/docker
ln -s ${move_docker}/docker /var/lib/docker
DOCKER_ROOT="${move_docker}/docker"
DOCKER_OPTS="${DOCKER_OPTS} -g ${DOCKER_ROOT}"
DOCKER_OPTS="${DOCKER_OPTS:-} -g ${DOCKER_ROOT}"
fi
if [[ -n "${move_kubelet}" ]]; then
@@ -197,3 +199,4 @@ if [[ -n "${move_kubelet}" ]]; then
KUBELET_ROOT="${move_kubelet}/kubelet"
fi
}

cluster/aws/templates/salt-master.sh

@@ -1,65 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Prepopulate the name of the Master
mkdir -p /etc/salt/minion.d
echo "master: $SALT_MASTER" > /etc/salt/minion.d/master.conf
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
  roles:
    - kubernetes-master
  cloud: aws
  cbr-cidr: "${MASTER_IP_RANGE}"
EOF
# Helper that sets a salt grain in grains.conf, if the upper-cased key is a non-empty env
function env_to_salt {
local key=$1
local env_key=`echo $key | tr '[:lower:]' '[:upper:]'`
local value=${!env_key}
if [[ -n "${value}" ]]; then
# Note this is yaml, so indentation matters
cat <<EOF >>/etc/salt/minion.d/grains.conf
  ${key}: '$(echo "${value}" | sed -e "s/'/''/g")'
EOF
fi
}
env_to_salt docker_opts
env_to_salt docker_root
env_to_salt kubelet_root
env_to_salt master_extra_sans
env_to_salt runtime_config
env_to_salt non_masquerade_cidr
# Auto accept all keys from minions that try to join
mkdir -p /etc/salt/master.d
cat <<EOF >/etc/salt/master.d/auto-accept.conf
auto_accept: True
EOF
cat <<EOF >/etc/salt/master.d/reactor.conf
# React to new minions starting by running highstate on them.
reactor:
  - 'salt/minion/*/start':
    - /srv/reactor/highstate-new.sls
EOF
install-salt master
service salt-master start
service salt-minion start
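
env_to_salt relies on bash indirect expansion: ${!env_key} expands the variable whose name is stored in env_key. A short sketch of the lookup, with a hypothetical value:

DOCKER_ROOT=/mnt/ephemeral/docker
key=docker_root
env_key=$(echo "$key" | tr '[:lower:]' '[:upper:]')   # DOCKER_ROOT
value=${!env_key}                                      # /mnt/ephemeral/docker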

cluster/aws/templates/salt-minion.sh

@@ -1,61 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Prepopulate the name of the Master
mkdir -p /etc/salt/minion.d
echo "master: $SALT_MASTER" > /etc/salt/minion.d/master.conf
# Turn on debugging for salt-minion
# echo "DAEMON_ARGS=\"\$DAEMON_ARGS --log-file-level=debug\"" > /etc/default/salt-minion
# Our minions will have a pool role to distinguish them from the master.
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
  roles:
    - kubernetes-pool
  cbr-cidr: 10.123.45.0/30
  cloud: aws
EOF
# We set the hostname_override to the full EC2 private dns name
# we'd like to use EC2 instance-id, but currently the kubelet health-check assumes the name
# is resolvable, although that check should be going away entirely (#7092)
if [[ -z "${HOSTNAME_OVERRIDE}" ]]; then
HOSTNAME_OVERRIDE=`curl --silent curl http://169.254.169.254/2007-01-19/meta-data/local-hostname`
fi
# Helper that sets a salt grain in grains.conf, if the upper-cased key is a non-empty env
function env_to_salt {
local key=$1
local env_key=`echo $key | tr '[:lower:]' '[:upper:]'`
local value=${!env_key}
if [[ -n "${value}" ]]; then
# Note this is yaml, so indentation matters
cat <<EOF >>/etc/salt/minion.d/grains.conf
  ${key}: '$(echo "${value}" | sed -e "s/'/''/g")'
EOF
fi
}
env_to_salt hostname_override
env_to_salt docker_opts
env_to_salt docker_root
env_to_salt kubelet_root
env_to_salt non_masquerade_cidr
install-salt
service salt-minion start

cluster/aws/templates/setup-master-pd.sh

@@ -1,62 +0,0 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Format and mount the disk, create directories on it for all of the master's
# persistent data, and link them to where they're used.
echo "Waiting for master pd to be attached"
attempt=0
while true; do
echo Attempt "$(($attempt+1))" to check for /dev/xvdb
if [[ -e /dev/xvdb ]]; then
echo "Found /dev/xvdb"
break
fi
attempt=$(($attempt+1))
sleep 1
done
# Mount Master Persistent Disk
echo "Mounting master-pd"
mkdir -p /mnt/master-pd
mkfs -t ext4 /dev/xvdb
echo "/dev/xvdb /mnt/master-pd ext4 noatime 0 0" >> /etc/fstab
mount /mnt/master-pd
# Contains all the data stored in etcd
mkdir -m 700 -p /mnt/master-pd/var/etcd
# Contains the dynamically generated apiserver auth certs and keys
mkdir -p /mnt/master-pd/srv/kubernetes
# Contains the cluster's initial config parameters and auth tokens
mkdir -p /mnt/master-pd/srv/salt-overlay
# Directory for kube-apiserver to store SSH key (if necessary)
mkdir -p /mnt/master-pd/srv/sshproxy
ln -s -f /mnt/master-pd/var/etcd /var/etcd
ln -s -f /mnt/master-pd/srv/kubernetes /srv/kubernetes
ln -s -f /mnt/master-pd/srv/sshproxy /srv/sshproxy
ln -s -f /mnt/master-pd/srv/salt-overlay /srv/salt-overlay
# This is a bit of a hack to get around the fact that salt has to run after the
# PD and mounted directory are already set up. We can't give ownership of the
# directory to etcd until the etcd user and group exist, but they don't exist
# until salt runs if we don't create them here. We could alternatively make the
# permissions on the directory more permissive, but this seems less bad.
if ! id etcd &>/dev/null; then
useradd -s /sbin/nologin -d /var/etcd etcd
fi
chown -R etcd /mnt/master-pd/var/etcd
chgrp -R etcd /mnt/master-pd/var/etcd

cluster/aws/util.sh

@@ -572,11 +572,14 @@ function upload-server-tars() {
SERVER_BINARY_TAR_HASH=
SALT_TAR_URL=
SALT_TAR_HASH=
BOOTSTRAP_SCRIPT_URL=
BOOTSTRAP_SCRIPT_HASH=
ensure-temp-dir
SERVER_BINARY_TAR_HASH=$(sha1sum-file "${SERVER_BINARY_TAR}")
SALT_TAR_HASH=$(sha1sum-file "${SALT_TAR}")
BOOTSTRAP_SCRIPT_HASH=$(sha1sum-file "${BOOTSTRAP_SCRIPT}")
if [[ -z ${AWS_S3_BUCKET-} ]]; then
local project_hash=
@@ -635,12 +638,13 @@ function upload-server-tars() {
mkdir ${local_dir}
echo "+++ Staging server tars to S3 Storage: ${AWS_S3_BUCKET}/${staging_path}"
local server_binary_path="${staging_path}/${SERVER_BINARY_TAR##*/}"
cp -a "${SERVER_BINARY_TAR}" ${local_dir}
cp -a "${SALT_TAR}" ${local_dir}
cp -a "${BOOTSTRAP_SCRIPT}" ${local_dir}
aws s3 sync --region ${s3_bucket_location} --exact-timestamps ${local_dir} "s3://${AWS_S3_BUCKET}/${staging_path}/"
local server_binary_path="${staging_path}/${SERVER_BINARY_TAR##*/}"
aws s3api put-object-acl --region ${s3_bucket_location} --bucket ${AWS_S3_BUCKET} --key "${server_binary_path}" --grant-read 'uri="http://acs.amazonaws.com/groups/global/AllUsers"'
SERVER_BINARY_TAR_URL="${s3_url_base}/${AWS_S3_BUCKET}/${server_binary_path}"
@@ -648,9 +652,14 @@ function upload-server-tars() {
aws s3api put-object-acl --region ${s3_bucket_location} --bucket ${AWS_S3_BUCKET} --key "${salt_tar_path}" --grant-read 'uri="http://acs.amazonaws.com/groups/global/AllUsers"'
SALT_TAR_URL="${s3_url_base}/${AWS_S3_BUCKET}/${salt_tar_path}"
local bootstrap_script_path="${staging_path}/${BOOTSTRAP_SCRIPT##*/}"
aws s3api put-object-acl --region ${s3_bucket_location} --bucket ${AWS_S3_BUCKET} --key "${bootstrap_script_path}" --grant-read 'uri="http://acs.amazonaws.com/groups/global/AllUsers"'
BOOTSTRAP_SCRIPT_URL="${s3_url_base}/${AWS_S3_BUCKET}/${bootstrap_script_path}"
echo "Uploaded server tars:"
echo " SERVER_BINARY_TAR_URL: ${SERVER_BINARY_TAR_URL}"
echo " SALT_TAR_URL: ${SALT_TAR_URL}"
echo " BOOTSTRAP_SCRIPT_URL: ${BOOTSTRAP_SCRIPT_URL}"
}
# Adds a tag to an AWS resource
@@ -812,11 +821,14 @@ function kube-up {
ensure-temp-dir
create-bootstrap-script
upload-server-tars
ensure-iam-profiles
load-or-gen-kube-basicauth
load-or-gen-kube-bearertoken
ssh-key-setup
@@ -915,6 +927,24 @@ function kube-up {
check-cluster
}
# Builds the bootstrap script and saves it to a local temp file
# Sets BOOTSTRAP_SCRIPT to the path of the script
function create-bootstrap-script() {
ensure-temp-dir
BOOTSTRAP_SCRIPT="${KUBE_TEMP}/bootstrap-script"
(
# Include the default functions from the GCE configure-vm script
sed '/^#+AWS_OVERRIDES_HERE/,$d' "${KUBE_ROOT}/cluster/gce/configure-vm.sh"
# Include the AWS override functions
cat "${KUBE_ROOT}/cluster/aws/templates/configure-vm-aws.sh"
cat "${KUBE_ROOT}/cluster/aws/templates/format-disks.sh"
# Include the GCE configure-vm directly-executed code
sed -e '1,/^#+AWS_OVERRIDES_HERE/d' "${KUBE_ROOT}/cluster/gce/configure-vm.sh"
) > "${BOOTSTRAP_SCRIPT}"
}
# Starts the master node
function start-master() {
# Ensure RUNTIME_CONFIG is populated
@@ -926,7 +956,13 @@ function start-master() {
# Get or create master elastic IP
ensure-master-ip
create-certs "" # TODO: Should we pass ELB name / elastic IP ?
# We have to make sure that the cert is valid for API_SERVERS
# i.e. we likely have to pass ELB name / elastic IP in future
create-certs "${MASTER_INTERNAL_IP}"
# This key is no longer needed, and this enables us to get under the 16KB size limit
KUBECFG_CERT_BASE64=""
KUBECFG_KEY_BASE64=""
write-master-env
@@ -936,28 +972,20 @@ function start-master() {
echo "mkdir -p /var/cache/kubernetes-install"
echo "cd /var/cache/kubernetes-install"
echo "cat > kube-env.yaml << __EOF_MASTER_KUBE_ENV_YAML"
echo "cat > kube_env.yaml << __EOF_MASTER_KUBE_ENV_YAML"
cat ${KUBE_TEMP}/master-kube-env.yaml
# TODO: get rid of these exceptions / harmonize with common or GCE
echo "SALT_MASTER: $(yaml-quote ${MASTER_INTERNAL_IP:-})"
echo "DOCKER_STORAGE: $(yaml-quote ${DOCKER_STORAGE:-})"
echo "MASTER_EXTRA_SANS: $(yaml-quote ${MASTER_EXTRA_SANS:-})"
echo "API_SERVERS: $(yaml-quote ${MASTER_INTERNAL_IP:-})"
echo "__EOF_MASTER_KUBE_ENV_YAML"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/common.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/extract-kube-env.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/format-disks.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/setup-master-pd.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/create-dynamic-salt-files.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/download-release.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/salt-master.sh"
echo ""
echo "wget -O bootstrap ${BOOTSTRAP_SCRIPT_URL}"
echo "chmod +x bootstrap"
echo "./bootstrap"
) > "${KUBE_TEMP}/master-user-data"
# We're running right up against the 16KB limit
# Remove all comment lines and then put back the bin/bash shebang
cat "${KUBE_TEMP}/master-user-data" | sed -e 's/^[[:blank:]]*#.*$//' | sed -e '/^[[:blank:]]*$/d' > "${KUBE_TEMP}/master-user-data.tmp"
echo '#! /bin/bash' | cat - "${KUBE_TEMP}/master-user-data.tmp" > "${KUBE_TEMP}/master-user-data"
rm "${KUBE_TEMP}/master-user-data.tmp"
# Compress the data to fit under the 16KB limit (cloud-init accepts compressed data)
gzip "${KUBE_TEMP}/master-user-data"
echo "Starting Master"
master_id=$($AWS_CMD run-instances \
@@ -970,7 +998,7 @@ function start-master() {
--security-group-ids ${MASTER_SG_ID} \
--associate-public-ip-address \
--block-device-mappings "${MASTER_BLOCK_DEVICE_MAPPINGS}" \
--user-data file://${KUBE_TEMP}/master-user-data \
--user-data fileb://${KUBE_TEMP}/master-user-data.gz \
--query Instances[].InstanceId)
add-tag $master_id Name $MASTER_NAME
add-tag $master_id Role $MASTER_TAG
@@ -1013,60 +1041,6 @@ function start-master() {
attempt=$(($attempt+1))
sleep 10
done
# Check for SSH connectivity
attempt=0
while true; do
echo -n Attempt "$(($attempt+1))" to check for SSH to master
local output
local ok=1
output=$(ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ${SSH_USER}@${KUBE_MASTER_IP} uptime 2> $LOG) || ok=0
if [[ ${ok} == 0 ]]; then
if (( attempt > 30 )); then
echo
echo "(Failed) output was: ${output}"
echo
echo -e "${color_red}Unable to ssh to master on ${KUBE_MASTER_IP}. Your cluster is unlikely" >&2
echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2
echo -e "cluster. (sorry!)${color_norm}" >&2
exit 1
fi
else
echo -e " ${color_green}[ssh to master working]${color_norm}"
break
fi
echo -e " ${color_yellow}[ssh to master not working yet]${color_norm}"
attempt=$(($attempt+1))
sleep 10
done
# We need the salt-master to be up for the minions to work
attempt=0
while true; do
echo -n Attempt "$(($attempt+1))" to check for salt-master
local output
local ok=1
output=$(ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ${SSH_USER}@${KUBE_MASTER_IP} pgrep salt-master 2> $LOG) || ok=0
if [[ ${ok} == 0 ]]; then
if (( attempt > 30 )); then
echo
echo "(Failed) output was: ${output}"
echo
echo -e "${color_red}salt-master failed to start on ${KUBE_MASTER_IP}. Your cluster is unlikely" >&2
echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2
echo -e "cluster. (sorry!)${color_norm}" >&2
exit 1
fi
else
echo -e " ${color_green}[salt-master running]${color_norm}"
break
fi
echo -e " ${color_yellow}[salt-master not working yet]${color_norm}"
attempt=$(($attempt+1))
sleep 10
done
reboot-on-failure ${master_id}
}
# Creates an ASG for the minion nodes
@@ -1075,7 +1049,29 @@ function start-minions() {
build-runtime-config
echo "Creating minion configuration"
generate-minion-user-data > "${KUBE_TEMP}/minion-user-data"
write-node-env
(
# We pipe this to the AMI as a startup script in the user-data field. Requires a compatible AMI.
echo "#! /bin/bash"
echo "mkdir -p /var/cache/kubernetes-install"
echo "cd /var/cache/kubernetes-install"
echo "cat > kube_env.yaml << __EOF_KUBE_ENV_YAML"
cat ${KUBE_TEMP}/node-kube-env.yaml
# TODO: get rid of these exceptions / harmonize with common or GCE
echo "DOCKER_STORAGE: $(yaml-quote ${DOCKER_STORAGE:-})"
echo "API_SERVERS: $(yaml-quote ${MASTER_INTERNAL_IP:-})"
echo "__EOF_KUBE_ENV_YAML"
echo ""
echo "wget -O bootstrap ${BOOTSTRAP_SCRIPT_URL}"
echo "chmod +x bootstrap"
echo "./bootstrap"
) > "${KUBE_TEMP}/node-user-data"
# Compress the data to fit under the 16KB limit (cloud-init accepts compressed data)
gzip "${KUBE_TEMP}/node-user-data"
local public_ip_option
if [[ "${ENABLE_NODE_PUBLIC_IP}" == "true" ]]; then
public_ip_option="--associate-public-ip-address"
@@ -1091,7 +1087,7 @@ function start-minions() {
--security-groups ${NODE_SG_ID} \
${public_ip_option} \
--block-device-mappings "${NODE_BLOCK_DEVICE_MAPPINGS}" \
--user-data "file://${KUBE_TEMP}/minion-user-data"
--user-data "fileb://${KUBE_TEMP}/node-user-data.gz"
echo "Creating autoscaling group"
${AWS_ASG_CMD} create-auto-scaling-group \
@@ -1136,19 +1132,6 @@ function wait-minions {
function wait-master() {
detect-master > $LOG
# TODO(justinsb): This is really not necessary any more
# Wait 3 minutes for cluster to come up. We hit it with a "highstate" after that to
# make sure that everything is well configured.
# TODO: Can we poll here?
echo "Waiting 3 minutes for cluster to settle"
local i
for (( i=0; i < 6*3; i++)); do
printf "."
sleep 10
done
echo "Re-running salt highstate"
ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ${SSH_USER}@${KUBE_MASTER_IP} sudo salt '*' state.highstate > $LOG
echo "Waiting for cluster initialization."
echo
echo " This will continually check to see if the API for kubernetes is reachable."
@@ -1391,6 +1374,7 @@ function kube-push {
# Make sure we have the tar files staged on Google Storage
find-release-tars
create-bootstrap-script
upload-server-tars
(
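
EC2 caps user-data at 16KB, which is why the comment-stripping hack gives way to gzip: cloud-init transparently decompresses gzipped user-data, and the AWS CLI's fileb:// prefix uploads the bytes unmodified (file:// would treat them as text). A size sanity check along these lines could follow the gzip step -- a sketch, not part of the commit:

gzip "${KUBE_TEMP}/master-user-data"
size=$(wc -c < "${KUBE_TEMP}/master-user-data.gz")
if (( size > 16384 )); then
  echo "master user-data is ${size} bytes, over the 16KB EC2 limit" >&2
  exit 1
fi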

@@ -672,6 +672,8 @@ function create-certs {
fi
sans="${sans}IP:${service_ip},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${DNS_DOMAIN},DNS:${MASTER_NAME}"
echo "Generating certs for alternate-names: ${sans}"
local -r cert_create_debug_output=$(mktemp "${KUBE_TEMP}/cert_create_debug_output.XXX")
# Note: This was heavily cribbed from make-ca-cert.sh
(set -x

cluster/gce/configure-vm.sh

@@ -18,6 +18,10 @@ set -o errexit
set -o nounset
set -o pipefail
# Note that this script is also used by AWS; we include it and then override
# functions with AWS equivalents. Note `#+AWS_OVERRIDES_HERE` below.
# TODO(justinsb): Refactor into common script & GCE specific script?
# If we have any arguments at all, this is a push and not just setup.
is_push=$@
@@ -44,6 +48,16 @@ function ensure-basic-networking() {
echo "Networking functional on $(hostname) ($(hostname -i))"
}
# A hookpoint for installing any needed packages
ensure-packages() {
:
}
# A hookpoint for setting up local devices
ensure-local-disks() {
:
}
function ensure-install-dir() {
INSTALL_DIR="/var/cache/kubernetes-install"
mkdir -p ${INSTALL_DIR}
@@ -57,11 +71,11 @@ EOF
}
function set-broken-motd() {
echo -e '\nBroken (or in progress) GCE Kubernetes node setup! Suggested first step:\n tail /var/log/startupscript.log\n' > /etc/motd
echo -e '\nBroken (or in progress) Kubernetes node setup! Suggested first step:\n tail /var/log/startupscript.log\n' > /etc/motd
}
function set-good-motd() {
echo -e '\n=== GCE Kubernetes node setup complete ===\n' > /etc/motd
echo -e '\n=== Kubernetes node setup complete ===\n' > /etc/motd
}
function curl-metadata() {
@@ -88,11 +102,7 @@ for k,v in yaml.load(sys.stdin).iteritems():
function remove-docker-artifacts() {
echo "== Deleting docker0 =="
# Forcibly install bridge-utils (options borrowed from Salt logs).
until apt-get -q -y -o DPkg::Options::=--force-confold -o DPkg::Options::=--force-confdef install bridge-utils; do
echo "== install of bridge-utils failed, retrying =="
sleep 5
done
apt-get-install bridge-utils
# Remove docker artifacts on minion nodes, if present
iptables -t nat -F || true
@@ -125,6 +135,23 @@ validate-hash() {
fi
}
apt-get-install() {
# Forcibly install packages (options borrowed from Salt logs).
until apt-get -q -y -o DPkg::Options::=--force-confold -o DPkg::Options::=--force-confdef install $@; do
echo "== install of packages $@ failed, retrying =="
sleep 5
done
}
apt-get-update() {
echo "== Refreshing package database =="
until apt-get update; do
echo "== apt-get update failed, retrying =="
sleep 5
done
}
#
# Install salt from GCS. See README.md for instructions on how to update these
# debs.
install-salt() {
@@ -205,6 +232,18 @@ stop-salt-minion() {
done
}
# Finds the master PD device; returns it in MASTER_PD_DEVICE
find-master-pd() {
MASTER_PD_DEVICE=""
# TODO(zmerlynn): GKE is still lagging in master-pd creation
if [[ ! -e /dev/disk/by-id/google-master-pd ]]; then
return
fi
device_info=$(ls -l /dev/disk/by-id/google-master-pd)
relative_path=${device_info##* }
MASTER_PD_DEVICE="/dev/disk/by-id/${relative_path}"
}
# Mounts a persistent disk (formatting if needed) to store the persistent data
# on the master -- etcd's data, a few settings, and security certs/keys/tokens.
#
@@ -213,19 +252,16 @@ stop-salt-minion() {
# formats an unformatted disk, and mkdir -p will leave a directory be if it
# already exists.
mount-master-pd() {
# TODO(zmerlynn): GKE is still lagging in master-pd creation
if [[ ! -e /dev/disk/by-id/google-master-pd ]]; then
find-master-pd
if [[ -z "${MASTER_PD_DEVICE}" ]]; then
return
fi
device_info=$(ls -l /dev/disk/by-id/google-master-pd)
relative_path=${device_info##* }
device_path="/dev/disk/by-id/${relative_path}"
# Format and mount the disk, create directories on it for all of the master's
# persistent data, and link them to where they're used.
echo "Mounting master-pd"
mkdir -p /mnt/master-pd
/usr/share/google/safe_format_and_mount -m "mkfs.ext4 -F" "${device_path}" /mnt/master-pd &>/var/log/master-pd-mount.log || \
/usr/share/google/safe_format_and_mount -m "mkfs.ext4 -F" "${MASTER_PD_DEVICE}" /mnt/master-pd &>/var/log/master-pd-mount.log || \
{ echo "!!! master-pd mount failed, review /var/log/master-pd-mount.log !!!"; return 1; }
# Contains all the data stored in etcd
mkdir -m 700 -p /mnt/master-pd/var/etcd
@@ -636,11 +672,8 @@ EOF
  cbr-cidr: ${MASTER_IP_RANGE}
EOF
fi
if [[ ! -z "${RUNTIME_CONFIG:-}" ]]; then
cat <<EOF >>/etc/salt/minion.d/grains.conf
runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")'
EOF
fi
env-to-grains "runtime_config"
}
function salt-node-role() {
@@ -654,22 +687,31 @@ grains:
EOF
}
function salt-docker-opts() {
DOCKER_OPTS=""
if [[ -n "${EXTRA_DOCKER_OPTS-}" ]]; then
DOCKER_OPTS="${EXTRA_DOCKER_OPTS}"
fi
if [[ -n "{DOCKER_OPTS}" ]]; then
function env-to-grains {
local key=$1
local env_key=`echo $key | tr '[:lower:]' '[:upper:]'`
local value=${!env_key:-}
if [[ -n "${value}" ]]; then
# Note this is yaml, so indentation matters
cat <<EOF >>/etc/salt/minion.d/grains.conf
docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")'
  ${key}: '$(echo "${value}" | sed -e "s/'/''/g")'
EOF
fi
}
function node-docker-opts() {
if [[ -n "${EXTRA_DOCKER_OPTS-}" ]]; then
DOCKER_OPTS="${DOCKER_OPTS:-} ${EXTRA_DOCKER_OPTS}"
fi
}
function salt-grains() {
env-to-grains "docker_opts"
env-to-grains "docker_root"
env-to-grains "kubelet_root"
}
function configure-salt() {
fix-apt-sources
mkdir -p /etc/salt/minion.d
salt-run-local
if [[ "${KUBERNETES_MASTER}" == "true" ]]; then
@@ -679,8 +721,9 @@ function configure-salt() {
fi
else
salt-node-role
salt-docker-opts
node-docker-opts
fi
salt-grains
install-salt
stop-salt-minion
}
@@ -690,14 +733,33 @@ function run-salt() {
salt-call --local state.highstate || true
}
function run-user-script() {
if curl-metadata k8s-user-startup-script > "${INSTALL_DIR}/k8s-user-script.sh"; then
user_script=$(cat "${INSTALL_DIR}/k8s-user-script.sh")
fi
if [[ ! -z ${user_script:-} ]]; then
chmod u+x "${INSTALL_DIR}/k8s-user-script.sh"
echo "== running user startup script =="
"${INSTALL_DIR}/k8s-user-script.sh"
fi
}
# This script is re-used on AWS. Some of the above functions will be replaced.
# The AWS kube-up script looks for this marker:
#+AWS_OVERRIDES_HERE
####################################################################################
if [[ -z "${is_push}" ]]; then
echo "== kube-up node config starting =="
set-broken-motd
ensure-basic-networking
fix-apt-sources
apt-get-update
ensure-install-dir
ensure-packages
set-kube-env
ensure-local-disks
[[ "${KUBERNETES_MASTER}" == "true" ]] && mount-master-pd
create-salt-pillar
if [[ "${KUBERNETES_MASTER}" == "true" ]]; then
@@ -713,14 +775,7 @@ if [[ -z "${is_push}" ]]; then
run-salt
set-good-motd
if curl-metadata k8s-user-startup-script > "${INSTALL_DIR}/k8s-user-script.sh"; then
user_script=$(cat "${INSTALL_DIR}/k8s-user-script.sh")
fi
if [[ ! -z ${user_script:-} ]]; then
chmod u+x "${INSTALL_DIR}/k8s-user-script.sh"
echo "== running user startup script =="
"${INSTALL_DIR}/k8s-user-script.sh"
fi
run-user-script
echo "== kube-up node config done =="
else
echo "== kube-push node config starting =="