Initial addition of CoreOS as minion for AWS cluster

pull/6/head
Brian Akins 2015-05-13 16:39:22 -04:00
parent fe24da8478
commit fac4350fa6
7 changed files with 341 additions and 36 deletions

View File

@@ -77,3 +77,11 @@ ADMISSION_CONTROL=NamespaceLifecycle,NamespaceAutoProvision,LimitRanger,Security
# Optional: Enable/disable public IP assignment for minions.
# Important Note: disable only if you have setup a NAT instance for internet access and configured appropriate routes!
ENABLE_MINION_PUBLIC_IP=${KUBE_ENABLE_MINION_PUBLIC_IP:-true}
# OS options for minions
KUBE_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION:-ubuntu}"
KUBE_MINION_IMAGE="${KUBE_MINION_IMAGE:-}"
COREOS_CHANNEL="${COREOS_CHANNEL:-alpha}"
CONTAINER_RUNTIME="${KUBE_CONTAINER_RUNTIME:-docker}"
RKT_VERSION="${KUBE_RKT_VERSION:-0.5.5}"
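For example, assuming the usual cluster/kube-up.sh entry point with KUBERNETES_PROVIDER=aws (an illustrative invocation, not part of this change), a CoreOS minion cluster running rkt could be requested like this:

export KUBERNETES_PROVIDER=aws
export KUBE_OS_DISTRIBUTION=coreos    # selects cluster/aws/coreos/util.sh
export KUBE_CONTAINER_RUNTIME=rkt     # surfaces as CONTAINER_RUNTIME above
export KUBE_RKT_VERSION=0.5.5         # rkt release fetched on each minion
export COREOS_CHANNEL=alpha           # CoreOS channel used for the AMI lookup
cluster/kube-up.sh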

View File

@@ -73,3 +73,10 @@ ADMISSION_CONTROL=NamespaceLifecycle,NamespaceAutoProvision,LimitRanger,Security
# Optional: Enable/disable public IP assignment for minions.
# Important Note: disable only if you have setup a NAT instance for internet access and configured appropriate routes!
ENABLE_MINION_PUBLIC_IP=${KUBE_ENABLE_MINION_PUBLIC_IP:-true}
# OS options for minions
KUBE_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION:-ubuntu}"
KUBE_MINION_IMAGE="${KUBE_MINION_IMAGE:-}"
COREOS_CHANNEL="${COREOS_CHANNEL:-alpha}"
CONTAINER_RUNTIME="${KUBE_CONTAINER_RUNTIME:-docker}"
RKT_VERSION="${KUBE_RKT_VERSION:-0.5.5}"

View File

@@ -0,0 +1,180 @@
#cloud-config
coreos:
  units:
    - name: kubernetes-install-rkt.service
      command: start
      content: |
        [Unit]
        Description=Fetch Rocket
        Documentation=http://github.com/coreos/rkt
        Requires=network-online.target
        After=network-online.target
        [Service]
        Type=oneshot
        EnvironmentFile=/etc/kube-env
        ExecStartPre=/usr/bin/mkdir -p /opt/rkt
        ExecStartPre=/usr/bin/wget \
          -O /opt/rkt/rkt-v${RKT_VERSION}.tar.gz \
          https://github.com/coreos/rkt/releases/download/v${RKT_VERSION}/rkt-v${RKT_VERSION}.tar.gz
        ExecStart=/usr/bin/tar xzvf /opt/rkt/rkt-v${RKT_VERSION}.tar.gz -C /opt --overwrite
    - name: hostname-override.service
      command: start
      content: |
        [Unit]
        Description=Kubelet Hostname Override
        Requires=network-online.target
        After=network-online.target
        [Service]
        Type=oneshot
        RemainAfterExit=yes
        EnvironmentFile=/etc/kube-env
        ExecStart=/run/setup-hostname-override.sh
    - name: kubernetes-install-minion.service
      command: start
      content: |
        [Unit]
        Description=Install Kubernetes Server
        Requires=network-online.target
        After=network-online.target
        [Service]
        Type=oneshot
        RemainAfterExit=yes
        EnvironmentFile=/etc/kube-env
        ExecStartPre=/usr/bin/mkdir -p /opt/kubernetes/pkg
        ExecStartPre=/usr/bin/curl --location --create-dirs --output /opt/kubernetes/pkg/kubernetes-server-linux-amd64.tar.gz ${SERVER_BINARY_TAR_URL}
        ExecStart=/usr/bin/tar xf /opt/kubernetes/pkg/kubernetes-server-linux-amd64.tar.gz -C /opt --overwrite
    - name: kubelet.service
      command: start
      content: |
        [Unit]
        Description=Run Kubelet service
        Requires=kubernetes-install-minion.service
        After=kubernetes-install-minion.service
        Requires=hostname-override.service
        After=hostname-override.service
        [Service]
        EnvironmentFile=/etc/kube-env
        EnvironmentFile=/etc/hostname-override
        ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests
        ExecStartPre=/run/setup-auth.sh
        ExecStart=/opt/kubernetes/server/bin/kubelet \
          --api_servers=https://${MASTER_IP} \
          --config=/etc/kubernetes/manifests \
          --allow_privileged=False \
          --v=2 \
          --cluster_dns=10.0.0.10 \
          --cluster_domain=kubernetes.local \
          --logtostderr=true \
          --hostname-override=${HOSTNAME_OVERRIDE} \
          --container-runtime=${KUBERNETES_CONTAINER_RUNTIME}
        Restart=always
        RestartSec=10
    - name: kube-proxy.service
      command: start
      content: |
        [Unit]
        Description=Start Kube-proxy service as Daemon
        Requires=kubernetes-install-minion.service
        After=kubernetes-install-minion.service
        Requires=kubernetes-install-rkt.service
        After=kubernetes-install-rkt.service
        [Service]
        EnvironmentFile=/etc/kube-env
        ExecStartPre=/run/config-kube-proxy.sh
        ExecStart=/opt/kubernetes/server/bin/kube-proxy \
          --master=https://${MASTER_IP} \
          --kubeconfig=/var/lib/kube-proxy/kubeconfig \
          --v=2 \
          --logtostderr=true
        Restart=always
        RestartSec=10
    - name: rkt-metadata.socket
      command: start
      content: |
        [Unit]
        Description=rkt metadata service socket
        PartOf=rkt-metadata.service
        Requires=kubernetes-install-rkt.service
        After=kubernetes-install-rkt.service
        [Socket]
        ListenStream=/run/rkt/metadata-svc.sock
        SocketMode=0660
        SocketUser=root
        SocketGroup=root
        RemoveOnStop=true
    - name: rkt-metadata.service
      command: start
      content: |
        [Unit]
        Description=rkt metadata service
        Documentation=http://github.com/coreos/rkt
        Requires=rkt-metadata.socket
        After=network.target rkt-metadata.socket
        Requires=kubernetes-install-rkt.service
        After=kubernetes-install-rkt.service
        [Service]
        EnvironmentFile=/etc/kube-env
        ExecStart=/opt/rkt-v${RKT_VERSION}/rkt metadata-service
write_files:
  - path: /run/setup-hostname-override.sh
    permissions: "0755"
    content: |
      #!/bin/bash
      set -x
      source /etc/kube-env
      if [[ -z "${HOSTNAME_OVERRIDE}" ]]; then
        HOSTNAME_OVERRIDE=`curl --silent http://169.254.169.254/2007-01-19/meta-data/local-hostname`
      fi
      if [[ -z "${HOSTNAME_OVERRIDE}" ]]; then
        HOSTNAME_OVERRIDE=`hostname -f`
      fi
      echo "HOSTNAME_OVERRIDE=${HOSTNAME_OVERRIDE}" > /etc/hostname-override
  - path: /run/setup-auth.sh
    permissions: "0755"
    content: |
      #!/bin/bash -e
      set -x
      source /etc/kube-env
      /usr/bin/mkdir -p /var/lib/kubelet
      printf '{"BearerToken": "%s", "Insecure": true }' ${KUBE_BEARER_TOKEN} > /var/lib/kubelet/kubernetes_auth
  - path: /run/config-kube-proxy.sh
    permissions: "0755"
    content: |
      #!/bin/bash -e
      set -x
      source /etc/kube-env
      /usr/bin/mkdir -p /var/lib/kube-proxy
      cat > /var/lib/kube-proxy/kubeconfig << EOF
      apiVersion: v1
      kind: Config
      users:
      - name: kube-proxy
        user:
          token: $KUBE_PROXY_TOKEN
      clusters:
      - name: local
        cluster:
          insecure-skip-tls-verify: true
      contexts:
      - context:
          cluster: local
          user: kube-proxy
        name: service-account-context
      current-context: service-account-context
      EOF
  - path: /etc/kube-env
    permissions: 0644
    owner: root
    content: |

View File

@@ -0,0 +1,61 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions for CoreOS.
function detect-minion-image() {
  if [[ -z "${KUBE_MINION_IMAGE-}" ]]; then
    KUBE_MINION_IMAGE=$(curl -s -L http://${COREOS_CHANNEL}.release.core-os.net/amd64-usr/current/coreos_production_ami_all.json | python -c "import json,sys;obj=json.load(sys.stdin);print filter(lambda t: t['name']=='${AWS_REGION}', obj['amis'])[0]['hvm']")
  fi
  if [[ -z "${KUBE_MINION_IMAGE-}" ]]; then
    echo "unable to determine KUBE_MINION_IMAGE"
    exit 2
  fi
}
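The AMI lookup assumes the channel's coreos_production_ami_all.json lists one entry per region with an hvm image id; only the fields the Python 2 one-liner actually reads are sketched here (ids illustrative):

{
  "amis": [
    {"name": "us-east-1", "hvm": "ami-xxxxxxxx"},
    {"name": "us-west-2", "hvm": "ami-yyyyyyyy"}
  ]
}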
function generate-minion-user-data() {
  i=$1
  MINION_PRIVATE_IP=$INTERNAL_IP_BASE.1${i}
  MINION_IP_RANGE=${MINION_IP_RANGES[$i]}
  # This is a bit of a hack: every "variable" our cloud-config needs is driven
  # by env vars from this script. The quoted lines emitted below are appended
  # after node.yaml, whose final entry is the /etc/kube-env write_files block,
  # so they become that file's content on the minion.
  cat ${KUBE_ROOT}/cluster/aws/coreos/node.yaml
  cat <<EOF
      ENV_TIMESTAMP=$(yaml-quote $(date -u +%Y-%m-%dT%T%z))
      INSTANCE_PREFIX=$(yaml-quote ${INSTANCE_PREFIX})
      SERVER_BINARY_TAR_URL=$(yaml-quote ${SERVER_BINARY_TAR_URL})
      ENABLE_CLUSTER_DNS=$(yaml-quote ${ENABLE_CLUSTER_DNS:-false})
      DNS_SERVER_IP=$(yaml-quote ${DNS_SERVER_IP:-})
      DNS_DOMAIN=$(yaml-quote ${DNS_DOMAIN:-})
      MASTER_IP=$(yaml-quote ${MASTER_INTERNAL_IP})
      MINION_IP_RANGE=$(yaml-quote ${MINION_IP_RANGE})
      MINION_IP=$(yaml-quote ${MINION_PRIVATE_IP})
      KUBELET_TOKEN=$(yaml-quote ${KUBELET_TOKEN:-})
      KUBE_PROXY_TOKEN=$(yaml-quote ${KUBE_PROXY_TOKEN:-})
      KUBE_BEARER_TOKEN=$(yaml-quote ${KUBELET_TOKEN:-})
      KUBERNETES_CONTAINER_RUNTIME=$(yaml-quote ${CONTAINER_RUNTIME})
      RKT_VERSION=$(yaml-quote ${RKT_VERSION})
EOF
}
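Since node.yaml ends with the write_files entry for /etc/kube-env, the quoted lines emitted above land inside that entry's content block, so the tail of the generated user-data looks roughly like this (values illustrative):

  - path: /etc/kube-env
    permissions: 0644
    owner: root
    content: |
      ENV_TIMESTAMP='2015-05-13T20:39:22+0000'
      MASTER_IP='172.20.0.9'
      KUBE_BEARER_TOKEN='<kubelet token>'
      KUBERNETES_CONTAINER_RUNTIME='docker'
      RKT_VERSION='0.5.5'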
# No real health probe for CoreOS minions yet; simply report success.
function check-minion() {
  echo "working"
}

# Quote a value as a YAML single-quoted scalar, doubling embedded single quotes.
function yaml-quote {
  echo "'$(echo "${@}" | sed -e "s/'/''/g")'"
}
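For instance, yaml-quote wraps a value in single quotes so it survives as a YAML scalar:

$ yaml-quote "rkt's metadata service"
'rkt''s metadata service'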

View File

@@ -46,9 +46,8 @@ fi
# Generate and distribute a shared secret (bearer token) to
# apiserver and the nodes so that kubelet and kube-proxy can
# authenticate to apiserver.
# This works on CoreOS, so it should work on a lot of distros.
kubelet_token=$(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null)
kube_proxy_token=$(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null)
kubelet_token=$KUBELET_TOKEN
kube_proxy_token=$KUBE_PROXY_TOKEN
# Make a list of tokens and usernames to be pushed to the apiserver
mkdir -p /srv/salt-overlay/salt/kube-apiserver

View File

@@ -0,0 +1,50 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions for Ubuntu.
function detect-minion-image() {
  if [[ -z "${KUBE_MINION_IMAGE-}" ]]; then
    detect-image
    KUBE_MINION_IMAGE=$AWS_IMAGE
  fi
}

function generate-minion-user-data {
  i=$1
  # Emitted to stdout and passed to the AMI as a startup script via the
  # user-data field. Requires a compatible AMI.
  echo "#! /bin/bash"
  echo "SALT_MASTER='${MASTER_INTERNAL_IP}'"
  echo "MINION_IP_RANGE='${MINION_IP_RANGES[$i]}'"
  echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS:-}'"
  grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/common.sh"
  grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/format-disks.sh"
  grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/salt-minion.sh"
}

function check-minion() {
  local minion_name=$1
  local minion_ip=$2
  local output=$(ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ubuntu@$minion_ip sudo docker ps -a 2>/dev/null)
  if [[ -z "${output}" ]]; then
    # Docker is not answering yet; try to start it and report failure for this attempt.
    ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ubuntu@$minion_ip sudo service docker start > $LOG 2>&1
    echo "not working yet"
  else
    echo "working"
  fi
}
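Taken together with the CoreOS version above, each distro directory is expected to provide the same three hooks for kube-up (a summary sketch, not new code in this change):

# cluster/aws/<distro>/util.sh must define:
detect-minion-image            # sets KUBE_MINION_IMAGE (an AMI id) when unset
generate-minion-user-data i    # prints user-data for minion $i to stdout
check-minion name ip           # prints "working" once the minion is healthy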

View File

@@ -22,6 +22,17 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/aws/${KUBE_CONFIG_FILE-"config-default.sh"}"
source "${KUBE_ROOT}/cluster/common.sh"
case "${KUBE_OS_DISTRIBUTION}" in
ubuntu|coreos)
echo "Starting cluster using os distro: ${KUBE_OS_DISTRIBUTION}" >&2
source "${KUBE_ROOT}/cluster/aws/${KUBE_OS_DISTRIBUTION}/util.sh"
;;
*)
echo "Cannot start cluster using os distro: ${KUBE_OS_DISTRIBUTION}" >&2
exit 2
;;
esac
# ${ZONE%?} drops the final character, turning a zone such as us-west-2a into the region us-west-2.
AWS_REGION=${ZONE%?}
@@ -366,7 +377,13 @@ function wait-for-instance-running {
}
function kube-up {
get-tokens
detect-image
detect-minion-image
find-release-tars
upload-server-tars
ensure-temp-dir
@@ -379,8 +396,6 @@ function kube-up {
ssh-keygen -f "$AWS_SSH_KEY" -N ''
fi
detect-image
$AWS_CMD import-key-pair --key-name kubernetes --public-key-material "file://$AWS_SSH_KEY.pub" > $LOG 2>&1 || true
VPC_ID=$(get_vpc_id)
@@ -439,7 +454,6 @@ function kube-up {
SEC_GROUP_ID=$($AWS_CMD create-security-group --group-name kubernetes-sec-group --description kubernetes-sec-group --vpc-id $VPC_ID | json_val '["GroupId"]')
$AWS_CMD authorize-security-group-ingress --group-id $SEC_GROUP_ID --protocol -1 --port all --cidr 0.0.0.0/0 > $LOG
fi
(
# We pipe this to the ami as a startup script in the user-data field. Requires a compatible ami
echo "#! /bin/bash"
@@ -466,6 +480,8 @@ function kube-up {
echo "readonly DNS_DOMAIN='${DNS_DOMAIN:-}'"
echo "readonly ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'"
echo "readonly MASTER_IP_RANGE='${MASTER_IP_RANGE:-}'"
echo "readonly KUBELET_TOKEN='${KUBELET_TOKEN}'"
echo "readonly KUBE_PROXY_TOKEN='${KUBE_PROXY_TOKEN}'"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/common.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/format-disks.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/create-dynamic-salt-files.sh"
@@ -546,16 +562,7 @@ function kube-up {
MINION_IDS=()
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
echo "Starting Minion (${MINION_NAMES[$i]})"
(
# We pipe this to the ami as a startup script in the user-data field. Requires a compatible ami
echo "#! /bin/bash"
echo "SALT_MASTER='${MASTER_INTERNAL_IP}'"
echo "MINION_IP_RANGE='${MINION_IP_RANGES[$i]}'"
echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS:-}'"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/common.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/format-disks.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/salt-minion.sh"
) > "${KUBE_TEMP}/minion-start-${i}.sh"
generate-minion-user-data $i > "${KUBE_TEMP}/minion-user-data-${i}"
local public_ip_option
if [[ "${ENABLE_MINION_PUBLIC_IP}" == "true" ]]; then
@@ -565,7 +572,7 @@
fi
minion_id=$($AWS_CMD run-instances \
--image-id $AWS_IMAGE \
--image-id $KUBE_MINION_IMAGE \
--iam-instance-profile Name=$IAM_PROFILE_MINION \
--instance-type $MINION_SIZE \
--subnet-id $SUBNET_ID \
@@ -573,7 +580,7 @@
--key-name kubernetes \
--security-group-ids $SEC_GROUP_ID \
${public_ip_option} \
--user-data file://${KUBE_TEMP}/minion-start-${i}.sh | json_val '["Instances"][0]["InstanceId"]')
--user-data "file://${KUBE_TEMP}/minion-user-data-${i}" | json_val '["Instances"][0]["InstanceId"]')
add-tag $minion_id Name ${MINION_NAMES[$i]}
add-tag $minion_id Role $MINION_TAG
@@ -669,31 +676,19 @@ function kube-up {
local minion_name=${MINION_NAMES[$i]}
local minion_ip=${KUBE_MINION_IP_ADDRESSES[$i]}
echo -n Attempt "$(($attempt+1))" to check Docker on node "${minion_name} @ ${minion_ip}" ...
local output=$(ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ubuntu@$minion_ip sudo docker ps -a 2>/dev/null)
if [[ -z "${output}" ]]; then
local output=`check-minion ${minion_name} ${minion_ip}`
echo $output
if [[ "${output}" != "working" ]]; then
if (( attempt > 9 )); then
echo
echo -e "${color_red}Docker failed to install on node ${minion_name}. Your cluster is unlikely" >&2
echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2
echo -e "cluster. (sorry!)${color_norm}" >&2
echo -e "Your cluster is unlikely to work correctly." >&2
echo "Please run ./cluster/kube-down.sh and re-create the" >&2
echo -e "cluster. (sorry!)" >&2
exit 1
fi
# TODO: Reintroduce this (where does this container come from?)
# elif [[ "${output}" != *"kubernetes/pause"* ]]; then
# if (( attempt > 9 )); then
# echo
# echo -e "${color_red}Failed to observe kubernetes/pause on node ${minion_name}. Your cluster is unlikely" >&2
# echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2
# echo -e "cluster. (sorry!)${color_norm}" >&2
# exit 1
# fi
else
echo -e " ${color_green}[working]${color_norm}"
break
fi
echo -e " ${color_yellow}[not working yet]${color_norm}"
# Start Docker, in case it failed to start.
ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ubuntu@$minion_ip sudo service docker start > $LOG 2>&1
attempt=$(($attempt+1))
sleep 30
done
@@ -893,3 +888,8 @@ function prepare-e2e() {
# Note: we can't print anything here, or else the test tools will break with the extra output
return
}
function get-tokens() {
  KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
  KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
}
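Each pipeline reads 128 random bytes, base64-encodes them, strips '=', '+' and '/', and keeps the first 32 characters, yielding a 32-character URL-safe token. A rough sketch of how the shared secrets now flow (comments only, grounded in the hunks above):

get-tokens   # sets KUBELET_TOKEN and KUBE_PROXY_TOKEN at the top of kube-up
# Master: exported into the master user-data as readonly KUBELET_TOKEN and
#         KUBE_PROXY_TOKEN, so the master-side template reuses them
#         (kubelet_token=$KUBELET_TOKEN above) instead of generating new ones.
# Minion: embedded by generate-minion-user-data, ending up in /etc/kube-env as
#         KUBELET_TOKEN, KUBE_PROXY_TOKEN and KUBE_BEARER_TOKEN.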