mirror of https://github.com/k3s-io/k3s
Add Rackspace support for dev-build-and-up
parent 407eb785e1
commit 2565164a8d
@@ -0,0 +1,28 @@
#cloud-config

write_files:
  - content: |
      grains:
        roles:
          - kubernetes-master
        cloud: rackspace
    path: /etc/salt/minion.d/grains.conf
  - content: |
      auto_accept: True
    path: /etc/salt/master.d/auto-accept.conf
  - content: |
      reactor:
        - 'salt/minion/*/start':
          - /srv/reactor/start.sls
    path: /etc/salt/master.d/reactor.conf
  - content: |
      master: KUBE_MASTER
    path: /etc/salt/minion.d/master.conf

runcmd:
  - [mkdir, -p, /etc/salt/minion.d]
  - [mkdir, -p, /etc/salt/master.d]
  - [mkdir, -p, /srv/salt/nginx]
  - echo "MASTER_HTPASSWD" > /srv/salt/nginx/htpasswd
  - [bash, /root/masterStart.sh]
  - curl -L http://bootstrap.saltstack.com | sh -s -- -M -X

@@ -0,0 +1,5 @@
#cloud-config

runcmd:
  - [mkdir, -p, /etc/salt/minion.d]
  - [bash, /root/minionStart.sh]

@@ -0,0 +1,38 @@
#!/bin/bash

# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Sane defaults for dev environments. Each of the following variables can easily be
# overridden by setting it as an environment variable ahead of time:
# KUBE_IMAGE, KUBE_MASTER_FLAVOR, KUBE_MINION_FLAVOR, RAX_NUM_MINIONS, NOVA_NETWORK and SSH_KEY_NAME
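#
# For example, to shrink a dev cluster you might export a couple of these before
# running hack/rackspace/dev-build-and-up.sh (a sketch; performance1-2 is only an
# illustrative Rackspace flavor name):
#
#   export RAX_NUM_MINIONS=2
#   export KUBE_MINION_FLAVOR=performance1-2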

# Shared
KUBE_IMAGE="${KUBE_IMAGE-255df5fb-e3d4-45a3-9a07-c976debf7c14}" # Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)
SSH_KEY_NAME="${SSH_KEY_NAME-id_kubernetes}"
NOVA_NETWORK_LABEL="kubernetes-pool-net"
NOVA_NETWORK_CIDR="${NOVA_NETWORK-192.168.0.0/24}"
INSTANCE_PREFIX="kubernetes"

# Master
KUBE_MASTER_FLAVOR="${KUBE_MASTER_FLAVOR-performance1-1}"
MASTER_NAME="${INSTANCE_PREFIX}-master"
MASTER_TAG="tag=${INSTANCE_PREFIX}-master"

# Minion
KUBE_MINION_FLAVOR="${KUBE_MINION_FLAVOR-performance1-1}"
RAX_NUM_MINIONS="${RAX_NUM_MINIONS-4}"
MINION_TAG="tag=${INSTANCE_PREFIX}-minion"
MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${RAX_NUM_MINIONS}}))
KUBE_NETWORK=($(eval echo "10.240.{1..${RAX_NUM_MINIONS}}.0/24"))

@@ -0,0 +1,36 @@
#!/bin/bash

# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Bring up a Kubernetes cluster.
#
# If the full release name (gs://<bucket>/<release>) is passed in then we take
# that directly. If not then we assume we are doing development stuff and take
# the defaults in the release config.

# exit on any error
set -e

source $(dirname $0)/../kube-env.sh
source $(dirname $0)/../$KUBERNETES_PROVIDER/util.sh

echo "Starting cluster using provider: $KUBERNETES_PROVIDER"

verify-prereqs
kube-up

source $(dirname $0)/validate-cluster.sh

echo "Done"

@@ -0,0 +1,31 @@
#!/bin/bash

# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Download and install release

# This script assumes that the environment variable MASTER_RELEASE_TAR contains
# the release tar to download and unpack. It is meant to be pushed to the
# master and run.

echo "Downloading release ($OBJECT_URL)"
wget $OBJECT_URL -O master-release.tgz

echo "Unpacking release"
rm -rf master-release || false
tar xzf master-release.tgz

echo "Running release install script"
sudo master-release/src/scripts/master-release-install.sh

@@ -0,0 +1,50 @@
#!/bin/bash

# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Prepopulate the name of the Master
mkdir -p /etc/salt/minion.d
echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf

# Turn on debugging for salt-minion
# echo "DAEMON_ARGS=\"\$DAEMON_ARGS --log-file-level=debug\"" > /etc/default/salt-minion

MINION_IP=$(ip -f inet a sh dev eth2 | grep -i inet | awk '{print $2}' | cut -d / -f 1)
# Our minions will have a pool role to distinguish them from the master.
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
  roles:
    - kubernetes-pool
  cbr-cidr: $MINION_IP_RANGE
  minion_ip: $MINION_IP
EOF

# Move all of this to salt
apt-get update
apt-get install bridge-utils -y
brctl addbr cbr0
ip link set dev cbr0 up
# for loop to add routes of other minions
for (( i=1; i<=${NUM_MINIONS}; i++ )); do
  ip r a 10.240.$i.0/24 dev cbr0
done
ip link add vxlan42 type vxlan id 42 group 239.0.0.42 dev eth2
brctl addif cbr0 vxlan42
# Install Salt
#
# We specify -X to avoid a race condition that can cause minion failure to
# install. See https://github.com/saltstack/salt-bootstrap/issues/270
curl -L http://bootstrap.saltstack.com | sh -s -- -X
ip link set vxlan42 up

@@ -0,0 +1,268 @@
#!/bin/bash

# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# A library of helper functions for deploying on Rackspace

# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.
source $(dirname ${BASH_SOURCE})/${KUBE_CONFIG_FILE-"config-default.sh"}

verify-prereqs() {
  # Make sure that prerequisites are installed.
  for x in nova; do
    if [ "$(which $x)" == "" ]; then
      echo "cluster/rackspace/util.sh: Can't find $x in PATH, please fix and retry."
      exit 1
    fi
  done
}

rax-ssh-key() {
  if [ ! -f $HOME/.ssh/${SSH_KEY_NAME} ]; then
    echo "cluster/rackspace/util.sh: Generating SSH KEY ${HOME}/.ssh/${SSH_KEY_NAME}"
    ssh-keygen -f ${HOME}/.ssh/${SSH_KEY_NAME} -N '' > /dev/null
  fi

  if ! $(nova keypair-list | grep $SSH_KEY_NAME > /dev/null 2>&1); then
    echo "cluster/rackspace/util.sh: Uploading key to Rackspace:"
    echo -e "\tnova keypair-add ${SSH_KEY_NAME} --pub-key ${HOME}/.ssh/${SSH_KEY_NAME}.pub"
    nova keypair-add ${SSH_KEY_NAME} --pub-key ${HOME}/.ssh/${SSH_KEY_NAME}.pub > /dev/null 2>&1
  else
    echo "cluster/rackspace/util.sh: SSH key ${SSH_KEY_NAME}.pub already uploaded"
  fi
}

find-object-url() {
  if [ -n "$1" ]; then
    CONTAINER=$1
  else
    local RELEASE_CONFIG_SCRIPT=$(dirname $0)/../release/rackspace/config.sh
    if [ -f $(dirname $0)/../release/rackspace/config.sh ]; then
      . $RELEASE_CONFIG_SCRIPT
    fi
  fi

  TEMP_URL=$(swiftly -A ${OS_AUTH_URL} -U ${OS_USERNAME} -K ${OS_PASSWORD} tempurl GET $1/$2)
  echo "cluster/rackspace/util.sh: Object temp URL:"
  echo -e "\t${TEMP_URL}"

}

rax-boot-master() {

  (
    echo "#! /bin/bash"
    echo "OBJECT_URL=\"${TEMP_URL}\""
    echo "MASTER_HTPASSWD=${HTPASSWD}"
    grep -v "^#" $(dirname $0)/templates/download-release.sh
  ) > ${KUBE_TEMP}/masterStart.sh

  # Copy cloud-config to KUBE_TEMP and work some sed magic
  sed -e "s/KUBE_MASTER/$MASTER_NAME/" \
      -e "s/MASTER_HTPASSWD/$HTPASSWD/" \
      $(dirname $0)/cloud-config/master-cloud-config.yaml > $KUBE_TEMP/master-cloud-config.yaml

  MASTER_BOOT_CMD="nova boot \
--key-name ${SSH_KEY_NAME} \
--flavor ${KUBE_MASTER_FLAVOR} \
--image ${KUBE_IMAGE} \
--meta ${MASTER_TAG} \
--user-data ${KUBE_TEMP}/master-cloud-config.yaml \
--config-drive true \
--file /root/masterStart.sh=${KUBE_TEMP}/masterStart.sh \
--nic net-id=${NETWORK_UUID} \
${MASTER_NAME}"

  echo "cluster/rackspace/util.sh: Booting ${MASTER_NAME} with following command:"
  echo -e "\t$MASTER_BOOT_CMD"
  $MASTER_BOOT_CMD
}

rax-boot-minions() {

  cp $(dirname $0)/cloud-config/minion-cloud-config.yaml \
     ${KUBE_TEMP}/minion-cloud-config.yaml

  for (( i=0; i<${#MINION_NAMES[@]}; i++ )); do

    (
      echo "#! /bin/bash"
      echo "MASTER_NAME=${MASTER_IP}"
      echo "MINION_IP_RANGE=${KUBE_NETWORK[$i]}"
      echo "NUM_MINIONS=${RAX_NUM_MINIONS}"
      grep -v "^#" $(dirname $0)/templates/salt-minion.sh
    ) > ${KUBE_TEMP}/minionStart${i}.sh

    MINION_BOOT_CMD="nova boot \
--key-name ${SSH_KEY_NAME} \
--flavor ${KUBE_MINION_FLAVOR} \
--image ${KUBE_IMAGE} \
--meta ${MINION_TAG} \
--user-data ${KUBE_TEMP}/minion-cloud-config.yaml \
--config-drive true \
--nic net-id=${NETWORK_UUID} \
--file=/root/minionStart.sh=${KUBE_TEMP}/minionStart${i}.sh \
${MINION_NAMES[$i]}"

    echo "cluster/rackspace/util.sh: Booting ${MINION_NAMES[$i]} with following command:"
    echo -e "\t$MINION_BOOT_CMD"
    $MINION_BOOT_CMD
  done
}

rax-nova-network() {
  if ! $(nova network-list | grep $NOVA_NETWORK_LABEL > /dev/null 2>&1); then
    SAFE_CIDR=$(echo $NOVA_NETWORK_CIDR | tr -d '\\')
    NETWORK_CREATE_CMD="nova network-create $NOVA_NETWORK_LABEL $SAFE_CIDR"

    echo "cluster/rackspace/util.sh: Creating cloud network with following command:"
    echo -e "\t${NETWORK_CREATE_CMD}"

    $NETWORK_CREATE_CMD
  else
    echo "cluster/rackspace/util.sh: Using existing cloud network $NOVA_NETWORK_LABEL"
  fi
}

detect-minions() {
  KUBE_MINION_IP_ADDRESSES=()
  for (( i=0; i<${#MINION_NAMES[@]}; i++ )); do
    local minion_ip=$(nova show --minimal ${MINION_NAMES[$i]} \
      | grep accessIPv4 | awk '{print $4}')
    echo "cluster/rackspace/util.sh: Found ${MINION_NAMES[$i]} at ${minion_ip}"
    KUBE_MINION_IP_ADDRESSES+=("${minion_ip}")
  done
  if [ -z "$KUBE_MINION_IP_ADDRESSES" ]; then
    echo "cluster/rackspace/util.sh: Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'"
    exit 1
  fi

}

detect-master() {
  KUBE_MASTER=${MASTER_NAME}

  KUBE_MASTER_IP=$(nova show $KUBE_MASTER --minimal | grep accessIPv4 | awk '{print $4}')
}

# $1 should be the network you would like to get an IP address for
detect-master-nova-net() {
  KUBE_MASTER=${MASTER_NAME}

  MASTER_IP=$(nova show $KUBE_MASTER --minimal | grep $1 | awk '{print $5}')
}

kube-up() {

  SCRIPT_DIR=$(CDPATH="" cd $(dirname $0); pwd)
  source $(dirname $0)/../gce/util.sh
  source $(dirname $0)/util.sh
  source $(dirname $0)/../../release/rackspace/config.sh

  # Find the release to use. Generally it will be passed when doing a 'prod'
  # install and will default to the release/config.sh version when doing a
  # developer up.
  find-object-url $CONTAINER output/release/$TAR_FILE

  # Create a temp directory to hold scripts that will be uploaded to master/minions
  KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
  trap "rm -rf ${KUBE_TEMP}" EXIT

  get-password
  echo "cluster/rackspace/util.sh: Using password: $user:$passwd"
  python $(dirname $0)/../../third_party/htpasswd/htpasswd.py -b -c ${KUBE_TEMP}/htpasswd $user $passwd
  HTPASSWD=$(cat ${KUBE_TEMP}/htpasswd)

  rax-nova-network
  NETWORK_UUID=$(nova network-list | grep -i ${NOVA_NETWORK_LABEL} | awk '{print $2}')

  # create and upload ssh key if necessary
  rax-ssh-key

  echo "cluster/rackspace/util.sh: Starting Cloud Servers"
  rax-boot-master

  # a bit of a hack to wait until the master has an IP from the extra network
  echo "cluster/rackspace/util.sh: sleeping 30 seconds"
  sleep 30

  detect-master-nova-net $NOVA_NETWORK_LABEL
  rax-boot-minions

  FAIL=0
  for job in `jobs -p`
  do
    wait $job || let "FAIL+=1"
  done
  if (( $FAIL != 0 )); then
    echo "${FAIL} commands failed. Exiting."
    exit 2
  fi

  detect-master > /dev/null

  echo "Waiting for cluster initialization."
  echo
  echo "  This will continually check to see if the API for kubernetes is reachable."
  echo "  This might loop forever if there was some uncaught error during start"
  echo "  up."
  echo

  # This will fail until apiserver salt is updated
  #until $(curl --insecure --user ${user}:${passwd} --max-time 5 \
  #  --fail --output /dev/null --silent https://${KUBE_MASTER_IP}/api/v1beta1/pods); do
  #  printf "."
  #  sleep 2
  #done

  echo "Kubernetes cluster created."
  echo "Sanity checking cluster..."

  sleep 5

  # Don't bail on errors, we want to be able to print some info.
  set +e
  sleep 45

  #detect-minions > /dev/null
  detect-minions

  # This will fail until apiserver salt is updated
  # Basic sanity checking
  #for (( i=0; i<${#KUBE_MINION_IP_ADDRESSES[@]}; i++ )); do
  #
  #  # Make sure the kubelet is running
  #  if [ "$(curl --insecure --user ${user}:${passwd} https://${KUBE_MASTER_IP}/proxy/minion/${KUBE_MINION_IP_ADDRESSES[$i]}/healthz)" != "ok" ]; then
  #    echo "Kubelet failed to install on ${KUBE_MINION_IP_ADDRESSES[$i]}, your cluster is unlikely to work correctly"
  #    echo "Please run ./cluster/kube-down.sh and re-create the cluster. (sorry!)"
  #    exit 1
  #  else
  #    echo "Kubelet is successfully installed on ${MINION_NAMES[$i]}"
  #
  #  fi
  #
  #done
  echo "All minions may not be online yet; this is okay."
  echo
  echo "Kubernetes cluster is running. Access the master at:"
  echo
  echo "  https://${user}:${passwd}@${KUBE_MASTER_IP}"
  echo
  echo "Security note: The server above uses a self-signed certificate. This is"
  echo "  subject to \"Man in the middle\" type attacks."
}

@@ -27,6 +27,10 @@
{% if grains.cloud == 'azure' %}
  MACHINES="{{ salt['mine.get']('roles:kubernetes-pool', 'grains.items', expr_form='grain').values()|join(',', attribute='hostnamef') }}"
  {% set machines = "-machines $MACHINES" %}
{% elif grains.cloud is defined and grains.cloud == 'rackspace' %}
  MACHINES="{{ salt['mine.get']('roles:kubernetes-pool', 'grains.items', expr_form='grain').values()|map(attribute='ip_interfaces.eth2')|map('first')|join(',') }}"
  {% set machines = "-machines $MACHINES" %}
  {% set minion_regexp = "" %}
{% endif %}
{% if grains.cloud == 'vsphere' %}
  # Collect IPs of minions as machines list.

@@ -0,0 +1,32 @@
# Rackspace

In general, the dev-build-and-up.sh workflow for Rackspace is similar to GCE. The specific implementation differs mainly due to network differences between the providers.

## Prerequisites
1. You need to have both `nova` and `swiftly` installed. It's recommended to use a python virtualenv to install these packages into.
2. Make sure you have the appropriate environment variables set to interact with the OpenStack APIs (see the example exports after this list). See [Rackspace Documentation](http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/section_gs_install_nova.html) for more details.
3. You can test this by running `nova list` to make sure you're authenticated successfully.
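
For example, a minimal set of exports looks like the following. The `OS_*` names are the ones `cluster/rackspace/util.sh` and `release/rackspace/release.sh` pass to `swiftly`; the values shown are placeholders, and `nova` may expect additional variables as described in the Rackspace documentation above.

```bash
# Placeholder credentials -- substitute your own.
export OS_AUTH_URL=https://identity.api.rackspacecloud.com/v2.0/
export OS_USERNAME=your-rackspace-username
export OS_PASSWORD=your-api-key

# Quick sanity check that authentication works.
nova list
```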

## Provider: Rackspace
- To use Rackspace as the provider, set the KUBERNETES_PROVIDER ENV variable:
  `export KUBERNETES_PROVIDER=rackspace` and run the `hack/rackspace/dev-build-and-up.sh` script.

## Release
1. The kubernetes binaries will be built via the common build scripts in `release/`. There is a specific `release/rackspace` directory with scripts for the following steps:
2. A Cloud Files container will be created via the `swiftly` CLI and a temp URL will be enabled on the object (see the `swiftly` sketch after this list).
3. The built `master-release.tgz` will be uploaded to this container and the URL will be passed to master/minion nodes when booted.
- NOTE: RELEASE tagging and launch scripts are not used currently.
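
As a rough sketch, the upload and temp URL steps reduce to the `swiftly` calls used by `release/rackspace/release.sh` and `find-object-url` in `cluster/rackspace/util.sh`:

```bash
# Create the container if it doesn't exist, then upload the release tarball.
swiftly -A $OS_AUTH_URL -U $OS_USERNAME -K $OS_PASSWORD put $CONTAINER
swiftly -A $OS_AUTH_URL -U $OS_USERNAME -K $OS_PASSWORD put -i output/release/master-release.tgz \
  $CONTAINER/output/release/master-release.tgz

# Generate a temporary GET URL that the master can wget without credentials.
swiftly -A $OS_AUTH_URL -U $OS_USERNAME -K $OS_PASSWORD tempurl GET $CONTAINER/output/release/master-release.tgz
```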

## Cluster
1. There is a specific `cluster/rackspace` directory with the scripts for the following steps:
2. A cloud network will be created and all instances will be attached to this network. We will connect the master API and minion kubelet service via this network.
3. An SSH key will be created and uploaded if needed. This key must be used to ssh into the machines, since we won't capture the password.
4. A master will be created via the `nova` CLI. A `cloud-config.yaml` is generated and provided as user-data, and a basic `masterStart.sh` is injected as a file and run by cloud-init (see the example boot command after this list).
5. We sleep for 30 seconds, since we need the master's IP address on the cloud network we've created before we can hand it to the minions as their salt master.
6. We then boot as many minions as defined via `$RAX_NUM_MINIONS`. We pass both a `cloud-config.yaml` as well as a `minionStart.sh`. The latter is executed via cloud-init, just like on the master.
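
For reference, the master boot command that `rax-boot-master` in `cluster/rackspace/util.sh` assembles expands to roughly the following; the flavor, image UUID and names are the defaults from `cluster/rackspace/config-default.sh`, and the network UUID is looked up at runtime.

```bash
nova boot \
  --key-name id_kubernetes \
  --flavor performance1-1 \
  --image 255df5fb-e3d4-45a3-9a07-c976debf7c14 \
  --meta tag=kubernetes-master \
  --user-data master-cloud-config.yaml \
  --config-drive true \
  --file /root/masterStart.sh=masterStart.sh \
  --nic net-id=<kubernetes-pool-net-uuid> \
  kubernetes-master
```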

## Some notes:
- The scripts expect `eth2` to be the cloud network that the containers will communicate across.
- `vxlan` is required on the cloud network interface, since cloud networks filter based on MAC address. This is the workaround for the time being (a sketch of the per-minion setup follows this list).
- A Linux image with a kernel recent enough to support `vxlan` is required; Ubuntu 14.04 works.
- A number of the items in `config-default.sh` are overridable via environment variables.
- Routes must be configured on each minion so that containers and kube-proxy are able to locate containers on another system. This is due to the network design in Kubernetes and the MAC address limits on Cloud Networks. Static routes are currently leveraged until we implement a more advanced solution.
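
The per-minion network setup that `cluster/rackspace/templates/salt-minion.sh` performs amounts to the sketch below; the `cbr0` bridge, vxlan id `42`, `eth2` interface and `10.240.x.0/24` ranges are the defaults used by these scripts, and `NUM_MINIONS` is injected by `rax-boot-minions`.

```bash
# Bridge that local containers attach to.
apt-get update && apt-get install -y bridge-utils
brctl addbr cbr0
ip link set dev cbr0 up

# Static routes so containers on the other minions are reachable via the bridge.
for (( i=1; i<=NUM_MINIONS; i++ )); do
  ip route add 10.240.${i}.0/24 dev cbr0
done

# vxlan overlay on the cloud network (eth2) to work around MAC address filtering.
ip link add vxlan42 type vxlan id 42 group 239.0.0.42 dev eth2
brctl addif cbr0 vxlan42
ip link set vxlan42 up
```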

@@ -0,0 +1,34 @@
#!/bin/bash

# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script will build a dev release and bring up a new cluster with that
# release.

# First build the binaries
$(dirname $0)/../build-go.sh
if [ "$?" != "0" ]; then
  exit 1
fi

# Then build a release
$(dirname $0)/../../release/rackspace/release.sh
if [ "$?" != "0" ]; then
  echo "Building the release failed!"
  exit 1
fi

# Now bring a new cluster up with that release.
$(dirname $0)/../../cluster/rackspace/kube-up.sh

@@ -0,0 +1,26 @@
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# A set of Cloud Files defaults describing where Kubernetes releases will be uploaded to

# Make sure swiftly is installed and available
if [ "$(which swiftly)" == "" ]; then
  echo "release/rackspace/config.sh: Couldn't find swiftly in PATH. Please install swiftly:"
  echo -e "\tpip install swiftly"
  exit 1
fi

CONTAINER="kubernetes-releases-${OS_USERNAME}"

TAR_FILE=master-release.tgz

@@ -0,0 +1,49 @@
#!/bin/bash

# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script will build and release Kubernetes.
#
# The main parameters to this script come from the config.sh file. This is set
# up by default for development releases. Feel free to edit it or override some
# of the variables there.

# exit on any error
set -e

SCRIPT_DIR=$(CDPATH="" cd $(dirname $0); pwd)

source $SCRIPT_DIR/config.sh
KUBE_REPO_ROOT="$(cd "$(dirname "$0")/../../" && pwd -P)"

source "${KUBE_REPO_ROOT}/cluster/kube-env.sh"
source $SCRIPT_DIR/../../cluster/rackspace/${KUBE_CONFIG_FILE-"config-default.sh"}
source $SCRIPT_DIR/../../cluster/rackspace/util.sh

$SCRIPT_DIR/../build-release.sh $INSTANCE_PREFIX

# Copy everything up to swift object store
echo "release/rackspace/release.sh: Uploading to Cloud Files"
if ! swiftly -A $OS_AUTH_URL -U $OS_USERNAME -K $OS_PASSWORD get $CONTAINER > /dev/null 2>&1 ; then
  echo "release/rackspace/release.sh: Container doesn't exist. Creating..."
  swiftly -A $OS_AUTH_URL -U $OS_USERNAME -K $OS_PASSWORD put $CONTAINER > /dev/null 2>&1

fi

for x in master-release.tgz; do
  swiftly -A $OS_AUTH_URL -U $OS_USERNAME -K $OS_PASSWORD put -i output/release/$x $CONTAINER/output/release/$x > /dev/null 2>&1
done

echo "Release pushed."