Merge pull request #9303 from doublerr/rackspace/18.1

remove kube-register and update config-default
Quinton Hoole 2015-06-05 11:58:05 -07:00
commit 9ebeea1d83
7 changed files with 38 additions and 37 deletions


@@ -19,14 +19,14 @@ create_token() {
echo $(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null)
}
- get_token_from_csv() {
+ get_tokens_from_csv() {
KUBE_BEARER_TOKEN=$(awk -F, '/admin/ {print $1}' ${KUBE_TEMP}/${1}_tokens.csv)
KUBELET_TOKEN=$(awk -F, '/kubelet/ {print $1}' ${KUBE_TEMP}/${1}_tokens.csv)
KUBE_PROXY_TOKEN=$(awk -F, '/kube_proxy/ {print $1}' ${KUBE_TEMP}/${1}_tokens.csv)
}
generate_admin_token() {
echo "$(create_token),admin,admin" >> ${KUBE_TEMP}/${1}_tokens.csv
echo "$(create_token),admin,admin" >> ${KUBE_TEMP}/known_tokens.csv
}
# Creates a csv file each time called (i.e. one per kubelet).
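
For reference, each row in these CSVs has the shape `token,user,uid`. A minimal sketch of the write/read round trip, reusing the helpers above against a throwaway temp dir (the `known_tokens.csv` name is taken from `generate_admin_token`):

    # Exercise the token helpers in isolation.
    KUBE_TEMP=$(mktemp -d)

    create_token() {
      echo $(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null)
    }

    # One row per identity: token,user,uid
    echo "$(create_token),admin,admin"           >> ${KUBE_TEMP}/known_tokens.csv
    echo "$(create_token),kubelet,kubelet"       >> ${KUBE_TEMP}/known_tokens.csv
    echo "$(create_token),kube_proxy,kube_proxy" >> ${KUBE_TEMP}/known_tokens.csv

    # Same extraction the script performs: first CSV field of the matching row.
    KUBELET_TOKEN=$(awk -F, '/kubelet/ {print $1}' ${KUBE_TEMP}/known_tokens.csv)
    echo "kubelet token: ${KUBELET_TOKEN}"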


@@ -85,7 +85,6 @@ coreos:
[Service]
ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/server/bin/kube-apiserver /opt/bin/kube-apiserver
ExecStartPre=/usr/bin/mkdir -p /var/lib/kube-apiserver
- ExecStartPre=/usr/bin/cp /media/configdrive/openstack/content/0000 /var/lib/kube-apiserver/known_tokens.csv
ExecStart=/opt/bin/kube-apiserver \
--address=127.0.0.1 \
--cloud_provider=rackspace \
@@ -147,26 +146,6 @@ coreos:
--master=127.0.0.1:8080
Restart=always
RestartSec=5
- - name: kube-register.service
- command: start
- content: |
- [Unit]
- Description=Kubernetes Registration Service
- Documentation=https://github.com/kelseyhightower/kube-register
- Requires=kube-apiserver.service
- After=kube-apiserver.service
- Requires=fleet.service
- After=fleet.service
- [Service]
- ExecStartPre=/usr/bin/curl -L -o /opt/bin/kube-register -z /opt/bin/kube-register https://github.com/kelseyhightower/kube-register/releases/download/v0.0.3/kube-register-0.0.3-linux-amd64
- ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-register
- ExecStart=/opt/bin/kube-register \
- --api-endpoint=http://127.0.0.1:8080 \
- --fleet-endpoint=unix:///var/run/fleet.sock \
- --healthz-port=10248 \
- --metadata=kubernetes_role=minion
- Restart=always
- RestartSec=10
# Running the nginx service with --net="host" is a necessary evil until all k8s services run in Docker.
- name: kubernetes-nginx.service
command: start
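
With the kube-register unit removed above, nodes are expected to register themselves through the kubelet's own API-server connection rather than via fleet metadata. A quick smoke test, with ports taken from the units above and `NODE_IP` as a placeholder:

    # API server liveness; kube-apiserver binds 127.0.0.1:8080 above.
    curl -fsS http://127.0.0.1:8080/healthz && echo

    # Kubelet health on a node; 10248 is the healthz port the removed
    # kube-register unit polled, so it should still answer.
    curl -fsS http://${NODE_IP}:10248/healthz && echo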


@@ -26,13 +26,30 @@ write_files:
else
echo "kubernetes release found. Skipping download."
fi
- - path: /run/setup-auth.sh
+ - path: /run/config-kubelet.sh
permissions: 0755
content: |
#!/bin/bash -e
set -x
/usr/bin/mkdir -p /var/lib/kubelet
- /bin/echo "{\"BearerToken\": \"KUBELET_TOKEN\", \"Insecure\": true }" > /var/lib/kubelet/kubernetes_auth
+ cat > /var/lib/kubelet/kubeconfig << EOF
+ apiVersion: v1
+ kind: Config
+ users:
+ - name: kubelet
+ user:
+ token: KUBELET_TOKEN
+ clusters:
+ - name: local
+ cluster:
+ insecure-skip-tls-verify: true
+ contexts:
+ - context:
+ cluster: local
+ user: kubelet
+ name: service-account-context
+ current-context: service-account-context
+ EOF
- path: /run/config-kube-proxy.sh
permissions: 0755
content: |
@@ -128,7 +145,7 @@ coreos:
Requires=download-release.service
[Service]
EnvironmentFile=/run/kubelet/apiservers.env
- ExecStartPre=/run/setup-auth.sh
+ ExecStartPre=/run/config-kubelet.sh
ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/server/bin/kubelet /opt/bin/kubelet
ExecStart=/opt/bin/kubelet \
--address=$private_ipv4 \
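
The `KUBELET_TOKEN` placeholder in the heredoc above is stamped in by `sed` when the per-node cloud-config is generated (see the `rax-boot-minions` hunk below). On a booted node the rendered file can be sanity-checked with:

    # Fail loudly if the placeholder survived templating; otherwise show the token line.
    if sudo grep -q 'KUBELET_TOKEN' /var/lib/kubelet/kubeconfig; then
      echo 'KUBELET_TOKEN placeholder was not substituted' >&2
    else
      sudo grep 'token:' /var/lib/kubelet/kubeconfig
    fi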


@@ -19,22 +19,22 @@
# KUBE_IMAGE, KUBE_MASTER_FLAVOR, KUBE_MINION_FLAVOR, NUM_MINIONS, NOVA_NETWORK and SSH_KEY_NAME
# Shared
KUBE_IMAGE="${KUBE_IMAGE-2c210e44-5149-4ae3-83d6-f855a4d28490}" # CoreOS(Beta)
KUBE_IMAGE="${KUBE_IMAGE-f2a71670-ced3-4274-80b6-0efcd0f8f91b}" # CoreOS(Beta)
SSH_KEY_NAME="${SSH_KEY_NAME-id_kubernetes}"
NOVA_NETWORK_LABEL="kubernetes-pool-net"
NOVA_NETWORK_CIDR="${NOVA_NETWORK-192.168.0.0/24}"
INSTANCE_PREFIX="kubernetes"
# Master
KUBE_MASTER_FLAVOR="${KUBE_MASTER_FLAVOR-performance1-1}"
KUBE_MASTER_FLAVOR="${KUBE_MASTER_FLAVOR-general1-1}"
MASTER_NAME="${INSTANCE_PREFIX}-master"
MASTER_TAG="tags=${INSTANCE_PREFIX}-master"
# Minion
KUBE_MINION_FLAVOR="${KUBE_MINION_FLAVOR-performance1-2}"
RAX_NUM_MINIONS="${RAX_NUM_MINIONS-4}"
KUBE_MINION_FLAVOR="${KUBE_MINION_FLAVOR-general1-2}"
NUM_MINIONS="${NUM_MINIONS-4}"
MINION_TAG="tags=${INSTANCE_PREFIX}-minion"
- MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${RAX_NUM_MINIONS}}))
+ MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}}))
KUBE_NETWORK="10.240.0.0/16"
SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
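
These defaults all use the `${VAR-default}` form, so they apply only when the variable is unset and anything exported in the environment wins; note that, unlike `${VAR:-default}`, a set-but-empty variable also counts as an override. For example, to bring up a smaller test cluster (the provider key and entry-point path are assumptions):

    # Override flavor and node count before invoking the cluster scripts.
    export KUBERNETES_PROVIDER=rackspace   # assumed provider key
    export KUBE_MINION_FLAVOR=general1-1
    export NUM_MINIONS=2
    cluster/kube-up.sh                     # assumed entry point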


@@ -180,7 +180,6 @@ rax-boot-master() {
--meta ${MASTER_TAG} \
--meta ETCD=${DISCOVERY_ID} \
--user-data ${KUBE_TEMP}/master-cloud-config.yaml \
- --file /var/lib/kube-apiserver/known_tokens.csv=${KUBE_TEMP}/known_tokens.csv \
--config-drive true \
--nic net-id=${NETWORK_UUID} \
${MASTER_NAME}"
@@ -209,6 +208,7 @@ rax-boot-minions() {
-e "s|INDEX|$((i + 1))|g" \
-e "s|KUBELET_TOKEN|${KUBELET_TOKEN}|" \
-e "s|KUBE_NETWORK|${KUBE_NETWORK}|" \
-e "s|KUBELET_TOKEN|${KUBELET_TOKEN}|" \
-e "s|KUBE_PROXY_TOKEN|${KUBE_PROXY_TOKEN}|" \
-e "s|LOGGING_DESTINATION|${LOGGING_DESTINATION:-}|" \
$(dirname $0)/rackspace/cloud-config/minion-cloud-config.yaml > $KUBE_TEMP/minion-cloud-config-$(($i + 1)).yaml
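
The per-node cloud-config is produced by plain placeholder substitution over a single template. The same pattern in isolation, with an illustrative token value:

    # Stamp a token into a copy of the template, one output file per node.
    KUBELET_TOKEN=abc123                   # normally comes from create_token
    sed -e "s|KUBELET_TOKEN|${KUBELET_TOKEN}|" \
        minion-cloud-config.yaml > minion-cloud-config-1.yaml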
@@ -316,9 +316,16 @@ kube-up() {
prep_known_tokens
rax-boot-master
rax-boot-minions
+ detect-master
+ # TODO: find a better way to get known_tokens.csv onto the master. scp is used instead of nova file injection because the file was too large on a 4-node cluster.
+ scp -o StrictHostKeyChecking=no -i ~/.ssh/${SSH_KEY_NAME} ${KUBE_TEMP}/known_tokens.csv core@${KUBE_MASTER_IP}:known_tokens.csv
+ ssh -o StrictHostKeyChecking=no -i ~/.ssh/${SSH_KEY_NAME} core@${KUBE_MASTER_IP} sudo mv /home/core/known_tokens.csv /var/lib/kube-apiserver/known_tokens.csv
+ ssh -o StrictHostKeyChecking=no -i ~/.ssh/${SSH_KEY_NAME} core@${KUBE_MASTER_IP} sudo chown root:root /var/lib/kube-apiserver/known_tokens.csv
+ ssh -o StrictHostKeyChecking=no -i ~/.ssh/${SSH_KEY_NAME} core@${KUBE_MASTER_IP} sudo systemctl restart kube-apiserver
FAIL=0
for job in `jobs -p`
do
@@ -329,8 +336,6 @@ kube-up() {
exit 2
fi
- detect-master
echo "Waiting for cluster initialization."
echo
echo " This will continually check to see if the API for kubernetes is reachable."


@@ -39,7 +39,7 @@ Docker Multi Node | Flannel| N/A | local | [docs](docker-multino
Local | | | _none_ | [docs](../../docs/getting-started-guides/locally.md) | Community (@preillyme) |
libvirt/KVM | CoreOS | CoreOS | libvirt/KVM | [docs](../../docs/getting-started-guides/libvirt-coreos.md) | Community (@lhuard1A) |
oVirt | | | | [docs](../../docs/getting-started-guides/ovirt.md) | Community (@simon3z) |
- Rackspace | CoreOS | CoreOS | flannel | [docs](../../docs/getting-started-guides/rackspace.md) | Community (@doublerr) | use k8s version 0.16.2
+ Rackspace | CoreOS | CoreOS | flannel | [docs](../../docs/getting-started-guides/rackspace.md) | Community (@doublerr) | use k8s version 0.18.0
*Note*: The above table is ordered by the version tested/used in the notes, followed by support level.


@@ -35,7 +35,7 @@ There is a specific `cluster/rackspace` directory with the scripts for the follo
- flanneld uses this network for next hop routing. These routes allow the containers running on each node to communicate with one another on this private network.
2. An SSH key will be created and uploaded if needed. This key must be used to ssh into the machines, since we won't capture the password.
3. The master server and additional nodes will be created via the `nova` CLI. A `cloud-config.yaml` is generated and provided as user-data with the entire configuration for the systems.
- 4. We then boot as many nodes as defined via `$RAX_NUM_MINIONS`.
+ 4. We then boot as many nodes as defined via `$NUM_MINIONS`.
## Some notes:
- The scripts expect `eth2` to be the cloud network that the containers will communicate across.
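
That expectation is easy to verify on a booted node with standard iproute2 tools; the CIDRs below are the config-default values from the hunk above (192.168.0.0/24 for the cloud network, 10.240.0.0/16 for containers):

    # eth2 should hold an address on the cloud network (192.168.0.0/24 by default).
    ip -4 addr show eth2

    # flanneld installs next-hop routes for the container network (10.240.0.0/16).
    ip route show | grep -F '10.240.'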