mirror of https://github.com/k3s-io/k3s
Merge pull request #45032 from everett-toews/rm-rackspace
Automatic merge from submit-queue Remove the Rackspace provider **What this PR does / why we need it**: To aid the effort of moving providers out of the cluster dir, I'm removing Rackspace and leaving behind a README.md simply as a placeholder until the entire dir is deleted. **Which issue this PR fixes** Fixes #6962 **Release note**: ```release-note Deployment of Kubernetes clusters on Rackspace using the in-tree bash deployment (i.e. cluster/kube-up.sh or get-kube.sh) is obsolete and support has been removed.```pull/6/head
commit
0a0b6889e5
|
@ -1,36 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Create generic token following GCE standard.
# Outputs: a 32-character random token (base64 alphabet minus "=+/") on stdout.
create_token() {
  # Read random bytes straight into base64 (no useless `cat`), strip the
  # characters that are unsafe in CSV/URLs, keep the first 32 characters.
  local token
  token=$(base64 < /dev/urandom | tr -d "=+/" | dd bs=32 count=1 2> /dev/null)
  printf '%s\n' "$token"
}
|
||||
|
||||
# Read the admin/kubelet/kube-proxy tokens for node $1 back out of the
# per-node CSV written by generate_kubelet_tokens (row format: token,user,group).
# Globals written: KUBE_BEARER_TOKEN, KUBELET_TOKEN, KUBE_PROXY_TOKEN.
get_tokens_from_csv() {
  # Hoist the path and quote it (SC2086) instead of repeating the expansion.
  local csv="${KUBE_TEMP}/${1}_tokens.csv"
  KUBE_BEARER_TOKEN=$(awk -F, '/admin/ {print $1}' "${csv}")
  KUBELET_TOKEN=$(awk -F, '/kubelet/ {print $1}' "${csv}")
  KUBE_PROXY_TOKEN=$(awk -F, '/kube_proxy/ {print $1}' "${csv}")
}
|
||||
|
||||
# Append an admin credential row (token,user,group) to the shared
# known_tokens.csv that is later shipped to the apiserver.
generate_admin_token() {
  echo "$(create_token),admin,admin" >> "${KUBE_TEMP}/known_tokens.csv"
}
|
||||
|
||||
# Creates a csv file each time called (i.e one per kubelet).
# $1 is the node name; the first write truncates any stale file.
generate_kubelet_tokens() {
  echo "$(create_token),kubelet,kubelet" > "${KUBE_TEMP}/${1}_tokens.csv"
  echo "$(create_token),kube_proxy,kube_proxy" >> "${KUBE_TEMP}/${1}_tokens.csv"
}
|
|
@ -1,209 +0,0 @@
|
|||
#cloud-config
|
||||
|
||||
write_files:
|
||||
- path: /etc/cloud.conf
|
||||
permissions: 0600
|
||||
content: |
|
||||
[Global]
|
||||
auth-url = OS_AUTH_URL
|
||||
username = OS_USERNAME
|
||||
api-key = OS_PASSWORD
|
||||
tenant-id = OS_TENANT_NAME
|
||||
region = OS_REGION_NAME
|
||||
[LoadBalancer]
|
||||
subnet-id = 11111111-1111-1111-1111-111111111111
|
||||
- path: /opt/bin/git-kubernetes-nginx.sh
|
||||
permissions: 0755
|
||||
content: |
|
||||
#!/bin/bash
|
||||
git clone https://github.com/thommay/kubernetes_nginx /opt/kubernetes_nginx
|
||||
/usr/bin/cp /opt/.kubernetes_auth /opt/kubernetes_nginx/.kubernetes_auth
|
||||
/opt/kubernetes_nginx/git-kubernetes-nginx.sh
|
||||
- path: /opt/bin/download-release.sh
|
||||
permissions: 0755
|
||||
content: |
|
||||
#!/bin/bash
|
||||
# This temp URL is only good for the length of time specified at cluster creation time.
|
||||
# Afterward, it will result in a 403.
|
||||
OBJECT_URL="CLOUD_FILES_URL"
|
||||
if [ ! -s /opt/kubernetes.tar.gz ]
|
||||
then
|
||||
echo "Downloading release ($OBJECT_URL)"
|
||||
wget "${OBJECT_URL}" -O /opt/kubernetes.tar.gz
|
||||
echo "Unpacking release"
|
||||
rm -rf /opt/kubernetes || false
|
||||
tar xzf /opt/kubernetes.tar.gz -C /opt/
|
||||
else
|
||||
echo "kubernetes release found. Skipping download."
|
||||
fi
|
||||
- path: /opt/.kubernetes_auth
|
||||
permissions: 0600
|
||||
content: |
|
||||
KUBE_USER:KUBE_PASSWORD
|
||||
|
||||
coreos:
|
||||
etcd2:
|
||||
discovery: https://discovery.etcd.io/DISCOVERY_ID
|
||||
advertise-client-urls: http://$private_ipv4:2379,http://$private_ipv4:4001
|
||||
initial-advertise-peer-urls: http://$private_ipv4:2380
|
||||
listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001
|
||||
listen-peer-urls: http://$private_ipv4:2380,http://$private_ipv4:7001
|
||||
|
||||
flannel:
|
||||
ip_masq: true
|
||||
interface: eth2
|
||||
|
||||
fleet:
|
||||
public-ip: $private_ipv4
|
||||
metadata: kubernetes_role=master
|
||||
|
||||
update:
|
||||
reboot-strategy: off
|
||||
|
||||
units:
|
||||
- name: etcd2.service
|
||||
command: start
|
||||
- name: fleet.service
|
||||
command: start
|
||||
- name: flanneld.service
|
||||
drop-ins:
|
||||
- name: 50-flannel.conf
|
||||
content: |
|
||||
[Unit]
|
||||
Requires=etcd2.service
|
||||
After=etcd2.service
|
||||
|
||||
[Service]
|
||||
ExecStartPre=-/usr/bin/etcdctl mk /coreos.com/network/config '{"Network":"KUBE_NETWORK", "Backend": {"Type": "host-gw"}}'
|
||||
command: start
|
||||
- name: generate-serviceaccount-key.service
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Generate service-account key file
|
||||
|
||||
[Service]
|
||||
ExecStartPre=-/usr/bin/mkdir -p /var/run/kubernetes/
|
||||
ExecStart=/bin/openssl genrsa -out /var/run/kubernetes/kube-serviceaccount.key 2048 2>/dev/null
|
||||
RemainAfterExit=yes
|
||||
Type=oneshot
|
||||
- name: docker.service
|
||||
command: start
|
||||
drop-ins:
|
||||
- name: 51-docker-mirror.conf
|
||||
content: |
|
||||
[Unit]
|
||||
# making sure that flanneld finished startup, otherwise containers
|
||||
# won't land in flannel's network...
|
||||
Requires=flanneld.service
|
||||
After=flanneld.service
|
||||
Restart=Always
|
||||
- name: download-release.service
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Downloads Kubernetes Release
|
||||
After=network-online.target
|
||||
Requires=network-online.target
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
ExecStart=/usr/bin/bash /opt/bin/download-release.sh
|
||||
- name: kube-apiserver.service
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Kubernetes API Server
|
||||
Documentation=https://github.com/kubernetes/kubernetes
|
||||
After=network-online.target
|
||||
Requires=network-online.target
|
||||
After=download-release.service
|
||||
Requires=download-release.service
|
||||
Requires=generate-serviceaccount-key.service
|
||||
After=generate-serviceaccount-key.service
|
||||
[Service]
|
||||
ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/server/bin/kube-apiserver /opt/bin/kube-apiserver
|
||||
ExecStartPre=/usr/bin/mkdir -p /var/lib/kube-apiserver
|
||||
ExecStart=/opt/bin/kube-apiserver \
|
||||
--address=127.0.0.1 \
|
||||
--cloud-provider=rackspace \
|
||||
--cloud-config=/etc/cloud.conf \
|
||||
--etcd-servers=http://127.0.0.1:4001 \
|
||||
--logtostderr=true \
|
||||
--port=8080 \
|
||||
--service-cluster-ip-range=SERVICE_CLUSTER_IP_RANGE \
|
||||
--token-auth-file=/var/lib/kube-apiserver/known_tokens.csv \
|
||||
--v=2 \
|
||||
--service-account-key-file=/var/run/kubernetes/kube-serviceaccount.key \
|
||||
--service-account-lookup=true \
|
||||
--admission-control=NamespaceLifecycle,NamespaceAutoProvision,LimitRanger,SecurityContextDeny,ServiceAccount,DefaultTolerationSeconds,ResourceQuota
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
- name: apiserver-advertiser.service
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Kubernetes Apiserver Advertiser
|
||||
After=etcd2.service
|
||||
Requires=etcd2.service
|
||||
After=master-apiserver.service
|
||||
[Service]
|
||||
ExecStart=/bin/sh -c 'etcdctl set /corekube/apiservers/$public_ipv4 $public_ipv4'
|
||||
Restart=always
|
||||
RestartSec=120
|
||||
- name: kube-controller-manager.service
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Kubernetes Controller Manager
|
||||
Documentation=https://github.com/kubernetes/kubernetes
|
||||
After=network-online.target
|
||||
Requires=network-online.target
|
||||
After=kube-apiserver.service
|
||||
Requires=kube-apiserver.service
|
||||
[Service]
|
||||
ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/server/bin/kube-controller-manager /opt/bin/kube-controller-manager
|
||||
ExecStart=/opt/bin/kube-controller-manager \
|
||||
--cloud-provider=rackspace \
|
||||
--cloud-config=/etc/cloud.conf \
|
||||
--logtostderr=true \
|
||||
--master=127.0.0.1:8080 \
|
||||
--v=2 \
|
||||
--service-account-private-key-file=/var/run/kubernetes/kube-serviceaccount.key \
|
||||
--root-ca-file=/run/kubernetes/apiserver.crt
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
- name: kube-scheduler.service
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Kubernetes Scheduler
|
||||
Documentation=https://github.com/kubernetes/kubernetes
|
||||
After=network-online.target
|
||||
Requires=network-online.target
|
||||
After=kube-apiserver.service
|
||||
Requires=kube-apiserver.service
|
||||
[Service]
|
||||
ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/server/bin/kube-scheduler /opt/bin/kube-scheduler
|
||||
ExecStart=/opt/bin/kube-scheduler \
|
||||
--logtostderr=true \
|
||||
--master=127.0.0.1:8080
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
#Running nginx service with --net="host" is a necessary evil until running all k8s services in docker.
|
||||
- name: kubernetes-nginx.service
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Kubernetes Nginx Service
|
||||
After=network-online.target
|
||||
Requires=network-online.target
|
||||
After=docker.service
|
||||
Requires=docker.service
|
||||
[Service]
|
||||
ExecStartPre=/opt/bin/git-kubernetes-nginx.sh
|
||||
ExecStartPre=-/usr/bin/docker rm kubernetes_nginx
|
||||
ExecStart=/usr/bin/docker run --rm --net="host" -p "443:443" -t --name "kubernetes_nginx" kubernetes_nginx
|
||||
ExecStop=/usr/bin/docker stop kubernetes_nginx
|
||||
Restart=always
|
||||
RestartSec=15
|
|
@ -1,229 +0,0 @@
|
|||
#cloud-config
|
||||
|
||||
write_files:
|
||||
- path: /opt/bin/regen-apiserver-list.sh
|
||||
permissions: 0755
|
||||
content: |
|
||||
#!/bin/sh
|
||||
m=$(echo $(etcdctl ls --recursive /corekube/apiservers | cut -d/ -f4 | sort) | tr ' ' ,)
|
||||
mkdir -p /run/kubelet
|
||||
echo "APISERVER_IPS=$m" > /run/kubelet/apiservers.env
|
||||
echo "FIRST_APISERVER_URL=https://${m%%\,*}:6443" >> /run/kubelet/apiservers.env
|
||||
- path: /opt/bin/download-release.sh
|
||||
permissions: 0755
|
||||
content: |
|
||||
#!/bin/bash
|
||||
# This temp URL is only good for the length of time specified at cluster creation time.
|
||||
# Afterward, it will result in a 403.
|
||||
OBJECT_URL="CLOUD_FILES_URL"
|
||||
if [ ! -s /opt/kubernetes.tar.gz ]
|
||||
then
|
||||
echo "Downloading release ($OBJECT_URL)"
|
||||
wget "${OBJECT_URL}" -O /opt/kubernetes.tar.gz
|
||||
echo "Unpacking release"
|
||||
rm -rf /opt/kubernetes || false
|
||||
tar xzf /opt/kubernetes.tar.gz -C /opt/
|
||||
else
|
||||
echo "kubernetes release found. Skipping download."
|
||||
fi
|
||||
- path: /run/config-kubelet.sh
|
||||
permissions: 0755
|
||||
content: |
|
||||
#!/bin/bash -e
|
||||
set -x
|
||||
/usr/bin/mkdir -p /var/lib/kubelet
|
||||
cat > /var/lib/kubelet/kubeconfig << EOF
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
users:
|
||||
- name: kubelet
|
||||
user:
|
||||
token: KUBELET_TOKEN
|
||||
clusters:
|
||||
- name: local
|
||||
cluster:
|
||||
insecure-skip-tls-verify: true
|
||||
contexts:
|
||||
- context:
|
||||
cluster: local
|
||||
user: kubelet
|
||||
name: service-account-context
|
||||
current-context: service-account-context
|
||||
EOF
|
||||
- path: /run/config-kube-proxy.sh
|
||||
permissions: 0755
|
||||
content: |
|
||||
#!/bin/bash -e
|
||||
set -x
|
||||
/usr/bin/mkdir -p /var/lib/kube-proxy
|
||||
cat > /var/lib/kube-proxy/kubeconfig << EOF
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
users:
|
||||
- name: kube-proxy
|
||||
user:
|
||||
token: KUBE_PROXY_TOKEN
|
||||
clusters:
|
||||
- name: local
|
||||
cluster:
|
||||
insecure-skip-tls-verify: true
|
||||
contexts:
|
||||
- context:
|
||||
cluster: local
|
||||
user: kube-proxy
|
||||
name: service-account-context
|
||||
current-context: service-account-context
|
||||
EOF
|
||||
|
||||
coreos:
|
||||
etcd2:
|
||||
discovery: https://discovery.etcd.io/DISCOVERY_ID
|
||||
advertise-client-urls: http://$private_ipv4:2379,http://$private_ipv4:4001
|
||||
initial-advertise-peer-urls: http://$private_ipv4:2380
|
||||
listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001
|
||||
listen-peer-urls: http://$private_ipv4:2380,http://$private_ipv4:7001
|
||||
|
||||
flannel:
|
||||
ip_masq: true
|
||||
interface: eth2
|
||||
|
||||
fleet:
|
||||
public-ip: $private_ipv4
|
||||
metadata: kubernetes_role=minion
|
||||
|
||||
update:
|
||||
reboot-strategy: off
|
||||
|
||||
units:
|
||||
- name: etcd2.service
|
||||
command: start
|
||||
- name: fleet.service
|
||||
command: start
|
||||
- name: flanneld.service
|
||||
drop-ins:
|
||||
- name: 50-flannel.conf
|
||||
content: |
|
||||
[Unit]
|
||||
Requires=etcd2.service
|
||||
After=etcd2.service
|
||||
|
||||
[Service]
|
||||
ExecStartPre=-/usr/bin/etcdctl mk /coreos.com/network/config '{"Network":"KUBE_NETWORK", "Backend": {"Type": "host-gw"}}'
|
||||
command: start
|
||||
- name: docker.service
|
||||
command: start
|
||||
drop-ins:
|
||||
- name: 51-docker-mirror.conf
|
||||
content: |
|
||||
[Unit]
|
||||
# making sure that flanneld finished startup, otherwise containers
|
||||
# won't land in flannel's network...
|
||||
Requires=flanneld.service
|
||||
After=flanneld.service
|
||||
Restart=Always
|
||||
- name: download-release.service
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Downloads Kubernetes Release
|
||||
After=network-online.target
|
||||
Requires=network-online.target
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
ExecStart=/usr/bin/bash /opt/bin/download-release.sh
|
||||
- name: kubelet.service
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Kubernetes Kubelet
|
||||
Documentation=https://github.com/kubernetes/kubernetes
|
||||
After=network-online.target
|
||||
Requires=network-online.target
|
||||
After=docker.service
|
||||
Requires=docker.service
|
||||
After=download-release.service
|
||||
Requires=download-release.service
|
||||
After=apiserver-finder.service
|
||||
Requires=apiserver-finder.service
|
||||
[Service]
|
||||
EnvironmentFile=/run/kubelet/apiservers.env
|
||||
ExecStartPre=/run/config-kubelet.sh
|
||||
ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/server/bin/kubelet /opt/bin/kubelet
|
||||
ExecStart=/opt/bin/kubelet \
|
||||
--address=$private_ipv4 \
|
||||
--api-servers=${FIRST_APISERVER_URL} \
|
||||
--cluster-dns=DNS_SERVER_IP \
|
||||
--cluster-domain=DNS_DOMAIN \
|
||||
--healthz-bind-address=$private_ipv4 \
|
||||
--hostname-override=$private_ipv4 \
|
||||
--logtostderr=true \
|
||||
--v=2
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
KillMode=process
|
||||
- name: kube-proxy.service
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Kubernetes Proxy
|
||||
Documentation=https://github.com/kubernetes/kubernetes
|
||||
After=network-online.target
|
||||
Requires=network-online.target
|
||||
After=docker.service
|
||||
Requires=docker.service
|
||||
After=download-release.service
|
||||
Requires=download-release.service
|
||||
After=apiserver-finder.service
|
||||
Requires=apiserver-finder.service
|
||||
[Service]
|
||||
EnvironmentFile=/run/kubelet/apiservers.env
|
||||
ExecStartPre=/run/config-kube-proxy.sh
|
||||
ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/server/bin/kube-proxy /opt/bin/kube-proxy
|
||||
ExecStart=/opt/bin/kube-proxy \
|
||||
--bind-address=$private_ipv4 \
|
||||
--kubeconfig=/var/lib/kube-proxy/kubeconfig \
|
||||
--logtostderr=true \
|
||||
--hostname-override=$private_ipv4 \
|
||||
--master=${FIRST_APISERVER_URL}
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
- name: apiserver-finder.service
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Kubernetes Apiserver finder
|
||||
After=network-online.target
|
||||
Requires=network-online.target
|
||||
After=etcd2.service
|
||||
Requires=etcd2.service
|
||||
[Service]
|
||||
ExecStartPre=/opt/bin/regen-apiserver-list.sh
|
||||
ExecStart=/usr/bin/etcdctl exec-watch --recursive /corekube/apiservers -- /opt/bin/regen-apiserver-list.sh
|
||||
Restart=always
|
||||
RestartSec=30
|
||||
- name: cbr0.netdev
|
||||
command: start
|
||||
content: |
|
||||
[NetDev]
|
||||
Kind=bridge
|
||||
Name=cbr0
|
||||
- name: cbr0.network
|
||||
command: start
|
||||
content: |
|
||||
[Match]
|
||||
Name=cbr0
|
||||
|
||||
[Network]
|
||||
Address=10.240.INDEX.1/24
|
||||
- name: nat.service
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=NAT container->outside traffic
|
||||
|
||||
[Service]
|
||||
ExecStart=/usr/sbin/iptables -t nat -A POSTROUTING -o eth0 -s 10.240.INDEX.0/24 -j MASQUERADE
|
||||
ExecStart=/usr/sbin/iptables -t nat -A POSTROUTING -o eth1 -s 10.240.INDEX.0/24 -j MASQUERADE
|
||||
RemainAfterExit=yes
|
||||
Type=oneshot
|
|
@ -1,64 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Sane defaults for dev environments. The following variables can be easily overriden
# by setting each as a ENV variable ahead of time:
# KUBE_IMAGE, KUBE_MASTER_FLAVOR, KUBE_NODE_FLAVOR, NUM_NODES, NOVA_NETWORK and SSH_KEY_NAME

# Shared
KUBE_IMAGE="${KUBE_IMAGE-3eba4fbb-51da-4233-b699-8a4030561add}" # CoreOS (Stable)
SSH_KEY_NAME="${SSH_KEY_NAME-id_kubernetes}"
NOVA_NETWORK_LABEL="kubernetes-pool-net"
NOVA_NETWORK_CIDR="${NOVA_NETWORK-192.168.0.0/24}"
INSTANCE_PREFIX="kubernetes"

# Master
KUBE_MASTER_FLAVOR="${KUBE_MASTER_FLAVOR-general1-1}"
MASTER_NAME="${INSTANCE_PREFIX}-master"
MASTER_TAG="tags=${INSTANCE_PREFIX}-master"

# Node
KUBE_NODE_FLAVOR="${KUBE_NODE_FLAVOR-general1-2}"
NUM_NODES="${NUM_NODES-4}"
NODE_TAG="tags=${INSTANCE_PREFIX}-node"
# Build the node-name list with a plain loop instead of the fragile
# `eval echo {1..$NUM_NODES}` brace-expansion trick.
NODE_NAMES=()
for ((i = 1; i <= NUM_NODES; i++)); do
  NODE_NAMES+=("${INSTANCE_PREFIX}-node-${i}")
done
unset i
KUBE_NETWORK="10.240.0.0/16"
SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16"  # formerly PORTAL_NET

# Optional: Enable node logging.
ENABLE_NODE_LOGGING=false
LOGGING_DESTINATION=elasticsearch

# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING=false
ELASTICSEARCH_LOGGING_REPLICAS=1

# Optional: Cluster monitoring to setup as part of the cluster bring up:
#   none     - No cluster monitoring setup
#   influxdb - Heapster, InfluxDB, and Grafana
#   google   - Heapster, Google Cloud Monitoring, and Google Cloud Logging
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"

# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="10.0.0.10"
DNS_DOMAIN="cluster.local"

# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}"

# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
|
|
@ -1,36 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Bring up a Kubernetes cluster.
#
# If the full release name (gs://<bucket>/<release>) is passed in then we take
# that directly. If not then we assume we are doing development stuff and take
# the defaults in the release config.

# exit on any error
set -e

# Quote the path (SC2086) so a checkout under a directory with spaces works.
source "$(dirname "$0")/../kube-util.sh"

echo "Starting cluster using provider: $KUBERNETES_PROVIDER"

verify-prereqs
kube-up

# skipping validation for now until since machines show up as private IPs
# source $(dirname $0)/validate-cluster.sh

echo "Done"
|
|
@ -1,360 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# A library of helper functions for deploying on Rackspace
|
||||
|
||||
# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
# Quote the whole source path; the old form word-split on spaces (SC2086).
source "$(dirname "${BASH_SOURCE}")/${KUBE_CONFIG_FILE-config-default.sh}"
source "${KUBE_ROOT}/cluster/common.sh"
source "${KUBE_ROOT}/cluster/rackspace/authorization.sh"
|
||||
|
||||
# Check that the CLI tools (nova, swiftly) and the OS_* credentials needed by
# the Rackspace provider are present. Exits 1 on a missing tool, returns 1 on
# a missing environment variable.
verify-prereqs() {
  # Make sure that prerequisites are installed.
  local x
  for x in nova swiftly; do
    # `command -v` is the portable replacement for `which` (SC2230).
    if ! command -v "$x" > /dev/null; then
      echo "cluster/rackspace/util.sh: Can't find $x in PATH, please fix and retry."
      exit 1
    fi
  done

  if [[ -z "${OS_AUTH_URL-}" ]]; then
    echo "cluster/rackspace/util.sh: OS_AUTH_URL not set."
    echo -e "\texport OS_AUTH_URL=https://identity.api.rackspacecloud.com/v2.0/"
    return 1
  fi

  if [[ -z "${OS_USERNAME-}" ]]; then
    echo "cluster/rackspace/util.sh: OS_USERNAME not set."
    echo -e "\texport OS_USERNAME=myusername"
    return 1
  fi

  if [[ -z "${OS_PASSWORD-}" ]]; then
    echo "cluster/rackspace/util.sh: OS_PASSWORD not set."
    echo -e "\texport OS_PASSWORD=myapikey"
    return 1
  fi
}
|
||||
|
||||
# Generate a local SSH keypair on first use, then upload the public key to
# Rackspace unless a keypair named ${SSH_KEY_NAME} is already registered.
rax-ssh-key() {
  if [ ! -f "${HOME}/.ssh/${SSH_KEY_NAME}" ]; then
    echo "cluster/rackspace/util.sh: Generating SSH KEY ${HOME}/.ssh/${SSH_KEY_NAME}"
    ssh-keygen -f "${HOME}/.ssh/${SSH_KEY_NAME}" -N '' > /dev/null
  fi

  # Run the pipeline directly; the original wrapped it in `$( ... )`, which
  # executed its (empty) output as a command (SC2091).
  if ! nova keypair-list | grep -q "${SSH_KEY_NAME}"; then
    echo "cluster/rackspace/util.sh: Uploading key to Rackspace:"
    echo -e "\tnova keypair-add ${SSH_KEY_NAME} --pub-key ${HOME}/.ssh/${SSH_KEY_NAME}.pub"
    nova keypair-add "${SSH_KEY_NAME}" --pub-key "${HOME}/.ssh/${SSH_KEY_NAME}.pub" > /dev/null 2>&1
  else
    echo "cluster/rackspace/util.sh: SSH key ${SSH_KEY_NAME}.pub already uploaded"
  fi
}
|
||||
|
||||
# Set the Cloud Files container/prefix used for release tarballs and locate
# the locally-built release tars. Globals written: CLOUDFILES_CONTAINER,
# CONTAINER_PREFIX (defaults to "devel/").
rackspace-set-vars() {
  # One container per Rackspace user.
  CLOUDFILES_CONTAINER="kubernetes-releases-${OS_USERNAME}"
  CONTAINER_PREFIX=${CONTAINER_PREFIX-devel/}
  find-release-tars
}
|
||||
|
||||
# Retrieves a tempurl from cloudfiles to make the release object publicly accessible temporarily.
# Globals written: KUBE_TAR, RELEASE_TMP_URL.
find-object-url() {
  rackspace-set-vars

  KUBE_TAR="${CLOUDFILES_CONTAINER}/${CONTAINER_PREFIX}/kubernetes-server-linux-amd64.tar.gz"

  # Create temp URL good for 24 hours. Credentials are quoted (SC2086).
  RELEASE_TMP_URL=$(swiftly -A "${OS_AUTH_URL}" -U "${OS_USERNAME}" -K "${OS_PASSWORD}" tempurl GET "${KUBE_TAR}" 86400)
  echo "cluster/rackspace/util.sh: Object temp URL:"
  echo -e "\t${RELEASE_TMP_URL}"
}
|
||||
|
||||
# Make sure the Cloud Files container exists, creating it on demand.
# Globals written: SWIFTLY_CMD (reused later by copy_dev_tarballs).
ensure_dev_container() {
  # Build the swiftly invocation once and keep it global for later callers.
  SWIFTLY_CMD="swiftly -A ${OS_AUTH_URL} -U ${OS_USERNAME} -K ${OS_PASSWORD}"

  # A successful GET means the container already exists — nothing to do.
  if ${SWIFTLY_CMD} get ${CLOUDFILES_CONTAINER} > /dev/null 2>&1; then
    return 0
  fi
  echo "cluster/rackspace/util.sh: Container doesn't exist. Creating container ${CLOUDFILES_CONTAINER}"
  ${SWIFTLY_CMD} put ${CLOUDFILES_CONTAINER} > /dev/null 2>&1
}
|
||||
|
||||
# Copy kubernetes-server-linux-amd64.tar.gz to cloud files object store.
# Requires: SWIFTLY_CMD (set by ensure_dev_container), SERVER_BINARY_TAR,
# CLOUDFILES_CONTAINER and CONTAINER_PREFIX (set by rackspace-set-vars).
copy_dev_tarballs() {
  echo "cluster/rackspace/util.sh: Uploading to Cloud Files"
  # Quote the tarball path (SC2086); the destination object name is fixed.
  ${SWIFTLY_CMD} put -i "${SERVER_BINARY_TAR}" \
    "${CLOUDFILES_CONTAINER}/${CONTAINER_PREFIX}/kubernetes-server-linux-amd64.tar.gz" > /dev/null 2>&1

  echo "Release pushed."
}
|
||||
|
||||
# Generate all bearer tokens for the cluster: one CSV per node (kubelet +
# kube-proxy), the fixed service accounts, and the admin token — all
# accumulated into ${KUBE_TEMP}/known_tokens.csv for the apiserver.
prep_known_tokens() {
  local i
  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
    generate_kubelet_tokens "${NODE_NAMES[i]}"
    cat "${KUBE_TEMP}/${NODE_NAMES[i]}_tokens.csv" >> "${KUBE_TEMP}/known_tokens.csv"
  done

  # Generate tokens for other "service accounts". Append to known_tokens.
  #
  # NB: If this list ever changes, this script actually has to
  # change to detect the existence of this file, kill any deleted
  # old tokens and add any new tokens (to handle the upgrade case).
  local -r service_accounts=("system:scheduler" "system:controller_manager" "system:logging" "system:monitoring" "system:dns")
  local account
  for account in "${service_accounts[@]}"; do
    echo "$(create_token),${account},${account}" >> "${KUBE_TEMP}/known_tokens.csv"
  done

  generate_admin_token
}
|
||||
|
||||
# Boot the Kubernetes master: fetch an etcd discovery URL, render the master
# cloud-config from its template, and `nova boot` the server.
rax-boot-master() {
  # -sS: suppress the progress meter but keep real errors on stderr.
  DISCOVERY_URL=$(curl -sS "https://discovery.etcd.io/new?size=1")
  DISCOVERY_ID=$(echo "${DISCOVERY_URL}" | cut -f 4 -d /)
  echo "cluster/rackspace/util.sh: etcd discovery URL: ${DISCOVERY_URL}"

  # Copy cloud-config to KUBE_TEMP and work some sed magic.
  # '&' in the temp URL is escaped so sed doesn't treat it as a back-reference.
  sed -e "s|DISCOVERY_ID|${DISCOVERY_ID}|" \
      -e "s|CLOUD_FILES_URL|${RELEASE_TMP_URL//&/\\&}|" \
      -e "s|KUBE_USER|${KUBE_USER}|" \
      -e "s|KUBE_PASSWORD|${KUBE_PASSWORD}|" \
      -e "s|SERVICE_CLUSTER_IP_RANGE|${SERVICE_CLUSTER_IP_RANGE}|" \
      -e "s|KUBE_NETWORK|${KUBE_NETWORK}|" \
      -e "s|OS_AUTH_URL|${OS_AUTH_URL}|" \
      -e "s|OS_USERNAME|${OS_USERNAME}|" \
      -e "s|OS_PASSWORD|${OS_PASSWORD}|" \
      -e "s|OS_TENANT_NAME|${OS_TENANT_NAME}|" \
      -e "s|OS_REGION_NAME|${OS_REGION_NAME}|" \
      "$(dirname "$0")/rackspace/cloud-config/master-cloud-config.yaml" > "${KUBE_TEMP}/master-cloud-config.yaml"

  MASTER_BOOT_CMD="nova boot \
--key-name ${SSH_KEY_NAME} \
--flavor ${KUBE_MASTER_FLAVOR} \
--image ${KUBE_IMAGE} \
--meta ${MASTER_TAG} \
--meta ETCD=${DISCOVERY_ID} \
--user-data ${KUBE_TEMP}/master-cloud-config.yaml \
--config-drive true \
--nic net-id=${NETWORK_UUID} \
${MASTER_NAME}"

  echo "cluster/rackspace/util.sh: Booting ${MASTER_NAME} with following command:"
  echo -e "\t$MASTER_BOOT_CMD"
  # Intentionally unquoted: the string must word-split into nova's argv.
  $MASTER_BOOT_CMD
}
|
||||
|
||||
# Boot each Kubernetes node: render a per-node cloud-config with the node's
# tokens and index, then `nova boot` it.
rax-boot-nodes() {
  cp "$(dirname "$0")/rackspace/cloud-config/node-cloud-config.yaml" \
    "${KUBE_TEMP}/node-cloud-config.yaml"

  local i
  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
    # Loads KUBELET_TOKEN / KUBE_PROXY_TOKEN for this node.
    get_tokens_from_csv "${NODE_NAMES[i]}"

    # NOTE: the original listed the KUBELET_TOKEN substitution twice; the
    # placeholder appears once per line in the template, so one is enough.
    sed -e "s|DISCOVERY_ID|${DISCOVERY_ID}|" \
        -e "s|CLOUD_FILES_URL|${RELEASE_TMP_URL//&/\\&}|" \
        -e "s|DNS_SERVER_IP|${DNS_SERVER_IP:-}|" \
        -e "s|DNS_DOMAIN|${DNS_DOMAIN:-}|" \
        -e "s|ENABLE_CLUSTER_DNS|${ENABLE_CLUSTER_DNS:-false}|" \
        -e "s|ENABLE_NODE_LOGGING|${ENABLE_NODE_LOGGING:-false}|" \
        -e "s|INDEX|$((i + 1))|g" \
        -e "s|KUBELET_TOKEN|${KUBELET_TOKEN}|" \
        -e "s|KUBE_NETWORK|${KUBE_NETWORK}|" \
        -e "s|KUBE_PROXY_TOKEN|${KUBE_PROXY_TOKEN}|" \
        -e "s|LOGGING_DESTINATION|${LOGGING_DESTINATION:-}|" \
        "$(dirname "$0")/rackspace/cloud-config/node-cloud-config.yaml" > "${KUBE_TEMP}/node-cloud-config-$((i + 1)).yaml"

    NODE_BOOT_CMD="nova boot \
--key-name ${SSH_KEY_NAME} \
--flavor ${KUBE_NODE_FLAVOR} \
--image ${KUBE_IMAGE} \
--meta ${NODE_TAG} \
--user-data ${KUBE_TEMP}/node-cloud-config-$((i + 1)).yaml \
--config-drive true \
--nic net-id=${NETWORK_UUID} \
${NODE_NAMES[i]}"

    echo "cluster/rackspace/util.sh: Booting ${NODE_NAMES[i]} with following command:"
    echo -e "\t$NODE_BOOT_CMD"
    # Intentionally unquoted: the string must word-split into nova's argv.
    $NODE_BOOT_CMD
  done
}
|
||||
|
||||
# Create the cluster's cloud network once; reuse it on subsequent runs.
rax-nova-network() {
  # Run the pipeline directly; the original's `! $( ... )` executed the
  # (empty) substitution output as a command (SC2091).
  if ! nova network-list | grep -q "${NOVA_NETWORK_LABEL}"; then
    # Strip any backslashes that were used to escape the CIDR.
    SAFE_CIDR=$(echo "${NOVA_NETWORK_CIDR}" | tr -d '\\')
    NETWORK_CREATE_CMD="nova network-create ${NOVA_NETWORK_LABEL} ${SAFE_CIDR}"

    echo "cluster/rackspace/util.sh: Creating cloud network with following command:"
    echo -e "\t${NETWORK_CREATE_CMD}"

    # Intentionally unquoted: the string must word-split into nova's argv.
    $NETWORK_CREATE_CMD
  else
    echo "cluster/rackspace/util.sh: Using existing cloud network ${NOVA_NETWORK_LABEL}"
  fi
}
|
||||
|
||||
# Populate KUBE_NODE_IP_ADDRESSES with the accessIPv4 address of every node
# in NODE_NAMES, exiting 1 if nothing could be detected.
detect-nodes() {
  KUBE_NODE_IP_ADDRESSES=()
  local i node_ip
  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
    # Declaration split from assignment so a nova failure isn't masked (SC2155).
    node_ip=$(nova show --minimal "${NODE_NAMES[i]}" \
      | grep accessIPv4 | awk '{print $4}')
    echo "cluster/rackspace/util.sh: Found ${NODE_NAMES[i]} at ${node_ip}"
    KUBE_NODE_IP_ADDRESSES+=("${node_ip}")
  done
  # Check the element count as well as the first element (the original's
  # bare "$KUBE_NODE_IP_ADDRESSES" only ever inspected element 0).
  if [ ${#KUBE_NODE_IP_ADDRESSES[@]} -eq 0 ] || [ -z "${KUBE_NODE_IP_ADDRESSES[0]}" ]; then
    echo "cluster/rackspace/util.sh: Could not detect Kubernetes node nodes. Make sure you've launched a cluster with 'kube-up.sh'"
    exit 1
  fi
}
|
||||
|
||||
# Poll nova until the master server reports an accessIPv4 address.
# Globals written: KUBE_MASTER, KUBE_MASTER_IP.
detect-master() {
  KUBE_MASTER=${MASTER_NAME}

  echo "Waiting for ${MASTER_NAME} IP Address."
  echo
  echo " This will continually check to see if the master node has an IP address."
  echo

  KUBE_MASTER_IP=$(nova show "$KUBE_MASTER" --minimal | grep accessIPv4 | awk '{print $4}')

  # ':-' (not the original '-') so an EMPTY result also keeps polling; with
  # plain '-' the fallback only applied when the variable was unset, so the
  # wait loop never actually ran after the first assignment.
  while [ "${KUBE_MASTER_IP:-|}" == "|" ]; do
    KUBE_MASTER_IP=$(nova show "$KUBE_MASTER" --minimal | grep accessIPv4 | awk '{print $4}')
    printf "."
    sleep 2
  done

  echo "${KUBE_MASTER} IP Address is ${KUBE_MASTER_IP}"
}
|
||||
|
||||
# $1 should be the network you would like to get an IP address for.
# Globals written: KUBE_MASTER, MASTER_IP.
detect-master-nova-net() {
  KUBE_MASTER=${MASTER_NAME}

  # Quote the pattern and server name so spaces/globs can't split them (SC2086).
  MASTER_IP=$(nova show "$KUBE_MASTER" --minimal | grep "$1" | awk '{print $5}')
}
|
||||
|
||||
# Bring up a complete Rackspace cluster: upload the release, boot master and
# nodes, push tokens to the master, wait for the API, and write kubeconfig.
kube-up() {
  SCRIPT_DIR=$(CDPATH="" cd "$(dirname "$0")"; pwd)

  rackspace-set-vars
  ensure_dev_container
  copy_dev_tarballs

  # Find the release to use. Generally it will be passed when doing a 'prod'
  # install and will default to the release/config.sh version when doing a
  # developer up.
  find-object-url

  # Create a temp directory to hold scripts that will be uploaded to master/nodes
  KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
  trap 'rm -rf "${KUBE_TEMP}"' EXIT

  load-or-gen-kube-basicauth
  python2.7 "$(dirname "$0")/../third_party/htpasswd/htpasswd.py" -b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
  HTPASSWD=$(cat "${KUBE_TEMP}/htpasswd")

  rax-nova-network
  NETWORK_UUID=$(nova network-list | grep -i "${NOVA_NETWORK_LABEL}" | awk '{print $2}')

  # create and upload ssh key if necessary
  rax-ssh-key

  echo "cluster/rackspace/util.sh: Starting Cloud Servers"
  prep_known_tokens

  rax-boot-master
  rax-boot-nodes

  detect-master

  # TODO look for a better way to get the known_tokens to the master. This is needed over file injection since the files were too large on a 4 node cluster.
  # NOTE: the original wrapped each of these in `$( ... )`, running them inside
  # command substitutions for no benefit (SC2091); invoke them directly.
  scp -o StrictHostKeyChecking=no -i ~/.ssh/"${SSH_KEY_NAME}" "${KUBE_TEMP}/known_tokens.csv" "core@${KUBE_MASTER_IP}:/home/core/known_tokens.csv"
  sleep 2
  ssh -o StrictHostKeyChecking=no -i ~/.ssh/"${SSH_KEY_NAME}" "core@${KUBE_MASTER_IP}" sudo /usr/bin/mkdir -p /var/lib/kube-apiserver
  ssh -o StrictHostKeyChecking=no -i ~/.ssh/"${SSH_KEY_NAME}" "core@${KUBE_MASTER_IP}" sudo mv /home/core/known_tokens.csv /var/lib/kube-apiserver/known_tokens.csv
  ssh -o StrictHostKeyChecking=no -i ~/.ssh/"${SSH_KEY_NAME}" "core@${KUBE_MASTER_IP}" sudo chown root:root /var/lib/kube-apiserver/known_tokens.csv
  ssh -o StrictHostKeyChecking=no -i ~/.ssh/"${SSH_KEY_NAME}" "core@${KUBE_MASTER_IP}" sudo systemctl restart kube-apiserver

  # Reap any background jobs, counting failures ($(…) backticks replaced).
  FAIL=0
  local job
  for job in $(jobs -p); do
    wait "$job" || FAIL=$((FAIL + 1))
  done
  if (( FAIL != 0 )); then
    echo "${FAIL} commands failed. Exiting."
    exit 2
  fi

  echo "Waiting for cluster initialization."
  echo
  echo " This will continually check to see if the API for kubernetes is reachable."
  echo " This might loop forever if there was some uncaught error during start"
  echo " up."
  echo

  # This will fail until apiserver salt is updated.
  # Call curl directly — the original's `until $(curl ...)` relied on the
  # empty substitution inheriting curl's exit status.
  until curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" --max-time 5 \
      --fail --output /dev/null --silent "https://${KUBE_MASTER_IP}/healthz"; do
    printf "."
    sleep 2
  done

  echo "Kubernetes cluster created."

  export KUBE_CERT=""
  export KUBE_KEY=""
  export CA_CERT=""
  export CONTEXT="rackspace_${INSTANCE_PREFIX}"

  create-kubeconfig

  # Don't bail on errors, we want to be able to print some info.
  set +e

  detect-nodes

  # ensures KUBECONFIG is set
  get-kubeconfig-basicauth
  echo "All nodes may not be online yet, this is okay."
  echo
  echo "Kubernetes cluster is running. The master is running at:"
  echo
  echo "  https://${KUBE_MASTER_IP}"
  echo
  echo "The user name and password to use is located in ${KUBECONFIG:-$DEFAULT_KUBECONFIG}."
  echo
  echo "Security note: The server above uses a self signed certificate. This is"
  echo "    subject to \"Man in the middle\" type attacks."
  echo
}
|
||||
|
||||
# Perform preparations required to run e2e tests.
function prepare-e2e() {
  # Rackspace needs no special e2e setup; just emit a note.
  printf '%s\n' "Rackspace doesn't need special preparations for e2e tests"
}
|
Loading…
Reference in New Issue