Remove kube-up for vsphere

pull/6/head
Ritesh H Shukla 2016-12-22 20:15:37 +00:00
parent 4ee51fcc2d
commit 35a750ac21
20 changed files with 15 additions and 1125 deletions

View File

@@ -34,8 +34,6 @@
# * export KUBERNETES_PROVIDER=azure-legacy; wget -q -O - https://get.k8s.io | bash
# Vagrant (local virtual machines)
# * export KUBERNETES_PROVIDER=vagrant; wget -q -O - https://get.k8s.io | bash
# VMware vSphere
# * export KUBERNETES_PROVIDER=vsphere; wget -q -O - https://get.k8s.io | bash
# VMware Photon Controller
# * export KUBERNETES_PROVIDER=photon-controller; wget -q -O - https://get.k8s.io | bash
# Rackspace

View File

@@ -1,8 +1,7 @@
# This runs highstate on the minion nodes.
#
# Some of the cluster deployment scripts use the list of minions on the minions
# themselves (for example: every minion is configured with static routes to
# every other minion on a vSphere deployment). To propagate changes throughout
# themselves. To propagate changes throughout
# the pool, run highstate on all minions whenever a single minion starts.
#
highstate_minions:

View File

@@ -155,7 +155,7 @@ docker:
- watch:
- file: {{ environment_file }}
{% elif grains.cloud is defined and grains.cloud in ['vsphere', 'photon-controller'] and grains.os == 'Debian' and grains.osrelease_info[0] >=8 %}
{% elif grains.cloud is defined and grains.cloud in ['photon-controller'] and grains.os == 'Debian' and grains.osrelease_info[0] >=8 %}
{% if pillar.get('is_systemd') %}

View File

@@ -9,7 +9,7 @@
{% if grains.cloud == 'azure-legacy' %}
{% set cert_ip='_use_azure_dns_name_' %}
{% endif %}
{% if grains.cloud == 'vsphere' or grains.cloud == 'photon-controller' %}
{% if grains.cloud == 'photon-controller' %}
{% set cert_ip=grains.ip_interfaces.eth0[0] %}
{% endif %}
{% endif %}

View File

@@ -1,4 +1,4 @@
{% if grains['cloud'] is defined and grains.cloud in ['aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack'] %}
{% if grains['cloud'] is defined and grains.cloud in ['aws', 'gce', 'vagrant', 'photon-controller', 'openstack'] %}
# TODO: generate and distribute tokens on other cloud providers.
/srv/kubernetes/known_tokens.csv:
file.managed:

View File

@@ -22,7 +22,7 @@
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
{% endif -%}
{% if grains.cloud in [ 'vsphere', 'aws', 'gce' ] and grains.cloud_config is defined -%}
{% if grains.cloud in [ 'aws', 'gce' ] and grains.cloud_config is defined -%}
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
{% set cloud_config_mount = "{\"name\": \"cloudconfigmount\",\"mountPath\": \"" + grains.cloud_config + "\", \"readOnly\": true}," -%}
{% set cloud_config_volume = "{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"" + grains.cloud_config + "\"}}," -%}
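With grains.cloud_config set to, say, /etc/gce.conf (a hypothetical path), the two sets above render to the JSON fragments spliced into the apiserver pod manifest:

  {"name": "cloudconfigmount","mountPath": "/etc/gce.conf", "readOnly": true},
  {"name": "cloudconfigmount","hostPath": {"path": "/etc/gce.conf"}},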
@@ -85,7 +85,7 @@
{% set client_ca_file = "" -%}
{% set secure_port = "6443" -%}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack'] %}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'photon-controller', 'openstack'] %}
{% set secure_port = "443" -%}
{% set client_ca_file = "--client-ca-file=/srv/kubernetes/ca.crt" -%}
{% endif -%}
@@ -99,7 +99,7 @@
{% set basic_auth_file = "" -%}
{% set authz_mode = "" -%}
{% set abac_policy_file = "" -%}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack'] %}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'photon-controller', 'openstack'] %}
{% set token_auth_file = " --token-auth-file=/srv/kubernetes/known_tokens.csv" -%}
{% set basic_auth_file = " --basic-auth-file=/srv/kubernetes/basic_auth.csv" -%}
{% set authz_mode = " --authorization-mode=ABAC" -%}
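Taken together, on the providers still listed, these templates render kube-apiserver flags along these lines (an illustrative rendering, not output captured from this commit):

  --secure-port=443 --client-ca-file=/srv/kubernetes/ca.crt --token-auth-file=/srv/kubernetes/known_tokens.csv --basic-auth-file=/srv/kubernetes/basic_auth.csv --authorization-mode=ABAC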

View File

@@ -46,7 +46,7 @@
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
{% endif -%}
{% if grains.cloud in [ 'vsphere', 'aws', 'gce' ] and grains.cloud_config is defined -%}
{% if grains.cloud in [ 'aws', 'gce' ] and grains.cloud_config is defined -%}
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
{% set cloud_config_mount = "{\"name\": \"cloudconfigmount\",\"mountPath\": \"" + grains.cloud_config + "\", \"readOnly\": true}," -%}
{% set cloud_config_volume = "{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"" + grains.cloud_config + "\"}}," -%}
@@ -60,7 +60,7 @@
{% set root_ca_file = "" -%}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack', 'azure-legacy'] %}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'photon-controller', 'openstack', 'azure-legacy'] %}
{% set root_ca_file = "--root-ca-file=/srv/kubernetes/ca.crt" -%}
{% endif -%}

View File

@@ -5,7 +5,7 @@
{% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() -%}
{% set api_servers = "--master=https://" + ips[0][0] -%}
{% endif -%}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack', 'azure-legacy' ] %}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'photon-controller', 'openstack', 'azure-legacy' ] %}
{% set api_servers_with_port = api_servers -%}
{% else -%}
{% set api_servers_with_port = api_servers + ":6443" -%}

View File

@@ -16,7 +16,7 @@
{% endif -%}
# TODO: remove nginx for other cloud providers.
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack', 'azure-legacy'] %}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'photon-controller', 'openstack', 'azure-legacy'] %}
{% set api_servers_with_port = api_servers -%}
{% else -%}
{% set api_servers_with_port = api_servers + ":6443" -%}
@@ -27,7 +27,7 @@
{% set debugging_handlers = "--enable-debugging-handlers=true" -%}
{% if grains['roles'][0] == 'kubernetes-master' -%}
{% if grains.cloud in ['aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack', 'azure-legacy'] -%}
{% if grains.cloud in ['aws', 'gce', 'vagrant', 'photon-controller', 'openstack', 'azure-legacy'] -%}
# Unless given a specific directive, disable registration for the kubelet
# running on the master.
@@ -51,7 +51,7 @@
{% endif -%}
{% set cloud_config = "" -%}
{% if grains.cloud in [ 'openstack', 'vsphere' ] and grains.cloud_config is defined -%}
{% if grains.cloud in [ 'openstack' ] and grains.cloud_config is defined -%}
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
{% endif -%}

View File

@@ -74,7 +74,7 @@ base:
- openvpn
- nginx
{% endif %}
{% if grains['cloud'] is defined and grains['cloud'] in [ 'vagrant', 'gce', 'aws', 'vsphere', 'photon-controller', 'openstack', 'azure-legacy'] %}
{% if grains['cloud'] is defined and grains['cloud'] in [ 'vagrant', 'gce', 'aws', 'photon-controller', 'openstack', 'azure-legacy'] %}
- docker
- kubelet
{% endif %}

View File

@@ -0,0 +1 @@
Please use [Kubernetes-anywhere](https://github.com/kubernetes/kubernetes-anywhere) to get started on vSphere.

View File

@@ -1,35 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
SSH_OPTS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=ERROR -C"
# These need to be set
# export GOVC_URL='hostname' # hostname of the vCenter server
# export GOVC_USERNAME='username' # username for logging into vSphere.
# export GOVC_PASSWORD='password' # password for the above username
# export GOVC_NETWORK='Network Name' # Name of the network the VMs should join; often this is "VM Network".
# export GOVC_DATASTORE='target datastore'
# To get the resource pool via govc: govc ls -l 'host/*' | grep ResourcePool | awk '{print $1}' | xargs -n1 -t govc pool.info
# export GOVC_RESOURCE_POOL='resource pool or cluster with access to datastore'
# export GOVC_GUEST_LOGIN='kube:kube' # Used for logging into kube.vmdk during deployment.
# export GOVC_PORT=443 # The port to be used by vSphere cloud provider plugin
# To get the datacenter via govc: govc datacenter.info
# export GOVC_DATACENTER='ha-datacenter' # The datacenter to be used by vSphere cloud provider plugin
# Set GOVC_INSECURE if the host in GOVC_URL is using a certificate that cannot
# be verified (i.e. a self-signed certificate), but IS trusted.
# export GOVC_INSECURE=1
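For illustration, a fully populated environment might look like this (every value below is a placeholder, not taken from this commit):

export GOVC_URL='vcenter.example.com'
export GOVC_USERNAME='kube-admin'
export GOVC_PASSWORD='secret'
export GOVC_NETWORK='VM Network'
export GOVC_DATASTORE='datastore1'
export GOVC_RESOURCE_POOL='/ha-datacenter/host/cluster1/Resources'
export GOVC_GUEST_LOGIN='kube:kube'
export GOVC_PORT=443
export GOVC_DATACENTER='ha-datacenter'
export GOVC_INSECURE=1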

View File

@@ -1,70 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
NUM_NODES=4
DISK=./kube/kube.vmdk
GUEST_ID=debian7_64Guest
ENABLE_UUID=TRUE
INSTANCE_PREFIX=kubernetes
MASTER_TAG="${INSTANCE_PREFIX}-master"
NODE_TAG="${INSTANCE_PREFIX}-minion"
MASTER_NAME="${INSTANCE_PREFIX}-master"
MASTER_MEMORY_MB=1024
MASTER_CPU=1
NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_NODES}}))
NODE_IP_RANGES="10.244.0.0/16" # Min Prefix supported is 16
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
NODE_MEMORY_MB=2048
NODE_CPU=1
SERVICE_CLUSTER_IP_RANGE="10.244.240.0/20" # formerly PORTAL_NET
# Optional: Enable node logging.
ENABLE_NODE_LOGGING=false
LOGGING_DESTINATION=elasticsearch
# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING=false
ELASTICSEARCH_LOGGING_REPLICAS=1
# Optional: Cluster monitoring to setup as part of the cluster bring up:
# none - No cluster monitoring setup
# influxdb - Heapster, InfluxDB, and Grafana
# google - Heapster, Google Cloud Monitoring, and Google Cloud Logging
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="10.244.240.240"
DNS_DOMAIN="cluster.local"
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}"
# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
# We need to configure subject alternate names (SANs) for the master's certificate
# we generate. While users will connect via the external IP, pods (like the UI)
# will connect via the cluster IP, from the SERVICE_CLUSTER_IP_RANGE.
# In addition to the extra SANs here, we'll also add one for the service IP.
MASTER_EXTRA_SANS="DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${DNS_DOMAIN}"
# Optional: if set to true, kube-up will configure the cluster to run e2e tests.
E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}

View File

@@ -1,38 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
NUM_NODES=2
DISK=./kube/kube.vmdk
GUEST_ID=debian7_64Guest
INSTANCE_PREFIX="e2e-test-${USER}"
MASTER_TAG="${INSTANCE_PREFIX}-master"
NODE_TAG="${INSTANCE_PREFIX}-minion"
MASTER_NAME="${INSTANCE_PREFIX}-master"
MASTER_MEMORY_MB=1024
MASTER_CPU=1
NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_NODES}}))
NODE_IP_RANGES="10.244.0.0/16"
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
NODE_MEMORY_MB=1024
NODE_CPU=1
SERVICE_CLUSTER_IP_RANGE="10.244.240.0/20" # formerly PORTAL_NET
# Optional: if set to true, kube-up will configure the cluster to run e2e tests.
E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}

View File

@@ -1,130 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generate token files
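# Each token below is derived from 128 bytes of /dev/urandom: base64-encode,
# strip the "=+/" characters, then keep the first 32 characters. Rows in
# known_tokens.csv take the form token,user,uid.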
KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
if [[ ! -f "${known_tokens_file}" ]]; then
mkdir -p /srv/salt-overlay/salt/kube-apiserver
known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
(umask u=rw,go= ;
echo "$KUBELET_TOKEN,kubelet,kubelet" > $known_tokens_file;
echo "$KUBE_PROXY_TOKEN,kube_proxy,kube_proxy" >> $known_tokens_file)
mkdir -p /srv/salt-overlay/salt/kubelet
kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
(umask u=rw,go= ; echo "{\"BearerToken\": \"$KUBELET_TOKEN\", \"Insecure\": true }" > $kubelet_auth_file)
kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/kubeconfig"
mkdir -p /srv/salt-overlay/salt/kubelet
(umask 077;
cat > "${kubelet_kubeconfig_file}" << EOF
apiVersion: v1
kind: Config
clusters:
- cluster:
insecure-skip-tls-verify: true
name: local
contexts:
- context:
cluster: local
user: kubelet
name: service-account-context
current-context: service-account-context
users:
- name: kubelet
user:
token: ${KUBELET_TOKEN}
EOF
)
mkdir -p /srv/salt-overlay/salt/kube-proxy
kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig"
# Make a kubeconfig file with the token.
# TODO(etune): put apiserver certs into secret too, and reference from authfile,
# so that "Insecure" is not needed.
(umask 077;
cat > "${kube_proxy_kubeconfig_file}" << EOF
apiVersion: v1
kind: Config
clusters:
- cluster:
insecure-skip-tls-verify: true
name: local
contexts:
- context:
cluster: local
user: kube-proxy
name: service-account-context
current-context: service-account-context
users:
- name: kube-proxy
user:
token: ${KUBE_PROXY_TOKEN}
EOF
)
# Generate tokens for other "service accounts". Append to known_tokens.
#
# NB: If this list ever changes, this script actually has to
# change to detect the existence of this file, kill any deleted
# old tokens and add any new tokens (to handle the upgrade case).
service_accounts=("system:scheduler" "system:controller_manager" "system:logging" "system:monitoring" "system:dns")
for account in "${service_accounts[@]}"; do
token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
echo "${token},${account},${account}" >> "${known_tokens_file}"
done
fi
readonly BASIC_AUTH_FILE="/srv/salt-overlay/salt/kube-apiserver/basic_auth.csv"
if [ ! -e "${BASIC_AUTH_FILE}" ]; then
mkdir -p /srv/salt-overlay/salt/kube-apiserver
(umask 077;
echo "${KUBE_PASSWORD},${KUBE_USER},admin" > "${BASIC_AUTH_FILE}")
fi
# Create the overlay files for the salt tree. We create these in a separate
# place so that we can blow away the rest of the salt configs on a kube-push and
# re-apply these.
mkdir -p /srv/salt-overlay/pillar
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
node_instance_prefix: $NODE_INSTANCE_PREFIX
service_cluster_ip_range: $SERVICE_CLUSTER_IP_RANGE
enable_cluster_monitoring: "${ENABLE_CLUSTER_MONITORING:-none}"
enable_cluster_logging: "${ENABLE_CLUSTER_LOGGING:-false}"
enable_cluster_ui: "${ENABLE_CLUSTER_UI:-true}"
enable_node_logging: "${ENABLE_NODE_LOGGING:-false}"
logging_destination: $LOGGING_DESTINATION
elasticsearch_replicas: $ELASTICSEARCH_LOGGING_REPLICAS
enable_cluster_dns: "${ENABLE_CLUSTER_DNS:-false}"
dns_server: $DNS_SERVER_IP
dns_domain: $DNS_DOMAIN
federations_domain_map: ''
e2e_storage_test_environment: "${E2E_STORAGE_TEST_ENVIRONMENT:-false}"
cluster_cidr: "$NODE_IP_RANGES"
allocate_node_cidrs: "${ALLOCATE_NODE_CIDRS:-true}"
admission_control: NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,DefaultStorageClass,ResourceQuota
EOF
mkdir -p /srv/salt-overlay/salt/nginx
echo $MASTER_HTPASSWD > /srv/salt-overlay/salt/nginx/htpasswd

View File

@@ -1,22 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Remove kube.vm from /etc/hosts
sed -i -e 's/\b\w\+\.vm\b//' /etc/hosts
# Update hostname in /etc/hosts and /etc/hostname
sed -i -e "s/\\bkube\\b/${MY_NAME}/g" /etc/host{s,name}
hostname ${MY_NAME}
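# Illustrative effect, assuming a hypothetical MY_NAME=kubernetes-master: the
# /etc/hosts entry "127.0.1.1 kube kube.vm" becomes "127.0.1.1 kubernetes-master",
# and /etc/hostname changes from "kube" to "kubernetes-master".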

View File

@@ -1,26 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script assumes that the environment variables SERVER_BINARY_TAR and
# SALT_TAR name the release tars to unpack and install. It is meant to be
# pushed to the master and run.
echo "Unpacking Salt tree"
rm -rf kubernetes
tar xzf "${SALT_TAR}"
echo "Running release install script"
sudo kubernetes/saltbase/install.sh "${SERVER_BINARY_TAR}"

View File

@@ -1,74 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Use other Debian mirror
sed -i -e "s/http.us.debian.org/mirrors.kernel.org/" /etc/apt/sources.list
# Prepopulate the name of the Master
mkdir -p /etc/salt/minion.d
echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf
CLOUD_CONFIG=/etc/vsphere_cloud.config
# Configuration to initialize vsphere cloud provider
cat <<EOF > $CLOUD_CONFIG
[Global]
user = $GOVC_USERNAME
password = $GOVC_PASSWORD
server = $GOVC_URL
port = $GOVC_PORT
insecure-flag = $GOVC_INSECURE
datacenter = $GOVC_DATACENTER
datastore = $GOVC_DATASTORE
[Disk]
scsicontrollertype = pvscsi
EOF
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
roles:
- kubernetes-master
cbr-cidr: $MASTER_IP_RANGE
cloud: vsphere
master_extra_sans: $MASTER_EXTRA_SANS
kube_user: $KUBE_USER
cloud_config: $CLOUD_CONFIG
EOF
# Auto accept all keys from minions that try to join
mkdir -p /etc/salt/master.d
cat <<EOF >/etc/salt/master.d/auto-accept.conf
auto_accept: True
EOF
cat <<EOF >/etc/salt/master.d/reactor.conf
# React to new minions starting by running highstate on them.
reactor:
- 'salt/minion/*/start':
- /srv/reactor/highstate-new.sls
- /srv/reactor/highstate-masters.sls
- /srv/reactor/highstate-minions.sls
EOF
# Install Salt
#
# We specify -X to avoid a race condition that can cause minion failure to
# install. See https://github.com/saltstack/salt-bootstrap/issues/270
#
# -M installs the master
set +x
curl -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s -- -M -X stable 2016.3.2
set -x

View File

@@ -1,68 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Use other Debian mirror
sed -i -e "s/http.us.debian.org/mirrors.kernel.org/" /etc/apt/sources.list
# Resolve hostname of master
if ! grep -q $KUBE_MASTER /etc/hosts; then
echo "Adding host entry for $KUBE_MASTER"
echo "$KUBE_MASTER_IP $KUBE_MASTER" >> /etc/hosts
fi
# Prepopulate the name of the Master
mkdir -p /etc/salt/minion.d
echo "master: $KUBE_MASTER" > /etc/salt/minion.d/master.conf
# Turn on debugging for salt-minion
# echo "DAEMON_ARGS=\"\$DAEMON_ARGS --log-file-level=debug\"" > /etc/default/salt-minion
# Configuration to initialize vsphere cloud provider
CLOUD_CONFIG=/etc/vsphere_cloud.config
cat <<EOF > $CLOUD_CONFIG
[Global]
user = $GOVC_USERNAME
password = $GOVC_PASSWORD
server = $GOVC_URL
port = $GOVC_PORT
insecure-flag = $GOVC_INSECURE
datacenter = $GOVC_DATACENTER
datastore = $GOVC_DATASTORE
[Disk]
scsicontrollertype = pvscsi
EOF
# Our minions will have a pool role to distinguish them from the master.
#
# Setting the "minion_ip" here causes the kubelet to use its IP for
# identification instead of its hostname.
#
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
roles:
- kubernetes-pool
- kubernetes-pool-vsphere
cloud: vsphere
cloud_config: $CLOUD_CONFIG
EOF
# Install Salt
#
# We specify -X to avoid a race condition that can cause minion failure to
# install. See https://github.com/saltstack/salt-bootstrap/issues/270
curl -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s -- -X stable 2016.3.2

View File

@@ -1,645 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the local config.
# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/vsphere/config-common.sh"
source "${KUBE_ROOT}/cluster/vsphere/${KUBE_CONFIG_FILE-"config-default.sh"}"
source "${KUBE_ROOT}/cluster/common.sh"
# Detect the IP for the master
#
# Assumed vars:
# MASTER_NAME
# Vars set:
# KUBE_MASTER
# KUBE_MASTER_IP
function detect-master {
KUBE_MASTER=${MASTER_NAME}
if [[ -z "${KUBE_MASTER_IP-}" ]]; then
KUBE_MASTER_IP=$(govc vm.ip ${MASTER_NAME})
fi
if [[ -z "${KUBE_MASTER_IP-}" ]]; then
echo "Could not detect Kubernetes master node. Make sure you've launched a cluster with 'kube-up.sh'" >&2
exit 1
fi
echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
}
# Detect the information about the nodes
#
# Assumed vars:
# NODE_NAMES
# Vars set:
# KUBE_NODE_IP_ADDRESSES (array)
function detect-nodes {
KUBE_NODE_IP_ADDRESSES=()
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
local nodeip=$(govc vm.ip ${NODE_NAMES[$i]})
if [[ -z "${nodeip-}" ]] ; then
echo "Did not find ${NODE_NAMES[$i]}" >&2
else
echo "Found ${NODE_NAMES[$i]} at ${nodeip}"
KUBE_NODE_IP_ADDRESSES+=("${nodeip}")
fi
done
if [[ -z "${KUBE_NODE_IP_ADDRESSES-}" ]]; then
echo "Could not detect Kubernetes nodes. Make sure you've launched a cluster with 'kube-up.sh'" >&2
exit 1
fi
}
function trap-add {
local handler="$1"
local signal="${2-EXIT}"
local cur
cur="$(eval "sh -c 'echo \$3' -- $(trap -p ${signal})")"
if [[ -n "${cur}" ]]; then
handler="${cur}; ${handler}"
fi
trap "${handler}" ${signal}
}
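# Usage sketch (handler strings illustrative): handlers accumulate on the same
# signal instead of replacing one another, so both cleanups below run on exit:
#   trap-add 'rm -rf "${DIR_A}"' EXIT
#   trap-add 'rm -rf "${DIR_B}"' EXIT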
function verify-prereqs {
which "govc" >/dev/null || {
echo "Can't find govc in PATH, please install and retry."
echo ""
echo " go install github.com/vmware/govmomi/govc"
echo ""
exit 1
}
}
function verify-ssh-prereqs {
local rc
rc=0
ssh-add -L 1> /dev/null 2> /dev/null || rc="$?"
# "Could not open a connection to your authentication agent."
if [[ "${rc}" -eq 2 ]]; then
eval "$(ssh-agent)" > /dev/null
trap-add "kill ${SSH_AGENT_PID}" EXIT
fi
rc=0
ssh-add -L 1> /dev/null 2> /dev/null || rc="$?"
# "The agent has no identities."
if [[ "${rc}" -eq 1 ]]; then
# Try adding one of the default identities, with or without passphrase.
ssh-add || true
fi
# Expect at least one identity to be available.
if ! ssh-add -L 1> /dev/null 2> /dev/null; then
echo "Could not find or add an SSH identity."
echo "Please start ssh-agent, add your identity, and retry."
exit 1
fi
}
# Create a temp dir that'll be deleted at the end of this bash session.
#
# Vars set:
# KUBE_TEMP
function ensure-temp-dir {
if [[ -z ${KUBE_TEMP-} ]]; then
KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
trap-add 'rm -rf "${KUBE_TEMP}"' EXIT
fi
}
# Take the local tar files and upload them to the master.
#
# Assumed vars:
# MASTER_NAME
# SERVER_BINARY_TAR
# SALT_TAR
function upload-server-tars {
local vm_ip
vm_ip=$(govc vm.ip "${MASTER_NAME}")
kube-ssh ${vm_ip} "mkdir -p /home/kube/cache/kubernetes-install"
local tar
for tar in "${SERVER_BINARY_TAR}" "${SALT_TAR}"; do
kube-scp ${vm_ip} "${tar}" "/home/kube/cache/kubernetes-install/${tar##*/}"
done
}
# Run command over ssh
function kube-ssh {
local host="$1"
shift
ssh ${SSH_OPTS-} "kube@${host}" "$@" 2> /dev/null
}
# Copy file over ssh
function kube-scp {
local host="$1"
local src="$2"
local dst="$3"
scp ${SSH_OPTS-} "${src}" "kube@${host}:${dst}"
}
# Instantiate a generic kubernetes virtual machine (master or node)
#
# Usage:
# kube-up-vm VM_NAME [options to pass to govc vm.create]
#
# Example:
# kube-up-vm "vm-name" -c 2 -m 4096
#
# Assumed vars:
# DISK
# GUEST_ID
function kube-up-vm {
local vm_name="$1"
shift
govc vm.create \
-debug \
-disk="${DISK}" \
-g="${GUEST_ID}" \
-on=false \
-link=true \
"$@" \
"${vm_name}"
govc vm.change -e="disk.enableUUID=${ENABLE_UUID}" -vm="${vm_name}"
govc vm.power -on=true "${vm_name}"
# Retrieve IP first, to confirm the guest operations agent is running.
CURRENT_NODE_IP=$(govc vm.ip "${vm_name}")
govc guest.mkdir \
-l "kube:kube" \
-vm="${vm_name}" \
-p \
/home/kube/.ssh
ssh-add -L > "${KUBE_TEMP}/${vm_name}-authorized_keys"
govc guest.upload \
-l "kube:kube" \
-vm="${vm_name}" \
-f \
"${KUBE_TEMP}/${vm_name}-authorized_keys" \
/home/kube/.ssh/authorized_keys
}
# Kick off a local script on a kubernetes virtual machine (master or node)
#
# Usage:
# kube-run VM_NAME LOCAL_FILE
function kube-run {
local vm_name="$1"
local file="$2"
local dst="/tmp/$(basename "${file}")"
govc guest.upload -l "kube:kube" -vm="${vm_name}" -f -perm=0755 "${file}" "${dst}"
echo "uploaded ${file} to ${dst}"
local vm_ip
vm_ip=$(govc vm.ip "${vm_name}")
kube-ssh ${vm_ip} "nohup sudo ${dst} < /dev/null 1> ${dst}.out 2> ${dst}.err &"
}
#
# Run the command remotely and check whether the specified kube artifact is running.
# Keep checking until the timeout is hit (default: 300s).
#
# Usage:
# kube-check 10.0.0.1 cmd timeout
function kube-check {
nodeip=$1
cmd=$2
sleepstep=5
if [[ $# -lt 3 || -z $3 ]]; then
timeout=300
else
timeout=$3
fi
let effective_timeout=($timeout/$sleepstep)
attempt=0
echo
printf "This may take several minutes. Bound to $effective_timeout attempts"
while true; do
local rc=0
output=$(kube-ssh ${nodeip} "${cmd}") || rc=1
if [[ $rc != 0 ]]; then
if (( $attempt == $effective_timeout )); then
echo
echo "(Failed) rc: $rc Output: ${output}"
echo
echo -e "${cmd} failed to start on ${nodeip}. Your cluster is unlikely" >&2
echo "to work correctly. You may have to debug it by logging in." >&2
echo
exit 1
fi
else
echo
echo -e "[${cmd}] passed"
echo
break
fi
printf "."
attempt=$(($attempt+1))
sleep $sleepstep
done
}
#
# Verify that the salt-master is up. Check up to 60 times, then echo the bad output and exit.
#
# Usage:
# remote-pgrep 10.0.0.1 salt-master
#
function remote-pgrep {
nodeip=$1
regex=$2
max_attempt=60
printf "This may take several minutes. Bound to $max_attempt attempts"
attempt=0
while true; do
local rc=0
output=$(kube-ssh ${nodeip} pgrep ${regex}) || rc=1
if [[ $rc != 0 ]]; then
if (( $attempt == $max_attempt )); then
echo
echo "(Failed) rc: $rc, output:${output}"
echo
echo -e "${regex} failed to start on ${nodeip} after checking for $attempt attempts. Your cluster is unlikely" >&2
echo "to work correctly. You may have to debug it by logging in." >&2
echo
exit 1
fi
else
echo
echo -e "[${regex} running]"
echo
break
fi
printf "."
attempt=$(($attempt+1))
sleep 10
done
}
# Identify the pod subnets and set up routes between them.
#
# Assumptions:
# All packages have been installed and kubelet has started running.
#
function setup-pod-routes {
# wait till the kubelet sets up the bridge.
echo "Setting up routes"
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
printf "check if cbr0 bridge is ready on ${NODE_NAMES[$i]}\n"
kube-check ${KUBE_NODE_IP_ADDRESSES[$i]} 'sudo ifconfig cbr0 | grep -oP "inet addr:\K\S+"'
done
# identify the subnet assigned to the node by the kubernetes controller manager.
KUBE_NODE_BRIDGE_NETWORK=()
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
printf " finding network of cbr0 bridge on node ${NODE_NAMES[$i]}\n"
network=""
top2_octets_final=$(echo $NODE_IP_RANGES | awk -F "." '{ print $1 "." $2 }') # Assumes a 24-bit mask per node
attempt=0
max_attempt=60
while true ; do
attempt=$(($attempt+1))
network=$(kube-ssh ${KUBE_NODE_IP_ADDRESSES[$i]} 'sudo ip route show | grep -E "dev cbr0" | cut -d " " -f1')
top2_octets_read=$(echo $network | awk -F "." '{ print $1 "." $2 }')
if [[ "$top2_octets_read" == "$top2_octets_final" ]]; then
break
fi
if (( $attempt == $max_attempt )); then
echo
echo "(Failed) Waiting for cbr0 bridge to come up @ ${NODE_NAMES[$i]}"
echo
exit 1
fi
printf "."
sleep 5
done
printf "\n"
KUBE_NODE_BRIDGE_NETWORK+=("${network}")
done
# Make the pods visible to each other and to the master.
# The master needs to have routes to the pods for the UI to work.
local j
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
printf "setting up routes for ${NODE_NAMES[$i]}\n"
printf " adding route to ${MASTER_NAME} for network ${KUBE_NODE_BRIDGE_NETWORK[${i}]} via ${KUBE_NODE_IP_ADDRESSES[${i}]}\n"
kube-ssh "${KUBE_MASTER_IP}" "sudo route add -net ${KUBE_NODE_BRIDGE_NETWORK[${i}]} gw ${KUBE_NODE_IP_ADDRESSES[${i}]}"
for (( j=0; j<${#NODE_NAMES[@]}; j++)); do
if [[ $i != $j ]]; then
printf " adding route to ${NODE_NAMES[$j]} for network ${KUBE_NODE_BRIDGE_NETWORK[${i}]} via ${KUBE_NODE_IP_ADDRESSES[${i}]}\n"
kube-ssh ${KUBE_NODE_IP_ADDRESSES[$i]} "sudo route add -net ${KUBE_NODE_BRIDGE_NETWORK[$j]} gw ${KUBE_NODE_IP_ADDRESSES[$j]}"
fi
done
printf "\n"
done
}
# Instantiate a kubernetes cluster
#
# Assumed vars:
# KUBE_ROOT
# <Various vars set in config file>
function kube-up {
verify-ssh-prereqs
find-release-tars
ensure-temp-dir
load-or-gen-kube-basicauth
python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
-b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
local htpasswd
htpasswd=$(cat "${KUBE_TEMP}/htpasswd")
# This calculation of the service IP should work, but if you choose an
# alternate subnet, there's a small chance you'd need to modify the
# service_ip, below. We'll choose an IP like 10.244.240.1 by taking
# the first three octets of the SERVICE_CLUSTER_IP_RANGE and tacking
# on a .1
local octets
local service_ip
octets=($(echo "${SERVICE_CLUSTER_IP_RANGE}" | sed -e 's|/.*||' -e 's/\./ /g'))
((octets[3]+=1))
service_ip=$(echo "${octets[*]}" | sed 's/ /./g')
MASTER_EXTRA_SANS="IP:${service_ip},DNS:${MASTER_NAME},${MASTER_EXTRA_SANS}"
TMP_DIR=/tmp
HOSTS=hosts
ETC_HOSTS=/etc/${HOSTS}
echo "Starting master VM (this can take a minute)..."
(
echo "#! /bin/bash"
echo "readonly MY_NAME=${MASTER_NAME}"
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/hostname.sh"
echo "cd /home/kube/cache/kubernetes-install"
echo "readonly MASTER_NAME='${MASTER_NAME}'"
echo "readonly MASTER_IP_RANGE='${MASTER_IP_RANGE}'"
echo "readonly INSTANCE_PREFIX='${INSTANCE_PREFIX}'"
echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-node'"
echo "readonly NODE_IP_RANGES='${NODE_IP_RANGES}'"
echo "readonly SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
echo "readonly ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'"
echo "readonly LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'"
echo "readonly ENABLE_CLUSTER_DNS='${ENABLE_CLUSTER_DNS:-false}'"
echo "readonly ENABLE_CLUSTER_UI='${ENABLE_CLUSTER_UI:-false}'"
echo "readonly DNS_SERVER_IP='${DNS_SERVER_IP:-}'"
echo "readonly DNS_DOMAIN='${DNS_DOMAIN:-}'"
echo "readonly KUBE_USER='${KUBE_USER:-}'"
echo "readonly KUBE_PASSWORD='${KUBE_PASSWORD:-}'"
echo "readonly SERVER_BINARY_TAR='${SERVER_BINARY_TAR##*/}'"
echo "readonly SALT_TAR='${SALT_TAR##*/}'"
echo "readonly MASTER_HTPASSWD='${htpasswd}'"
echo "readonly E2E_STORAGE_TEST_ENVIRONMENT='${E2E_STORAGE_TEST_ENVIRONMENT:-}'"
echo "readonly MASTER_EXTRA_SANS='${MASTER_EXTRA_SANS:-}'"
echo "readonly GOVC_USERNAME='${GOVC_USERNAME}'"
echo "readonly GOVC_PASSWORD='${GOVC_PASSWORD}'"
echo "readonly GOVC_URL='${GOVC_URL}'"
echo "readonly GOVC_PORT='${GOVC_PORT}'"
echo "readonly GOVC_INSECURE='${GOVC_INSECURE}'"
echo "readonly GOVC_DATACENTER='${GOVC_DATACENTER}'"
echo "readonly GOVC_DATASTORE='${GOVC_DATASTORE}'"
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/create-dynamic-salt-files.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/install-release.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/salt-master.sh"
) > "${KUBE_TEMP}/master-start.sh"
kube-up-vm ${MASTER_NAME} -c ${MASTER_CPU-1} -m ${MASTER_MEMORY_MB-1024}
upload-server-tars
kube-run ${MASTER_NAME} "${KUBE_TEMP}/master-start.sh"
# Print master IP, so user can log in for debugging.
detect-master
echo
echo "Starting node VMs (this can take a minute)..."
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
(
echo "#! /bin/bash"
echo "readonly MY_NAME=${NODE_NAMES[$i]}"
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/hostname.sh"
echo "KUBE_MASTER=${KUBE_MASTER}"
echo "KUBE_MASTER_IP=${KUBE_MASTER_IP}"
echo "NODE_IP_RANGE=$NODE_IP_RANGES"
echo "readonly GOVC_USERNAME='${GOVC_USERNAME}'"
echo "readonly GOVC_PASSWORD='${GOVC_PASSWORD}'"
echo "readonly GOVC_URL='${GOVC_URL}'"
echo "readonly GOVC_PORT='${GOVC_PORT}'"
echo "readonly GOVC_INSECURE='${GOVC_INSECURE}'"
echo "readonly GOVC_DATACENTER='${GOVC_DATACENTER}'"
echo "readonly GOVC_DATASTORE='${GOVC_DATASTORE}'"
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/salt-minion.sh"
) > "${KUBE_TEMP}/node-start-${i}.sh"
(
kube-up-vm "${NODE_NAMES[$i]}" -c ${NODE_CPU-1} -m ${NODE_MEMORY_MB-1024}
add_to_hosts="${CURRENT_NODE_IP} ${NODE_NAMES[$i]}"
node_ip_file=${NODE_NAMES[$i]}-ip
echo "sudo bash -c \"echo $add_to_hosts >> /etc/hosts\"" > ${KUBE_TEMP}/${node_ip_file}
echo $add_to_hosts >> ${KUBE_TEMP}/${HOSTS}
kube-scp ${KUBE_MASTER_IP} ${KUBE_TEMP}/${node_ip_file} /${TMP_DIR}/
kube-ssh ${KUBE_MASTER_IP} "bash /tmp/${node_ip_file}"
kube-run "${NODE_NAMES[$i]}" "${KUBE_TEMP}/node-start-${i}.sh"
) &
done
local fail=0
local job
for job in $(jobs -p); do
wait "${job}" || fail=$((fail + 1))
done
if (( $fail != 0 )); then
echo "${fail} commands failed. Exiting." >&2
exit 2
fi
# Print node IPs, so user can log in for debugging.
detect-nodes
# Setup node to node vm-name resolution
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
kube-scp ${KUBE_NODE_IP_ADDRESSES[$i]} ${KUBE_TEMP}/${HOSTS} ${TMP_DIR}
kube-ssh ${KUBE_NODE_IP_ADDRESSES[$i]} "sudo bash -c \"cat ${TMP_DIR}/${HOSTS} >> ${ETC_HOSTS}\""
done
printf "Waiting for salt-master to be up on ${KUBE_MASTER} ...\n"
remote-pgrep ${KUBE_MASTER_IP} "salt-master"
printf "Waiting for all packages to be installed on ${KUBE_MASTER} ...\n"
kube-check ${KUBE_MASTER_IP} "sudo salt \"${MASTER_NAME}\" state.highstate -t 30 | grep -E \"Failed:[[:space:]]+0\""
local i
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
printf "Waiting for salt-minion to be up on ${NODE_NAMES[$i]} ....\n"
remote-pgrep ${KUBE_NODE_IP_ADDRESSES[$i]} "salt-minion"
printf "Waiting for all salt packages to be installed on ${NODE_NAMES[$i]} .... \n"
kube-check ${KUBE_MASTER_IP} 'sudo salt '"${NODE_NAMES[$i]}"' state.highstate -t 30 | grep -E "Failed:[[:space:]]+0"'
printf " OK\n"
done
echo
echo "Waiting for master and node initialization."
echo
echo " This will continually check to see if the API for kubernetes is reachable."
echo " This might loop forever if there was some uncaught error during start up."
echo
until curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" --max-time 5 \
--fail --output /dev/null --silent "https://${KUBE_MASTER_IP}/healthz"; do
printf "."
sleep 2
done
printf " OK\n"
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
printf "Waiting for ${NODE_NAMES[$i]} to become available..."
until curl --max-time 5 \
--fail --output /dev/null --silent "http://${KUBE_NODE_IP_ADDRESSES[$i]}:10250/healthz"; do
printf "."
sleep 2
done
printf " OK\n"
done
setup-pod-routes
echo "Kubernetes cluster created."
# TODO use token instead of basic auth
export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
export CONTEXT="vsphere_${INSTANCE_PREFIX}"
(
umask 077
kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>/dev/null
kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null
kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null
create-kubeconfig
)
printf "\n"
echo
echo "Sanity checking cluster..."
sleep 5
# Basic sanity checking
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
# Make sure docker is installed
kube-ssh "${KUBE_NODE_IP_ADDRESSES[$i]}" which docker > /dev/null || {
echo "Docker failed to install on ${NODE_NAMES[$i]}. Your cluster is unlikely" >&2
echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2
echo "cluster. (sorry!)" >&2
exit 1
}
done
# ensures KUBECONFIG is set
get-kubeconfig-basicauth
echo
echo "Kubernetes cluster is running. The master is running at:"
echo
echo " https://${KUBE_MASTER_IP}"
echo
echo "The user name and password to use is located in ${KUBECONFIG}"
echo
}
# Delete a kubernetes cluster
function kube-down {
govc vm.destroy ${MASTER_NAME} &
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
govc vm.destroy ${NODE_NAMES[i]} &
done
wait
}
# Update a kubernetes cluster with latest source
function kube-push {
verify-ssh-prereqs
find-release-tars
detect-master
upload-server-tars
(
echo "#! /bin/bash"
echo "cd /home/kube/cache/kubernetes-install"
echo "readonly SERVER_BINARY_TAR='${SERVER_BINARY_TAR##*/}'"
echo "readonly SALT_TAR='${SALT_TAR##*/}'"
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/install-release.sh"
echo "echo Executing configuration"
echo "sudo salt '*' mine.update"
echo "sudo salt --force-color '*' state.highstate"
) | kube-ssh "${KUBE_MASTER_IP}"
get-kubeconfig-basicauth
echo
echo "Kubernetes cluster is running. The master is running at:"
echo
echo " https://${KUBE_MASTER_IP}"
echo
echo "The user name and password to use is located in ${KUBECONFIG:-$DEFAULT_KUBECONFIG}."
echo
}
# Execute prior to running tests to build a release if required for env
function test-build-release {
echo "TODO"
}
# Execute prior to running tests to initialize required structure
function test-setup {
echo "TODO"
}
# Execute after running tests to perform any required clean-up
function test-teardown {
echo "TODO"
}