#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# A library of helper functions that each provider hosting Kubernetes must implement to use cluster/kube-*.sh scripts.

[ ! -z ${UTIL_SH_DEBUG+x} ] && set -x

command -v kubectl >/dev/null 2>&1 || { echo "kubectl not found in path. Aborting." >&2; exit 1; }

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
readonly ROOT=$(dirname "${BASH_SOURCE}")

source "$ROOT/${KUBE_CONFIG_FILE:-"config-default.sh"}"
source "$KUBE_ROOT/cluster/common.sh"

export LIBVIRT_DEFAULT_URI=qemu:///system
export SERVICE_ACCOUNT_LOOKUP=${SERVICE_ACCOUNT_LOOKUP:-true}
export ADMISSION_CONTROL=${ADMISSION_CONTROL:-Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,ResourceQuota}

readonly POOL=kubernetes
readonly POOL_PATH=/var/lib/libvirt/images/kubernetes

[ ! -d "${POOL_PATH}" ] && (echo "$POOL_PATH does not exist"; exit 1)

# Creates a kubeconfig file for the kubelet.
# Args: address (e.g. "http://localhost:8080"), destination file path
function create-kubelet-kubeconfig() {
  local apiserver_address="${1}"
  local destination="${2}"
  if [[ -z "${apiserver_address}" ]]; then
    echo "Must provide API server address to create Kubelet kubeconfig file!"
    exit 1
  fi
  if [[ -z "${destination}" ]]; then
    echo "Must provide destination path to create Kubelet kubeconfig file!"
    exit 1
  fi
  echo "Creating Kubelet kubeconfig file"
  local dest_dir="$(dirname "${destination}")"
  mkdir -p "${dest_dir}" &>/dev/null || sudo mkdir -p "${dest_dir}"
  sudo=$(test -w "${dest_dir}" || echo "sudo -E")
  cat <<EOF | ${sudo} tee "${destination}" > /dev/null
apiVersion: v1
kind: Config
clusters:
  - cluster:
      server: ${apiserver_address}
    name: local
contexts:
  - context:
      cluster: local
    name: local
current-context: local
EOF
}
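
# Example invocation (mirrors the call in kube-up below):
#   create-kubelet-kubeconfig "http://${MASTER_IP}:8080" "${POOL_PATH}/kubernetes/kubeconfig/kubelet.kubeconfig"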

# join <delim> <list...>
# Concatenates the list elements with the delimiter passed as first parameter
#
# Ex: join , a b c
#  -> a,b,c
function join {
  local IFS="$1"
  shift
  echo "$*"
}

# Must ensure that the following ENV vars are set
function detect-master {
  KUBE_MASTER_IP=$MASTER_IP
  KUBE_MASTER=$MASTER_NAME
  export KUBERNETES_MASTER=http://$KUBE_MASTER_IP:8080
  echo "KUBE_MASTER_IP: $KUBE_MASTER_IP"
  echo "KUBE_MASTER: $KUBE_MASTER"
}

# Get node IP addresses and store in KUBE_NODE_IP_ADDRESSES[]
function detect-nodes {
  KUBE_NODE_IP_ADDRESSES=("${NODE_IPS[@]}")
}

function generate_certs {
  node_names=("${@}")

  # Root CA
  tempdir=$(mktemp -d)
  CA_KEY=${CA_KEY:-"$tempdir/ca-key.pem"}
  CA_CERT=${CA_CERT:-"$tempdir/ca.pem"}
  openssl genrsa -out "${CA_KEY}" 2048 2>/dev/null
  openssl req -x509 -new -nodes -key "${CA_KEY}" -days 10000 -out "${CA_CERT}" -subj "/CN=kube-ca" 2>/dev/null

  # API server key pair
  KUBE_KEY=${KUBE_KEY:-"$tempdir/apiserver-key.pem"}
  API_SERVER_CERT_REQ=${API_SERVER_CERT_REQ:-"$tempdir/apiserver.csr"}
  openssl genrsa -out "${KUBE_KEY}" 2048 2>/dev/null
  KUBERNETES_SVC=${SERVICE_CLUSTER_IP_RANGE%.*}.1 openssl req -new -key "${KUBE_KEY}" -out "${API_SERVER_CERT_REQ}" -subj "/CN=kube-apiserver" -config cluster/libvirt-coreos/openssl.cnf 2>/dev/null
  KUBE_CERT=${KUBE_CERT:-"$tempdir/apiserver.pem"}
  KUBERNETES_SVC=${SERVICE_CLUSTER_IP_RANGE%.*}.1 openssl x509 -req -in "${API_SERVER_CERT_REQ}" -CA "${CA_CERT}" -CAkey "${CA_KEY}" -CAcreateserial -out "${KUBE_CERT}" -days 365 -extensions v3_req -extfile cluster/libvirt-coreos/openssl.cnf 2>/dev/null

  # Copy apiserver and controller TLS assets
  mkdir -p "$POOL_PATH/kubernetes/certs"
  cp "${KUBE_CERT}" "$POOL_PATH/kubernetes/certs"
  cp "${KUBE_KEY}" "$POOL_PATH/kubernetes/certs"
  cp "${CA_CERT}" "$POOL_PATH/kubernetes/certs"

  # Generate node certificates
  for ((i=0; i < $NUM_NODES; i++)); do
    openssl genrsa -out "$tempdir/${node_names[$i]}-node-key.pem" 2048 2>/dev/null
    cp "$tempdir/${node_names[$i]}-node-key.pem" "$POOL_PATH/kubernetes/certs"
    WORKER_IP=${NODE_IPS[$i]} openssl req -new -key "$tempdir/${node_names[$i]}-node-key.pem" -out "$tempdir/${node_names[$i]}-node.csr" -subj "/CN=${node_names[$i]}" -config cluster/libvirt-coreos/node-openssl.cnf 2>/dev/null
    WORKER_IP=${NODE_IPS[$i]} openssl x509 -req -in "$tempdir/${node_names[$i]}-node.csr" -CA "${CA_CERT}" -CAkey "${CA_KEY}" -CAcreateserial -out "$tempdir/${node_names[$i]}-node.pem" -days 365 -extensions v3_req -extfile cluster/libvirt-coreos/node-openssl.cnf 2>/dev/null
    cp "$tempdir/${node_names[$i]}-node.pem" "$POOL_PATH/kubernetes/certs"
  done
  echo "TLS assets generated..."
}
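
# Example invocation (mirrors the call in kube-up below):
#   generate_certs "${NODE_NAMES[@]}"
# To sanity-check the generated API server certificate by hand (a sketch, not
# part of the provisioning flow):
#   openssl x509 -in "$POOL_PATH/kubernetes/certs/apiserver.pem" -noout -subject -dates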

# Setup registry proxy
function setup_registry_proxy {
  if [[ "$ENABLE_CLUSTER_REGISTRY" == "true" ]]; then
    cp "./cluster/saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml" "$POOL_PATH/kubernetes/manifests"
  fi
}

# Verify prereqs on host machine
function verify-prereqs {
  if ! which virsh >/dev/null; then
    echo "Can't find virsh in PATH, please fix and retry." >&2
    exit 1
  fi
  if ! virsh nodeinfo >/dev/null; then
    exit 1
  fi
  if [[ "$(</sys/kernel/mm/ksm/run)" -ne "1" ]]; then
    echo "KSM is not enabled" >&2
    echo "Enabling it would reduce the memory footprint of large clusters" >&2
    if [[ -t 0 ]]; then
      read -t 5 -n 1 -p "Do you want to enable KSM (requires root password) (y/n)? " answer
      echo ""
      if [[ "$answer" == 'y' ]]; then
        su -c 'echo 1 > /sys/kernel/mm/ksm/run'
      fi
    else
      echo "You can enable it with (as root):" >&2
      echo "" >&2
      echo "  echo 1 > /sys/kernel/mm/ksm/run" >&2
      echo "" >&2
    fi
  fi
}
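
# Note: /sys/kernel/mm/ksm/run holds 0 (KSM disabled), 1 (KSM running) or
# 2 (unmerge all merged pages); the check above treats only 1 as enabled.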

# Destroy the libvirt storage pool and all the images inside
#
# If 'keep_base_image' is passed as first parameter,
# the base image is kept, as well as the storage pool.
# All the other images are deleted.
function destroy-pool {
  virsh pool-info $POOL >/dev/null 2>&1 || return

  rm -rf "$POOL_PATH"/kubernetes/*
  rm -rf "$POOL_PATH"/kubernetes_config*/*
  local vol
  virsh vol-list $POOL | awk 'NR>2 && !/^$/ && $1 ~ /^kubernetes/ {print $1}' | \
      while read vol; do
        virsh vol-delete $vol --pool $POOL
      done

  [[ "$1" == 'keep_base_image' ]] && return

  set +e
  virsh vol-delete coreos_base.img --pool $POOL
  virsh pool-destroy $POOL
  rmdir "$POOL_PATH"
  set -e
}
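
# Example (mirrors the call in kube-down below): delete every kubernetes_*
# volume but keep the storage pool and the CoreOS base image:
#   destroy-pool keep_base_image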

# Creates the libvirt storage pool and populates it with
# - the CoreOS base image
# - the kubernetes binaries
function initialize-pool {
  mkdir -p "$POOL_PATH"
  if ! virsh pool-info $POOL >/dev/null 2>&1; then
    virsh pool-create-as $POOL dir --target "$POOL_PATH"
  fi

  wget -N -P "$ROOT" https://${COREOS_CHANNEL:-alpha}.release.core-os.net/amd64-usr/current/coreos_production_qemu_image.img.bz2
  if [[ "$ROOT/coreos_production_qemu_image.img.bz2" -nt "$POOL_PATH/coreos_base.img" ]]; then
    bunzip2 -f -k "$ROOT/coreos_production_qemu_image.img.bz2"
    virsh vol-delete coreos_base.img --pool $POOL 2> /dev/null || true
  fi

  if ! virsh vol-list $POOL | grep -q coreos_base.img; then
    virsh vol-create-as $POOL coreos_base.img 10G --format qcow2
    virsh vol-upload coreos_base.img "$ROOT/coreos_production_qemu_image.img" --pool $POOL
  fi

  mkdir -p "$POOL_PATH/kubernetes"
  kube-push-internal

  mkdir -p "$POOL_PATH/kubernetes/manifests"
  if [[ "$ENABLE_NODE_LOGGING" == "true" ]]; then
    if [[ "$LOGGING_DESTINATION" == "elasticsearch" ]]; then
      cp "$KUBE_ROOT/cluster/saltbase/salt/fluentd-es/fluentd-es.manifest" "$POOL_PATH/kubernetes/manifests"
    elif [[ "$LOGGING_DESTINATION" == "gcp" ]]; then
      cp "$KUBE_ROOT/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.manifest" "$POOL_PATH/kubernetes/manifests"
    fi
  fi

  mkdir -p "$POOL_PATH/kubernetes/addons"
  if [[ "$ENABLE_CLUSTER_DNS" == "true" ]]; then
    render-template "$ROOT/namespace.yaml" > "$POOL_PATH/kubernetes/addons/namespace.yaml"
    render-template "$ROOT/kubedns-svc.yaml" > "$POOL_PATH/kubernetes/addons/kubedns-svc.yaml"
    render-template "$ROOT/kubedns-controller.yaml" > "$POOL_PATH/kubernetes/addons/kubedns-controller.yaml"
    render-template "$ROOT/kubedns-sa.yaml" > "$POOL_PATH/kubernetes/addons/kubedns-sa.yaml"
    render-template "$ROOT/kubedns-cm.yaml" > "$POOL_PATH/kubernetes/addons/kubedns-cm.yaml"
  fi

  virsh pool-refresh $POOL
}

function destroy-network {
  set +e
  virsh net-destroy kubernetes_global
  virsh net-destroy kubernetes_pods
  set -e
}

function initialize-network {
  virsh net-create "$ROOT/network_kubernetes_global.xml"
  virsh net-create "$ROOT/network_kubernetes_pods.xml"
}

function render-template {
  eval "echo \"$(cat $1)\""
}
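
# render-template expands the shell variable references in a template file by
# running its content through eval; e.g. a (hypothetical) template line
#     server: http://${MASTER_IP}:8080
# is emitted with the current value of MASTER_IP. Because of the eval, only
# trusted templates should be rendered.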

function wait-cluster-readiness {
  echo "Wait for cluster readiness"

  local timeout=120
  while [[ $timeout -ne 0 ]]; do
    nb_ready_nodes=$(kubectl get nodes -o go-template="{{range.items}}{{range.status.conditions}}{{.type}}{{end}}:{{end}}" 2>/dev/null | tr ':' '\n' | grep -c Ready || true)
    echo "Nb ready nodes: $nb_ready_nodes / $NUM_NODES"
    if [[ "$nb_ready_nodes" -eq "$NUM_NODES" ]]; then
      return 0
    fi

    timeout=$(($timeout-1))
    sleep .5
  done

  return 1
}
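
# The go-template above concatenates every condition type of each node and
# appends one ':' per node (e.g. a single healthy node might yield
# "OutOfDiskMemoryPressureDiskPressureReady:"; the exact condition list
# depends on the Kubernetes version); tr then puts each node's conditions on
# its own line so grep -c can count the nodes reporting a Ready condition.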

# Instantiate a kubernetes cluster
function kube-up {
  detect-master
  detect-nodes
  initialize-pool keep_base_image
  generate_certs "${NODE_NAMES[@]}"
  setup_registry_proxy
  initialize-network

  readonly ssh_keys="$(cat ~/.ssh/*.pub | sed 's/^/ - /')"
  readonly kubernetes_dir="$POOL_PATH/kubernetes"

  local i
  for ((i=0; i <= $NUM_NODES; i++)); do
    if [[ $i -eq $NUM_NODES ]]; then
      etcd2_initial_cluster[$i]="${MASTER_NAME}=http://${MASTER_IP}:2380"
    else
      etcd2_initial_cluster[$i]="${NODE_NAMES[$i]}=http://${NODE_IPS[$i]}:2380"
    fi
  done
  etcd2_initial_cluster=$(join , "${etcd2_initial_cluster[@]}")
  readonly machines=$(join , "${KUBE_NODE_IP_ADDRESSES[@]}")

  for ((i=0; i <= $NUM_NODES; i++)); do
    if [[ $i -eq $NUM_NODES ]]; then
      type=master
      name=$MASTER_NAME
      public_ip=$MASTER_IP
    else
      type=node-$(printf "%02d" $i)
      name=${NODE_NAMES[$i]}
      public_ip=${NODE_IPS[$i]}
    fi
    image=$name.img
    config=kubernetes_config_$type

    virsh vol-create-as $POOL $image 10G --format qcow2 --backing-vol coreos_base.img --backing-vol-format qcow2

    mkdir -p "$POOL_PATH/$config/openstack/latest"
    render-template "$ROOT/user_data.yml" > "$POOL_PATH/$config/openstack/latest/user_data"
    virsh pool-refresh $POOL

    domain_xml=$(mktemp)
    render-template "$ROOT/coreos.xml" > "$domain_xml"
    virsh create "$domain_xml"
    rm "$domain_xml"
  done

  export KUBE_SERVER="http://192.168.10.1:8080"
  export CONTEXT="libvirt-coreos"
  create-kubeconfig

  create-kubelet-kubeconfig "http://${MASTER_IP}:8080" "${POOL_PATH}/kubernetes/kubeconfig/kubelet.kubeconfig"

  wait-cluster-readiness

  echo "Kubernetes cluster is running. The master is running at:"
  echo
  echo "  http://${KUBE_MASTER_IP}:8080"
  echo
  echo "You can control the Kubernetes cluster with: 'kubectl'"
  echo "You can connect on the master with: 'ssh core@${KUBE_MASTER_IP}'"

  wait-registry-readiness
}

function create_registry_rc() {
  echo "Create registry replication controller"
  kubectl create -f "$ROOT/registry-rc.yaml"
  local timeout=120
  while [[ $timeout -ne 0 ]]; do
    phase=$(kubectl get pods -n kube-system -l k8s-app=kube-registry --output='jsonpath={.items..status.phase}')
    if [ "$phase" = "Running" ]; then
      return 0
    fi
    timeout=$(($timeout-1))
    sleep .5
  done
}

function create_registry_svc() {
  echo "Create registry service"
  kubectl create -f "${KUBE_ROOT}/cluster/addons/registry/registry-svc.yaml"
}

function wait-registry-readiness() {
  if [[ "$ENABLE_CLUSTER_REGISTRY" != "true" ]]; then
    return 0
  fi
  echo "Wait for registry readiness..."
  local timeout=120
  while [[ $timeout -ne 0 ]]; do
    phase=$(kubectl get namespaces --output=jsonpath='{.items[?(@.metadata.name=="kube-system")].status.phase}')
    if [ "$phase" = "Active" ]; then
      create_registry_rc
      create_registry_svc
      return 0
    fi
    echo "waiting for namespace kube-system"
    timeout=$(($timeout-1))
    sleep .5
  done
}

# Delete a kubernetes cluster
function kube-down {
  virsh list | awk 'NR>2 && !/^$/ && $2 ~ /^kubernetes/ {print $2}' | \
      while read dom; do
        virsh destroy $dom
      done
  destroy-pool keep_base_image
  destroy-network
}

# The kubernetes binaries are pushed to a host directory which is exposed to the VM
function upload-server-tars {
  tar -x -C "$POOL_PATH/kubernetes" -f "$SERVER_BINARY_TAR" kubernetes
  rm -rf "$POOL_PATH/kubernetes/bin"
  mv "$POOL_PATH/kubernetes/kubernetes/server/bin" "$POOL_PATH/kubernetes/bin"
  chmod -R 755 "$POOL_PATH/kubernetes/bin"
  rm -fr "$POOL_PATH/kubernetes/kubernetes"
}

# Update a kubernetes cluster with latest source
function kube-push {
  kube-push-internal
  ssh-to-node "$MASTER_NAME" "sudo systemctl restart kube-apiserver kube-controller-manager kube-scheduler"
  for ((i=0; i < NUM_NODES; i++)); do
    ssh-to-node "${NODE_NAMES[$i]}" "sudo systemctl restart kubelet kube-proxy"
  done
  wait-cluster-readiness
}

function kube-push-internal {
  case "${KUBE_PUSH:-release}" in
    release)
      kube-push-release;;
    local)
      kube-push-local;;
    *)
      echo "The only known push methods are \"release\" to use the release tarball or \"local\" to use the binaries built by make. KUBE_PUSH is set to \"$KUBE_PUSH\"" >&2
      return 1;;
  esac
}
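
# Example: push locally built binaries instead of the release tarball
# (a sketch; assumes make has populated ${KUBE_ROOT}/_output/local/go/bin):
#   KUBE_PUSH=local cluster/kube-push.sh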

function kube-push-release {
  find-release-tars
  upload-server-tars
}

function kube-push-local {
  rm -rf "$POOL_PATH/kubernetes/bin"/*
  mkdir -p "$POOL_PATH/kubernetes/bin"
  cp "${KUBE_ROOT}/_output/local/go/bin"/* "$POOL_PATH/kubernetes/bin"
}

# Execute prior to running tests to build a release if required for env
function test-build-release {
  echo "TODO"
}

# Execute prior to running tests to initialize required structure
function test-setup {
  "${KUBE_ROOT}/cluster/kube-up.sh"
}

# Execute after running tests to perform any required clean-up
function test-teardown {
  kube-down
}

# SSH to a node by name or IP ($1) and run a command ($2).
function ssh-to-node {
  local node="$1"
  local cmd="$2"
  local machine

  if [[ "$node" == "$MASTER_IP" ]] || [[ "$node" =~ ^"$NODE_IP_BASE" ]]; then
    machine="$node"
  elif [[ "$node" == "$MASTER_NAME" ]]; then
    machine="$MASTER_IP"
  else
    for ((i=0; i < NUM_NODES; i++)); do
      if [[ "$node" == "${NODE_NAMES[$i]}" ]]; then
        machine="${NODE_IPS[$i]}"
        break
      fi
    done
  fi
  if [[ -z "$machine" ]]; then
    echo "$node is an unknown machine to ssh to" >&2
  fi

  ssh -o ConnectTimeout=30 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ControlMaster=no "core@$machine" "$cmd"
}
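
# Example (mirrors the calls in kube-push above):
#   ssh-to-node "${NODE_NAMES[0]}" "sudo systemctl restart kubelet kube-proxy"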

# Perform preparations required to run e2e tests
function prepare-e2e() {
  echo "libvirt-coreos doesn't need special preparations for e2e tests" 1>&2
}