libvirt-coreos cluster provider

libvirt-coreos is a cluster provider for kubernetes that starts local VMs and
runs kubernetes on them.
Its goal is to provide a multi-machine environment for developing and testing kubernetes.

The purpose is mostly the same as the vagrant provider, but with a strong focus on
efficiency: the vagrant cluster takes a long time to boot and consumes a huge
amount of disk space, whereas libvirt-coreos aims to be much cheaper.
As a consequence, libvirt-coreos makes it possible to start bigger clusters, with more minions, than vagrant.
Commit ce7a993269 (parent c9657cad04) by Lénaïc Huard, 2015-02-17 22:20:07 +01:00
14 changed files with 828 additions and 3 deletions


@@ -18,7 +18,7 @@
# You can override the default provider by exporting the KUBERNETES_PROVIDER
# variable in your bashrc
#
# The valid values: 'gce', 'gke', 'aws', 'azure', 'vagrant', 'vsphere'
# The valid values: 'gce', 'gke', 'aws', 'azure', 'vagrant', 'vsphere', 'libvirt-coreos'
KUBERNETES_PROVIDER=${KUBERNETES_PROVIDER:-gce}
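
To point the cluster scripts at the new provider, export the variable before calling them, e.g. in your bashrc. A minimal sketch:

```shell
# Use the libvirt-coreos provider for all cluster/kube-*.sh scripts
# (when the variable is unset, the scripts fall back to 'gce').
export KUBERNETES_PROVIDER=libvirt-coreos
```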


@@ -120,6 +120,11 @@ elif [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then
config=(
"--kubeconfig=$HOME/.kubernetes_vagrant_kubeconfig"
)
elif [[ "$KUBERNETES_PROVIDER" == "libvirt-coreos" ]]; then
detect-master > /dev/null
config=(
"--server=http://${KUBE_MASTER_IP}:8080"
)
fi
echo "current-context: \"$(${kubectl} "${config[@]:+${config[@]}}" config view -o template --template='{{index . "current-context"}}')\"" >&2

cluster/libvirt-coreos/.gitignore

@@ -0,0 +1,2 @@
/libvirt_storage_pool/
/coreos_production_qemu_image.img.bz2


@@ -0,0 +1,21 @@
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Contains configuration values for interacting with the libvirt CoreOS cluster
# Number of minions in the cluster
NUM_MINIONS=${NUM_MINIONS:-3}
export NUM_MINIONS


@@ -0,0 +1,71 @@
<domain type='kvm'>
<name>${name}</name>
<memory unit='MiB'>512</memory>
<currentMemory unit='MiB'>512</currentMemory>
<vcpu placement='static'>2</vcpu>
<os>
<type arch='x86_64' machine='pc'>hvm</type>
<boot dev='hd'/>
</os>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>restart</on_crash>
<devices>
<emulator>$(which qemu-system-$(uname -m))</emulator>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2'/>
<source file='${POOL_PATH}/${image}'/>
<target dev='vda' bus='virtio'/>
</disk>
<controller type='usb' index='0'>
</controller>
<filesystem type='mount' accessmode='squash'>
<source dir='${POOL_PATH}/${config}'/>
<target dir='config-2'/>
<readonly/>
</filesystem>
<filesystem type='mount' accessmode='squash'>
<source dir='${kubernetes_dir}'/>
<target dir='kubernetes'/>
<readonly/>
</filesystem>
<interface type='network'>
<mac address='52:54:00:00:00:${i}'/>
<source network='kubernetes_global'/>
<model type='virtio'/>
</interface>
<interface type='network'>
<mac address='52:54:00:00:01:${i}'/>
<source network='kubernetes_pods'/>
<model type='virtio'/>
</interface>
<serial type='pty'>
<target port='0'/>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
<channel type='spicevmc'>
<target type='virtio' name='com.redhat.spice.0'/>
</channel>
<input type='tablet' bus='usb'/>
<input type='mouse' bus='ps2'/>
<input type='keyboard' bus='ps2'/>
<graphics type='spice' autoport='yes'/>
<sound model='ich6'>
</sound>
<video>
<model type='qxl' vram='9216' heads='1'/>
</video>
<redirdev bus='usb' type='spicevmc'>
</redirdev>
<memballoon model='virtio'>
</memballoon>
</devices>
</domain>
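
This domain definition is a shell template: `${name}`, `${image}`, `${config}`, `${i}`, `${POOL_PATH}` and `${kubernetes_dir}` are expanded by `render-template` (defined in this provider's util.sh) before the result is handed to `virsh create`. A rough sketch of that step, with illustrative values for the variables that `kube-up` normally computes (and assuming util.sh has been sourced so that `$ROOT`, `$POOL_PATH` and `render-template` are available):

```shell
# Illustrative values; kube-up sets these for each node.
i=1
name=kubernetes_minion-01
image=$name.img
config=kubernetes_config_minion-01
kubernetes_dir="$POOL_PATH/kubernetes"

domain_xml=$(mktemp)
render-template "$ROOT/coreos.xml" > "$domain_xml"  # expand the ${...} references above
virsh create "$domain_xml"                          # start a transient domain from the XML
rm "$domain_xml"
```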


@@ -0,0 +1,11 @@
<network>
<name>kubernetes_global</name>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<bridge name='virbr_kub_gl' stp='off' delay='0'/>
<ip address='192.168.10.254' netmask='255.255.255.0'>
</ip>
</network>


@@ -0,0 +1,6 @@
<network>
<name>kubernetes_pods</name>
<bridge name='virbr_kub_pods' stp='off' delay='0'/>
<ip address='10.10.0.1' netmask='255.255.0.0'>
</ip>
</network>
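
Both network definitions are loaded as transient libvirt networks before any domain is created; the provider's `initialize-network` boils down to these two calls:

```shell
virsh net-create "$ROOT/network_kubernetes_global.xml"
virsh net-create "$ROOT/network_kubernetes_pods.xml"
```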


@@ -0,0 +1,111 @@
#cloud-config
hostname: ${name}
ssh_authorized_keys:
${ssh_keys}
write_files:
- path: /etc/systemd/journald.conf
permissions: 0644
content: |
[Journal]
SystemMaxUse=50M
RuntimeMaxUse=50M
coreos:
etcd:
name: ${name}
addr: 192.168.10.$(($i+1)):4001
bind-addr: 0.0.0.0
peer-addr: 192.168.10.$(($i+1)):7001
# peers: {etcd_peers}
discovery: ${discovery}
units:
- name: static.network
command: start
content: |
[Match]
# Name=eth0
MACAddress=52:54:00:00:00:${i}
[Network]
Address=192.168.10.$(($i+1))/24
DNS=192.168.10.254
Gateway=192.168.10.254
- name: cbr0.netdev
command: start
content: |
[NetDev]
Kind=bridge
Name=cbr0
- name: cbr0.network
command: start
content: |
[Match]
Name=cbr0
[Network]
Address=10.10.$(($i+1)).1/24
[Route]
Destination=10.10.0.0/16
- name: cbr0-interface.network
command: start
content: |
[Match]
# Name=eth1
MACAddress=52:54:00:00:01:${i}
[Network]
Bridge=cbr0
- name: nat.service
command: start
content: |
[Unit]
Description=NAT non container traffic
[Service]
ExecStart=/usr/sbin/iptables -w -t nat -A POSTROUTING -o eth0 -j MASQUERADE ! -d 10.10.0.0/16
RemainAfterExit=yes
Type=oneshot
- name: etcd.service
command: start
- name: docker.service
command: start
drop-ins:
- name: 50-opts.conf
content: |
[Service]
Environment=DOCKER_OPTS='--bridge=cbr0 --iptables=false'
- name: docker-tcp.socket
command: start
enable: yes
content: |
[Unit]
Description=Docker Socket for the API
[Socket]
ListenStream=2375
BindIPv6Only=both
Service=docker.service
[Install]
WantedBy=sockets.target
- name: opt-kubernetes.mount
command: start
content: |
[Unit]
ConditionVirtualization=|vm
[Mount]
What=kubernetes
Where=/opt/kubernetes
Options=ro,trans=virtio,version=9p2000.L
Type=9p
update:
group: ${COREOS_CHANNEL:-alpha}
reboot-strategy: off
$( [[ ${type} =~ "master" ]] && render-template "$ROOT/user_data_master.yml" )
$( [[ ${type} =~ "minion" ]] && render-template "$ROOT/user_data_minion.yml" )


@@ -0,0 +1,63 @@
#cloud-config
coreos:
units:
- name: kube-apiserver.service
command: start
content: |
[Unit]
After=opt-kubernetes.mount etcd.service
ConditionFileIsExecutable=/opt/kubernetes/bin/kube-apiserver
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
Requires=opt-kubernetes.mount etcd.service
[Service]
ExecStart=/opt/kubernetes/bin/kube-apiserver \
--address=0.0.0.0 \
--port=8080 \
--etcd_servers=http://127.0.0.1:4001 \
--kubelet_port=10250 \
--portal_net=10.10.254.0/24
Restart=always
RestartSec=2
[Install]
WantedBy=multi-user.target
- name: kube-controller-manager.service
command: start
content: |
[Unit]
After=opt-kubernetes.mount kube-apiserver.service
ConditionFileIsExecutable=/opt/kubernetes/bin/kube-controller-manager
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
Requires=opt-kubernetes.mount kube-apiserver.service
[Service]
ExecStart=/opt/kubernetes/bin/kube-controller-manager \
--master=127.0.0.1:8080 \
--machines=${machines}
Restart=always
RestartSec=2
[Install]
WantedBy=multi-user.target
- name: kube-scheduler.service
command: start
content: |
[Unit]
After=opt-kubernetes.mount kube-apiserver.service
ConditionFileIsExecutable=/opt/kubernetes/bin/kube-scheduler
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
Requires=opt-kubernetes.mount kube-apiserver.service
[Service]
ExecStart=/opt/kubernetes/bin/kube-scheduler \
--master=127.0.0.1:8080
Restart=always
RestartSec=2
[Install]
WantedBy=multi-user.target
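
Once the master VM has booted, the three control-plane units above should be active. A quick, hedged sanity check from the host, using the `core` user and the master IP documented below:

```shell
ssh core@192.168.10.1 systemctl status kube-apiserver kube-controller-manager kube-scheduler
```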


@@ -0,0 +1,43 @@
#cloud-config
coreos:
units:
- name: kubelet.service
command: start
content: |
[Unit]
After=opt-kubernetes.mount etcd.service docker.socket
ConditionFileIsExecutable=/opt/kubernetes/bin/kubelet
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
Requires=opt-kubernetes.mount etcd.service docker.socket
[Service]
ExecStart=/opt/kubernetes/bin/kubelet \
--address=0.0.0.0 \
--hostname_override=192.168.10.$(($i+1)) \
--etcd_servers=http://127.0.0.1:4001
Restart=always
RestartSec=2
[Install]
WantedBy=multi-user.target
- name: kube-proxy.service
command: start
content: |
[Unit]
After=opt-kubernetes.mount etcd.service
ConditionFileIsExecutable=/opt/kubernetes/bin/kube-proxy
Description=Kubernetes Proxy
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
Requires=opt-kubernetes.mount etcd.service
[Service]
ExecStart=/opt/kubernetes/bin/kube-proxy \
--etcd_servers=http://127.0.0.1:4001 \
--master=http://192.168.10.1:7080
Restart=always
RestartSec=2
[Install]
WantedBy=multi-user.target
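
Likewise, on each minion the kubelet and the proxy should come up; for example, for the first minion (192.168.10.2):

```shell
ssh core@192.168.10.2 systemctl status kubelet kube-proxy
```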


@@ -0,0 +1,256 @@
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions that each provider hosting Kubernetes must implement to use cluster/kube-*.sh scripts.
readonly KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
readonly ROOT=$(dirname "${BASH_SOURCE}")
source $ROOT/${KUBE_CONFIG_FILE:-"config-default.sh"}
export LIBVIRT_DEFAULT_URI=qemu:///system
readonly POOL=kubernetes
readonly POOL_PATH="$(cd $ROOT && pwd)/libvirt_storage_pool"
# join <delim> <list...>
# Concatenates the list elements with the delimiter passed as first parameter
#
# Ex: join , a b c
# -> a,b,c
function join {
local IFS="$1"
shift
echo "$*"
}
# Must ensure that the following ENV vars are set: KUBE_MASTER, KUBE_MASTER_IP (KUBERNETES_MASTER is also exported)
function detect-master {
KUBE_MASTER_IP=192.168.10.1
KUBE_MASTER=kubernetes-master
export KUBERNETES_MASTER=http://$KUBE_MASTER_IP:8080
echo "KUBE_MASTER_IP: $KUBE_MASTER_IP"
echo "KUBE_MASTER: $KUBE_MASTER"
}
# Get minion IP addresses and store in KUBE_MINION_IP_ADDRESSES[]
function detect-minions {
for (( i = 0 ; i < $NUM_MINIONS ; i++ )); do
KUBE_MINION_IP_ADDRESSES[$i]=192.168.10.$(($i+2))
done
echo "KUBE_MINION_IP_ADDRESSES=[${KUBE_MINION_IP_ADDRESSES[@]}]"
}
# Verify prereqs on host machine
function verify-prereqs {
if ! which virsh >/dev/null; then
echo "Can't find virsh in PATH, please fix and retry." >&2
exit 1
fi
if ! virsh nodeinfo >/dev/null; then
exit 1
fi
if [[ "$(</sys/kernel/mm/ksm/run)" -ne "1" ]]; then
echo "KSM is not enabled" >&2
echo "Enabling it would reduce the memory footprint of large clusters" >&2
if [[ -t 0 ]]; then
read -t 5 -n 1 -p "Do you want to enable KSM (requires root password) (y/n)? " answer
echo ""
if [[ "$answer" == 'y' ]]; then
su -c 'echo 1 > /sys/kernel/mm/ksm/run'
fi
else
echo "You can enable it with (as root):" >&2
echo "" >&2
echo " echo 1 > /sys/kernel/mm/ksm/run" >&2
echo "" >&2
fi
fi
}
# Destroy the libvirt storage pool and all the images inside
#
# If 'keep_base_image' is passed as first parameter,
# the base image is kept, as well as the storage pool.
# All the other images are deleted.
function destroy-pool {
virsh pool-info $POOL >/dev/null 2>&1 || return
rm -rf "$POOL_PATH"/kubernetes/*
rm -rf "$POOL_PATH"/kubernetes_config*/*
local vol
virsh vol-list $POOL | awk 'NR>2 && !/^$/ && $1 ~ /^kubernetes/ {print $1}' | \
while read vol; do
virsh vol-delete $vol --pool $POOL
done
[[ "$1" == 'keep_base_image' ]] && return
set +e
virsh vol-delete coreos_base.img --pool $POOL
virsh pool-destroy $POOL
rmdir "$POOL_PATH"
set -e
}
# Creates the libvirt storage pool and populate it with
# - the CoreOS base image
# - the kubernetes binaries
function initialize-pool {
mkdir -p "$POOL_PATH"
if ! virsh pool-info $POOL >/dev/null 2>&1; then
virsh pool-create-as $POOL dir --target "$POOL_PATH"
fi
wget -N -P "$ROOT" http://${COREOS_CHANNEL:-alpha}.release.core-os.net/amd64-usr/current/coreos_production_qemu_image.img.bz2
if [ "$ROOT/coreos_production_qemu_image.img.bz2" -nt "$POOL_PATH/coreos_base.img" ]; then
bunzip2 -f -k "$ROOT/coreos_production_qemu_image.img.bz2"
virsh vol-delete coreos_base.img --pool $POOL 2> /dev/null || true
mv "$ROOT/coreos_production_qemu_image.img" "$POOL_PATH/coreos_base.img"
fi
# if ! virsh vol-list $POOL | grep -q coreos_base.img; then
# virsh vol-create-as $POOL coreos_base.img 10G --format qcow2
# virsh vol-upload coreos_base.img "$ROOT/coreos_production_qemu_image.img" --pool $POOL
# fi
mkdir -p "$POOL_PATH/kubernetes"
kube-push
virsh pool-refresh $POOL
}
function destroy-network {
set +e
virsh net-destroy kubernetes_global
virsh net-destroy kubernetes_pods
set -e
}
function initialize-network {
virsh net-create "$ROOT/network_kubernetes_global.xml"
virsh net-create "$ROOT/network_kubernetes_pods.xml"
}
# Expand the shell variables and command substitutions contained in the
# template file passed as first argument, and print the result
function render-template {
eval "echo \"$(cat $1)\""
}
# Instantiate a kubernetes cluster
function kube-up {
detect-master
detect-minions
initialize-pool keep_base_image
initialize-network
readonly ssh_keys="$(cat ~/.ssh/id_*.pub | sed 's/^/ - /')"
readonly kubernetes_dir="$POOL_PATH/kubernetes"
readonly discovery=$(curl -s https://discovery.etcd.io/new)
readonly machines=$(join , "${KUBE_MINION_IP_ADDRESSES[@]}")
local i
# i == 0 is the master, 1 .. NUM_MINIONS are the minions
for (( i = 0 ; i <= $NUM_MINIONS ; i++ )); do
if [[ $i -eq 0 ]]; then
type=master
else
type=minion-$(printf "%02d" $i)
fi
name=kubernetes_$type
image=$name.img
config=kubernetes_config_$type
virsh vol-create-as $POOL $image 10G --format qcow2 --backing-vol coreos_base.img --backing-vol-format qcow2
mkdir -p "$POOL_PATH/$config/openstack/latest"
render-template "$ROOT/user_data.yml" > "$POOL_PATH/$config/openstack/latest/user_data"
virsh pool-refresh $POOL
domain_xml=$(mktemp)
render-template $ROOT/coreos.xml > $domain_xml
virsh create $domain_xml
rm $domain_xml
done
}
# Delete a kubernetes cluster
function kube-down {
virsh list | awk 'NR>2 && !/^$/ && $2 ~ /^kubernetes/ {print $2}' | \
while read dom; do
virsh destroy $dom
done
destroy-pool keep_base_image
destroy-network
}
function find-release-tars {
SERVER_BINARY_TAR="${KUBE_ROOT}/server/kubernetes-server-linux-amd64.tar.gz"
if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
SERVER_BINARY_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
fi
if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
echo "!!! Cannot find kubernetes-server-linux-amd64.tar.gz"
exit 1
fi
}
# The kubernetes binaries are pushed to a host directory which is exposed to the VM
function upload-server-tars {
tar -x -C "$POOL_PATH/kubernetes" -f "$SERVER_BINARY_TAR" kubernetes
rm -rf "$POOL_PATH/kubernetes/bin"
mv "$POOL_PATH/kubernetes/kubernetes/server/bin" "$POOL_PATH/kubernetes/bin"
rmdir "$POOL_PATH/kubernetes/kubernetes/server" "$POOL_PATH/kubernetes/kubernetes"
}
# Update a kubernetes cluster with latest source
function kube-push {
find-release-tars
upload-server-tars
}
# Execute prior to running tests to build a release if required for env
function test-build-release {
echo "TODO"
}
# Execute prior to running tests to initialize required structure
function test-setup {
echo "TODO"
}
# Execute after running tests to perform any required clean-up
function test-teardown {
echo "TODO"
}
# Set the {KUBE_USER} and {KUBE_PASSWORD} environment values required to interact with provider
function get-password {
export KUBE_USER=core
echo "TODO get-password"
}
function setup-monitoring-firewall {
echo "TODO" 1>&2
}
function teardown-monitoring-firewall {
echo "TODO" 1>&2
}
function setup-logging-firewall {
echo "TODO: setup logging"
}
function teardown-logging-firewall {
echo "TODO: teardown logging"
}
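
None of these functions are meant to be called directly: the generic `cluster/kube-up.sh`, `cluster/kube-down.sh` and `cluster/kube-push.sh` entry points source the util.sh of the provider selected by `KUBERNETES_PROVIDER` and then invoke the corresponding hooks. A hedged sketch of that dispatch:

```shell
KUBERNETES_PROVIDER="${KUBERNETES_PROVIDER:-gce}"
source "cluster/${KUBERNETES_PROVIDER}/util.sh"   # e.g. cluster/libvirt-coreos/util.sh
verify-prereqs
kube-up
```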


@@ -55,7 +55,7 @@ echo "Found ${found} nodes."
cat -n "${MINIONS_FILE}"
# On vSphere, use minion IPs as their names
if [[ "${KUBERNETES_PROVIDER}" == "vsphere" ]] || [[ "${KUBERNETES_PROVIDER}" == "vagrant" ]]; then
if [[ "${KUBERNETES_PROVIDER}" == "vsphere" ]] || [[ "${KUBERNETES_PROVIDER}" == "vagrant" ]] || [[ "${KUBERNETES_PROVIDER}" == "libvirt-coreos" ]]; then
MINION_NAMES=("${KUBE_MINION_IP_ADDRESSES[@]}")
fi


@@ -17,7 +17,8 @@ Bare-metal | custom | Ubuntu | [docs](../../docs/getting-started-guide
Local | | | [docs](../../docs/getting-started-guides/locally.md) | Inactive |
Ovirt | | | [docs](../../docs/getting-started-guides/ovirt.md) | Inactive |
Rackspace | CoreOS | CoreOS | [docs](../../docs/getting-started-guides/rackspace.md) | Inactive |
Bare-metal | custom | CentOS | [docs](../../docs/getting-started-guides/centos/centos_manual_config.md) | Community(@coolsvap) | Uses K8s v0.9.1
libvirt/KVM | CoreOS | CoreOS | [docs](../../docs/getting-started-guides/libvirt-coreos.md) | Community (@lhuard1A) |
Definition of columns:
- **IaaS Provider** is who/what provides the virtual or physical machines (nodes) that Kubernetes runs on.
- **OS** is the base operating system of the nodes.


@@ -0,0 +1,235 @@
## Getting started with libvirt CoreOS
### Highlights
* Super-fast cluster boot-up (a few seconds instead of several minutes for vagrant)
* Reduced disk usage thanks to [COW](https://en.wikibooks.org/wiki/QEMU/Images#Copy_on_write)
* Reduced memory footprint thanks to [KSM](https://www.kernel.org/doc/Documentation/vm/ksm.txt)
### Prerequisites
1. Install [qemu](http://wiki.qemu.org/Main_Page)
2. Install [libvirt](http://libvirt.org/)
3. [Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html)
4. Check that your $HOME is accessible to the qemu user²
#### ¹ Depending on your distribution, libvirt access may be denied by default or may require a password at each access.
You can test it with the following command:
```
virsh -c qemu:///system pool-list
```
If you get access-denied error messages, please read https://libvirt.org/acl.html and https://libvirt.org/aclpolkit.html .
In short, if your libvirt has been compiled with Polkit support (e.g. Arch, Fedora 21), you can create `/etc/polkit-1/rules.d/50-org.libvirt.unix.manage.rules` with the following content to grant `$USER` full access to libvirt:
```
polkit.addRule(function(action, subject) {
if (action.id == "org.libvirt.unix.manage" &&
subject.user == "$USER") {
return polkit.Result.YES;
polkit.log("action=" + action);
polkit.log("subject=" + subject);
}
});
```
(Replace `$USER` with your login name)
If your libvirt has not been compiled with Polkit (e.g. Ubuntu 14.04.1 LTS), check the permissions on the libvirt unix socket:
```
ls -l /var/run/libvirt/libvirt-sock
srwxrwx--- 1 root libvirtd 0 Feb 12 16:03 /var/run/libvirt/libvirt-sock
usermod -a -G libvirtd $USER
# $USER needs to logout/login to have the new group be taken into account
```
(Replace `$USER` with your login name)
#### ² Qemu will run with a specific user. It must have access to the VMs drives
All the disk drive resources needed by the VM (CoreOS disk image, kubernetes binaries, cloud-init files, etc.) are put inside `./cluster/libvirt-coreos/libvirt_storage_pool`.
As we're using the `qemu:///system` instance of libvirt, qemu will run with a specific `user:group` distinct from your user. It is configured in `/etc/libvirt/qemu.conf`. That qemu user must have access to that libvirt storage pool.
If your `$HOME` is world readable, everything is fine. If your `$HOME` is private, `cluster/kube-up.sh` will fail with an error message like:
```
error: Cannot access storage file '$HOME/.../kubernetes/cluster/libvirt-coreos/libvirt_storage_pool/kubernetes_master.img' (as uid:99, gid:78): Permission denied
```
In order to fix that issue, you have several possibilities:
* set `POOL_PATH` inside `cluster/libvirt-coreos/config-default.sh` to a directory:
  * backed by a filesystem with a lot of free disk space,
  * writable by your user,
  * accessible by the qemu user;
* Grant the qemu user access to the storage pool.
On Arch:
```
setfacl -m g:kvm:--x ~
```
### Setup
By default, the libvirt-coreos setup will create a single kubernetes master and 3 kubernetes minions. Because the VM drives use copy-on-write and because memory is shared via ballooning and KSM, the cluster can be heavily over-committed at little cost.
To start your local cluster, open a shell and run:
```shell
cd kubernetes
export KUBERNETES_PROVIDER=libvirt-coreos
cluster/kube-up.sh
```
The `KUBERNETES_PROVIDER` environment variable tells all of the various cluster management scripts which variant to use. If you forget to set this, the assumption is you are running on Google Compute Engine.
The `NUM_MINIONS` environment variable may be set to specify the number of minions to start. If it is not set, the number of minions defaults to 3.
You can check that your machines are there and running with:
```
virsh -c qemu:///system list
 Id    Name                   State
----------------------------------------------------
 15    kubernetes_master      running
 16    kubernetes_minion-01   running
 17    kubernetes_minion-02   running
 18    kubernetes_minion-03   running
```
You can check that the kubernetes cluster is working with:
```
$ ./cluster/kubectl.sh get minions
NAME           LABELS   STATUS
192.168.10.2   <none>   Ready
192.168.10.3   <none>   Ready
192.168.10.4   <none>   Ready
```
The VMs are running [CoreOS](https://coreos.com/).
Your SSH keys have already been pushed to the VMs (the setup looks for `~/.ssh/id_*.pub`).
The user to use to connect to the VM is `core`.
The IP to connect to the master is 192.168.10.1.
The IPs to connect to the minions are 192.168.10.2 and onwards.
Connect to `kubernetes_master`:
```
ssh core@192.168.10.1
```
Connect to `kubernetes_minion-01`:
```
ssh core@192.168.10.2
```
### Interacting with your Kubernetes cluster with the `kube-*` scripts.
All of the following commands assume you have set `KUBERNETES_PROVIDER` appropriately:
```
export KUBERNETES_PROVIDER=libvirt-coreos
```
Bring up a libvirt-CoreOS cluster of 5 minions
```
NUM_MINIONS=5 cluster/kube-up.sh
```
Destroy the libvirt-CoreOS cluster
```
cluster/kube-down.sh
```
Update the libvirt-CoreOS cluster with a new Kubernetes release:
```
cluster/kube-push.sh
```
Interact with the cluster
```
cluster/kubectl.sh
```
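
`cluster/kubectl.sh` forwards its arguments to `kubectl` with the right `--server` option for this provider, so the usual queries work unchanged, for example:

```
cluster/kubectl.sh get minions
cluster/kubectl.sh get pods
```
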
### Troubleshooting
#### !!! Cannot find kubernetes-server-linux-amd64.tar.gz
Build the release tarballs:
```
make release
```
#### Can't find virsh in PATH, please fix and retry.
Install libvirt
On Arch:
```
pacman -S qemu libvirt
```
On Ubuntu 14.04.1:
```
aptitude install qemu-system-x86 libvirt-bin
```
On Fedora 21:
```
yum install qemu libvirt
```
#### error: Failed to connect socket to '/var/run/libvirt/libvirt-sock': No such file or directory
Start the libvirt daemon
On Arch:
```
systemctl start libvirtd
```
On Ubuntu 14.04.1:
```
service libvirt-bin start
```
#### error: Failed to connect socket to '/var/run/libvirt/libvirt-sock': Permission denied
Fix the libvirt access permissions (remember to replace `$USER` with your login name)
On Arch and Fedora 21:
```
cat > /etc/polkit-1/rules.d/50-org.libvirt.unix.manage.rules <<EOF
polkit.addRule(function(action, subject) {
if (action.id == "org.libvirt.unix.manage" &&
subject.user == "$USER") {
return polkit.Result.YES;
polkit.log("action=" + action);
polkit.log("subject=" + subject);
}
});
EOF
```
On Ubuntu:
```
usermod -a -G libvirtd $USER
```