Fix #2741. Add support for alternate Vagrant providers: VMWare Fusion, VMWare Workstation, and Parallels.

pull/6/head
Matt Bogosian 2015-04-15 16:07:50 -07:00
parent 3bc42f1635
commit e8672e2242
4 changed files with 325 additions and 130 deletions

145
Vagrantfile vendored
View File

@ -27,14 +27,46 @@ $minion_ips = $num_minion.times.collect { |n| $minion_ip_base + "#{n+3}" }
# Determine the OS platform to use
$kube_os = ENV['KUBERNETES_OS'] || "fedora"
# Check if we already have kube box
$kube_box_url = ENV['KUBERNETES_BOX_URL'] || "http://opscode-vm-bento.s3.amazonaws.com/vagrant/virtualbox/opscode_fedora-20_chef-provisionerless.box"
# To override the vagrant provider, use (e.g.):
# KUBERNETES_PROVIDER=vagrant VAGRANT_DEFAULT_PROVIDER=... .../cluster/kube-up.sh
# To override the box, use (e.g.):
# KUBERNETES_PROVIDER=vagrant KUBERNETES_BOX_NAME=... .../cluster/kube-up.sh
# You can specify a box version:
# KUBERNETES_PROVIDER=vagrant KUBERNETES_BOX_NAME=... KUBERNETES_BOX_VERSION=... .../cluster/kube-up.sh
# You can specify a box location:
# KUBERNETES_PROVIDER=vagrant KUBERNETES_BOX_NAME=... KUBERNETES_BOX_URL=... .../cluster/kube-up.sh
# KUBERNETES_BOX_URL and KUBERNETES_BOX_VERSION will be ignored unless
# KUBERNETES_BOX_NAME is set
# OS platform to box information
$kube_box = {
"fedora" => {
"name" => "fedora20",
"box_url" => $kube_box_url
# Default OS platform to provider/box information
$kube_provider_boxes = {
:parallels => {
'fedora' => {
# :box_url and :box_version are optional (and mutually exclusive);
# if :box_url is omitted the box will be retrieved by :box_name (and
# :box_version if provided) from
# http://atlas.hashicorp.com/boxes/search (formerly
# http://vagrantcloud.com/); this allows you to override :box_name with
# your own value so long as you provide :box_url; for example, the
# "official" name of this box is "rickard-von-essen/
# opscode_fedora-20", but by providing the URL and our own name, we
# make it appear as yet another provider under the "kube-fedora20"
# box
:box_name => 'kube-fedora20',
:box_url => 'https://atlas.hashicorp.com/rickard-von-essen/boxes/opscode_fedora-20/versions/0.4.0/providers/parallels.box'
}
},
:virtualbox => {
'fedora' => {
:box_name => 'kube-fedora20',
:box_url => 'http://opscode-vm-bento.s3.amazonaws.com/vagrant/virtualbox/opscode_fedora-20_chef-provisionerless.box'
}
},
:vmware_desktop => {
'fedora' => {
:box_name => 'kube-fedora20',
:box_url => 'http://opscode-vm-bento.s3.amazonaws.com/vagrant/vmware/opscode_fedora-20-i386_chef-provisionerless.box'
}
}
}
@ -54,16 +86,88 @@ end
# In Fedora VM, tmpfs device is mapped to /tmp. tmpfs is given 50% of RAM allocation.
# When doing Salt provisioning, we copy approximately 200MB of content in /tmp before anything else happens.
# This causes problems if anything else was in /tmp or the other directories that are bound to tmpfs device (i.e /run, etc.)
$vm_mem = (ENV['KUBERNETES_MEMORY'] || 1024).to_i
$vm_master_mem = (ENV['KUBERNETES_MASTER_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 1024).to_i
$vm_minion_mem = (ENV['KUBERNETES_MINION_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 1024).to_i
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
def customize_vm(config)
config.vm.box = $kube_box[$kube_os]["name"]
config.vm.box_url = $kube_box[$kube_os]["box_url"]
def setvmboxandurl(config, provider)
if ENV['KUBERNETES_BOX_NAME'] then
config.vm.box = ENV['KUBERNETES_BOX_NAME']
config.vm.provider :virtualbox do |v|
v.customize ["modifyvm", :id, "--memory", $vm_mem]
v.customize ["modifyvm", :id, "--cpus", $vm_cpus]
if ENV['KUBERNETES_BOX_URL'] then
config.vm.box_url = ENV['KUBERNETES_BOX_URL']
end
if ENV['KUBERNETES_BOX_VERSION'] then
config.vm.box_version = ENV['KUBERNETES_BOX_VERSION']
end
else
config.vm.box = $kube_provider_boxes[provider][$kube_os][:box_name]
if $kube_provider_boxes[provider][$kube_os][:box_url] then
config.vm.box_url = $kube_provider_boxes[provider][$kube_os][:box_url]
end
if $kube_provider_boxes[provider][$kube_os][:box_version] then
config.vm.box_version = $kube_provider_boxes[provider][$kube_os][:box_version]
end
end
end
def customize_vm(config, vm_mem)
# Try VMWare Fusion first (see
# https://docs.vagrantup.com/v2/providers/basic_usage.html)
config.vm.provider :vmware_fusion do |v, override|
setvmboxandurl(override, :vmware_desktop)
v.vmx['memsize'] = vm_mem
v.vmx['numvcpus'] = $vm_cpus
end
# Then try VMWare Workstation
config.vm.provider :vmware_workstation do |v, override|
setvmboxandurl(override, :vmware_desktop)
v.vmx['memsize'] = vm_mem
v.vmx['numvcpus'] = $vm_cpus
end
# Then try Parallels
config.vm.provider :parallels do |v, override|
setvmboxandurl(override, :parallels)
v.memory = vm_mem # v.customize ['set', :id, '--memsize', vm_mem]
v.cpus = $vm_cpus # v.customize ['set', :id, '--cpus', $vm_cpus]
# Don't attempt to update the Parallels tools on the image (this can
# be done manually if necessary)
v.update_guest_tools = false # v.customize ['set', :id, '--tools-autoupdate', 'off']
# Set up Parallels folder sharing to behave like VirtualBox (i.e.,
# mount the current directory as /vagrant and that's it)
v.customize ['set', :id, '--shf-guest', 'off']
v.customize ['set', :id, '--shf-guest-automount', 'off']
v.customize ['set', :id, '--shf-host', 'on']
# Remove all auto-mounted "shared folders"; the result seems to
# persist between runs (i.e., vagrant halt && vagrant up)
override.vm.provision :shell, :inline => (%q{
set -ex
if [ -d /media/psf ]; then
for i in /media/psf/*; do
if [ -d "${i}" ]; then
umount "${i}" || true
rmdir -v "${i}"
fi
done
rmdir -v /media/psf
fi
exit
}).strip
end
# Finally, fall back to VirtualBox
config.vm.provider :virtualbox do |v, override|
setvmboxandurl(override, :virtualbox)
v.memory = vm_mem # v.customize ["modifyvm", :id, "--memory", vm_mem]
v.cpus = $vm_cpus # v.customize ["modifyvm", :id, "--cpus", $vm_cpus]
# Use faster paravirtualized networking
v.customize ["modifyvm", :id, "--nictype1", "virtio"]
@ -73,7 +177,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# Kubernetes master
config.vm.define "master" do |c|
customize_vm c
customize_vm c, $vm_master_mem
if ENV['KUBE_TEMP'] then
script = "#{ENV['KUBE_TEMP']}/master-start.sh"
c.vm.provision "shell", run: "always", path: script
@ -84,17 +188,20 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# Kubernetes minion
$num_minion.times do |n|
config.vm.define "minion-#{n+1}" do |minion|
customize_vm minion
minion_vm_name = "minion-#{n+1}"
minion_prefix = ENV['INSTANCE_PREFIX'] || 'kubernetes' # must mirror default in cluster/vagrant/config-default.sh
minion_hostname = "#{minion_prefix}-#{minion_vm_name}"
config.vm.define minion_vm_name do |minion|
customize_vm minion, $vm_minion_mem
minion_index = n+1
minion_ip = $minion_ips[n]
if ENV['KUBE_TEMP'] then
script = "#{ENV['KUBE_TEMP']}/minion-start-#{n}.sh"
minion.vm.provision "shell", run: "always", path: script
end
minion.vm.network "private_network", ip: "#{minion_ip}"
minion.vm.hostname = "#{ENV['INSTANCE_PREFIX']}-minion-#{minion_index}"
minion.vm.hostname = minion_hostname
end
end
end

View File

@ -33,13 +33,62 @@ function detect-minions {
# Verify prereqs on host machine. Also sets and exports USING_KUBE_SCRIPTS=true so
# that our Vagrantfile doesn't error out.
function verify-prereqs {
for x in vagrant VBoxManage; do
for x in vagrant; do
if ! which "$x" >/dev/null; then
echo "Can't find $x in PATH, please fix and retry."
exit 1
fi
done
local vagrant_plugins=$(vagrant plugin list | sed '-es% .*$%%' '-es% *% %g' | tr ' ' $'\n')
local providers=(
# Format is:
# provider_ctl_executable vagrant_provider_name vagrant_provider_plugin_re
# either provider_ctl_executable or vagrant_provider_plugin_re can
# be blank (i.e., '') if none is needed by Vagrant (see, e.g.,
# virtualbox entry)
vmrun vmware_fusion vagrant-vmware-fusion
vmrun vmware_workstation vagrant-vmware-workstation
prlctl parallels vagrant-parallels
VBoxManage virtualbox ''
)
local provider_found=''
local provider_bin
local provider_name
local provider_plugin_re
while [ "${#providers[@]}" -gt 0 ]; do
provider_bin=${providers[0]}
provider_name=${providers[1]}
provider_plugin_re=${providers[2]}
providers=("${providers[@]:3}")
# If the provider is explicitly set, look only for that provider
if [ -n "${VAGRANT_DEFAULT_PROVIDER:-}" ] \
&& [ "${VAGRANT_DEFAULT_PROVIDER}" != "${provider_name}" ]; then
continue
fi
if ([ -z "${provider_bin}" ] \
|| which "${provider_bin}" >/dev/null 2>&1) \
&& ([ -z "${provider_plugin_re}" ] \
|| [ -n "$(echo "${vagrant_plugins}" | grep -E "^${provider_plugin_re}$")" ]); then
provider_found="${provider_name}"
# Stop after finding the first viable provider
break
fi
done
if [ -z "${provider_found}" ]; then
if [ -n "${VAGRANT_DEFAULT_PROVIDER:-}" ]; then
echo "Can't find the necessary components for the ${VAGRANT_DEFAULT_PROVIDER} vagrant provider, please fix and retry."
else
echo "Can't find the necessary components for any viable vagrant providers (e.g., virtualbox), please fix and retry."
fi
exit 1
fi
# Set VAGRANT_CWD to KUBE_ROOT so that we find the right Vagrantfile no
# matter what directory the tools are called from.
export VAGRANT_CWD="${KUBE_ROOT}"
@ -89,6 +138,7 @@ function create-provision-scripts {
echo "DNS_REPLICAS='${DNS_REPLICAS:-}'"
echo "RUNTIME_CONFIG='${RUNTIME_CONFIG:-}'"
echo "ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'"
echo "VAGRANT_DEFAULT_PROVIDER='${VAGRANT_DEFAULT_PROVIDER:-}'"
grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-master.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-network.sh"
) > "${KUBE_TEMP}/master-start.sh"
@ -109,6 +159,7 @@ function create-provision-scripts {
echo "MINION_CONTAINER_SUBNETS=(${MINION_CONTAINER_SUBNETS[@]})"
echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'"
echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS-}'"
echo "VAGRANT_DEFAULT_PROVIDER='${VAGRANT_DEFAULT_PROVIDER:-}'"
grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-minion.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-network.sh"
) > "${KUBE_TEMP}/minion-start-${i}.sh"
@ -116,6 +167,9 @@ function create-provision-scripts {
}
function verify-cluster {
# TODO: How does the user know the difference between "tak[ing] some
# time" and "loop[ing] forever"? Can we give more specific feedback on
# whether "an error" has occurred?
echo "Each machine instance has been created/updated."
echo " Now waiting for the Salt provisioning process to complete on each machine."
echo " This can take some time based on your network, disk, and cpu speed."

View File

@ -4,42 +4,54 @@ Running kubernetes with Vagrant (and VirtualBox) is an easy way to run/test/deve
### Prerequisites
1. Install latest version >= 1.6.2 of vagrant from http://www.vagrantup.com/downloads.html
2. Install latest version of Virtual Box from https://www.virtualbox.org/wiki/Downloads
2. Install one of:
1. The latest version of Virtual Box from https://www.virtualbox.org/wiki/Downloads
2. [VMWare Fusion](https://www.vmware.com/products/fusion/) version 5 or greater as well as the appropriate [Vagrant VMWare Fusion provider](https://www.vagrantup.com/vmware)
3. [VMWare Workstation](https://www.vmware.com/products/workstation/) version 9 or greater as well as the [Vagrant VMWare Workstation provider](https://www.vagrantup.com/vmware)
4. [Parallels Desktop](https://www.parallels.com/products/desktop/) version 9 or greater as well as the [Vagrant Parallels provider](https://parallels.github.io/vagrant-parallels/)
3. Get or build a [binary release](../../getting-started-guides/binary_release.md)
### Setup
By default, the Vagrant setup will create a single kubernetes-master and 1 kubernetes-minion. Each VM will take 1 GB, so make sure you have at least 2GB to 4GB of free memory (plus appropriate free disk space). To start your local cluster, open a shell and run:
```
```sh
cd kubernetes
export KUBERNETES_PROVIDER=vagrant
cluster/kube-up.sh
./cluster/kube-up.sh
```
The `KUBERNETES_PROVIDER` environment variable tells all of the various cluster management scripts which variant to use. If you forget to set this, the assumption is you are running on Google Compute Engine.
If you installed more than one Vagrant provider, Kubernetes will usually pick the appropriate one. However, you can override which one Kubernetes will use by setting the [`VAGRANT_DEFAULT_PROVIDER`](https://docs.vagrantup.com/v2/providers/default.html) environment variable:
```sh
export VAGRANT_DEFAULT_PROVIDER=parallels
export KUBERNETES_PROVIDER=vagrant
./cluster/kube-up.sh
```
Vagrant will provision each machine in the cluster with all the necessary components to run Kubernetes. The initial setup can take a few minutes to complete on each machine.
By default, each VM in the cluster is running Fedora, and all of the Kubernetes services are installed into systemd.
To access the master or any minion:
```
```sh
vagrant ssh master
vagrant ssh minion-1
```
If you are running more than one minion, you can access the others by:
```
```sh
vagrant ssh minion-2
vagrant ssh minion-3
```
To view the service status and/or logs on the kubernetes-master:
```
```sh
vagrant ssh master
[vagrant@kubernetes-master ~] $ sudo systemctl status kube-apiserver
[vagrant@kubernetes-master ~] $ sudo journalctl -r -u kube-apiserver
@ -52,7 +64,7 @@ vagrant ssh master
```
To view the services on any of the kubernetes-minion(s):
```
```sh
vagrant ssh minion-1
[vagrant@kubernetes-minion-1] $ sudo systemctl status docker
[vagrant@kubernetes-minion-1] $ sudo journalctl -r -u docker
@ -65,18 +77,18 @@ vagrant ssh minion-1
With your Kubernetes cluster up, you can manage the nodes in your cluster with the regular Vagrant commands.
To push updates to new Kubernetes code after making source changes:
```
cluster/kube-push.sh
```sh
./cluster/kube-push.sh
```
To stop and then restart the cluster:
```
```sh
vagrant halt
cluster/kube-up.sh
./cluster/kube-up.sh
```
To destroy the cluster:
```
```sh
vagrant destroy
```
@ -84,14 +96,13 @@ Once your Vagrant machines are up and provisioned, the first thing to do is to c
You may need to build the binaries first, you can do this with ```make```
```
```sh
$ ./cluster/kubectl.sh get minions
NAME LABELS
10.245.1.4 <none>
10.245.1.5 <none>
10.245.1.3 <none>
```
### Interacting with your Kubernetes cluster with the `kube-*` scripts.
@ -100,39 +111,39 @@ Alternatively to using the vagrant commands, you can also use the `cluster/kube-
All of these commands assume you have set `KUBERNETES_PROVIDER` appropriately:
```
```sh
export KUBERNETES_PROVIDER=vagrant
```
Bring up a vagrant cluster
```
cluster/kube-up.sh
```sh
./cluster/kube-up.sh
```
Destroy the vagrant cluster
```
cluster/kube-down.sh
```sh
./cluster/kube-down.sh
```
Update the vagrant cluster after you make changes (only works when building your own releases locally):
```
cluster/kube-push.sh
```sh
./cluster/kube-push.sh
```
Interact with the cluster
```
cluster/kubectl.sh
```sh
./cluster/kubectl.sh
```
### Authenticating with your master
When using the vagrant provider in Kubernetes, the `cluster/kubectl.sh` script will cache your credentials in a `~/.kubernetes_vagrant_auth` file so you will not be prompted for them in the future.
```
```sh
cat ~/.kubernetes_vagrant_auth
{ "User": "vagrant",
"Password": "vagrant"
@ -144,22 +155,21 @@ cat ~/.kubernetes_vagrant_auth
You should now be set to use the `cluster/kubectl.sh` script. For example try to list the minions that you have started with:
```
cluster/kubectl.sh get minions
```sh
./cluster/kubectl.sh get minions
```
### Running containers
Your cluster is running, you can list the minions in your cluster:
```
$ cluster/kubectl.sh get minions
```sh
$ ./cluster/kubectl.sh get minions
NAME LABELS
10.245.2.4 <none>
10.245.2.3 <none>
10.245.2.2 <none>
```
Now start running some containers!
@ -196,7 +206,7 @@ NAME IMAGE(S) HOST
You need to wait for the provisioning to complete, you can monitor the minions by doing:
```
```sh
$ sudo salt '*minion-1' cmd.run 'docker images'
kubernetes-minion-1:
REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
@ -206,7 +216,7 @@ kubernetes-minion-1:
Once the docker image for nginx has been downloaded, the container will start and you can list it:
```
```sh
$ sudo salt '*minion-1' cmd.run 'docker ps'
kubernetes-minion-1:
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
@ -235,9 +245,9 @@ We did not start any services, hence there are none listed. But we see three rep
Check the [guestbook](../../examples/guestbook/README.md) application to learn how to create a service.
You can already play with resizing the replicas with:
```
$ cluster/kubectl.sh resize rc my-nginx --replicas=2
$ cluster/kubectl.sh get pods
```sh
$ ./cluster/kubectl.sh resize rc my-nginx --replicas=2
$ ./cluster/kubectl.sh get pods
NAME IMAGE(S) HOST LABELS STATUS
7813c8bd-3ffe-11e4-9036-0800279696e1 nginx 10.245.2.2/10.245.2.2 name=myNginx Running
78140853-3ffe-11e4-9036-0800279696e1 nginx 10.245.2.3/10.245.2.3 name=myNginx Running
@ -247,9 +257,9 @@ Congratulations!
### Testing
The following will run all of the end-to-end testing scenarios assuming you set your environment in cluster/kube-env.sh
The following will run all of the end-to-end testing scenarios assuming you set your environment in `cluster/kube-env.sh`:
```
```sh
NUM_MINIONS=3 hack/e2e-test.sh
```
@ -257,26 +267,26 @@ NUM_MINIONS=3 hack/e2e-test.sh
#### I keep downloading the same (large) box all the time!
By default the Vagrantfile will download the box from S3. You can change this (and cache the box locally) by providing an alternate URL when calling `kube-up.sh`
By default the Vagrantfile will download the box from S3. You can change this (and cache the box locally) by providing a name and an alternate URL when calling `kube-up.sh`
```bash
```sh
export KUBERNETES_BOX_NAME=choose_your_own_name_for_your_kuber_box
export KUBERNETES_BOX_URL=path_of_your_kuber_box
export KUBERNETES_PROVIDER=vagrant
cluster/kube-up.sh
./cluster/kube-up.sh
```
#### I just created the cluster, but I am getting authorization errors!
You probably have an incorrect ~/.kubernetes_vagrant_auth file for the cluster you are attempting to contact.
```
```sh
rm ~/.kubernetes_vagrant_auth
```
After using kubectl.sh make sure that the correct credentials are set:
```
```sh
cat ~/.kubernetes_vagrant_auth
{
"User": "vagrant",
@ -284,35 +294,42 @@ cat ~/.kubernetes_vagrant_auth
}
```
#### I just created the cluster, but I do not see my container running !
#### I just created the cluster, but I do not see my container running!
If this is your first time creating the cluster, the kubelet on each minion schedules a number of docker pull requests to fetch prerequisite images. This can take some time and as a result may delay your initial pod getting provisioned.
#### I changed Kubernetes code, but it's not running !
#### I changed Kubernetes code, but it's not running!
Are you sure there was no build error? After running `$ vagrant provision`, scroll up and ensure that each Salt state was completed successfully on each box in the cluster.
It's very likely you see a build error due to an error in your source files!
#### I have brought Vagrant up but the minions won't validate !
#### I have brought Vagrant up but the minions won't validate!
Are you sure you built a release first? Did you install `net-tools`? For more clues, login to one of the minions (`vagrant ssh minion-1`) and inspect the salt minion log (`sudo cat /var/log/salt/minion`).
#### I want to change the number of minions !
#### I want to change the number of minions!
You can control the number of minions that are instantiated via the environment variable `NUM_MINIONS` on your host machine. If you plan to work with replicas, we strongly encourage you to work with enough minions to satisfy your largest intended replica size. If you do not plan to work with replicas, you can save some system resources by running with a single minion. You do this, by setting `NUM_MINIONS` to 1 like so:
```
```sh
export NUM_MINIONS=1
```
#### I want my VMs to have more memory !
#### I want my VMs to have more memory!
You can control the memory allotted to virtual machines with the `KUBERNETES_MEMORY` environment variable.
Just set it to the number of megabytes you would like the machines to have. For example:
```
```sh
export KUBERNETES_MEMORY=2048
```
If you need more granular control, you can set the amount of memory for the master and minions independently. For example:
```sh
export KUBERNETES_MASTER_MEMORY=1536
export KUBERNETES_MINION_MEMORY=2048
```
#### I ran vagrant suspend and nothing works!
`vagrant suspend` seems to mess up the network. It's not supported at this time.

View File

@ -4,13 +4,17 @@ Running kubernetes with Vagrant (and VirtualBox) is an easy way to run/test/deve
### Prerequisites
1. Install latest version >= 1.6.2 of vagrant from http://www.vagrantup.com/downloads.html
2. Install latest version of Virtual Box from https://www.virtualbox.org/wiki/Downloads
2. Install one of:
1. The latest version of Virtual Box from https://www.virtualbox.org/wiki/Downloads
2. [VMWare Fusion](https://www.vmware.com/products/fusion/) version 5 or greater as well as the appropriate [Vagrant VMWare Fusion provider](https://www.vagrantup.com/vmware)
3. [VMWare Workstation](https://www.vmware.com/products/workstation/) version 9 or greater as well as the [Vagrant VMWare Workstation provider](https://www.vagrantup.com/vmware)
4. [Parallels Desktop](https://www.parallels.com/products/desktop/) version 9 or greater as well as the [Vagrant Parallels provider](https://parallels.github.io/vagrant-parallels/)
### Setup
Setting up a cluster is as simple as running:
```
```sh
export KUBERNETES_PROVIDER=vagrant
curl -sS https://get.k8s.io | bash
```
@ -19,33 +23,41 @@ The `KUBERNETES_PROVIDER` environment variable tells all of the various cluster
By default, the Vagrant setup will create a single kubernetes-master and 1 kubernetes-minion. Each VM will take 1 GB, so make sure you have at least 2GB to 4GB of free memory (plus appropriate free disk space). To start your local cluster, open a shell and run:
```
```sh
cd kubernetes
export KUBERNETES_PROVIDER=vagrant
cluster/kube-up.sh
./cluster/kube-up.sh
```
Vagrant will provision each machine in the cluster with all the necessary components to run Kubernetes. The initial setup can take a few minutes to complete on each machine.
If you installed more than one Vagrant provider, Kubernetes will usually pick the appropriate one. However, you can override which one Kubernetes will use by setting the [`VAGRANT_DEFAULT_PROVIDER`](https://docs.vagrantup.com/v2/providers/default.html) environment variable:
```sh
export VAGRANT_DEFAULT_PROVIDER=parallels
export KUBERNETES_PROVIDER=vagrant
./cluster/kube-up.sh
```
By default, each VM in the cluster is running Fedora, and all of the Kubernetes services are installed into systemd.
To access the master or any minion:
```
```sh
vagrant ssh master
vagrant ssh minion-1
```
If you are running more than one minion, you can access the others by:
```
```sh
vagrant ssh minion-2
vagrant ssh minion-3
```
To view the service status and/or logs on the kubernetes-master:
```
```sh
vagrant ssh master
[vagrant@kubernetes-master ~] $ sudo systemctl status kube-apiserver
[vagrant@kubernetes-master ~] $ sudo journalctl -r -u kube-apiserver
@ -58,7 +70,7 @@ vagrant ssh master
```
To view the services on any of the kubernetes-minion(s):
```
```sh
vagrant ssh minion-1
[vagrant@kubernetes-minion-1] $ sudo systemctl status docker
[vagrant@kubernetes-minion-1] $ sudo journalctl -r -u docker
@ -71,18 +83,18 @@ vagrant ssh minion-1
With your Kubernetes cluster up, you can manage the nodes in your cluster with the regular Vagrant commands.
To push updates to new Kubernetes code after making source changes:
```
cluster/kube-push.sh
```sh
./cluster/kube-push.sh
```
To stop and then restart the cluster:
```
```sh
vagrant halt
cluster/kube-up.sh
./cluster/kube-up.sh
```
To destroy the cluster:
```
```sh
vagrant destroy
```
@ -90,14 +102,13 @@ Once your Vagrant machines are up and provisioned, the first thing to do is to c
You may need to build the binaries first, you can do this with ```make```
```
```sh
$ ./cluster/kubectl.sh get minions
NAME LABELS
10.245.1.4 <none>
10.245.1.5 <none>
10.245.1.3 <none>
```
### Interacting with your Kubernetes cluster with the `kube-*` scripts.
@ -106,39 +117,39 @@ Alternatively to using the vagrant commands, you can also use the `cluster/kube-
All of these commands assume you have set `KUBERNETES_PROVIDER` appropriately:
```
```sh
export KUBERNETES_PROVIDER=vagrant
```
Bring up a vagrant cluster
```
cluster/kube-up.sh
```sh
./cluster/kube-up.sh
```
Destroy the vagrant cluster
```
cluster/kube-down.sh
```sh
./cluster/kube-down.sh
```
Update the vagrant cluster after you make changes (only works when building your own releases locally):
```
cluster/kube-push.sh
```sh
./cluster/kube-push.sh
```
Interact with the cluster
```
cluster/kubectl.sh
```sh
./cluster/kubectl.sh
```
### Authenticating with your master
When using the vagrant provider in Kubernetes, the `cluster/kubectl.sh` script will cache your credentials in a `~/.kubernetes_vagrant_auth` file so you will not be prompted for them in the future.
```
```sh
cat ~/.kubernetes_vagrant_auth
{ "User": "vagrant",
"Password": "vagrant"
@ -150,50 +161,49 @@ cat ~/.kubernetes_vagrant_auth
You should now be set to use the `cluster/kubectl.sh` script. For example try to list the minions that you have started with:
```
cluster/kubectl.sh get minions
```sh
./cluster/kubectl.sh get minions
```
### Running containers
Your cluster is running, you can list the minions in your cluster:
```
$ cluster/kubectl.sh get minions
```sh
$ ./cluster/kubectl.sh get minions
NAME LABELS
10.245.2.4 <none>
10.245.2.3 <none>
10.245.2.2 <none>
```
Now start running some containers!
You can now use any of the cluster/kube-*.sh commands to interact with your VM machines.
You can now use any of the `cluster/kube-*.sh` commands to interact with your VM machines.
Before starting a container there will be no pods, services and replication controllers.
```
$ cluster/kubectl.sh get pods
```sh
$ ./cluster/kubectl.sh get pods
NAME IMAGE(S) HOST LABELS STATUS
$ cluster/kubectl.sh get services
$ ./cluster/kubectl.sh get services
NAME LABELS SELECTOR IP PORT
$ cluster/kubectl.sh get replicationControllers
$ ./cluster/kubectl.sh get replicationControllers
NAME IMAGE(S SELECTOR REPLICAS
```
Start a container running nginx with a replication controller and three replicas
```
$ cluster/kubectl.sh run-container my-nginx --image=nginx --replicas=3 --port=80
```sh
$ ./cluster/kubectl.sh run-container my-nginx --image=nginx --replicas=3 --port=80
```
When listing the pods, you will see that three containers have been started and are in Waiting state:
```
$ cluster/kubectl.sh get pods
```sh
$ ./cluster/kubectl.sh get pods
NAME IMAGE(S) HOST LABELS STATUS
781191ff-3ffe-11e4-9036-0800279696e1 nginx 10.245.2.4/10.245.2.4 name=myNginx Waiting
7813c8bd-3ffe-11e4-9036-0800279696e1 nginx 10.245.2.2/10.245.2.2 name=myNginx Waiting
@ -202,7 +212,7 @@ NAME IMAGE(S) HOST
You need to wait for the provisioning to complete, you can monitor the minions by doing:
```
```sh
$ sudo salt '*minion-1' cmd.run 'docker images'
kubernetes-minion-1:
REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
@ -213,7 +223,7 @@ kubernetes-minion-1:
Once the docker image for nginx has been downloaded, the container will start and you can list it:
```
```sh
$ sudo salt '*minion-1' cmd.run 'docker ps'
kubernetes-minion-1:
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
@ -225,17 +235,17 @@ kubernetes-minion-1:
Going back to listing the pods, services and replicationControllers, you now have:
```
$ cluster/kubectl.sh get pods
```sh
$ ./cluster/kubectl.sh get pods
NAME IMAGE(S) HOST LABELS STATUS
781191ff-3ffe-11e4-9036-0800279696e1 nginx 10.245.2.4/10.245.2.4 name=myNginx Running
7813c8bd-3ffe-11e4-9036-0800279696e1 nginx 10.245.2.2/10.245.2.2 name=myNginx Running
78140853-3ffe-11e4-9036-0800279696e1 nginx 10.245.2.3/10.245.2.3 name=myNginx Running
$ cluster/kubectl.sh get services
$ ./cluster/kubectl.sh get services
NAME LABELS SELECTOR IP PORT
$ cluster/kubectl.sh get replicationControllers
$ ./cluster/kubectl.sh get replicationControllers
NAME IMAGE(S SELECTOR REPLICAS
myNginx nginx name=my-nginx 3
```
@ -244,9 +254,9 @@ We did not start any services, hence there are none listed. But we see three rep
Check the [guestbook](../../examples/guestbook/README.md) application to learn how to create a service.
You can already play with resizing the replicas with:
```
$ cluster/kubectl.sh resize rc my-nginx --replicas=2
$ cluster/kubectl.sh get pods
```sh
$ ./cluster/kubectl.sh resize rc my-nginx --replicas=2
$ ./cluster/kubectl.sh get pods
NAME IMAGE(S) HOST LABELS STATUS
7813c8bd-3ffe-11e4-9036-0800279696e1 nginx 10.245.2.2/10.245.2.2 name=myNginx Running
78140853-3ffe-11e4-9036-0800279696e1 nginx 10.245.2.3/10.245.2.3 name=myNginx Running
@ -258,26 +268,26 @@ Congratulations!
#### I keep downloading the same (large) box all the time!
By default the Vagrantfile will download the box from S3. You can change this (and cache the box locally) by providing an alternate URL when calling `kube-up.sh`
By default the Vagrantfile will download the box from S3. You can change this (and cache the box locally) by providing a name and an alternate URL when calling `kube-up.sh`
```bash
```sh
export KUBERNETES_BOX_NAME=choose_your_own_name_for_your_kuber_box
export KUBERNETES_BOX_URL=path_of_your_kuber_box
export KUBERNETES_PROVIDER=vagrant
cluster/kube-up.sh
./cluster/kube-up.sh
```
#### I just created the cluster, but I am getting authorization errors!
You probably have an incorrect ~/.kubernetes_vagrant_auth file for the cluster you are attempting to contact.
```
```sh
rm ~/.kubernetes_vagrant_auth
```
After using kubectl.sh make sure that the correct credentials are set:
```
```sh
cat ~/.kubernetes_vagrant_auth
{
"User": "vagrant",
@ -285,34 +295,41 @@ cat ~/.kubernetes_vagrant_auth
}
```
#### I just created the cluster, but I do not see my container running !
#### I just created the cluster, but I do not see my container running!
If this is your first time creating the cluster, the kubelet on each minion schedules a number of docker pull requests to fetch prerequisite images. This can take some time and as a result may delay your initial pod getting provisioned.
#### I want to make changes to Kubernetes code !
#### I want to make changes to Kubernetes code!
To set up a vagrant cluster for hacking, follow the [vagrant developer guide](../devel/developer-guides/vagrant.md).
#### I have brought Vagrant up but the minions won't validate !
#### I have brought Vagrant up but the minions won't validate!
Log on to one of the minions (`vagrant ssh minion-1`) and inspect the salt minion log (`sudo cat /var/log/salt/minion`).
#### I want to change the number of minions !
#### I want to change the number of minions!
You can control the number of minions that are instantiated via the environment variable `NUM_MINIONS` on your host machine. If you plan to work with replicas, we strongly encourage you to work with enough minions to satisfy your largest intended replica size. If you do not plan to work with replicas, you can save some system resources by running with a single minion. You do this, by setting `NUM_MINIONS` to 1 like so:
```
```sh
export NUM_MINIONS=1
```
#### I want my VMs to have more memory !
#### I want my VMs to have more memory!
You can control the memory allotted to virtual machines with the `KUBERNETES_MEMORY` environment variable.
Just set it to the number of megabytes you would like the machines to have. For example:
```
```sh
export KUBERNETES_MEMORY=2048
```
If you need more granular control, you can set the amount of memory for the master and minions independently. For example:
```sh
export KUBERNETES_MASTER_MEMORY=1536
export KUBERNETES_MINION_MEMORY=2048
```
#### I ran vagrant suspend and nothing works!
`vagrant suspend` seems to mess up the network. It's not supported at this time.