Remove the deprecated vagrant kube-up implementation.

pull/6/head
Robert Bailey 2018-01-10 16:24:46 -08:00
parent 98277ff20b
commit 8d44e0b38a
16 changed files with 5 additions and 1594 deletions

Vagrantfile

@@ -1,325 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"
# Require a recent version of Vagrant; otherwise some users have reported errors setting host names on boxes
Vagrant.require_version ">= 1.7.4"
if ARGV.first == "up" && ENV['USING_KUBE_SCRIPTS'] != 'true'
raise Vagrant::Errors::VagrantError.new, <<END
Calling 'vagrant up' directly is not supported. Instead, please run the following:
export KUBERNETES_PROVIDER=vagrant
export VAGRANT_DEFAULT_PROVIDER=providername
./cluster/kube-up.sh
END
end
# The number of nodes to provision
$num_node = (ENV['NUM_NODES'] || 1).to_i
# ip configuration
$master_ip = ENV['MASTER_IP']
$node_ip_base = ENV['NODE_IP_BASE'] || ""
$node_ips = $num_node.times.collect { |n| $node_ip_base + "#{n+3}" }
# Determine the OS platform to use
$kube_os = ENV['KUBERNETES_OS'] || "fedora"
# Determine whether vagrant should use nfs to sync folders
$use_nfs = ENV['KUBERNETES_VAGRANT_USE_NFS'] == 'true'
# Determine whether vagrant should use rsync to sync folders
$use_rsync = ENV['KUBERNETES_VAGRANT_USE_RSYNC'] == 'true'
# To override the vagrant provider, use (e.g.):
# KUBERNETES_PROVIDER=vagrant VAGRANT_DEFAULT_PROVIDER=... .../cluster/kube-up.sh
# To override the box, use (e.g.):
# KUBERNETES_PROVIDER=vagrant KUBERNETES_BOX_NAME=... .../cluster/kube-up.sh
# You can specify a box version:
# KUBERNETES_PROVIDER=vagrant KUBERNETES_BOX_NAME=... KUBERNETES_BOX_VERSION=... .../cluster/kube-up.sh
# You can specify a box location:
# KUBERNETES_PROVIDER=vagrant KUBERNETES_BOX_NAME=... KUBERNETES_BOX_URL=... .../cluster/kube-up.sh
# KUBERNETES_BOX_URL and KUBERNETES_BOX_VERSION will be ignored unless
# KUBERNETES_BOX_NAME is set
# Default OS platform to provider/box information
$kube_provider_boxes = {
:parallels => {
'fedora' => {
# :box_url and :box_version are optional (and mutually exclusive);
# if :box_url is omitted the box will be retrieved by :box_name (and
# :box_version if provided) from
# http://atlas.hashicorp.com/boxes/search (formerly
# http://vagrantcloud.com/); this allows you to override :box_name with
# your own value so long as you provide :box_url; for example, the
# "official" name of this box is "rickard-von-essen/
# opscode_fedora-20", but by providing the URL and our own name, we
# make it appear as yet another provider under the "kube-fedora23"
# box
:box_name => 'kube-fedora23',
:box_url => 'https://opscode-vm-bento.s3.amazonaws.com/vagrant/parallels/opscode_fedora-23_chef-provisionerless.box'
}
},
:virtualbox => {
'fedora' => {
:box_name => 'kube-fedora23',
:box_url => 'https://opscode-vm-bento.s3.amazonaws.com/vagrant/virtualbox/opscode_fedora-23_chef-provisionerless.box'
}
},
:libvirt => {
'fedora' => {
:box_name => 'kube-fedora23',
:box_url => 'https://archives.fedoraproject.org/pub/archive/fedora/linux/releases/23/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-23-20151030.x86_64.vagrant-libvirt.box'
}
},
:vmware_desktop => {
'fedora' => {
:box_name => 'kube-fedora23',
:box_url => 'https://opscode-vm-bento.s3.amazonaws.com/vagrant/vmware/opscode_fedora-23_chef-provisionerless.box'
}
},
:vsphere => {
'fedora' => {
:box_name => 'vsphere-dummy',
:box_url => 'https://github.com/deromka/vagrant-vsphere/blob/master/vsphere-dummy.box?raw=true'
}
}
}
# Give access to all physical cpu cores
# Previously cargo-culted from here:
# http://www.stefanwrobel.com/how-to-make-vagrant-performance-not-suck
# Rewritten to actually determine the number of hardware cores instead of assuming
# that the host has hyperthreading enabled.
host = RbConfig::CONFIG['host_os']
if host =~ /darwin/
$vm_cpus = `sysctl -n hw.physicalcpu`.to_i
elsif host =~ /linux/
#This should work on most processors; however, it will fail on ones without the core id field.
#So far I have only seen this on a Raspberry Pi, which you probably don't want to run Vagrant on anyhow...
#But just in case, we'll default to the result of nproc if we get 0, just to be safe.
$vm_cpus = `cat /proc/cpuinfo | grep 'core id' | sort -u | wc -l`.to_i
if $vm_cpus < 1
$vm_cpus = `nproc`.to_i
end
else # sorry Windows folks, I can't help you
$vm_cpus = 2
end
# Give the master VM 1280MB and each node VM 2048MB of RAM by default (overridable via the env vars below).
# In the Fedora VM, the tmpfs device is mapped to /tmp, and tmpfs is given 50% of the RAM allocation.
# When doing Salt provisioning, we copy approximately 200MB of content into /tmp before anything else happens.
# This causes problems if anything else is in /tmp or the other directories that are bound to the tmpfs device (e.g. /run).
$vm_master_mem = (ENV['KUBERNETES_MASTER_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 1280).to_i
$vm_node_mem = (ENV['KUBERNETES_NODE_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 2048).to_i
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
if Vagrant.has_plugin?("vagrant-proxyconf")
$http_proxy = ENV['KUBERNETES_HTTP_PROXY'] || ""
$https_proxy = ENV['KUBERNETES_HTTPS_PROXY'] || ""
$no_proxy = ENV['KUBERNETES_NO_PROXY'] || "127.0.0.1"
config.proxy.http = $http_proxy
config.proxy.https = $https_proxy
config.proxy.no_proxy = $no_proxy
end
# this corrects a bug in 1.8.5 where an invalid SSH key is inserted.
if Vagrant::VERSION == "1.8.5"
config.ssh.insert_key = false
end
def setvmboxandurl(config, provider)
if ENV['KUBERNETES_BOX_NAME'] then
config.vm.box = ENV['KUBERNETES_BOX_NAME']
if ENV['KUBERNETES_BOX_URL'] then
config.vm.box_url = ENV['KUBERNETES_BOX_URL']
end
if ENV['KUBERNETES_BOX_VERSION'] then
config.vm.box_version = ENV['KUBERNETES_BOX_VERSION']
end
else
config.vm.box = $kube_provider_boxes[provider][$kube_os][:box_name]
if $kube_provider_boxes[provider][$kube_os][:box_url] then
config.vm.box_url = $kube_provider_boxes[provider][$kube_os][:box_url]
end
if $kube_provider_boxes[provider][$kube_os][:box_version] then
config.vm.box_version = $kube_provider_boxes[provider][$kube_os][:box_version]
end
end
end
def customize_vm(config, vm_mem)
if $use_nfs then
config.vm.synced_folder ".", "/vagrant", nfs: true
elsif $use_rsync then
opts = {}
if ENV['KUBERNETES_VAGRANT_RSYNC_ARGS'] then
opts[:rsync__args] = ENV['KUBERNETES_VAGRANT_RSYNC_ARGS'].split(" ")
end
if ENV['KUBERNETES_VAGRANT_RSYNC_EXCLUDE'] then
opts[:rsync__exclude] = ENV['KUBERNETES_VAGRANT_RSYNC_EXCLUDE'].split(" ")
end
config.vm.synced_folder ".", "/vagrant", opts
end
# Try VMWare Fusion first (see
# https://docs.vagrantup.com/v2/providers/basic_usage.html)
config.vm.provider :vmware_fusion do |v, override|
setvmboxandurl(override, :vmware_desktop)
v.vmx['memsize'] = vm_mem
v.vmx['numvcpus'] = $vm_cpus
end
# configure libvirt provider
config.vm.provider :libvirt do |v, override|
setvmboxandurl(override, :libvirt)
v.memory = vm_mem
v.cpus = $vm_cpus
v.nested = true
v.volume_cache = 'none'
end
# Then try VMWare Workstation
config.vm.provider :vmware_workstation do |v, override|
setvmboxandurl(override, :vmware_desktop)
v.vmx['memsize'] = vm_mem
v.vmx['numvcpus'] = $vm_cpus
end
# Then try Parallels
config.vm.provider :parallels do |v, override|
setvmboxandurl(override, :parallels)
v.memory = vm_mem # v.customize ['set', :id, '--memsize', vm_mem]
v.cpus = $vm_cpus # v.customize ['set', :id, '--cpus', $vm_cpus]
# Don't attempt to update the Parallels tools on the image (this can
# be done manually if necessary)
v.update_guest_tools = false # v.customize ['set', :id, '--tools-autoupdate', 'off']
# Set up Parallels folder sharing to behave like VirtualBox (i.e.,
# mount the current directory as /vagrant and that's it)
v.customize ['set', :id, '--shf-guest', 'off']
v.customize ['set', :id, '--shf-guest-automount', 'off']
v.customize ['set', :id, '--shf-host', 'on']
# Synchronize VM clocks to host clock (Avoid certificate invalid issue)
v.customize ['set', :id, '--time-sync', 'on']
# Remove all auto-mounted "shared folders"; the result seems to
# persist between runs (i.e., vagrant halt && vagrant up)
override.vm.provision :shell, :inline => (%q{
set -ex
if [ -d /media/psf ]; then
for i in /media/psf/*; do
if [ -d "${i}" ]; then
umount "${i}" || true
rmdir -v "${i}"
fi
done
rmdir -v /media/psf
fi
exit
}).strip
end
# Then try vsphere
config.vm.provider :vsphere do |vsphere, override|
setvmboxandurl(override, :vsphere)
#config.vm.hostname = ENV['MASTER_NAME']
config.ssh.username = ENV['MASTER_USER']
config.ssh.password = ENV['MASTER_PASSWD']
config.ssh.pty = true
config.ssh.insert_key = true
#config.ssh.private_key_path = '~/.ssh/id_rsa_vsphere'
# Don't attempt to update the tools on the image (this can
# be done manually if necessary)
# vsphere.update_guest_tools = false # v.customize ['set', :id, '--tools-autoupdate', 'off']
# The vSphere host we're going to connect to
vsphere.host = ENV['VAGRANT_VSPHERE_URL']
# The ESX host for the new VM
vsphere.compute_resource_name = ENV['VAGRANT_VSPHERE_RESOURCE_POOL']
# The resource pool for the new VM
#vsphere.resource_pool_name = 'Comp'
# path to folder where new VM should be created, if not specified template's parent folder will be used
vsphere.vm_base_path = ENV['VAGRANT_VSPHERE_BASE_PATH']
# The template we're going to clone
vsphere.template_name = ENV['VAGRANT_VSPHERE_TEMPLATE_NAME']
# The name of the new machine
#vsphere.name = ENV['MASTER_NAME']
# vSphere login
vsphere.user = ENV['VAGRANT_VSPHERE_USERNAME']
# vSphere password
vsphere.password = ENV['VAGRANT_VSPHERE_PASSWORD']
# cpu count
vsphere.cpu_count = $vm_cpus
# memory in MB
vsphere.memory_mb = vm_mem
# If you don't have SSL configured correctly, set this to 'true'
vsphere.insecure = ENV['VAGRANT_VSPHERE_INSECURE']
end
# Don't attempt to update Virtualbox Guest Additions (requires gcc)
if Vagrant.has_plugin?("vagrant-vbguest") then
config.vbguest.auto_update = false
end
# Finally, fall back to VirtualBox
config.vm.provider :virtualbox do |v, override|
setvmboxandurl(override, :virtualbox)
v.memory = vm_mem # v.customize ["modifyvm", :id, "--memory", vm_mem]
v.cpus = $vm_cpus # v.customize ["modifyvm", :id, "--cpus", $vm_cpus]
# Use faster paravirtualized networking
v.customize ["modifyvm", :id, "--nictype1", "virtio"]
v.customize ["modifyvm", :id, "--nictype2", "virtio"]
end
end
# Kubernetes master
config.vm.define "master" do |c|
customize_vm c, $vm_master_mem
if ENV['KUBE_TEMP'] then
script = "#{ENV['KUBE_TEMP']}/master-start.sh"
c.vm.provision "shell", run: "always", path: script
end
c.vm.network "private_network", ip: "#{$master_ip}"
end
# Kubernetes node
$num_node.times do |n|
node_vm_name = "node-#{n+1}"
config.vm.define node_vm_name do |node|
customize_vm node, $vm_node_mem
node_ip = $node_ips[n]
if ENV['KUBE_TEMP'] then
script = "#{ENV['KUBE_TEMP']}/node-start-#{n}.sh"
node.vm.provision "shell", run: "always", path: script
end
node.vm.network "private_network", ip: "#{node_ip}"
end
end
end
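
The removed Vagrantfile above was never meant to be driven with a bare `vagrant up` (the guard at the top aborts in that case); cluster/kube-up.sh set the environment it reads. A minimal illustrative invocation, assuming the virtualbox provider; the concrete values below are examples, not defaults from this commit:

export KUBERNETES_PROVIDER=vagrant
export VAGRANT_DEFAULT_PROVIDER=virtualbox   # or libvirt, parallels, vmware_fusion, vsphere
export NUM_NODES=2                           # one master VM plus two node VMs
export KUBERNETES_MEMORY=2048                # MB per VM; KUBERNETES_MASTER_MEMORY / KUBERNETES_NODE_MEMORY override per role
./cluster/kube-up.sh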


@@ -517,7 +517,6 @@ EOF
cp -R "${KUBE_ROOT}/docs" "${release_stage}/"
cp "${KUBE_ROOT}/README.md" "${release_stage}/"
cp "${KUBE_ROOT}/Godeps/LICENSES" "${release_stage}/"
cp "${KUBE_ROOT}/Vagrantfile" "${release_stage}/"
echo "${KUBE_GIT_VERSION}" > "${release_stage}/version"


@@ -193,7 +193,6 @@ pkg_tar(
files = [
"//:Godeps/LICENSES",
"//:README.md",
"//:Vagrantfile",
"//:version",
"//cluster:all-srcs",
"//docs:all-srcs",


@@ -4,11 +4,10 @@ This is the root of the SaltStack configuration for Kubernetes. A high
level overview for the Kubernetes SaltStack configuration can be found [in the docs tree.](https://kubernetes.io/docs/admin/salt/)
This SaltStack configuration currently applies to default
configurations for Debian-on-GCE, Fedora-on-Vagrant, Ubuntu-on-AWS and
Ubuntu-on-Azure. (That doesn't mean it can't be made to apply to an
arbitrary configuration, but those are only the in-tree OS/IaaS
combinations supported today.) As you peruse the configuration, these
are shorthanded as `gce`, `vagrant`, `aws`, `azure-legacy` in `grains.cloud`;
configurations for Debian-on-GCE. (That doesn't mean it can't
be made to apply to an arbitrary configuration, but those are
only the in-tree OS/IaaS combinations supported today.) As you
peruse the configuration, this is shorthanded as `gce`, in `grains.cloud`;
the documentation in this tree uses this same shorthand for convenience.
See more:
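
For context on the README change above: `grains.cloud` is the value the provisioning scripts write into each minion's Salt grains (the removed Vagrant path wrote `cloud: vagrant` into /etc/salt/minion.d/grains.conf; see provision-utils.sh further down), and the Salt states branch on it. An illustrative way to inspect it on a provisioned host, assuming salt-minion is installed:

salt-call --local grains.get cloud
# local:
#     gce          (or, before this commit, "vagrant" on the Vagrant-provisioned VMs)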


@@ -1,36 +0,0 @@
approvers:
- derekwaynecarr
reviewers:
- ArtfulCoder
- thockin
- lavalamp
- smarterclayton
- derekwaynecarr
- caesarxuchao
- vishh
- mikedanese
- liggitt
- nikhiljindal
- erictune
- dchen1107
- zmerlynn
- justinsb
- roberthbailey
- eparis
- jlowdermilk
- piosz
- jsafrane
- jbeda
- madhusudancs
- jayunit100
- cjcullen
- david-mcmahon
- mfojtik
- pweil-
- dcbw
- ivan4th
- filbranden
- dshulyak
- k82cn
- caseydavenport
- johscheuer


@@ -1,122 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Contains configuration values for interacting with the Vagrant cluster
# Number of nodes in the cluster
NUM_NODES=${NUM_NODES-"1"}
export NUM_NODES
# The IP of the master
export MASTER_IP=${MASTER_IP-"10.245.1.2"}
export KUBE_MASTER_IP=${MASTER_IP}
export INSTANCE_PREFIX="kubernetes"
export MASTER_NAME="${INSTANCE_PREFIX}-master"
# Should the master serve as a node
REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false}
# Map out the IPs, names and container subnets of each node
export NODE_IP_BASE=${NODE_IP_BASE-"10.245.1."}
NODE_CONTAINER_SUBNET_BASE="10.246"
MASTER_CONTAINER_NETMASK="255.255.255.0"
MASTER_CONTAINER_ADDR="${NODE_CONTAINER_SUBNET_BASE}.0.1"
MASTER_CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.1/24"
CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.0/16"
for ((i=0; i < NUM_NODES; i++)) do
NODE_IPS[$i]="${NODE_IP_BASE}$((i+3))"
NODE_NAMES[$i]="${INSTANCE_PREFIX}-node-$((i+1))"
NODE_CONTAINER_SUBNETS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1/24"
NODE_CONTAINER_ADDRS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1"
NODE_CONTAINER_NETMASKS[$i]="255.255.255.0"
VAGRANT_NODE_NAMES[$i]="node-$((i+1))"
done
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.246.0.0/16}"
SERVICE_CLUSTER_IP_RANGE=10.247.0.0/16 # formerly PORTAL_NET
# Since this isn't exposed on the network, default to a simple user/passwd
MASTER_USER="${MASTER_USER:-vagrant}"
MASTER_PASSWD="${MASTER_PASSWD:-vagrant}"
# Admission Controllers to invoke prior to persisting objects in cluster
# If ResourceQuota is included, keep it at the end of the list to prevent incrementing quota usage prematurely.
ADMISSION_CONTROL=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,PVCProtection,ResourceQuota
# Optional: Enable node logging.
ENABLE_NODE_LOGGING=false
LOGGING_DESTINATION=elasticsearch
# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING=false
ELASTICSEARCH_LOGGING_REPLICAS=1
# Optional: Cluster monitoring to setup as part of the cluster bring up:
# none - No cluster monitoring setup
# influxdb - Heapster, InfluxDB, and Grafana
# google - Heapster, Google Cloud Monitoring, and Google Cloud Logging
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
# Extra options to set on the Docker command line. This is useful for setting
# --insecure-registry for local registries, or globally configuring selinux options
# TODO Enable selinux when Fedora 21 repositories get an updated docker package
# see https://bugzilla.redhat.com/show_bug.cgi?id=1216151
#EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-} -b=cbr0 --selinux-enabled --insecure-registry 10.0.0.0/8"
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-} --insecure-registry 10.0.0.0/8 -s overlay"
# Flag to tell the kubelet to enable CFS quota support
ENABLE_CPU_CFS_QUOTA="${KUBE_ENABLE_CPU_CFS_QUOTA:-true}"
# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="10.247.0.10"
DNS_DOMAIN="cluster.local"
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}"
# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
# Optional: Enable setting flags for kube-apiserver to turn on behavior in active-dev
RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"
# Determine extra certificate names for master
octets=($(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e 's|/.*||' -e 's/\./ /g'))
((octets[3]+=1))
service_ip=$(echo "${octets[*]}" | sed 's/ /./g')
MASTER_EXTRA_SANS="IP:${service_ip},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${DNS_DOMAIN},DNS:${MASTER_NAME}"
NETWORK_PROVIDER="${NETWORK_PROVIDER:-none}" # opencontrail, kubenet, etc
if [ "${NETWORK_PROVIDER}" == "kubenet" ]; then
CLUSTER_IP_RANGE="${CONTAINER_SUBNET}"
fi
# If enabled kube-controller-manager will be started with the --enable-hostpath-provisioner flag
ENABLE_HOSTPATH_PROVISIONER="${ENABLE_HOSTPATH_PROVISIONER:-true}"
# OpenContrail networking plugin specific settings
OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"
# Optional: if set to true, kube-up will configure the cluster to run e2e tests.
E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}
# Default fallback NETWORK_IF_NAME, used when no 'VAGRANT-BEGIN' comments are defined in the network scripts
export DEFAULT_NETWORK_IF_NAME="eth0"
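
To make the addressing scheme above concrete, here is what this config derives for NUM_NODES=2 with the default bases (values worked out from the loop and the SERVICE_CLUSTER_IP_RANGE arithmetic above, shown for illustration only):

# NODE_IPS=(10.245.1.3 10.245.1.4)   NODE_NAMES=(kubernetes-node-1 kubernetes-node-2)
# NODE_CONTAINER_SUBNETS=(10.246.1.1/24 10.246.2.1/24)   MASTER_CONTAINER_SUBNET=10.246.0.1/24
# service_ip: 10.247.0.0/16 -> last octet of the base address + 1 -> 10.247.0.1, so
# MASTER_EXTRA_SANS begins with "IP:10.247.0.1,DNS:kubernetes,..." and ends with "DNS:kubernetes-master"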


@@ -1,29 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Contains configuration values for interacting with the Vagrant cluster in test mode
#Set NUM_NODES to minimum required for testing.
NUM_NODES=2
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/vagrant/config-default.sh"
# Do not register the master kubelet during testing
REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false}
# Optional: if set to true, kube-up will configure the cluster to run e2e tests.
E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}


@@ -1,105 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
echoOK() {
TC='\e['
RegB="${TC}0m"
if [ "$1" -eq "0" ]; then
Green="${TC}32m"
echo -e "[${Green}OK${RegB}]"
else
Red="${TC}31m"
echo -e "[${Red}FAIL${RegB}]"
echo "Check log file."
exit 1
fi
}
usage() {
echo "Usage options: [--logfile <path to file>]"
}
logfile=/dev/null
while [[ $# > 0 ]]; do
key="$1"
shift
case $key in
-l|--logfile)
logfile="$1"
if [ "$logfile" == "" ]; then
usage
exit 1
fi
shift
;;
*)
# unknown option
usage
exit 1
;;
esac
done
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
cd "${KUBE_ROOT}"
echo All verbose output will be redirected to $logfile, use --logfile option to change.
printf "Start the cluster with 2 nodes .. "
export NUM_NODES=2
export KUBERNETES_PROVIDER=vagrant
(cluster/kube-up.sh >>"$logfile" 2>&1) || true
echoOK $?
printf "Check if node-1 can reach kubernetes master .. "
vagrant ssh node-1 -- ping -c 10 kubernetes-master >>"$logfile" 2>&1
echoOK $?
printf "Check if node-2 can reach kubernetes master .. "
vagrant ssh node-2 -- ping -c 10 kubernetes-master >>"$logfile" 2>&1
echoOK $?
printf "Pull an image that runs a web server on node-1 .. "
vagrant ssh node-1 -- 'sudo docker pull kubernetes/serve_hostname' >>"$logfile" 2>&1
echoOK $?
printf "Pull an image that runs a web server on node-2 .. "
vagrant ssh node-2 -- 'sudo docker pull kubernetes/serve_hostname' >>"$logfile" 2>&1
echoOK $?
printf "Run the server on node-1 .. "
vagrant ssh node-1 -- sudo docker run -d kubernetes/serve_hostname >>"$logfile" 2>&1
echoOK $?
printf "Run the server on node-2 .. "
vagrant ssh node-2 -- sudo docker run -d kubernetes/serve_hostname >>"$logfile" 2>&1
echoOK $?
printf "Run ping from node-1 to docker bridges and to the containers on both nodes .. "
vagrant ssh node-1 -- 'ping -c 20 10.246.0.1 && ping -c 20 10.246.1.1 && ping -c 20 10.246.0.2 && ping -c 20 10.246.1.2' >>"$logfile" 2>&1
echoOK $?
printf "Same pinch from node-2 .. "
vagrant ssh node-2 -- 'ping -c 20 10.246.0.1 && ping -c 20 10.246.1.1 && ping -c 20 10.246.0.2 && ping -c 20 10.246.1.2' >>"$logfile" 2>&1
echoOK $?
printf "tcp check, curl to both the running webservers from node-1 .. "
vagrant ssh node-1 -- 'curl -sS 10.246.0.2:9376 && curl -sS 10.246.1.2:9376' >>"$logfile" 2>&1
echoOK $?
printf "tcp check, curl to both the running webservers from node-2 .. "
vagrant ssh node-2 -- 'curl -sS 10.246.0.2:9376 && curl -sS 10.246.1.2:9376' >>"$logfile" 2>&1
echoOK $?
printf "All good, destroy the cluster .. "
vagrant destroy -f >>"$logfile" 2>&1
echoOK $?


@@ -1,122 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# Set the host name explicitly
# See: https://github.com/mitchellh/vagrant/issues/2430
hostnamectl set-hostname ${MASTER_NAME}
# Set the variable to empty value explicitly
if_to_edit=""
if [[ "$(grep 'VERSION_ID' /etc/os-release)" =~ ^VERSION_ID=23 ]]; then
# Disable network interface being managed by Network Manager (needed for Fedora 21+)
NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/
if_to_edit=$( find ${NETWORK_CONF_PATH}ifcfg-* | xargs grep -l VAGRANT-BEGIN )
for if_conf in ${if_to_edit}; do
grep -q ^NM_CONTROLLED= ${if_conf} || echo 'NM_CONTROLLED=no' >> ${if_conf}
sed -i 's/^#\?NM_CONTROLLED=.*/NM_CONTROLLED=no/' ${if_conf}
done;
systemctl restart network
fi
# needed for vsphere support
# handle the case when no 'VAGRANT-BEGIN' comment was defined in network-scripts
# set the NETWORK_IF_NAME to have a default value in such case
NETWORK_IF_NAME=`echo ${if_to_edit} | awk -F- '{ print $3 }'`
if [[ -z "$NETWORK_IF_NAME" ]]; then
NETWORK_IF_NAME=${DEFAULT_NETWORK_IF_NAME}
fi
# Setup hosts file to support ping by hostname to each node in the cluster from apiserver
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
node=${NODE_NAMES[$i]}
ip=${NODE_IPS[$i]}
if [ ! "$(cat /etc/hosts | grep $node)" ]; then
echo "Adding $node to hosts file"
echo "$ip $node" >> /etc/hosts
fi
done
echo "127.0.0.1 localhost" >> /etc/hosts # enables cmds like 'kubectl get pods' on master.
echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts
enable-accounting
prepare-package-manager
# Configure the master network
if [ "${NETWORK_PROVIDER}" != "kubenet" ]; then
provision-network-master
fi
write-salt-config kubernetes-master
# Generate and distribute a shared secret (bearer token) to
# apiserver and kubelet so that kubelet can authenticate to
# apiserver to send events.
known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
if [[ ! -f "${known_tokens_file}" ]]; then
mkdir -p /srv/salt-overlay/salt/kube-apiserver
known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
(umask u=rw,go= ;
echo "$KUBELET_TOKEN,kubelet,kubelet" > $known_tokens_file;
echo "$KUBE_PROXY_TOKEN,kube_proxy,kube_proxy" >> $known_tokens_file;
echo "$KUBE_BEARER_TOKEN,admin,admin" >> $known_tokens_file)
mkdir -p /srv/salt-overlay/salt/kubelet
kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
(umask u=rw,go= ; echo "{\"BearerToken\": \"$KUBELET_TOKEN\", \"Insecure\": true }" > $kubelet_auth_file)
create-salt-kubelet-auth
create-salt-kubeproxy-auth
# Generate tokens for other "service accounts". Append to known_tokens.
#
# NB: If this list ever changes, this script actually has to
# change to detect the existence of this file, kill any deleted
# old tokens and add any new tokens (to handle the upgrade case).
service_accounts=("system:scheduler" "system:controller_manager" "system:logging" "system:monitoring" "system:dns")
for account in "${service_accounts[@]}"; do
token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
echo "${token},${account},${account}" >> "${known_tokens_file}"
done
fi
readonly BASIC_AUTH_FILE="/srv/salt-overlay/salt/kube-apiserver/basic_auth.csv"
if [ ! -e "${BASIC_AUTH_FILE}" ]; then
mkdir -p /srv/salt-overlay/salt/kube-apiserver
(umask 077;
echo "${MASTER_PASSWD},${MASTER_USER},admin" > "${BASIC_AUTH_FILE}")
fi
# Enable Fedora Cockpit on host to support Kubernetes administration
# Access it by going to <master-ip>:9090 and login as vagrant/vagrant
if ! which /usr/libexec/cockpit-ws &>/dev/null; then
pushd /etc/yum.repos.d
curl -OL https://copr.fedorainfracloud.org/coprs/g/cockpit/cockpit-preview/repo/fedora-23/msuchy-cockpit-preview-fedora-23.repo
dnf install -y cockpit cockpit-kubernetes docker socat ethtool
popd
systemctl enable cockpit.socket
systemctl start cockpit.socket
fi
install-salt
run-salt
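
The known_tokens.csv and basic_auth.csv files generated above use the kube-apiserver --token-auth-file and --basic-auth-file CSV formats (token,user,uid and password,user,uid). Illustrative contents with placeholder tokens; the real tokens are the 32-character strings produced from /dev/urandom:

# /srv/salt-overlay/salt/kube-apiserver/known_tokens.csv
<32-char-token>,kubelet,kubelet
<32-char-token>,kube_proxy,kube_proxy
<32-char-token>,admin,admin
<32-char-token>,system:scheduler,system:scheduler
...
# /srv/salt-overlay/salt/kube-apiserver/basic_auth.csv (defaults from config-default.sh)
vagrant,vagrant,admin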


@@ -1,91 +0,0 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# provision-network-master configures flannel on the master
function provision-network-master {
echo "Provisioning network on master"
FLANNEL_ETCD_URL="http://${MASTER_IP}:4379"
# Install etcd for flannel data
if ! which etcd >/dev/null 2>&1; then
dnf install -y etcd
# Modify etcd configuration for flannel data
cat <<EOF >/etc/etcd/etcd.conf
ETCD_NAME=flannel
ETCD_DATA_DIR="/var/lib/etcd/flannel.etcd"
ETCD_LISTEN_PEER_URLS="http://${MASTER_IP}:4380"
ETCD_LISTEN_CLIENT_URLS="http://${MASTER_IP}:4379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://${MASTER_IP}:4380"
ETCD_INITIAL_CLUSTER="flannel=http://${MASTER_IP}:4380"
ETCD_ADVERTISE_CLIENT_URLS="${FLANNEL_ETCD_URL}"
EOF
# fix the etcd boot failure issue
sed -i '/^Restart/a RestartSec=10' /usr/lib/systemd/system/etcd.service
systemctl daemon-reload
# Enable and start etcd
systemctl enable etcd
systemctl start etcd
fi
# Install flannel for overlay
if ! which flanneld >/dev/null 2>&1; then
dnf install -y flannel
cat <<EOF >/etc/flannel-config.json
{
"Network": "${CONTAINER_SUBNET}",
"SubnetLen": 24,
"Backend": {
"Type": "udp",
"Port": 8285
}
}
EOF
# Import default configuration into etcd for master setup
etcdctl -C ${FLANNEL_ETCD_URL} set /coreos.com/network/config < /etc/flannel-config.json
# Configure local daemon to speak to master
NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/
if_to_edit=$( find ${NETWORK_CONF_PATH}ifcfg-* | xargs grep -l VAGRANT-BEGIN )
NETWORK_IF_NAME=`echo ${if_to_edit} | awk -F- '{ print $3 }'`
# needed for vsphere support
# handle the case when no 'VAGRANT-BEGIN' comment was defined in network-scripts
# set the NETWORK_IF_NAME to have a default value in such case
if [[ -z "$NETWORK_IF_NAME" ]]; then
NETWORK_IF_NAME=${DEFAULT_NETWORK_IF_NAME}
fi
cat <<EOF >/etc/sysconfig/flanneld
FLANNEL_ETCD="${FLANNEL_ETCD_URL}"
FLANNEL_ETCD_KEY="/coreos.com/network"
FLANNEL_OPTIONS="-iface=${NETWORK_IF_NAME} --ip-masq"
EOF
# Start flannel
systemctl enable flanneld
systemctl start flanneld
fi
echo "Network configuration verified"
}
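
A note on the flannel wiring above: the master runs a dedicated etcd (client port 4379) solely for flannel state, and the overlay configuration is seeded once with etcdctl. An illustrative check that it landed, using the etcdctl v2 syntax the script itself uses and the default MASTER_IP:

etcdctl -C http://10.245.1.2:4379 get /coreos.com/network/config
# prints the JSON written to /etc/flannel-config.json above ("Network": "10.246.0.0/16", udp backend on port 8285)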


@@ -1,51 +0,0 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# provision-network-node configures flannel on the node
function provision-network-node {
echo "Provisioning network on node"
FLANNEL_ETCD_URL="http://${MASTER_IP}:4379"
# Install flannel for overlay
if ! which flanneld >/dev/null 2>&1; then
dnf install -y flannel
# Configure local daemon to speak to master
NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/
if_to_edit=$( find ${NETWORK_CONF_PATH}ifcfg-* | xargs grep -l VAGRANT-BEGIN )
NETWORK_IF_NAME=`echo ${if_to_edit} | awk -F- '{ print $3 }'`
# needed for vsphere support
# handle the case when no 'VAGRANT-BEGIN' comment was defined in network-scripts
# set the NETWORK_IF_NAME to have a default value in such case
if [[ -z "$NETWORK_IF_NAME" ]]; then
NETWORK_IF_NAME=${DEFAULT_NETWORK_IF_NAME}
fi
cat <<EOF >/etc/sysconfig/flanneld
FLANNEL_ETCD="${FLANNEL_ETCD_URL}"
FLANNEL_ETCD_KEY="/coreos.com/network"
FLANNEL_OPTIONS="-iface=${NETWORK_IF_NAME} --ip-masq"
EOF
# Start flannel
systemctl enable flanneld
systemctl start flanneld
fi
echo "Network configuration verified"
}


@@ -1,88 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# Set the host name explicitly
# See: https://github.com/mitchellh/vagrant/issues/2430
hostnamectl set-hostname ${NODE_NAME}
if_to_edit=""
if [[ "$(grep 'VERSION_ID' /etc/os-release)" =~ ^VERSION_ID=23 ]]; then
# Disable network interface being managed by Network Manager (needed for Fedora 21+)
NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/
if_to_edit=$( find ${NETWORK_CONF_PATH}ifcfg-* | xargs grep -l VAGRANT-BEGIN )
for if_conf in ${if_to_edit}; do
grep -q ^NM_CONTROLLED= ${if_conf} || echo 'NM_CONTROLLED=no' >> ${if_conf}
sed -i 's/^#\?NM_CONTROLLED=.*/NM_CONTROLLED=no/' ${if_conf}
done;
systemctl restart network
fi
# needed for vsphere support
# handle the case when no 'VAGRANT-BEGIN' comment was defined in network-scripts
# set the NETWORK_IF_NAME to have a default value in such case
NETWORK_IF_NAME=`echo ${if_to_edit} | awk -F- '{ print $3 }'`
if [[ -z "$NETWORK_IF_NAME" ]]; then
NETWORK_IF_NAME=${DEFAULT_NETWORK_IF_NAME}
fi
# Setup hosts file to support ping by hostname to master
if [ ! "$(cat /etc/hosts | grep $MASTER_NAME)" ]; then
echo "Adding $MASTER_NAME to hosts file"
echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts
fi
echo "$NODE_IP $NODE_NAME" >> /etc/hosts
# Setup hosts file to support ping by hostname to each node in the cluster
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
node=${NODE_NAMES[$i]}
ip=${NODE_IPS[$i]}
if [ ! "$(cat /etc/hosts | grep $node)" ]; then
echo "Adding $node to hosts file"
echo "$ip $node" >> /etc/hosts
fi
done
enable-accounting
prepare-package-manager
# Configure network
if [ "${NETWORK_PROVIDER}" != "kubenet" ]; then
provision-network-node
fi
write-salt-config kubernetes-pool
# Generate kubelet and kube-proxy auth file(kubeconfig) if there is not an existing one
known_kubeconfig_file="/srv/salt-overlay/salt/kubelet/kubeconfig"
if [[ ! -f "${known_kubeconfig_file}" ]]; then
create-salt-kubelet-auth
create-salt-kubeproxy-auth
else
# stop kubelet, let salt start it later
systemctl stop kubelet
fi
install-salt
add-volume-support
run-salt
dnf install -y socat ethtool
dnf update -y docker


@@ -1,222 +0,0 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
function enable-accounting() {
mkdir -p /etc/systemd/system.conf.d/
cat <<EOF >/etc/systemd/system.conf.d/kubernetes-accounting.conf
[Manager]
DefaultCPUAccounting=yes
DefaultMemoryAccounting=yes
EOF
systemctl daemon-reload
}
function prepare-package-manager() {
echo "Prepare package manager"
# Useful if a mirror is broken or slow
if [ -z "$CUSTOM_FEDORA_REPOSITORY_URL" ]; then
echo "fastestmirror=True" >> /etc/dnf/dnf.conf
else
# remove trailing slash from URL if it's present
CUSTOM_FEDORA_REPOSITORY_URL="${CUSTOM_FEDORA_REPOSITORY_URL%/}"
sed -i -e "/^metalink=/d" /etc/yum.repos.d/*.repo
sed -i -e "s@^#baseurl=http://download.fedoraproject.org/pub/fedora@baseurl=$CUSTOM_FEDORA_REPOSITORY_URL@" /etc/yum.repos.d/*.repo
fi
}
function add-volume-support() {
echo "Adding nfs volume support"
# we need nfs-utils to support volumes
dnf install -y nfs-utils
}
function write-salt-config() {
local role="$1"
# Update salt configuration
mkdir -p /etc/salt/minion.d
mkdir -p /srv/salt-overlay/pillar
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
cert_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'
enable_cluster_ui: '$(echo "$ENABLE_CLUSTER_UI" | sed -e "s/'/''/g")'
enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
enable_cpu_cfs_quota: '$(echo "$ENABLE_CPU_CFS_QUOTA" | sed -e "s/'/''/g")'
network_provider: '$(echo "$NETWORK_PROVIDER" | sed -e "s/'/''/g")'
cluster_cidr: '$(echo "$CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
opencontrail_tag: '$(echo "$OPENCONTRAIL_TAG" | sed -e "s/'/''/g")'
opencontrail_kubernetes_tag: '$(echo "$OPENCONTRAIL_KUBERNETES_TAG" | sed -e "s/'/''/g")'
opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET" | sed -e "s/'/''/g")'
e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")'
enable_hostpath_provisioner: '$(echo "$ENABLE_HOSTPATH_PROVISIONER" | sed -e "s/'/''/g")'
EOF
if [ -n "${EVICTION_HARD:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
eviction_hard: '$(echo "${EVICTION_HARD}" | sed -e "s/'/''/g")'
EOF
fi
cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
log_level: warning
log_level_logfile: warning
EOF
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
node_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
publicAddressOverride: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
network_mode: openvswitch
networkInterfaceName: '$(echo "$NETWORK_IF_NAME" | sed -e "s/'/''/g")'
api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
kubelet_kubeconfig: /srv/salt-overlay/salt/kubelet/kubeconfig
cloud: vagrant
roles:
- $role
runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")'
docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")'
master_extra_sans: '$(echo "$MASTER_EXTRA_SANS" | sed -e "s/'/''/g")'
keep_host_etcd: true
kube_user: '$(echo "$KUBE_USER" | sed -e "s/'/''/g")'
EOF
}
function release_not_found() {
echo "It looks as if you don't have a compiled version of Kubernetes. If you" >&2
echo "are running from a clone of the git repo, please run 'make quick-release'." >&2
echo "Note that this requires having Docker installed. If you are running " >&2
echo "from a release tarball, something is wrong. Look at " >&2
echo "http://kubernetes.io/ for information on how to contact the development team for help." >&2
exit 1
}
function install-salt() {
server_binary_tar="/vagrant/server/kubernetes-server-linux-amd64.tar.gz"
if [[ ! -f "$server_binary_tar" ]]; then
server_binary_tar="/vagrant/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
fi
if [[ ! -f "$server_binary_tar" ]]; then
release_not_found
fi
salt_tar="/vagrant/server/kubernetes-salt.tar.gz"
if [[ ! -f "$salt_tar" ]]; then
salt_tar="/vagrant/_output/release-tars/kubernetes-salt.tar.gz"
fi
if [[ ! -f "$salt_tar" ]]; then
release_not_found
fi
echo "Running release install script"
rm -rf /kube-install
mkdir -p /kube-install
pushd /kube-install
tar xzf "$salt_tar"
cp "$server_binary_tar" .
./kubernetes/saltbase/install.sh "${server_binary_tar##*/}"
popd
if ! which salt-call >/dev/null 2>&1; then
# Install salt from official repositories.
# Need to enable testing-repos to get version of salt with fix for dnf-core-plugins
dnf config-manager --set-enabled updates-testing
dnf install -y salt-minion
# Fedora >= 23 includes salt packages but the bootstrap is
# creating configuration for a (non-existent) salt repo anyway.
# Remove the invalid repo to prevent dnf from warning about it on
# every update. Assume this problem is specific to Fedora 23 and
# will be fixed by the time another version of Fedora lands.
local fedora_version=$(grep 'VERSION_ID' /etc/os-release | sed 's+VERSION_ID=++')
if [[ "${fedora_version}" = '23' ]]; then
local repo_file='/etc/yum.repos.d/saltstack-salt-fedora-23.repo'
if [[ -f "${repo_file}" ]]; then
rm "${repo_file}"
fi
fi
fi
}
function run-salt() {
echo " Now waiting for the Salt provisioning process to complete on this machine."
echo " This can take some time based on your network, disk, and cpu speed."
salt-call --local state.highstate
}
function create-salt-kubelet-auth() {
local -r kubelet_kubeconfig_folder="/srv/salt-overlay/salt/kubelet"
mkdir -p "${kubelet_kubeconfig_folder}"
(umask 077;
cat > "${kubelet_kubeconfig_folder}/kubeconfig" << EOF
apiVersion: v1
kind: Config
clusters:
- cluster:
server: "https://${MASTER_IP}"
insecure-skip-tls-verify: true
name: local
contexts:
- context:
cluster: local
user: kubelet
name: service-account-context
current-context: service-account-context
users:
- name: kubelet
user:
token: ${KUBELET_TOKEN}
EOF
)
}
function create-salt-kubeproxy-auth() {
kube_proxy_kubeconfig_folder="/srv/salt-overlay/salt/kube-proxy"
mkdir -p "${kube_proxy_kubeconfig_folder}"
(umask 077;
cat > "${kube_proxy_kubeconfig_folder}/kubeconfig" << EOF
apiVersion: v1
kind: Config
clusters:
- cluster:
insecure-skip-tls-verify: true
name: local
contexts:
- context:
cluster: local
user: kube-proxy
name: service-account-context
current-context: service-account-context
users:
- name: kube-proxy
user:
token: ${KUBE_PROXY_TOKEN}
EOF
)
}
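
A note on the repeated sed -e "s/'/''/g" in write-salt-config above: the pillar and grains files are YAML, and doubling a single quote is how a literal ' is escaped inside a single-quoted YAML scalar. Illustrative (the key name here is a generic example, not one from the scripts):

value="it's a test"
echo "some_key: '$(echo "$value" | sed -e "s/'/''/g")'"
# -> some_key: 'it''s a test'   (YAML parses this back to: it's a test)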


@@ -1,389 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions that each provider hosting Kubernetes must implement to use cluster/kube-*.sh scripts.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/vagrant/${KUBE_CONFIG_FILE-"config-default.sh"}"
source "${KUBE_ROOT}/cluster/common.sh"
function detect-master () {
KUBE_MASTER_IP=$MASTER_IP
echo "KUBE_MASTER_IP: ${KUBE_MASTER_IP}" 1>&2
}
# Get node IP addresses and store in KUBE_NODE_IP_ADDRESSES[]
function detect-nodes {
echo "Nodes already detected" 1>&2
KUBE_NODE_IP_ADDRESSES=("${NODE_IPS[@]}")
}
# Verify prereqs on the host machine. Also exports USING_KUBE_SCRIPTS=true so
# that our Vagrantfile doesn't error out.
function verify-prereqs {
for x in vagrant; do
if ! which "$x" >/dev/null; then
echo "Can't find $x in PATH, please fix and retry."
exit 1
fi
done
local vagrant_plugins=$(vagrant plugin list | sed '-es% .*$%%' '-es% *% %g' | tr ' ' $'\n')
local providers=(
# Format is:
# provider_ctl_executable vagrant_provider_name vagrant_provider_plugin_re
# either provider_ctl_executable or vagrant_provider_plugin_re can
# be blank (i.e., '') if none is needed by Vagrant (see, e.g.,
# virtualbox entry)
'' vmware_fusion vagrant-vmware-fusion
'' vmware_workstation vagrant-vmware-workstation
prlctl parallels vagrant-parallels
VBoxManage virtualbox ''
virsh libvirt vagrant-libvirt
'' vsphere vagrant-vsphere
)
local provider_found=''
local provider_bin
local provider_name
local provider_plugin_re
while [ "${#providers[@]}" -gt 0 ]; do
provider_bin=${providers[0]}
provider_name=${providers[1]}
provider_plugin_re=${providers[2]}
providers=("${providers[@]:3}")
# If the provider is explicitly set, look only for that provider
if [ -n "${VAGRANT_DEFAULT_PROVIDER:-}" ] \
&& [ "${VAGRANT_DEFAULT_PROVIDER}" != "${provider_name}" ]; then
continue
fi
if ([ -z "${provider_bin}" ] \
|| which "${provider_bin}" >/dev/null 2>&1) \
&& ([ -z "${provider_plugin_re}" ] \
|| [ -n "$(echo "${vagrant_plugins}" | grep -E "^${provider_plugin_re}$")" ]); then
provider_found="${provider_name}"
# Stop after finding the first viable provider
break
fi
done
if [ -z "${provider_found}" ]; then
if [ -n "${VAGRANT_DEFAULT_PROVIDER:-}" ]; then
echo "Can't find the necessary components for the ${VAGRANT_DEFAULT_PROVIDER} vagrant provider."
echo "Possible reasons could be: "
echo -e "\t- vmrun utility is not in your path"
echo -e "\t- Vagrant plugin was not found."
echo -e "\t- VAGRANT_DEFAULT_PROVIDER is set, but not found."
echo "Please fix and retry."
else
echo "Can't find the necessary components for any viable vagrant providers (e.g., virtualbox), please fix and retry."
fi
exit 1
fi
# Set VAGRANT_CWD to KUBE_ROOT so that we find the right Vagrantfile no
# matter what directory the tools are called from.
export VAGRANT_CWD="${KUBE_ROOT}"
export USING_KUBE_SCRIPTS=true
}
# Create a set of provision scripts for the master and each of the nodes
function create-provision-scripts {
kube::util::ensure-temp-dir
(
echo "#! /bin/bash"
echo-kube-env
echo "NODE_IP='${MASTER_IP}'"
echo "CONTAINER_ADDR='${MASTER_CONTAINER_ADDR}'"
echo "CONTAINER_NETMASK='${MASTER_CONTAINER_NETMASK}'"
awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-utils.sh"
awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network-master.sh"
awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-master.sh"
) > "${KUBE_TEMP}/master-start.sh"
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
(
echo "#! /bin/bash"
echo-kube-env
echo "NODE_NAME=(${NODE_NAMES[$i]})"
echo "NODE_IP='${NODE_IPS[$i]}'"
echo "NODE_ID='$i'"
echo "CONTAINER_ADDR='${NODE_CONTAINER_ADDRS[$i]}'"
echo "CONTAINER_NETMASK='${NODE_CONTAINER_NETMASKS[$i]}'"
awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-utils.sh"
awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network-node.sh"
awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-node.sh"
) > "${KUBE_TEMP}/node-start-${i}.sh"
done
}
function echo-kube-env() {
echo "KUBE_ROOT=/vagrant"
echo "INSTANCE_PREFIX='${INSTANCE_PREFIX}'"
echo "MASTER_NAME='${INSTANCE_PREFIX}-master'"
echo "MASTER_IP='${MASTER_IP}'"
echo "NODE_NAMES=(${NODE_NAMES[@]})"
echo "NODE_IPS=(${NODE_IPS[@]})"
echo "DEFAULT_NETWORK_IF_NAME=${DEFAULT_NETWORK_IF_NAME}"
echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'"
echo "CLUSTER_IP_RANGE='${CLUSTER_IP_RANGE}'"
echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'"
echo "NODE_CONTAINER_NETMASKS='${NODE_CONTAINER_NETMASKS[@]}'"
echo "NODE_CONTAINER_SUBNETS=(${NODE_CONTAINER_SUBNETS[@]})"
echo "SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
echo "MASTER_USER='${MASTER_USER}'"
echo "MASTER_PASSWD='${MASTER_PASSWD}'"
echo "KUBE_USER='${KUBE_USER}'"
echo "KUBE_PASSWORD='${KUBE_PASSWORD}'"
echo "KUBE_BEARER_TOKEN='${KUBE_BEARER_TOKEN}'"
echo "ENABLE_CLUSTER_MONITORING='${ENABLE_CLUSTER_MONITORING}'"
echo "ENABLE_CLUSTER_LOGGING='${ENABLE_CLUSTER_LOGGING:-false}'"
echo "ELASTICSEARCH_LOGGING_REPLICAS='${ELASTICSEARCH_LOGGING_REPLICAS:-1}'"
echo "ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'"
echo "ENABLE_CLUSTER_UI='${ENABLE_CLUSTER_UI}'"
echo "ENABLE_HOSTPATH_PROVISIONER='${ENABLE_HOSTPATH_PROVISIONER:-false}'"
echo "LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'"
echo "ENABLE_CLUSTER_DNS='${ENABLE_CLUSTER_DNS:-false}'"
echo "DNS_SERVER_IP='${DNS_SERVER_IP:-}'"
echo "DNS_DOMAIN='${DNS_DOMAIN:-}'"
echo "RUNTIME_CONFIG='${RUNTIME_CONFIG:-}'"
echo "ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'"
echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS:-}'"
echo "VAGRANT_DEFAULT_PROVIDER='${VAGRANT_DEFAULT_PROVIDER:-}'"
echo "KUBELET_TOKEN='${KUBELET_TOKEN:-}'"
echo "KUBE_PROXY_TOKEN='${KUBE_PROXY_TOKEN:-}'"
echo "MASTER_EXTRA_SANS='${MASTER_EXTRA_SANS:-}'"
echo "ENABLE_CPU_CFS_QUOTA='${ENABLE_CPU_CFS_QUOTA}'"
echo "NETWORK_PROVIDER='${NETWORK_PROVIDER:-}'"
echo "OPENCONTRAIL_TAG='${OPENCONTRAIL_TAG:-}'"
echo "OPENCONTRAIL_KUBERNETES_TAG='${OPENCONTRAIL_KUBERNETES_TAG:-}'"
echo "OPENCONTRAIL_PUBLIC_SUBNET='${OPENCONTRAIL_PUBLIC_SUBNET:-}'"
echo "E2E_STORAGE_TEST_ENVIRONMENT='${E2E_STORAGE_TEST_ENVIRONMENT:-}'"
echo "CUSTOM_FEDORA_REPOSITORY_URL='${CUSTOM_FEDORA_REPOSITORY_URL:-}'"
echo "EVICTION_HARD='${EVICTION_HARD:-}'"
}
function verify-cluster {
# TODO: How does the user know the difference between "tak[ing] some
# time" and "loop[ing] forever"? Can we give more specific feedback on
# whether "an error" has occurred?
echo "Each machine instance has been created/updated."
echo " Now waiting for the Salt provisioning process to complete on each machine."
echo " This can take some time based on your network, disk, and cpu speed."
echo " It is possible for an error to occur during Salt provision of cluster and this could loop forever."
# verify master has all required daemons
echo "Validating master"
local machine="master"
local -a required_processes=("kube-apiserver" "kube-scheduler" "kube-controller-manager" "kubelet" "docker")
local validated="1"
until [[ "$validated" == "0" ]]; do
validated="0"
for process in "${required_processes[@]}"; do
vagrant ssh "${machine}" -c "pgrep -f ${process}" >/dev/null 2>&1 || {
printf "."
validated="1"
sleep 2
}
done
done
# verify each node has all required daemons
local i
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
echo "Validating ${VAGRANT_NODE_NAMES[$i]}"
local machine=${VAGRANT_NODE_NAMES[$i]}
local -a required_processes=("kube-proxy" "kubelet" "docker")
local validated="1"
until [[ "${validated}" == "0" ]]; do
validated="0"
for process in "${required_processes[@]}"; do
vagrant ssh "${machine}" -c "pgrep -f ${process}" >/dev/null 2>&1 || {
printf "."
validated="1"
sleep 2
}
done
done
done
echo
echo "Waiting for each node to be registered with cloud provider"
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
local validated="0"
start="$(date +%s)"
until [[ "$validated" == "1" ]]; do
now="$(date +%s)"
# Timeout set to 3 minutes
if [ $((now - start)) -gt 180 ]; then
echo "Timeout while waiting for echo node to be registered with cloud provider"
exit 2
fi
local nodes=$("${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o name)
validated=$(echo $nodes | grep -c "${NODE_NAMES[i]}") || {
printf "."
sleep 2
validated="0"
}
done
done
# By this time, all kube api calls should work, so no need to loop and retry.
echo "Validating we can run kubectl commands."
vagrant ssh master --command "kubectl get pods" || {
echo "WARNING: kubectl to localhost failed. This could mean localhost is not bound to an IP"
}
(
# ensures KUBECONFIG is set
get-kubeconfig-basicauth
get-kubeconfig-bearertoken
echo
echo "Kubernetes cluster is running."
echo
echo "The master is running at:"
echo
echo " https://${MASTER_IP}"
echo
echo "Administer and visualize its resources using Cockpit:"
echo
echo " https://${MASTER_IP}:9090"
echo
echo "For more information on Cockpit, visit http://cockpit-project.org"
echo
echo "The user name and password to use is located in ${KUBECONFIG}"
echo
)
}
# Instantiate a kubernetes cluster
function kube-up {
load-or-gen-kube-basicauth
load-or-gen-kube-bearertoken
get-tokens
create-provision-scripts
vagrant up --no-parallel
export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
export CONTEXT="vagrant"
(
umask 077
vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>/dev/null
vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null
vagrant ssh master -- sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null
# Update the user's kubeconfig to include credentials for this apiserver.
create-kubeconfig
)
verify-cluster
}
# Delete a kubernetes cluster
function kube-down {
vagrant destroy -f
}
# Update a kubernetes cluster with latest source
function kube-push {
get-kubeconfig-basicauth
get-kubeconfig-bearertoken
create-provision-scripts
vagrant provision
}
# Execute prior to running tests to build a release if required for env
function test-build-release {
# Make a release
"${KUBE_ROOT}/build/release.sh"
}
# Execute prior to running tests to initialize required structure
function test-setup {
"${KUBE_ROOT}/cluster/kube-up.sh"
echo "Vagrant test setup complete" 1>&2
}
# Execute after running tests to perform any required clean-up
function test-teardown {
kube-down
}
# Find the node name based on the IP address
function find-vagrant-name-by-ip {
local ip="$1"
local ip_pattern="${NODE_IP_BASE}(.*)"
# This is subtle. We map 10.245.2.2 -> node-1. We do this by matching a
# regexp and using the capture to construct the name.
[[ $ip =~ $ip_pattern ]] || {
return 1
}
echo "node-$((${BASH_REMATCH[1]} - 1))"
}
# Find the vagrant machine name based on the host name of the node
function find-vagrant-name-by-node-name {
local ip="$1"
if [[ "$ip" == "${INSTANCE_PREFIX}-master" ]]; then
echo "master"
return $?
fi
local ip_pattern="${INSTANCE_PREFIX}-node-(.*)"
[[ $ip =~ $ip_pattern ]] || {
return 1
}
echo "node-${BASH_REMATCH[1]}"
}
# SSH to a node by name or IP ($1) and run a command ($2).
function ssh-to-node {
local node="$1"
local cmd="$2"
local machine
machine=$(find-vagrant-name-by-ip $node) || true
[[ -n ${machine-} ]] || machine=$(find-vagrant-name-by-node-name $node) || true
[[ -n ${machine-} ]] || {
echo "Cannot find machine to ssh to: $1"
return 1
}
vagrant ssh "${machine}" -c "${cmd}"
}
# Perform preparations required to run e2e tests
function prepare-e2e() {
echo "Vagrant doesn't need special preparations for e2e tests" 1>&2
}
function get-tokens() {
KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
}
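
The get-tokens helper above uses a common shell idiom for minting bearer tokens: read random bytes, base64-encode them, strip '=', '+' and '/', and keep the first 32 characters. A quick illustrative check of the result:

token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
echo "${#token}"   # 32
# The same pipeline is used in provision-master.sh for the extra "service account" tokens appended to known_tokens.csv.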


@@ -225,7 +225,7 @@ func RegisterClusterFlags() {
flag.StringVar(&TestContext.KubeVolumeDir, "volume-dir", "/var/lib/kubelet", "Path to the directory containing the kubelet volumes.")
flag.StringVar(&TestContext.CertDir, "cert-dir", "", "Path to the directory containing the certs. Default is empty, which doesn't use certs.")
flag.StringVar(&TestContext.RepoRoot, "repo-root", "../../", "Root directory of kubernetes repository, for finding test files.")
flag.StringVar(&TestContext.Provider, "provider", "", "The name of the Kubernetes provider (gce, gke, local, vagrant, etc.)")
flag.StringVar(&TestContext.Provider, "provider", "", "The name of the Kubernetes provider (gce, gke, local, etc.)")
flag.StringVar(&TestContext.KubectlPath, "kubectl-path", "kubectl", "The kubectl binary to use. For development, you might use 'cluster/kubectl.sh' here.")
flag.StringVar(&TestContext.OutputDir, "e2e-output-dir", "/tmp", "Output directory for interesting/useful test data, like performance data, benchmarks, and other metrics.")
flag.StringVar(&TestContext.Prefix, "prefix", "e2e", "A prefix to be added to cloud resources created during testing.")


@@ -3454,12 +3454,6 @@ func GetSigner(provider string) (ssh.Signer, error) {
}
// Otherwise revert to home dir
keyfile = "kube_aws_rsa"
case "vagrant":
keyfile = os.Getenv("VAGRANT_SSH_KEY")
if len(keyfile) != 0 {
return sshutil.MakePrivateKeySignerFromFile(keyfile)
}
return nil, fmt.Errorf("VAGRANT_SSH_KEY env variable should be provided")
case "local", "vsphere":
keyfile = os.Getenv("LOCAL_SSH_KEY") // maybe?
if len(keyfile) == 0 {