Merge pull request #11264 from eparis/total-ansible

Ansible updates
Mike Danese 2015-07-24 10:39:34 -07:00
commit f0d7f70838
55 changed files with 735 additions and 204 deletions

View File

@ -1,54 +1,59 @@
# Kubernetes Ansible
This playbook and set of roles set up a Kubernetes cluster onto machines. They
can be real hardware, VMs, things in a public cloud, etc. Anything that you can connect to via SSH.
## Before starting
* Record the IP address/hostname of the machine you want to be your master (only a single master is supported)
* Record the IP address/hostname of the machine you want to be your etcd server (often the same as the master; only one)
* Record the IP addresses/hostnames of the machines you want to be your nodes (the master can also be a node)
* Make sure the machine you run ansible from has ansible 1.9 and python-netaddr installed.
## Setup
### Configure inventory
Add the system information gathered above into the 'inventory' file, or create a new inventory file for the cluster.
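For example, a minimal inventory, patterned on the groups this playbook expects (addresses are placeholders), might look like:
```
[masters]
10.0.0.1

[etcd]
10.0.0.1

[nodes]
10.0.0.2
10.0.0.3
```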
### Configure Cluster options
Look through all of the options in `group_vars/all.yml` and
set the variables to reflect your needs. The options are described there
in full detail.
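As a quick orientation, here are a few of the options this pull request touches, with illustrative values (see `group_vars/all.yml` itself for the authoritative list and defaults):
```yaml
source_type: packageManager   # or localBuild
kube_master_api_port: 443
cluster_logging: true
cluster_monitoring: true
kube_ui: false
dns_setup: true
```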
## Running the playbook
After going through the setup, run the setup script provided:
`$ ./setup.sh`
You may override the inventory file by doing:
`INVENTORY=myinventory ./setup.sh`
In general this will work on very recent Fedora, rawhide or F21. Future work to
support RHEL7, CentOS, and possibly other distros should be forthcoming.
### Targeted runs
You can just set up certain parts instead of doing it all.
#### etcd
`$ ./setup.sh --tags=etcd`
#### Kubernetes master
`$ ./setup.sh --tags=masters`
#### Kubernetes nodes
`$ ./setup.sh --tags=nodes`
#### flannel
`$ ./setup.sh --tags=flannel`
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/contrib/ansible/README.md?pixel)]()

View File

@ -1,3 +1,11 @@
# This value determines how kubernetes binaries, config files, and service
# files are loaded onto the target machines. The following are the only
# valid options:
#
# localBuild - requires make release to have been run to build local binaries
# packageManager - will install packages from your distribution using yum/dnf/apt
source_type: localBuild
# will be used as the Internal dns domain name if DNS is enabled. Services
# will be discoverable under <service-name>.<namespace>.svc.<domainname>, e.g.
# myservice.default.svc.cluster.local
@ -50,6 +58,9 @@ cluster_logging: true
# Turn to false to disable cluster monitoring with heapster and influxdb
cluster_monitoring: true
# Turn to false to disable the kube-ui addon for this cluster
kube_ui: false
# Turn this variable to 'false' to disable the whole DNS configuration.
dns_setup: true
# How many replicas in the Replication Controller

View File

@ -27,6 +27,14 @@
has_rpm: true
when: s.stat.exists
- name: Init the has_firewalld fact
set_fact:
has_firewalld: false
- name: Init the has_iptables fact
set_fact:
has_iptables: false
# collect information about what packages are installed
- include: rpm.yml
when: has_rpm

View File

@ -5,10 +5,6 @@
changed_when: false
failed_when: false
- name: Set the has_firewalld fact
set_fact:
has_firewalld: true
@ -20,10 +16,6 @@
changed_when: false
failed_when: false
- name: Set the has_iptables fact
set_fact:
has_iptables: true

View File

@ -0,0 +1,10 @@
---
- name: DEBIAN | Make sure this is stretch or sid, jessie does not have docker
fail: msg="docker.io is only available in sid and stretch, https://wiki.debian.org/Docker"
when: ansible_lsb.codename != "stretch" and ansible_lsb.codename != "sid"
- name: DEBIAN | Install Docker
action: "{{ ansible_pkg_mgr }}"
args:
name: docker.io
state: latest

View File

@ -4,3 +4,4 @@
args:
name: docker
state: latest
when: not is_atomic

View File

@ -1,32 +1,51 @@
---
- include: debian-install.yml
when: ansible_distribution == "Debian"
- include: generic-install.yml
when: ansible_distribution != "Debian"
- name: Set docker config file directory
set_fact:
docker_config_dir: "/etc/sysconfig"
- name: Override docker config file directory for Debian
set_fact:
docker_config_dir: "/etc/default"
when: ansible_distribution == "Debian"
- name: Verify docker config files exist
file: path={{ docker_config_dir }}/{{ item }} state=touch
changed_when: false
with_items:
- docker
- docker-network
- name: Turn down docker logging
lineinfile: dest={{ docker_config_dir }}/docker regexp=^OPTIONS= line=OPTIONS="--selinux-enabled --log-level=warn"
notify:
- restart docker
- name: Install http_proxy into docker-network
lineinfile: dest={{ docker_config_dir }}/docker-network regexp=^HTTP_PROXY= line=HTTP_PROXY="{{ http_proxy }}"
when: http_proxy is defined
notify:
- restart docker
- name: Install https_proxy into docker-network
lineinfile: dest={{ docker_config_dir }}/docker-network regexp=^HTTPS_PROXY= line=HTTPS_PROXY="{{ https_proxy }}"
when: https_proxy is defined
notify:
- restart docker
- name: Install no-proxy into docker-network
lineinfile: dest={{ docker_config_dir }}/docker-network regexp=^NO_PROXY= line=NO_PROXY="{{ no_proxy }}"
when: no_proxy is defined
notify:
- restart docker
- name: Add any insecure registries to docker config
lineinfile: dest={{ docker_config_dir }}/docker regexp=^INSECURE_REGISTRY= line=INSECURE_REGISTRY='{% for reg in insecure_registrys %}--insecure-registry="{{ reg }}" {% endfor %}'
when: insecure_registrys is defined and insecure_registrys | length > 0
notify:
- restart docker
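For example, to trust a private registry (hostname hypothetical), the corresponding entry in `group_vars/all.yml` would be a list, which the template above expands into repeated `--insecure-registry` flags:
```yaml
insecure_registrys:
  - "registry.example.com:5000"
```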

View File

@ -0,0 +1,15 @@
[Unit]
Description=Etcd Server
After=network.target
[Service]
Type=simple
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
User=etcd
ExecStart=/usr/bin/etcd
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target

View File

@ -1,4 +1,7 @@
---
- name: reload systemd
command: systemctl --system daemon-reload
- name: restart etcd
service: name=etcd state=restarted
when: etcd_started.changed == false

View File

@ -13,6 +13,13 @@
notify:
- restart etcd
- name: Write etcd systemd unit file for Debian
copy: src=etcd.service dest=/etc/systemd/system
notify:
- reload systemd
- restart etcd
when: ansible_distribution == "Debian"
- name: Enable etcd
service: name=etcd enabled=yes

View File

@ -16,3 +16,5 @@
- name: start docker
service: name=docker state=started
# This might fail if docker isn't installed yet
ignore_errors: yes

View File

@ -1 +1,4 @@
kube_addons_dir: "{{ kube_config_dir }}/addons"
local_temp_addon_dir: /tmp/kubernetes/addons

View File

@ -47,16 +47,19 @@
KUBECTL=${TEST_KUBECTL:-} # substitute for tests
KUBECTL=${KUBECTL:-${KUBECTL_BIN:-}}
KUBECTL=${KUBECTL:-/usr/local/bin/kubectl}
if [[ ! -x ${KUBECTL} ]]; then
echo "ERROR: kubectl command (${KUBECTL}) not found or is not executable" 1>&2
exit 1
fi
# If an add-on definition is incorrect, or a definition has just disappeared
# from the local directory, the script will still keep on retrying.
# The script does not end until all retries are done, so
# one invalid manifest may block updates of other add-ons.
# Be careful how you set these parameters
NUM_TRIES=1 # will be updated based on input parameters
DELAY_AFTER_ERROR_SEC=${TEST_DELAY_AFTER_ERROR_SEC:=10}
# remember that you can't log from functions that print some output (because
# logs are also printed on stdout)
@ -112,9 +115,11 @@ except Exception, ex:
}
# $1 yaml file path
# returns a string of the form <namespace>/<name> (we call it nsnames)
function get-object-nsname-from-file() {
# prints to stdout, so log cannot be used
#WARNING: only yaml is supported
#addons that do not specify a namespace are assumed to be in "default".
cat $1 | python -c '''
try:
import pipes,sys,yaml
@ -125,7 +130,10 @@ try:
# Otherwise we are ignoring them (the update will not work anyway)
print "ERROR"
else:
print y["metadata"]["name"]
try:
print "%s/%s" % (y["metadata"]["namespace"], y["metadata"]["name"])
except Exception, ex:
print "default/%s" % y["metadata"]["name"]
except Exception, ex:
print "ERROR"
'''
@ -135,7 +143,7 @@ except Exception, ex:
# $2 addon type (e.g. ReplicationController)
# echoes the string with paths to files containing addon for the given type
# works only for yaml files (!) (ignores json files)
function get-addon-paths-from-disk() {
# prints to stdout, so log cannot be used
local -r addon_dir=$1
local -r obj_type=$2
@ -183,9 +191,10 @@ function run-until-success() {
}
# $1 object type
# returns a list of <namespace>/<name> pairs (nsnames)
function get-addon-nsnames-from-server() {
local -r obj_type=$1
"${KUBECTL}" get "${obj_type}" -o template -t "{{range.items}}{{.metadata.name}} {{end}}" --api-version=v1 -l kubernetes.io/cluster-service=true
"${KUBECTL}" get "${obj_type}" --all-namespaces -o template -t "{{range.items}}{{.metadata.namespace}}/{{.metadata.name}} {{end}}" --api-version=v1 -l kubernetes.io/cluster-service=true
}
# returns the characters after the last separator (including)
@ -227,36 +236,52 @@ function get-basename() {
function stop-object() {
local -r obj_type=$1
local -r namespace=$2
local -r obj_name=$3
log INFO "Stopping ${obj_type} ${namespace}/${obj_name}"
run-until-success "${KUBECTL} stop --namespace=${namespace} ${obj_type} ${obj_name}" ${NUM_TRIES} ${DELAY_AFTER_ERROR_SEC}
}
function create-object() {
local -r obj_type=$1
local -r file_path=$2
log INFO "Creating new ${obj_type} from file ${file_path}"
run-until-success "${KUBECTL} create -f ${file_path}" ${NUM_TRIES_FOR_CREATE} ${DELAY_AFTER_CREATE_ERROR_SEC}
local nsname_from_file
nsname_from_file=$(get-object-nsname-from-file ${file_path})
if [[ "${nsname_from_file}" == "ERROR" ]]; then
log INFO "Cannot read object name from ${file_path}. Ignoring"
return 1
fi
IFS='/' read namespace obj_name <<< "${nsname_from_file}"
log INFO "Creating new ${obj_type} from file ${file_path} in namespace ${namespace}, name: ${obj_name}"
# this will keep on failing if the ${file_path} disappeared in the meantime.
# Do not use too many retries.
run-until-success "${KUBECTL} create --namespace=${namespace} -f ${file_path}" ${NUM_TRIES} ${DELAY_AFTER_ERROR_SEC}
}
function update-object() {
local -r obj_type=$1
local -r namespace=$2
local -r obj_name=$3
local -r file_path=$4
log INFO "updating the ${obj_type} ${namespace}/${obj_name} with the new definition ${file_path}"
stop-object ${obj_type} ${namespace} ${obj_name}
create-object ${obj_type} ${file_path}
}
# deletes the objects from the server
# $1 object type
# $2 a list of object nsnames
function stop-objects() {
local -r obj_type=$1
local -r obj_nsnames=$2
local namespace
local obj_name
for nsname in ${obj_nsnames}; do
IFS='/' read namespace obj_name <<< "${nsname}"
stop-object ${obj_type} ${namespace} ${obj_name} &
done
}
@ -268,6 +293,12 @@ function create-objects() {
local -r file_paths=$2
local file_path
for file_path in ${file_paths}; do
# Remember that the file may have disappeared by now
# But we don't want to check it here because
# such race condition may always happen after
# we check it. Let's have the race
# condition happen a bit more often so that
# we see that our tests pass anyway.
create-object ${obj_type} ${file_path} &
done
}
@ -275,21 +306,27 @@ function create-objects() {
# updates objects
# $1 object type
# $2 a list of update specifications
# each update specification is a ';' separated pair: <object name>;<file path>
# each update specification is a ';' separated pair: <nsname>;<file path>
function update-objects() {
local -r obj_type=$1 # ignored
local -r update_spec=$2
local objdesc
local nsname
local obj_name
local namespace
for objdesc in ${update_spec}; do
IFS=';' read nsname file_path <<< "${objdesc}"
IFS='/' read namespace obj_name <<< "${nsname}"
update-object ${obj_type} ${namespace} ${obj_name} ${file_path} &
done
}
# Global variables set by function match-objects.
for_delete="" # a list of object names to be deleted
for_update="" # a list of pairs <obj_name>;<filePath> for objects that should be updated
for_ignore="" # a list of object nanes that can be ignored
nsnames_for_delete="" # a list of object nsnames to be deleted
for_update="" # a list of pairs <nsname>;<filePath> for objects that should be updated
nsnames_for_ignore="" # a list of object nsnames that will be ignored
new_files="" # a list of file paths that weren't matched by any existing objects (these objects must be created now)
@ -302,32 +339,36 @@ function match-objects() {
local -r separator=$3
# output variables (globals)
for_delete=""
nsnames_for_delete=""
for_update=""
for_ignore=""
nsnames_for_ignore=""
new_files=""
addon_nsnames_on_server=$(get-addon-nsnames-from-server "${obj_type}")
addon_paths_in_files=$(get-addon-paths-from-disk "${addon_dir}" "${obj_type}")
log DB2 "addon_names_on_server=${addon_names_on_server}"
log DB2 "addon_nsnames_on_server=${addon_nsnames_on_server}"
log DB2 "addon_paths_in_files=${addon_paths_in_files}"
local matched_files=""
local basensname_on_server=""
local nsname_on_server=""
local suffix_on_server=""
local nsname_from_file=""
local suffix_from_file=""
local found=0
local addon_path=""
# objects that were moved between namespaces will have different nsname
# because the namespace is included. So they will be treated
# like different objects and not updated but deleted and created again
# (in the current version update is also delete+create, so it does not matter)
for nsname_on_server in ${addon_nsnames_on_server}; do
basensname_on_server=$(get-basename ${nsname_on_server} ${separator})
suffix_on_server="$(get-suffix ${nsname_on_server} ${separator})"
log DB3 "Found existing addon ${name_on_server}, basename=${basename_on_server}"
log DB3 "Found existing addon ${nsname_on_server}, basename=${basensname_on_server}"
# check if the addon is present in the directory and decide
# what to do with it
@ -335,36 +376,41 @@ function match-objects() {
# again. But for small number of addons it doesn't matter so much.
found=0
for addon_path in ${addon_paths_in_files}; do
nsname_from_file=$(get-object-nsname-from-file ${addon_path})
if [[ "${nsname_from_file}" == "ERROR" ]]; then
log INFO "Cannot read object name from ${addon_path}. Ignoring"
continue
else
log DB2 "Found object name '${name_from_file}' in file ${addon_path}"
log DB2 "Found object name '${nsname_from_file}' in file ${addon_path}"
fi
suffix_from_file="$(get-suffix ${name_from_file} ${separator})"
suffix_from_file="$(get-suffix ${nsname_from_file} ${separator})"
log DB3 "matching: ${basename_on_server}${suffix_from_file} == ${name_from_file}"
if [[ "${basename_on_server}${suffix_from_file}" == "${name_from_file}" ]]; then
log DB3 "matched existing ${obj_type} ${name_on_server} to file ${addon_path}; suffix_on_server=${suffix_on_server}, suffix_from_file=${suffix_from_file}"
log DB3 "matching: ${basensname_on_server}${suffix_from_file} == ${nsname_from_file}"
if [[ "${basensname_on_server}${suffix_from_file}" == "${nsname_from_file}" ]]; then
log DB3 "matched existing ${obj_type} ${nsname_on_server} to file ${addon_path}; suffix_on_server=${suffix_on_server}, suffix_from_file=${suffix_from_file}"
found=1
matched_files="${matched_files} ${addon_path}"
if [[ "${suffix_on_server}" == "${suffix_from_file}" ]]; then
for_ignore="${for_ignore} ${name_from_file}"
nsnames_for_ignore="${nsnames_for_ignore} ${nsname_from_file}"
else
for_update="${for_update} ${name_on_server};${addon_path}"
for_update="${for_update} ${nsname_on_server};${addon_path}"
fi
break
fi
done
if [[ ${found} -eq 0 ]]; then
log DB2 "No definition file found for replication controller ${name_on_server}. Scheduling for deletion"
for_delete="${for_delete} ${name_on_server}"
log DB2 "No definition file found for replication controller ${nsname_on_server}. Scheduling for deletion"
nsnames_for_delete="${nsnames_for_delete} ${nsname_on_server}"
fi
done
log DB3 "matched_files=${matched_files}"
# note that if the addon file is invalid (or got removed after listing files
# but before we managed to match it) it will not be matched to any
# of the existing objects. So we will treat it as a new file
# and try to create its object.
for addon_path in ${addon_paths_in_files}; do
echo ${matched_files} | grep "${addon_path}" >/dev/null
if [[ $? -ne 0 ]]; then
@ -381,12 +427,12 @@ function reconcile-objects() {
local -r separator=$3 # name separator
match-objects ${addon_path} ${obj_type} ${separator}
log DBG "${obj_type}: for_delete=${for_delete}"
log DBG "${obj_type}: nsnames_for_delete=${nsnames_for_delete}"
log DBG "${obj_type}: for_update=${for_update}"
log DBG "${obj_type}: for_ignore=${for_ignore}"
log DBG "${obj_type}: nsnames_for_ignore=${nsnames_for_ignore}"
log DBG "${obj_type}: new_files=${new_files}"
stop-objects "${obj_type}" "${for_delete}"
stop-objects "${obj_type}" "${nsnames_for_delete}"
# wait for jobs below is a protection against changing the basename
# of a replication controller without changing the selector.
# If we don't wait, the new rc may be created before the old one is deleted
@ -400,9 +446,9 @@ function reconcile-objects() {
create-objects "${obj_type}" "${new_files}"
update-objects "${obj_type}" "${for_update}"
local nsname
for nsname in ${nsnames_for_ignore}; do
log DB2 "The ${obj_type} ${nsname} is already up to date"
done
wait-for-jobs
@ -435,11 +481,21 @@ function update-addons() {
fi
}
# input parameters:
# $1 input directory
# $2 retry period in seconds - the script will retry api-server errors for approximately
# this amount of time (it is not very precise), at intervals equal to $DELAY_AFTER_ERROR_SEC.
#
if [[ $# -ne 2 ]]; then
echo "Illegal number of parameters. Usage $0 addon-dir [retry-period]" 1>&2
exit 1
fi
NUM_TRIES=$(($2 / ${DELAY_AFTER_ERROR_SEC}))
if [[ ${NUM_TRIES} -le 0 ]]; then
NUM_TRIES=1
fi
addon_path=$1
update-addons ${addon_path}
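For reference, a sketch of the new two-argument invocation; it matches how kube-addons.sh calls this script later in this PR, where the retry period passed is ADDON_CHECK_INTERVAL_SEC (600 seconds by default):
```sh
# retry apiserver failures for roughly 600 seconds,
# waiting DELAY_AFTER_ERROR_SEC (10s) between attempts
./kube-addon-update.sh /etc/kubernetes/addons 600
```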

View File

@ -21,6 +21,8 @@ KUBECTL=${KUBECTL_BIN:-/usr/local/bin/kubectl}
ADDON_CHECK_INTERVAL_SEC=${TEST_ADDON_CHECK_INTERVAL_SEC:-600}
SYSTEM_NAMESPACE=kube-system
token_dir=${TOKEN_DIR:-/srv/kubernetes}
function create-kubeconfig-secret() {
@ -49,6 +51,7 @@ contexts:
- context:
cluster: local
user: ${username}
namespace: ${SYSTEM_NAMESPACE}
name: service-account-context
current-context: service-account-context
EOF
@ -69,6 +72,7 @@ contexts:
- context:
cluster: local
user: ${username}
namespace: ${SYSTEM_NAMESPACE}
name: service-account-context
current-context: service-account-context
EOF
@ -84,36 +88,39 @@ metadata:
name: token-${safe_username}
type: Opaque
EOF
create-resource-from-string "${secretyaml}" 100 10 "Secret-for-token-for-user-${username}" &
# TODO: label the secrets with special label so kubectl does not show these?
create-resource-from-string "${secretyaml}" 100 10 "Secret-for-token-for-user-${username}" "${SYSTEM_NAMESPACE}" &
}
# $1 filename of addon to start.
# $2 count of tries to start the addon.
# $3 delay in seconds between two consecutive tries
# $4 namespace
function start_addon() {
local -r addon_filename=$1;
local -r tries=$2;
local -r delay=$3;
local -r namespace=$4
create-resource-from-string "$(cat ${addon_filename})" "${tries}" "${delay}" "${addon_filename}"
create-resource-from-string "$(cat ${addon_filename})" "${tries}" "${delay}" "${addon_filename}" "${namespace}"
}
# $1 string with json or yaml.
# $2 count of tries to start the addon.
# $3 delay in seconds between two consecutive tries
# $4 name of this object to use when logging about it.
# $5 namespace for this object
function create-resource-from-string() {
local -r config_string=$1;
local tries=$2;
local -r delay=$3;
local -r config_name=$4;
local -r namespace=$5;
while [ ${tries} -gt 0 ]; do
echo "${config_string}" | ${KUBECTL} create -f - && \
echo "== Successfully started ${config_name} at $(date -Is)" && \
echo "${config_string}" | ${KUBECTL} --namespace="${namespace}" create -f - && \
echo "== Successfully started ${config_name} in namespace ${namespace} at $(date -Is)" && \
return 0;
let tries=tries-1;
echo "== Failed to start ${config_name} at $(date -Is). ${tries} tries remaining. =="
echo "== Failed to start ${config_name} in namespace ${namespace} at $(date -Is). ${tries} tries remaining. =="
sleep ${delay};
done
return 1;
@ -136,6 +143,18 @@ for k,v in yaml.load(sys.stdin).iteritems():
''' < "${kube_env_yaml}")
fi
# Create the namespace that will be used to host the cluster-level add-ons.
start_addon /etc/kubernetes/addons/namespace.yaml 100 10 "" &
# Wait for the default service account to be created in the kube-system namespace.
token_found=""
while [ -z "${token_found}" ]; do
sleep .5
token_found=$(${KUBECTL} get --namespace="${SYSTEM_NAMESPACE}" serviceaccount default -o template -t "{{with index .secrets 0}}{{.name}}{{end}}" || true)
done
echo "== default service account in the ${SYSTEM_NAMESPACE} namespace has token ${token_found} =="
# Generate secrets for "internal service accounts".
# TODO(etune): move to a completely yaml/object based
# workflow so that service accounts can be created
@ -143,6 +162,7 @@ fi
# NOTE: needs to run as root to read this file.
# Read each line in the csv file of tokens.
# Expect errors when the script is started again.
# NOTE: secrets are created asynchronously, in background.
while read line; do
# Split each line into the token and username.
IFS=',' read -a parts <<< "${line}"
@ -154,25 +174,29 @@ while read line; do
else
# Set the server to https://kubernetes. Pods/components that
# do not have DNS available will have to override the server.
create-kubeconfig-secret "${token}" "${username}" "https://kubernetes"
create-kubeconfig-secret "${token}" "${username}" "https://kubernetes.default"
fi
done < ${token_dir}/known_tokens.csv
done < "${token_dir}/known_tokens.csv"
# Create admission_control objects if defined before any other addon services. If the limits
# are defined in a namespace other than default, we should still create the limits for the
# default namespace.
for obj in $(find /etc/kubernetes/admission-controls \( -name \*.yaml -o -name \*.json \)); do
start_addon ${obj} 100 10 &
start_addon ${obj} 100 10 default &
echo "++ obj ${obj} is created ++"
done
# Check if the configuration has changed recently - in case the user
# created/updated/deleted the files on the master.
while true; do
start_sec=$(date +"%s")
#kube-addon-update.sh must be deployed in the same directory as this file
`dirname $0`/kube-addon-update.sh /etc/kubernetes/addons ${ADDON_CHECK_INTERVAL_SEC}
end_sec=$(date +"%s")
len_sec=$((${end_sec}-${start_sec}))
# subtract the time passed from the sleep time
if [[ ${len_sec} -lt ${ADDON_CHECK_INTERVAL_SEC} ]]; then
sleep_time=$((${ADDON_CHECK_INTERVAL_SEC}-${len_sec}))
sleep ${sleep_time}
fi
done

View File

@ -1,6 +1,6 @@
---
- name: reload and restart kube-addons
command: systemctl --system daemon-reload
notify:
- restart kube-addons

View File

@ -1,11 +1,11 @@
---
- name: LOGGING | Assures {{ kube_addons_dir }}/cluster-logging dir exists
file: path={{ kube_addons_dir }}/cluster-logging state=directory
- name: LOGGING | Download logging files from Kubernetes repo
get_url:
url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/addons/fluentd-elasticsearch/{{ item }}
dest="{{ kube_config_dir }}/addons/cluster-logging/"
dest="{{ kube_addons_dir }}/cluster-logging/"
force=yes
with_items:
- es-controller.yaml

View File

@ -1,11 +1,11 @@
---
- name: MONITORING | Assures {{ kube_addons_dir }}/cluster-monitoring dir exists
file: path={{ kube_addons_dir }}/cluster-monitoring state=directory
- name: MONITORING | Download monitoring files from Kubernetes repo
get_url:
url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/addons/cluster-monitoring/influxdb/{{ item }}
dest="{{ kube_config_dir }}/addons/cluster-monitoring/"
dest="{{ kube_addons_dir }}/cluster-monitoring/"
force=yes
with_items:
- grafana-service.yaml

View File

@ -1,6 +1,6 @@
---
- name: DNS | Assures {{ kube_addons_dir }}/dns dir exists
file: path={{ kube_addons_dir }}/dns state=directory
- name: DNS | Assures local dns addon dir exists
local_action: file
@ -26,7 +26,7 @@
template:
args:
src: "{{ local_temp_addon_dir }}/dns/skydns-rc.yaml.j2"
dest: "{{ kube_config_dir }}/addons/dns/skydns-rc.yaml"
dest: "{{ kube_addons_dir }}/dns/skydns-rc.yaml"
mode: 0755
owner: root
group: root
@ -49,7 +49,7 @@
template:
args:
src: "{{ local_temp_addon_dir }}/dns/skydns-svc.yaml.j2"
dest: "{{ kube_config_dir }}/addons/dns/skydns-svc.yaml"
dest: "{{ kube_addons_dir }}/dns/skydns-svc.yaml"
mode: 0755
owner: root
group: root

View File

@ -1,5 +1,14 @@
- name: Set pyyaml package name
set_fact:
pyyaml_name: python-yaml
- name: Override pyyaml package name for non-Debian
set_fact:
pyyaml_name: PyYAML
when: ansible_distribution != "Debian"
- name: Install PyYAML
action: "{{ ansible_pkg_mgr }}"
args:
name: "{{ pyyaml_name }}"
state: latest

View File

@ -0,0 +1,12 @@
---
- name: KUBE-UI | Assures {{ kube_addons_dir }}/kube-ui dir exists
file: path={{ kube_addons_dir }}/kube-ui state=directory
- name: KUBE-UI | Download kube-ui files from Kubernetes repo
get_url:
url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/addons/kube-ui/{{ item }}
dest="{{ kube_addons_dir }}/kube-ui/"
force=yes
with_items:
- kube-ui-rc.yaml
- kube-ui-svc.yaml

View File

@ -1,6 +1,6 @@
---
- name: Assures addons dir exists
file: path={{ kube_addons_dir }} state=directory
- include: generic-install.yml
when: not is_atomic
@ -11,6 +11,12 @@
state=directory
sudo: no
- name: Make sure the system services namespace exists
get_url:
url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/kube-addons/namespace.yaml
dest="{{ kube_config_dir }}/addons/"
force=yes
- include: dns.yml
when: dns_setup
@ -20,6 +26,9 @@
- include: cluster-logging.yml
when: cluster_logging
- include: kube-ui.yml
when: kube_ui
#- name: Get kube-addons script from Kubernetes
# get_url:
# url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/kube-addons/kube-addons.sh

View File

@ -1,3 +1,6 @@
# The port that the Kubernetes apiserver component listens on.
kube_master_api_port: 443
# This directory is where all the additional scripts go
# that Kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location
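Since the kubeconfig templates, firewall rules, and client flags below all reference this variable, moving the apiserver off 443 becomes a one-line override in `group_vars/all.yml` (6443 is only an example value):
```yaml
kube_master_api_port: 6443
```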

View File

@ -20,9 +20,9 @@ set -o pipefail
# Caller should set in the env:
# MASTER_IP - this may be an ip or things like "_use_gce_external_ip_"
# MASTER_NAME - DNS name for the master
# DNS_DOMAIN - which will be passed to minions in --cluster_domain
# SERVICE_CLUSTER_IP_RANGE - where all service IPs are allocated
# Also the following will be respected
# CERT_DIR - where to place the finished certs

View File

@ -1,4 +1,13 @@
---
- name: Install openssl for easy-rsa stuff
action: "{{ ansible_pkg_mgr }}"
args:
name: "{{ item }}"
state: latest
with_items:
- openssl
- curl
#- name: Get create ca cert script from Kubernetes
# get_url:
# url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/generate-cert/make-ca-cert.sh
@ -15,12 +24,12 @@
# FIXME This only generates a cert for one master...
- name: Run create cert script on master
command:
"{{ kube_script_dir }}/make-ca-cert.sh {{ inventory_hostname }}"
"{{ kube_script_dir }}/make-ca-cert.sh"
args:
creates: "{{ kube_cert_dir }}/server.crt"
environment:
MASTER_IP: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
MASTER_NAME: "kubernetes"
MASTER_NAME: "{{ inventory_hostname }}"
DNS_DOMAIN: "{{ dns_domain }}"
SERVICE_CLUSTER_IP_RANGE: "{{ kube_service_addresses }}"
CERT_DIR: "{{ kube_cert_dir }}"

View File

@ -20,4 +20,4 @@ KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow_privileged=true"
# How the replication controller, scheduler, and proxy find the apiserver
KUBE_MASTER="--master=https://{{ groups['masters'][0] }}:{{ kube_master_api_port }}"

View File

@ -1,4 +1,9 @@
---
- name: reload systemd
command: systemctl --system daemon-reload
notify:
- restart daemons
- name: restart daemons
command: /bin/true
notify:

View File

@ -1,10 +1,10 @@
---
- name: Open firewalld port for apiserver
firewalld: port={{ kube_master_api_port }}/tcp permanent=false state=enabled
# in case this is also a node with firewalld turned off
ignore_errors: yes
- name: Save firewalld port for apiserver
firewalld: port={{ kube_master_api_port }}/tcp permanent=true state=enabled
# in case this is also a node with firewalld turned off
ignore_errors: yes

View File

@ -5,7 +5,7 @@
always_run: yes
- name: Open apiserver port with iptables
command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ kube_master_api_port }} -j ACCEPT -m comment --comment "kube-apiserver"
when: "'kube-apiserver' not in iptablesrules.stdout"
notify:
- restart iptables

View File

@ -0,0 +1,30 @@
---
- name: Copy master binaries
copy:
src: ../../_output/local/go/bin/{{ item }}
dest: /usr/bin/
mode: 0755
with_items:
- kube-apiserver
- kube-scheduler
- kube-controller-manager
- kubectl
notify: restart daemons
- name: Copy master service files
copy:
src: ../init/systemd/{{ item }}
dest: /etc/systemd/system/
mode: 0644
with_items:
- kube-apiserver.service
- kube-scheduler.service
- kube-controller-manager.service
notify: reload systemd
- name: Copy systemd tmpfile for apiserver
copy:
src: ../init/systemd/tmpfiles.d/
dest: /etc/tmpfiles.d/
mode: 0644
notify: reload systemd

View File

@ -1,9 +1,13 @@
---
- include: packageManagerInstall.yml
when: source_type == "packageManager"
tags:
- binary-update
- include: localBuildInstall.yml
when: source_type == "localBuild"
tags:
- binary-update
- name: write the config file for the api server
template: src=apiserver.j2 dest={{ kube_config_dir }}/apiserver
@ -21,7 +25,7 @@
- name: Enable apiserver
service: name=kube-apiserver enabled=yes state=started
- name: Get the master token values
slurp:
src: "{{ kube_token_dir }}/{{ item }}-{{ inventory_hostname }}.token"
with_items:

View File

@ -0,0 +1,23 @@
---
# did_install defaults to false so the final 'when: not did_install' check
# below is always defined, even on distros with no specific installer
- name: Init the did_install fact
set_fact:
did_install: false
- include: pkgMgrInstallers/centos-install.yml
when: ansible_distribution == "CentOS"
- name: Set fact saying we did CentOS package install
set_fact:
did_install: true
when: ansible_distribution == "CentOS"
- include: pkgMgrInstallers/fedora-install.yml
when: ansible_distribution == "Fedora" and ansible_distribution_release != "Rawhide"
- name: Set fact saying we did Fedora package install
set_fact:
did_install: true
when: ansible_distribution == "Fedora" and ansible_distribution_release != "Rawhide"
- include: pkgMgrInstallers/generic-install.yml
when: not did_install

View File

@ -6,3 +6,4 @@
enablerepo=virt7-docker-common-candidate
notify:
- restart daemons
when: not is_atomic

View File

@ -0,0 +1,9 @@
- name: Fedora | Install kubernetes master
action: "{{ ansible_pkg_mgr }}"
args:
name: kubernetes-master
state: latest
enablerepo: "updates-testing"
notify:
- restart daemons
when: not is_atomic

View File

@ -1,7 +1,8 @@
- name: Generic | Install kubernetes master
action: "{{ ansible_pkg_mgr }}"
args:
name: kubernetes-master
state: latest
notify:
- restart daemons
when: not is_atomic

View File

@ -8,7 +8,7 @@
KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1"
# The port on the local server to listen on.
KUBE_API_PORT="--secure-port=443"
KUBE_API_PORT="--secure-port={{ kube_master_api_port }}"
# Port nodes listen on
# KUBELET_PORT="--kubelet_port=10250"

View File

@ -5,7 +5,7 @@ preferences: {}
clusters:
- cluster:
certificate-authority: {{ kube_cert_dir }}/ca.crt
server: https://{{ groups['masters'][0] }}:{{ kube_master_api_port }}
name: {{ cluster_name }}
contexts:
- context:

View File

@ -5,7 +5,7 @@ preferences: {}
clusters:
- cluster:
certificate-authority-data: {{ kube_ca_cert|b64encode }}
server: https://{{ groups['masters'][0] }}:{{ kube_master_api_port }}
name: {{ cluster_name }}
contexts:
- context:

View File

@ -5,7 +5,7 @@ preferences: {}
clusters:
- cluster:
certificate-authority: {{ kube_cert_dir }}/ca.crt
server: https://{{ groups['masters'][0] }}:{{ kube_master_api_port }}
name: {{ cluster_name }}
contexts:
- context:

View File

@ -1,4 +1,9 @@
---
- name: reload systemd
command: systemctl --system daemon-reload
notify:
- restart daemons
- name: restart daemons
command: /bin/true
notify:

View File

@ -0,0 +1,27 @@
---
- name: Copy node binaries
copy:
src: ../../_output/local/go/bin/{{ item }}
dest: /usr/bin/
mode: 0755
with_items:
- kubelet
- kube-proxy
- kubectl
notify:
- restart daemons
- name: Copy node service files
copy:
src: ../init/systemd/{{ item }}
dest: /etc/systemd/system/
mode: 0644
with_items:
- kube-proxy.service
- kubelet.service
notify: reload systemd
- name: Create the /var/lib/kubelet working directory
file:
path: /var/lib/kubelet
state: directory

View File

@ -3,16 +3,21 @@
command: getenforce
register: selinux
changed_when: false
when: ansible_selinux
- name: Set selinux permissive because tokens and selinux don't work together
selinux: state=permissive policy=targeted
when: "'Enforcing' in selinux.stdout"
when: ansible_selinux and 'Enforcing' in selinux.stdout
- include: packageManagerInstall.yml
when: source_type == "packageManager"
tags:
- binary-update
- include: localBuildInstall.yml
when: source_type == "localBuild"
tags:
- binary-update
- name: Make sure manifest directory exists
file: path={{ kube_manifest_dir }} state=directory

View File

@ -0,0 +1,23 @@
---
# did_install defaults to false so the final 'when: not did_install' check
# below is always defined, even on distros with no specific installer
- name: Init the did_install fact
set_fact:
did_install: false
- include: pkgMgrInstallers/centos-install.yml
when: ansible_distribution == "CentOS"
- name: Set fact saying we did CentOS package install
set_fact:
did_install: true
when: ansible_distribution == "CentOS"
- include: pkgMgrInstallers/fedora-install.yml
when: ansible_distribution == "Fedora" and ansible_distribution_release != "Rawhide"
- name: Set fact saying we did Fedora package install
set_fact:
did_install: true
when: ansible_distribution == "Fedora" and ansible_distribution_release != "Rawhide"
- include: pkgMgrInstallers/generic-install.yml
when: not did_install

View File

@ -6,3 +6,4 @@
enablerepo=virt7-docker-common-candidate
notify:
- restart daemons
when: not is_atomic

View File

@ -0,0 +1,9 @@
- name: Fedora | Install kubernetes node
action: "{{ ansible_pkg_mgr }}"
args:
name: kubernetes-node
state: latest
enablerepo: "updates-testing"
notify:
- restart daemons
when: not is_atomic

View File

@ -1,7 +1,8 @@
- name: Generic | Install kubernetes node
action: "{{ ansible_pkg_mgr }}"
args:
name: kubernetes-node
state: latest
notify:
- restart daemons
when: not is_atomic

View File

@ -11,7 +11,7 @@ KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_HOSTNAME="--hostname_override={{ inventory_hostname }}"
# location of the api-server
KUBELET_API_SERVER="--api_servers=https://{{ groups['masters'][0] }}:{{ kube_master_api_port }}"
# Add your own!
{% if dns_setup %}

View File

@ -5,7 +5,7 @@ preferences: {}
clusters:
- cluster:
certificate-authority: {{ kube_cert_dir }}/ca.crt
server: https://{{ groups['masters'][0] }}:{{ kube_master_api_port }}
name: {{ cluster_name }}
contexts:
- context:

View File

@ -10,7 +10,7 @@ contexts:
clusters:
- cluster:
certificate-authority: {{ kube_cert_dir }}/ca.crt
server: https://{{ groups['masters'][0] }}:{{ kube_master_api_port }}
name: {{ cluster_name }}
users:
- name: proxy

contrib/ansible/vagrant/.gitignore
View File

@ -0,0 +1 @@
openstack_config.yml

View File

@ -0,0 +1,65 @@
## Vagrant deployer for Kubernetes Ansible
This deployer sets up a vagrant cluster and installs kubernetes with flannel on it.
## Before you start!
You will need a functioning vagrant provider. Currently supported are OpenStack, libvirt, and VirtualBox.
## Usage
In general all that should be needed is to run
```
vagrant up
```
If you export an env variable such as
```
export NUM_NODES=4
```
The system will create that number of nodes. Default is 2.
## Provider Specific Information
Vagrant tries to be intelligent and pick the first provider supported by your installation. If you want to specify a provider you can do so by running vagrant like so:
```
vagrant up --provider=openstack
```
### OpenStack
Make sure to install the openstack provider for vagrant.
```
vagrant plugin install vagrant-openstack-provider --plugin-version ">= 0.6.1"
```
NOTE: This is a more up-to-date provider than the similar `vagrant-openstack-plugin`.
Also note that current (required) versions of `vagrant-openstack-provider` are not compatible with ruby 2.2:
https://github.com/ggiamarchi/vagrant-openstack-provider/pull/237
So make sure you get at least version 0.6.1.
To use the vagrant openstack provider you will need to:
- Copy `openstack_config.yml.example` to `openstack_config.yml`
- Edit `openstack_config.yml` to include your relevant details.
Vagrant (1.7.2) does not seem to ever want to pick openstack as the provider on its own, so you will need to tell it to use openstack explicitly.
### Libvirt
The libvirt vagrant provider is non-deterministic when launching VMs. This is a problem, as we need ansible to run only after all of the VMs are running. To solve this when using libvirt, one must
do the following
```
vagrant up --no-provision
vagrant provision
```
### VirtualBox
Nothing special with VirtualBox. Hopefully `vagrant up` just works.
## Random Information
If you just want to update the binaries on your systems (either packageManager or localBuild source_type) you can do so using the ansible binary-update tag. To do so with vagrant provision you would need to run
```
ANSIBLE_TAGS="binary-update" vagrant provision
```
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/contrib/ansible/vagrant/README.md?pixel)]()

View File

@ -1,46 +1,142 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
require "yaml"
### This is a new provider, different than cloudbau's.
### RUN: "vagrant plugin uninstall vagrant-openstack-plugin"
### Then RUN: "vagrant plugin install vagrant-openstack-provider"
require 'vagrant-openstack-provider'
$num_nodes = (ENV['NUM_NODES'] || 2).to_i
ansible_tags = ENV['ANSIBLE_TAGS']
VAGRANTFILE_API_VERSION = "2"
# Openstack providers are best used with latest versions.
Vagrant.require_version ">= 1.7"
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# By default, Vagrant 1.7+ automatically inserts a different
# insecure keypair for each new VM created. The easiest way
# to use the same keypair for all the machines is to disable
# this feature and rely on the legacy insecure key.
config.ssh.insert_key = false
# This explicitly sets the order that vagrant will use by default if no --provider given
config.vm.provider "openstack"
config.vm.provider "libvirt"
config.vm.provider "virtualbox"
def set_openstack(os, config, n)
# common config
config.vm.box = "dummy"
config.vm.box_url = "https://github.com/cloudbau/vagrant-openstack-plugin/raw/master/dummy.box"
# this crap is to make it not fail if the file doesn't exist (which is ok if we are using a different provisioner)
__filename = File.join(File.dirname(__FILE__), "openstack_config.yml")
if File.exist?(__filename)
_config = YAML.load(File.open(__filename, File::RDONLY).read)
else
_config = Hash.new("")
_config['security_group'] = []
end
config.ssh.username = "fedora"
config.ssh.private_key_path = "~/.ssh/id_rsa"
config.vm.boot_timeout = 60*10
### The below parameters need to be modified per your openstack instance.
os.username = _config['os_username']
os.password = _config['os_password']
os.tenant_name = _config['os_tenant']
os.keypair_name = _config['os_ssh_key_name']
os.openstack_auth_url = _config['os_auth_url']
os.region = _config['os_region_name']
os.floating_ip_pool = _config['os_floating_ip_pool']
os.flavor = _config['os_flavor']
os.image = _config['os_image']
os.security_groups = _config['os_security_groups']
os.server_name = n.vm.hostname
end
def set_vbox(vb, config)
config.vm.box = "chef/centos-7.0"
# config.vm.network "public_network"
config.vm.define "master", primary: true do |master|
master.vm.hostname = "master.vms.local"
master.vm.network "private_network", ip: "192.168.1.100"
end
(1..1).each do |i|
config.vm.define "node-#{i}" do |node|
node.vm.hostname = "node-#{i}.vms.local"
node.vm.network "private_network", ip: "192.168.1.1#{i}"
node.vm.provision :ansible do |ansible|
ansible.host_key_checking = false
ansible.extra_vars = {
ansible_ssh_user: 'vagrant',
ansible_ssh_pass: 'vagrant',
user: 'vagrant'
}
#ansible.verbose = 'vvv'
ansible.playbook = "../cluster.yml"
ansible.inventory_path = "vinventory"
ansible.limit = 'all'
end
end
end
config.vm.provider "virtualbox" do |vb|
# Display the VirtualBox GUI when booting the machine
config.vm.network "private_network", type: "dhcp"
vb.gui = false
# Customize the amount of memory on the VM:
vb.memory = "2048"
# vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
vb.memory = 2048
vb.cpus = 2
# Use faster paravirtualized networking
vb.customize ["modifyvm", :id, "--nictype1", "virtio"]
vb.customize ["modifyvm", :id, "--nictype2", "virtio"]
end
def set_libvirt(lv, config)
config.vm.box = "kube-centos-7"
config.vm.box_url = "http://cloud.centos.org/centos/7/vagrant/x86_64/images/CentOS-7.LibVirt.box"
lv.memory = 2048
lv.cpus = 2
lv.nested = true
lv.volume_cache = 'none'
end
def set_provider(n)
n.vm.provider :openstack do |os, override|
set_openstack(os, override, n)
end
n.vm.provider :virtualbox do |vb, override|
set_vbox(vb, override)
end
n.vm.provider :libvirt do |lv, override|
set_libvirt(lv, override)
end
end
config.vm.synced_folder ".", "/vagrant", disabled: true
nodes = Array.new()
$num_nodes.times do |i|
# multi vm config
name = "kube-node-#{i+1}"
nodes.push(name)
config.vm.define "#{name}" do |n|
n.vm.hostname = name
set_provider(n)
end
end
# This is how we create the ansible inventory, see it in .vagrant
# if you want to debug, run 'VAGRANT_LOG=info vagrant up'
# and you'll see exactly how the cluster comes up via ansible inv.
groups = {
"etcd" => ["kube-master"],
"masters" => ["kube-master"],
"nodes" => nodes,
"all_groups:children" => ["etcd","masters","nodes"]
}
config.vm.define "kube-master" do |n|
name = "kube-master"
n.vm.hostname = name
set_provider(n)
if ansible_tags.nil?
# This sets up the vagrant hosts before we run the main playbook
# Today this just creates /etc/hosts so machines can talk via their
# 'internal' IPs instead of the openstack public ip.
n.vm.provision :ansible do |ansible|
ansible.groups = groups
ansible.playbook = "./vagrant-ansible.yml"
ansible.limit = "all" #otherwise the metadata wont be there for ipv4?
end
end
# This sets up both flannel and kube.
n.vm.provision :ansible do |ansible|
ansible.groups = groups
ansible.playbook = "../cluster.yml"
ansible.limit = "all" #otherwise the metadata wont be there for ipv4?
ansible.tags = ansible_tags
end
end
end
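For illustration, the `groups` hash above makes vagrant generate an ansible inventory under `.vagrant` with group sections like the following (per-host connection variables omitted; this is a sketch with NUM_NODES=2):
```
[etcd]
kube-master

[masters]
kube-master

[nodes]
kube-node-1
kube-node-2

[all_groups:children]
etcd
masters
nodes
```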

View File

@ -0,0 +1,12 @@
os_username: eparis
os_password: redhat
os_tenant: "RH US Business Group"
os_auth_url: "http://os1-public.osop.rhcloud.com:5000/v2.0"
os_region_name: "OS1Public"
os_ssh_key_name: "eparis"
os_flavor: "m1.small"
os_image: "Fedora 22 Cloud Base x86_64 (final)"
os_security_groups:
- "default"
#- some_other_group
os_floating_ip_pool: "os1_public"

View File

@ -0,0 +1,12 @@
os_username: eparis
os_password: password
os_tenant: "RH US Business Group"
os_auth_url: "http://os1-public.osop.rhcloud.com:5000/v2.0"
os_region_name: "OS1Public"
os_ssh_key_name: "eparis"
os_flavor: "m1.small"
os_image: "Fedora 22 Cloud Base x86_64 (final)"
os_security_groups:
- "default"
#- some_other_group
os_floating_ip_pool: "os1_public"

View File

@ -0,0 +1,11 @@
- hosts: all
sudo: yes
tasks:
- name: "Build hosts file"
lineinfile:
dest=/etc/hosts
regexp=".*{{ item }}$"
line="{{ hostvars[item].ansible_default_ipv4.address }} {{item}}"
state=present
when: hostvars[item].ansible_default_ipv4.address is defined
with_items: groups['all']
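The play above leaves each machine with one /etc/hosts line per host in the inventory, built from `ansible_default_ipv4.address`; illustratively (addresses are made up):
```
192.168.121.101 kube-master
192.168.121.102 kube-node-1
192.168.121.103 kube-node-2
```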

View File

@ -1,8 +0,0 @@
[masters]
192.168.1.100
[etcd]
192.168.1.100
[nodes]
192.168.1.11