Merge pull request #38227 from shyamjvs/kubemark-master-migrate

Automatic merge from submit-queue (batch tested with PRs 38609, 38227)

On kubemark master, kubelet now runs as a supervisord process and all master components as pods

This PR fixes issue #37485
On kubemark, we previously had a custom setup that ran the master components as top-level processes under supervisord, which is lighter-weight than running them through kubelet and docker.
This PR makes kubelet run as a process under supervisord and the master components (apiserver, controller-manager, scheduler, etcd) as pods, making testing on kubemark mimic real clusters better.
Also, start-kubemark-master.sh now closely resembles cluster/gce/gci/configure-helper.sh, allowing easy integration in future.

cc @kubernetes/sig-scalability @wojtek-t @gmarek
pull/6/head
Kubernetes Submit Queue 2016-12-13 02:12:34 -08:00 committed by GitHub
commit 0a0294cad6
9 changed files with 645 additions and 156 deletions

View File

@ -37,14 +37,14 @@
"resources": {
"requests": {
"cpu": "100m",
"memory": "##METRICS_MEM##Mi"
"memory": "{{METRICS_MEM}}Mi"
}
},
"command": [
"/heapster"
],
"args": [
"--source=kubernetes:https://##MASTER_IP##:443?inClusterConfig=0&useServiceAccount=0&auth=/kubeconfig/kubeconfig"
"--source=kubernetes:https://{{MASTER_IP}}:443?inClusterConfig=0&useServiceAccount=0&auth=/kubeconfig/kubeconfig"
],
"volumeMounts": [
{
@ -59,7 +59,7 @@
"resources": {
"requests": {
"cpu": "100m",
"memory": "##EVENTER_MEM##Ki"
"memory": "{{EVENTER_MEM}}Ki"
}
},
"command": [

View File

@ -8,7 +8,7 @@
}
},
"spec": {
"replicas": ##numreplicas##,
"replicas": {{numreplicas}},
"selector": {
"name": "hollow-node"
},
@ -36,7 +36,7 @@
"containers": [
{
"name": "hollow-kubelet",
"image": "gcr.io/##project##/kubemark:latest",
"image": "gcr.io/{{project}}/kubemark:latest",
"ports": [
{"containerPort": 4194},
{"containerPort": 10250},
@ -89,7 +89,7 @@
},
{
"name": "hollow-proxy",
"image": "gcr.io/##project##/kubemark:latest",
"image": "gcr.io/{{project}}/kubemark:latest",
"env": [
{
"name": "CONTENT_TYPE",

View File

@ -0,0 +1,50 @@
# Static pod manifest for the events etcd instance on the kubemark master.
# NOTE: {{placeholder}} tokens are substituted by sed in
# start-kubemark-master.sh before this file is copied to
# /etc/kubernetes/manifests, so the pre-substitution file is not valid YAML
# on its own (e.g. the unquoted {{...}} values).
apiVersion: v1
kind: Pod
metadata:
  name: etcd-server-events
  namespace: kube-system
spec:
  hostNetwork: true
  nodeName: {{instance_prefix}}-master
  containers:
  - name: etcd-container
    image: {{kube_docker_registry}}/etcd:{{etcd_image}}
    imagePullPolicy: IfNotPresent
    resources:
      requests:
        cpu: 100m
    command:
    - /bin/sh
    - -c
    - /usr/local/bin/etcd
      {{params}}
      1>>/var/log/etcd-events.log 2>&1
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /health
        port: 4002
        scheme: HTTP
      initialDelaySeconds: 15
      timeoutSeconds: 15
    ports:
    - name: serverport
      containerPort: 2381
      hostPort: 2381
      protocol: TCP
    - name: clientport
      containerPort: 4002
      hostPort: 4002
      protocol: TCP
    volumeMounts:
    - name: varetcd
      mountPath: /var/etcd
    - name: varlogetcd
      mountPath: /var/log/etcd-events.log
  volumes:
  - name: varetcd
    hostPath:
      path: /var/etcd/events
  - name: varlogetcd
    hostPath:
      path: /var/log/etcd-events.log

View File

@ -0,0 +1,49 @@
# Static pod manifest for the main etcd instance on the kubemark master.
# NOTE: {{placeholder}} tokens are substituted by sed in
# start-kubemark-master.sh before this file is copied to
# /etc/kubernetes/manifests.
apiVersion: v1
kind: Pod
metadata:
  name: etcd-server
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: etcd-container
    image: {{kube_docker_registry}}/etcd:{{etcd_image}}
    imagePullPolicy: IfNotPresent
    resources:
      requests:
        cpu: 200m
    command:
    - /bin/sh
    - -c
    - /usr/local/bin/etcd
      {{params}}
      1>>/var/log/etcd.log 2>&1
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /health
        port: 2379
        scheme: HTTP
      initialDelaySeconds: 15
      timeoutSeconds: 15
    ports:
    - name: serverport
      containerPort: 2380
      hostPort: 2380
      protocol: TCP
    - name: clientport
      containerPort: 2379
      hostPort: 2379
      protocol: TCP
    volumeMounts:
    - name: varetcd
      mountPath: /var/etcd
    - name: varlogetcd
      mountPath: /var/log/etcd.log
  volumes:
  - name: varetcd
    hostPath:
      path: /var/etcd
  - name: varlogetcd
    hostPath:
      path: /var/log/etcd.log

View File

@ -0,0 +1,67 @@
# Static pod manifest for kube-apiserver on the kubemark master.
# NOTE: {{placeholder}} tokens are substituted by sed in
# start-kubemark-master.sh before this file is copied to
# /etc/kubernetes/manifests.
apiVersion: v1
kind: Pod
metadata:
  name: kube-apiserver
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-apiserver
    image: {{kube_docker_registry}}/kube-apiserver:{{kube-apiserver_docker_tag}}
    imagePullPolicy: IfNotPresent
    resources:
      requests:
        cpu: 250m
    command:
    - /bin/sh
    - -c
    - /usr/local/bin/kube-apiserver
      {{params}}
      1>>/var/log/kube-apiserver.log 2>&1
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 8080
        scheme: HTTP
      initialDelaySeconds: 15
      timeoutSeconds: 15
    ports:
    - name: https
      containerPort: 443
      hostPort: 443
      protocol: TCP
    - name: local
      containerPort: 8080
      hostPort: 8080
      protocol: TCP
    volumeMounts:
    - name: srvkube
      mountPath: /srv/kubernetes
      readOnly: true
    - name: logfile
      mountPath: /var/log/kube-apiserver.log
    - name: etcssl
      mountPath: /etc/ssl
      readOnly: true
    - name: usrsharecacerts
      mountPath: /usr/share/ca-certificates
      readOnly: true
    - name: srvsshproxy
      mountPath: /srv/sshproxy
  volumes:
  - name: srvkube
    hostPath:
      path: /srv/kubernetes
  - name: logfile
    hostPath:
      path: /var/log/kube-apiserver.log
  - name: etcssl
    hostPath:
      path: /etc/ssl
  - name: usrsharecacerts
    hostPath:
      path: /usr/share/ca-certificates
  - name: srvsshproxy
    hostPath:
      path: /srv/sshproxy

View File

@ -0,0 +1,53 @@
# Static pod manifest for kube-controller-manager on the kubemark master.
# NOTE: {{placeholder}} tokens are substituted by sed in
# start-kubemark-master.sh before this file is copied to
# /etc/kubernetes/manifests.
apiVersion: v1
kind: Pod
metadata:
  name: kube-controller-manager
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-controller-manager
    image: {{kube_docker_registry}}/kube-controller-manager:{{kube-controller-manager_docker_tag}}
    imagePullPolicy: IfNotPresent
    resources:
      requests:
        cpu: 200m
    command:
    - /bin/sh
    - -c
    - /usr/local/bin/kube-controller-manager
      {{params}}
      1>>/var/log/kube-controller-manager.log 2>&1
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10252
        scheme: HTTP
      initialDelaySeconds: 15
      timeoutSeconds: 15
    volumeMounts:
    - name: srvkube
      mountPath: /srv/kubernetes
      readOnly: true
    - name: logfile
      mountPath: /var/log/kube-controller-manager.log
    - name: etcssl
      mountPath: /etc/ssl
      readOnly: true
    - name: usrsharecacerts
      mountPath: /usr/share/ca-certificates
      readOnly: true
  volumes:
  - name: srvkube
    hostPath:
      path: /srv/kubernetes
  - name: logfile
    hostPath:
      path: /var/log/kube-controller-manager.log
  - name: etcssl
    hostPath:
      path: /etc/ssl
  - name: usrsharecacerts
    hostPath:
      path: /usr/share/ca-certificates

View File

@ -0,0 +1,36 @@
# Static pod manifest for kube-scheduler on the kubemark master.
# NOTE: {{placeholder}} tokens are substituted by sed in
# start-kubemark-master.sh before this file is copied to
# /etc/kubernetes/manifests.
apiVersion: v1
kind: Pod
metadata:
  name: kube-scheduler
  namespace: kube-system
spec:
  hostNetwork: true
  nodeName: {{instance_prefix}}-master
  containers:
  - name: kube-scheduler
    image: {{kube_docker_registry}}/kube-scheduler:{{kube-scheduler_docker_tag}}
    imagePullPolicy: IfNotPresent
    resources:
      requests:
        cpu: 100m
    command:
    - /bin/sh
    - -c
    - /usr/local/bin/kube-scheduler
      {{params}}
      1>>/var/log/kube-scheduler.log 2>&1
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10251
        scheme: HTTP
      initialDelaySeconds: 15
      timeoutSeconds: 15
    volumeMounts:
    - name: logfile
      mountPath: /var/log/kube-scheduler.log
  volumes:
  - name: logfile
    hostPath:
      path: /var/log/kube-scheduler.log

View File

@ -14,143 +14,372 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: figure out how to get etcd tag from some real configuration and put it here.
# Script that starts kubelet on kubemark-master as a supervisord process
# and then runs the master components as pods using kubelet.
# Define key path variables.
KUBE_ROOT="/home/kubernetes"
KUBE_BINDIR="${KUBE_ROOT}/kubernetes/server/bin"
# Creates the directories kubelet and the static-pod manifests require.
# mkdir -p is idempotent, so re-running is safe.
function create-dirs {
  echo "Creating required directories"
  mkdir -p /var/lib/kubelet /etc/kubernetes/manifests
}
# Setup working directory for kubelet.
# Bind-mounts /var/lib/kubelet over itself and remounts that subtree with
# exec/suid/dev so kubelet can run binaries from it even if the parent
# filesystem is mounted noexec.
function setup-kubelet-dir {
echo "Making /var/lib/kubelet executable for kubelet"
# Self bind-mount so mount options can be changed for just this subtree.
mount -B /var/lib/kubelet /var/lib/kubelet/
mount -B -o remount,exec,suid,dev /var/lib/kubelet
}
# Removes any stock etcd configuration shipped with the image so it cannot
# conflict with the etcd static pods we start ourselves.
function delete-default-etcd-configs {
  if [[ -d /etc/etcd ]]; then
    rm -rf /etc/etcd
  fi
  local cfg
  for cfg in /etc/default/etcd /etc/systemd/system/etcd.service /etc/init.d/etcd; do
    if [[ -e "${cfg}" ]]; then
      rm -f "${cfg}"
    fi
  done
}
# Computes etcd-related globals: ETCD_IMAGE (defaulted to empty) and
# ETCD_QUOTA_BYTES (extra flag, set only for etcd v3.x).
function compute-etcd-variables {
  ETCD_IMAGE="${ETCD_IMAGE:-}"
  case "${ETCD_VERSION:0:2}" in
    # TODO: Set larger quota to see if that helps with
    # 'mvcc: database space exceeded' errors. If so, pipe
    # though our setup scripts.
    "3.") ETCD_QUOTA_BYTES=" --quota-backend-bytes=4294967296 " ;;
    *)    ETCD_QUOTA_BYTES="" ;;
  esac
}
# Formats the given device ($1) if needed and mounts it at given mount point
# ($2).
function safe-format-and-mount() {
device=$1
mountpoint=$2
# Format only if the disk is not already formatted.
# tune2fs -l fails for a device without an ext* filesystem, which is how we
# detect an unformatted disk here.
if ! tune2fs -l "${device}" ; then
echo "Formatting '${device}'"
# -F forces formatting without prompting; lazy init disabled so the disk is
# fully usable immediately.
mkfs.ext4 -F -E lazy_itable_init=0,lazy_journal_init=0,discard "${device}"
fi
mkdir -p "${mountpoint}"
echo "Mounting '${device}' at '${mountpoint}'"
mount -o discard,defaults "${device}" "${mountpoint}"
}
# Finds a PD device with name '$1' attached to the master.
# Echoes the resolved /dev/disk/by-id/ path, or the empty string when no
# such disk is attached.
function find-attached-pd() {
  local -r pd_name=$1
  if [[ ! -e /dev/disk/by-id/${pd_name} ]]; then
    echo ""
    # Bug fix: without this return we fell through and echoed a bogus
    # "/dev/disk/by-id/" path even though the disk is absent.
    return
  fi
  device_info=$(ls -l /dev/disk/by-id/${pd_name})
  relative_path=${device_info##* }
  echo "/dev/disk/by-id/${relative_path}"
}
# Mounts a persistent disk (formatting if needed) to store the persistent data
# on the master. safe-format-and-mount only formats an unformatted disk, and
# mkdir -p will leave a directory be if it already exists.
#
# $1: name of the PD to mount.
# $2: mount point for the PD.
function mount-pd() {
  local -r pd_name=$1
  local -r mount_point=$2
  # Bug fix: the original wrote "${find-attached-pd ${pd_name}}", which is
  # parameter expansion (of a variable that never exists), not command
  # substitution, so the attached-disk check never actually ran.
  if [[ -z "$(find-attached-pd "${pd_name}")" ]]; then
    echo "Can't find ${pd_name}. Skipping mount."
    return
  fi
  local -r pd_path="/dev/disk/by-id/${pd_name}"
  # Bug fix: pd_path was previously echoed one line before it was assigned,
  # printing an empty path.
  echo "Mounting PD '${pd_path}' at '${mount_point}'"
  # Format and mount the disk, create directories on it for all of the master's
  # persistent data, and link them to where they're used.
  mkdir -p "${mount_point}"
  safe-format-and-mount "${pd_path}" "${mount_point}"
  echo "Mounted PD '${pd_path}' at '${mount_point}'"
  # NOTE: These locations on the PD store persistent data, so to maintain
  # upgradeability, these locations should not change. If they do, take care
  # to maintain a migration path from these locations to whatever new
  # locations.
}
# Writes the docker daemon options to /etc/default/docker and restarts the
# docker service so they take effect. iptables/ip-masq are disabled because
# networking is not exercised on the kubemark master.
function assemble-docker-flags {
echo "Assemble docker command line flags"
local docker_opts="-p /var/run/docker.pid --iptables=false --ip-masq=false"
docker_opts+=" --log-level=debug" # Since it's a test cluster
# TODO(shyamjvs): Incorporate network plugin options, etc later.
# NOTE: '>' overwrites any existing /etc/default/docker.
echo "DOCKER_OPTS=\"${docker_opts}\"" > /etc/default/docker
echo "DOCKER_NOFILE=65536" >> /etc/default/docker # For setting ulimit -n
# Restart is required for the daemon to pick up the new options.
service docker restart
# TODO(shyamjvs): Make docker run through systemd/supervisord.
}
# A helper function for loading a docker image. It keeps trying up to 5 times.
#
# $1: Full path of the docker image
function try-load-docker-image {
local -r img=$1
echo "Try to load docker image file ${img}"
# Temporarily turn off errexit, because we don't want to exit on first failure.
set +e
local -r max_attempts=5
local -i attempt_num=1
# Each attempt is capped at 30 seconds via timeout(1).
until timeout 30 docker load -i "${img}"; do
if [[ "${attempt_num}" == "${max_attempts}" ]]; then
echo "Fail to load docker image file ${img} after ${max_attempts} retries. Exit!!"
# NOTE(review): exit (not return) aborts the whole script on final failure —
# appears intentional since the master cannot run without these images.
exit 1
else
attempt_num=$((attempt_num+1))
sleep 5
fi
done
# Re-enable errexit.
set -e
}
# Loads kube-system docker images. It is better to do it before starting kubelet,
# as kubelet will restart docker daemon, which may interfere with loading images.
function load-docker-images {
  echo "Start loading kube-system docker images"
  local component
  for component in kube-apiserver kube-controller-manager kube-scheduler; do
    try-load-docker-image "${KUBE_BINDIR}/${component}.tar"
  done
}
# Computes command line arguments to be passed to kubelet and echoes them.
# Honors KUBELET_TEST_ARGS (extra leading flags) and KUBELET_PORT from the
# environment when set.
function compute-kubelet-params {
  local flags="${KUBELET_TEST_ARGS:-}"
  flags="${flags} --allow-privileged=true"
  flags="${flags} --babysit-daemons=true"
  flags="${flags} --cgroup-root=/"
  flags="${flags} --cloud-provider=gce"
  # Directory kubelet watches for static pod manifests.
  flags="${flags} --config=/etc/kubernetes/manifests"
  if [[ -n "${KUBELET_PORT:-}" ]]; then
    flags="${flags} --port=${KUBELET_PORT}"
  fi
  flags="${flags} --enable-debugging-handlers=false"
  flags="${flags} --hairpin-mode=none"
  echo "${flags}"
}
# Creates the supervisord config file for kubelet from the exec_command ($1).
function create-kubelet-conf() {
local -r name="kubelet"
local exec_command="$1 "
exec_command+=$(compute-kubelet-params)
# Writes a supervisord program section for '$1' running command '$2' to
# /etc/supervisor/conf.d/<name>.conf, logging to /var/log/<name>.log.
#
# NOTE(review): 'cat >>' appends, so re-running this function duplicates the
# [program] section in the conf file — confirm whether overwrite ('>') was
# intended.
function write_supervisor_conf() {
local name=$1
local exec_command=$2
cat >>"/etc/supervisor/conf.d/${name}.conf" <<EOF
[program:${name}]
command=${exec_command}
stderr_logfile=/var/log/${name}.log
stdout_logfile=/var/log/${name}.log
logfile_maxbytes=500MB
autorestart=true
startretries=1000000
EOF
}
# Positional arguments supplied by the caller (start-kubemark.sh):
#   $1: IP of the events etcd store; $2: node count; $3: whether a separate
#   event PD is used; $4: etcd image version (may be empty).
# NOTE(review): this positional-argument block belongs to the pre-migration
# script flow — the newer flow sources kubemark-master-env.sh instead; confirm
# which invocation path is live before relying on these.
EVENT_STORE_IP=$1
EVENT_STORE_URL="http://${EVENT_STORE_IP}:4002"
NUM_NODES=$2
EVENT_PD=$3
# KUBEMARK_ETCD_IMAGE may be empty so it has to be kept as a last argument
KUBEMARK_ETCD_IMAGE=$4
if [[ -z "${KUBEMARK_ETCD_IMAGE}" ]]; then
# Default etcd version.
KUBEMARK_ETCD_IMAGE="2.2.1"
fi
# Runs "$@" up to five times, sleeping 1..4 seconds between the first four
# attempts. Returns 0 on the first success, otherwise the status of the
# final attempt.
function retry() {
  for i in 1 2 3 4; do
    if "$@"; then
      return 0
    fi
    sleep $i
  done
  "$@"
}
function mount-master-pd() {
local -r pd_name=$1
local -r mount_point=$2
if [[ ! -e "/dev/disk/by-id/${pd_name}" ]]; then
echo "Can't find ${pd_name}. Skipping mount."
return
fi
device_info=$(ls -l "/dev/disk/by-id/${pd_name}")
local relative_path=${device_info##* }
pd_device="/dev/disk/by-id/${relative_path}"
echo "Mounting master-pd"
local -r pd_path="/dev/disk/by-id/${pd_name}"
# Format and mount the disk, create directories on it for all of the master's
# persistent data, and link them to where they're used.
mkdir -p "${mount_point}"
# Format only if the disk is not already formatted.
if ! tune2fs -l "${pd_path}" ; then
echo "Formatting '${pd_path}'"
mkfs.ext4 -F -E lazy_itable_init=0,lazy_journal_init=0,discard "${pd_path}"
# This function assembles the kubelet supervisord config file and starts it using
# supervisorctl, on the kubemark master.
function start-kubelet {
# Kill any pre-existing kubelet process(es).
pkill kubelet
# Replace the builtin kubelet (if any) with the correct binary.
local -r builtin_kubelet="$(which kubelet)"
if [[ -n "${builtin_kubelet}" ]]; then
cp "${KUBE_BINDIR}/kubelet" "$(dirname "$builtin_kubelet")"
fi
echo "Mounting '${pd_path}' at '${mount_point}'"
mount -o discard,defaults "${pd_path}" "${mount_point}"
echo "Mounted master-pd '${pd_path}' at '${mount_point}'"
# Create supervisord config for kubelet.
create-kubelet-conf "${KUBE_BINDIR}/kubelet"
# Update supervisord to make it run kubelet.
supervisorctl reread
supervisorctl update
}
main_etcd_mount_point="/mnt/disks/master-pd"
mount-master-pd "google-master-pd" "${main_etcd_mount_point}"
# Contains all the data stored in etcd.
mkdir -m 700 -p "${main_etcd_mount_point}/var/etcd"
ln -s -f "${main_etcd_mount_point}/var/etcd" /var/etcd
mkdir -p /etc/srv
# Contains the dynamically generated apiserver auth certs and keys.
mkdir -p "${main_etcd_mount_point}/srv/kubernetes"
ln -s -f "${main_etcd_mount_point}/srv/kubernetes" /etc/srv/kubernetes
# Directory for kube-apiserver to store SSH key (if necessary).
mkdir -p "${main_etcd_mount_point}/srv/sshproxy"
ln -s -f "${main_etcd_mount_point}/srv/sshproxy" /etc/srv/sshproxy
# Create the log file and set its properties (world-readable, root-owned).
#
# $1 is the file to create.
function prepare-log-file {
  # Bug fix: "$1" is now quoted so paths containing spaces or glob
  # characters are handled correctly.
  touch "$1"
  chmod 644 "$1"
  chown root:root "$1"
}
if [ "${EVENT_PD:-false}" == "true" ]; then
event_etcd_mount_point="/mnt/disks/master-event-pd"
mount-master-pd "google-master-event-pd" "${event_etcd_mount_point}"
# Contains all the data stored in event etcd.
mkdir -m 700 -p "${event_etcd_mount_point}/var/etcd/events"
ln -s -f "${event_etcd_mount_point}/var/etcd/events" /var/etcd/events
fi
# Computes command line arguments to be passed to etcd and echoes them.
# Honors ETCD_TEST_ARGS and ETCD_QUOTA_BYTES (set by compute-etcd-variables).
function compute-etcd-params {
  local result="${ETCD_TEST_ARGS:-}"
  local flag
  for flag in \
      "--listen-peer-urls=http://127.0.0.1:2380" \
      "--advertise-client-urls=http://127.0.0.1:2379" \
      "--listen-client-urls=http://0.0.0.0:2379" \
      "--data-dir=/var/etcd/data"; do
    result+=" ${flag}"
  done
  result+=" ${ETCD_QUOTA_BYTES}"
  echo "${result}"
}
ETCD_QUOTA_BYTES=""
if [ "${KUBEMARK_ETCD_VERSION:0:2}" == "3." ]; then
# TODO: Set larger quota to see if that helps with
# 'mvcc: database space exceeded' errors. If so, pipe
# though our setup scripts.
ETCD_QUOTA_BYTES="--quota-backend-bytes=4294967296 "
fi
# Computes command line arguments to be passed to etcd-events and echoes them.
# Honors ETCD_TEST_ARGS and ETCD_QUOTA_BYTES (set by compute-etcd-variables).
function compute-etcd-events-params {
  local result="${ETCD_TEST_ARGS:-}"
  local flag
  for flag in \
      "--listen-peer-urls=http://127.0.0.1:2381" \
      "--advertise-client-urls=http://127.0.0.1:4002" \
      "--listen-client-urls=http://0.0.0.0:4002" \
      "--data-dir=/var/etcd/data-events"; do
    result+=" ${flag}"
  done
  result+=" ${ETCD_QUOTA_BYTES}"
  echo "${result}"
}
if [ "${EVENT_STORE_IP}" == "127.0.0.1" ]; then
# Retry starting etcd to avoid pulling image errors.
retry sudo docker run --net=host \
-v /var/etcd/events/data:/var/etcd/data -v /var/log:/var/log -d \
gcr.io/google_containers/etcd:${KUBEMARK_ETCD_IMAGE} /bin/sh -c "/usr/local/bin/etcd \
--listen-peer-urls http://127.0.0.1:2381 \
--advertise-client-urls=http://127.0.0.1:4002 \
--listen-client-urls=http://0.0.0.0:4002 \
--data-dir=/var/etcd/data ${ETCD_QUOTA_BYTES} 1>> /var/log/etcd-events.log 2>&1"
fi
# Computes command line arguments to be passed to apiserver and echoes them.
# Reads APISERVER_TEST_ARGS, EVENT_STORE_URL, NUM_NODES, STORAGE_BACKEND,
# SERVICE_CLUSTER_IP_RANGE and CUSTOM_ADMISSION_PLUGINS from the environment.
function compute-kube-apiserver-params {
  local result="${APISERVER_TEST_ARGS:-}"
  local -a flags=(
    "--insecure-bind-address=0.0.0.0"
    "--etcd-servers=http://127.0.0.1:2379"
    "--etcd-servers-overrides=/events#${EVENT_STORE_URL}"
    "--tls-cert-file=/srv/kubernetes/server.cert"
    "--tls-private-key-file=/srv/kubernetes/server.key"
    "--client-ca-file=/srv/kubernetes/ca.crt"
    "--token-auth-file=/srv/kubernetes/known_tokens.csv"
    "--secure-port=443"
    "--basic-auth-file=/srv/kubernetes/basic_auth.csv"
    # Apiserver memory target scales with cluster size.
    "--target-ram-mb=$((NUM_NODES * 60))"
    "--storage-backend=${STORAGE_BACKEND}"
    "--service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
    "--admission-control=${CUSTOM_ADMISSION_PLUGINS}"
  )
  local flag
  for flag in "${flags[@]}"; do
    result+=" ${flag}"
  done
  echo "${result}"
}
# Retry starting etcd to avoid pulling image errors.
retry sudo docker run --net=host \
-v /var/etcd/data:/var/etcd/data -v /var/log:/var/log -d \
gcr.io/google_containers/etcd:${KUBEMARK_ETCD_IMAGE} /bin/sh -c "/usr/local/bin/etcd \
--listen-peer-urls http://127.0.0.1:2380 \
--advertise-client-urls=http://127.0.0.1:2379 \
--listen-client-urls=http://0.0.0.0:2379 \
--data-dir=/var/etcd/data ${ETCD_QUOTA_BYTES} 1>> /var/log/etcd.log 2>&1"
# Computes command line arguments to be passed to controller-manager and
# echoes them. Reads CONTROLLER_MANAGER_TEST_ARGS, ALLOCATE_NODE_CIDRS,
# CLUSTER_IP_RANGE, SERVICE_CLUSTER_IP_RANGE and TERMINATED_POD_GC_THRESHOLD
# from the environment.
function compute-kube-controller-manager-params {
  local result="${CONTROLLER_MANAGER_TEST_ARGS:-}"
  local -a flags=(
    "--master=127.0.0.1:8080"
    "--service-account-private-key-file=/srv/kubernetes/server.key"
    "--root-ca-file=/srv/kubernetes/ca.crt"
    "--allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}"
    "--cluster-cidr=${CLUSTER_IP_RANGE}"
    "--service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
    "--terminated-pod-gc-threshold=${TERMINATED_POD_GC_THRESHOLD}"
  )
  local flag
  for flag in "${flags[@]}"; do
    result+=" ${flag}"
  done
  echo "${result}"
}
ulimit_command='bash -c "ulimit -n 65536;'
# Computes command line arguments to be passed to scheduler and echoes them.
# Honors SCHEDULER_TEST_ARGS (extra leading flags) from the environment.
function compute-kube-scheduler-params {
  echo "${SCHEDULER_TEST_ARGS:-} --master=127.0.0.1:8080"
}
cd /
# Start a kubernetes master component '$1' which can be any of the following:
# 1. etcd
# 2. etcd-events
# 3. kube-apiserver
# 4. kube-controller-manager
# 5. kube-scheduler
#
# It prepares the log file, loads the docker tag, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
#
# Assumed vars:
# DOCKER_REGISTRY
function start-kubemaster-component() {
local -r component=$1
# etcd components are identified by the "etcd" name prefix
# ("etcd", "etcd-events").
local component_is_etcd=false
if [ "${component:0:4}" == "etcd" ]; then
component_is_etcd=true
fi
echo "Start master component ${component}"
prepare-log-file /var/log/"${component}".log
local -r src_file="${KUBE_ROOT}/${component}.yaml"
# Flag string comes from the matching compute-<component>-params function.
local -r params=$(compute-${component}-params)
# Evaluate variables.
# sed uses '@' as the s/// delimiter since the substituted values contain '/'.
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{kube_docker_registry}}@${DOCKER_REGISTRY}@g" "${src_file}"
sed -i -e "s@{{instance_prefix}}@${INSTANCE_PREFIX}@g" "${src_file}"
if [ "${component_is_etcd}" == "true" ]; then
sed -i -e "s@{{etcd_image}}@${ETCD_IMAGE}@g" "${src_file}"
else
# Non-etcd components read their image tag from the release tarball.
local -r component_docker_tag=$(cat ${KUBE_BINDIR}/${component}.docker_tag)
sed -i -e "s@{{${component}_docker_tag}}@${component_docker_tag}@g" "${src_file}"
fi
# Kubelet watches this directory (see --config in compute-kubelet-params) and
# starts the pod from the copied manifest.
cp "${src_file}" /etc/kubernetes/manifests
}
############################### Main Function ########################################
echo "Start to configure master instance for kubemark"
# Extract files from the server tar and setup master env variables.
cd "${KUBE_ROOT}"
tar xzf kubernetes-server-linux-amd64.tar.gz
source "${KUBE_ROOT}/kubemark-master-env.sh"
write_supervisor_conf "kube-scheduler" "${ulimit_command} /kubernetes/server/bin/kube-scheduler --master=127.0.0.1:8080 $(cat /scheduler_flags | tr '\n' ' ')\""
write_supervisor_conf "kube-apiserver" "${ulimit_command} /kubernetes/server/bin/kube-apiserver --insecure-bind-address=0.0.0.0 \
--etcd-servers=http://127.0.0.1:2379 \
--etcd-servers-overrides=/events#${EVENT_STORE_URL} \
--tls-cert-file=/srv/kubernetes/server.cert \
--tls-private-key-file=/srv/kubernetes/server.key \
--client-ca-file=/srv/kubernetes/ca.crt \
--token-auth-file=/srv/kubernetes/known_tokens.csv \
--secure-port=443 \
--basic-auth-file=/srv/kubernetes/basic_auth.csv \
--target-ram-mb=$((${NUM_NODES} * 60)) \
$(cat /apiserver_flags | tr '\n' ' ')\""
write_supervisor_conf "kube-controller-manager" "${ulimit_command} /kubernetes/server/bin/kube-controller-manager \
--master=127.0.0.1:8080 \
--service-account-private-key-file=/srv/kubernetes/server.key \
--root-ca-file=/srv/kubernetes/ca.crt \
$(cat /controllers_flags | tr '\n' ' ')\""
# Setup required directory structure and etcd variables.
create-dirs
setup-kubelet-dir
delete-default-etcd-configs
compute-etcd-variables
supervisorctl reread
supervisorctl update
# Mount master PD for etcd and create symbolic links to it.
{
main_etcd_mount_point="/mnt/disks/master-pd"
mount-pd "google-master-pd" "${main_etcd_mount_point}"
# Contains all the data stored in etcd.
mkdir -m 700 -p "${main_etcd_mount_point}/var/etcd"
ln -s -f "${main_etcd_mount_point}/var/etcd" /var/etcd
mkdir -p /etc/srv
# Contains the dynamically generated apiserver auth certs and keys.
mkdir -p "${main_etcd_mount_point}/srv/kubernetes"
ln -s -f "${main_etcd_mount_point}/srv/kubernetes" /etc/srv/kubernetes
# Directory for kube-apiserver to store SSH key (if necessary).
mkdir -p "${main_etcd_mount_point}/srv/sshproxy"
ln -s -f "${main_etcd_mount_point}/srv/sshproxy" /etc/srv/sshproxy
}
# Mount master PD for event-etcd (if required) and create symbolic links to it.
{
EVENT_STORE_IP="${EVENT_STORE_IP:-127.0.0.1}"
EVENT_STORE_URL="${EVENT_STORE_URL:-http://${EVENT_STORE_IP}:4002}"
EVENT_PD="${EVENT_PD:-false}"
if [ "${EVENT_PD:-false}" == "true" ]; then
event_etcd_mount_point="/mnt/disks/master-event-pd"
mount-pd "google-master-event-pd" "${event_etcd_mount_point}"
# Contains all the data stored in event etcd.
mkdir -m 700 -p "${event_etcd_mount_point}/var/etcd/events"
ln -s -f "${event_etcd_mount_point}/var/etcd/events" /var/etcd/events
fi
}
# Setup docker flags and load images of the master components.
assemble-docker-flags
DOCKER_REGISTRY="gcr.io/google_containers"
load-docker-images
# Start kubelet as a supervisord process and master components as pods.
start-kubelet
start-kubemaster-component "etcd"
if [ "${EVENT_STORE_IP:-}" == "127.0.0.1" ]; then
start-kubemaster-component "etcd-events"
fi
start-kubemaster-component "kube-apiserver"
start-kubemaster-component "kube-controller-manager"
start-kubemaster-component "kube-scheduler"
# Wait till apiserver reports healthy on its insecure local port before
# declaring the master configured.
until [ "$(curl 127.0.0.1:8080/healthz 2> /dev/null)" == "ok" ]; do
  sleep 1
done
# Bug fix: corrected the "kubermark" typo in the completion message.
echo "Done with the configuration for kubemark master"

View File

@ -22,36 +22,38 @@ KUBE_ROOT=$(readlink -e ${TMP_ROOT} 2> /dev/null || perl -MCwd -e 'print Cwd::ab
source "${KUBE_ROOT}/test/kubemark/common.sh"
function writeEnvironmentFiles() {
cat > "${RESOURCE_DIRECTORY}/apiserver_flags" <<EOF
${APISERVER_TEST_ARGS}
--storage-backend=${STORAGE_BACKEND}
--service-cluster-ip-range="${SERVICE_CLUSTER_IP_RANGE}"
function writeEnvironmentFile() {
cat > "${RESOURCE_DIRECTORY}/kubemark-master-env.sh" <<EOF
# Generic variables.
INSTANCE_PREFIX=${INSTANCE_PREFIX}
SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE}
# Etcd related variables.
ETCD_IMAGE=2.2.1
# Controller-manager related variables.
CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_ARGS}"
ALLOCATE_NODE_CIDRS=${ALLOCATE_NODE_CIDRS}
CLUSTER_IP_RANGE=${CLUSTER_IP_RANGE}
TERMINATED_POD_GC_THRESHOLD=${TERMINATED_POD_GC_THRESHOLD}
# Scheduler related variables.
SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_ARGS}"
# Apiserver related variables.
APISERVER_TEST_ARGS="${APISERVER_TEST_ARGS}"
STORAGE_BACKEND=${STORAGE_BACKEND}
NUM_NODES=${NUM_NODES}
EOF
if [ -z "${CUSTOM_ADMISSION_PLUGINS:-}" ]; then
cat >> "${RESOURCE_DIRECTORY}/apiserver_flags" <<EOF
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota
cat >> "${RESOURCE_DIRECTORY}/kubemark-master-env.sh" <<EOF
CUSTOM_ADMISSION_PLUGINS=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota
EOF
else
cat >> "${RESOURCE_DIRECTORY}/apiserver_flags" <<EOF
--admission-control=${CUSTOM_ADMISSION_PLUGINS}
cat >> "${RESOURCE_DIRECTORY}/kubemark-master-env.sh" <<EOF
CUSTOM_ADMISSION_PLUGINS=${CUSTOM_ADMISSION_PLUGINS}
EOF
fi
sed -i'' -e "s/\"//g" "${RESOURCE_DIRECTORY}/apiserver_flags"
cat > "${RESOURCE_DIRECTORY}/scheduler_flags" <<EOF
${SCHEDULER_TEST_ARGS}
EOF
sed -i'' -e "s/\"//g" "${RESOURCE_DIRECTORY}/scheduler_flags"
cat > "${RESOURCE_DIRECTORY}/controllers_flags" <<EOF
${CONTROLLER_MANAGER_TEST_ARGS}
--allocate-node-cidrs="${ALLOCATE_NODE_CIDRS}"
--cluster-cidr="${CLUSTER_IP_RANGE}"
--service-cluster-ip-range="${SERVICE_CLUSTER_IP_RANGE}"
--terminated-pod-gc-threshold="${TERMINATED_POD_GC_THRESHOLD}"
EOF
sed -i'' -e "s/\"//g" "${RESOURCE_DIRECTORY}/controllers_flags"
}
MAKE_DIR="${KUBE_ROOT}/cluster/images/kubemark"
@ -148,7 +150,7 @@ done
password=$(python -c 'import string,random; print("".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16)))')
gcloud compute ssh --zone="${ZONE}" --project="${PROJECT}" "${MASTER_NAME}" \
--command="sudo mkdir /srv/kubernetes -p && \
--command="sudo mkdir /home/kubernetes -p && sudo mkdir /srv/kubernetes -p && \
sudo bash -c \"echo ${MASTER_CERT_BASE64} | base64 --decode > /srv/kubernetes/server.cert\" && \
sudo bash -c \"echo ${MASTER_KEY_BASE64} | base64 --decode > /srv/kubernetes/server.key\" && \
sudo bash -c \"echo ${CA_CERT_BASE64} | base64 --decode > /srv/kubernetes/ca.crt\" && \
@ -159,21 +161,24 @@ gcloud compute ssh --zone="${ZONE}" --project="${PROJECT}" "${MASTER_NAME}" \
sudo bash -c \"echo \"${KUBE_PROXY_TOKEN},kube_proxy,kube_proxy\" >> /srv/kubernetes/known_tokens.csv\" && \
sudo bash -c \"echo ${password},admin,admin > /srv/kubernetes/basic_auth.csv\""
writeEnvironmentFiles
writeEnvironmentFile
gcloud compute copy-files --zone="${ZONE}" --project="${PROJECT}" \
"${SERVER_BINARY_TAR}" \
"${RESOURCE_DIRECTORY}/kubemark-master-env.sh" \
"${RESOURCE_DIRECTORY}/start-kubemark-master.sh" \
"${KUBEMARK_DIRECTORY}/configure-kubectl.sh" \
"${RESOURCE_DIRECTORY}/apiserver_flags" \
"${RESOURCE_DIRECTORY}/scheduler_flags" \
"${RESOURCE_DIRECTORY}/controllers_flags" \
"root@${MASTER_NAME}":/
"${RESOURCE_DIRECTORY}/manifests/etcd.yaml" \
"${RESOURCE_DIRECTORY}/manifests/etcd-events.yaml" \
"${RESOURCE_DIRECTORY}/manifests/kube-apiserver.yaml" \
"${RESOURCE_DIRECTORY}/manifests/kube-scheduler.yaml" \
"${RESOURCE_DIRECTORY}/manifests/kube-controller-manager.yaml" \
"root@${MASTER_NAME}":/home/kubernetes/
gcloud compute ssh "${MASTER_NAME}" --zone="${ZONE}" --project="${PROJECT}" \
--command="sudo chmod a+x /configure-kubectl.sh && sudo chmod a+x /start-kubemark-master.sh && \
sudo /start-kubemark-master.sh ${EVENT_STORE_IP:-127.0.0.1} ${NUM_NODES:-0} ${EVENT_PD:-false} ${ETCD_IMAGE:-}"
--command="sudo chmod a+x /home/kubernetes/configure-kubectl.sh && \
sudo chmod a+x /home/kubernetes/start-kubemark-master.sh && \
sudo /home/kubernetes/start-kubemark-master.sh"
# create kubeconfig for Kubelet:
KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
@ -248,18 +253,18 @@ contexts:
current-context: kubemark-context
EOF
sed "s/##numreplicas##/${NUM_NODES:-10}/g" "${RESOURCE_DIRECTORY}/hollow-node_template.json" > "${RESOURCE_DIRECTORY}/hollow-node.json"
sed -i'' -e "s/##project##/${PROJECT}/g" "${RESOURCE_DIRECTORY}/hollow-node.json"
sed "s/{{numreplicas}}/${NUM_NODES:-10}/g" "${RESOURCE_DIRECTORY}/hollow-node_template.json" > "${RESOURCE_DIRECTORY}/hollow-node.json"
sed -i'' -e "s/{{project}}/${PROJECT}/g" "${RESOURCE_DIRECTORY}/hollow-node.json"
mkdir "${RESOURCE_DIRECTORY}/addons" || true
sed "s/##MASTER_IP##/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/heapster_template.json" > "${RESOURCE_DIRECTORY}/addons/heapster.json"
sed "s/{{MASTER_IP}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/heapster_template.json" > "${RESOURCE_DIRECTORY}/addons/heapster.json"
metrics_mem_per_node=4
metrics_mem=$((200 + ${metrics_mem_per_node}*${NUM_NODES:-10}))
sed -i'' -e "s/##METRICS_MEM##/${metrics_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
sed -i'' -e "s/{{METRICS_MEM}}/${metrics_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
eventer_mem_per_node=500
eventer_mem=$((200 * 1024 + ${eventer_mem_per_node}*${NUM_NODES:-10}))
sed -i'' -e "s/##EVENTER_MEM##/${eventer_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
sed -i'' -e "s/{{EVENTER_MEM}}/${eventer_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
"${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/kubemark-ns.json"
"${KUBECTL}" create -f "${KUBECONFIG_SECRET}" --namespace="kubemark"