From f31c4f6d69596b5168ef9e5102fe2a2243e1e051 Mon Sep 17 00:00:00 2001 From: Andy Zheng Date: Thu, 19 May 2016 13:24:03 -0700 Subject: [PATCH 1/4] Revert "Revert "Add support for running GCI on the GCE cloud provider"" This reverts commit 40f53b176504c0b516ec858055dafb72236bfdd1. --- build/common.sh | 13 +- cluster/gce/configure-vm.sh | 5 +- cluster/gce/gci/README.md | 6 + cluster/gce/gci/configure-helper.sh | 749 ++++++++++++++++++ cluster/gce/gci/configure.sh | 194 +++++ cluster/gce/gci/health-monitor.sh | 80 ++ cluster/gce/gci/helper.sh | 81 ++ cluster/gce/gci/master.yaml | 91 +++ cluster/gce/gci/node.yaml | 91 +++ cluster/gce/trusty/configure-helper.sh | 1 - cluster/gce/trusty/configure.sh | 2 +- cluster/gce/util.sh | 15 +- .../salt/supervisor/kubelet-checker.sh | 2 +- hack/verify-flags/exceptions.txt | 2 + 14 files changed, 1313 insertions(+), 19 deletions(-) create mode 100644 cluster/gce/gci/README.md create mode 100644 cluster/gce/gci/configure-helper.sh create mode 100644 cluster/gce/gci/configure.sh create mode 100644 cluster/gce/gci/health-monitor.sh create mode 100755 cluster/gce/gci/helper.sh create mode 100644 cluster/gce/gci/master.yaml create mode 100644 cluster/gce/gci/node.yaml diff --git a/build/common.sh b/build/common.sh index 089816bc0f..4b9cca0520 100755 --- a/build/common.sh +++ b/build/common.sh @@ -917,7 +917,9 @@ function kube::release::package_kube_manifests_tarball() { cp "${salt_dir}/kube-apiserver/abac-authz-policy.jsonl" "${dst_dir}" cp "${salt_dir}/kube-controller-manager/kube-controller-manager.manifest" "${dst_dir}" cp "${salt_dir}/kube-addons/kube-addon-manager.yaml" "${dst_dir}" - cp "${KUBE_ROOT}/cluster/gce/trusty/configure-helper.sh" "${dst_dir}" + cp "${KUBE_ROOT}/cluster/gce/trusty/configure-helper.sh" "${dst_dir}/trusty-configure-helper.sh" + cp "${KUBE_ROOT}/cluster/gce/gci/configure-helper.sh" "${dst_dir}/gci-configure-helper.sh" + cp "${KUBE_ROOT}/cluster/gce/gci/health-monitor.sh" "${dst_dir}/health-monitor.sh" cp -r 
"${salt_dir}/kube-admission-controls/limit-range" "${dst_dir}" local objects objects=$(cd "${KUBE_ROOT}/cluster/addons" && find . \( -name \*.yaml -or -name \*.yaml.in -or -name \*.json \) | grep -v demo) @@ -1114,13 +1116,12 @@ function kube::release::gcs::copy_release_artifacts() { # Stage everything in release directory kube::release::gcs::stage_and_hash "${RELEASE_DIR}"/* . || return 1 - # Having the configure-vm.sh script and trusty code from the GCE cluster + # Having the configure-vm.sh script and GCI code from the GCE cluster # deploy hosted with the release is useful for GKE. - # TODO(andyzheng0831): Replace the trusty path with GCI after finshing the GCI code. kube::release::gcs::stage_and_hash "${RELEASE_STAGE}/full/kubernetes/cluster/gce/configure-vm.sh" extra/gce || return 1 - kube::release::gcs::stage_and_hash "${RELEASE_STAGE}/full/kubernetes/cluster/gce/trusty/node.yaml" extra/gce || return 1 - kube::release::gcs::stage_and_hash "${RELEASE_STAGE}/full/kubernetes/cluster/gce/trusty/master.yaml" extra/gce || return 1 - kube::release::gcs::stage_and_hash "${RELEASE_STAGE}/full/kubernetes/cluster/gce/trusty/configure.sh" extra/gce || return 1 + kube::release::gcs::stage_and_hash "${RELEASE_STAGE}/full/kubernetes/cluster/gce/gci/node.yaml" extra/gce || return 1 + kube::release::gcs::stage_and_hash "${RELEASE_STAGE}/full/kubernetes/cluster/gce/gci/master.yaml" extra/gce || return 1 + kube::release::gcs::stage_and_hash "${RELEASE_STAGE}/full/kubernetes/cluster/gce/gci/configure.sh" extra/gce || return 1 # Upload the "naked" binaries to GCS. This is useful for install scripts that # download the binaries directly and don't need tars. 
diff --git a/cluster/gce/configure-vm.sh b/cluster/gce/configure-vm.sh index 28dbec5a24..066b6fe9f3 100755 --- a/cluster/gce/configure-vm.sh +++ b/cluster/gce/configure-vm.sh @@ -358,7 +358,6 @@ stop-salt-minion() { # Finds the master PD device; returns it in MASTER_PD_DEVICE find-master-pd() { MASTER_PD_DEVICE="" - # TODO(zmerlynn): GKE is still lagging in master-pd creation if [[ ! -e /dev/disk/by-id/google-master-pd ]]; then return fi @@ -376,7 +375,7 @@ find-master-pd() { # already exists. mount-master-pd() { find-master-pd - if [[ -z "${MASTER_PD_DEVICE}" ]]; then + if [[ -z "${MASTER_PD_DEVICE:-}" ]]; then return fi @@ -570,7 +569,7 @@ function convert-bytes-gce-kube() { # - Optionally uses KUBECFG_CERT and KUBECFG_KEY to store a copy of the client # cert credentials. # -# After the first boot and on upgrade, these files exists on the master-pd +# After the first boot and on upgrade, these files exist on the master-pd # and should never be touched again (except perhaps an additional service # account, see NB below.) function create-salt-master-auth() { diff --git a/cluster/gce/gci/README.md b/cluster/gce/gci/README.md new file mode 100644 index 0000000000..f5ed2173f4 --- /dev/null +++ b/cluster/gce/gci/README.md @@ -0,0 +1,6 @@ +# Google Container-VM Image (GCI) + +[GCI](https://cloud.google.com/compute/docs/containers/vm-image/) is a container-optimized OS image for the Google Cloud Platform (GCP). We built GCI primarily for running Google services on GCP. Unlike the open preview version of container-vm, Google Container-VM Image is based on the open source Chromium OS project, allowing us greater control over the build management, security compliance, and customizations for GCP. 
+ + +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/gce/gci/README.md?pixel)]() diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh new file mode 100644 index 0000000000..cad060c575 --- /dev/null +++ b/cluster/gce/gci/configure-helper.sh @@ -0,0 +1,749 @@ +#!/bin/bash + +# Copyright 2016 The Kubernetes Authors All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script is for configuring kubernetes master and node instances. It is +# uploaded in the manifests tar ball. + +set -o errexit +set -o nounset +set -o pipefail + +function config-ip-firewall { + echo "Configuring IP firewall rules" + # The GCI image has a host firewall which drops most inbound/forwarded packets. + # We need to add rules to accept all TCP/UDP packets.
+ if iptables -L INPUT | grep "Chain INPUT (policy DROP)" > /dev/null; then + echo "Add rules to accept all inbound TCP/UDP packets" + iptables -A INPUT -w -p TCP -j ACCEPT + iptables -A INPUT -w -p UDP -j ACCEPT + fi + if iptables -L FORWARD | grep "Chain FORWARD (policy DROP)" > /dev/null; then + echo "Add rules to accept all forwarded TCP/UDP packets" + iptables -A FORWARD -w -p TCP -j ACCEPT + iptables -A FORWARD -w -p UDP -j ACCEPT + fi +} + +function create-dirs { + echo "Creating required directories" + mkdir -p /var/lib/kubelet + mkdir -p /etc/kubernetes/manifests + if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then + mkdir -p /var/lib/kube-proxy + fi +} + +# Finds the master PD device; returns it in MASTER_PD_DEVICE +function find-master-pd { + MASTER_PD_DEVICE="" + if [[ ! -e /dev/disk/by-id/google-master-pd ]]; then + return + fi + device_info=$(ls -l /dev/disk/by-id/google-master-pd) + relative_path=${device_info##* } + MASTER_PD_DEVICE="/dev/disk/by-id/${relative_path}" +} + +# Mounts a persistent disk (formatting if needed) to store the persistent data +# on the master -- etcd's data, a few settings, and security certs/keys/tokens. +# safe_format_and_mount only formats an unformatted disk, and mkdir -p will +# leave a directory be if it already exists. +function mount-master-pd { + find-master-pd + if [[ -z "${MASTER_PD_DEVICE:-}" ]]; then + return + fi + + echo "Mounting master-pd" + local -r pd_path="/dev/disk/by-id/google-master-pd" + local -r mount_point="/mnt/disks/master-pd" + # Format and mount the disk, create directories on it for all of the master's + # persistent data, and link them to where they're used. + mkdir -p "${mount_point}" + /usr/share/google/safe_format_and_mount -m "mkfs.ext4 -F" "${pd_path}" "${mount_point}" &>/var/log/master-pd-mount.log || \ + { echo "!!! master-pd mount failed, review /var/log/master-pd-mount.log !!!"; return 1; } + # Contains all the data stored in etcd. 
+ mkdir -m 700 -p "${mount_point}/var/etcd" + ln -s -f "${mount_point}/var/etcd" /var/etcd + mkdir -p /etc/srv + # Contains the dynamically generated apiserver auth certs and keys. + mkdir -p "${mount_point}/etc/srv/kubernetes" + ln -s -f "${mount_point}/etc/srv/kubernetes" /etc/srv/kubernetes + # Directory for kube-apiserver to store SSH key (if necessary). + mkdir -p "${mount_point}/etc/srv/sshproxy" + ln -s -f "${mount_point}/etc/srv/sshproxy" /etc/srv/sshproxy + + if ! id etcd &>/dev/null; then + useradd -s /sbin/nologin -d /var/etcd etcd + fi + chown -R etcd "${mount_point}/var/etcd" + chgrp -R etcd "${mount_point}/var/etcd" +} + +# After the first boot and on upgrade, these files exist on the master-pd +# and should never be touched again (except perhaps an additional service +# account, see NB below.) +function create-master-auth { + echo "Creating master auth files" + local -r auth_dir="/etc/srv/kubernetes" + if [[ ! -e "${auth_dir}/ca.crt" && ! -z "${CA_CERT:-}" && ! -z "${MASTER_CERT:-}" && ! -z "${MASTER_KEY:-}" ]]; then + echo "${CA_CERT}" | base64 -d > "${auth_dir}/ca.crt" + echo "${MASTER_CERT}" | base64 -d > "${auth_dir}/server.cert" + echo "${MASTER_KEY}" | base64 -d > "${auth_dir}/server.key" + fi + local -r basic_auth_csv="${auth_dir}/basic_auth.csv" + if [[ ! -e "${basic_auth_csv}" ]]; then + echo "${KUBE_PASSWORD},${KUBE_USER},admin" > "${basic_auth_csv}" + fi + local -r known_tokens_csv="${auth_dir}/known_tokens.csv" + if [[ ! -e "${known_tokens_csv}" ]]; then + echo "${KUBE_BEARER_TOKEN},admin,admin" > "${known_tokens_csv}" + echo "${KUBELET_TOKEN},kubelet,kubelet" >> "${known_tokens_csv}" + echo "${KUBE_PROXY_TOKEN},kube_proxy,kube_proxy" >> "${known_tokens_csv}" + fi + # Do not create /etc/gce.conf unless specified. 
+ if [[ -z ${CLOUD_CONFIG:-} ]]; then + return + fi + cat </etc/gce.conf +[global] +EOF + if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then + cat <>/etc/gce.conf +token-url = ${TOKEN_URL} +token-body = ${TOKEN_BODY} +project-id = ${PROJECT_ID} +network-name = ${NODE_NETWORK} +EOF + fi + if [[ -n "${NODE_INSTANCE_PREFIX:-}" ]]; then + cat <>/etc/gce.conf +node-tags = ${NODE_INSTANCE_PREFIX} +EOF + fi + if [[ -n "${MULTIZONE:-}" ]]; then + cat <>/etc/gce.conf +multizone = ${MULTIZONE} +EOF + fi +} + +function create-kubelet-kubeconfig { + echo "Creating kubelet kubeconfig file" + if [[ -z "${KUBELET_CA_CERT:-}" ]]; then + KUBELET_CA_CERT="${CA_CERT}" + fi + cat </var/lib/kubelet/kubeconfig +apiVersion: v1 +kind: Config +users: +- name: kubelet + user: + client-certificate-data: ${KUBELET_CERT} + client-key-data: ${KUBELET_KEY} +clusters: +- name: local + cluster: + certificate-authority-data: ${KUBELET_CA_CERT} +contexts: +- context: + cluster: local + user: kubelet + name: service-account-context +current-context: service-account-context +EOF +} + +# Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and KUBELET_KEY +# to generate a kubeconfig file for the kubelet to securely connect to the apiserver. +function create-master-kubelet-auth { + # Only configure the kubelet on the master if the required variables are + # set in the environment. 
+ if [[ -n "${KUBELET_APISERVER:-}" && -n "${KUBELET_CERT:-}" && -n "${KUBELET_KEY:-}" ]]; then + create-kubelet-kubeconfig + fi +} + +function create-kubeproxy-kubeconfig { + echo "Creating kube-proxy kubeconfig file" + cat </var/lib/kube-proxy/kubeconfig +apiVersion: v1 +kind: Config +users: +- name: kube-proxy + user: + token: ${KUBE_PROXY_TOKEN} +clusters: +- name: local + cluster: + certificate-authority-data: ${CA_CERT} +contexts: +- context: + cluster: local + user: kube-proxy + name: service-account-context +current-context: service-account-context +EOF +} + +function assemble-docker-flags { + local docker_opts="-p /var/run/docker.pid --bridge=cbr0 --iptables=false --ip-masq=false" + if [[ "${TEST_CLUSTER:-}" == "true" ]]; then + docker_opts+=" --debug" + fi + echo "DOCKER_OPTS=\"${docker_opts} ${EXTRA_DOCKER_OPTS:-}\"" > /etc/default/docker +} + +# A helper function for loading a docker image. It keeps trying up to 5 times. +# +# $1: Full path of the docker image +function try-load-docker-image { + local -r img=$1 + echo "Try to load docker image file ${img}" + # Temporarily turn off errexit, because we don't want to exit on first failure. + set +e + local -r max_attempts=5 + local -i attempt_num=1 + until timeout 30 docker load -i "${img}"; do + if [[ "${attempt_num}" == "${max_attempts}" ]]; then + echo "Fail to load docker image file ${img} after ${max_attempts} retries. Exist!!" + exit 1 + else + attempt_num=$((attempt_num+1)) + sleep 5 + fi + done + # Re-enable errexit. + set -e +} + +# Loads kube-system docker images. It is better to do it before starting kubelet, +# as kubelet will restart docker daemon, which may interfere with loading images. 
+function load-docker-images { + echo "Start loading kube-system docker images" + local -r img_dir="${KUBE_HOME}/kube-docker-files" + if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then + try-load-docker-image "${img_dir}/kube-apiserver.tar" + try-load-docker-image "${img_dir}/kube-controller-manager.tar" + try-load-docker-image "${img_dir}/kube-scheduler.tar" + else + try-load-docker-image "${img_dir}/kube-proxy.tar" + fi +} + +# A kubelet systemd service is built into the GCI image, but by default it is not started +# when an instance is up. To start kubelet, the command line flags should be written +# to /etc/default/kubelet in the format "KUBELET_OPTS=", and then start kubelet +# using systemctl. This function assembles the command line and starts the kubelet +# systemd service. +function start-kubelet { + echo "Start kubelet" + local flags="${KUBELET_TEST_LOG_LEVEL:-"--v=2"} ${KUBELET_TEST_ARGS:-}" + flags+=" --allow-privileged=true" + flags+=" --babysit-daemons=true" + flags+=" --cgroup-root=/" + flags+=" --cloud-provider=gce" + flags+=" --cluster-dns=${DNS_SERVER_IP}" + flags+=" --cluster-domain=${DNS_DOMAIN}" + flags+=" --config=/etc/kubernetes/manifests" + flags+=" --kubelet-cgroups=/kubelet" + flags+=" --system-cgroups=/system" + + if [[ -n "${KUBELET_PORT:-}" ]]; then + flags+=" --port=${KUBELET_PORT}" + fi + if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then + flags+=" --enable-debugging-handlers=false" + flags+=" --hairpin-mode=none" + if [[ ! -z "${KUBELET_APISERVER:-}" && ! -z "${KUBELET_CERT:-}" && !
-z "${KUBELET_KEY:-}" ]]; then + flags+=" --api-servers=https://${KUBELET_APISERVER}" + flags+=" --register-schedulable=false" + flags+=" --reconcile-cidr=false" + flags+=" --pod-cidr=10.123.45.0/30" + else + flags+=" --pod-cidr=${MASTER_IP_RANGE}" + fi + else # For nodes + flags+=" --enable-debugging-handlers=true" + flags+=" --api-servers=https://${KUBERNETES_MASTER_NAME}" + if [[ "${HAIRPIN_MODE:-}" == "promiscuous-bridge" ]] || \ + [[ "${HAIRPIN_MODE:-}" == "hairpin-veth" ]] || \ + [[ "${HAIRPIN_MODE:-}" == "none" ]]; then + flags+=" --hairpin-mode=${HAIRPIN_MODE}" + fi + fi + if [[ "${ENABLE_MANIFEST_URL:-}" == "true" ]]; then + flags+=" --manifest-url=${MANIFEST_URL}" + flags+=" --manifest-url-header=${MANIFEST_URL_HEADER}" + fi + if [[ -n "${ENABLE_CUSTOM_METRICS:-}" ]]; then + flags+=" --enable-custom-metrics=${ENABLE_CUSTOM_METRICS}" + fi + if [[ -n "${NODE_LABELS:-}" ]]; then + flags+=" --node-labels=${NODE_LABELS}" + fi + if [[ "${ALLOCATE_NODE_CIDRS:-}" == "true" ]]; then + flags+=" --configure-cbr0=${ALLOCATE_NODE_CIDRS}" + fi + echo "KUBELET_OPTS=\"${flags}\"" > /etc/default/kubelet + + systemctl start kubelet.service +} + +# Create the log file and set its properties. +# +# $1 is the file to create. +function prepare-log-file { + touch $1 + chmod 644 $1 + chown root:root $1 +} + +# Starts kube-proxy pod. 
+function start-kube-proxy { + echo "Start kube-proxy pod" + prepare-log-file /var/log/kube-proxy.log + local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/kube-proxy.manifest" + remove-salt-config-comments "${src_file}" + + local -r kubeconfig="--kubeconfig=/var/lib/kube-proxy/kubeconfig" + local kube_docker_registry="gcr.io/google_containers" + if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then + kube_docker_registry=${KUBE_DOCKER_REGISTRY} + fi + local -r kube_proxy_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-proxy.docker_tag) + local api_servers="--master=https://${KUBERNETES_MASTER_NAME}" + sed -i -e "s@{{kubeconfig}}@${kubeconfig}@g" ${src_file} + sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${kube_docker_registry}@g" ${src_file} + sed -i -e "s@{{pillar\['kube-proxy_docker_tag'\]}}@${kube_proxy_docker_tag}@g" ${src_file} + sed -i -e "s@{{test_args}}@${KUBEPROXY_TEST_ARGS:-}@g" ${src_file} + sed -i -e "s@{{ cpurequest }}@20m@g" ${src_file} + sed -i -e "s@{{log_level}}@${KUBEPROXY_TEST_LOG_LEVEL:-"--v=2"}@g" ${src_file} + sed -i -e "s@{{api_servers_with_port}}@${api_servers}@g" ${src_file} + if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then + sed -i -e "s@{{cluster_cidr}}@--cluster-cidr=${CLUSTER_IP_RANGE}@g" ${src_file} + fi + cp "${src_file}" /etc/kubernetes/manifests +} + +# Replaces the variables in the etcd manifest file with the real values, and then +# copy the file to the manifest dir +# $1: value for variable 'suffix' +# $2: value for variable 'port' +# $3: value for variable 'server_port' +# $4: value for variable 'cpulimit' +# $5: pod name, which should be either etcd or etcd-events +function prepare-etcd-manifest { + local -r temp_file="/tmp/$5" + cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/etcd.manifest" "${temp_file}" + sed -i -e "s@{{ *suffix *}}@$1@g" "${temp_file}" + sed -i -e "s@{{ *port *}}@$2@g" "${temp_file}" + sed -i -e "s@{{ *server_port *}}@$3@g" "${temp_file}" + sed -i -e "s@{{ *cpulimit *}}@\"$4\"@g" "${temp_file}" 
+ # Replace the volume host path. + sed -i -e "s@/mnt/master-pd/var/etcd@/mnt/disks/master-pd/var/etcd@g" "${temp_file}" + mv "${temp_file}" /etc/kubernetes/manifests +} + +# Starts etcd server pod (and etcd-events pod if needed). +# More specifically, it prepares dirs and files, sets the variable value +# in the manifests, and copies them to /etc/kubernetes/manifests. +function start-etcd-servers { + echo "Start etcd pods" + if [[ -d /etc/etcd ]]; then + rm -rf /etc/etcd + fi + if [[ -e /etc/default/etcd ]]; then + rm -f /etc/default/etcd + fi + if [[ -e /etc/systemd/system/etcd.service ]]; then + rm -f /etc/systemd/system/etcd.service + fi + if [[ -e /etc/init.d/etcd ]]; then + rm -f /etc/init.d/etcd + fi + prepare-log-file /var/log/etcd.log + prepare-etcd-manifest "" "4001" "2380" "200m" "etcd.manifest" + + prepare-log-file /var/log/etcd-events.log + prepare-etcd-manifest "-events" "4002" "2381" "100m" "etcd-events.manifest" +} + +# Calculates the following variables based on env variables, which will be used +# by the manifests of several kube-master components. +# CLOUD_CONFIG_VOLUME +# CLOUD_CONFIG_MOUNT +# DOCKER_REGISTRY +function compute-master-manifest-variables { + CLOUD_CONFIG_VOLUME="" + CLOUD_CONFIG_MOUNT="" + if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then + CLOUD_CONFIG_VOLUME="{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"/etc/gce.conf\"}}," + CLOUD_CONFIG_MOUNT="{\"name\": \"cloudconfigmount\",\"mountPath\": \"/etc/gce.conf\", \"readOnly\": true}," + fi + DOCKER_REGISTRY="gcr.io/google_containers" + if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then + DOCKER_REGISTRY="${KUBE_DOCKER_REGISTRY}" + fi +} + +# A helper function for removing salt configuration and comments from a file. +# This is mainly for preparing a manifest file. +# +# $1: Full path of the file to manipulate +function remove-salt-config-comments { + # Remove salt configuration. 
+ sed -i "/^[ |\t]*{[#|%]/d" $1 + # Remove comments. + sed -i "/^[ |\t]*#/d" $1 +} + +# Starts kubernetes apiserver. +# It prepares the log file, loads the docker image, calculates variables, sets them +# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests. +# +# Assumed vars (which are calculated in function compute-master-manifest-variables) +# CLOUD_CONFIG_VOLUME +# CLOUD_CONFIG_MOUNT +# DOCKER_REGISTRY +function start-kube-apiserver { + echo "Start kubernetes api-server" + prepare-log-file /var/log/kube-apiserver.log + + # Calculate variables and assemble the command line. + local params="${API_SERVER_TEST_LOG_LEVEL:-"--v=2"} ${APISERVER_TEST_ARGS:-}" + params+=" --address=127.0.0.1" + params+=" --allow-privileged=true" + params+=" --authorization-mode=ABAC" + params+=" --authorization-policy-file=/etc/srv/kubernetes/abac-authz-policy.jsonl" + params+=" --basic-auth-file=/etc/srv/kubernetes/basic_auth.csv" + params+=" --cloud-provider=gce" + params+=" --client-ca-file=/etc/srv/kubernetes/ca.crt" + params+=" --etcd-servers=http://127.0.0.1:4001" + params+=" --etcd-servers-overrides=/events#http://127.0.0.1:4002" + params+=" --secure-port=443" + params+=" --tls-cert-file=/etc/srv/kubernetes/server.cert" + params+=" --tls-private-key-file=/etc/srv/kubernetes/server.key" + params+=" --token-auth-file=/etc/srv/kubernetes/known_tokens.csv" + if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then + params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}" + fi + if [[ -n "${ADMISSION_CONTROL:-}" ]]; then + params+=" --admission-control=${ADMISSION_CONTROL}" + fi + if [[ -n "${KUBE_APISERVER_REQUEST_TIMEOUT:-}" ]]; then + params+=" --min-request-timeout=${KUBE_APISERVER_REQUEST_TIMEOUT}" + fi + if [[ -n "${RUNTIME_CONFIG:-}" ]]; then + params+=" --runtime-config=${RUNTIME_CONFIG}" + fi + if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then + local -r vm_external_ip=$(curl 
--retry 5 --retry-delay 3 --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip") + params+=" --advertise-address=${vm_external_ip}" + params+=" --cloud-config=/etc/gce.conf" + params+=" --ssh-user=${PROXY_SSH_USER}" + params+=" --ssh-keyfile=/etc/srv/sshproxy/.sshkeyfile" + fi + local -r kube_apiserver_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-apiserver.docker_tag) + + local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty" + cp "${src_dir}/abac-authz-policy.jsonl" /etc/srv/kubernetes/ + src_file="${src_dir}/kube-apiserver.manifest" + remove-salt-config-comments "${src_file}" + # Evaluate variables. + sed -i -e "s@{{params}}@${params}@g" "${src_file}" + sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}" + sed -i -e "s@{{srv_sshproxy_path}}@/etc/srv/sshproxy@g" "${src_file}" + sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}" + sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}" + sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}" + sed -i -e "s@{{pillar\['kube-apiserver_docker_tag'\]}}@${kube_apiserver_docker_tag}@g" "${src_file}" + sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}" + sed -i -e "s@{{secure_port}}@443@g" "${src_file}" + sed -i -e "s@{{secure_port}}@8080@g" "${src_file}" + sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}" + sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}" + cp "${src_file}" /etc/kubernetes/manifests +} + +# Starts kubernetes controller manager. +# It prepares the log file, loads the docker image, calculates variables, sets them +# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests. 
+# +# Assumed vars (which are calculated in function compute-master-manifest-variables) +# CLOUD_CONFIG_VOLUME +# CLOUD_CONFIG_MOUNT +# DOCKER_REGISTRY +function start-kube-controller-manager { + echo "Start kubernetes controller-manager" + prepare-log-file /var/log/kube-controller-manager.log + + # Calculate variables and assemble the command line. + local params="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-"--v=2"} ${CONTROLLER_MANAGER_TEST_ARGS:-}" + params+=" --cloud-provider=gce" + params+=" --master=127.0.0.1:8080" + params+=" --root-ca-file=/etc/srv/kubernetes/ca.crt" + params+=" --service-account-private-key-file=/etc/srv/kubernetes/server.key" + if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then + params+=" --cloud-config=/etc/gce.conf" + fi + if [[ -n "${INSTANCE_PREFIX:-}" ]]; then + params+=" --cluster-name=${INSTANCE_PREFIX}" + fi + if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then + params+=" --cluster-cidr=${CLUSTER_IP_RANGE}" + fi + if [[ "${ALLOCATE_NODE_CIDRS:-}" == "true" ]]; then + params+=" --allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}" + fi + if [[ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]]; then + params+=" --terminated-pod-gc-threshold=${TERMINATED_POD_GC_THRESHOLD}" + fi + local -r kube_rc_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-controller-manager.docker_tag) + + local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-controller-manager.manifest" + remove-salt-config-comments "${src_file}" + # Evaluate variables. 
+ sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}" + sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}" + sed -i -e "s@{{pillar\['kube-controller-manager_docker_tag'\]}}@${kube_rc_docker_tag}@g" "${src_file}" + sed -i -e "s@{{params}}@${params}@g" "${src_file}" + sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}" + sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}" + sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}" + sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}" + cp "${src_file}" /etc/kubernetes/manifests +} + +# Starts kubernetes scheduler. +# It prepares the log file, loads the docker image, calculates variables, sets them +# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests. +# +# Assumed vars (which are calculated in compute-master-manifest-variables) +# DOCKER_REGISTRY +function start-kube-scheduler { + echo "Start kubernetes scheduler" + prepare-log-file /var/log/kube-scheduler.log + + # Calculate variables and set them in the manifest. + params="${SCHEDULER_TEST_LOG_LEVEL:-"--v=2"} ${SCHEDULER_TEST_ARGS:-}" + local -r kube_scheduler_docker_tag=$(cat "${KUBE_HOME}/kube-docker-files/kube-scheduler.docker_tag") + + # Remove salt comments and replace variables with values. + local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-scheduler.manifest" + remove-salt-config-comments "${src_file}" + sed -i -e "s@{{params}}@${params}@g" "${src_file}" + sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}" + sed -i -e "s@{{pillar\['kube-scheduler_docker_tag'\]}}@${kube_scheduler_docker_tag}@g" "${src_file}" + cp "${src_file}" /etc/kubernetes/manifests +} + +# A helper function for copying addon manifests and set dir/files +# permissions. 
+# +# $1: addon category under /etc/kubernetes +# $2: manifest source dir +function setup-addon-manifests { + local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/$2" + local -r dst_dir="/etc/kubernetes/$1/$2" + if [[ ! -d "${dst_dir}" ]]; then + mkdir -p "${dst_dir}" + fi + local files=$(find "${src_dir}" -maxdepth 1 -name "*.yaml") + if [[ -n "${files}" ]]; then + cp "${src_dir}/"*.yaml "${dst_dir}" + fi + files=$(find "${src_dir}" -maxdepth 1 -name "*.json") + if [[ -n "${files}" ]]; then + cp "${src_dir}/"*.json "${dst_dir}" + fi + files=$(find "${src_dir}" -maxdepth 1 -name "*.yaml.in") + if [[ -n "${files}" ]]; then + cp "${src_dir}/"*.yaml.in "${dst_dir}" + fi + chown -R root:root "${dst_dir}" + chmod 755 "${dst_dir}" + chmod 644 "${dst_dir}"/* +} + +# Prepares the manifests of k8s addons, and starts the addon manager. +function start-kube-addons { + echo "Prepare kube-addons manifests and start kube addon manager" + local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty" + local -r dst_dir="/etc/kubernetes/addons" + # Set up manifests of other addons. + if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "influxdb" ]] || \ + [[ "${ENABLE_CLUSTER_MONITORING:-}" == "google" ]] || \ + [[ "${ENABLE_CLUSTER_MONITORING:-}" == "standalone" ]] || \ + [[ "${ENABLE_CLUSTER_MONITORING:-}" == "googleinfluxdb" ]]; then + local -r file_dir="cluster-monitoring/${ENABLE_CLUSTER_MONITORING}" + setup-addon-manifests "addons" "${file_dir}" + # Replace the salt configurations with variable values. 
+ metrics_memory="200Mi" + eventer_memory="200Mi" + local -r metrics_memory_per_node="4" + local -r eventer_memory_per_node="500" + if [[ -n "${NUM_NODES:-}" && "${NUM_NODES}" -ge 1 ]]; then + num_kube_nodes="$((${NUM_NODES}-1))" + metrics_memory="$((${num_kube_nodes} * ${metrics_memory_per_node} + 200))Mi" + eventer_memory="$((${num_kube_nodes} * ${eventer_memory_per_node} + 200 * 1024))Ki" + fi + controller_yaml="${dst_dir}/${file_dir}" + if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "googleinfluxdb" ]]; then + controller_yaml="${controller_yaml}/heapster-controller-combined.yaml" + else + controller_yaml="${controller_yaml}/heapster-controller.yaml" + fi + remove-salt-config-comments "${controller_yaml}" + sed -i -e "s@{{ *metrics_memory *}}@${metrics_memory}@g" "${controller_yaml}" + sed -i -e "s@{{ *eventer_memory *}}@${eventer_memory}@g" "${controller_yaml}" + sed -i -e "s@{{ *metrics_memory_per_node *}}@${metrics_memory_per_node}@g" "${controller_yaml}" + sed -i -e "s@{{ *eventer_memory_per_node *}}@${eventer_memory_per_node}@g" "${controller_yaml}" + fi + if [[ "${ENABLE_L7_LOADBALANCING:-}" == "glbc" ]]; then + setup-addon-manifests "addons" "cluster-loadbalancing/glbc" + local -r glbc_yaml="${dst_dir}/cluster-loadbalancing/glbc/glbc.yaml" + remove-salt-config-comments "${glbc_yaml}" + sed -i -e "s@{{ *kube_uid *}}@${KUBE_UID:-}@g" "${glbc_yaml}" + fi + if [[ "${ENABLE_CLUSTER_DNS:-}" == "true" ]]; then + setup-addon-manifests "addons" "dns" + local -r dns_rc_file="${dst_dir}/dns/skydns-rc.yaml" + local -r dns_svc_file="${dst_dir}/dns/skydns-svc.yaml" + mv "${dst_dir}/dns/skydns-rc.yaml.in" "${dns_rc_file}" + mv "${dst_dir}/dns/skydns-svc.yaml.in" "${dns_svc_file}" + # Replace the salt configurations with variable values. 
+ sed -i -e "s@{{ *pillar\['dns_replicas'\] *}}@${DNS_REPLICAS}@g" "${dns_rc_file}" + sed -i -e "s@{{ *pillar\['dns_domain'\] *}}@${DNS_DOMAIN}@g" "${dns_rc_file}" + sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" "${dns_svc_file}" + fi + if [[ "${ENABLE_CLUSTER_REGISTRY:-}" == "true" ]]; then + setup-addon-manifests "addons" "registry" + local -r registry_pv_file="${dst_dir}/registry/registry-pv.yaml" + local -r registry_pvc_file="${dst_dir}/registry/registry-pvc.yaml" + mv "${dst_dir}/registry/registry-pv.yaml.in" "${registry_pv_file}" + mv "${dst_dir}/registry/registry-pvc.yaml.in" "${registry_pvc_file}" + # Replace the salt configurations with variable values. + remove-salt-config-comments "${controller_yaml}" + sed -i -e "s@{{ *pillar\['cluster_registry_disk_size'\] *}}@${CLUSTER_REGISTRY_DISK_SIZE}@g" "${registry_pv_file}" + sed -i -e "s@{{ *pillar\['cluster_registry_disk_size'\] *}}@${CLUSTER_REGISTRY_DISK_SIZE}@g" "${registry_pvc_file}" + sed -i -e "s@{{ *pillar\['cluster_registry_disk_name'\] *}}@${CLUSTER_REGISTRY_DISK}@g" "${registry_pvc_file}" + fi + if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \ + [[ "${LOGGING_DESTINATION:-}" == "elasticsearch" ]] && \ + [[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]]; then + setup-addon-manifests "addons" "fluentd-elasticsearch" + fi + if [[ "${ENABLE_CLUSTER_UI:-}" == "true" ]]; then + setup-addon-manifests "addons" "dashboard" + fi + if echo "${ADMISSION_CONTROL:-}" | grep -q "LimitRanger"; then + setup-addon-manifests "admission-controls" "limit-range" + fi + + # Place addon manager pod manifest. + cp "${src_dir}/kube-addon-manager.yaml" /etc/kubernetes/manifests +} + +# Starts a fluentd static pod for logging. 
+function start-fluentd { + echo "Start fluentd pod" + if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]]; then + if [[ "${LOGGING_DESTINATION:-}" == "gcp" ]]; then + cp "${KUBE_HOME}/kube-manifests/kubernetes/fluentd-gcp.yaml" /etc/kubernetes/manifests/ + elif [[ "${LOGGING_DESTINATION:-}" == "elasticsearch" ]]; then + cp "${KUBE_HOME}/kube-manifests/kubernetes/fluentd-es.yaml" /etc/kubernetes/manifests/ + fi + fi +} + +function reset-motd { + # kubelet is installed both on the master and nodes, and the version is easy to parse (unlike kubectl) + local -r version="$(/usr/bin/kubelet --version=true | cut -f2 -d " ")" + # This logic grabs either a release tag (v1.2.1 or v1.2.1-alpha.1), + # or the git hash that's in the build info. + local gitref="$(echo "${version}" | sed -r "s/(v[0-9]+\.[0-9]+\.[0-9]+)(-[a-z]+\.[0-9]+)?.*/\1\2/g")" + local devel="" + if [[ "${gitref}" != "${version}" ]]; then + devel=" +Note: This looks like a development version, which might not be present on GitHub. +If it isn't, the closest tag is at: + https://github.com/kubernetes/kubernetes/tree/${gitref} +" + gitref="${version//*+/}" + fi + cat > /etc/motd < /etc/motd < "${KUBE_HOME}/kube-env") + rm -f "${tmp_kube_env}" +} + +function validate-hash { + local -r file="$1" + local -r expected="$2" + + actual=$(sha1sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +# Retry a download until we get it. Takes a hash and a set of URLs. +# +# $1 is the sha1 of the URL. Can be "" if the sha1 is unknown. +# $2+ are the URLs to download. +function download-or-bust { + local -r hash="$1" + shift 1 + + local -r urls=( $* ) + while true; do + for url in "${urls[@]}"; do + local file="${url##*/}" + rm -f "${file}" + if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 80 --retry 6 --retry-delay 10 "${url}"; then + echo "== Failed to download ${url}. 
Retrying. ==" + elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. ==" + else + if [[ -n "${hash}" ]]; then + echo "== Downloaded ${url} (SHA1 = ${hash}) ==" + else + echo "== Downloaded ${url} ==" + fi + return + fi + done + done +} + +function split-commas { + echo $1 | tr "," "\n" +} + +# Downloads kubernetes binaries and kube-system manifest tarball, unpacks them, +# and places them into suitable directories. Files are placed in /home/kubernetes. +function install-kube-binary-config { + cd "${KUBE_HOME}" + local -r server_binary_tar_urls=( $(split-commas "${SERVER_BINARY_TAR_URL}") ) + local -r server_binary_tar="${server_binary_tar_urls[0]##*/}" + if [[ -n "${SERVER_BINARY_TAR_HASH:-}" ]]; then + local -r server_binary_tar_hash="${SERVER_BINARY_TAR_HASH}" + else + echo "Downloading binary release sha1 (not found in env)" + download-or-bust "" "${server_binary_tar_urls[@]/.tar.gz/.tar.gz.sha1}" + local -r server_binary_tar_hash=$(cat "${server_binary_tar}.sha1") + fi + echo "Downloading binary release tar" + download-or-bust "${server_binary_tar_hash}" "${server_binary_tar_urls[@]}" + tar xzf "${KUBE_HOME}/${server_binary_tar}" -C "${KUBE_HOME}" --overwrite + # Copy docker_tag and image files to ${KUBE_HOME}/kube-docker-files. + src_dir="${KUBE_HOME}/kubernetes/server/bin" + dst_dir="${KUBE_HOME}/kube-docker-files" + mkdir -p "${dst_dir}" + cp "${src_dir}/"*.docker_tag "${dst_dir}" + if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then + cp "${src_dir}/kube-proxy.tar" "${dst_dir}" + else + cp "${src_dir}/kube-apiserver.tar" "${dst_dir}" + cp "${src_dir}/kube-controller-manager.tar" "${dst_dir}" + cp "${src_dir}/kube-scheduler.tar" "${dst_dir}" + cp -r "${KUBE_HOME}/kubernetes/addons" "${dst_dir}" + fi + local -r kube_bin="${KUBE_HOME}/bin" + # If the built-in binary version is different from the expected version, we use + # the downloaded binary. 
The simplest implementation is to always use the downloaded + # binary without checking the version. But we have another version guardian in GKE. + # So, we compare the versions to ensure this run-time binary replacement is only + # applied for OSS kubernetes. + cp "${src_dir}/kubelet" "${kube_bin}" + local -r builtin_version="$(/usr/bin/kubelet --version=true | cut -f2 -d " ")" + local -r required_version="$(/home/kubernetes/bin/kubelet --version=true | cut -f2 -d " ")" + if [[ "${TEST_CLUSTER:-}" == "true" ]] || \ + [[ "${builtin_version}" != "${required_version}" ]]; then + cp "${src_dir}/kubectl" "${kube_bin}" + chmod 544 "${kube_bin}/kubelet" + chmod 544 "${kube_bin}/kubectl" + mount --bind "${kube_bin}/kubelet" /usr/bin/kubelet + mount --bind "${kube_bin}/kubectl" /usr/bin/kubectl + else + rm -f "${kube_bin}/kubelet" + fi + cp "${KUBE_HOME}/kubernetes/LICENSES" "${KUBE_HOME}" + + # Put kube-system pods manifests in ${KUBE_HOME}/kube-manifests/. + dst_dir="${KUBE_HOME}/kube-manifests" + mkdir -p "${dst_dir}" + local -r manifests_tar_urls=( $(split-commas "${KUBE_MANIFESTS_TAR_URL}") ) + local -r manifests_tar="${manifests_tar_urls[0]##*/}" + if [ -n "${KUBE_MANIFESTS_TAR_HASH:-}" ]; then + local -r manifests_tar_hash="${KUBE_MANIFESTS_TAR_HASH}" + else + echo "Downloading k8s manifests sha1 (not found in env)" + download-or-bust "" "${manifests_tar_urls[@]/.tar.gz/.tar.gz.sha1}" + local -r manifests_tar_hash=$(cat "${manifests_tar}.sha1") + fi + echo "Downloading k8s manifests tar" + download-or-bust "${manifests_tar_hash}" "${manifests_tar_urls[@]}" + tar xzf "${KUBE_HOME}/${manifests_tar}" -C "${dst_dir}" --overwrite + local -r kube_addon_registry="${KUBE_ADDON_REGISTRY:-gcr.io/google_containers}" + if [[ "${kube_addon_registry}" != "gcr.io/google_containers" ]]; then + find "${dst_dir}" -maxdepth 1 -name \*.yaml -or -name \*.yaml.in | \ + xargs sed -ri "s@(image:\s.*)gcr.io/google_containers@\1${kube_addon_registry}@" + find "${dst_dir}" -maxdepth 1 -name 
\*.manifest -or -name \*.json | \ + xargs sed -ri "s@(image\":\s+\")gcr.io/google_containers@\1${kube_addon_registry}@" + fi + cp "${dst_dir}/kubernetes/gci-trusty/gci-configure-helper.sh" "${KUBE_HOME}/bin/configure-helper.sh" + cp "${dst_dir}/kubernetes/gci-trusty/health-monitor.sh" "${KUBE_HOME}/bin/health-monitor.sh" + chmod 544 "${KUBE_HOME}/bin/configure-helper.sh" + chmod 544 "${KUBE_HOME}/bin/health-monitor.sh" + + # Clean up. + rm -rf "${KUBE_HOME}/kubernetes" + rm -f "${KUBE_HOME}/${server_binary_tar}" + rm -f "${KUBE_HOME}/${server_binary_tar}.sha1" + rm -f "${KUBE_HOME}/${manifests_tar}" + rm -f "${KUBE_HOME}/${manifests_tar}.sha1" +} + + +######### Main Function ########## +echo "Start to install kubernetes files" +set-broken-motd +KUBE_HOME="/home/kubernetes" +download-kube-env +source "${KUBE_HOME}/kube-env" +install-kube-binary-config +echo "Done for installing kubernetes files" diff --git a/cluster/gce/gci/health-monitor.sh b/cluster/gce/gci/health-monitor.sh new file mode 100644 index 0000000000..a23fdee458 --- /dev/null +++ b/cluster/gce/gci/health-monitor.sh @@ -0,0 +1,80 @@ +#!/bin/bash + +# Copyright 2016 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script is for master and node instance health monitoring, which is +# packed in kube-manifest tarball. It is executed through a systemd service +# in cluster/gce/gci/.yaml. 
The env variables come from an env +# file provided by the systemd service. + +set -o nounset +set -o pipefail + +# We simply kill the process when there is a failure. Another systemd service will +# automatically restart the process. +function docker_monitoring { + while [ 1 ]; do + if ! timeout 10 docker ps > /dev/null; then + echo "Docker daemon failed!" + pkill docker + # Wait for a while, as we don't want to kill it again before it is really up. + sleep 30 + else + sleep "${SLEEP_SECONDS}" + fi + done +} + +function kubelet_monitoring { + echo "waiting a minute for startup" + sleep 60 + local -r max_seconds=10 + while [ 1 ]; do + if ! curl --insecure -m "${max_seconds}" -f -s https://127.0.0.1:${KUBELET_PORT:-10250}/healthz > /dev/null; then + echo "Kubelet is unhealthy!" + curl --insecure https://127.0.0.1:${KUBELET_PORT:-10250}/healthz + pkill kubelet + # Wait for a while, as we don't want to kill it again before it is really up. + sleep 60 + else + sleep "${SLEEP_SECONDS}" + fi + done +} + + +############## Main Function ################ +if [[ "$#" -ne 1 ]]; then + echo "Usage: health-monitor.sh " + exit 1 +fi + +KUBE_ENV="/home/kubernetes/kube-env" +if [[ ! -e "${KUBE_ENV}" ]]; then + echo "The ${KUBE_ENV} file does not exist!! Terminate health monitoring" + exit 1 +fi + +SLEEP_SECONDS=10 +component=$1 +echo "Start kubernetes health monitoring for ${component}" +source "${KUBE_ENV}" +if [[ "${component}" == "docker" ]]; then + docker_monitoring +elif [[ "${component}" == "kubelet" ]]; then + kubelet_monitoring +else + echo "Health monitoring for component "${component}" is not supported!" +fi diff --git a/cluster/gce/gci/helper.sh b/cluster/gce/gci/helper.sh new file mode 100755 index 0000000000..45d5e24434 --- /dev/null +++ b/cluster/gce/gci/helper.sh @@ -0,0 +1,81 @@ +#!/bin/bash + +# Copyright 2016 The Kubernetes Authors All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A library of helper functions and constant for GCI distro + +# Creates the GCI specific metadata files if they do not exit. +# Assumed var +# KUBE_TEMP +function ensure-gci-metadata-files { + if [[ ! -f "${KUBE_TEMP}/gci-update.txt" ]]; then + cat >"${KUBE_TEMP}/gci-update.txt" << EOF +update_disabled +EOF + fi + if [[ ! -f "${KUBE_TEMP}/gci-docker.txt" ]]; then + cat >"${KUBE_TEMP}/gci-docker.txt" << EOF +true +EOF + fi +} + +# $1: template name (required). +function create-node-instance-template { + local template_name="$1" + ensure-gci-metadata-files + create-node-template "$template_name" "${scope_flags[*]}" \ + "kube-env=${KUBE_TEMP}/node-kube-env.yaml" \ + "user-data=${KUBE_ROOT}/cluster/gce/gci/node.yaml" \ + "configure-sh=${KUBE_ROOT}/cluster/gce/gci/configure.sh" \ + "cluster-name=${KUBE_TEMP}/cluster-name.txt" \ + "gci-update-strategy=${KUBE_TEMP}/gci-update.txt" \ + "gci-ensure-gke-docker=${KUBE_TEMP}/gci-docker.txt" +} + +# create-master-instance creates the master instance. If called with +# an argument, the argument is used as the name to a reserved IP +# address for the master. (In the case of upgrade/repair, we re-use +# the same IP.) +# +# It requires a whole slew of assumed variables, partially due to to +# the call to write-master-env. Listing them would be rather +# futile. 
Instead, we list the required calls to ensure any additional +# +# variables are set: +# ensure-temp-dir +# detect-project +# get-bearer-token +function create-master-instance { + local address_opt="" + [[ -n ${1:-} ]] && address_opt="--address ${1}" + + write-master-env + ensure-gci-metadata-files + gcloud compute instances create "${MASTER_NAME}" \ + ${address_opt} \ + --project "${PROJECT}" \ + --zone "${ZONE}" \ + --machine-type "${MASTER_SIZE}" \ + --image-project="${MASTER_IMAGE_PROJECT}" \ + --image "${MASTER_IMAGE}" \ + --tags "${MASTER_TAG}" \ + --network "${NETWORK}" \ + --scopes "storage-ro,compute-rw,monitoring,logging-write" \ + --can-ip-forward \ + --metadata-from-file \ + "kube-env=${KUBE_TEMP}/master-kube-env.yaml,user-data=${KUBE_ROOT}/cluster/gce/gci/master.yaml,configure-sh=${KUBE_ROOT}/cluster/gce/gci/configure.sh,cluster-name=${KUBE_TEMP}/cluster-name.txt,gci-update-strategy=${KUBE_TEMP}/gci-update.txt,gci-ensure-gke-docker=${KUBE_TEMP}/gci-docker.txt" \ + --disk "name=${MASTER_NAME}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no" +} diff --git a/cluster/gce/gci/master.yaml b/cluster/gce/gci/master.yaml new file mode 100644 index 0000000000..49b1a18aeb --- /dev/null +++ b/cluster/gce/gci/master.yaml @@ -0,0 +1,91 @@ +#cloud-config + +write_files: + - path: /etc/systemd/system/kube-master-installation.service + permissions: 0644 + owner: root + content: | + [Unit] + Description=Download and install k8s binaries and configurations + After=network-online.target + + [Service] + Type=oneshot + RemainAfterExit=yes + ExecStartPre=/bin/mkdir -p /home/kubernetes/bin + ExecStartPre=/bin/mount --bind /home/kubernetes/bin /home/kubernetes/bin + ExecStartPre=/bin/mount -o remount,exec /home/kubernetes/bin + ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --silent --show-error -H "X-Google-Metadata-Request: True" -o /home/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh + 
ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/configure.sh + ExecStart=/home/kubernetes/bin/configure.sh + + [Install] + WantedBy=kubernetes.target + + - path: /etc/systemd/system/kube-master-configuration.service + permissions: 0644 + owner: root + content: | + [Unit] + Description=Configure kubernetes master + After=kube-master-installation.service + + [Service] + Type=oneshot + RemainAfterExit=yes + ExecStart=/home/kubernetes/bin/configure-helper.sh + + [Install] + WantedBy=kubernetes.target + + - path: /etc/systemd/system/kube-docker-monitor.service + permissions: 0644 + owner: root + content: | + [Unit] + Description=Kubernetes health monitoring for docker + After=kube-master-configuration.service + + [Service] + Restart=always + RestartSec=10 + RemainAfterExit=yes + RemainAfterExit=yes + ExecStart=/home/kubernetes/bin/health-monitor.sh docker + + [Install] + WantedBy=kubernetes.target + + - path: /etc/systemd/system/kubelet-monitor.service + permissions: 0644 + owner: root + content: | + [Unit] + Description=Kubernetes health monitoring for kubelet + After=kube-master-configuration.service + + [Service] + Restart=always + RestartSec=10 + RemainAfterExit=yes + RemainAfterExit=yes + ExecStart=/home/kubernetes/bin/health-monitor.sh kubelet + + [Install] + WantedBy=kubernetes.target + + + - path: /etc/systemd/system/kubernetes.target + permissions: 0644 + owner: root + content: | + [Unit] + Description=Kubernetes + +runcmd: + - systemctl daemon-reload + - systemctl enable kube-master-installation.service + - systemctl enable kube-master-configuration.service + - systemctl enable kube-docker-monitor.service + - systemctl enable kubelet-monitor.service + - systemctl start kubernetes.target diff --git a/cluster/gce/gci/node.yaml b/cluster/gce/gci/node.yaml new file mode 100644 index 0000000000..3b4978b2cf --- /dev/null +++ b/cluster/gce/gci/node.yaml @@ -0,0 +1,91 @@ +#cloud-config + +write_files: + - path: /etc/systemd/system/kube-node-installation.service + 
permissions: 0644 + owner: root + content: | + [Unit] + Description=Download and install k8s binaries and configurations + After=network-online.target + + [Service] + Type=oneshot + RemainAfterExit=yes + ExecStartPre=/bin/mkdir -p /home/kubernetes/bin + ExecStartPre=/bin/mount --bind /home/kubernetes/bin /home/kubernetes/bin + ExecStartPre=/bin/mount -o remount,exec /home/kubernetes/bin + ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --silent --show-error -H "X-Google-Metadata-Request: True" -o /home/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh + ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/configure.sh + ExecStart=/home/kubernetes/bin/configure.sh + + [Install] + WantedBy=kubernetes.target + + - path: /etc/systemd/system/kube-node-configuration.service + permissions: 0644 + owner: root + content: | + [Unit] + Description=Configure kubernetes node + After=kube-node-installation.service + + [Service] + Type=oneshot + RemainAfterExit=yes + ExecStart=/home/kubernetes/bin/configure-helper.sh + + [Install] + WantedBy=kubernetes.target + + - path: /etc/systemd/system/kube-docker-monitor.service + permissions: 0644 + owner: root + content: | + [Unit] + Description=Kubernetes health monitoring for docker + After=kube-node-configuration.service + + [Service] + Restart=always + RestartSec=10 + RemainAfterExit=yes + RemainAfterExit=yes + ExecStart=/home/kubernetes/bin/health-monitor.sh docker + + [Install] + WantedBy=kubernetes.target + + - path: /etc/systemd/system/kubelet-monitor.service + permissions: 0644 + owner: root + content: | + [Unit] + Description=Kubernetes health monitoring for kubelet + After=kube-node-configuration.service + + [Service] + Restart=always + RestartSec=10 + RemainAfterExit=yes + RemainAfterExit=yes + ExecStart=/home/kubernetes/bin/health-monitor.sh kubelet + + [Install] + WantedBy=kubernetes.target + + + - path: /etc/systemd/system/kubernetes.target + permissions: 
0644 + owner: root + content: | + [Unit] + Description=Kubernetes + +runcmd: + - systemctl daemon-reload + - systemctl enable kube-node-installation.service + - systemctl enable kube-node-configuration.service + - systemctl enable kube-docker-monitor.service + - systemctl enable kubelet-monitor.service + - systemctl start kubernetes.target diff --git a/cluster/gce/trusty/configure-helper.sh b/cluster/gce/trusty/configure-helper.sh index d5cdcef106..4ce1832193 100644 --- a/cluster/gce/trusty/configure-helper.sh +++ b/cluster/gce/trusty/configure-helper.sh @@ -247,7 +247,6 @@ mount_master_pd() { readonly pd_path="/dev/disk/by-id/google-master-pd" readonly mount_point="/mnt/disks/master-pd" - # TODO(zmerlynn): GKE is still lagging in master-pd creation if [ ! -e "${pd_path}" ]; then return fi diff --git a/cluster/gce/trusty/configure.sh b/cluster/gce/trusty/configure.sh index 254c9780fc..0494cc0377 100644 --- a/cluster/gce/trusty/configure.sh +++ b/cluster/gce/trusty/configure.sh @@ -174,7 +174,7 @@ install_kube_binary_config() { find "${dst_dir}" -maxdepth 1 -name \*.manifest -or -maxdepth 1 -name \*.json | \ xargs sed -ri "s@(image\":\s+\")gcr.io/google_containers@\1${kube_addon_registry}@" fi - cp "${dst_dir}/kubernetes/gci-trusty/configure-helper.sh" /etc/kube-configure-helper.sh + cp "${dst_dir}/kubernetes/gci-trusty/trusty-configure-helper.sh" /etc/kube-configure-helper.sh # Clean up. 
rm -rf "${kube_home}/kubernetes" diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index 0c623f59f3..c7546f50f4 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -23,11 +23,14 @@ source "${KUBE_ROOT}/cluster/gce/${KUBE_CONFIG_FILE-"config-default.sh"}" source "${KUBE_ROOT}/cluster/common.sh" source "${KUBE_ROOT}/cluster/lib/util.sh" -if [[ "${OS_DISTRIBUTION}" == "debian" || "${OS_DISTRIBUTION}" == "coreos" || "${OS_DISTRIBUTION}" == "trusty" ]]; then +if [[ "${OS_DISTRIBUTION}" == "debian" || "${OS_DISTRIBUTION}" == "coreos" || "${OS_DISTRIBUTION}" == "trusty" || "${OS_DISTRIBUTION}" == "gci" ]]; then source "${KUBE_ROOT}/cluster/gce/${OS_DISTRIBUTION}/helper.sh" -elif [[ "${OS_DISTRIBUTION}" == "gci" ]]; then - # TODO(andyzheng0831): Switch to use the GCI specific code. - source "${KUBE_ROOT}/cluster/gce/trusty/helper.sh" +else + echo "Cannot operate on cluster using os distro: ${OS_DISTRIBUTION}" >&2 + exit 1 +fi + +if [[ "${OS_DISTRIBUTION}" == "gci" ]]; then # If the master or node image is not set, we use the latest GCI dev image. # Otherwise, we respect whatever set by the user. gci_images=( $(gcloud compute images list --project google-containers \ @@ -40,9 +43,7 @@ elif [[ "${OS_DISTRIBUTION}" == "gci" ]]; then NODE_IMAGE="${gci_images[0]}" NODE_IMAGE_PROJECT="google-containers" fi -else - echo "Cannot operate on cluster using os distro: ${OS_DISTRIBUTION}" >&2 - exit 1 + fi # Verfiy cluster autoscaler configuration. diff --git a/cluster/saltbase/salt/supervisor/kubelet-checker.sh b/cluster/saltbase/salt/supervisor/kubelet-checker.sh index 41fb061a12..1a83c11ea4 100755 --- a/cluster/saltbase/salt/supervisor/kubelet-checker.sh +++ b/cluster/saltbase/salt/supervisor/kubelet-checker.sh @@ -34,7 +34,7 @@ max_seconds=10 while true; do if ! curl --insecure -m ${max_seconds} -f -s https://127.0.0.1:{{kubelet_port}}/healthz > /dev/null; then echo "kubelet failed!" 
- curl --insecure -s http://127.0.0.1:{{kubelet_port}}/healthz + curl --insecure https://127.0.0.1:{{kubelet_port}}/healthz exit 2 fi sleep 10 diff --git a/hack/verify-flags/exceptions.txt b/hack/verify-flags/exceptions.txt index 3d4c75ea2b..9c39446d98 100644 --- a/hack/verify-flags/exceptions.txt +++ b/hack/verify-flags/exceptions.txt @@ -14,6 +14,8 @@ cluster/gce/configure-vm.sh: cloud_config: ${CLOUD_CONFIG} cluster/gce/configure-vm.sh: env-to-grains "runtime_config" cluster/gce/configure-vm.sh: kubelet_api_servers: '${KUBELET_APISERVER}' cluster/gce/coreos/helper.sh:# cloud_config yaml file should be passed +cluster/gce/gci/configure-helper.sh: local api_servers="--master=https://${KUBERNETES_MASTER_NAME}" +cluster/gce/gci/configure-helper.sh: sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}" cluster/gce/trusty/configure-helper.sh: sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}" cluster/gce/util.sh: local node_ip=$(gcloud compute instances describe --project "${PROJECT}" --zone "${ZONE}" \ cluster/juju/layers/kubernetes/reactive/k8s.py: check_call(split(cmd.format(directory, cluster_name, public_address, From 914c1d61e9443f617471be9c1ee1fe7a25b52a3e Mon Sep 17 00:00:00 2001 From: Andy Zheng Date: Thu, 19 May 2016 21:48:17 -0700 Subject: [PATCH 2/4] GCI: Fix a cluster initialization failure caused by gce.conf --- cluster/gce/gci/configure-helper.sh | 11 +++++++---- cluster/gce/gci/configure.sh | 8 ++++---- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index cad060c575..e357a7eb3f 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -114,14 +114,12 @@ function create-master-auth { echo "${KUBELET_TOKEN},kubelet,kubelet" >> "${known_tokens_csv}" echo "${KUBE_PROXY_TOKEN},kube_proxy,kube_proxy" >> "${known_tokens_csv}" fi - # Do not create /etc/gce.conf unless specified. 
- if [[ -z ${CLOUD_CONFIG:-} ]]; then - return - fi + local use_cloud_config="false" cat <<EOF >/etc/gce.conf [global] EOF if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then + use_cloud_config="true" cat <<EOF >>/etc/gce.conf token-url = ${TOKEN_URL} token-body = ${TOKEN_BODY} @@ -130,15 +128,20 @@ network-name = ${NODE_NETWORK} EOF fi if [[ -n "${NODE_INSTANCE_PREFIX:-}" ]]; then + use_cloud_config="true" cat <<EOF >>/etc/gce.conf node-tags = ${NODE_INSTANCE_PREFIX} EOF fi if [[ -n "${MULTIZONE:-}" ]]; then + use_cloud_config="true" cat <<EOF >>/etc/gce.conf multizone = ${MULTIZONE} EOF fi + if [[ "${use_cloud_config}" != "true" ]]; then + rm -f /etc/gce.conf + fi } function create-kubelet-kubeconfig { diff --git a/cluster/gce/gci/configure.sh b/cluster/gce/gci/configure.sh index 5c62780c22..5cf02455b5 100644 --- a/cluster/gce/gci/configure.sh +++ b/cluster/gce/gci/configure.sh @@ -29,12 +29,12 @@ Broken (or in progress) Kubernetes node setup! Check the cluster initialization using the following commands.
Master instance: - - systemctl status kube-master-installation - - systemctl status kube-master-configuration + - sudo systemctl status kube-master-installation + - sudo systemctl status kube-master-configuration Node instance: - - systemctl status kube-node-installation - - systemctl status kube-node-configuration + - sudo systemctl status kube-node-installation + - sudo systemctl status kube-node-configuration EOF } From bd293e1522828adb3dec57f2cdb30a0380419857 Mon Sep 17 00:00:00 2001 From: Andy Zheng Date: Fri, 20 May 2016 21:25:08 -0700 Subject: [PATCH 3/4] GCI: support CIDR allocator for NodeController --- cluster/gce/gci/configure-helper.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index e357a7eb3f..35950fc20a 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -509,6 +509,9 @@ function start-kube-controller-manager { if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then params+=" --cluster-cidr=${CLUSTER_IP_RANGE}" fi + if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then + params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}" + fi if [[ "${ALLOCATE_NODE_CIDRS:-}" == "true" ]]; then params+=" --allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}" fi From 6bb0a25f7a09e7a49b41c5dee7a520fe092012ff Mon Sep 17 00:00:00 2001 From: Andy Zheng Date: Sun, 22 May 2016 01:05:57 -0700 Subject: [PATCH 4/4] GCI: Add support for GCP webhook authentication and authorization --- cluster/gce/gci/configure-helper.sh | 63 ++++++++++++++++++++++++++++- 1 file changed, 61 insertions(+), 2 deletions(-) diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index 35950fc20a..e5b2e3c82e 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -142,6 +142,46 @@ EOF if [[ "${use_cloud_config}" != "true" ]]; then rm -f /etc/gce.conf fi + + if [[ -n "${GCP_AUTHN_URL:-}" ]]; then + cat 
<<EOF >/etc/gcp_authn.config +clusters: + - name: gcp-authentication-server + cluster: + server: ${GCP_AUTHN_URL} +users: + - name: kube-apiserver + user: + auth-provider: + name: gcp +current-context: webhook +contexts: +- context: + cluster: gcp-authentication-server + user: kube-apiserver + name: webhook +EOF + fi + + if [[ -n "${GCP_AUTHZ_URL:-}" ]]; then + cat <<EOF >/etc/gcp_authz.config +clusters: + - name: gcp-authorization-server + cluster: + server: ${GCP_AUTHZ_URL} +users: + - name: kube-apiserver + user: + auth-provider: + name: gcp +current-context: webhook +contexts: +- context: + cluster: gcp-authorization-server + user: kube-apiserver + name: webhook +EOF + fi } function create-kubelet-kubeconfig { @@ -430,7 +470,6 @@ function start-kube-apiserver { local params="${API_SERVER_TEST_LOG_LEVEL:-"--v=2"} ${APISERVER_TEST_ARGS:-}" params+=" --address=127.0.0.1" params+=" --allow-privileged=true" - params+=" --authorization-mode=ABAC" params+=" --authorization-policy-file=/etc/srv/kubernetes/abac-authz-policy.jsonl" params+=" --basic-auth-file=/etc/srv/kubernetes/basic_auth.csv" params+=" --cloud-provider=gce" @@ -460,13 +499,29 @@ params+=" --ssh-user=${PROXY_SSH_USER}" params+=" --ssh-keyfile=/etc/srv/sshproxy/.sshkeyfile" fi - local -r kube_apiserver_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-apiserver.docker_tag) + webhook_authn_config_mount="" + webhook_authn_config_volume="" + if [[ -n "${GCP_AUTHN_URL:-}" ]]; then + params+=" --authentication-token-webhook-config-file=/etc/gcp_authn.config" + webhook_authn_config_mount="{\"name\": \"webhookauthnconfigmount\",\"mountPath\": \"/etc/gcp_authn.config\", \"readOnly\": false}," + webhook_authn_config_volume="{\"name\": \"webhookauthnconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_authn.config\"}}," + fi + + params+=" --authorization-mode=ABAC" + webhook_config_mount="" + webhook_config_volume="" + if [[ -n "${GCP_AUTHZ_URL:-}" ]]; then + params+=",Webhook
--authorization-webhook-config-file=/etc/gcp_authz.config" + webhook_config_mount="{\"name\": \"webhookconfigmount\",\"mountPath\": \"/etc/gcp_authz.config\", \"readOnly\": false}," + webhook_config_volume="{\"name\": \"webhookconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_authz.config\"}}," + fi local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty" cp "${src_dir}/abac-authz-policy.jsonl" /etc/srv/kubernetes/ src_file="${src_dir}/kube-apiserver.manifest" remove-salt-config-comments "${src_file}" # Evaluate variables. + local -r kube_apiserver_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-apiserver.docker_tag) sed -i -e "s@{{params}}@${params}@g" "${src_file}" sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}" sed -i -e "s@{{srv_sshproxy_path}}@/etc/srv/sshproxy@g" "${src_file}" @@ -479,6 +534,10 @@ function start-kube-apiserver { sed -i -e "s@{{secure_port}}@8080@g" "${src_file}" sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}" sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}" + sed -i -e "s@{{webhook_authn_config_mount}}@${webhook_authn_config_mount}@g" "${src_file}" + sed -i -e "s@{{webhook_authn_config_volume}}@${webhook_authn_config_volume}@g" "${src_file}" + sed -i -e "s@{{webhook_config_mount}}@${webhook_config_mount}@g" "${src_file}" + sed -i -e "s@{{webhook_config_volume}}@${webhook_config_volume}@g" "${src_file}" cp "${src_file}" /etc/kubernetes/manifests }