mirror of https://github.com/k3s-io/k3s
commit a95f1b84cb
@@ -899,7 +899,8 @@ function kube::release::package_kube_manifests_tarball() {
  # Source 2: manifests from cluster/gce/kube-manifests.
  # TODO(andyzheng0831): Enable the following line after finishing issue #16702.
  # cp "${KUBE_ROOT}/cluster/gce/kube-manifests/"* "${release_stage}/"
  # cp "${KUBE_ROOT}/cluster/gce/kube-manifests/*" "${release_stage}/"
  cp -r "${KUBE_ROOT}/cluster/gce/coreos/kube-manifests"/* "${release_stage}/"

  kube::release::clean_cruft

@@ -337,7 +337,7 @@ function find-release-tars() {
  # This tarball is only used by Ubuntu Trusty.
  KUBE_MANIFESTS_TAR=
  if [[ "${KUBE_OS_DISTRIBUTION:-}" == "trusty" ]]; then
  if [[ "${KUBE_OS_DISTRIBUTION:-}" == "trusty" || "${KUBE_OS_DISTRIBUTION:-}" == "coreos" ]]; then
    KUBE_MANIFESTS_TAR="${KUBE_ROOT}/server/kubernetes-manifests.tar.gz"
    if [[ ! -f "${KUBE_MANIFESTS_TAR}" ]]; then
      KUBE_MANIFESTS_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-manifests.tar.gz"
@@ -0,0 +1,33 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

MANIFESTS_DIR=/opt/kube-manifests/kubernetes

echo "Configuring hostname"
hostnamectl set-hostname $(hostname | cut -f1 -d.)

echo "Configuring kubelet"
mkdir -p /var/lib/kubelet
mkdir -p /etc/kubernetes/manifests
src=${MANIFESTS_DIR}/kubelet-config.yaml
dst=/var/lib/kubelet/kubeconfig
cp ${src} ${dst}
sed -i 's/\"/\\\"/g' ${dst} # eval will remove the double quotes if they are not escaped
eval "echo \"$(< ${dst})\"" > ${dst}
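The sed+eval pair at the end of this script is a minimal templating step: it escapes existing double quotes and then lets the shell expand ${VAR} references inside the copied manifest. A rough standalone sketch of the same idea, with a made-up file name and token value (not part of the commit):

    KUBELET_TOKEN=abc123                        # assumed to come from kube-env
    echo 'token: "${KUBELET_TOKEN}"' > /tmp/demo.yaml
    sed -i 's/\"/\\\"/g' /tmp/demo.yaml         # protect the quotes from eval
    eval "echo \"$(< /tmp/demo.yaml)\""         # prints: token: "abc123"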
@@ -0,0 +1,331 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

readonly KNOWN_TOKENS_FILE="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
readonly BASIC_AUTH_FILE="/srv/salt-overlay/salt/kube-apiserver/basic_auth.csv"

# evaluate-manifest evaluates the source manifest with the environment variables.
function evaluate-manifest() {
  local src=$1
  local dst=$2
  cp ${src} ${dst}
  sed -i 's/\"/\\\"/g' ${dst} # eval will remove the double quotes if they are not escaped
  eval "echo \"$(< ${dst})\"" > ${dst}
}

# evaluate-manifests-dir evaluates the source manifests within $1 and puts the
# results in $2.
function evaluate-manifests-dir() {
  local src=$1
  local dst=$2
  mkdir -p ${dst}

  for f in ${src}/*
  do
    evaluate-manifest $f ${dst}/${f##*/}
  done
}

function configure-kube-proxy() {
  echo "Configuring kube-proxy"
  mkdir -p /var/lib/kube-proxy
  evaluate-manifest ${MANIFESTS_DIR}/kubeproxy-config.yaml /var/lib/kube-proxy/kubeconfig
}

function configure-logging() {
  if [[ "${LOGGING_DESTINATION}" == "gcp" ]]; then
    echo "Configuring fluentd-gcp"
    # fluentd-gcp
    evaluate-manifest ${MANIFESTS_DIR}/fluentd-gcp.yaml /etc/kubernetes/manifests/fluentd-gcp.yaml
  elif [[ "${LOGGING_DESTINATION}" == "elasticsearch" ]]; then
    echo "Configuring fluentd-es"
    # fluentd-es
    evaluate-manifest ${MANIFESTS_DIR}/fluentd-es.yaml /etc/kubernetes/manifests/fluentd-es.yaml
  fi
}

function configure-admission-controls() {
  echo "Configuring admission controls"
  mkdir -p /etc/kubernetes/admission-controls
  cp -r ${SALT_DIR}/salt/kube-admission-controls/limit-range /etc/kubernetes/admission-controls/
}

function configure-etcd() {
  echo "Configuring etcd"
  touch /var/log/etcd.log
  evaluate-manifest ${MANIFESTS_DIR}/etcd.yaml /etc/kubernetes/manifests/etcd.yaml
}

function configure-etcd-events() {
  echo "Configuring etcd-events"
  touch /var/log/etcd-events.log
  evaluate-manifest ${MANIFESTS_DIR}/etcd-events.yaml /etc/kubernetes/manifests/etcd-events.yaml
}

function configure-kube-apiserver() {
  echo "Configuring kube-apiserver"

  # Wait for etcd to be up.
  wait-url-up http://127.0.0.1:4001/version

  touch /var/log/kube-apiserver.log

  # Copy the known_tokens and basic_auth files.
  cp ${SALT_OVERLAY}/salt/kube-apiserver/*.csv /srv/kubernetes/
  evaluate-manifest ${MANIFESTS_DIR}/kube-apiserver.yaml /etc/kubernetes/manifests/kube-apiserver.yaml
}

function configure-kube-scheduler() {
  echo "Configuring kube-scheduler"
  touch /var/log/kube-scheduler.log
  evaluate-manifest ${MANIFESTS_DIR}/kube-scheduler.yaml /etc/kubernetes/manifests/kube-scheduler.yaml
}

function configure-kube-controller-manager() {
  # Wait for the api server.
  wait-url-up http://127.0.0.1:8080/version
  echo "Configuring kube-controller-manager"
  touch /var/log/kube-controller-manager.log
  evaluate-manifest ${MANIFESTS_DIR}/kube-controller-manager.yaml /etc/kubernetes/manifests/kube-controller-manager.yaml
}

# Wait until $1 becomes reachable.
function wait-url-up() {
  until curl --silent $1
  do
    sleep 5
  done
}

# Configure addon yamls, and run salt/kube-addons/kube-addon.sh
function configure-master-addons() {
  echo "Configuring master addons"

  local addon_dir=/etc/kubernetes/addons
  mkdir -p ${addon_dir}

  # Copy namespace.yaml
  evaluate-manifest ${MANIFESTS_DIR}/addons/namespace.yaml ${addon_dir}/namespace.yaml

  if [[ "${ENABLE_L7_LOADBALANCING}" == "glbc" ]]; then
    evaluate-manifests-dir ${MANIFESTS_DIR}/addons/cluster-loadbalancing/glbc ${addon_dir}/cluster-loadbalancing/glbc
  fi

  if [[ "${ENABLE_CLUSTER_DNS}" == "true" ]]; then
    evaluate-manifests-dir ${MANIFESTS_DIR}/addons/dns ${addon_dir}/dns
  fi

  if [[ "${ENABLE_CLUSTER_UI}" == "true" ]]; then
    evaluate-manifests-dir ${MANIFESTS_DIR}/addons/kube-ui ${addon_dir}/kube-ui
  fi

  if [[ "${ENABLE_CLUSTER_MONITORING}" == "influxdb" ]]; then
    evaluate-manifests-dir ${MANIFESTS_DIR}/addons/cluster-monitoring/influxdb ${addon_dir}/cluster-monitoring/influxdb
  elif [[ "${ENABLE_CLUSTER_MONITORING}" == "google" ]]; then
    evaluate-manifests-dir ${MANIFESTS_DIR}/addons/cluster-monitoring/google ${addon_dir}/cluster-monitoring/google
  elif [[ "${ENABLE_CLUSTER_MONITORING}" == "standalone" ]]; then
    evaluate-manifests-dir ${MANIFESTS_DIR}/addons/cluster-monitoring/standalone ${addon_dir}/cluster-monitoring/standalone
  elif [[ "${ENABLE_CLUSTER_MONITORING}" == "googleinfluxdb" ]]; then
    evaluate-manifests-dir ${MANIFESTS_DIR}/addons/cluster-monitoring/googleinfluxdb ${addon_dir}/cluster-monitoring/googleinfluxdb
  fi

  # Note that KUBE_ENABLE_INSECURE_REGISTRY is not supported yet.
  if [[ "${ENABLE_CLUSTER_REGISTRY}" == "true" ]]; then
    CLUSTER_REGISTRY_DISK_SIZE=$(convert-bytes-gce-kube "${CLUSTER_REGISTRY_DISK_SIZE}")
    evaluate-manifests-dir ${MANIFESTS_DIR}/addons/registry ${addon_dir}/registry
  fi
}

function configure-master-components() {
  configure-admission-controls
  configure-etcd
  configure-etcd-events
  configure-kube-apiserver
  configure-kube-scheduler
  configure-kube-controller-manager
  configure-master-addons
}

# TODO(yifan): Merge this with mount-master-pd() in configure-vm.sh.
# Pass ${safe_format_and_mount} as an argument.
function mount-master-pd() {
  if [[ ! -e /dev/disk/by-id/google-master-pd ]]; then
    return
  fi
  device_info=$(ls -l /dev/disk/by-id/google-master-pd)
  relative_path=${device_info##* }
  device_path="/dev/disk/by-id/${relative_path}"

  # Format and mount the disk, create directories on it for all of the master's
  # persistent data, and link them to where they're used.
  echo "Mounting master-pd"
  mkdir -p /mnt/master-pd
  safe_format_and_mount=${SALT_DIR}/salt/helpers/safe_format_and_mount
  chmod +x ${safe_format_and_mount}
  ${safe_format_and_mount} -m "mkfs.ext4 -F" "${device_path}" /mnt/master-pd &>/var/log/master-pd-mount.log || \
    { echo "!!! master-pd mount failed, review /var/log/master-pd-mount.log !!!"; return 1; }
  # Contains all the data stored in etcd
  mkdir -m 700 -p /mnt/master-pd/var/etcd
  # Contains the dynamically generated apiserver auth certs and keys
  mkdir -p /mnt/master-pd/srv/kubernetes
  # Contains the cluster's initial config parameters and auth tokens
  mkdir -p /mnt/master-pd/srv/salt-overlay
  # Directory for kube-apiserver to store SSH key (if necessary)
  mkdir -p /mnt/master-pd/srv/sshproxy

  ln -s -f /mnt/master-pd/var/etcd /var/etcd
  ln -s -f /mnt/master-pd/srv/kubernetes /srv/kubernetes
  ln -s -f /mnt/master-pd/srv/sshproxy /srv/sshproxy
  ln -s -f /mnt/master-pd/srv/salt-overlay /srv/salt-overlay

  # This is a bit of a hack to get around the fact that salt has to run after the
  # PD and mounted directory are already set up. We can't give ownership of the
  # directory to etcd until the etcd user and group exist, but they don't exist
  # until salt runs if we don't create them here. We could alternatively make the
  # permissions on the directory more permissive, but this seems less bad.
  if ! id etcd &>/dev/null; then
    useradd -s /sbin/nologin -d /var/etcd etcd
  fi
  chown -R etcd /mnt/master-pd/var/etcd
  chgrp -R etcd /mnt/master-pd/var/etcd
}

# The job of this function is simple, but the basic regular expression syntax makes
# this difficult to read. What we want to do is convert from [0-9]+B, KB, KiB, MB, etc
# into [0-9]+, Ki, Mi, Gi, etc.
# This is done in two steps:
# 1. Convert from [0-9]+X?i?B into [0-9]+X? (X denotes the prefix, ? means the field
#    is optional).
# 2. Attach an 'i' to the end of the string if we find a letter.
# The two-step process is needed to handle the edge case in which we want to convert
# a raw byte count, as the result should be a simple number (e.g. 5B -> 5).
#
# TODO(yifan): Reuse the one defined in configure-vm.sh to remove duplication.
function convert-bytes-gce-kube() {
  local -r storage_space=$1
  echo "${storage_space}" | sed -e 's/^\([0-9]\+\)\([A-Z]\)\?i\?B$/\1\2/g' -e 's/\([A-Z]\)$/\1i/'
}
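
# Example (illustrative values, not part of the original commit): with the two sed
# expressions above, the conversion behaves roughly as follows.
#   convert-bytes-gce-kube "200GB"   # -> 200Gi
#   convert-bytes-gce-kube "100KiB"  # -> 100Ki
#   convert-bytes-gce-kube "5B"      # -> 5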

# TODO(yifan): Use create-salt-master-auth() in configure-vm.sh
function create-salt-master-auth() {
  if [[ ! -e /srv/kubernetes/ca.crt ]]; then
    if [[ ! -z "${CA_CERT:-}" ]] && [[ ! -z "${MASTER_CERT:-}" ]] && [[ ! -z "${MASTER_KEY:-}" ]]; then
      mkdir -p /srv/kubernetes
      (umask 077;
        echo "${CA_CERT}" | base64 -d > /srv/kubernetes/ca.crt;
        echo "${MASTER_CERT}" | base64 -d > /srv/kubernetes/server.cert;
        echo "${MASTER_KEY}" | base64 -d > /srv/kubernetes/server.key;
        # Kubecfg cert/key are optional and included for backwards compatibility.
        # TODO(roberthbailey): Remove these two lines once GKE no longer requires
        # fetching client certs from the master VM.
        echo "${KUBECFG_CERT:-}" | base64 -d > /srv/kubernetes/kubecfg.crt;
        echo "${KUBECFG_KEY:-}" | base64 -d > /srv/kubernetes/kubecfg.key)
    fi
  fi
  if [ ! -e "${BASIC_AUTH_FILE}" ]; then
    mkdir -p /srv/salt-overlay/salt/kube-apiserver
    (umask 077;
      echo "${KUBE_PASSWORD},${KUBE_USER},admin" > "${BASIC_AUTH_FILE}")
  fi
  if [ ! -e "${KNOWN_TOKENS_FILE}" ]; then
    mkdir -p /srv/salt-overlay/salt/kube-apiserver
    (umask 077;
      echo "${KUBE_BEARER_TOKEN},admin,admin" > "${KNOWN_TOKENS_FILE}";
      echo "${KUBELET_TOKEN},kubelet,kubelet" >> "${KNOWN_TOKENS_FILE}";
      echo "${KUBE_PROXY_TOKEN},kube_proxy,kube_proxy" >> "${KNOWN_TOKENS_FILE}")

    # Generate tokens for other "service accounts". Append to known_tokens.
    #
    # NB: If this list ever changes, this script actually has to
    # change to detect the existence of this file, kill any deleted
    # old tokens and add any new tokens (to handle the upgrade case).
    local -r service_accounts=("system:scheduler" "system:controller_manager" "system:logging" "system:monitoring" "system:dns")
    for account in "${service_accounts[@]}"; do
      token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
      echo "${token},${account},${account}" >> "${KNOWN_TOKENS_FILE}"
    done
  fi
}
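
# Example (made-up token values only): each line written above is a comma-separated
# record of a token followed by the account name, so the resulting known_tokens.csv
# looks roughly like:
#   4AbCdEfGh...,admin,admin
#   9IjKlMnOp...,kubelet,kubelet
#   2QrStUvWx...,kube_proxy,kube_proxy
#   7YzAbCdEf...,system:scheduler,system:scheduler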

# $1 is the directory containing all of the docker images.
function load-docker-images() {
  local success
  local restart_docker
  while true; do
    success=true
    restart_docker=false
    for image in "$1/"*; do
      timeout 30 docker load -i "${image}" &>/dev/null
      rc=$?
      if [[ "$rc" == 124 ]]; then
        restart_docker=true
      elif [[ "$rc" != 0 ]]; then
        success=false
      fi
    done
    if [[ "$success" == "true" ]]; then break; fi
    if [[ "$restart_docker" == "true" ]]; then systemctl restart docker; fi
    sleep 15
  done
}
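
# Note: exit status 124 is what `timeout` returns when the 30-second limit is hit,
# which is why that case restarts Docker instead of simply retrying.
# A hypothetical call site (the directory name is an assumption, not taken from this commit):
#   load-docker-images /opt/kubernetes/server/bin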


# TODO(yifan): Make this function more generic for other runtimes.
function load-master-components-images() {
  echo "Loading docker images for master components"
  ${SALT_DIR}/install.sh ${KUBE_BIN_TAR}
  ${SALT_DIR}/salt/kube-master-addons/kube-master-addons.sh

  # Get the image tags.
  KUBE_APISERVER_DOCKER_TAG=$(cat ${KUBE_BIN_DIR}/kube-apiserver.docker_tag)
  KUBE_CONTROLLER_MANAGER_DOCKER_TAG=$(cat ${KUBE_BIN_DIR}/kube-controller-manager.docker_tag)
  KUBE_SCHEDULER_DOCKER_TAG=$(cat ${KUBE_BIN_DIR}/kube-scheduler.docker_tag)
}


##########
#  main  #
##########

KUBE_BIN_TAR=/opt/downloads/kubernetes-server-linux-amd64.tar.gz
KUBE_BIN_DIR=/opt/kubernetes/server/bin
SALT_DIR=/opt/kubernetes/saltbase
SALT_OVERLAY=/srv/salt-overlay
MANIFESTS_DIR=/opt/kube-manifests/kubernetes

# On CoreOS, the hosts file is in /usr/share/baselayout/hosts,
# so we need to manually populate the hosts file here on gce.
echo "127.0.0.1 localhost" >> /etc/hosts
echo "::1 localhost" >> /etc/hosts

if [[ "${KUBERNETES_MASTER}" == "true" ]]; then
  mount-master-pd
  create-salt-master-auth
  load-master-components-images
  configure-master-components
else
  configure-kube-proxy
fi

if [[ "${ENABLE_NODE_LOGGING}" == "true" ]]; then
  configure-logging
fi

echo "Finished configuration successfully!"
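This configure-node script is not baked into the CoreOS image: the cloud-config units added later in this commit fetch it from the GCE instance metadata and execute it, roughly as the kubernetes-configure-node.service unit below does:

    curl --fail --silent --show-error \
      -H "X-Google-Metadata-Request: True" \
      -o /run/configure-node.sh \
      http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-node
    chmod 0755 /run/configure-node.sh
    /run/configure-node.sh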
@@ -16,17 +16,49 @@

# A library of helper functions and constants for the coreos os distro

# By sourcing debian's helper.sh, we use the same create-master-instance
# functions as debian. But we overwrite the create-node-instance-template
# function to use coreos.
source "${KUBE_ROOT}/cluster/gce/debian/helper.sh"

# TODO(dawnchen): Check $CONTAINER_RUNTIME to decide which
# cloud_config yaml file should be passed
# $1: template name (required)
function create-node-instance-template {
function create-node-instance-template() {
  local template_name="$1"
  create-node-template "$template_name" "${scope_flags}" \
    "kube-env=${KUBE_TEMP}/node-kube-env.yaml" \
    "user-data=${KUBE_ROOT}/cluster/gce/coreos/node.yaml"
    "user-data=${KUBE_ROOT}/cluster/gce/coreos/node.yaml" \
    "configure-node=${KUBE_ROOT}/cluster/gce/coreos/configure-node.sh" \
    "configure-kubelet=${KUBE_ROOT}/cluster/gce/coreos/configure-kubelet.sh"
}


# create-master-instance creates the master instance. If called with
# an argument, the argument is used as the name to a reserved IP
# address for the master. (In the case of upgrade/repair, we re-use
# the same IP.)
#
# It requires a whole slew of assumed variables, partially due to
# the call to write-master-env. Listing them would be rather
# futile. Instead, we list the required calls to ensure any additional
# variables are set:
#   ensure-temp-dir
#   detect-project
#   get-bearer-token
#
function create-master-instance() {
  local address_opt=""
  [[ -n ${1:-} ]] && address_opt="--address ${1}"

  write-master-env
  gcloud compute instances create "${MASTER_NAME}" \
    ${address_opt} \
    --project "${PROJECT}" \
    --zone "${ZONE}" \
    --machine-type "${MASTER_SIZE}" \
    --image-project="${MASTER_IMAGE_PROJECT}" \
    --image "${MASTER_IMAGE}" \
    --tags "${MASTER_TAG}" \
    --network "${NETWORK}" \
    --scopes "storage-ro,compute-rw,monitoring,logging-write" \
    --can-ip-forward \
    --metadata-from-file \
      "kube-env=${KUBE_TEMP}/master-kube-env.yaml,user-data=${KUBE_ROOT}/cluster/gce/coreos/master.yaml,configure-node=${KUBE_ROOT}/cluster/gce/coreos/configure-node.sh,configure-kubelet=${KUBE_ROOT}/cluster/gce/coreos/configure-kubelet.sh" \
    --disk "name=${MASTER_NAME}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no"
}
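A minimal sketch of the call sequence that the comment above assumes when the GCE cluster scripts drive this helper (the reserved IP value is made up for illustration; the surrounding kube-up plumbing is assumed):

    ensure-temp-dir
    detect-project
    get-bearer-token
    create-master-instance "104.155.0.10"   # argument optional; omit it for an ephemeral IP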
@ -0,0 +1,21 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
# This must match the --default-backend-service argument of the l7 lb
|
||||
# controller and is required because GCE mandates a default backend.
|
||||
name: default-http-backend
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: glbc
|
||||
kubernetes.io/cluster-service: "true"
|
||||
kubernetes.io/name: "GLBCDefaultBackend"
|
||||
spec:
|
||||
# The default backend must be of type NodePort.
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 8080
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
k8s-app: glbc
|
|
@ -0,0 +1,68 @@
|
|||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: l7-lb-controller
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: glbc
|
||||
version: v0.5.1
|
||||
kubernetes.io/cluster-service: "true"
|
||||
kubernetes.io/name: "GLBC"
|
||||
spec:
|
||||
# There should never be more than 1 controller alive simultaneously.
|
||||
replicas: 1
|
||||
selector:
|
||||
k8s-app: glbc
|
||||
version: v0.5.1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: glbc
|
||||
version: v0.5.1
|
||||
name: glbc
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 600
|
||||
containers:
|
||||
- name: default-http-backend
|
||||
# Any image is permissible as long as:
|
||||
# 1. It serves a 404 page at /
|
||||
# 2. It serves 200 on a /healthz endpoint
|
||||
image: gcr.io/google_containers/defaultbackend:1.0
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8080
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
resources:
|
||||
limits:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
- image: gcr.io/google_containers/glbc:0.5.1
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8081
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 30
|
||||
# healthz reaches out to GCE
|
||||
periodSeconds: 30
|
||||
timeoutSeconds: 5
|
||||
name: l7-lb-controller
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 100Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 50Mi
|
||||
args:
|
||||
- --default-backend-service=kube-system/default-http-backend
|
||||
- --sync-period=300s
|
|
@ -0,0 +1,50 @@
|
|||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: heapster-v10
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: heapster
|
||||
version: v10
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
k8s-app: heapster
|
||||
version: v10
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: heapster
|
||||
version: v10
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/google_containers/heapster:v0.18.2
|
||||
name: heapster
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 300Mi
|
||||
command:
|
||||
- /heapster
|
||||
- --source=kubernetes:''
|
||||
- --sink=gcm
|
||||
- --sink=gcmautoscaling
|
||||
- --sink=gcl
|
||||
- --stats_resolution=30s
|
||||
- --sink_frequency=1m
|
||||
volumeMounts:
|
||||
- name: ssl-certs
|
||||
mountPath: /etc/ssl/certs
|
||||
readOnly: true
|
||||
- name: usrsharecacerts
|
||||
mountPath: /usr/share/ca-certificates
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: ssl-certs
|
||||
hostPath:
|
||||
path: /etc/ssl/certs
|
||||
- name: usrsharecacerts
|
||||
hostPath:
|
||||
path: /usr/share/ca-certificates
|
|
@ -0,0 +1,14 @@
|
|||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: heapster
|
||||
namespace: kube-system
|
||||
labels:
|
||||
kubernetes.io/cluster-service: "true"
|
||||
kubernetes.io/name: "Heapster"
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 8082
|
||||
selector:
|
||||
k8s-app: heapster
|
|
@ -0,0 +1,50 @@
|
|||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: heapster-v10
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: heapster
|
||||
version: v10
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
k8s-app: heapster
|
||||
version: v10
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: heapster
|
||||
version: v10
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/google_containers/heapster:v0.18.2
|
||||
name: heapster
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 300Mi
|
||||
command:
|
||||
- /heapster
|
||||
- --source=kubernetes:''
|
||||
- --sink=gcl
|
||||
- --sink=gcmautoscaling
|
||||
- --sink=influxdb:http://monitoring-influxdb:8086
|
||||
- --stats_resolution=30s
|
||||
- --sink_frequency=1m
|
||||
volumeMounts:
|
||||
- name: ssl-certs
|
||||
mountPath: /etc/ssl/certs
|
||||
readOnly: true
|
||||
- name: usrsharecacerts
|
||||
mountPath: /usr/share/ca-certificates
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: ssl-certs
|
||||
hostPath:
|
||||
path: /etc/ssl/certs
|
||||
- name: usrsharecacerts
|
||||
hostPath:
|
||||
path: /usr/share/ca-certificates
|
|
@ -0,0 +1,18 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: monitoring-grafana
|
||||
namespace: kube-system
|
||||
labels:
|
||||
kubernetes.io/cluster-service: "true"
|
||||
kubernetes.io/name: "Grafana"
|
||||
spec:
|
||||
# On production clusters, consider setting up auth for grafana, and
|
||||
# exposing Grafana either using a LoadBalancer or a public IP.
|
||||
# type: LoadBalancer
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 3000
|
||||
selector:
|
||||
k8s-app: influxGrafana
|
||||
|
|
@ -0,0 +1,34 @@
|
|||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: heapster-v10
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: heapster
|
||||
version: v10
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
k8s-app: heapster
|
||||
version: v10
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: heapster
|
||||
version: v10
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/google_containers/heapster:v0.18.2
|
||||
name: heapster
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 300Mi
|
||||
command:
|
||||
- /heapster
|
||||
- --source=kubernetes:''
|
||||
- --sink=influxdb:http://monitoring-influxdb:8086
|
||||
- --stats_resolution=30s
|
||||
- --sink_frequency=1m
|
|
@ -0,0 +1,14 @@
|
|||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: heapster
|
||||
namespace: kube-system
|
||||
labels:
|
||||
kubernetes.io/cluster-service: "true"
|
||||
kubernetes.io/name: "Heapster"
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 8082
|
||||
selector:
|
||||
k8s-app: heapster
|
|
@ -0,0 +1,70 @@
|
|||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: monitoring-influxdb-grafana-v2
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: influxGrafana
|
||||
version: v2
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
k8s-app: influxGrafana
|
||||
version: v2
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: influxGrafana
|
||||
version: v2
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/google_containers/heapster_influxdb:v0.4
|
||||
name: influxdb
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 200Mi
|
||||
ports:
|
||||
- containerPort: 8083
|
||||
hostPort: 8083
|
||||
- containerPort: 8086
|
||||
hostPort: 8086
|
||||
volumeMounts:
|
||||
- name: influxdb-persistent-storage
|
||||
mountPath: /data
|
||||
- image: beta.gcr.io/google_containers/heapster_grafana:v2.1.1
|
||||
name: grafana
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 100Mi
|
||||
env:
|
||||
# This variable is required to setup templates in Grafana.
|
||||
- name: INFLUXDB_SERVICE_URL
|
||||
value: http://monitoring-influxdb:8086
|
||||
# The following env variables are required to make Grafana accessible via
|
||||
# the kubernetes api-server proxy. On production clusters, we recommend
|
||||
# removing these env variables, setup auth for grafana, and expose the grafana
|
||||
# service using a LoadBalancer or a public IP.
|
||||
- name: GF_AUTH_BASIC_ENABLED
|
||||
value: "false"
|
||||
- name: GF_AUTH_ANONYMOUS_ENABLED
|
||||
value: "true"
|
||||
- name: GF_AUTH_ANONYMOUS_ORG_ROLE
|
||||
value: Admin
|
||||
- name: GF_SERVER_ROOT_URL
|
||||
value: /api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/
|
||||
volumeMounts:
|
||||
- name: grafana-persistent-storage
|
||||
mountPath: /var
|
||||
|
||||
volumes:
|
||||
- name: influxdb-persistent-storage
|
||||
emptyDir: {}
|
||||
- name: grafana-persistent-storage
|
||||
emptyDir: {}
|
||||
|
||||
|
|
@ -0,0 +1,19 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: monitoring-influxdb
|
||||
namespace: kube-system
|
||||
labels:
|
||||
kubernetes.io/cluster-service: "true"
|
||||
kubernetes.io/name: "InfluxDB"
|
||||
spec:
|
||||
ports:
|
||||
- name: http
|
||||
port: 8083
|
||||
targetPort: 8083
|
||||
- name: api
|
||||
port: 8086
|
||||
targetPort: 8086
|
||||
selector:
|
||||
k8s-app: influxGrafana
|
||||
|
|
@ -0,0 +1,31 @@
|
|||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: heapster-v10
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: heapster
|
||||
version: v10
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
k8s-app: heapster
|
||||
version: v10
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: heapster
|
||||
version: v10
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/google_containers/heapster:v0.18.2
|
||||
name: heapster
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 300Mi
|
||||
command:
|
||||
- /heapster
|
||||
- --source=kubernetes:''
|
|
@ -0,0 +1,14 @@
|
|||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: heapster
|
||||
namespace: kube-system
|
||||
labels:
|
||||
kubernetes.io/cluster-service: "true"
|
||||
kubernetes.io/name: "Heapster"
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 8082
|
||||
selector:
|
||||
k8s-app: heapster
|
|
@ -0,0 +1,115 @@
|
|||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: kube-dns-v10
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: kube-dns
|
||||
version: v10
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
replicas: ${DNS_REPLICAS}
|
||||
selector:
|
||||
k8s-app: kube-dns
|
||||
version: v10
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kube-dns
|
||||
version: v10
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
containers:
|
||||
- name: etcd
|
||||
image: gcr.io/google_containers/etcd:2.0.9
|
||||
resources:
|
||||
# keep request = limit to keep this container in guaranteed class
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 50Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 50Mi
|
||||
command:
|
||||
- /usr/local/bin/etcd
|
||||
- -data-dir
|
||||
- /var/etcd/data
|
||||
- -listen-client-urls
|
||||
- http://127.0.0.1:2379,http://127.0.0.1:4001
|
||||
- -advertise-client-urls
|
||||
- http://127.0.0.1:2379,http://127.0.0.1:4001
|
||||
- -initial-cluster-token
|
||||
- skydns-etcd
|
||||
volumeMounts:
|
||||
- name: etcd-storage
|
||||
mountPath: /var/etcd/data
|
||||
- name: kube2sky
|
||||
image: gcr.io/google_containers/kube2sky:1.12
|
||||
resources:
|
||||
# keep request = limit to keep this container in guaranteed class
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 50Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 50Mi
|
||||
args:
|
||||
# command = "/kube2sky"
|
||||
- -domain=${DNS_DOMAIN}
|
||||
- name: skydns
|
||||
image: gcr.io/google_containers/skydns:2015-10-13-8c72f8c
|
||||
resources:
|
||||
# keep request = limit to keep this container in guaranteed class
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 50Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 50Mi
|
||||
args:
|
||||
# command = "/skydns"
|
||||
- -machines=http://127.0.0.1:4001
|
||||
- -addr=0.0.0.0:53
|
||||
- -ns-rotate=false
|
||||
- -domain=${DNS_DOMAIN}.
|
||||
ports:
|
||||
- containerPort: 53
|
||||
name: dns
|
||||
protocol: UDP
|
||||
- containerPort: 53
|
||||
name: dns-tcp
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8080
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8080
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 1
|
||||
timeoutSeconds: 5
|
||||
- name: healthz
|
||||
image: gcr.io/google_containers/exechealthz:1.0
|
||||
resources:
|
||||
# keep request = limit to keep this container in guaranteed class
|
||||
limits:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
args:
|
||||
- -cmd=nslookup kubernetes.default.svc.${DNS_DOMAIN} 127.0.0.1 >/dev/null
|
||||
- -port=8080
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
protocol: TCP
|
||||
volumes:
|
||||
- name: etcd-storage
|
||||
emptyDir: {}
|
||||
dnsPolicy: Default # Don't use cluster DNS.
|
|
@ -0,0 +1,20 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: kube-dns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: kube-dns
|
||||
kubernetes.io/cluster-service: "true"
|
||||
kubernetes.io/name: "KubeDNS"
|
||||
spec:
|
||||
selector:
|
||||
k8s-app: kube-dns
|
||||
clusterIP: ${DNS_SERVER_IP}
|
||||
ports:
|
||||
- name: dns
|
||||
port: 53
|
||||
protocol: UDP
|
||||
- name: dns-tcp
|
||||
port: 53
|
||||
protocol: TCP
|
|
@ -0,0 +1,40 @@
|
|||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: elasticsearch-logging-v1
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: elasticsearch-logging
|
||||
version: v1
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
replicas: 2
|
||||
selector:
|
||||
k8s-app: elasticsearch-logging
|
||||
version: v1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: elasticsearch-logging
|
||||
version: v1
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/google_containers/elasticsearch:1.7
|
||||
name: elasticsearch-logging
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
ports:
|
||||
- containerPort: 9200
|
||||
name: db
|
||||
protocol: TCP
|
||||
- containerPort: 9300
|
||||
name: transport
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: es-persistent-storage
|
||||
mountPath: /data
|
||||
volumes:
|
||||
- name: es-persistent-storage
|
||||
emptyDir: {}
|
|
@ -0,0 +1,16 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: elasticsearch-logging
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: elasticsearch-logging
|
||||
kubernetes.io/cluster-service: "true"
|
||||
kubernetes.io/name: "Elasticsearch"
|
||||
spec:
|
||||
ports:
|
||||
- port: 9200
|
||||
protocol: TCP
|
||||
targetPort: db
|
||||
selector:
|
||||
k8s-app: elasticsearch-logging
|
|
@ -0,0 +1,34 @@
|
|||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: kibana-logging-v1
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: kibana-logging
|
||||
version: v1
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
k8s-app: kibana-logging
|
||||
version: v1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kibana-logging
|
||||
version: v1
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
containers:
|
||||
- name: kibana-logging
|
||||
image: gcr.io/google_containers/kibana:1.3
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
env:
|
||||
- name: "ELASTICSEARCH_URL"
|
||||
value: "http://elasticsearch-logging:9200"
|
||||
ports:
|
||||
- containerPort: 5601
|
||||
name: ui
|
||||
protocol: TCP
|
|
@ -0,0 +1,16 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: kibana-logging
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: kibana-logging
|
||||
kubernetes.io/cluster-service: "true"
|
||||
kubernetes.io/name: "Kibana"
|
||||
spec:
|
||||
ports:
|
||||
- port: 5601
|
||||
protocol: TCP
|
||||
targetPort: ui
|
||||
selector:
|
||||
k8s-app: kibana-logging
|
|
@ -0,0 +1,36 @@
|
|||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: kube-ui-v3
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: kube-ui
|
||||
version: v3
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
k8s-app: kube-ui
|
||||
version: v3
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kube-ui
|
||||
version: v3
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
containers:
|
||||
- name: kube-ui
|
||||
image: gcr.io/google_containers/kube-ui:v3
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 50Mi
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: 8080
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
|
@ -0,0 +1,15 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: kube-ui
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: kube-ui
|
||||
kubernetes.io/cluster-service: "true"
|
||||
kubernetes.io/name: "KubeUI"
|
||||
spec:
|
||||
selector:
|
||||
k8s-app: kube-ui
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 8080
|
|
@ -0,0 +1,4 @@
|
|||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: kube-system
|
|
@ -0,0 +1,14 @@
|
|||
kind: PersistentVolume
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: kube-system-kube-registry-pv
|
||||
labels:
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
capacity:
|
||||
storage: ${CLUSTER_REGISTRY_DISK_SIZE}
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
gcePersistentDisk:
|
||||
pdName: ${CLUSTER_REGISTRY_DISK}
|
||||
fsType: "ext4"
|
|
@ -0,0 +1,13 @@
|
|||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: kube-registry-pvc
|
||||
namespace: kube-system
|
||||
labels:
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: ${CLUSTER_REGISTRY_DISK_SIZE}
|
|
@ -0,0 +1,44 @@
|
|||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: kube-registry-v0
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: kube-registry
|
||||
version: v0
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
k8s-app: kube-registry
|
||||
version: v0
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kube-registry
|
||||
version: v0
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
containers:
|
||||
- name: registry
|
||||
image: registry:2
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 100Mi
|
||||
env:
|
||||
- name: REGISTRY_HTTP_ADDR
|
||||
value: :5000
|
||||
- name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
|
||||
value: /var/lib/registry
|
||||
volumeMounts:
|
||||
- name: image-store
|
||||
mountPath: /var/lib/registry
|
||||
ports:
|
||||
- containerPort: 5000
|
||||
name: registry
|
||||
protocol: TCP
|
||||
volumes:
|
||||
- name: image-store
|
||||
persistentVolumeClaim:
|
||||
claimName: kube-registry-pvc
|
|
@ -0,0 +1,16 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: kube-registry
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: kube-registry
|
||||
kubernetes.io/cluster-service: "true"
|
||||
kubernetes.io/name: "KubeRegistry"
|
||||
spec:
|
||||
selector:
|
||||
k8s-app: kube-registry
|
||||
ports:
|
||||
- name: registry
|
||||
port: 5000
|
||||
protocol: TCP
|
|
@ -0,0 +1,58 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: etcd-server-events-kubernetes-master
|
||||
namespace: kube-system
|
||||
spec:
|
||||
containers:
|
||||
- command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- /usr/local/bin/etcd
|
||||
--listen-peer-urls=http://127.0.0.1:2381
|
||||
--addr=127.0.0.1:4002
|
||||
--bind-addr=127.0.0.1:4002
|
||||
--data-dir=/var/etcd/data-events
|
||||
1>>/var/log/etcd-events.log 2>&1
|
||||
image: gcr.io/google_containers/etcd:2.0.12
|
||||
imagePullPolicy: IfNotPresent
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
host: 127.0.0.1
|
||||
path: /health
|
||||
port: 4002
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 15
|
||||
name: etcd-container
|
||||
ports:
|
||||
- containerPort: 2381
|
||||
hostPort: 2381
|
||||
name: serverport
|
||||
protocol: TCP
|
||||
- containerPort: 4002
|
||||
hostPort: 4002
|
||||
name: clientport
|
||||
protocol: TCP
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
requests:
|
||||
cpu: 100m
|
||||
volumeMounts:
|
||||
- mountPath: /var/etcd
|
||||
name: varetcd
|
||||
- mountPath: /var/log/etcd-events.log
|
||||
name: varlogetcd
|
||||
dnsPolicy: ClusterFirst
|
||||
hostNetwork: true
|
||||
nodeName: kubernetes-master
|
||||
restartPolicy: Always
|
||||
terminationGracePeriodSeconds: 30
|
||||
volumes:
|
||||
- hostPath:
|
||||
path: /mnt/master-pd/var/etcd
|
||||
name: varetcd
|
||||
- hostPath:
|
||||
path: /var/log/etcd-events.log
|
||||
name: varlogetcd
|
|
@ -0,0 +1,57 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: etcd-server-kubernetes-master
|
||||
namespace: kube-system
|
||||
spec:
|
||||
containers:
|
||||
- command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- /usr/local/bin/etcd
|
||||
--listen-peer-urls=http://127.0.0.1:2380
|
||||
--addr=127.0.0.1:4001
|
||||
--bind-addr=127.0.0.1:4001
|
||||
--data-dir=/var/etcd/data
|
||||
1>>/var/log/etcd.log 2>&1
|
||||
image: gcr.io/google_containers/etcd:2.0.12
|
||||
imagePullPolicy: IfNotPresent
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
host: 127.0.0.1
|
||||
path: /health
|
||||
port: 4001
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 15
|
||||
name: etcd-container
|
||||
ports:
|
||||
- containerPort: 2380
|
||||
hostPort: 2380
|
||||
name: serverport
|
||||
protocol: TCP
|
||||
- containerPort: 4001
|
||||
hostPort: 4001
|
||||
name: clientport
|
||||
protocol: TCP
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
requests:
|
||||
cpu: 200m
|
||||
volumeMounts:
|
||||
- mountPath: /var/etcd
|
||||
name: varetcd
|
||||
- mountPath: /var/log/etcd.log
|
||||
name: varlogetcd
|
||||
dnsPolicy: ClusterFirst
|
||||
hostNetwork: true
|
||||
restartPolicy: Always
|
||||
terminationGracePeriodSeconds: 30
|
||||
volumes:
|
||||
- hostPath:
|
||||
path: /mnt/master-pd/var/etcd
|
||||
name: varetcd
|
||||
- hostPath:
|
||||
path: /var/log/etcd.log
|
||||
name: varlogetcd
|
|
@ -0,0 +1,85 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: kube-apiserver-kubernetes-master
|
||||
namespace: kube-system
|
||||
spec:
|
||||
containers:
|
||||
- command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- /usr/local/bin/kube-apiserver
|
||||
--address=127.0.0.1
|
||||
--etcd-servers=http://127.0.0.1:4001
|
||||
--etcd-servers-overrides=/events#http://127.0.0.1:4002
|
||||
--cloud-provider=gce
|
||||
--admission-control=${ADMISSION_CONTROL}
|
||||
--service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}
|
||||
--client-ca-file=/srv/kubernetes/ca.crt
|
||||
--basic-auth-file=/srv/kubernetes/basic_auth.csv
|
||||
--tls-cert-file=/srv/kubernetes/server.cert
|
||||
--tls-private-key-file=/srv/kubernetes/server.key
|
||||
--secure-port=443
|
||||
--token-auth-file=/srv/kubernetes/known_tokens.csv
|
||||
--v=2
|
||||
--allow-privileged=True
|
||||
1>>/var/log/kube-apiserver.log 2>&1
|
||||
image: gcr.io/google_containers/kube-apiserver:${KUBE_APISERVER_DOCKER_TAG}
|
||||
imagePullPolicy: IfNotPresent
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
host: 127.0.0.1
|
||||
path: /healthz
|
||||
port: 8080
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 15
|
||||
name: kube-apiserver
|
||||
ports:
|
||||
- containerPort: 443
|
||||
hostPort: 443
|
||||
name: https
|
||||
protocol: TCP
|
||||
- containerPort: 8080
|
||||
hostPort: 8080
|
||||
name: local
|
||||
protocol: TCP
|
||||
resources:
|
||||
limits:
|
||||
cpu: 250m
|
||||
requests:
|
||||
cpu: 250m
|
||||
volumeMounts:
|
||||
- mountPath: /srv/kubernetes
|
||||
name: srvkube
|
||||
readOnly: true
|
||||
- mountPath: /var/log/kube-apiserver.log
|
||||
name: logfile
|
||||
- mountPath: /etc/ssl
|
||||
name: etcssl
|
||||
readOnly: true
|
||||
- mountPath: /usr/share/ca-certificates
|
||||
name: usrsharecacerts
|
||||
readOnly: true
|
||||
- mountPath: /srv/sshproxy
|
||||
name: srvsshproxy
|
||||
dnsPolicy: ClusterFirst
|
||||
hostNetwork: true
|
||||
restartPolicy: Always
|
||||
terminationGracePeriodSeconds: 30
|
||||
volumes:
|
||||
- hostPath:
|
||||
path: /srv/kubernetes
|
||||
name: srvkube
|
||||
- hostPath:
|
||||
path: /var/log/kube-apiserver.log
|
||||
name: logfile
|
||||
- hostPath:
|
||||
path: /etc/ssl
|
||||
name: etcssl
|
||||
- hostPath:
|
||||
path: /usr/share/ca-certificates
|
||||
name: usrsharecacerts
|
||||
- hostPath:
|
||||
path: /srv/sshproxy
|
||||
name: srvsshproxy
|
|
@ -0,0 +1,65 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: kube-controller-manager-kubernetes-master
|
||||
namespace: kube-system
|
||||
spec:
|
||||
containers:
|
||||
- command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- /usr/local/bin/kube-controller-manager
|
||||
--master=127.0.0.1:8080
|
||||
--cluster-name=${INSTANCE_PREFIX}
|
||||
--cluster-cidr=${CLUSTER_IP_RANGE}
|
||||
--allocate-node-cidrs=true
|
||||
--cloud-provider=gce
|
||||
--service-account-private-key-file=/srv/kubernetes/server.key
|
||||
--v=2
|
||||
--root-ca-file=/srv/kubernetes/ca.crt
|
||||
1>>/var/log/kube-controller-manager.log 2>&1
|
||||
image: gcr.io/google_containers/kube-controller-manager:${KUBE_CONTROLLER_MANAGER_DOCKER_TAG}
|
||||
imagePullPolicy: IfNotPresent
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
host: 127.0.0.1
|
||||
path: /healthz
|
||||
port: 10252
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 15
|
||||
name: kube-controller-manager
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
requests:
|
||||
cpu: 200m
|
||||
volumeMounts:
|
||||
- mountPath: /srv/kubernetes
|
||||
name: srvkube
|
||||
readOnly: true
|
||||
- mountPath: /var/log/kube-controller-manager.log
|
||||
name: logfile
|
||||
- mountPath: /etc/ssl
|
||||
name: etcssl
|
||||
readOnly: true
|
||||
- mountPath: /usr/share/ca-certificates
|
||||
name: usrsharecacerts
|
||||
readOnly: true
|
||||
dnsPolicy: ClusterFirst
|
||||
hostNetwork: true
|
||||
restartPolicy: Always
|
||||
terminationGracePeriodSeconds: 30
|
||||
volumes:
|
||||
- hostPath:
|
||||
path: /srv/kubernetes
|
||||
name: srvkube
|
||||
- hostPath:
|
||||
path: /var/log/kube-controller-manager.log
|
||||
name: logfile
|
||||
- hostPath:
|
||||
path: /etc/ssl
|
||||
name: etcssl
|
||||
- hostPath:
|
||||
path: /usr/share/ca-certificates
|
||||
name: usrsharecacerts
|
|
@ -0,0 +1,42 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: kube-scheduler-kubernetes-master
|
||||
namespace: kube-system
|
||||
spec:
|
||||
containers:
|
||||
- command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- /usr/local/bin/kube-scheduler
|
||||
--master=127.0.0.1:8080
|
||||
--v=2
|
||||
1>>/var/log/kube-scheduler.log 2>&1
|
||||
image: gcr.io/google_containers/kube-scheduler:${KUBE_SCHEDULER_DOCKER_TAG}
|
||||
imagePullPolicy: IfNotPresent
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
host: 127.0.0.1
|
||||
path: /healthz
|
||||
port: 10251
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 15
|
||||
name: kube-scheduler
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
requests:
|
||||
cpu: 100m
|
||||
volumeMounts:
|
||||
- mountPath: /var/log/kube-scheduler.log
|
||||
name: logfile
|
||||
dnsPolicy: ClusterFirst
|
||||
hostNetwork: true
|
||||
nodeName: kubernetes-master
|
||||
restartPolicy: Always
|
||||
terminationGracePeriodSeconds: 30
|
||||
volumes:
|
||||
- hostPath:
|
||||
path: /var/log/kube-scheduler.log
|
||||
name: logfile
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Namespace",
|
||||
"metadata": {
|
||||
"name": "kube-system"
|
||||
}
|
||||
}
|
|
@ -0,0 +1,17 @@
|
|||
apiVersion: v1
|
||||
kind: Config
|
||||
users:
|
||||
- name: kubelet
|
||||
user:
|
||||
client-certificate-data: ${KUBELET_CERT}
|
||||
client-key-data: ${KUBELET_KEY}
|
||||
clusters:
|
||||
- name: local
|
||||
cluster:
|
||||
certificate-authority-data: ${CA_CERT}
|
||||
contexts:
|
||||
- context:
|
||||
cluster: local
|
||||
user: kubelet
|
||||
name: service-account-context
|
||||
current-context: service-account-context
|
|
@ -0,0 +1,16 @@
|
|||
apiVersion: v1
|
||||
kind: Config
|
||||
users:
|
||||
- name: kube-proxy
|
||||
user:
|
||||
token: ${KUBE_PROXY_TOKEN}
|
||||
clusters:
|
||||
- name: local
|
||||
cluster:
|
||||
certificate-authority-data: ${CA_CERT}
|
||||
contexts:
|
||||
- context:
|
||||
cluster: local
|
||||
user: kube-proxy
|
||||
name: service-account-context
|
||||
current-context: service-account-context
|
|
@ -0,0 +1,188 @@
|
|||
#cloud-config
|
||||
|
||||
coreos:
|
||||
units:
|
||||
- name: kube-env.service
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Fetch kubernetes-node-environment
|
||||
Requires=network-online.target
|
||||
After=network-online.target
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
ExecStartPre=/usr/bin/curl --fail --silent --show-error \
|
||||
-H "X-Google-Metadata-Request: True" \
|
||||
-o /etc/kube-env.yaml \
|
||||
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env
|
||||
# Transform the yaml to env file.
|
||||
ExecStartPre=/usr/bin/mv /etc/kube-env.yaml /etc/kube-env
|
||||
ExecStart=/usr/bin/sed -i "s/: '/=/;s/'$//" /etc/kube-env
|
||||
|
||||
- name: kubernetes-install-rkt.service
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Fetch rkt
|
||||
Documentation=http://github.com/coreos/rkt
|
||||
Requires=network-online.target
|
||||
After=network-online.target
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
EnvironmentFile=/etc/kube-env
|
||||
ExecStartPre=/usr/bin/mkdir -p /etc/rkt
|
||||
ExecStartPre=/usr/bin/mkdir -p /opt/downloads
|
||||
ExecStartPre=/usr/bin/curl --location --create-dirs --output /opt/downloads/rkt.tar.gz https://github.com/coreos/rkt/releases/download/v${RKT_VERSION}/rkt-v${RKT_VERSION}.tar.gz
|
||||
ExecStart=/usr/bin/tar xf /opt/downloads/rkt.tar.gz -C /opt --overwrite
|
||||
|
||||
- name: kubernetes-download-salt.service
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Download salt
|
||||
Requires=network-online.target
|
||||
After=network-online.target
|
||||
Requires=kube-env.service
|
||||
After=kube-env.service
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
EnvironmentFile=/etc/kube-env
|
||||
ExecStartPre=/usr/bin/mkdir -p /opt/downloads
|
||||
ExecStartPre=/usr/bin/curl --location --create-dirs --output /opt/downloads/kubernetes-salt.tar.gz ${SALT_TAR_URL}
|
||||
# TODO(yifan): Check hash.
|
||||
ExecStart=/usr/bin/tar xf /opt/downloads/kubernetes-salt.tar.gz -C /opt --overwrite
|
||||
|
||||
- name: kubernetes-download-manifests.service
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Download manifests
|
||||
Requires=network-online.target
|
||||
After=network-online.target
|
||||
Requires=kube-env.service
|
||||
After=kube-env.service
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
EnvironmentFile=/etc/kube-env
|
||||
ExecStartPre=/usr/bin/mkdir -p /opt/downloads
|
||||
ExecStartPre=/usr/bin/curl --location --create-dirs --output /opt/downloads/kubernetes-manifests.tar.gz ${KUBE_MANIFESTS_TAR_URL}
|
||||
# TODO(yifan): Check hash.
|
||||
ExecStartPre=/usr/bin/mkdir -p /opt/kube-manifests
|
||||
ExecStart=/usr/bin/tar xf /opt/downloads/kubernetes-manifests.tar.gz -C /opt/kube-manifests --overwrite
|
||||
|
||||
- name: kubernetes-install-node.service
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Install Kubernetes Server
|
||||
Requires=network-online.target
|
||||
After=network-online.target
|
||||
Requires=kube-env.service
|
||||
After=kube-env.service
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
EnvironmentFile=/etc/kube-env
|
||||
ExecStartPre=/usr/bin/mkdir -p /opt/downloads
|
||||
ExecStartPre=/usr/bin/curl --location --create-dirs --output /opt/downloads/kubernetes-server-linux-amd64.tar.gz ${SERVER_BINARY_TAR_URL}
|
||||
# TODO(yifan): Check hash.
|
||||
ExecStart=/usr/bin/tar xf /opt/downloads/kubernetes-server-linux-amd64.tar.gz -C /opt --overwrite
|
||||
|
||||
- name: kubelet.service
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Run Kubelet service
|
||||
Requires=network-online.target
|
||||
After=network-online.target
|
||||
Requires=kube-env.service
|
||||
After=kube-env.service
|
||||
Requires=kubernetes-download-manifests.service
|
||||
After=kubernetes-download-manifests.service
|
||||
[Service]
|
||||
EnvironmentFile=/etc/kube-env
|
||||
ExecStartPre=/usr/bin/curl --fail --silent --show-error \
|
||||
-H "X-Google-Metadata-Request: True" \
|
||||
-o /run/configure-kubelet.sh \
|
||||
http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-kubelet
|
||||
ExecStartPre=/usr/bin/chmod 0755 /run/configure-kubelet.sh
|
||||
ExecStartPre=/run/configure-kubelet.sh
|
||||
ExecStart=/opt/kubernetes/server/bin/kubelet \
|
||||
--api-servers=https://${INSTANCE_PREFIX}-master \
|
||||
--enable-debugging-handlers=false \
|
||||
--cloud-provider=gce \
|
||||
--config=/etc/kubernetes/manifests \
|
||||
--allow-privileged=true \
|
||||
--v=2 \
|
||||
--cluster-dns=${DNS_SERVER_IP} \
|
||||
--cluster-domain=${DNS_DOMAIN} \
|
||||
--logtostderr=true \
|
||||
--container-runtime=${KUBERNETES_CONTAINER_RUNTIME} \
|
||||
--rkt-path=/opt/rkt-v${RKT_VERSION}/rkt \
|
||||
--configure-cbr0=${KUBERNETES_CONFIGURE_CBR0} \
|
||||
--pod-cidr=${MASTER_IP_RANGE} \
|
||||
--register-schedulable=false \
|
||||
--reconcile-cidr=false
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
|
||||
- name: docker.service
|
||||
command: start
|
||||
drop-ins:
|
||||
- name: 50-docker-opts.conf
|
||||
content: |
|
||||
[Service]
|
||||
Environment='DOCKER_OPTS=--bridge=cbr0 --iptables=false --ip-masq=false'
|
||||
MountFlags=slave
|
||||
LimitNOFILE=1048576
|
||||
LimitNPROC=1048576
|
||||
LimitCORE=infinity
|
||||
Restart=always
|
||||
RestartSec=2s
|
||||
StartLimitInterval=0
|
||||
|
||||
- name: kubernetes-configure-node.service
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Configure Node For Kubernetes service
|
||||
Requires=kubernetes-install-node.service
|
||||
After=kubernetes-install-node.service
|
||||
Requires=kubernetes-install-rkt.service
|
||||
After=kubernetes-install-rkt.service
|
||||
Requires=kubernetes-download-salt.service
|
||||
After=kubernetes-download-salt.service
|
||||
Requires=kubernetes-download-manifests.service
|
||||
After=kubernetes-download-manifests.service
|
||||
# Need the kubelet/docker running because we will use docker load for docker images.
|
||||
Requires=kubelet.service
|
||||
After=kubelet.service
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
EnvironmentFile=/etc/kube-env
|
||||
ExecStartPre=/usr/bin/curl --fail --silent --show-error \
|
||||
-H "X-Google-Metadata-Request: True" \
|
||||
-o /run/configure-node.sh \
|
||||
http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-node
|
||||
ExecStartPre=/usr/bin/chmod 0755 /run/configure-node.sh
|
||||
ExecStart=/run/configure-node.sh
|
||||
|
||||
- name: kubernetes-addons.service
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Start Kubernetes addons and watch for updates.
|
||||
Requires=kubernetes-configure-node.service
|
||||
After=kubernetes-configure-node.service
|
||||
[Service]
|
||||
Environment=KUBECTL_BIN=/opt/kubernetes/server/bin/kubectl
|
||||
Environment=kubelet_kubeconfig_file=/var/lib/kubelet/kubeconfig
|
||||
ExecStartPre=/usr/bin/chmod 0755 /opt/kubernetes/saltbase/salt/kube-addons/kube-addons.sh
|
||||
ExecStart=/opt/kubernetes/saltbase/salt/kube-addons/kube-addons.sh
|
||||
Restart=always
|
||||
RestartSec=10
|
|
@ -1,68 +1,5 @@
|
|||
#cloud-config
|
||||
|
||||
write_files:
|
||||
- path: /run/configure-hostname.sh
|
||||
permissions: "0755"
|
||||
content: |
|
||||
#!/bin/bash -e
|
||||
set -x
|
||||
source /etc/kube-env
|
||||
|
||||
hostnamectl set-hostname $(hostname | cut -f1 -d.)
- path: /run/setup-auth.sh
permissions: "0755"
content: |
#!/bin/bash -e
set -x
source /etc/kube-env

/usr/bin/mkdir -p /var/lib/kubelet
cat > /var/lib/kubelet/kubeconfig << EOF
apiVersion: v1
kind: Config
users:
- name: kubelet
user:
token: ${KUBELET_TOKEN}
clusters:
- name: local
cluster:
insecure-skip-tls-verify: true
contexts:
- context:
cluster: local
user: kubelet
name: service-account-context
current-context: service-account-context
EOF

- path: /run/config-kube-proxy.sh
permissions: "0755"
content: |
#!/bin/bash -e
set -x
source /etc/kube-env

/usr/bin/mkdir -p /var/lib/kube-proxy
cat > /var/lib/kube-proxy/kubeconfig << EOF
apiVersion: v1
kind: Config
users:
- name: kube-proxy
user:
token: ${KUBE_PROXY_TOKEN}
clusters:
- name: local
cluster:
insecure-skip-tls-verify: true
contexts:
- context:
cluster: local
user: kube-proxy
name: service-account-context
current-context: service-account-context
EOF

coreos:
units:
- name: kube-env.service

@@ -80,30 +17,46 @@ coreos:
-o /etc/kube-env.yaml \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env
# Transform the yaml to env file.
ExecStartPre=/usr/bin/cp /etc/kube-env.yaml /etc/kube-env
ExecStartPre=/usr/bin/mv /etc/kube-env.yaml /etc/kube-env
ExecStart=/usr/bin/sed -i "s/: '/=/;s/'$//" /etc/kube-env
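# For example (illustrative value), a kube-env.yaml line such as
#   DNS_DOMAIN: 'cluster.local'
# becomes the env-file form
#   DNS_DOMAIN=cluster.local
# after the sed above rewrites ": '" to "=" and strips the trailing quote.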

- name: kubernetes-install-rkt.service
command: start
content: |
[Unit]
Description=Fetch Rocket
Description=Fetch rkt
Documentation=http://github.com/coreos/rkt
Requires=network-online.target
After=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
EnvironmentFile=/etc/kube-env
ExecStartPre=/usr/bin/rm -rf /opt/rkt
ExecStartPre=/usr/bin/mkdir -p /opt/rkt
ExecStartPre=/usr/bin/mkdir -p /etc/rkt
ExecStartPre=/usr/bin/wget \
-O /opt/rkt/rkt-v${RKT_VERSION}.tar.gz \
https://github.com/coreos/rkt/releases/download/v${RKT_VERSION}/rkt-v${RKT_VERSION}.tar.gz
ExecStartPre=/usr/bin/tar xzvf /opt/rkt/rkt-v${RKT_VERSION}.tar.gz -C /opt --overwrite
ExecStart=/usr/bin/mv /opt/rkt-v${RKT_VERSION} /opt/rkt/rkt
ExecStartPre=/usr/bin/mkdir -p /opt/downloads
ExecStartPre=/usr/bin/curl --location --create-dirs --output /opt/downloads/rkt.tar.gz https://github.com/coreos/rkt/releases/download/v${RKT_VERSION}/rkt-v${RKT_VERSION}.tar.gz
ExecStart=/usr/bin/tar xf /opt/downloads/rkt.tar.gz -C /opt --overwrite

- name: kubernetes-install-minion.service
- name: kubernetes-download-manifests.service
command: start
content: |
[Unit]
Description=Download manifests
Requires=network-online.target
After=network-online.target
Requires=kube-env.service
After=kube-env.service
[Service]
Type=oneshot
RemainAfterExit=yes
EnvironmentFile=/etc/kube-env
ExecStartPre=/usr/bin/mkdir -p /opt/downloads
ExecStartPre=/usr/bin/curl --location --create-dirs --output /opt/downloads/kubernetes-manifests.tar.gz ${KUBE_MANIFESTS_TAR_URL}
# TODO(yifan): Check hash.
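# A possible check could compare against the hash shipped in kube-env, e.g. (hypothetical
# sketch, not part of this change):
#   ExecStartPre=/usr/bin/bash -c 'echo "${KUBE_MANIFESTS_TAR_HASH}  /opt/downloads/kubernetes-manifests.tar.gz" | sha1sum --check -'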
ExecStartPre=/usr/bin/mkdir -p /opt/kube-manifests
ExecStart=/usr/bin/tar xf /opt/downloads/kubernetes-manifests.tar.gz -C /opt/kube-manifests --overwrite

- name: kubernetes-install-node.service
command: start
content: |
[Unit]

@@ -120,33 +73,25 @@ coreos:
ExecStartPre=/usr/bin/curl --location --create-dirs --output /opt/kubernetes/pkg/kubernetes-server-linux-amd64.tar.gz ${SERVER_BINARY_TAR_URL}
ExecStart=/usr/bin/tar xf /opt/kubernetes/pkg/kubernetes-server-linux-amd64.tar.gz -C /opt --overwrite

- name: kubernetes-preparation.service
command: start
content: |
[Unit]
Description=Configure Node For Kubernetes service
Requires=kubernetes-install-minion.service
After=kubernetes-install-minion.service
Requires=kubernetes-install-rkt.service
After=kubernetes-install-rkt.service
[Service]
Type=oneshot
RemainAfterExit=yes
EnvironmentFile=/etc/kube-env
# TODO(dawnchen): Push this to separate write-files
ExecStart=/run/configure-hostname.sh

- name: kubelet.service
command: start
content: |
[Unit]
Description=Run Kubelet service
Requires=kubernetes-preparation.service
After=kubernetes-preparation.service
Requires=network-online.target
After=network-online.target
Requires=kube-env.service
After=kube-env.service
Requires=kubernetes-download-manifests.service
After=kubernetes-download-manifests.service
[Service]
EnvironmentFile=/etc/kube-env
ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/run/setup-auth.sh
ExecStartPre=/usr/bin/curl --fail --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o /run/configure-kubelet.sh \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-kubelet
ExecStartPre=/usr/bin/chmod 0755 /run/configure-kubelet.sh
ExecStartPre=/run/configure-kubelet.sh
ExecStart=/opt/kubernetes/server/bin/kubelet \
--api-servers=https://${INSTANCE_PREFIX}-master \
--enable-debugging-handlers=true \

@@ -158,9 +103,8 @@ coreos:
--cluster-domain=${DNS_DOMAIN} \
--logtostderr=true \
--container-runtime=${KUBERNETES_CONTAINER_RUNTIME} \
--rkt-path=/opt/rkt/rkt/rkt \
--configure-cbr0=${KUBERNETES_CONFIGURE_CBR0} \
--pod-cidr=${MASTER_IP_RANGE} \
--rkt-path=/opt/rkt-v${RKT_VERSON}/rkt \
--configure-cbr0=${KUBERNETES_CONFIGURE_CBR0}
Restart=always
RestartSec=10

@@ -169,15 +113,12 @@ coreos:
content: |
[Unit]
Description=Start Kube-proxy service as Daemon
Requires=kubernetes-install-minion.service
After=kubernetes-install-minion.service
Requires=kubernetes-install-rkt.service
After=kubernetes-install-rkt.service
Requires=kubernetes-configure-node.service
After=kubernetes-configure-node.service
[Service]
EnvironmentFile=/etc/kube-env
ExecStartPre=/run/config-kube-proxy.sh
ExecStart=/opt/kubernetes/server/bin/kube-proxy \
--master=https://${KUBERNETES_MASTER_NAME}.c.${PROJECT_ID}.internal \
--master=https://${KUBERNETES_MASTER_NAME} \
--kubeconfig=/var/lib/kube-proxy/kubeconfig \
--v=2 \
--logtostderr=true

@@ -191,3 +132,32 @@ coreos:
content: |
[Service]
Environment='DOCKER_OPTS=--bridge=cbr0 --iptables=false --ip-masq=false'
MountFlags=slave
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
Restart=always
RestartSec=2s
StartLimitInterval=0

- name: kubernetes-configure-node.service
command: start
content: |
[Unit]
Description=Configure Node For Kubernetes service
Requires=kubernetes-install-node.service
After=kubernetes-install-node.service
Requires=kubernetes-install-rkt.service
After=kubernetes-install-rkt.service
Requires=kubernetes-download-manifests.service
After=kubernetes-download-manifests.service
[Service]
Type=oneshot
RemainAfterExit=yes
EnvironmentFile=/etc/kube-env
ExecStartPre=/usr/bin/curl --fail --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o /run/configure-node.sh \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-node
ExecStartPre=/usr/bin/chmod 0755 /run/configure-node.sh
ExecStart=/run/configure-node.sh

@@ -212,7 +212,7 @@ function upload-server-tars() {
SERVER_BINARY_TAR_URL="${server_binary_gs_url/gs:\/\//https://storage.googleapis.com/}"
SALT_TAR_URL="${salt_gs_url/gs:\/\//https://storage.googleapis.com/}"

if [[ "${OS_DISTRIBUTION}" == "trusty" ]]; then
if [[ "${OS_DISTRIBUTION}" == "trusty" || "${OS_DISTRIBUTION}" == "coreos" ]]; then
local kube_manifests_gs_url="${staging_path}/${KUBE_MANIFESTS_TAR##*/}"
KUBE_MANIFESTS_TAR_HASH=$(sha1sum-file "${KUBE_MANIFESTS_TAR}")
copy-if-not-staged "${staging_path}" "${kube_manifests_gs_url}" "${KUBE_MANIFESTS_TAR}" "${KUBE_MANIFESTS_TAR_HASH}"

@@ -1513,6 +1513,8 @@ EOF
if [[ "${OS_DISTRIBUTION}" == "coreos" ]]; then
|
||||
# CoreOS-only env vars. TODO(yifan): Make them available on other distros.
|
||||
cat >>$file <<EOF
|
||||
KUBE_MANIFESTS_TAR_URL: $(yaml-quote ${KUBE_MANIFESTS_TAR_URL})
|
||||
KUBE_MANIFESTS_TAR_HASH: $(yaml-quote ${KUBE_MANIFESTS_TAR_HASH})
|
||||
KUBERNETES_CONTAINER_RUNTIME: $(yaml-quote ${CONTAINER_RUNTIME:-docker})
|
||||
RKT_VERSION: $(yaml-quote ${RKT_VERSION:-})
|
||||
RKT_PATH: $(yaml-quote ${RKT_PATH:-})