Merge pull request #75662 from s-ito-ts/shellcheck_local-up-cluster.sh

fix shellcheck failures in hack/local-up-cluster.sh
Kubernetes Prow Robot authored on 2019-03-31 18:24:32 -07:00; committed by GitHub
commit 997d5182d0 (tag: k3s-v1.15.3)
2 changed files with 129 additions and 128 deletions
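This commit follows the repository's incremental shellcheck cleanup pattern: fix every warning in one script, then remove that script from the failure whitelist so CI enforces it from then on. A minimal sketch of how such a fix is checked locally; the plain shellcheck invocation is illustrative, while the repository's actual gate is hack/verify-shellcheck.sh:

    # Lint the script; the warning codes below recur throughout this diff.
    shellcheck hack/local-up-cluster.sh
    # SC2086: Double quote to prevent globbing and word splitting.
    # SC2155: Declare and assign separately to avoid masking return values.
    # SC2181: Check exit code directly, not indirectly with $?.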

hack/.shellcheck_failures

@@ -38,7 +38,6 @@
 ./hack/lib/swagger.sh
 ./hack/lib/test.sh
 ./hack/lib/version.sh
-./hack/local-up-cluster.sh
 ./hack/make-rules/clean.sh
 ./hack/make-rules/helpers/cache_go_dirs.sh
 ./hack/make-rules/make-help.sh

hack/local-up-cluster.sh

@@ -14,13 +14,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
+KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
 # This command builds and runs a local kubernetes cluster.
 # You may need to run this as root to allow kubelet to open docker's socket,
 # and to write the test CA in /var/run/kubernetes.
 DOCKER_OPTS=${DOCKER_OPTS:-""}
-DOCKER=(docker ${DOCKER_OPTS})
+export DOCKER=(docker "${DOCKER_OPTS[@]}")
 DOCKER_ROOT=${DOCKER_ROOT:-""}
 ALLOW_PRIVILEGED=${ALLOW_PRIVILEGED:-""}
 DENY_SECURITY_CONTEXT_ADMISSION=${DENY_SECURITY_CONTEXT_ADMISSION:-""}
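Two recurring fixes appear above. ${BASH_SOURCE} without an index is flagged by SC2128 because it silently means element 0 of an array, so the explicit ${BASH_SOURCE[0]} states the intent. And a command whose options live in a variable is safer built as an array than left to unquoted word splitting. A small sketch under those rules (the docker_opts value is illustrative):

    #!/usr/bin/env bash
    # Explicit index: array element, not accidental scalar use (SC2128).
    script_dir=$(dirname "${BASH_SOURCE[0]}")
    echo "running from: ${script_dir}"

    # Each option stays one argument, with no reliance on word splitting.
    docker_opts=("--log-level" "warn")
    docker_cmd=(docker "${docker_opts[@]}")
    echo "would run: ${docker_cmd[*]}"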
@@ -161,7 +161,8 @@ function usage {
 # This function guesses where the existing cached binary build is for the `-O`
 # flag
 function guess_built_binary_path {
-local hyperkube_path=$(kube::util::find-binary "hyperkube")
+local hyperkube_path
+hyperkube_path=$(kube::util::find-binary "hyperkube")
 if [[ -z "${hyperkube_path}" ]]; then
 return
 fi
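Splitting the declaration from the assignment is the standard SC2155 fix: in local x=$(cmd), the exit status seen afterwards is that of the local builtin (always 0), so a failing cmd is masked. A runnable demonstration with a stand-in lookup function:

    find_binary() { return 1; }       # stand-in for a lookup that fails

    broken() {
      local path=$(find_binary)       # $? now reflects 'local', not find_binary
      echo "combined form sees status: $?"
    }

    fixed() {
      local path
      path=$(find_binary)             # $? reflects find_binary itself
      echo "split form sees status: $?"
    }

    broken   # reports status 0 (the failure is masked)
    fixed    # reports status 1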
@@ -251,8 +252,8 @@ if [[ ${CONTAINER_RUNTIME} == "docker" ]]; then
 CGROUP_DRIVER=$(docker info | grep "Cgroup Driver:" | cut -f3- -d' ')
 echo "Kubelet cgroup driver defaulted to use: ${CGROUP_DRIVER}"
 fi
-if [[ -f /var/log/docker.log && ! -f ${LOG_DIR}/docker.log ]]; then
-ln -s /var/log/docker.log ${LOG_DIR}/docker.log
+if [[ -f /var/log/docker.log && ! -f "${LOG_DIR}/docker.log" ]]; then
+ln -s /var/log/docker.log "${LOG_DIR}/docker.log"
 fi
 fi
@@ -266,8 +267,7 @@ function test_apiserver_off {
 # For the common local scenario, fail fast if server is already running.
 # this can happen if you run local-up-cluster.sh twice and kill etcd in between.
 if [[ "${API_PORT}" -gt "0" ]]; then
-curl --silent -g ${API_HOST}:${API_PORT}
-if [ ! $? -eq 0 ]; then
+if ! curl --silent -g "${API_HOST}:${API_PORT}" ; then
 echo "API SERVER insecure port is free, proceeding..."
 else
 echo "ERROR starting API SERVER, exiting. Some process on ${API_HOST} is serving already on ${API_PORT}"
@@ -275,8 +275,7 @@ function test_apiserver_off
 fi
 fi
-curl --silent -k -g ${API_HOST}:${API_SECURE_PORT}
-if [ ! $? -eq 0 ]; then
+if ! curl --silent -k -g "${API_HOST}:${API_SECURE_PORT}" ; then
 echo "API SERVER secure port is free, proceeding..."
 else
 echo "ERROR starting API SERVER, exiting. Some process on ${API_HOST} is serving already on ${API_SECURE_PORT}"
@@ -348,24 +347,24 @@ cleanup()
 # fi
 # Check if the API server is still running
-[[ -n "${APISERVER_PID-}" ]] && APISERVER_PIDS=$(pgrep -P ${APISERVER_PID} ; ps -o pid= -p ${APISERVER_PID})
-[[ -n "${APISERVER_PIDS-}" ]] && sudo kill ${APISERVER_PIDS} 2>/dev/null
+[[ -n "${APISERVER_PID-}" ]] && mapfile -t APISERVER_PIDS < <(pgrep -P "${APISERVER_PID}" ; ps -o pid= -p "${APISERVER_PID}")
+[[ -n "${APISERVER_PIDS-}" ]] && sudo kill "${APISERVER_PIDS[@]}" 2>/dev/null
 # Check if the controller-manager is still running
-[[ -n "${CTLRMGR_PID-}" ]] && CTLRMGR_PIDS=$(pgrep -P ${CTLRMGR_PID} ; ps -o pid= -p ${CTLRMGR_PID})
-[[ -n "${CTLRMGR_PIDS-}" ]] && sudo kill ${CTLRMGR_PIDS} 2>/dev/null
+[[ -n "${CTLRMGR_PID-}" ]] && mapfile -t CTLRMGR_PIDS < <(pgrep -P "${CTLRMGR_PID}" ; ps -o pid= -p "${CTLRMGR_PID}")
+[[ -n "${CTLRMGR_PIDS-}" ]] && sudo kill "${CTLRMGR_PIDS[@]}" 2>/dev/null
 # Check if the kubelet is still running
-[[ -n "${KUBELET_PID-}" ]] && KUBELET_PIDS=$(pgrep -P ${KUBELET_PID} ; ps -o pid= -p ${KUBELET_PID})
-[[ -n "${KUBELET_PIDS-}" ]] && sudo kill ${KUBELET_PIDS} 2>/dev/null
+[[ -n "${KUBELET_PID-}" ]] && mapfile -t KUBELET_PIDS < <(pgrep -P "${KUBELET_PID}" ; ps -o pid= -p "${KUBELET_PID}")
+[[ -n "${KUBELET_PIDS-}" ]] && sudo kill "${KUBELET_PIDS[@]}" 2>/dev/null
 # Check if the proxy is still running
-[[ -n "${PROXY_PID-}" ]] && PROXY_PIDS=$(pgrep -P ${PROXY_PID} ; ps -o pid= -p ${PROXY_PID})
-[[ -n "${PROXY_PIDS-}" ]] && sudo kill ${PROXY_PIDS} 2>/dev/null
+[[ -n "${PROXY_PID-}" ]] && mapfile -t PROXY_PIDS < <(pgrep -P "${PROXY_PID}" ; ps -o pid= -p "${PROXY_PID}")
+[[ -n "${PROXY_PIDS-}" ]] && sudo kill "${PROXY_PIDS[@]}" 2>/dev/null
 # Check if the scheduler is still running
-[[ -n "${SCHEDULER_PID-}" ]] && SCHEDULER_PIDS=$(pgrep -P ${SCHEDULER_PID} ; ps -o pid= -p ${SCHEDULER_PID})
-[[ -n "${SCHEDULER_PIDS-}" ]] && sudo kill ${SCHEDULER_PIDS} 2>/dev/null
+[[ -n "${SCHEDULER_PID-}" ]] && mapfile -t SCHEDULER_PIDS < <(pgrep -P "${SCHEDULER_PID}" ; ps -o pid= -p "${SCHEDULER_PID}")
+[[ -n "${SCHEDULER_PIDS-}" ]] && sudo kill "${SCHEDULER_PIDS[@]}" 2>/dev/null
 # Check if the etcd is still running
 [[ -n "${ETCD_PID-}" ]] && kube::etcd::stop
@@ -378,32 +377,32 @@ cleanup()
 # Check if all processes are still running. Prints a warning once each time
 # a process dies unexpectedly.
 function healthcheck {
-if [[ -n "${APISERVER_PID-}" ]] && ! sudo kill -0 ${APISERVER_PID} 2>/dev/null; then
+if [[ -n "${APISERVER_PID-}" ]] && ! sudo kill -0 "${APISERVER_PID}" 2>/dev/null; then
 warning_log "API server terminated unexpectedly, see ${APISERVER_LOG}"
 APISERVER_PID=
 fi
-if [[ -n "${CTLRMGR_PID-}" ]] && ! sudo kill -0 ${CTLRMGR_PID} 2>/dev/null; then
+if [[ -n "${CTLRMGR_PID-}" ]] && ! sudo kill -0 "${CTLRMGR_PID}" 2>/dev/null; then
 warning_log "kube-controller-manager terminated unexpectedly, see ${CTLRMGR_LOG}"
 CTLRMGR_PID=
 fi
-if [[ -n "${KUBELET_PID-}" ]] && ! sudo kill -0 ${KUBELET_PID} 2>/dev/null; then
+if [[ -n "${KUBELET_PID-}" ]] && ! sudo kill -0 "${KUBELET_PID}" 2>/dev/null; then
 warning_log "kubelet terminated unexpectedly, see ${KUBELET_LOG}"
 KUBELET_PID=
 fi
-if [[ -n "${PROXY_PID-}" ]] && ! sudo kill -0 ${PROXY_PID} 2>/dev/null; then
+if [[ -n "${PROXY_PID-}" ]] && ! sudo kill -0 "${PROXY_PID}" 2>/dev/null; then
 warning_log "kube-proxy terminated unexpectedly, see ${PROXY_LOG}"
 PROXY_PID=
 fi
-if [[ -n "${SCHEDULER_PID-}" ]] && ! sudo kill -0 ${SCHEDULER_PID} 2>/dev/null; then
+if [[ -n "${SCHEDULER_PID-}" ]] && ! sudo kill -0 "${SCHEDULER_PID}" 2>/dev/null; then
 warning_log "scheduler terminated unexpectedly, see ${SCHEDULER_LOG}"
 SCHEDULER_PID=
 fi
-if [[ -n "${ETCD_PID-}" ]] && ! sudo kill -0 ${ETCD_PID} 2>/dev/null; then
+if [[ -n "${ETCD_PID-}" ]] && ! sudo kill -0 "${ETCD_PID}" 2>/dev/null; then
 warning_log "etcd terminated unexpectedly"
 ETCD_PID=
 fi
@@ -413,9 +412,9 @@ function print_color {
 message=$1
 prefix=${2:+$2: } # add colon only if defined
 color=${3:-1} # default is red
-echo -n $(tput bold)$(tput setaf ${color})
+echo -n "$(tput bold)$(tput setaf "${color}")"
 echo "${prefix}${message}"
-echo -n $(tput sgr0)
+echo -n "$(tput sgr0)"
 }
 function warning_log {
@@ -424,7 +423,7 @@ function warning_log {
 function start_etcd {
 echo "Starting etcd"
-ETCD_LOGFILE=${LOG_DIR}/etcd.log
+export ETCD_LOGFILE=${LOG_DIR}/etcd.log
 kube::etcd::start
 }
@@ -433,7 +432,7 @@ function set_service_accounts {
 SERVICE_ACCOUNT_KEY=${SERVICE_ACCOUNT_KEY:-/tmp/kube-serviceaccount.key}
 # Generate ServiceAccount key if needed
 if [[ ! -f "${SERVICE_ACCOUNT_KEY}" ]]; then
-mkdir -p "$(dirname ${SERVICE_ACCOUNT_KEY})"
+mkdir -p "$(dirname "${SERVICE_ACCOUNT_KEY}")"
 openssl genrsa -out "${SERVICE_ACCOUNT_KEY}" 2048 2>/dev/null
 fi
 }
@@ -454,7 +453,7 @@ function generate_certs {
 kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" request-header '"client auth"'
 # serving cert for kube-apiserver
-kube::util::create_serving_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "server-ca" kube-apiserver kubernetes.default kubernetes.default.svc "localhost" ${API_HOST_IP} ${API_HOST} ${FIRST_SERVICE_CLUSTER_IP}
+kube::util::create_serving_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "server-ca" kube-apiserver kubernetes.default kubernetes.default.svc "localhost" "${API_HOST_IP}" "${API_HOST}" "${FIRST_SERVICE_CLUSTER_IP}"
 # Create client certs signed with client-ca, given id, given CN and a number of groups
 kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' controller system:kube-controller-manager
@@ -463,7 +462,7 @@
 kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-apiserver kube-apiserver
 # Create matching certificates for kube-aggregator
-kube::util::create_serving_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "server-ca" kube-aggregator api.kube-public.svc "localhost" ${API_HOST_IP}
+kube::util::create_serving_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "server-ca" kube-aggregator api.kube-public.svc "localhost" "${API_HOST_IP}"
 kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" request-header-ca auth-proxy system:auth-proxy
 # TODO remove masters and add rolebinding
@@ -477,7 +476,7 @@ function generate_kubeproxy_certs {
 }
 function generate_kubelet_certs {
-kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kubelet system:node:${HOSTNAME_OVERRIDE} system:nodes
+kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kubelet "system:node:${HOSTNAME_OVERRIDE}" system:nodes
 kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kubelet
 }
@@ -505,11 +504,11 @@ function start_apiserver {
 authorizer_arg=""
 if [[ -n "${AUTHORIZATION_MODE}" ]]; then
-authorizer_arg="--authorization-mode=${AUTHORIZATION_MODE} "
+authorizer_arg="--authorization-mode=${AUTHORIZATION_MODE}"
 fi
 priv_arg=""
 if [[ -n "${ALLOW_PRIVILEGED}" ]]; then
-priv_arg="--allow-privileged=${ALLOW_PRIVILEGED} "
+priv_arg="--allow-privileged=${ALLOW_PRIVILEGED}"
 fi
 runtime_config=""
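The trailing spaces disappear because these single-flag strings are now passed quoted (see the apiserver invocation in the next hunk): a quoted expansion delivers exactly one argument, and a trailing space would travel inside it. A way to see the boundary:

    print_args() { printf '[%s]\n' "$@"; }        # visualize argument boundaries

    authorizer_arg="--authorization-mode=RBAC"    # new form, no trailing space
    print_args "${authorizer_arg}"                # [--authorization-mode=RBAC]

    old_arg="--authorization-mode=RBAC "          # old form
    print_args "${old_arg}"                       # [--authorization-mode=RBAC ]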
@@ -553,14 +552,14 @@ EOF
 fi
 APISERVER_LOG=${LOG_DIR}/kube-apiserver.log
-${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" apiserver ${authorizer_arg} ${priv_arg} ${runtime_config} \
+${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" apiserver "${authorizer_arg}" "${priv_arg}" ${runtime_config} \
 ${cloud_config_arg} \
-${advertise_address} \
-${node_port_range} \
---v=${LOG_LEVEL} \
+"${advertise_address}" \
+"${node_port_range}" \
+--v="${LOG_LEVEL}" \
 --vmodule="${LOG_SPEC}" \
 --audit-policy-file="${AUDIT_POLICY_FILE}" \
---audit-log-path=${LOG_DIR}/kube-apiserver-audit.log \
+--audit-log-path="${LOG_DIR}/kube-apiserver-audit.log" \
 --cert-dir="${CERT_DIR}" \
 --client-ca-file="${CERT_DIR}/client-ca.crt" \
 --kubelet-client-certificate="${CERT_DIR}/client-kube-apiserver.crt" \
@@ -576,8 +575,8 @@ EOF
 --tls-private-key-file="${CERT_DIR}/serving-kube-apiserver.key" \
 --insecure-bind-address="${API_HOST_IP}" \
 --insecure-port="${API_PORT}" \
---storage-backend=${STORAGE_BACKEND} \
---storage-media-type=${STORAGE_MEDIA_TYPE} \
+--storage-backend="${STORAGE_BACKEND}" \
+--storage-media-type="${STORAGE_MEDIA_TYPE}" \
 --etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" \
 --service-cluster-ip-range="${SERVICE_CLUSTER_IP_RANGE}" \
 --feature-gates="${FEATURE_GATES}" \
@@ -594,7 +593,7 @@ EOF
 # Wait for kube-apiserver to come up before launching the rest of the components.
 echo "Waiting for apiserver to come up"
-kube::util::wait_for_url "https://${API_HOST_IP}:${API_SECURE_PORT}/healthz" "apiserver: " 1 ${WAIT_FOR_URL_API_SERVER} ${MAX_TIME_FOR_URL_API_SERVER} \
+kube::util::wait_for_url "https://${API_HOST_IP}:${API_SECURE_PORT}/healthz" "apiserver: " 1 "${WAIT_FOR_URL_API_SERVER}" "${MAX_TIME_FOR_URL_API_SERVER}" \
 || { echo "check apiserver logs: ${APISERVER_LOG}" ; exit 1 ; }
 # Create kubeconfigs for all components, using client certs
@@ -611,38 +610,38 @@ EOF
 ${KUBECTL} --kubeconfig "${CERT_DIR}/admin.kubeconfig" create clusterrolebinding kube-apiserver-kubelet-admin --clusterrole=system:kubelet-api-admin --user=kube-apiserver
 ${CONTROLPLANE_SUDO} cp "${CERT_DIR}/admin.kubeconfig" "${CERT_DIR}/admin-kube-aggregator.kubeconfig"
-${CONTROLPLANE_SUDO} chown $(whoami) "${CERT_DIR}/admin-kube-aggregator.kubeconfig"
+${CONTROLPLANE_SUDO} chown "$(whoami)" "${CERT_DIR}/admin-kube-aggregator.kubeconfig"
 ${KUBECTL} config set-cluster local-up-cluster --kubeconfig="${CERT_DIR}/admin-kube-aggregator.kubeconfig" --server="https://${API_HOST_IP}:31090"
 echo "use 'kubectl --kubeconfig=${CERT_DIR}/admin-kube-aggregator.kubeconfig' to use the aggregated API server"
 }
 function start_controller_manager {
-node_cidr_args=""
+node_cidr_args=()
 if [[ "${NET_PLUGIN}" == "kubenet" ]]; then
-node_cidr_args="--allocate-node-cidrs=true --cluster-cidr=10.1.0.0/16 "
+node_cidr_args=("--allocate-node-cidrs=true" "--cluster-cidr=10.1.0.0/16")
 fi
-cloud_config_arg="--cloud-provider=${CLOUD_PROVIDER} --cloud-config=${CLOUD_CONFIG}"
+cloud_config_arg=("--cloud-provider=${CLOUD_PROVIDER}" "--cloud-config=${CLOUD_CONFIG}")
 if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then
-cloud_config_arg="--cloud-provider=external"
-cloud_config_arg+=" --external-cloud-volume-plugin=${CLOUD_PROVIDER}"
-cloud_config_arg+=" --cloud-config=${CLOUD_CONFIG}"
+cloud_config_arg=("--cloud-provider=external")
+cloud_config_arg+=("--external-cloud-volume-plugin=${CLOUD_PROVIDER}")
+cloud_config_arg+=("--cloud-config=${CLOUD_CONFIG}")
 fi
 CTLRMGR_LOG=${LOG_DIR}/kube-controller-manager.log
 ${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" controller-manager \
---v=${LOG_LEVEL} \
+--v="${LOG_LEVEL}" \
 --vmodule="${LOG_SPEC}" \
 --service-account-private-key-file="${SERVICE_ACCOUNT_KEY}" \
 --root-ca-file="${ROOT_CA_FILE}" \
 --cluster-signing-cert-file="${CLUSTER_SIGNING_CERT_FILE}" \
 --cluster-signing-key-file="${CLUSTER_SIGNING_KEY_FILE}" \
 --enable-hostpath-provisioner="${ENABLE_HOSTPATH_PROVISIONER}" \
-${node_cidr_args} \
+"${node_cidr_args[@]}" \
 --pvclaimbinder-sync-period="${CLAIM_BINDER_SYNC_PERIOD}" \
 --feature-gates="${FEATURE_GATES}" \
-${cloud_config_arg} \
+"${cloud_config_arg[@]}" \
 --kubeconfig "${CERT_DIR}"/controller.kubeconfig \
 --use-service-account-credentials \
 --controllers="${KUBE_CONTROLLERS}" \
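Converting the accumulated flag strings to arrays is the general cure for SC2086 here: each flag survives as one argument even if a value contains spaces, and an empty array expands to nothing at all under "${arr[@]}", where an empty quoted string would still produce one empty argument. Sketch:

    count_args() { echo "received $# argument(s)"; }

    node_cidr_args=()
    if [[ "${NET_PLUGIN:-}" == "kubenet" ]]; then
      node_cidr_args=("--allocate-node-cidrs=true" "--cluster-cidr=10.1.0.0/16")
    fi
    count_args "${node_cidr_args[@]}"   # 0 without kubenet, 2 with it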
@@ -662,24 +661,24 @@ function start_cloud_controller_manager {
 exit 1
 fi
-node_cidr_args=""
+node_cidr_args=()
 if [[ "${NET_PLUGIN}" == "kubenet" ]]; then
-node_cidr_args="--allocate-node-cidrs=true --cluster-cidr=10.1.0.0/16 "
+node_cidr_args=("--allocate-node-cidrs=true" "--cluster-cidr=10.1.0.0/16")
 fi
 CLOUD_CTLRMGR_LOG=${LOG_DIR}/cloud-controller-manager.log
-${CONTROLPLANE_SUDO} ${EXTERNAL_CLOUD_PROVIDER_BINARY:-"${GO_OUT}/hyperkube" cloud-controller-manager} \
---v=${LOG_LEVEL} \
+${CONTROLPLANE_SUDO} "${EXTERNAL_CLOUD_PROVIDER_BINARY:-"${GO_OUT}/hyperkube" cloud-controller-manager}" \
+--v="${LOG_LEVEL}" \
 --vmodule="${LOG_SPEC}" \
-${node_cidr_args} \
+"${node_cidr_args[@]}" \
 --feature-gates="${FEATURE_GATES}" \
---cloud-provider=${CLOUD_PROVIDER} \
---cloud-config=${CLOUD_CONFIG} \
+--cloud-provider="${CLOUD_PROVIDER}" \
+--cloud-config="${CLOUD_CONFIG}" \
 --kubeconfig "${CERT_DIR}"/controller.kubeconfig \
 --use-service-account-credentials \
 --leader-elect=false \
 --master="https://${API_HOST}:${API_SECURE_PORT}" >"${CLOUD_CTLRMGR_LOG}" 2>&1 &
-CLOUD_CTLRMGR_PID=$!
+export CLOUD_CTLRMGR_PID=$!
 }
 function start_kubelet {
@@ -688,96 +687,97 @@ function start_kubelet {
 priv_arg=""
 if [[ -n "${ALLOW_PRIVILEGED}" ]]; then
-priv_arg="--allow-privileged=${ALLOW_PRIVILEGED} "
+priv_arg="--allow-privileged=${ALLOW_PRIVILEGED}"
 fi
-cloud_config_arg="--cloud-provider=${CLOUD_PROVIDER} --cloud-config=${CLOUD_CONFIG}"
+cloud_config_arg=("--cloud-provider=${CLOUD_PROVIDER}" "--cloud-config=${CLOUD_CONFIG}")
 if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then
-cloud_config_arg="--cloud-provider=external"
-cloud_config_arg+=" --provider-id=$(hostname)"
+cloud_config_arg=("--cloud-provider=external")
+cloud_config_arg+=("--provider-id=$(hostname)")
 fi
 mkdir -p "/var/lib/kubelet" &>/dev/null || sudo mkdir -p "/var/lib/kubelet"
 # Enable dns
 if [[ "${ENABLE_CLUSTER_DNS}" = true ]]; then
 if [[ "${ENABLE_NODELOCAL_DNS:-}" == "true" ]]; then
-dns_args="--cluster-dns=${LOCAL_DNS_IP} --cluster-domain=${DNS_DOMAIN}"
+dns_args=("--cluster-dns=${LOCAL_DNS_IP}" "--cluster-domain=${DNS_DOMAIN}")
 else
-dns_args="--cluster-dns=${DNS_SERVER_IP} --cluster-domain=${DNS_DOMAIN}"
+dns_args=("--cluster-dns=${DNS_SERVER_IP}" "--cluster-domain=${DNS_DOMAIN}")
 fi
 else
 # To start a private DNS server set ENABLE_CLUSTER_DNS and
 # DNS_SERVER_IP/DOMAIN. This will at least provide a working
 # DNS server for real world hostnames.
-dns_args="--cluster-dns=8.8.8.8"
+dns_args=("--cluster-dns=8.8.8.8")
 fi
-net_plugin_args=""
+net_plugin_args=()
 if [[ -n "${NET_PLUGIN}" ]]; then
-net_plugin_args="--network-plugin=${NET_PLUGIN}"
+net_plugin_args=("--network-plugin=${NET_PLUGIN}")
 fi
-auth_args=""
+auth_args=()
 if [[ "${KUBELET_AUTHORIZATION_WEBHOOK:-}" != "false" ]]; then
-auth_args="${auth_args} --authorization-mode=Webhook"
+auth_args+=("--authorization-mode=Webhook")
 fi
 if [[ "${KUBELET_AUTHENTICATION_WEBHOOK:-}" != "false" ]]; then
-auth_args="${auth_args} --authentication-token-webhook"
+auth_args+=("--authentication-token-webhook")
 fi
 if [[ -n "${CLIENT_CA_FILE:-}" ]]; then
-auth_args="${auth_args} --client-ca-file=${CLIENT_CA_FILE}"
+auth_args+=("--client-ca-file=${CLIENT_CA_FILE}")
 else
-auth_args="${auth_args} --client-ca-file=${CERT_DIR}/client-ca.crt"
+auth_args+=("--client-ca-file=${CERT_DIR}/client-ca.crt")
 fi
-cni_conf_dir_args=""
+cni_conf_dir_args=()
 if [[ -n "${CNI_CONF_DIR}" ]]; then
-cni_conf_dir_args="--cni-conf-dir=${CNI_CONF_DIR}"
+cni_conf_dir_args=("--cni-conf-dir=${CNI_CONF_DIR}")
 fi
-cni_bin_dir_args=""
+cni_bin_dir_args=()
 if [[ -n "${CNI_BIN_DIR}" ]]; then
-cni_bin_dir_args="--cni-bin-dir=${CNI_BIN_DIR}"
+cni_bin_dir_args=("--cni-bin-dir=${CNI_BIN_DIR}")
 fi
-container_runtime_endpoint_args=""
+container_runtime_endpoint_args=()
 if [[ -n "${CONTAINER_RUNTIME_ENDPOINT}" ]]; then
-container_runtime_endpoint_args="--container-runtime-endpoint=${CONTAINER_RUNTIME_ENDPOINT}"
+container_runtime_endpoint_args=("--container-runtime-endpoint=${CONTAINER_RUNTIME_ENDPOINT}")
 fi
-image_service_endpoint_args=""
+image_service_endpoint_args=()
 if [[ -n "${IMAGE_SERVICE_ENDPOINT}" ]]; then
-image_service_endpoint_args="--image-service-endpoint=${IMAGE_SERVICE_ENDPOINT}"
+image_service_endpoint_args=("--image-service-endpoint=${IMAGE_SERVICE_ENDPOINT}")
 fi
+# shellcheck disable=SC2206
 all_kubelet_flags=(
-${priv_arg}
---v="${LOG_LEVEL}"
---vmodule="${LOG_SPEC}"
---chaos-chance="${CHAOS_CHANCE}"
---container-runtime="${CONTAINER_RUNTIME}"
---hostname-override="${HOSTNAME_OVERRIDE}"
-${cloud_config_arg}
---address="${KUBELET_HOST}"
+"${priv_arg}"
+"--v=${LOG_LEVEL}"
+"--vmodule=${LOG_SPEC}"
+"--chaos-chance=${CHAOS_CHANCE}"
+"--container-runtime=${CONTAINER_RUNTIME}"
+"--hostname-override=${HOSTNAME_OVERRIDE}"
+"${cloud_config_arg[@]}"
+"--address=${KUBELET_HOST}"
 --kubeconfig "${CERT_DIR}"/kubelet.kubeconfig
---feature-gates="${FEATURE_GATES}"
---cpu-cfs-quota="${CPU_CFS_QUOTA}"
---enable-controller-attach-detach="${ENABLE_CONTROLLER_ATTACH_DETACH}"
---cgroups-per-qos="${CGROUPS_PER_QOS}"
---cgroup-driver="${CGROUP_DRIVER}"
---cgroup-root="${CGROUP_ROOT}"
---eviction-hard="${EVICTION_HARD}"
---eviction-soft="${EVICTION_SOFT}"
---eviction-pressure-transition-period="${EVICTION_PRESSURE_TRANSITION_PERIOD}"
---pod-manifest-path="${POD_MANIFEST_PATH}"
---fail-swap-on="${FAIL_SWAP_ON}"
-${auth_args}
-${dns_args}
-${cni_conf_dir_args}
-${cni_bin_dir_args}
-${net_plugin_args}
-${container_runtime_endpoint_args}
-${image_service_endpoint_args}
---port="${KUBELET_PORT}"
+"--feature-gates=${FEATURE_GATES}"
+"--cpu-cfs-quota=${CPU_CFS_QUOTA}"
+"--enable-controller-attach-detach=${ENABLE_CONTROLLER_ATTACH_DETACH}"
+"--cgroups-per-qos=${CGROUPS_PER_QOS}"
+"--cgroup-driver=${CGROUP_DRIVER}"
+"--cgroup-root=${CGROUP_ROOT}"
+"--eviction-hard=${EVICTION_HARD}"
+"--eviction-soft=${EVICTION_SOFT}"
+"--eviction-pressure-transition-period=${EVICTION_PRESSURE_TRANSITION_PERIOD}"
+"--pod-manifest-path=${POD_MANIFEST_PATH}"
+"--fail-swap-on=${FAIL_SWAP_ON}"
+"${auth_args[@]}"
+"${dns_args[@]}"
+"${cni_conf_dir_args[@]}"
+"${cni_bin_dir_args[@]}"
+"${net_plugin_args[@]}"
+"${container_runtime_endpoint_args[@]}"
+"${image_service_endpoint_args[@]}"
+"--port=${KUBELET_PORT}"
 ${KUBELET_FLAGS}
 )
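One expansion stays unquoted on purpose: KUBELET_FLAGS is a user-supplied string meant to split into several flags, and the added shellcheck disable=SC2206 records that the splitting is deliberate. An illustration with a made-up flag string:

    extra_flags="--max-pods=10 --fail-swap-on=false"   # illustrative user input
    # shellcheck disable=SC2206  # intentional word splitting of a flag string
    all_flags=(
      "--v=2"
      ${extra_flags}   # splits into two separate array elements
    )
    printf 'flag: %s\n' "${all_flags[@]}"   # three flags, one per line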
@@ -785,6 +785,7 @@ function start_kubelet {
 generate_kubelet_certs
 fi
+# shellcheck disable=SC2024
 sudo -E "${GO_OUT}/hyperkube" kubelet "${all_kubelet_flags[@]}" >"${KUBELET_LOG}" 2>&1 &
 KUBELET_PID=$!
@@ -792,7 +793,7 @@ function start_kubelet {
 if [ -n "${KUBELET_PID}" ] && ps -p ${KUBELET_PID} > /dev/null; then
 echo "kubelet ( ${KUBELET_PID} ) is running."
 else
-cat ${KUBELET_LOG} ; exit 1
+cat "${KUBELET_LOG}" ; exit 1
 fi
 }
@@ -812,8 +813,8 @@ EOF
 # Convert from foo=true,bar=false to
 # foo: true
 # bar: false
-for gate in $(echo ${FEATURE_GATES} | tr ',' ' '); do
-echo ${gate} | ${SED} -e 's/\(.*\)=\(.*\)/ \1: \2/'
+for gate in $(echo "${FEATURE_GATES}" | tr ',' ' '); do
+echo "${gate}" | ${SED} -e 's/\(.*\)=\(.*\)/ \1: \2/'
 done
 fi >>/tmp/kube-proxy.yaml
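The loop turns the comma-separated key=value gate list into YAML: quoting ${FEATURE_GATES} and ${gate} keeps each gate intact while tr performs the only splitting, on commas turned to spaces. Runnable sketch (the gate names are illustrative, and plain sed stands in for ${SED}):

    FEATURE_GATES="CSIDriverRegistry=true,BlockVolume=false"
    for gate in $(echo "${FEATURE_GATES}" | tr ',' ' '); do
      echo "${gate}" | sed -e 's/\(.*\)=\(.*\)/  \1: \2/'
    done
    # Emits:
    #   CSIDriverRegistry: true
    #   BlockVolume: false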
@@ -821,8 +822,9 @@ EOF
 generate_kubeproxy_certs
 fi
+# shellcheck disable=SC2024
 sudo "${GO_OUT}/hyperkube" proxy \
---v=${LOG_LEVEL} \
+--v="${LOG_LEVEL}" \
 --config=/tmp/kube-proxy.yaml \
 --master="https://${API_HOST}:${API_SECURE_PORT}" >"${PROXY_LOG}" 2>&1 &
 PROXY_PID=$!
@@ -832,7 +834,7 @@ function start_kubescheduler {
 SCHEDULER_LOG=${LOG_DIR}/kube-scheduler.log
 ${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" scheduler \
---v=${LOG_LEVEL} \
+--v="${LOG_LEVEL}" \
 --leader-elect=false \
 --kubeconfig "${CERT_DIR}"/scheduler.kubeconfig \
 --feature-gates="${FEATURE_GATES}" \
@@ -868,20 +870,20 @@ function start_kubedashboard {
 if [[ "${ENABLE_CLUSTER_DASHBOARD}" = true ]]; then
 echo "Creating kubernetes-dashboard"
 # use kubectl to create the dashboard
-${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-secret.yaml
-${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-configmap.yaml
-${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-rbac.yaml
-${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-controller.yaml
-${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-service.yaml
+${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f "${KUBE_ROOT}/cluster/addons/dashboard/dashboard-secret.yaml"
+${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f "${KUBE_ROOT}/cluster/addons/dashboard/dashboard-configmap.yaml"
+${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f "${KUBE_ROOT}/cluster/addons/dashboard/dashboard-rbac.yaml"
+${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f "${KUBE_ROOT}/cluster/addons/dashboard/dashboard-controller.yaml"
+${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f "${KUBE_ROOT}/cluster/addons/dashboard/dashboard-service.yaml"
 echo "kubernetes-dashboard deployment and service successfully deployed."
 fi
 }
 function create_psp_policy {
 echo "Create podsecuritypolicy policies for RBAC."
-${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f ${KUBE_ROOT}/examples/podsecuritypolicy/rbac/policies.yaml
-${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f ${KUBE_ROOT}/examples/podsecuritypolicy/rbac/roles.yaml
-${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f ${KUBE_ROOT}/examples/podsecuritypolicy/rbac/bindings.yaml
+${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f "${KUBE_ROOT}/examples/podsecuritypolicy/rbac/policies.yaml"
+${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f "${KUBE_ROOT}/examples/podsecuritypolicy/rbac/roles.yaml"
+${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f "${KUBE_ROOT}/examples/podsecuritypolicy/rbac/bindings.yaml"
 }
 function create_storage_class {
@@ -891,9 +893,9 @@ function create_storage_class {
 CLASS_FILE=${KUBE_ROOT}/cluster/addons/storage-class/${CLOUD_PROVIDER}/default.yaml
 fi
-if [ -e ${CLASS_FILE} ]; then
+if [ -e "${CLASS_FILE}" ]; then
 echo "Create default storage class for ${CLOUD_PROVIDER}"
-${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f ${CLASS_FILE}
+${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f "${CLASS_FILE}"
 else
 echo "No storage class available for ${CLOUD_PROVIDER}."
 fi
@@ -905,7 +907,7 @@ create_csi_crd() {
 if [ -e "${YAML_FILE}" ]; then
 echo "Create $1 crd"
-${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f ${YAML_FILE}
+${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f "${YAML_FILE}"
 else
 echo "No $1 available."
 fi
@@ -972,7 +974,7 @@ fi
 # If we are running in the CI, we need a few more things before we can start
 if [[ "${KUBETEST_IN_DOCKER:-}" == "true" ]]; then
 echo "Preparing to test ..."
-${KUBE_ROOT}/hack/install-etcd.sh
+"${KUBE_ROOT}"/hack/install-etcd.sh
 export PATH="${KUBE_ROOT}/third_party/etcd:${PATH}"
 KUBE_FASTBUILD=true make ginkgo cross
@@ -1008,7 +1010,7 @@ if [ "${GO_OUT}" == "" ]; then
 fi
 echo "Detected host and ready to start services. Doing some housekeeping first..."
 echo "Using GO_OUT ${GO_OUT}"
-KUBELET_CIDFILE=/tmp/kubelet.cid
+export KUBELET_CIDFILE=/tmp/kubelet.cid
 if [[ "${ENABLE_DAEMON}" = false ]]; then
 trap cleanup EXIT
 fi
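A note on the export keywords sprinkled through this diff (DOCKER, ETCD_LOGFILE, CLOUD_CTLRMGR_PID, KUBELET_CIDFILE): my reading is that they answer SC2034, which flags variables assigned but never read in the same file; exporting records that a sourced helper or child process is the consumer. Minimal illustration:

    # SC2034 would flag this: assigned here, never read in this file.
    ETCD_LOGFILE="/tmp/etcd.log"
    # Exported, the intent is explicit and the warning goes away.
    export ETCD_LOGFILE="/tmp/etcd.log"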