Merge pull request #53334 from bskiba/ca_km_clean

Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Add launching Cluster Autoscaler in Kubemark

**What this PR does / why we need it**:
Allows launching Cluster Autoscaler in Kubemark.
To do so, set the ENABLE_KUBEMARK_CLUSTER_AUTOSCALER flag to true. This currently works with only one node group, for which you can specify the name and the minimum and maximum number of nodes (KUBEMARK_AUTOSCALER_MIG_NAME, KUBEMARK_AUTOSCALER_MIN_NODES, KUBEMARK_AUTOSCALER_MAX_NODES).
It is important to note that NUM_NODES has a different meaning when launching Cluster Autoscaler: we always start with only one hollow node, and NUM_NODES is instead used to size the Kubemark master and addon components.

There are no changes to the current setup if ENABLE_KUBEMARK_CLUSTER_AUTOSCALER is set to false.
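
For illustration, a typical invocation might look like the sketch below. The start script path assumes the usual kubernetes/kubernetes layout, and all values are examples, not defaults:

```
# Illustrative values only - pick bounds that match your test scenario.
export ENABLE_KUBEMARK_CLUSTER_AUTOSCALER=true
export KUBEMARK_AUTOSCALER_MIG_NAME=my-kubemark-group  # name of the single node group
export KUBEMARK_AUTOSCALER_MIN_NODES=1                 # lower scaling bound
export KUBEMARK_AUTOSCALER_MAX_NODES=100               # upper bound; also sizes master/addons
# Do not set NUM_NODES yourself - it is derived from the maximum above.
./test/kubemark/start-kubemark.sh
```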

**Release note**:
```
NONE
```
Kubernetes Submit Queue 2017-10-09 08:20:53 -07:00 committed by GitHub
commit a3103ec940
5 changed files with 171 additions and 7 deletions


@ -100,6 +100,16 @@ ALLOCATE_NODE_CIDRS=true
# Optional: Enable cluster autoscaler.
ENABLE_KUBEMARK_CLUSTER_AUTOSCALER="${ENABLE_KUBEMARK_CLUSTER_AUTOSCALER:-false}"
# When using Cluster Autoscaler, always start with one hollow-node replica.
# NUM_NODES should not be specified by the user. Instead we use
# NUM_NODES=KUBEMARK_AUTOSCALER_MAX_NODES. This gives other cluster components
# (e.g. kubemark master, Heapster) enough resources to handle maximum cluster size.
if [[ "${ENABLE_KUBEMARK_CLUSTER_AUTOSCALER}" == "true" ]]; then
NUM_REPLICAS=1
if [[ ! -z "$NUM_NODES" ]]; then
echo "WARNING: Using Cluster Autoscaler, ignoring NUM_NODES parameter. Set KUBEMARK_AUTOSCALER_MAX_NODES to specify maximum size of the cluster."
fi
fi
# Optional: set feature gates
FEATURE_GATES="${KUBE_FEATURE_GATES:-ExperimentalCriticalPodAnnotation=true}"


@ -0,0 +1,94 @@
{
  "kind": "Pod",
  "apiVersion": "v1",
  "metadata": {
    "name": "cluster-autoscaler",
    "namespace": "kubemark",
    "labels": {
      "tier": "cluster-management",
      "component": "cluster-autoscaler"
    }
  },
  "spec": {
    "hostNetwork": true,
    "containers": [
      {
        "name": "cluster-autoscaler",
        "image": "gcr.io/google_containers/cluster-autoscaler:v1.0.0",
        "command": [
          "./run.sh",
          "--kubernetes=https://{{master_ip}}:443?inClusterConfig=0&useServiceAccount=0&auth=/kubeconfig/cluster_autoscaler.kubeconfig",
          "--v=4",
          "--logtostderr=true",
          "--write-status-configmap=true",
          "--cloud-provider=kubemark",
          "--nodes={{kubemark_autoscaler_min_nodes}}:{{kubemark_autoscaler_max_nodes}}:{{kubemark_autoscaler_mig_name}}"
        ],
        "env": [
          {
            "name": "LOG_OUTPUT",
            "value": "/var/log/cluster-autoscaler.log"
          }
        ],
        "resources": {
          "requests": {
            "cpu": "10m",
            "memory": "300Mi"
          }
        },
        "volumeMounts": [
          {"name": "cloudconfigmount", "mountPath": "/etc/gce.conf", "readOnly": true},
          {
            "name": "ssl-certs",
            "readOnly": true,
            "mountPath": "/etc/ssl/certs"
          },
          {
            "name": "usrsharecacerts",
            "readOnly": true,
            "mountPath": "/usr/share/ca-certificates"
          },
          {
            "name": "logdir",
            "mountPath": "/var/log",
            "readOnly": false
          },
          {
            "name": "kubeconfig-volume",
            "mountPath": "/kubeconfig"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "imagePullPolicy": "Always"
      }
    ],
    "volumes": [
      {"name": "cloudconfigmount", "hostPath": {"path": "/etc/gce.conf"}},
      {
        "name": "ssl-certs",
        "hostPath": {
          "path": "/etc/ssl/certs"
        }
      },
      {
        "name": "usrsharecacerts",
        "hostPath": {
          "path": "/usr/share/ca-certificates"
        }
      },
      {
        "name": "logdir",
        "hostPath": {
          "path": "/var/log"
        }
      },
      {
        "name": "kubeconfig-volume",
        "secret": {
          "secretName": "kubeconfig"
        }
      }
    ],
    "restartPolicy": "Always"
  }
}
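
The `{{...}}` placeholders in this template are filled in by the start script (see the sed substitutions later in this diff). With the example values used above (min=1, max=100, node group `my-kubemark-group`), the rendered flag would read:

```
--nodes=1:100:my-kubemark-group  # min:max:name of the autoscaled node group
```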


@ -4,6 +4,7 @@ metadata:
  name: hollow-node
  labels:
    name: hollow-node
    {{kubemark_mig_config}}
spec:
  replicas: {{numreplicas}}
  selector:
@ -12,6 +13,7 @@ spec:
    metadata:
      labels:
        name: hollow-node
        {{kubemark_mig_config}}
    spec:
      initContainers:
      - name: init-inotify-limit
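
After substitution, `{{kubemark_mig_config}}` becomes a label of the form `autoscaling.k8s.io/nodegroup: <MIG name>` (see `KUBEMARK_MIG_CONFIG` in the start script below), so every hollow-node pod is tagged with the node group it belongs to. With the illustrative MIG name from earlier, for example, each replica would carry:

```
labels:
  name: hollow-node
  autoscaling.k8s.io/nodegroup: my-kubemark-group  # example value
```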


@ -0,0 +1,16 @@
# This is the role binding for the kubemark cluster autoscaler.
# TODO: Use proper Cluster Autoscaler role (github.com/kubernetes/autoscaler/issues/383)
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cluster-autoscaler-view-binding
  labels:
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:cluster-autoscaler


@ -92,6 +92,7 @@ function generate-pki-config {
  KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
  NODE_PROBLEM_DETECTOR_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
  HEAPSTER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
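  # New: a bearer token for Cluster Autoscaler, generated like the tokens above
  # (read 128 random bytes, base64-encode, strip "=+/", keep the first 32 chars).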
  CLUSTER_AUTOSCALER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
  echo "Generated PKI authentication data for kubemark."
}
@ -117,6 +118,7 @@ function write-pki-config-to-master {
sudo bash -c \"echo \"${KUBELET_TOKEN},system:node:node-name,uid:kubelet,system:nodes\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
sudo bash -c \"echo \"${KUBE_PROXY_TOKEN},system:kube-proxy,uid:kube_proxy\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
sudo bash -c \"echo \"${HEAPSTER_TOKEN},system:heapster,uid:heapster\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
sudo bash -c \"echo \"${CLUSTER_AUTOSCALER_TOKEN},system:cluster-autoscaler,uid:cluster-autoscaler\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
sudo bash -c \"echo \"${NODE_PROBLEM_DETECTOR_TOKEN},system:node-problem-detector,uid:system:node-problem-detector\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
sudo bash -c \"echo ${KUBE_PASSWORD},admin,admin > /home/kubernetes/k8s_auth_data/basic_auth.csv\""
execute-cmd-on-master-with-retries "${PKI_SETUP_CMD}" 3
@ -274,6 +276,25 @@ contexts:
  name: kubemark-context
current-context: kubemark-context")
  # Create kubeconfig for Cluster Autoscaler.
  CLUSTER_AUTOSCALER_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: cluster-autoscaler
  user:
    token: ${CLUSTER_AUTOSCALER_TOKEN}
clusters:
- name: kubemark
  cluster:
    insecure-skip-tls-verify: true
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: cluster-autoscaler
  name: kubemark-context
current-context: kubemark-context")
  # Create kubeconfig for NodeProblemDetector.
  NPD_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
@ -306,31 +327,51 @@ current-context: kubemark-context")
    --from-literal=kubelet.kubeconfig="${KUBELET_KUBECONFIG_CONTENTS}" \
    --from-literal=kubeproxy.kubeconfig="${KUBEPROXY_KUBECONFIG_CONTENTS}" \
    --from-literal=heapster.kubeconfig="${HEAPSTER_KUBECONFIG_CONTENTS}" \
    --from-literal=cluster_autoscaler.kubeconfig="${CLUSTER_AUTOSCALER_KUBECONFIG_CONTENTS}" \
    --from-literal=npd.kubeconfig="${NPD_KUBECONFIG_CONTENTS}"
  # Create addon pods.
  # Heapster.
  mkdir -p "${RESOURCE_DIRECTORY}/addons"
  sed "s/{{MASTER_IP}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/heapster_template.json" > "${RESOURCE_DIRECTORY}/addons/heapster.json"
  metrics_mem_per_node=4
-  metrics_mem=$((200 + ${metrics_mem_per_node}*${NUM_NODES:-10}))
+  metrics_mem=$((200 + ${metrics_mem_per_node}*${NUM_NODES}))
  sed -i'' -e "s/{{METRICS_MEM}}/${metrics_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
-  metrics_cpu_per_node_numerator=${NUM_NODES:-10}
+  metrics_cpu_per_node_numerator=${NUM_NODES}
  metrics_cpu_per_node_denominator=2
  metrics_cpu=$((80 + metrics_cpu_per_node_numerator / metrics_cpu_per_node_denominator))
  sed -i'' -e "s/{{METRICS_CPU}}/${metrics_cpu}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
  eventer_mem_per_node=500
-  eventer_mem=$((200 * 1024 + ${eventer_mem_per_node}*${NUM_NODES:-10}))
+  eventer_mem=$((200 * 1024 + ${eventer_mem_per_node}*${NUM_NODES}))
  sed -i'' -e "s/{{EVENTER_MEM}}/${eventer_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
  # Cluster Autoscaler.
  if [[ "${ENABLE_KUBEMARK_CLUSTER_AUTOSCALER}" == "true" ]]; then
    echo "Setting up Cluster Autoscaler"
    KUBEMARK_AUTOSCALER_MIG_NAME="${KUBEMARK_AUTOSCALER_MIG_NAME:-${NODE_INSTANCE_PREFIX}-group}"
    KUBEMARK_AUTOSCALER_MIN_NODES="${KUBEMARK_AUTOSCALER_MIN_NODES:-0}"
    KUBEMARK_AUTOSCALER_MAX_NODES="${KUBEMARK_AUTOSCALER_MAX_NODES:-10}"
    NUM_NODES=${KUBEMARK_AUTOSCALER_MAX_NODES}
    echo "Setting maximum cluster size to ${NUM_NODES}."
    KUBEMARK_MIG_CONFIG="autoscaling.k8s.io/nodegroup: ${KUBEMARK_AUTOSCALER_MIG_NAME}"
    sed "s/{{master_ip}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/cluster-autoscaler_template.json" > "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
    sed -i'' -e "s/{{kubemark_autoscaler_mig_name}}/${KUBEMARK_AUTOSCALER_MIG_NAME}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
    sed -i'' -e "s/{{kubemark_autoscaler_min_nodes}}/${KUBEMARK_AUTOSCALER_MIN_NODES}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
    sed -i'' -e "s/{{kubemark_autoscaler_max_nodes}}/${KUBEMARK_AUTOSCALER_MAX_NODES}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
  fi
"${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/addons" --namespace="kubemark"
# Create the replication controller for hollow-nodes.
sed "s/{{numreplicas}}/${NUM_NODES:-10}/g" "${RESOURCE_DIRECTORY}/hollow-node_template.yaml" > "${RESOURCE_DIRECTORY}/hollow-node.yaml"
# We allow to override the NUM_REPLICAS when running Cluster Autoscaler.
NUM_REPLICAS=${NUM_REPLICAS:-${NUM_NODES}}
sed "s/{{numreplicas}}/${NUM_REPLICAS}/g" "${RESOURCE_DIRECTORY}/hollow-node_template.yaml" > "${RESOURCE_DIRECTORY}/hollow-node.yaml"
proxy_cpu=20
if [ "${NUM_NODES:-10}" -gt 1000 ]; then
if [ "${NUM_NODES}" -gt 1000 ]; then
proxy_cpu=50
fi
proxy_mem_per_node=50
proxy_mem=$((100 * 1024 + ${proxy_mem_per_node}*${NUM_NODES:-10}))
proxy_mem=$((100 * 1024 + ${proxy_mem_per_node}*${NUM_NODES}))
sed -i'' -e "s/{{HOLLOW_PROXY_CPU}}/${proxy_cpu}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
sed -i'' -e "s/{{HOLLOW_PROXY_MEM}}/${proxy_mem}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
sed -i'' -e "s/{{registry}}/${CONTAINER_REGISTRY}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
@ -339,6 +380,7 @@ current-context: kubemark-context")
sed -i'' -e "s/{{kubelet_verbosity_level}}/${KUBELET_TEST_LOG_LEVEL}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
sed -i'' -e "s/{{kubeproxy_verbosity_level}}/${KUBEPROXY_TEST_LOG_LEVEL}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
sed -i'' -e "s/{{use_real_proxier}}/${USE_REAL_PROXIER}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
sed -i'' -e "s'{{kubemark_mig_config}}'${KUBEMARK_MIG_CONFIG:-}'g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
"${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/hollow-node.yaml" --namespace="kubemark"
echo "Created secrets, configMaps, replication-controllers required for hollow-nodes."
@ -351,7 +393,7 @@ function wait-for-hollow-nodes-to-run-or-timeout {
nodes=$("${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node 2> /dev/null) || true
ready=$(($(echo "${nodes}" | grep -v "NotReady" | wc -l) - 1))
until [[ "${ready}" -ge "${NUM_NODES}" ]]; do
until [[ "${ready}" -ge "${NUM_REPLICAS}" ]]; do
echo -n "."
sleep 1
now=$(date +%s)