mirror of https://github.com/k3s-io/k3s
Merge pull request #26828 from vishh/oom-kill-enable
Automatic merge from submit-queue

Enable support for memory eviction configuration via salt

Added evictions based on memory by default whenever the available memory is < 100Mi. Updated GCE and GCI.
commit a283a0a759
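The new default flows from the cluster config scripts into kube-env (or the salt pillar) and ends up on the kubelet command line as --eviction-hard. A minimal usage sketch, assuming the GCE provider path touched by the hunks below and the standard cluster/kube-up.sh entry point; the 250Mi override value is purely illustrative:

    # Override the hard eviction threshold before bringing the cluster up;
    # config-default.sh / config-test.sh below only apply the 100Mi default
    # when EVICTION_HARD is not already set in the environment.
    export EVICTION_HARD="memory.available<250Mi"
    cluster/kube-up.sh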
@@ -647,6 +647,11 @@ EOF
  if [ -n "${NODE_LABELS:-}" ]; then
    cat >>$file <<EOF
NODE_LABELS: $(yaml-quote ${NODE_LABELS})
EOF
  fi
  if [ -n "${EVICTION_HARD:-}" ]; then
    cat >>$file <<EOF
EVICTION_HARD: $(yaml-quote ${EVICTION_HARD})
EOF
  fi
  if [[ "${OS_DISTRIBUTION}" == "coreos" ]]; then
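Assuming yaml-quote is the usual single-quoting helper from cluster/gce/util.sh (an assumption, it is not shown in this diff), the entry appended to the generated kube-env with the default threshold would presumably look like:

    # Illustrative kube-env line produced by the heredoc above:
    EVICTION_HARD: 'memory.available<100Mi'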
@@ -138,3 +138,6 @@ OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"
HAIRPIN_MODE="${HAIRPIN_MODE:-promiscuous-bridge}" # promiscuous-bridge, hairpin-veth, none
# Optional: if set to true, kube-up will configure the cluster to run e2e tests.
E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}

# Evict pods whenever compute resource availability on the nodes gets below a threshold.
EVICTION_HARD="${EVICTION_HARD:-memory.available<100Mi}"
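The ${EVICTION_HARD:-...} expansion only supplies the 100Mi default when the variable is unset or empty, so an operator-provided value wins. A quick illustration; the override value is hypothetical:

    EVICTION_HARD="memory.available<250Mi"        # hypothetical operator override
    EVICTION_HARD="${EVICTION_HARD:-memory.available<100Mi}"
    echo "${EVICTION_HARD}"                       # prints memory.available<250Mi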
@@ -170,3 +170,6 @@ E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}
# Optional: if set to true, a image puller is deployed. Only for use in e2e clusters.
# TODO: Pipe this through GKE e2e clusters once we know it helps.
PREPULL_E2E_IMAGES="${PREPULL_E2E_IMAGES:-true}"

# Evict pods whenever compute resource availability on the nodes gets below a threshold.
EVICTION_HARD="${EVICTION_HARD:-memory.available<100Mi}"
@@ -535,6 +535,11 @@ EOF
  if [ -n "${NODE_LABELS:-}" ]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
node_labels: '$(echo "${NODE_LABELS}" | sed -e "s/'/''/g")'
EOF
  fi
  if [ -n "${EVICTION_HARD:-}" ]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
eviction_hard: '$(echo "${EVICTION_HARD}" | sed -e "s/'/''/g")'
EOF
  fi
  if [[ "${ENABLE_NODE_AUTOSCALER:-false}" == "true" ]]; then
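A minimal sketch of the pillar entry this appends, assuming the default value; the sed expression only doubles embedded single quotes so the value survives the single-quoted SLS scalar:

    EVICTION_HARD="memory.available<100Mi"
    echo "eviction_hard: '$(echo "${EVICTION_HARD}" | sed -e "s/'/''/g")'"
    # prints: eviction_hard: 'memory.available<100Mi'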
@@ -351,6 +351,9 @@ function start-kubelet {
  if [[ -n "${NODE_LABELS:-}" ]]; then
    flags+=" --node-labels=${NODE_LABELS}"
  fi
  if [[ -n "${EVICTION_HARD:-}" ]]; then
    flags+=" --eviction-hard=${EVICTION_HARD}"
  fi
  if [[ "${ALLOCATE_NODE_CIDRS:-}" == "true" ]]; then
    flags+=" --configure-cbr0=${ALLOCATE_NODE_CIDRS}"
  fi
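One way to spot-check the result on a GCI node after this lands; the value shown is the default, and inspecting the process table like this is just an illustrative approach, not part of the change:

    # Expect something like: --eviction-hard=memory.available<100Mi
    ps aux | grep '[k]ubelet' | grep -o -- '--eviction-hard=[^ ]*'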
@@ -196,5 +196,10 @@
{% set node_labels="--node-labels=" + pillar['node_labels'] %}
{% endif -%}

{% set eviction_hard = "" %}
{% if pillar['eviction_hard'] is defined -%}
{% set eviction_hard="--eviction-hard=" + pillar['eviction_hard'] %}
{% endif -%}

# test_args has to be kept at the end, so they'll overwrite any prior configuration
DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{cloud_config}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{log_level}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{non_masquerade_cidr}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{kubelet_port}} {{experimental_flannel_overlay}} {{ reconcile_cidr_args }} {{ hairpin_mode }} {{enable_custom_metrics}} {{runtime_container}} {{kubelet_container}} {{node_labels}} {{babysit_daemons}} {{test_args}}"
DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{cloud_config}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{log_level}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{non_masquerade_cidr}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{kubelet_port}} {{experimental_flannel_overlay}} {{ reconcile_cidr_args }} {{ hairpin_mode }} {{enable_custom_metrics}} {{runtime_container}} {{kubelet_container}} {{node_labels}} {{babysit_daemons}} {{eviction_hard}} {{test_args}}"
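When the eviction_hard pillar is present (as written by configure-vm.sh above) the template expands the new variable into the kubelet flag; when it is absent, {{eviction_hard}} stays empty and DAEMON_ARGS is unchanged. A rough shell rendering of the same logic, with the pillar value as an assumption:

    pillar_eviction_hard="memory.available<100Mi"   # stand-in for pillar['eviction_hard']
    eviction_hard=""
    if [ -n "${pillar_eviction_hard}" ]; then
      eviction_hard="--eviction-hard=${pillar_eviction_hard}"
    fi
    echo "DAEMON_ARGS contains: ${eviction_hard}"
    # prints: DAEMON_ARGS contains: --eviction-hard=memory.available<100Mi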
@@ -55,6 +55,8 @@ def get_all_files(rootdir):
        # don't visit certain dirs
        if 'vendor' in dirs:
            dirs.remove('vendor')
        if '_output' in dirs:
            dirs.remove('_output')
        if '_gopath' in dirs:
            dirs.remove('_gopath')
        if 'third_party' in dirs:
@@ -8,8 +8,6 @@ cluster/aws/templates/configure-vm-aws.sh: env-to-grains "hostname_override"
cluster/aws/templates/configure-vm-aws.sh: env-to-grains "runtime_config"
cluster/aws/templates/configure-vm-aws.sh: kubelet_api_servers: '${KUBELET_APISERVER}'
cluster/centos/util.sh: local node_ip=${node#*@}
cluster/gce/config-default.sh:# reloads <os_distro>/helper.sh in the gap between when the master is created
cluster/gce/config-test.sh:# reloads <os_distro>/helper.sh in the gap between when the master is created
cluster/gce/configure-vm.sh: advertise_address: '${EXTERNAL_IP}'
cluster/gce/configure-vm.sh: api_servers: '${KUBERNETES_MASTER_NAME}'
cluster/gce/configure-vm.sh: cloud_config: ${CLOUD_CONFIG}
@@ -46,6 +44,7 @@ cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set cluster_cidr=" --c
cluster/saltbase/salt/kubelet/default: {% set api_servers_with_port = api_servers + ":6443" -%}
cluster/saltbase/salt/kubelet/default: {% set api_servers_with_port = api_servers -%}
cluster/saltbase/salt/kubelet/default: {% set enable_custom_metrics="--enable-custom-metrics=" + pillar['enable_custom_metrics'] %}
cluster/saltbase/salt/kubelet/default: {% set eviction_hard="--eviction-hard=" + pillar['eviction_hard'] %}
cluster/saltbase/salt/kubelet/default: {% set kubelet_port="--port=" + pillar['kubelet_port'] %}
cluster/saltbase/salt/kubelet/default: {% set node_labels="--node-labels=" + pillar['node_labels'] %}
cluster/saltbase/salt/kubelet/default:{% if pillar.get('non_masquerade_cidr','') -%}
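These last two hunks appear to be bookkeeping for the flag-naming checker: eviction_hard contains an underscore, so the kubelet/default line is added to its exception list so the underscore check keeps passing (the exact file, presumably hack/verify-flags/exceptions.txt, is not named in this excerpt). A hypothetical sanity check:

    # Hypothetical: confirm the new underscore-containing entry is present in both
    # the salt template and the checker's exception list (file name assumed).
    grep -n 'eviction_hard' cluster/saltbase/salt/kubelet/default hack/verify-flags/exceptions.txt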