mirror of https://github.com/k3s-io/k3s
Scheduling algorithm provider flag in kube-up.sh
parent a41e6e3817
commit 11fabd7176
@@ -759,6 +759,11 @@ EOF
   if [ -n "${DNS_ZONE_NAME:-}" ]; then
     cat >>$file <<EOF
 DNS_ZONE_NAME: $(yaml-quote ${DNS_ZONE_NAME})
 EOF
   fi
+  if [ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]; then
+    cat >>$file <<EOF
+SCHEDULING_ALGORITHM_PROVIDER: $(yaml-quote ${SCHEDULING_ALGORITHM_PROVIDER})
+EOF
+  fi
 }
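For orientation, a minimal sketch of the line this heredoc appends to the generated kube-env file once the variable is set. yaml_quote_sketch below is only a stand-in for kube-up's yaml-quote helper (assumed to single-quote the value, doubling embedded quotes), and ClusterAutoscalerProvider is an example value, not a recommendation.

    #!/usr/bin/env bash
    # Stand-in for yaml-quote: single-quote the value, doubling any embedded quotes.
    yaml_quote_sketch() { echo "'$(echo "${1:-}" | sed -e "s/'/''/g")'"; }

    SCHEDULING_ALGORITHM_PROVIDER="ClusterAutoscalerProvider"   # example value only
    if [ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]; then
      echo "SCHEDULING_ALGORITHM_PROVIDER: $(yaml_quote_sketch "${SCHEDULING_ALGORITHM_PROVIDER}")"
    fi
    # Prints: SCHEDULING_ALGORITHM_PROVIDER: 'ClusterAutoscalerProvider'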
@@ -160,3 +160,6 @@ E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}
 
 # Evict pods whenever compute resource availability on the nodes gets below a threshold.
 EVICTION_HARD="${EVICTION_HARD:-memory.available<100Mi,nodefs.available<10%}"
+
+# Optional: custom scheduling algorithm
+SCHEDULING_ALGORITHM_PROVIDER="${SCHEDULING_ALGORITHM_PROVIDER:-}"
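With the default above in place, the knob is picked up from the caller's environment. A minimal usage sketch, assuming the usual cluster/kube-up.sh entry point; the provider name is illustrative only, and the accepted values are whatever the deployed kube-scheduler's --algorithm-provider flag supports.

    # Hypothetical invocation; provider name is an example, not a recommendation.
    export KUBERNETES_PROVIDER=gce
    export SCHEDULING_ALGORITHM_PROVIDER=ClusterAutoscalerProvider
    cluster/kube-up.sh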
@@ -190,3 +190,6 @@ PREPULL_E2E_IMAGES="${PREPULL_E2E_IMAGES:-true}"
 
 # Evict pods whenever compute resource availability on the nodes gets below a threshold.
 EVICTION_HARD="${EVICTION_HARD:-memory.available<100Mi,nodefs.available<10%}"
+
+# Optional: custom scheduling algorithm
+SCHEDULING_ALGORITHM_PROVIDER="${SCHEDULING_ALGORITHM_PROVIDER:-}"
@@ -577,6 +577,11 @@ EOF
   else
     cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
 federations_domain_map: ''
 EOF
   fi
+  if [ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]; then
+    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
+scheduling_algorithm_provider: '$(echo "${SCHEDULING_ALGORITHM_PROVIDER}" | sed -e "s/'/''/g")'
+EOF
+  fi
 }
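The sed expression doubles any single quote in the value so the generated pillar entry remains a valid single-quoted scalar. A small sketch of the emitted line, using a contrived value:

    SCHEDULING_ALGORITHM_PROVIDER="Custom'Provider"   # contrived example containing a quote
    echo "scheduling_algorithm_provider: '$(echo "${SCHEDULING_ALGORITHM_PROVIDER}" | sed -e "s/'/''/g")'"
    # Prints: scheduling_algorithm_provider: 'Custom''Provider'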
@@ -809,6 +809,9 @@ function start-kube-scheduler {
   if [[ -n "${FEATURE_GATES:-}" ]]; then
     params+=" --feature-gates=${FEATURE_GATES}"
   fi
+  if [[ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]]; then
+    params+=" --algorithm-provider=${SCHEDULING_ALGORITHM_PROVIDER}"
+  fi
   local -r kube_scheduler_docker_tag=$(cat "${KUBE_HOME}/kube-docker-files/kube-scheduler.docker_tag")
 
   # Remove salt comments and replace variables with values.
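A minimal sketch of how the flag accumulates into the scheduler command line in this function; the log level and feature-gate values are placeholders, not taken from the commit.

    params="--v=2"                                              # placeholder log level
    FEATURE_GATES="AllAlpha=false"                              # placeholder
    SCHEDULING_ALGORITHM_PROVIDER="ClusterAutoscalerProvider"   # example value only
    if [[ -n "${FEATURE_GATES:-}" ]]; then
      params+=" --feature-gates=${FEATURE_GATES}"
    fi
    if [[ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]]; then
      params+=" --algorithm-provider=${SCHEDULING_ALGORITHM_PROVIDER}"
    fi
    echo "kube-scheduler ${params}"
    # -> kube-scheduler --v=2 --feature-gates=AllAlpha=false --algorithm-provider=ClusterAutoscalerProvider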
@@ -675,6 +675,10 @@ start_kube_scheduler() {
     log_level="${SCHEDULER_TEST_LOG_LEVEL}"
   fi
   params="${log_level} ${SCHEDULER_TEST_ARGS:-}"
+  if [ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]; then
+    params="${params} --algorithm-provider=${SCHEDULING_ALGORITHM_PROVIDER}"
+  fi
+
   readonly kube_scheduler_docker_tag=$(cat "${kube_home}/kube-docker-files/kube-scheduler.docker_tag")
 
   # Remove salt comments and replace variables with values
@@ -7,10 +7,15 @@
 
 {% set feature_gates = "" -%}
 {% if grains.feature_gates is defined -%}
 {% set feature_gates = "--feature-gates=" + grains.feature_gates -%}
 {% endif -%}
 
-{% set params = params + log_level + " " + feature_gates -%}
+{% set scheduling_algorithm_provider = "" -%}
+{% if grains.scheduling_algorithm_provider is defined -%}
+{% set scheduling_algorithm_provider = "--algorithm-provider=" + grains.scheduling_algorithm_provider -%}
+{% endif -%}
+
+{% set params = params + log_level + " " + feature_gates + " " + scheduling_algorithm_provider -%}
 
 # test_args has to be kept at the end, so they'll overwrite any prior configuration
 {% if pillar['scheduler_test_args'] is defined -%}
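A rough shell paraphrase of the Jinja block above, just to make the concatenation explicit; the lowercase variables stand in for the Salt grains and template variables and are not identifiers from the repo.

    # Illustrative paraphrase of the template logic (not Salt/Jinja itself).
    scheduling_algorithm_provider=""
    if [ -n "${grain_scheduling_algorithm_provider:-}" ]; then   # stand-in for grains.scheduling_algorithm_provider
      scheduling_algorithm_provider="--algorithm-provider=${grain_scheduling_algorithm_provider}"
    fi
    params="${params:-} ${log_level:-} ${feature_gates:-} ${scheduling_algorithm_provider}"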
@@ -50,7 +50,7 @@ cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set api_servers_with_p
 cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set api_servers_with_port = api_servers -%}
 cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set cluster_cidr=" --cluster-cidr=" + pillar['cluster_cidr'] %}
 cluster/saltbase/salt/kube-proxy/kube-proxy.manifest:{% set params = log_level + " " + feature_gates + " " + test_args -%}
-cluster/saltbase/salt/kube-scheduler/kube-scheduler.manifest:{% set params = params + log_level + " " + feature_gates -%}
+cluster/saltbase/salt/kube-scheduler/kube-scheduler.manifest:{% set params = params + log_level + " " + feature_gates + " " + scheduling_algorithm_provider -%}
 cluster/saltbase/salt/kubelet/default: {% set api_servers_with_port = api_servers + ":6443" -%}
 cluster/saltbase/salt/kubelet/default: {% set api_servers_with_port = api_servers -%}
 cluster/saltbase/salt/kubelet/default: {% set enable_custom_metrics="--enable-custom-metrics=" + pillar['enable_custom_metrics'] %}
@@ -84,10 +84,6 @@ federation/config.default.json: "cluster_name": "cluster3-kubernetes",
 federation/config.default.json: "num_nodes": 3,
 federation/config.default.json: "num_nodes": 3,
 federation/config.default.json: "num_nodes": 3,
-hack/fed-up-cluster.sh: advertise_address="--advertise_address=${API_HOST}"
-hack/fed-up-cluster.sh: runtime_config="--runtime-config=${RUNTIME_CONFIG}"
-hack/fed-up-cluster.sh: advertise_address=""
-hack/fed-up-cluster.sh: runtime_config=""
 hack/local-up-cluster.sh: advertise_address="--advertise_address=${API_HOST}"
 hack/local-up-cluster.sh: runtime_config="--runtime-config=${RUNTIME_CONFIG}"
 hack/local-up-cluster.sh: advertise_address=""