mirror of https://github.com/k3s-io/k3s
Merge pull request #30468 from jlowdermilk/feature-config
Automatic merge from submit-queue. Feature gates for kube-system components. Implements [this proposal](https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/runtimeconfig.md). Adds `--feature-gates` to the apiserver, scheduler, controller-manager, and proxy. cc @lavalamp @adityakali
commit
010c976ce8
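Reviewer note: every component touched below gets the flag through the same one-line hook: its `AddFlags` registers the shared gate on the `pflag.FlagSet` it already owns, and the gate is read back after flag parsing. The following is a minimal sketch of that flow against the `pkg/util/config` package this PR adds; `fakeServer` and the flag-set name are hypothetical stand-ins for the real option structs (`CMServer`, `ProxyServerConfig`, `SchedulerServer`, `KubeletServer`), not code from the PR.

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"

	utilconfig "k8s.io/kubernetes/pkg/util/config"
)

// fakeServer is a hypothetical stand-in for the component option structs.
type fakeServer struct{}

// AddFlags mirrors the one-line hook the diff adds to each component.
func (s *fakeServer) AddFlags(fs *pflag.FlagSet) {
	// ... component-specific flags are registered here ...
	utilconfig.DefaultFeatureGate.AddFlag(fs)
}

func main() {
	fs := pflag.NewFlagSet("some-component", pflag.ContinueOnError)
	(&fakeServer{}).AddFlags(fs)

	// Equivalent to invoking the binary with --feature-gates=allAlpha=true.
	if err := fs.Parse([]string{"--feature-gates=allAlpha=true"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println("allAlpha enabled:", utilconfig.DefaultFeatureGate.AllAlpha())
}
```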
@@ -682,6 +682,11 @@ EOF
if [ -n "${INITIAL_ETCD_CLUSTER:-}" ]; then
cat >>$file <<EOF
INITIAL_ETCD_CLUSTER: $(yaml-quote ${INITIAL_ETCD_CLUSTER})
EOF
fi
if [ -n "${FEATURE_GATES:-}" ]; then
cat >>$file <<EOF
FEATURE_GATES: $(yaml-quote ${FEATURE_GATES})
EOF
fi
else
@@ -102,6 +102,9 @@ fi
# Optional: customize runtime config
RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"

# Optional: set feature gates
FEATURE_GATES="${KUBE_FEATURE_GATES:-}"

# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="${KUBE_DNS_SERVER_IP:-10.0.0.10}"
@@ -907,6 +907,7 @@ EOF
fi

env-to-grains "runtime_config"
env-to-grains "feature_gates"
env-to-grains "kube_user"
}
@@ -17,6 +17,10 @@
# This script is for configuring kubernetes master and node instances. It is
# uploaded in the manifests tar ball.

# TODO: this script duplicates templating logic from cluster/saltbase/salt
# using sed. It should use an actual template parser on the manifest
# files.

set -o errexit
set -o nounset
set -o pipefail
@@ -469,6 +473,9 @@ function start-kubelet {
if [[ "${ALLOCATE_NODE_CIDRS:-}" == "true" ]]; then
flags+=" --configure-cbr0=${ALLOCATE_NODE_CIDRS}"
fi
if [[ -n "${FEATURE_GATES:-}" ]]; then
flags+=" --feature-gates=${FEATURE_GATES}"
fi
echo "KUBELET_OPTS=\"${flags}\"" > /etc/default/kubelet

# Delete docker0 to avoid interference
@@ -502,12 +509,18 @@ function start-kube-proxy {
fi
local -r kube_proxy_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-proxy.docker_tag)
local api_servers="--master=https://${KUBERNETES_MASTER_NAME}"
local params="${KUBEPROXY_TEST_LOG_LEVEL:-"--v=2"}"
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=" --feature-gates=${FEATURE_GATES}"
fi
if [[ -n "${KUBEPROXY_TEST_ARGS:-}" ]]; then
params+=" ${KUBEPROXY_TEST_ARGS}"
fi
sed -i -e "s@{{kubeconfig}}@${kubeconfig}@g" ${src_file}
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${kube_docker_registry}@g" ${src_file}
sed -i -e "s@{{pillar\['kube-proxy_docker_tag'\]}}@${kube_proxy_docker_tag}@g" ${src_file}
sed -i -e "s@{{test_args}}@${KUBEPROXY_TEST_ARGS:-}@g" ${src_file}
sed -i -e "s@{{params}}@${params}@g" ${src_file}
sed -i -e "s@{{ cpurequest }}@100m@g" ${src_file}
sed -i -e "s@{{log_level}}@${KUBEPROXY_TEST_LOG_LEVEL:-"--v=2"}@g" ${src_file}
sed -i -e "s@{{api_servers_with_port}}@${api_servers}@g" ${src_file}
if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then
sed -i -e "s@{{cluster_cidr}}@--cluster-cidr=${CLUSTER_IP_RANGE}@g" ${src_file}
@@ -661,6 +674,9 @@ function start-kube-apiserver {
if [[ -n "${RUNTIME_CONFIG:-}" ]]; then
params+=" --runtime-config=${RUNTIME_CONFIG}"
fi
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=" --feature-gates=${FEATURE_GATES}"
fi
if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then
local -r vm_external_ip=$(curl --retry 5 --retry-delay 3 --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip")
params+=" --advertise-address=${vm_external_ip}"
@@ -754,6 +770,9 @@ function start-kube-controller-manager {
if [[ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]]; then
params+=" --terminated-pod-gc-threshold=${TERMINATED_POD_GC_THRESHOLD}"
fi
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=" --feature-gates=${FEATURE_GATES}"
fi
local -r kube_rc_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-controller-manager.docker_tag)

local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-controller-manager.manifest"
@@ -782,6 +801,9 @@ function start-kube-scheduler {

# Calculate variables and set them in the manifest.
params="${SCHEDULER_TEST_LOG_LEVEL:-"--v=2"} ${SCHEDULER_TEST_ARGS:-}"
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=" --feature-gates=${FEATURE_GATES}"
fi
local -r kube_scheduler_docker_tag=$(cat "${KUBE_HOME}/kube-docker-files/kube-scheduler.docker_tag")

# Remove salt comments and replace variables with values.
@@ -130,6 +130,11 @@
{% set runtime_config = "--runtime-config=" + grains.runtime_config -%}
{% endif -%}

{% set feature_gates = "" -%}
{% if grains.feature_gates is defined -%}
{% set feature_gates = "--feature-gates=" + grains.feature_gates -%}
{% endif -%}

{% set log_level = pillar['log_level'] -%}
{% if pillar['api_server_test_log_level'] is defined -%}
{% set log_level = pillar['api_server_test_log_level'] -%}
@@ -140,7 +145,7 @@
{% set enable_garbage_collector = "--enable-garbage-collector=" + pillar['enable_garbage_collector'] -%}
{% endif -%}

{% set params = address + " " + storage_backend + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + target_ram_mb + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector -%}
{% set params = address + " " + storage_backend + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + feature_gates + " " + admission_control + " " + target_ram_mb + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector -%}
{% set params = params + " " + cert_file + " " + key_file + " --secure-port=" + secure_port + token_auth_file + " " + bind_address + " " + log_level + " " + advertise_address + " " + proxy_ssh_options + authz_mode + abac_policy_file + webhook_authentication_config + webhook_authorization_config -%}

# test_args has to be kept at the end, so they'll overwrite any prior configuration
@@ -73,7 +73,13 @@
{% set log_level = pillar['controller_manager_test_log_level'] -%}
{% endif -%}

{% set feature_gates = "" -%}
{% if grains.feature_gates is defined -%}
{% set feature_gates = "--feature-gates=" + grains.feature_gates -%}
{% endif -%}

{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + service_cluster_ip_range + " " + terminated_pod_gc + " " + enable_garbage_collector + " " + cloud_provider + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%}
{% set params = params + " " + feature_gates -%}


# test_args has to be kept at the end, so they'll overwrite any prior configuration
@@ -24,6 +24,14 @@
{% set log_level = pillar['kubeproxy_test_log_level'] -%}
{% endif -%}

{% set feature_gates = "" -%}
{% if grains.feature_gates is defined -%}
{% set feature_gates = "--feature-gates=" + grains.feature_gates -%}
{% endif -%}

# test_args should always go last to overwrite prior configuration
{% set params = log_level + " " + feature_gates + " " + test_args -%}

# kube-proxy podspec
apiVersion: v1
kind: Pod
@@ -44,7 +52,7 @@ spec:
command:
- /bin/sh
- -c
- kube-proxy {{api_servers_with_port}} {{kubeconfig}} {{cluster_cidr}} --resource-container="" {{log_level}} {{test_args}} 1>>/var/log/kube-proxy.log 2>&1
- kube-proxy {{api_servers_with_port}} {{kubeconfig}} {{cluster_cidr}} --resource-container="" {{params}} 1>>/var/log/kube-proxy.log 2>&1
securityContext:
privileged: true
volumeMounts:
@@ -5,7 +5,12 @@
{% set log_level = pillar['scheduler_test_log_level'] -%}
{% endif -%}

{% set params = params + log_level -%}
{% set feature_gates = "" -%}
{% if grains.feature_gates is defined -%}
{% set feature_gates = "--feature-gates=" + grains.feature_gates -%}
{% endif -%}

{% set params = params + log_level + " " + feature_gates -%}

# test_args has to be kept at the end, so they'll overwrite any prior configuration
{% if pillar['scheduler_test_args'] is defined -%}
@@ -141,6 +141,11 @@
{% set cpu_cfs_quota = "--cpu-cfs-quota=" + pillar['enable_cpu_cfs_quota'] -%}
{% endif -%}

{% set feature_gates = "" -%}
{% if grains['feature_gates'] is defined -%}
{% set feature_gates = "--feature-gates=" + grains['feature_gates'] -%}
{% endif %}

{% set test_args = "" -%}
{% if pillar['kubelet_test_args'] is defined -%}
{% set test_args=pillar['kubelet_test_args'] %}
@@ -204,4 +209,4 @@
{% endif -%}

# test_args has to be kept at the end, so they'll overwrite any prior configuration
DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{cloud_config}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{log_level}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{non_masquerade_cidr}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{kubelet_port}} {{experimental_flannel_overlay}} {{ reconcile_cidr_args }} {{ hairpin_mode }} {{enable_custom_metrics}} {{runtime_container}} {{kubelet_container}} {{node_labels}} {{babysit_daemons}} {{eviction_hard}} {{test_args}}"
DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{cloud_config}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{log_level}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{non_masquerade_cidr}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{kubelet_port}} {{experimental_flannel_overlay}} {{ reconcile_cidr_args }} {{ hairpin_mode }} {{enable_custom_metrics}} {{runtime_container}} {{kubelet_container}} {{node_labels}} {{babysit_daemons}} {{eviction_hard}} {{feature_gates}} {{test_args}}"
@@ -27,6 +27,7 @@ import (
	"k8s.io/kubernetes/pkg/apis/componentconfig"
	"k8s.io/kubernetes/pkg/client/leaderelection"
	"k8s.io/kubernetes/pkg/master/ports"
	"k8s.io/kubernetes/pkg/util/config"

	"github.com/spf13/pflag"
)
@@ -180,4 +181,5 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
	fs.Float32Var(&s.UnhealthyZoneThreshold, "unhealthy-zone-threshold", 0.55, "Fraction of Nodes in a zone which needs to be not Ready (minimum 3) for zone to be treated as unhealthy. ")

	leaderelection.BindFlags(&s.LeaderElection, fs)
	config.DefaultFeatureGate.AddFlag(fs)
}
@@ -26,6 +26,7 @@ import (
	"k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1"
	"k8s.io/kubernetes/pkg/kubelet/qos"
	"k8s.io/kubernetes/pkg/util"
	"k8s.io/kubernetes/pkg/util/config"

	"github.com/spf13/pflag"
)
@@ -49,10 +50,10 @@ type ProxyServerConfig struct {
}

func NewProxyConfig() *ProxyServerConfig {
	config := componentconfig.KubeProxyConfiguration{}
	api.Scheme.Convert(&v1alpha1.KubeProxyConfiguration{}, &config, nil)
	cfg := componentconfig.KubeProxyConfiguration{}
	api.Scheme.Convert(&v1alpha1.KubeProxyConfiguration{}, &cfg, nil)
	return &ProxyServerConfig{
		KubeProxyConfiguration: config,
		KubeProxyConfiguration: cfg,
		ContentType: "application/vnd.kubernetes.protobuf",
		KubeAPIQPS: 5.0,
		KubeAPIBurst: 10,
@@ -88,4 +89,5 @@ func (s *ProxyServerConfig) AddFlags(fs *pflag.FlagSet) {
	fs.Int32Var(&s.ConntrackMaxPerCore, "conntrack-max-per-core", s.ConntrackMaxPerCore,
		"Maximum number of NAT connections to track per CPU core (0 to leave as-is). This is only considered if conntrack-max is 0.")
	fs.DurationVar(&s.ConntrackTCPEstablishedTimeout.Duration, "conntrack-tcp-timeout-established", s.ConntrackTCPEstablishedTimeout.Duration, "Idle timeout for established TCP connections (0 to leave as-is)")
	config.DefaultFeatureGate.AddFlag(fs)
}
@@ -141,6 +141,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
	fs.StringVar(&s.VolumePluginDir, "volume-plugin-dir", s.VolumePluginDir, "<Warning: Alpha feature> The full path of the directory in which to search for additional third party volume plugins")
	fs.StringVar(&s.CloudProvider, "cloud-provider", s.CloudProvider, "The provider for cloud services. By default, kubelet will attempt to auto-detect the cloud provider. Specify empty string for running with no cloud provider. [default=auto-detect]")
	fs.StringVar(&s.CloudConfigFile, "cloud-config", s.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.")
	utilconfig.DefaultFeatureGate.AddFlag(fs)

	fs.StringVar(&s.KubeletCgroups, "resource-container", s.KubeletCgroups, "Optional absolute name of the resource-only container to create and run the Kubelet in.")
	fs.MarkDeprecated("resource-container", "Use --kubelet-cgroups instead. Will be removed in a future version.")
@@ -11,6 +11,7 @@ cluster/centos/util.sh: local node_ip=${node#*@}
cluster/gce/configure-vm.sh: advertise_address: '${EXTERNAL_IP}'
cluster/gce/configure-vm.sh: api_servers: '${KUBERNETES_MASTER_NAME}'
cluster/gce/configure-vm.sh: cloud_config: ${CLOUD_CONFIG}
cluster/gce/configure-vm.sh: env-to-grains "feature_gates"
cluster/gce/configure-vm.sh: env-to-grains "runtime_config"
cluster/gce/configure-vm.sh: kubelet_api_servers: '${KUBELET_APISERVER}'
cluster/gce/gci/configure-helper.sh: reconcile_cidr="false"
@@ -42,17 +43,21 @@ cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest:{% set para
cluster/saltbase/salt/etcd/etcd.manifest: "value": "{{ storage_backend }}"
cluster/saltbase/salt/etcd/etcd.manifest:{% set storage_backend = pillar.get('storage_backend', 'etcd2') -%}
cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %}
cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + storage_backend + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + target_ram_mb + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector -%}
cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + storage_backend + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + feature_gates + " " + admission_control + " " + target_ram_mb + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector -%}
cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + service_cluster_ip_range + " " + terminated_pod_gc + " " + enable_garbage_collector + " " + cloud_provider + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%}
cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = params + " " + feature_gates -%}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set api_servers_with_port = api_servers + ":6443" -%}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set api_servers_with_port = api_servers -%}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set cluster_cidr=" --cluster-cidr=" + pillar['cluster_cidr'] %}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest:{% set params = log_level + " " + feature_gates + " " + test_args -%}
cluster/saltbase/salt/kube-scheduler/kube-scheduler.manifest:{% set params = params + log_level + " " + feature_gates -%}
cluster/saltbase/salt/kubelet/default: {% set api_servers_with_port = api_servers + ":6443" -%}
cluster/saltbase/salt/kubelet/default: {% set api_servers_with_port = api_servers -%}
cluster/saltbase/salt/kubelet/default: {% set enable_custom_metrics="--enable-custom-metrics=" + pillar['enable_custom_metrics'] %}
cluster/saltbase/salt/kubelet/default: {% set eviction_hard="--eviction-hard=" + pillar['eviction_hard'] %}
cluster/saltbase/salt/kubelet/default: {% set kubelet_port="--port=" + pillar['kubelet_port'] %}
cluster/saltbase/salt/kubelet/default: {% set node_labels="--node-labels=" + pillar['node_labels'] %}
cluster/saltbase/salt/kubelet/default:{% if grains['feature_gates'] is defined -%}
cluster/saltbase/salt/kubelet/default:{% if pillar.get('non_masquerade_cidr','') -%}
cluster/saltbase/salt/opencontrail-networking-master/init.sls: - 'SERVICE_CLUSTER_IP_RANGE': '{{ pillar.get('service_cluster_ip_range') }}'
cluster/saltbase/salt/opencontrail-networking-minion/init.sls: - 'SERVICE_CLUSTER_IP_RANGE': '{{ pillar.get('service_cluster_ip_range') }}'
@@ -173,6 +173,7 @@ extra-peer-dirs
failover-timeout
failure-domains
fake-clientset
feature-gates
federated-api-burst
federated-api-qps
federated-kube-context
@@ -451,4 +451,6 @@ func (s *ServerRunOptions) AddUniversalFlags(fs *pflag.FlagSet) {
		"List of watch cache sizes for every resource (pods, nodes, etc.), comma separated. "+
		"The individual override format: resource#size, where size is a number. It takes effect "+
		"when watch-cache is enabled.")

	config.DefaultFeatureGate.AddFlag(fs)
}
@@ -0,0 +1,151 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
	"fmt"
	"sort"
	"strconv"
	"strings"

	"github.com/golang/glog"
	"github.com/spf13/pflag"
)

const (
	flagName = "feature-gates"

	// All known feature keys
	// To add a new feature, define a key for it below and add
	// a featureInfo entry to knownFeatures.

	// allAlpha is a global toggle for alpha features. Per feature key
	// values override the default set by allAlpha.
	// e.g. allAlpha=false,newFeature=true will result in newFeature=true
	allAlpha = "allAlpha"
)

var (
	// DefaultFeatureGate is a shared global FeatureGate.
	DefaultFeatureGate = &featureGate{}

	// Default values for recorded features.
	knownFeatures = map[string]featureInfo{
		allAlpha: {false, alpha},
	}

	alpha = prerelease("ALPHA")
	beta  = prerelease("BETA")
	ga    = prerelease("")
)

type prerelease string

type featureInfo struct {
	enabled    bool
	prerelease prerelease
}

// FeatureGate parses and stores flag gates for known features from
// a string like feature1=true,feature2=false,...
type FeatureGate interface {
	AddFlag(fs *pflag.FlagSet)
	// owner: @jlowdermilk
	// alpha: v1.4
	AllAlpha() bool
	// TODO: Define accessors for each non-API alpha feature.
}

// featureGate implements FeatureGate as well as pflag.Value for flag parsing.
type featureGate struct {
	features map[string]bool
}

// Set, String, and Type implement pflag.Value

// Set parses a string of the form "key1=value1,key2=value2,..." into a
// map[string]bool of known keys or returns an error.
func (f *featureGate) Set(value string) error {
	f.features = make(map[string]bool)
	for _, s := range strings.Split(value, ",") {
		if len(s) == 0 {
			continue
		}
		arr := strings.SplitN(s, "=", 2)
		k := strings.TrimSpace(arr[0])
		_, ok := knownFeatures[k]
		if !ok {
			return fmt.Errorf("unrecognized key: %s", k)
		}
		if len(arr) != 2 {
			return fmt.Errorf("missing bool value for %s", k)
		}
		v := strings.TrimSpace(arr[1])
		boolValue, err := strconv.ParseBool(v)
		if err != nil {
			return fmt.Errorf("invalid value of %s: %s, err: %v", k, v, err)
		}
		f.features[k] = boolValue
	}
	glog.Infof("feature gates: %v", f.features)
	return nil
}

func (f *featureGate) String() string {
	pairs := []string{}
	for k, v := range f.features {
		pairs = append(pairs, fmt.Sprintf("%s=%t", k, v))
	}
	sort.Strings(pairs)
	return strings.Join(pairs, ",")
}

func (f *featureGate) Type() string {
	return "mapStringBool"
}

// AllAlpha returns value for allAlpha.
func (f *featureGate) AllAlpha() bool {
	return f.lookup(allAlpha)
}

func (f *featureGate) lookup(key string) bool {
	defaultValue := knownFeatures[key].enabled
	if f.features == nil {
		panic(fmt.Sprintf("--%s has not been parsed", flagName))
	}
	if v, ok := f.features[key]; ok {
		return v
	}
	return defaultValue
}

// AddFlag adds a flag for setting global feature gates to the specified FlagSet.
func (f *featureGate) AddFlag(fs *pflag.FlagSet) {
	var known []string
	for k, v := range knownFeatures {
		pre := ""
		if v.prerelease != ga {
			pre = fmt.Sprintf("%s - ", v.prerelease)
		}
		known = append(known, fmt.Sprintf("%s=true|false (%sdefault=%t)", k, pre, v.enabled))
	}
	fs.Var(f, flagName, ""+
		"A set of key=value pairs that describe feature gates for alpha/experimental features. "+
		"Options are:\n"+strings.Join(known, "\n"))
}
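For quick reference while reviewing, here is how the gate above behaves once wired into a FlagSet. This is a sketch, not part of the PR; it assumes it sits in the same `config` package as `featureGate`, and the helper name `exampleFeatureGateUsage` is hypothetical. One behavior worth noting from `lookup`: querying a gate while `f.features` is still nil (i.e. before `Set` has ever run) panics, which is why components only read gates after flag parsing.

```go
package config

import (
	"fmt"

	"github.com/spf13/pflag"
)

// exampleFeatureGateUsage is a hypothetical helper (not in the PR) showing the
// parse-then-query flow against the shared DefaultFeatureGate defined above.
func exampleFeatureGateUsage() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	DefaultFeatureGate.AddFlag(fs)

	// Equivalent to passing --feature-gates=allAlpha=true on the command line.
	if err := fs.Parse([]string{"--feature-gates=allAlpha=true"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}

	fmt.Println(DefaultFeatureGate.AllAlpha()) // true
	fmt.Println(DefaultFeatureGate.String())   // "allAlpha=true"

	// Unknown keys are rejected by Set at parse time.
	err := fs.Parse([]string{"--feature-gates=noSuchFeature=true"})
	fmt.Println(err) // ... unrecognized key: noSuchFeature
}
```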
@@ -0,0 +1,56 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
	"fmt"
	"strings"
	"testing"

	"github.com/spf13/pflag"
)

func TestFeatureGateFlag(t *testing.T) {
	tests := []struct {
		arg        string
		allAlpha   bool
		parseError error
	}{
		{fmt.Sprintf("--%s=fooBarBaz=maybeidk", flagName), false, fmt.Errorf("unrecognized key: fooBarBaz")},
		{fmt.Sprintf("--%s=", flagName), false, nil},
		{fmt.Sprintf("--%s=allAlpha=false", flagName), false, nil},
		{fmt.Sprintf("--%s=allAlpha=true", flagName), true, nil},
		{fmt.Sprintf("--%s=allAlpha=banana", flagName), false, fmt.Errorf("invalid value of allAlpha")},
	}
	for i, test := range tests {
		fs := pflag.NewFlagSet("testfeaturegateflag", pflag.ContinueOnError)
		f := &featureGate{}
		f.AddFlag(fs)

		err := fs.Parse([]string{test.arg})
		if test.parseError != nil {
			if !strings.Contains(err.Error(), test.parseError.Error()) {
				t.Errorf("%d: Parse() Expected %v, Got %v", i, test.parseError, err)
			}
		} else if err != nil {
			t.Errorf("%d: Parse() Expected nil, Got %v", i, err)
		}
		if alpha := f.AllAlpha(); alpha != test.allAlpha {
			t.Errorf("%d: AlphaEnabled() expected %v, Got %v", i, test.allAlpha, alpha)
		}
	}
}
@@ -22,6 +22,7 @@ import (
	"k8s.io/kubernetes/pkg/apis/componentconfig"
	"k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1"
	"k8s.io/kubernetes/pkg/client/leaderelection"
	"k8s.io/kubernetes/pkg/util/config"
	"k8s.io/kubernetes/plugin/pkg/scheduler/factory"

	"github.com/spf13/pflag"
@@ -36,15 +37,16 @@ type SchedulerServer struct {
	// Kubeconfig is Path to kubeconfig file with authorization and master
	// location information.
	Kubeconfig string
	// Dynamic configuration for scheduler features.
}

// NewSchedulerServer creates a new SchedulerServer with default parameters
func NewSchedulerServer() *SchedulerServer {
	config := componentconfig.KubeSchedulerConfiguration{}
	api.Scheme.Convert(&v1alpha1.KubeSchedulerConfiguration{}, &config, nil)
	config.LeaderElection.LeaderElect = true
	cfg := componentconfig.KubeSchedulerConfiguration{}
	api.Scheme.Convert(&v1alpha1.KubeSchedulerConfiguration{}, &cfg, nil)
	cfg.LeaderElection.LeaderElect = true
	s := SchedulerServer{
		KubeSchedulerConfiguration: config,
		KubeSchedulerConfiguration: cfg,
	}
	return &s
}
@@ -73,4 +75,5 @@ func (s *SchedulerServer) AddFlags(fs *pflag.FlagSet) {
		"to every RequiredDuringScheduling affinity rule. --hard-pod-affinity-symmetric-weight represents the weight of implicit PreferredDuringScheduling affinity rule.")
	fs.StringVar(&s.FailureDomains, "failure-domains", api.DefaultFailureDomains, "Indicate the \"all topologies\" set for an empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.")
	leaderelection.BindFlags(&s.LeaderElection, fs)
	config.DefaultFeatureGate.AddFlag(fs)
}