mirror of https://github.com/k3s-io/k3s
use external KubeSchedulerConfiguration
parent 8a1c7ba428
commit 7c68c585f4
@@ -24,13 +24,13 @@ import (
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/client-go/tools/leaderelection"
 	"k8s.io/client-go/tools/record"
-	"k8s.io/kubernetes/pkg/apis/componentconfig"
+	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
 )

 // Config has all the context to run a Scheduler
 type Config struct {
 	// config is the scheduler server's configuration object.
-	ComponentConfig componentconfig.KubeSchedulerConfiguration
+	ComponentConfig kubeschedulerconfig.KubeSchedulerConfiguration

 	InsecureServing        *apiserver.DeprecatedInsecureServingInfo // nil will disable serving on an insecure port
 	InsecureMetricsServing *apiserver.DeprecatedInsecureServingInfo // non-nil if metrics should be served independently

@@ -18,29 +18,17 @@ package options

 import (
 	"errors"
 	"fmt"
 	"io/ioutil"
 	"os"

 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/serializer"
 	"k8s.io/apimachinery/pkg/runtime/serializer/json"
-	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-	"k8s.io/kubernetes/pkg/apis/componentconfig"
-	componentconfigv1alpha1 "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1"
+	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
+	kubeschedulerscheme "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
+	kubeschedulerconfigv1alpha1 "k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1"
 )

-var (
-	configScheme = runtime.NewScheme()
-	configCodecs = serializer.NewCodecFactory(configScheme)
-)
-
-func init() {
-	utilruntime.Must(componentconfig.AddToScheme(configScheme))
-	utilruntime.Must(componentconfigv1alpha1.AddToScheme(configScheme))
-}
-
-func loadConfigFromFile(file string) (*componentconfig.KubeSchedulerConfiguration, error) {
+func loadConfigFromFile(file string) (*kubeschedulerconfig.KubeSchedulerConfiguration, error) {
 	data, err := ioutil.ReadFile(file)
 	if err != nil {
 		return nil, err

@@ -49,22 +37,18 @@ func loadConfigFromFile(file string) (*componentconfig.KubeSchedulerConfiguratio
 	return loadConfig(data)
 }

-func loadConfig(data []byte) (*componentconfig.KubeSchedulerConfiguration, error) {
-	configObj, gvk, err := configCodecs.UniversalDecoder().Decode(data, nil, nil)
-	if err != nil {
+func loadConfig(data []byte) (*kubeschedulerconfig.KubeSchedulerConfiguration, error) {
+	configObj := &kubeschedulerconfig.KubeSchedulerConfiguration{}
+	if err := runtime.DecodeInto(kubeschedulerscheme.Codecs.UniversalDecoder(), data, configObj); err != nil {
 		return nil, err
 	}
-	config, ok := configObj.(*componentconfig.KubeSchedulerConfiguration)
-	if !ok {
-		return nil, fmt.Errorf("got unexpected config type: %v", gvk)
-	}
-	return config, nil
+	return configObj, nil
 }

 // WriteConfigFile writes the config into the given file name as YAML.
-func WriteConfigFile(fileName string, cfg *componentconfig.KubeSchedulerConfiguration) error {
+func WriteConfigFile(fileName string, cfg *kubeschedulerconfig.KubeSchedulerConfiguration) error {
 	var encoder runtime.Encoder
-	mediaTypes := configCodecs.SupportedMediaTypes()
+	mediaTypes := kubeschedulerscheme.Codecs.SupportedMediaTypes()
 	for _, info := range mediaTypes {
 		if info.MediaType == "application/yaml" {
 			encoder = info.Serializer

@@ -74,8 +58,8 @@ func WriteConfigFile(fileName string, cfg *componentconfig.KubeSchedulerConfigur
 	if encoder == nil {
 		return errors.New("unable to locate yaml encoder")
 	}
-	encoder = json.NewYAMLSerializer(json.DefaultMetaFactory, configScheme, configScheme)
-	encoder = configCodecs.EncoderForVersion(encoder, componentconfigv1alpha1.SchemeGroupVersion)
+	encoder = json.NewYAMLSerializer(json.DefaultMetaFactory, kubeschedulerscheme.Scheme, kubeschedulerscheme.Scheme)
+	encoder = kubeschedulerscheme.Codecs.EncoderForVersion(encoder, kubeschedulerconfigv1alpha1.SchemeGroupVersion)

 	configFile, err := os.Create(fileName)
 	if err != nil {

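The following is not part of the commit; it is a minimal, self-contained sketch (the inline YAML and printed output are illustrative assumptions) of how the new loader decodes a KubeSchedulerConfiguration through the scheduler scheme's codecs, mirroring the loadConfig change above. A config declaring the old componentconfig/v1alpha1 apiVersion is no longer registered in that scheme and fails to decode, which is what the options test later in this diff expects.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
	kubeschedulerscheme "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
)

func main() {
	// A config in the new API group; the old "componentconfig/v1alpha1"
	// group would be rejected by this same decoder.
	data := []byte(`apiVersion: kubescheduler.config.k8s.io/v1alpha1
kind: KubeSchedulerConfiguration
leaderElection:
  leaderElect: true`)

	// Decode into the internal type via the scheduler scheme, as loadConfig now does.
	cfg := &kubeschedulerconfig.KubeSchedulerConfiguration{}
	if err := runtime.DecodeInto(kubeschedulerscheme.Codecs.UniversalDecoder(), data, cfg); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println("leaderElect:", cfg.LeaderElection.LeaderElect)
}
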
@@ -20,7 +20,7 @@ import (
 	"fmt"
 	"github.com/spf13/pflag"

-	"k8s.io/kubernetes/pkg/apis/componentconfig"
+	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/factory"
 )

@@ -37,7 +37,7 @@ type DeprecatedOptions struct {
 }

 // AddFlags adds flags for the deprecated options.
-func (o *DeprecatedOptions) AddFlags(fs *pflag.FlagSet, cfg *componentconfig.KubeSchedulerConfiguration) {
+func (o *DeprecatedOptions) AddFlags(fs *pflag.FlagSet, cfg *kubeschedulerconfig.KubeSchedulerConfiguration) {
 	if o == nil {
 		return
 	}

@@ -46,7 +46,7 @@ func (o *DeprecatedOptions) AddFlags(fs *pflag.FlagSet, cfg *componentconfig.Kub

 	fs.StringVar(&o.AlgorithmProvider, "algorithm-provider", o.AlgorithmProvider, "DEPRECATED: the scheduling algorithm provider to use, one of: "+factory.ListAlgorithmProviders())
 	fs.StringVar(&o.PolicyConfigFile, "policy-config-file", o.PolicyConfigFile, "DEPRECATED: file with scheduler policy configuration. This file is used if policy ConfigMap is not provided or --use-legacy-policy-config=true")
-	usage := fmt.Sprintf("DEPRECATED: name of the ConfigMap object that contains scheduler's policy configuration. It must exist in the system namespace before scheduler initialization if --use-legacy-policy-config=false. The config must be provided as the value of an element in 'Data' map with the key='%v'", componentconfig.SchedulerPolicyConfigMapKey)
+	usage := fmt.Sprintf("DEPRECATED: name of the ConfigMap object that contains scheduler's policy configuration. It must exist in the system namespace before scheduler initialization if --use-legacy-policy-config=false. The config must be provided as the value of an element in 'Data' map with the key='%v'", kubeschedulerconfig.SchedulerPolicyConfigMapKey)
 	fs.StringVar(&o.PolicyConfigMapName, "policy-configmap", o.PolicyConfigMapName, usage)
 	fs.StringVar(&o.PolicyConfigMapNamespace, "policy-configmap-namespace", o.PolicyConfigMapNamespace, "DEPRECATED: the namespace where policy ConfigMap is located. The kube-system namespace will be used if this is not provided or is empty.")
 	fs.BoolVar(&o.UseLegacyPolicyConfig, "use-legacy-policy-config", o.UseLegacyPolicyConfig, "DEPRECATED: when set to true, scheduler will ignore policy ConfigMap and uses policy config file")

@@ -82,31 +82,31 @@ func (o *DeprecatedOptions) Validate() []error {
 // 1. --use-legacy-policy-config to use a policy file.
 // 2. --policy-configmap to use a policy config map value.
 // 3. --algorithm-provider to use a named algorithm provider.
-func (o *DeprecatedOptions) ApplyTo(cfg *componentconfig.KubeSchedulerConfiguration) error {
+func (o *DeprecatedOptions) ApplyTo(cfg *kubeschedulerconfig.KubeSchedulerConfiguration) error {
 	if o == nil {
 		return nil
 	}

 	switch {
 	case o.UseLegacyPolicyConfig || (len(o.PolicyConfigFile) > 0 && o.PolicyConfigMapName == ""):
-		cfg.AlgorithmSource = componentconfig.SchedulerAlgorithmSource{
-			Policy: &componentconfig.SchedulerPolicySource{
-				File: &componentconfig.SchedulerPolicyFileSource{
+		cfg.AlgorithmSource = kubeschedulerconfig.SchedulerAlgorithmSource{
+			Policy: &kubeschedulerconfig.SchedulerPolicySource{
+				File: &kubeschedulerconfig.SchedulerPolicyFileSource{
 					Path: o.PolicyConfigFile,
 				},
 			},
 		}
 	case len(o.PolicyConfigMapName) > 0:
-		cfg.AlgorithmSource = componentconfig.SchedulerAlgorithmSource{
-			Policy: &componentconfig.SchedulerPolicySource{
-				ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{
+		cfg.AlgorithmSource = kubeschedulerconfig.SchedulerAlgorithmSource{
+			Policy: &kubeschedulerconfig.SchedulerPolicySource{
+				ConfigMap: &kubeschedulerconfig.SchedulerPolicyConfigMapSource{
 					Name:      o.PolicyConfigMapName,
 					Namespace: o.PolicyConfigMapNamespace,
 				},
 			},
 		}
 	case len(o.AlgorithmProvider) > 0:
-		cfg.AlgorithmSource = componentconfig.SchedulerAlgorithmSource{
+		cfg.AlgorithmSource = kubeschedulerconfig.SchedulerAlgorithmSource{
 			Provider: &o.AlgorithmProvider,
 		}
 	}

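Not part of the commit: a minimal, self-contained sketch (the name "my-policy" and the file path are hypothetical) of the two mutually exclusive policy sources that the deprecated flags above now populate on the kubeschedulerconfig types.

package main

import (
	"fmt"

	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
)

func main() {
	// Shape produced by --use-legacy-policy-config / --policy-config-file.
	fromFile := kubeschedulerconfig.SchedulerAlgorithmSource{
		Policy: &kubeschedulerconfig.SchedulerPolicySource{
			File: &kubeschedulerconfig.SchedulerPolicyFileSource{Path: "/etc/kubernetes/scheduler-policy.cfg"},
		},
	}

	// Shape produced by --policy-configmap / --policy-configmap-namespace.
	fromConfigMap := kubeschedulerconfig.SchedulerAlgorithmSource{
		Policy: &kubeschedulerconfig.SchedulerPolicySource{
			ConfigMap: &kubeschedulerconfig.SchedulerPolicyConfigMapSource{
				Name:      "my-policy",
				Namespace: "kube-system",
			},
		},
	}

	fmt.Println(fromFile.Policy.File.Path, fromConfigMap.Policy.ConfigMap.Name)
}
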
@@ -25,7 +25,7 @@ import (

 	apiserveroptions "k8s.io/apiserver/pkg/server/options"
 	schedulerappconfig "k8s.io/kubernetes/cmd/kube-scheduler/app/config"
-	"k8s.io/kubernetes/pkg/apis/componentconfig"
+	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
 )

 // CombinedInsecureServingOptions sets up up to two insecure listeners for healthz and metrics. The flags

@@ -52,7 +52,7 @@ func (o *CombinedInsecureServingOptions) AddFlags(fs *pflag.FlagSet) {
 	// fs.MarkDeprecated("port", "see --secure-port instead.")
 }

-func (o *CombinedInsecureServingOptions) applyTo(c *schedulerappconfig.Config, componentConfig *componentconfig.KubeSchedulerConfiguration) error {
+func (o *CombinedInsecureServingOptions) applyTo(c *schedulerappconfig.Config, componentConfig *kubeschedulerconfig.KubeSchedulerConfiguration) error {
 	if err := updateAddressFromDeprecatedInsecureServingOptions(&componentConfig.HealthzBindAddress, o.Healthz); err != nil {
 		return err
 	}

@@ -73,7 +73,7 @@ func (o *CombinedInsecureServingOptions) applyTo(c *schedulerappconfig.Config, c
 }

 // ApplyTo applies the insecure serving options to the given scheduler app configuration, and updates the componentConfig.
-func (o *CombinedInsecureServingOptions) ApplyTo(c *schedulerappconfig.Config, componentConfig *componentconfig.KubeSchedulerConfiguration) error {
+func (o *CombinedInsecureServingOptions) ApplyTo(c *schedulerappconfig.Config, componentConfig *kubeschedulerconfig.KubeSchedulerConfiguration) error {
 	if o == nil {
 		componentConfig.HealthzBindAddress = ""
 		componentConfig.MetricsBindAddress = ""

@@ -93,7 +93,7 @@ func (o *CombinedInsecureServingOptions) ApplyTo(c *schedulerappconfig.Config, c
 }

 // ApplyToFromLoadedConfig updates the insecure serving options from the component config and then appies it to the given scheduler app configuration.
-func (o *CombinedInsecureServingOptions) ApplyToFromLoadedConfig(c *schedulerappconfig.Config, componentConfig *componentconfig.KubeSchedulerConfiguration) error {
+func (o *CombinedInsecureServingOptions) ApplyToFromLoadedConfig(c *schedulerappconfig.Config, componentConfig *kubeschedulerconfig.KubeSchedulerConfiguration) error {
 	if o == nil {
 		return nil
 	}

@@ -25,7 +25,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/rand"
 	apiserveroptions "k8s.io/apiserver/pkg/server/options"
 	schedulerappconfig "k8s.io/kubernetes/cmd/kube-scheduler/app/config"
-	"k8s.io/kubernetes/pkg/apis/componentconfig"
+	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
 )

 func TestOptions_ApplyTo(t *testing.T) {

@@ -41,7 +41,7 @@ func TestOptions_ApplyTo(t *testing.T) {
 		{
 			name: "no config, zero port",
 			options: Options{
-				ComponentConfig: componentconfig.KubeSchedulerConfiguration{
+				ComponentConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
 					HealthzBindAddress: "1.2.3.4:1234",
 					MetricsBindAddress: "1.2.3.4:1234",
 				},

@@ -56,7 +56,7 @@ func TestOptions_ApplyTo(t *testing.T) {
 		{
 			name: "config loaded, non-nil healthz",
 			options: Options{
-				ComponentConfig: componentconfig.KubeSchedulerConfiguration{
+				ComponentConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
 					HealthzBindAddress: "1.2.3.4:1234",
 					MetricsBindAddress: "1.2.3.4:1234",
 				},

@@ -74,7 +74,7 @@ func TestOptions_ApplyTo(t *testing.T) {
 		{
 			name: "config loaded, non-nil metrics",
 			options: Options{
-				ComponentConfig: componentconfig.KubeSchedulerConfiguration{
+				ComponentConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
 					HealthzBindAddress: "1.2.3.4:1234",
 					MetricsBindAddress: "1.2.3.4:1234",
 				},

@@ -92,7 +92,7 @@ func TestOptions_ApplyTo(t *testing.T) {
 		{
 			name: "config loaded, all set, zero BindPort",
 			options: Options{
-				ComponentConfig: componentconfig.KubeSchedulerConfiguration{
+				ComponentConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
 					HealthzBindAddress: "1.2.3.4:1234",
 					MetricsBindAddress: "1.2.3.4:1234",
 				},

@@ -113,7 +113,7 @@ func TestOptions_ApplyTo(t *testing.T) {
 		{
 			name: "config loaded, all set, different addresses",
 			options: Options{
-				ComponentConfig: componentconfig.KubeSchedulerConfiguration{
+				ComponentConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
 					HealthzBindAddress: "1.2.3.4:1234",
 					MetricsBindAddress: "1.2.3.4:1235",
 				},

@@ -136,7 +136,7 @@ func TestOptions_ApplyTo(t *testing.T) {
 		{
 			name: "no config, all set, port passed",
 			options: Options{
-				ComponentConfig: componentconfig.KubeSchedulerConfiguration{
+				ComponentConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
 					HealthzBindAddress: "1.2.3.4:1234",
 					MetricsBindAddress: "1.2.3.4:1234",
 				},

@@ -158,7 +158,7 @@ func TestOptions_ApplyTo(t *testing.T) {
 		{
 			name: "no config, all set, address passed",
 			options: Options{
-				ComponentConfig: componentconfig.KubeSchedulerConfiguration{
+				ComponentConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
 					HealthzBindAddress: "1.2.3.4:1234",
 					MetricsBindAddress: "1.2.3.4:1234",
 				},

@@ -180,7 +180,7 @@ func TestOptions_ApplyTo(t *testing.T) {
 		{
 			name: "no config, all set, zero port passed",
 			options: Options{
-				ComponentConfig: componentconfig.KubeSchedulerConfiguration{
+				ComponentConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
 					HealthzBindAddress: "1.2.3.4:1234",
 					MetricsBindAddress: "1.2.3.4:1234",
 				},

@@ -41,18 +41,19 @@ import (
 	"k8s.io/client-go/tools/leaderelection"
 	"k8s.io/client-go/tools/leaderelection/resourcelock"
 	"k8s.io/client-go/tools/record"
+	kubeschedulerconfigv1alpha1 "k8s.io/kube-scheduler/config/v1alpha1"
 	schedulerappconfig "k8s.io/kubernetes/cmd/kube-scheduler/app/config"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
-	"k8s.io/kubernetes/pkg/apis/componentconfig"
-	componentconfigv1alpha1 "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1"
 	"k8s.io/kubernetes/pkg/client/leaderelectionconfig"
+	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
+	kubeschedulerscheme "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
 	"k8s.io/kubernetes/pkg/scheduler/factory"
 )

 // Options has all the params needed to run a Scheduler
 type Options struct {
 	// The default values. These are overridden if ConfigFile is set or by values in InsecureServing.
-	ComponentConfig componentconfig.KubeSchedulerConfiguration
+	ComponentConfig kubeschedulerconfig.KubeSchedulerConfiguration

 	SecureServing          *apiserveroptions.SecureServingOptions
 	CombinedInsecureServing *CombinedInsecureServingOptions

@@ -117,11 +118,11 @@ func splitHostIntPort(s string) (string, int, error) {
 	return host, portInt, err
 }

-func newDefaultComponentConfig() (*componentconfig.KubeSchedulerConfiguration, error) {
-	cfgv1alpha1 := componentconfigv1alpha1.KubeSchedulerConfiguration{}
-	configScheme.Default(&cfgv1alpha1)
-	cfg := componentconfig.KubeSchedulerConfiguration{}
-	if err := configScheme.Convert(&cfgv1alpha1, &cfg, nil); err != nil {
+func newDefaultComponentConfig() (*kubeschedulerconfig.KubeSchedulerConfiguration, error) {
+	cfgv1alpha1 := kubeschedulerconfigv1alpha1.KubeSchedulerConfiguration{}
+	kubeschedulerscheme.Scheme.Default(&cfgv1alpha1)
+	cfg := kubeschedulerconfig.KubeSchedulerConfiguration{}
+	if err := kubeschedulerscheme.Scheme.Convert(&cfgv1alpha1, &cfg, nil); err != nil {
 		return nil, err
 	}
 	return &cfg, nil

@@ -232,7 +233,7 @@ func (o *Options) Config() (*schedulerappconfig.Config, error) {

 // makeLeaderElectionConfig builds a leader election configuration. It will
 // create a new resource lock associated with the configuration.
-func makeLeaderElectionConfig(config componentconfig.KubeSchedulerLeaderElectionConfiguration, client clientset.Interface, recorder record.EventRecorder) (*leaderelection.LeaderElectionConfig, error) {
+func makeLeaderElectionConfig(config kubeschedulerconfig.KubeSchedulerLeaderElectionConfiguration, client clientset.Interface, recorder record.EventRecorder) (*leaderelection.LeaderElectionConfig, error) {
 	hostname, err := os.Hostname()
 	if err != nil {
 		return nil, fmt.Errorf("unable to get hostname: %v", err)

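Not part of the commit: a minimal, self-contained sketch (assuming the packages referenced in the newDefaultComponentConfig hunk above) of the new defaulting path it introduces: default the external v1alpha1 type through the scheduler scheme, then convert it to the internal kubeschedulerconfig type.

package main

import (
	"fmt"

	kubeschedulerconfigv1alpha1 "k8s.io/kube-scheduler/config/v1alpha1"
	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
	kubeschedulerscheme "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
)

func main() {
	// Apply registered defaults to the versioned (external) object.
	versioned := kubeschedulerconfigv1alpha1.KubeSchedulerConfiguration{}
	kubeschedulerscheme.Scheme.Default(&versioned)

	// Convert the defaulted versioned object into the internal type.
	cfg := kubeschedulerconfig.KubeSchedulerConfiguration{}
	if err := kubeschedulerscheme.Scheme.Convert(&versioned, &cfg, nil); err != nil {
		fmt.Println("convert failed:", err)
		return
	}

	// Per the expectations in the options test below, defaults such as
	// SchedulerName ("default-scheduler") are filled in at this point.
	fmt.Println(cfg.SchedulerName)
}
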
@@ -32,7 +32,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/diff"
 	apiserverconfig "k8s.io/apiserver/pkg/apis/config"
-	"k8s.io/kubernetes/pkg/apis/componentconfig"
+	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
 )

 func TestSchedulerOptions(t *testing.T) {

@@ -70,7 +70,7 @@ func TestSchedulerOptions(t *testing.T) {
 	configFile := filepath.Join(tmpDir, "scheduler.yaml")
 	configKubeconfig := filepath.Join(tmpDir, "config.kubeconfig")
 	if err := ioutil.WriteFile(configFile, []byte(fmt.Sprintf(`
-apiVersion: componentconfig/v1alpha1
+apiVersion: kubescheduler.config.k8s.io/v1alpha1
 kind: KubeSchedulerConfiguration
 clientConnection:
   kubeconfig: "%s"

@@ -100,6 +100,17 @@ users:
 		t.Fatal(err)
 	}

+	oldconfigFile := filepath.Join(tmpDir, "scheduler_old.yaml")
+	if err := ioutil.WriteFile(oldconfigFile, []byte(fmt.Sprintf(`
+apiVersion: componentconfig/v1alpha1
+kind: KubeSchedulerConfiguration
+clientConnection:
+  kubeconfig: "%s"
+leaderElection:
+  leaderElect: true`, configKubeconfig)), os.FileMode(0600)); err != nil {
+		t.Fatal(err)
+	}
+
 	// flag-specified kubeconfig
 	flagKubeconfig := filepath.Join(tmpDir, "flag.kubeconfig")
 	if err := ioutil.WriteFile(flagKubeconfig, []byte(fmt.Sprintf(`

@@ -139,26 +150,26 @@ users:
 		options          *Options
 		expectedUsername string
 		expectedError    string
-		expectedConfig   componentconfig.KubeSchedulerConfiguration
+		expectedConfig   kubeschedulerconfig.KubeSchedulerConfiguration
 	}{
 		{
 			name: "config file",
 			options: &Options{
 				ConfigFile: configFile,
-				ComponentConfig: func() componentconfig.KubeSchedulerConfiguration {
+				ComponentConfig: func() kubeschedulerconfig.KubeSchedulerConfiguration {
 					cfg, _ := newDefaultComponentConfig()
 					return *cfg
 				}(),
 			},
 			expectedUsername: "config",
-			expectedConfig: componentconfig.KubeSchedulerConfiguration{
+			expectedConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
 				SchedulerName: "default-scheduler",
-				AlgorithmSource: componentconfig.SchedulerAlgorithmSource{Provider: &defaultSource},
+				AlgorithmSource: kubeschedulerconfig.SchedulerAlgorithmSource{Provider: &defaultSource},
 				HardPodAffinitySymmetricWeight: 1,
 				HealthzBindAddress: "0.0.0.0:10251",
 				MetricsBindAddress: "0.0.0.0:10251",
 				FailureDomains: "kubernetes.io/hostname,failure-domain.beta.kubernetes.io/zone,failure-domain.beta.kubernetes.io/region",
-				LeaderElection: componentconfig.KubeSchedulerLeaderElectionConfiguration{
+				LeaderElection: kubeschedulerconfig.KubeSchedulerLeaderElectionConfiguration{
 					LeaderElectionConfiguration: apiserverconfig.LeaderElectionConfiguration{
 						LeaderElect:   true,
 						LeaseDuration: metav1.Duration{Duration: 15 * time.Second},

@@ -178,24 +189,29 @@ users:
 				PercentageOfNodesToScore: 50,
 			},
 		},
+		{
+			name:          "config file in componentconfig/v1alpha1",
+			options:       &Options{ConfigFile: oldconfigFile},
+			expectedError: "no kind \"KubeSchedulerConfiguration\" is registered for version \"componentconfig/v1alpha1\" in scheme",
+		},
 		{
 			name: "kubeconfig flag",
 			options: &Options{
-				ComponentConfig: func() componentconfig.KubeSchedulerConfiguration {
+				ComponentConfig: func() kubeschedulerconfig.KubeSchedulerConfiguration {
 					cfg, _ := newDefaultComponentConfig()
 					cfg.ClientConnection.Kubeconfig = flagKubeconfig
 					return *cfg
 				}(),
 			},
 			expectedUsername: "flag",
-			expectedConfig: componentconfig.KubeSchedulerConfiguration{
+			expectedConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
 				SchedulerName: "default-scheduler",
-				AlgorithmSource: componentconfig.SchedulerAlgorithmSource{Provider: &defaultSource},
+				AlgorithmSource: kubeschedulerconfig.SchedulerAlgorithmSource{Provider: &defaultSource},
 				HardPodAffinitySymmetricWeight: 1,
 				HealthzBindAddress: "", // defaults empty when not running from config file
 				MetricsBindAddress: "", // defaults empty when not running from config file
 				FailureDomains: "kubernetes.io/hostname,failure-domain.beta.kubernetes.io/zone,failure-domain.beta.kubernetes.io/region",
-				LeaderElection: componentconfig.KubeSchedulerLeaderElectionConfiguration{
+				LeaderElection: kubeschedulerconfig.KubeSchedulerLeaderElectionConfiguration{
 					LeaderElectionConfiguration: apiserverconfig.LeaderElectionConfiguration{
 						LeaderElect:   true,
 						LeaseDuration: metav1.Duration{Duration: 15 * time.Second},

@@ -45,13 +45,13 @@ import (
 	schedulerserverconfig "k8s.io/kubernetes/cmd/kube-scheduler/app/config"
 	"k8s.io/kubernetes/cmd/kube-scheduler/app/options"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
-	"k8s.io/kubernetes/pkg/apis/componentconfig"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler"
 	"k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	latestschedulerapi "k8s.io/kubernetes/pkg/scheduler/api/latest"
+	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/factory"
 	"k8s.io/kubernetes/pkg/scheduler/metrics"
 	"k8s.io/kubernetes/pkg/util/configz"

@@ -249,7 +249,7 @@ func installMetricHandler(pathRecorderMux *mux.PathRecorderMux) {
 }

 // newMetricsHandler builds a metrics server from the config.
-func newMetricsHandler(config *componentconfig.KubeSchedulerConfiguration) http.Handler {
+func newMetricsHandler(config *kubeschedulerconfig.KubeSchedulerConfiguration) http.Handler {
 	pathRecorderMux := mux.NewPathRecorderMux("kube-scheduler")
 	installMetricHandler(pathRecorderMux)
 	if config.EnableProfiling {

@@ -264,7 +264,7 @@ func newMetricsHandler(config *componentconfig.KubeSchedulerConfiguration) http.
 // newHealthzServer creates a healthz server from the config, and will also
 // embed the metrics handler if the healthz and metrics address configurations
 // are the same.
-func newHealthzHandler(config *componentconfig.KubeSchedulerConfiguration, separateMetrics bool) http.Handler {
+func newHealthzHandler(config *kubeschedulerconfig.KubeSchedulerConfiguration, separateMetrics bool) http.Handler {
 	pathRecorderMux := mux.NewPathRecorderMux("kube-scheduler")
 	healthz.InstallHandler(pathRecorderMux)
 	if !separateMetrics {

@@ -342,9 +342,9 @@ func NewSchedulerConfig(s schedulerserverconfig.CompletedConfig) (*scheduler.Con
 		if err != nil {
 			return nil, fmt.Errorf("couldn't get policy config map %s/%s: %v", policyRef.Namespace, policyRef.Name, err)
 		}
-		data, found := policyConfigMap.Data[componentconfig.SchedulerPolicyConfigMapKey]
+		data, found := policyConfigMap.Data[kubeschedulerconfig.SchedulerPolicyConfigMapKey]
 		if !found {
-			return nil, fmt.Errorf("missing policy config map value at key %q", componentconfig.SchedulerPolicyConfigMapKey)
+			return nil, fmt.Errorf("missing policy config map value at key %q", kubeschedulerconfig.SchedulerPolicyConfigMapKey)
 		}
 		err = runtime.DecodeInto(latestschedulerapi.Codec, []byte(data), policy)
 		if err != nil {

@@ -46,8 +46,7 @@ func Kind(kind string) schema.GroupKind {
 }

 func addKnownTypes(scheme *runtime.Scheme) error {
-	scheme.AddKnownTypes(SchemeGroupVersion,
-		&KubeSchedulerConfiguration{},
-	)
+	// TODO: All structs in this package are about to be moved out,
+	// so nothing should be registered here as this API group is going to be removed soon.
 	return nil
 }

@@ -17,110 +17,10 @@ limitations under the License.
 package componentconfig

 import (
 	apimachineryconfig "k8s.io/apimachinery/pkg/apis/config"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	apiserverconfig "k8s.io/apiserver/pkg/apis/config"
 )

-// SchedulerPolicyConfigMapKey defines the key of the element in the
-// scheduler's policy ConfigMap that contains scheduler's policy config.
-const SchedulerPolicyConfigMapKey string = "policy.cfg"
-
-// SchedulerPolicySource configures a means to obtain a scheduler Policy. One
-// source field must be specified, and source fields are mutually exclusive.
-type SchedulerPolicySource struct {
-	// File is a file policy source.
-	File *SchedulerPolicyFileSource
-	// ConfigMap is a config map policy source.
-	ConfigMap *SchedulerPolicyConfigMapSource
-}
-
-// SchedulerPolicyFileSource is a policy serialized to disk and accessed via
-// path.
-type SchedulerPolicyFileSource struct {
-	// Path is the location of a serialized policy.
-	Path string
-}
-
-// SchedulerPolicyConfigMapSource is a policy serialized into a config map value
-// under the SchedulerPolicyConfigMapKey key.
-type SchedulerPolicyConfigMapSource struct {
-	// Namespace is the namespace of the policy config map.
-	Namespace string
-	// Name is the name of hte policy config map.
-	Name string
-}
-
-// SchedulerAlgorithmSource is the source of a scheduler algorithm. One source
-// field must be specified, and source fields are mutually exclusive.
-type SchedulerAlgorithmSource struct {
-	// Policy is a policy based algorithm source.
-	Policy *SchedulerPolicySource
-	// Provider is the name of a scheduling algorithm provider to use.
-	Provider *string
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type KubeSchedulerConfiguration struct {
-	metav1.TypeMeta
-
-	// DebuggingConfiguration holds profiling- and debugging-related fields
-	// TODO: DebuggingConfiguration is inlined because it's been like that earlier.
-	// We might consider making it a "real" sub-struct.
-	apiserverconfig.DebuggingConfiguration
-
-	// schedulerName is name of the scheduler, used to select which pods
-	// will be processed by this scheduler, based on pod's "spec.SchedulerName".
-	SchedulerName string
-	// AlgorithmSource specifies the scheduler algorithm source.
-	AlgorithmSource SchedulerAlgorithmSource
-	// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
-	// corresponding to every RequiredDuringScheduling affinity rule.
-	// HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100.
-	HardPodAffinitySymmetricWeight int32
-
-	// LeaderElection defines the configuration of leader election client.
-	LeaderElection KubeSchedulerLeaderElectionConfiguration
-
-	// ClientConnection specifies the kubeconfig file and client connection
-	// settings for the proxy server to use when communicating with the apiserver.
-	ClientConnection apimachineryconfig.ClientConnectionConfiguration
-	// HealthzBindAddress is the IP address and port for the health check server to serve on,
-	// defaulting to 0.0.0.0:10251
-	HealthzBindAddress string
-	// MetricsBindAddress is the IP address and port for the metrics server to
-	// serve on, defaulting to 0.0.0.0:10251.
-	MetricsBindAddress string
-
-	// Indicate the "all topologies" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.
-	// DEPRECATED: This is no longer used.
-	FailureDomains string
-
-	// DisablePreemption disables the pod preemption feature.
-	DisablePreemption bool
-
-	// PercentageOfNodeToScore is the percentage of all nodes that once found feasible
-	// for running a pod, the scheduler stops its search for more feasible nodes in
-	// the cluster. This helps improve scheduler's performance. Scheduler always tries to find
-	// at least "minFeasibleNodesToFind" feasible nodes no matter what the value of this flag is.
-	// Example: if the cluster size is 500 nodes and the value of this flag is 30,
-	// then scheduler stops finding further feasible nodes once it finds 150 feasible ones.
-	// When the value is 0, default percentage (50%) of the nodes will be scored.
-	PercentageOfNodesToScore int32
-}
-
-// KubeSchedulerLeaderElectionConfiguration expands LeaderElectionConfiguration
-// to include scheduler specific configuration.
-type KubeSchedulerLeaderElectionConfiguration struct {
-	apiserverconfig.LeaderElectionConfiguration
-
-	// LockObjectNamespace defines the namespace of the lock object
-	LockObjectNamespace string
-	// LockObjectName defines the lock object name
-	LockObjectName string
-}
-
 type GroupResource struct {
 	// group is the group portion of the GroupResource.
 	Group string

@@ -526,11 +426,3 @@ type PersistentVolumeRecyclerConfiguration struct {
 	// in a multi-node cluster.
 	IncrementTimeoutHostPath int32
 }
-
-const (
-	// "kube-system" is the default scheduler lock object namespace
-	SchedulerDefaultLockObjectNamespace string = "kube-system"
-
-	// "kube-scheduler" is the default scheduler lock object name
-	SchedulerDefaultLockObjectName = "kube-scheduler"
-)

@@ -17,17 +17,11 @@ limitations under the License.
 package v1alpha1

 import (
-	"net"
-	"strconv"
 	"time"

-	apimachineryconfigv1alpha1 "k8s.io/apimachinery/pkg/apis/config/v1alpha1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	kruntime "k8s.io/apimachinery/pkg/runtime"
-	apiserverconfigv1alpha1 "k8s.io/apiserver/pkg/apis/config/v1alpha1"
 	api "k8s.io/kubernetes/pkg/apis/core"
-	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
-	"k8s.io/kubernetes/pkg/master/ports"
 	utilpointer "k8s.io/utils/pointer"
 )

@@ -219,71 +213,3 @@ func SetDefaults_VolumeConfiguration(obj *VolumeConfiguration) {
 		obj.FlexVolumePluginDir = "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/"
 	}
 }
-
-func SetDefaults_KubeSchedulerConfiguration(obj *KubeSchedulerConfiguration) {
-	if len(obj.SchedulerName) == 0 {
-		obj.SchedulerName = api.DefaultSchedulerName
-	}
-
-	if obj.HardPodAffinitySymmetricWeight == 0 {
-		obj.HardPodAffinitySymmetricWeight = api.DefaultHardPodAffinitySymmetricWeight
-	}
-
-	if obj.AlgorithmSource.Policy == nil &&
-		(obj.AlgorithmSource.Provider == nil || len(*obj.AlgorithmSource.Provider) == 0) {
-		val := SchedulerDefaultProviderName
-		obj.AlgorithmSource.Provider = &val
-	}
-
-	if policy := obj.AlgorithmSource.Policy; policy != nil {
-		if policy.ConfigMap != nil && len(policy.ConfigMap.Namespace) == 0 {
-			obj.AlgorithmSource.Policy.ConfigMap.Namespace = api.NamespaceSystem
-		}
-	}
-
-	if host, port, err := net.SplitHostPort(obj.HealthzBindAddress); err == nil {
-		if len(host) == 0 {
-			host = "0.0.0.0"
-		}
-		obj.HealthzBindAddress = net.JoinHostPort(host, port)
-	} else {
-		obj.HealthzBindAddress = net.JoinHostPort("0.0.0.0", strconv.Itoa(ports.SchedulerPort))
-	}
-
-	if host, port, err := net.SplitHostPort(obj.MetricsBindAddress); err == nil {
-		if len(host) == 0 {
-			host = "0.0.0.0"
-		}
-		obj.MetricsBindAddress = net.JoinHostPort(host, port)
-	} else {
-		obj.MetricsBindAddress = net.JoinHostPort("0.0.0.0", strconv.Itoa(ports.SchedulerPort))
-	}
-
-	if obj.ClientConnection.QPS == 0.0 {
-		obj.ClientConnection.QPS = 50.0
-	}
-
-	if obj.ClientConnection.Burst == 0 {
-		obj.ClientConnection.Burst = 100
-	}
-
-	if len(obj.LeaderElection.LockObjectNamespace) == 0 {
-		obj.LeaderElection.LockObjectNamespace = SchedulerDefaultLockObjectNamespace
-	}
-	if len(obj.LeaderElection.LockObjectName) == 0 {
-		obj.LeaderElection.LockObjectName = SchedulerDefaultLockObjectName
-	}
-
-	if len(obj.FailureDomains) == 0 {
-		obj.FailureDomains = kubeletapis.DefaultFailureDomains
-	}
-
-	if obj.PercentageOfNodesToScore == 0 {
-		// by default, stop finding feasible nodes once the number of feasible nodes is 50% of the cluster.
-		obj.PercentageOfNodesToScore = 50
-	}
-
-	// Use the default ClientConnectionConfiguration and LeaderElectionConfiguration options
-	apimachineryconfigv1alpha1.RecommendedDefaultClientConnectionConfiguration(&obj.ClientConnection)
-	apiserverconfigv1alpha1.RecommendedDefaultLeaderElectionConfiguration(&obj.LeaderElection.LeaderElectionConfiguration)
-}

@@ -24,24 +24,6 @@ import (
 	componentconfig "k8s.io/kubernetes/pkg/apis/componentconfig"
 )

-func TestSchedulerDefaults(t *testing.T) {
-	ks1 := &KubeSchedulerConfiguration{}
-	SetDefaults_KubeSchedulerConfiguration(ks1)
-	cm, err := componentconfig.ConvertObjToConfigMap("KubeSchedulerConfiguration", ks1)
-	if err != nil {
-		t.Errorf("unexpected ConvertObjToConfigMap error %v", err)
-	}
-
-	ks2 := &KubeSchedulerConfiguration{}
-	if err = json.Unmarshal([]byte(cm.Data["KubeSchedulerConfiguration"]), ks2); err != nil {
-		t.Errorf("unexpected error unserializing scheduler config %v", err)
-	}
-
-	if !reflect.DeepEqual(ks2, ks1) {
-		t.Errorf("Expected:\n%#v\n\nGot:\n%#v", ks1, ks2)
-	}
-}
-
 func TestControllerDefaultsRoundTrip(t *testing.T) {
 	ks1 := &KubeControllerManagerConfiguration{}
 	SetDefaults_KubeControllerManagerConfiguration(ks1)

@@ -41,8 +41,7 @@ func init() {
 }

 func addKnownTypes(scheme *runtime.Scheme) error {
-	scheme.AddKnownTypes(SchemeGroupVersion,
-		&KubeSchedulerConfiguration{},
-	)
+	// TODO: All structs in this package are about to be moved out,
+	// so nothing should be registered here as this API group is going to be removed soon.
 	return nil
 }

@@ -17,106 +17,10 @@ limitations under the License.
 package v1alpha1

 import (
 	apimachineryconfigv1alpha1 "k8s.io/apimachinery/pkg/apis/config/v1alpha1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	apiserverconfigv1alpha1 "k8s.io/apiserver/pkg/apis/config/v1alpha1"
 )

-// SchedulerPolicySource configures a means to obtain a scheduler Policy. One
-// source field must be specified, and source fields are mutually exclusive.
-type SchedulerPolicySource struct {
-	// File is a file policy source.
-	File *SchedulerPolicyFileSource `json:"file,omitempty"`
-	// ConfigMap is a config map policy source.
-	ConfigMap *SchedulerPolicyConfigMapSource `json:"configMap,omitempty"`
-}
-
-// SchedulerPolicyFileSource is a policy serialized to disk and accessed via
-// path.
-type SchedulerPolicyFileSource struct {
-	// Path is the location of a serialized policy.
-	Path string `json:"path"`
-}
-
-// SchedulerPolicyConfigMapSource is a policy serialized into a config map value
-// under the SchedulerPolicyConfigMapKey key.
-type SchedulerPolicyConfigMapSource struct {
-	// Namespace is the namespace of the policy config map.
-	Namespace string `json:"namespace"`
-	// Name is the name of hte policy config map.
-	Name string `json:"name"`
-}
-
-// SchedulerAlgorithmSource is the source of a scheduler algorithm. One source
-// field must be specified, and source fields are mutually exclusive.
-type SchedulerAlgorithmSource struct {
-	// Policy is a policy based algorithm source.
-	Policy *SchedulerPolicySource `json:"policy,omitempty"`
-	// Provider is the name of a scheduling algorithm provider to use.
-	Provider *string `json:"provider,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type KubeSchedulerConfiguration struct {
-	metav1.TypeMeta `json:",inline"`
-
-	// DebuggingConfiguration holds profiling- and debugging-related fields
-	// TODO: DebuggingConfiguration is inlined because it's been like that earlier.
-	// We might consider making it a "real" sub-struct.
-	apiserverconfigv1alpha1.DebuggingConfiguration `json:",inline"`
-
-	// SchedulerName is name of the scheduler, used to select which pods
-	// will be processed by this scheduler, based on pod's "spec.SchedulerName".
-	SchedulerName string `json:"schedulerName"`
-	// AlgorithmSource specifies the scheduler algorithm source.
-	AlgorithmSource SchedulerAlgorithmSource `json:"algorithmSource"`
-	// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
-	// corresponding to every RequiredDuringScheduling affinity rule.
-	// HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100.
-	HardPodAffinitySymmetricWeight int32 `json:"hardPodAffinitySymmetricWeight"`
-
-	// LeaderElection defines the configuration of leader election client.
-	// TODO: Migrate the kube-scheduler-specific stuff into the generic LeaderElectionConfig?
-	LeaderElection KubeSchedulerLeaderElectionConfiguration `json:"leaderElection"`
-
-	// ClientConnection specifies the kubeconfig file and client connection
-	// settings for the proxy server to use when communicating with the apiserver.
-	ClientConnection apimachineryconfigv1alpha1.ClientConnectionConfiguration `json:"clientConnection"`
-	// HealthzBindAddress is the IP address and port for the health check server to serve on,
-	// defaulting to 0.0.0.0:10251
-	HealthzBindAddress string `json:"healthzBindAddress"`
-	// MetricsBindAddress is the IP address and port for the metrics server to
-	// serve on, defaulting to 0.0.0.0:10251.
-	MetricsBindAddress string `json:"metricsBindAddress"`
-
-	// Indicate the "all topologies" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.
-	FailureDomains string `json:"failureDomains"`
-
-	// DisablePreemption disables the pod preemption feature.
-	DisablePreemption bool `json:"disablePreemption"`
-
-	// PercentageOfNodeToScore specifies what percentage of all nodes should be scored in each
-	// scheduling cycle. This helps improve scheduler's performance. Scheduler always tries to find
-	// at least "minFeasibleNodesToFind" feasible nodes no matter what the value of this flag is.
-	// When this value is below 100%, the scheduler stops finding feasible nodes for running a pod
-	// once it finds that percentage of feasible nodes of the whole cluster size. For example, if the
-	// cluster size is 500 nodes and the value of this flag is 30, then scheduler stops finding
-	// feasible nodes once it finds 150 feasible nodes.
-	// When the value is 0, default percentage (50%) of the nodes will be scored.
-	PercentageOfNodesToScore int32 `json:"percentageOfNodesToScore"`
-}
-
-// KubeSchedulerLeaderElectionConfiguration expands LeaderElectionConfiguration
-// to include scheduler specific configuration.
-type KubeSchedulerLeaderElectionConfiguration struct {
-	apiserverconfigv1alpha1.LeaderElectionConfiguration `json:",inline"`
-	// LockObjectNamespace defines the namespace of the lock object
-	LockObjectNamespace string `json:"lockObjectNamespace"`
-	// LockObjectName defines the lock object name
-	LockObjectName string `json:"lockObjectName"`
-}
-
 type PersistentVolumeRecyclerConfiguration struct {
 	// maximumRetry is number of retries the PV recycler will execute on failure to recycle
 	// PV.

@@ -523,13 +427,3 @@ type ServiceControllerConfiguration struct {
 	// management, but more CPU (and network) load.
 	ConcurrentServiceSyncs int32
 }
-
-const (
-	// "kube-system" is the default scheduler lock object namespace
-	SchedulerDefaultLockObjectNamespace string = "kube-system"
-
-	// "kube-scheduler" is the default scheduler lock object name
-	SchedulerDefaultLockObjectName = "kube-scheduler"
-
-	SchedulerDefaultProviderName = "DefaultProvider"
-)

@@ -45,11 +45,11 @@ import (
 	schedulerapp "k8s.io/kubernetes/cmd/kube-scheduler/app"
 	schedulerappconfig "k8s.io/kubernetes/cmd/kube-scheduler/app/config"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
-	"k8s.io/kubernetes/pkg/apis/componentconfig"
 	"k8s.io/kubernetes/pkg/scheduler"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm"
 	_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
 	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
 	"k8s.io/kubernetes/pkg/scheduler/factory"
 	"k8s.io/kubernetes/test/integration/framework"

@@ -173,7 +173,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
 		configPolicyName := fmt.Sprintf("scheduler-custom-policy-config-%d", i)
 		policyConfigMap := v1.ConfigMap{
 			ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: configPolicyName},
-			Data:       map[string]string{componentconfig.SchedulerPolicyConfigMapKey: test.policy},
+			Data:       map[string]string{kubeschedulerconfig.SchedulerPolicyConfigMapKey: test.policy},
 		}

 		policyConfigMap.APIVersion = "v1"

@@ -183,12 +183,12 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
 		eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientSet.CoreV1().Events("")})

 		ss := &schedulerappconfig.Config{
-			ComponentConfig: componentconfig.KubeSchedulerConfiguration{
+			ComponentConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
 				HardPodAffinitySymmetricWeight: v1.DefaultHardPodAffinitySymmetricWeight,
 				SchedulerName:                  v1.DefaultSchedulerName,
-				AlgorithmSource: componentconfig.SchedulerAlgorithmSource{
-					Policy: &componentconfig.SchedulerPolicySource{
-						ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{
+				AlgorithmSource: kubeschedulerconfig.SchedulerAlgorithmSource{
+					Policy: &kubeschedulerconfig.SchedulerPolicySource{
+						ConfigMap: &kubeschedulerconfig.SchedulerPolicyConfigMapSource{
 							Namespace: policyConfigMap.Namespace,
 							Name:      policyConfigMap.Name,
 						},

@@ -244,11 +244,11 @@ func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) {
 	eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientSet.CoreV1().Events("")})

 	ss := &schedulerappconfig.Config{
-		ComponentConfig: componentconfig.KubeSchedulerConfiguration{
+		ComponentConfig: kubeschedulerconfig.KubeSchedulerConfiguration{
 			SchedulerName: v1.DefaultSchedulerName,
-			AlgorithmSource: componentconfig.SchedulerAlgorithmSource{
-				Policy: &componentconfig.SchedulerPolicySource{
-					ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{
+			AlgorithmSource: kubeschedulerconfig.SchedulerAlgorithmSource{
+				Policy: &kubeschedulerconfig.SchedulerPolicySource{
+					ConfigMap: &kubeschedulerconfig.SchedulerPolicyConfigMapSource{
 						Namespace: "non-existent-config",
 						Name:      "non-existent-config",
 					},