split up the component config into smaller config

pull/8/head
stewart-yu 2018-04-12 11:12:10 +08:00
parent 75e39073a3
commit ec6399be53
12 changed files with 688 additions and 422 deletions
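Note (not part of the patch): the commit replaces the single flat KubeControllerManagerConfiguration with per-controller and shared sub-structs, so every call site reaches one level deeper. A minimal Go sketch of that access-path change, using local stand-in types whose shape mirrors the diff below:

// Illustrative sketch only; struct and field names are local stand-ins.
package main

import "fmt"

type flatConfig struct {
	ConcurrentServiceSyncs int32
	ClusterName            string
}

type serviceControllerConfig struct{ ConcurrentServiceSyncs int32 }
type kubeCloudSharedConfig struct{ ClusterName string }

type splitConfig struct {
	ServiceController serviceControllerConfig
	KubeCloudShared   kubeCloudSharedConfig
}

func main() {
	flat := flatConfig{ConcurrentServiceSyncs: 1, ClusterName: "kubernetes"}
	split := splitConfig{
		ServiceController: serviceControllerConfig{ConcurrentServiceSyncs: flat.ConcurrentServiceSyncs},
		KubeCloudShared:   kubeCloudSharedConfig{ClusterName: flat.ClusterName},
	}
	// Callers move from cfg.ConcurrentServiceSyncs to cfg.ServiceController.ConcurrentServiceSyncs,
	// and from cfg.ClusterName to cfg.KubeCloudShared.ClusterName.
	fmt.Println(split.ServiceController.ConcurrentServiceSyncs, split.KubeCloudShared.ClusterName)
}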

View File

@@ -85,13 +85,13 @@ the cloud specific control loops shipped with Kubernetes.`,
 func resyncPeriod(c *cloudcontrollerconfig.CompletedConfig) func() time.Duration {
 return func() time.Duration {
 factor := rand.Float64() + 1
-return time.Duration(float64(c.Generic.ComponentConfig.MinResyncPeriod.Nanoseconds()) * factor)
+return time.Duration(float64(c.Generic.ComponentConfig.GenericComponent.MinResyncPeriod.Nanoseconds()) * factor)
 }
 }
 // Run runs the ExternalCMServer. This should never exit.
 func Run(c *cloudcontrollerconfig.CompletedConfig) error {
-cloud, err := cloudprovider.InitCloudProvider(c.Generic.ComponentConfig.CloudProvider, c.Generic.ComponentConfig.CloudConfigFile)
+cloud, err := cloudprovider.InitCloudProvider(c.Generic.ComponentConfig.CloudProvider.Name, c.Generic.ComponentConfig.CloudProvider.CloudConfigFile)
 if err != nil {
 glog.Fatalf("Cloud provider could not be initialized: %v", err)
 }
@@ -100,7 +100,7 @@ func Run(c *cloudcontrollerconfig.CompletedConfig) error {
 }
 if cloud.HasClusterID() == false {
-if c.Generic.ComponentConfig.AllowUntaggedCloud == true {
+if c.Generic.ComponentConfig.KubeCloudShared.AllowUntaggedCloud == true {
 glog.Warning("detected a cluster without a ClusterID. A ClusterID will be required in the future. Please tag your cluster to avoid any future issues")
 } else {
 glog.Fatalf("no ClusterID found. A ClusterID is required for the cloud provider to function properly. This check can be bypassed by setting the allow-untagged-cloud option")
@@ -132,7 +132,7 @@ func Run(c *cloudcontrollerconfig.CompletedConfig) error {
 ClientConfig: c.Generic.Kubeconfig,
 }
 var clientBuilder controller.ControllerClientBuilder
-if c.Generic.ComponentConfig.UseServiceAccountCredentials {
+if c.Generic.ComponentConfig.KubeCloudShared.UseServiceAccountCredentials {
 clientBuilder = controller.SAControllerClientBuilder{
 ClientConfig: restclient.AnonymousClientConfig(c.Generic.Kubeconfig),
 CoreClient: c.Generic.Client.CoreV1(),
@@ -148,7 +148,7 @@ func Run(c *cloudcontrollerconfig.CompletedConfig) error {
 }
 }
-if !c.Generic.ComponentConfig.LeaderElection.LeaderElect {
+if !c.Generic.ComponentConfig.GenericComponent.LeaderElection.LeaderElect {
 run(nil)
 panic("unreachable")
 }
@@ -162,7 +162,7 @@ func Run(c *cloudcontrollerconfig.CompletedConfig) error {
 id = id + "_" + string(uuid.NewUUID())
 // Lock required for leader election
-rl, err := resourcelock.New(c.Generic.ComponentConfig.LeaderElection.ResourceLock,
+rl, err := resourcelock.New(c.Generic.ComponentConfig.GenericComponent.LeaderElection.ResourceLock,
 "kube-system",
 "cloud-controller-manager",
 c.Generic.LeaderElectionClient.CoreV1(),
@@ -177,9 +177,9 @@ func Run(c *cloudcontrollerconfig.CompletedConfig) error {
 // Try and become the leader and start cloud controller manager loops
 leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{
 Lock: rl,
-LeaseDuration: c.Generic.ComponentConfig.LeaderElection.LeaseDuration.Duration,
-RenewDeadline: c.Generic.ComponentConfig.LeaderElection.RenewDeadline.Duration,
-RetryPeriod: c.Generic.ComponentConfig.LeaderElection.RetryPeriod.Duration,
+LeaseDuration: c.Generic.ComponentConfig.GenericComponent.LeaderElection.LeaseDuration.Duration,
+RenewDeadline: c.Generic.ComponentConfig.GenericComponent.LeaderElection.RenewDeadline.Duration,
+RetryPeriod: c.Generic.ComponentConfig.GenericComponent.LeaderElection.RetryPeriod.Duration,
 Callbacks: leaderelection.LeaderCallbacks{
 OnStartedLeading: run,
 OnStoppedLeading: func() {
@@ -209,17 +209,17 @@ func startControllers(c *cloudcontrollerconfig.CompletedConfig, rootClientBuilde
 nodeController := cloudcontrollers.NewCloudNodeController(
 sharedInformers.Core().V1().Nodes(),
 client("cloud-node-controller"), cloud,
-c.Generic.ComponentConfig.NodeMonitorPeriod.Duration,
+c.Generic.ComponentConfig.KubeCloudShared.NodeMonitorPeriod.Duration,
 c.Extra.NodeStatusUpdateFrequency)
 nodeController.Run()
-time.Sleep(wait.Jitter(c.Generic.ComponentConfig.ControllerStartInterval.Duration, ControllerStartJitter))
+time.Sleep(wait.Jitter(c.Generic.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter))
 // Start the PersistentVolumeLabelController
 pvlController := cloudcontrollers.NewPersistentVolumeLabelController(client("pvl-controller"), cloud)
 threads := 5
 go pvlController.Run(threads, stop)
-time.Sleep(wait.Jitter(c.Generic.ComponentConfig.ControllerStartInterval.Duration, ControllerStartJitter))
+time.Sleep(wait.Jitter(c.Generic.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter))
 // Start the service controller
 serviceController, err := servicecontroller.New(
@@ -227,34 +227,34 @@ func startControllers(c *cloudcontrollerconfig.CompletedConfig, rootClientBuilde
 client("service-controller"),
 sharedInformers.Core().V1().Services(),
 sharedInformers.Core().V1().Nodes(),
-c.Generic.ComponentConfig.ClusterName,
+c.Generic.ComponentConfig.KubeCloudShared.ClusterName,
 )
 if err != nil {
 glog.Errorf("Failed to start service controller: %v", err)
 } else {
-go serviceController.Run(stop, int(c.Generic.ComponentConfig.ConcurrentServiceSyncs))
-time.Sleep(wait.Jitter(c.Generic.ComponentConfig.ControllerStartInterval.Duration, ControllerStartJitter))
+go serviceController.Run(stop, int(c.Generic.ComponentConfig.ServiceController.ConcurrentServiceSyncs))
+time.Sleep(wait.Jitter(c.Generic.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter))
 }
 // If CIDRs should be allocated for pods and set on the CloudProvider, then start the route controller
-if c.Generic.ComponentConfig.AllocateNodeCIDRs && c.Generic.ComponentConfig.ConfigureCloudRoutes {
+if c.Generic.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs && c.Generic.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes {
 if routes, ok := cloud.Routes(); !ok {
 glog.Warning("configure-cloud-routes is set, but cloud provider does not support routes. Will not configure cloud provider routes.")
 } else {
 var clusterCIDR *net.IPNet
-if len(strings.TrimSpace(c.Generic.ComponentConfig.ClusterCIDR)) != 0 {
-_, clusterCIDR, err = net.ParseCIDR(c.Generic.ComponentConfig.ClusterCIDR)
+if len(strings.TrimSpace(c.Generic.ComponentConfig.KubeCloudShared.ClusterCIDR)) != 0 {
+_, clusterCIDR, err = net.ParseCIDR(c.Generic.ComponentConfig.KubeCloudShared.ClusterCIDR)
 if err != nil {
-glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", c.Generic.ComponentConfig.ClusterCIDR, err)
+glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", c.Generic.ComponentConfig.KubeCloudShared.ClusterCIDR, err)
 }
 }
-routeController := routecontroller.New(routes, client("route-controller"), sharedInformers.Core().V1().Nodes(), c.Generic.ComponentConfig.ClusterName, clusterCIDR)
-go routeController.Run(stop, c.Generic.ComponentConfig.RouteReconciliationPeriod.Duration)
-time.Sleep(wait.Jitter(c.Generic.ComponentConfig.ControllerStartInterval.Duration, ControllerStartJitter))
+routeController := routecontroller.New(routes, client("route-controller"), sharedInformers.Core().V1().Nodes(), c.Generic.ComponentConfig.KubeCloudShared.ClusterName, clusterCIDR)
+go routeController.Run(stop, c.Generic.ComponentConfig.KubeCloudShared.RouteReconciliationPeriod.Duration)
+time.Sleep(wait.Jitter(c.Generic.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter))
 }
 } else {
-glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", c.Generic.ComponentConfig.AllocateNodeCIDRs, c.Generic.ComponentConfig.ConfigureCloudRoutes)
+glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", c.Generic.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs, c.Generic.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes)
 }
 // If apiserver is not running we should wait for some time and fail only then. This is particularly

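Aside (not part of the patch): the resyncPeriod helper touched above returns an interval jittered uniformly between MinResyncPeriod and 2*MinResyncPeriod, which is why the factor is rand.Float64() + 1. A standalone sketch of the same computation (the 12h value is an assumption; the real value comes from GenericComponent.MinResyncPeriod):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitteredResync mirrors the resyncPeriod helper in the diff: it scales the
// configured minimum by a random factor in [1, 2), so informers do not all
// resync at the same moment.
func jitteredResync(min time.Duration) time.Duration {
	factor := rand.Float64() + 1
	return time.Duration(float64(min.Nanoseconds()) * factor)
}

func main() {
	min := 12 * time.Hour // assumed example value for min-resync-period
	fmt.Println(jitteredResync(min))
}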
View File

@@ -26,7 +26,6 @@ import (
 // Config is the main context object for the controller manager.
 type Config struct {
-// TODO: split up the component config. This is not generic.
 ComponentConfig componentconfig.KubeControllerManagerConfiguration
 SecureServing *apiserver.SecureServingInfo

View File

@@ -40,9 +40,9 @@ type serveFunc func(handler http.Handler, shutdownTimeout time.Duration, stopCh
 func Serve(c *CompletedConfig, serveFunc serveFunc, stopCh <-chan struct{}) error {
 mux := mux.NewPathRecorderMux("controller-manager")
 healthz.InstallHandler(mux)
-if c.ComponentConfig.EnableProfiling {
+if c.ComponentConfig.Debugging.EnableProfiling {
 routes.Profiling{}.Install(mux)
-if c.ComponentConfig.EnableContentionProfiling {
+if c.ComponentConfig.Debugging.EnableContentionProfiling {
 goruntime.SetBlockProfileRate(1)
 }
 }

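Aside (not part of the patch): the Debugging block now gates the controller-manager's pprof endpoints; EnableProfiling installs the /debug/pprof/ handlers and EnableContentionProfiling additionally turns on block profiling. A rough standalone equivalent using only the standard library (the real code wires this into the controller-manager's own mux and routes package; the port here is chosen arbitrarily):

package main

import (
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof/ handlers on http.DefaultServeMux
	"runtime"
)

type debuggingConfiguration struct {
	EnableProfiling           bool
	EnableContentionProfiling bool
}

func main() {
	cfg := debuggingConfiguration{EnableProfiling: true, EnableContentionProfiling: true}
	if cfg.EnableProfiling {
		if cfg.EnableContentionProfiling {
			// Rate 1 records every blocking event, matching the diff's
			// goruntime.SetBlockProfileRate(1) call.
			runtime.SetBlockProfileRate(1)
		}
		// Serve the default mux, which now exposes /debug/pprof/.
		http.ListenAndServe("localhost:10252", nil)
	}
}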
View File

@@ -43,7 +43,7 @@ func startDaemonSetController(ctx ControllerContext) (bool, error) {
 if err != nil {
 return true, fmt.Errorf("error creating DaemonSets controller: %v", err)
 }
-go dsc.Run(int(ctx.ComponentConfig.ConcurrentDaemonSetSyncs), ctx.Stop)
+go dsc.Run(int(ctx.ComponentConfig.DaemonSetController.ConcurrentDaemonSetSyncs), ctx.Stop)
 return true, nil
 }
@@ -70,6 +70,6 @@ func startReplicaSetController(ctx ControllerContext) (bool, error) {
 ctx.InformerFactory.Core().V1().Pods(),
 ctx.ClientBuilder.ClientOrDie("replicaset-controller"),
 replicaset.BurstReplicas,
-).Run(int(ctx.ComponentConfig.ConcurrentRSSyncs), ctx.Stop)
+).Run(int(ctx.ComponentConfig.ReplicaSetController.ConcurrentRSSyncs), ctx.Stop)
 return true, nil
 }

View File

@@ -39,7 +39,7 @@ func startHPAController(ctx ControllerContext) (bool, error) {
 return false, nil
 }
-if ctx.ComponentConfig.HorizontalPodAutoscalerUseRESTClients {
+if ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerUseRESTClients {
 // use the new-style clients if support for custom metrics is enabled
 return startHPAControllerWithRESTClient(ctx)
 }
@@ -90,7 +90,7 @@ func startHPAControllerWithMetricsClient(ctx ControllerContext, metricsClient me
 replicaCalc := podautoscaler.NewReplicaCalculator(
 metricsClient,
 hpaClient.CoreV1(),
-ctx.ComponentConfig.HorizontalPodAutoscalerTolerance,
+ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerTolerance,
 )
 go podautoscaler.NewHorizontalController(
 hpaClientGoClient.CoreV1(),
@@ -99,9 +99,9 @@ func startHPAControllerWithMetricsClient(ctx ControllerContext, metricsClient me
 restMapper,
 replicaCalc,
 ctx.InformerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
-ctx.ComponentConfig.HorizontalPodAutoscalerSyncPeriod.Duration,
-ctx.ComponentConfig.HorizontalPodAutoscalerUpscaleForbiddenWindow.Duration,
-ctx.ComponentConfig.HorizontalPodAutoscalerDownscaleForbiddenWindow.Duration,
+ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerSyncPeriod.Duration,
+ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerUpscaleForbiddenWindow.Duration,
+ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerDownscaleForbiddenWindow.Duration,
 ).Run(ctx.Stop)
 return true, nil
 }

View File

@@ -36,7 +36,7 @@ func startJobController(ctx ControllerContext) (bool, error) {
 ctx.InformerFactory.Core().V1().Pods(),
 ctx.InformerFactory.Batch().V1().Jobs(),
 ctx.ClientBuilder.ClientOrDie("job-controller"),
-).Run(int(ctx.ComponentConfig.ConcurrentJobSyncs), ctx.Stop)
+).Run(int(ctx.ComponentConfig.JobController.ConcurrentJobSyncs), ctx.Stop)
 return true, nil
 }

View File

@@ -37,7 +37,7 @@ func startCSRSigningController(ctx ControllerContext) (bool, error) {
 if !ctx.AvailableResources[schema.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"}] {
 return false, nil
 }
-if ctx.ComponentConfig.ClusterSigningCertFile == "" || ctx.ComponentConfig.ClusterSigningKeyFile == "" {
+if ctx.ComponentConfig.CSRSigningController.ClusterSigningCertFile == "" || ctx.ComponentConfig.CSRSigningController.ClusterSigningKeyFile == "" {
 return false, nil
 }
@@ -52,15 +52,15 @@ func startCSRSigningController(ctx ControllerContext) (bool, error) {
 // bail out of startController without logging.
 var keyFileExists, keyUsesDefault, certFileExists, certUsesDefault bool
-_, err := os.Stat(ctx.ComponentConfig.ClusterSigningCertFile)
+_, err := os.Stat(ctx.ComponentConfig.CSRSigningController.ClusterSigningCertFile)
 certFileExists = !os.IsNotExist(err)
-certUsesDefault = (ctx.ComponentConfig.ClusterSigningCertFile == cmoptions.DefaultClusterSigningCertFile)
-_, err = os.Stat(ctx.ComponentConfig.ClusterSigningKeyFile)
+certUsesDefault = (ctx.ComponentConfig.CSRSigningController.ClusterSigningCertFile == cmoptions.DefaultClusterSigningCertFile)
+_, err = os.Stat(ctx.ComponentConfig.CSRSigningController.ClusterSigningKeyFile)
 keyFileExists = !os.IsNotExist(err)
-keyUsesDefault = (ctx.ComponentConfig.ClusterSigningKeyFile == cmoptions.DefaultClusterSigningKeyFile)
+keyUsesDefault = (ctx.ComponentConfig.CSRSigningController.ClusterSigningKeyFile == cmoptions.DefaultClusterSigningKeyFile)
 switch {
 case (keyFileExists && keyUsesDefault) || (certFileExists && certUsesDefault):
@@ -84,9 +84,9 @@ func startCSRSigningController(ctx ControllerContext) (bool, error) {
 signer, err := signer.NewCSRSigningController(
 c,
 ctx.InformerFactory.Certificates().V1beta1().CertificateSigningRequests(),
-ctx.ComponentConfig.ClusterSigningCertFile,
-ctx.ComponentConfig.ClusterSigningKeyFile,
-ctx.ComponentConfig.ClusterSigningDuration.Duration,
+ctx.ComponentConfig.CSRSigningController.ClusterSigningCertFile,
+ctx.ComponentConfig.CSRSigningController.ClusterSigningKeyFile,
+ctx.ComponentConfig.CSRSigningController.ClusterSigningDuration.Duration,
 )
 if err != nil {
 return false, fmt.Errorf("failed to start certificate controller: %v", err)

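Aside (not part of the patch): the signing controller above only starts when a cert and key are configured, and it uses os.Stat with os.IsNotExist to tell "user pointed at missing files" apart from "defaults simply absent". A small standalone sketch of that existence check (paths are illustrative assumptions; the real defaults come from cmoptions.DefaultClusterSigningCertFile and DefaultClusterSigningKeyFile):

package main

import (
	"fmt"
	"os"
)

// fileExists mirrors the os.Stat / os.IsNotExist pattern used above to decide
// whether the configured signing cert and key are actually present on disk.
func fileExists(path string) bool {
	_, err := os.Stat(path)
	return !os.IsNotExist(err)
}

func main() {
	certFile := "/etc/kubernetes/ca/ca.pem" // assumed example path
	keyFile := "/etc/kubernetes/ca/ca.key"  // assumed example path
	fmt.Println("cert present:", fileExists(certFile), "key present:", fileExists(keyFile))
}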
View File

@@ -106,7 +106,7 @@ controller, and serviceaccounts controller.`,
 func ResyncPeriod(c *config.CompletedConfig) func() time.Duration {
 return func() time.Duration {
 factor := rand.Float64() + 1
-return time.Duration(float64(c.Generic.ComponentConfig.MinResyncPeriod.Nanoseconds()) * factor)
+return time.Duration(float64(c.Generic.ComponentConfig.GenericComponent.MinResyncPeriod.Nanoseconds()) * factor)
 }
 }
@@ -139,8 +139,8 @@ func Run(c *config.CompletedConfig) error {
 ClientConfig: c.Generic.Kubeconfig,
 }
 var clientBuilder controller.ControllerClientBuilder
-if c.Generic.ComponentConfig.UseServiceAccountCredentials {
-if len(c.Generic.ComponentConfig.ServiceAccountKeyFile) == 0 {
+if c.Generic.ComponentConfig.KubeCloudShared.UseServiceAccountCredentials {
+if len(c.Generic.ComponentConfig.KubeCloudShared.ServiceAccountKeyFile) == 0 {
 // It'c possible another controller process is creating the tokens for us.
 // If one isn't, we'll timeout and exit when our client builder is unable to create the tokens.
 glog.Warningf("--use-service-account-credentials was specified without providing a --service-account-private-key-file")
@@ -170,7 +170,7 @@ func Run(c *config.CompletedConfig) error {
 select {}
 }
-if !c.Generic.ComponentConfig.LeaderElection.LeaderElect {
+if !c.Generic.ComponentConfig.GenericComponent.LeaderElection.LeaderElect {
 run(wait.NeverStop)
 panic("unreachable")
 }
@@ -182,7 +182,7 @@ func Run(c *config.CompletedConfig) error {
 // add a uniquifier so that two processes on the same host don't accidentally both become active
 id = id + "_" + string(uuid.NewUUID())
-rl, err := resourcelock.New(c.Generic.ComponentConfig.LeaderElection.ResourceLock,
+rl, err := resourcelock.New(c.Generic.ComponentConfig.GenericComponent.LeaderElection.ResourceLock,
 "kube-system",
 "kube-controller-manager",
 c.Generic.LeaderElectionClient.CoreV1(),
@@ -196,9 +196,9 @@ func Run(c *config.CompletedConfig) error {
 leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{
 Lock: rl,
-LeaseDuration: c.Generic.ComponentConfig.LeaderElection.LeaseDuration.Duration,
-RenewDeadline: c.Generic.ComponentConfig.LeaderElection.RenewDeadline.Duration,
-RetryPeriod: c.Generic.ComponentConfig.LeaderElection.RetryPeriod.Duration,
+LeaseDuration: c.Generic.ComponentConfig.GenericComponent.LeaderElection.LeaseDuration.Duration,
+RenewDeadline: c.Generic.ComponentConfig.GenericComponent.LeaderElection.RenewDeadline.Duration,
+RetryPeriod: c.Generic.ComponentConfig.GenericComponent.LeaderElection.RetryPeriod.Duration,
 Callbacks: leaderelection.LeaderCallbacks{
 OnStartedLeading: run,
 OnStoppedLeading: func() {
@@ -216,7 +216,7 @@ type ControllerContext struct {
 // InformerFactory gives access to informers for the controller.
 InformerFactory informers.SharedInformerFactory
-// Options provides access to init options for a given controller
+// ComponentConfig provides access to init options for a given controller
 ComponentConfig componentconfig.KubeControllerManagerConfiguration
 // AvailableResources is a map listing currently available resources
@@ -390,8 +390,8 @@ func CreateControllerContext(s *config.CompletedConfig, rootClientBuilder, clien
 return ControllerContext{}, err
 }
-cloud, loopMode, err := createCloudProvider(s.Generic.ComponentConfig.CloudProvider, s.Generic.ComponentConfig.ExternalCloudVolumePlugin,
-s.Generic.ComponentConfig.CloudConfigFile, s.Generic.ComponentConfig.AllowUntaggedCloud, sharedInformers)
+cloud, loopMode, err := createCloudProvider(s.Generic.ComponentConfig.CloudProvider.Name, s.Generic.ComponentConfig.ExternalCloudVolumePlugin,
+s.Generic.ComponentConfig.CloudProvider.CloudConfigFile, s.Generic.ComponentConfig.KubeCloudShared.AllowUntaggedCloud, sharedInformers)
 if err != nil {
 return ControllerContext{}, err
 }
@@ -429,7 +429,7 @@ func StartControllers(ctx ControllerContext, startSATokenController InitFunc, co
 continue
 }
-time.Sleep(wait.Jitter(ctx.ComponentConfig.ControllerStartInterval.Duration, ControllerStartJitter))
+time.Sleep(wait.Jitter(ctx.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter))
 glog.V(1).Infof("Starting %q", controllerName)
 started, err := initFn(ctx)
@@ -460,23 +460,23 @@ func (c serviceAccountTokenControllerStarter) startServiceAccountTokenController
 return false, nil
 }
-if len(ctx.ComponentConfig.ServiceAccountKeyFile) == 0 {
+if len(ctx.ComponentConfig.KubeCloudShared.ServiceAccountKeyFile) == 0 {
 glog.Warningf("%q is disabled because there is no private key", saTokenControllerName)
 return false, nil
 }
-privateKey, err := certutil.PrivateKeyFromFile(ctx.ComponentConfig.ServiceAccountKeyFile)
+privateKey, err := certutil.PrivateKeyFromFile(ctx.ComponentConfig.KubeCloudShared.ServiceAccountKeyFile)
 if err != nil {
 return true, fmt.Errorf("error reading key for service account token controller: %v", err)
 }
 var rootCA []byte
-if ctx.ComponentConfig.RootCAFile != "" {
-rootCA, err = ioutil.ReadFile(ctx.ComponentConfig.RootCAFile)
+if ctx.ComponentConfig.SAController.RootCAFile != "" {
+rootCA, err = ioutil.ReadFile(ctx.ComponentConfig.SAController.RootCAFile)
 if err != nil {
-return true, fmt.Errorf("error reading root-ca-file at %s: %v", ctx.ComponentConfig.RootCAFile, err)
+return true, fmt.Errorf("error reading root-ca-file at %s: %v", ctx.ComponentConfig.SAController.RootCAFile, err)
 }
 if _, err := certutil.ParseCertsPEM(rootCA); err != nil {
-return true, fmt.Errorf("error parsing root-ca-file at %s: %v", ctx.ComponentConfig.RootCAFile, err)
+return true, fmt.Errorf("error parsing root-ca-file at %s: %v", ctx.ComponentConfig.SAController.RootCAFile, err)
 }
 } else {
 rootCA = c.rootClientBuilder.ConfigOrDie("tokens-controller").CAData
@@ -494,7 +494,7 @@ func (c serviceAccountTokenControllerStarter) startServiceAccountTokenController
 if err != nil {
 return true, fmt.Errorf("error creating Tokens controller: %v", err)
 }
-go controller.Run(int(ctx.ComponentConfig.ConcurrentSATokenSyncs), ctx.Stop)
+go controller.Run(int(ctx.ComponentConfig.SAController.ConcurrentSATokenSyncs), ctx.Stop)
 // start the first set of informers now so that other controllers can start
 ctx.InformerFactory.Start(ctx.Stop)

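Aside (not part of the patch): the leader-election settings now live under GenericComponent.LeaderElection, and the RunOrDie call above simply copies LeaseDuration, RenewDeadline and RetryPeriod out of that sub-struct. A hedged local sketch of the relationship between those three knobs (the values are assumed, roughly in line with upstream defaults; the real code uses componentconfig.LeaderElectionConfiguration and client-go's leaderelection package):

package main

import (
	"fmt"
	"time"
)

// Local stand-in for c.Generic.ComponentConfig.GenericComponent.LeaderElection.
type leaderElectionConfiguration struct {
	LeaderElect   bool
	LeaseDuration time.Duration
	RenewDeadline time.Duration
	RetryPeriod   time.Duration
}

func main() {
	le := leaderElectionConfiguration{
		LeaderElect:   true,
		LeaseDuration: 15 * time.Second, // assumed default-ish values
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
	}
	if !le.LeaderElect {
		fmt.Println("leader election disabled: run the controllers directly")
		return
	}
	// The three durations are expected to satisfy LeaseDuration > RenewDeadline > RetryPeriod,
	// so the current holder renews its lease well before other candidates retry acquisition.
	fmt.Printf("lease=%v renew=%v retry=%v\n", le.LeaseDuration, le.RenewDeadline, le.RetryPeriod)
}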
View File

@@ -68,33 +68,33 @@ func startServiceController(ctx ControllerContext) (bool, error) {
 ctx.ClientBuilder.ClientOrDie("service-controller"),
 ctx.InformerFactory.Core().V1().Services(),
 ctx.InformerFactory.Core().V1().Nodes(),
-ctx.ComponentConfig.ClusterName,
+ctx.ComponentConfig.KubeCloudShared.ClusterName,
 )
 if err != nil {
 // This error shouldn't fail. It lives like this as a legacy.
 glog.Errorf("Failed to start service controller: %v", err)
 return false, nil
 }
-go serviceController.Run(ctx.Stop, int(ctx.ComponentConfig.ConcurrentServiceSyncs))
+go serviceController.Run(ctx.Stop, int(ctx.ComponentConfig.ServiceController.ConcurrentServiceSyncs))
 return true, nil
 }
 func startNodeIpamController(ctx ControllerContext) (bool, error) {
 var clusterCIDR *net.IPNet = nil
 var serviceCIDR *net.IPNet = nil
-if ctx.ComponentConfig.AllocateNodeCIDRs {
+if ctx.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs {
 var err error
-if len(strings.TrimSpace(ctx.ComponentConfig.ClusterCIDR)) != 0 {
-_, clusterCIDR, err = net.ParseCIDR(ctx.ComponentConfig.ClusterCIDR)
+if len(strings.TrimSpace(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR)) != 0 {
+_, clusterCIDR, err = net.ParseCIDR(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR)
 if err != nil {
-glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.ComponentConfig.ClusterCIDR, err)
+glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.ComponentConfig.KubeCloudShared.ClusterCIDR, err)
 }
 }
-if len(strings.TrimSpace(ctx.ComponentConfig.ServiceCIDR)) != 0 {
-_, serviceCIDR, err = net.ParseCIDR(ctx.ComponentConfig.ServiceCIDR)
+if len(strings.TrimSpace(ctx.ComponentConfig.NodeIpamController.ServiceCIDR)) != 0 {
+_, serviceCIDR, err = net.ParseCIDR(ctx.ComponentConfig.NodeIpamController.ServiceCIDR)
 if err != nil {
-glog.Warningf("Unsuccessful parsing of service CIDR %v: %v", ctx.ComponentConfig.ServiceCIDR, err)
+glog.Warningf("Unsuccessful parsing of service CIDR %v: %v", ctx.ComponentConfig.NodeIpamController.ServiceCIDR, err)
 }
 }
 }
@@ -105,9 +105,9 @@ func startNodeIpamController(ctx ControllerContext) (bool, error) {
 ctx.ClientBuilder.ClientOrDie("node-controller"),
 clusterCIDR,
 serviceCIDR,
-int(ctx.ComponentConfig.NodeCIDRMaskSize),
-ctx.ComponentConfig.AllocateNodeCIDRs,
-ipam.CIDRAllocatorType(ctx.ComponentConfig.CIDRAllocatorType),
+int(ctx.ComponentConfig.NodeIpamController.NodeCIDRMaskSize),
+ctx.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs,
+ipam.CIDRAllocatorType(ctx.ComponentConfig.KubeCloudShared.CIDRAllocatorType),
 )
 if err != nil {
 return true, err
@@ -123,15 +123,15 @@ func startNodeLifecycleController(ctx ControllerContext) (bool, error) {
 ctx.InformerFactory.Extensions().V1beta1().DaemonSets(),
 ctx.Cloud,
 ctx.ClientBuilder.ClientOrDie("node-controller"),
-ctx.ComponentConfig.NodeMonitorPeriod.Duration,
-ctx.ComponentConfig.NodeStartupGracePeriod.Duration,
-ctx.ComponentConfig.NodeMonitorGracePeriod.Duration,
-ctx.ComponentConfig.PodEvictionTimeout.Duration,
-ctx.ComponentConfig.NodeEvictionRate,
-ctx.ComponentConfig.SecondaryNodeEvictionRate,
-ctx.ComponentConfig.LargeClusterSizeThreshold,
-ctx.ComponentConfig.UnhealthyZoneThreshold,
-ctx.ComponentConfig.EnableTaintManager,
+ctx.ComponentConfig.KubeCloudShared.NodeMonitorPeriod.Duration,
+ctx.ComponentConfig.NodeLifecycleController.NodeStartupGracePeriod.Duration,
+ctx.ComponentConfig.NodeLifecycleController.NodeMonitorGracePeriod.Duration,
+ctx.ComponentConfig.NodeLifecycleController.PodEvictionTimeout.Duration,
+ctx.ComponentConfig.NodeLifecycleController.NodeEvictionRate,
+ctx.ComponentConfig.NodeLifecycleController.SecondaryNodeEvictionRate,
+ctx.ComponentConfig.NodeLifecycleController.LargeClusterSizeThreshold,
+ctx.ComponentConfig.NodeLifecycleController.UnhealthyZoneThreshold,
+ctx.ComponentConfig.NodeLifecycleController.EnableTaintManager,
 utilfeature.DefaultFeatureGate.Enabled(features.TaintBasedEvictions),
 utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition),
 )
@@ -143,8 +143,8 @@ func startNodeLifecycleController(ctx ControllerContext) (bool, error) {
 }
 func startRouteController(ctx ControllerContext) (bool, error) {
-if !ctx.ComponentConfig.AllocateNodeCIDRs || !ctx.ComponentConfig.ConfigureCloudRoutes {
-glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", ctx.ComponentConfig.AllocateNodeCIDRs, ctx.ComponentConfig.ConfigureCloudRoutes)
+if !ctx.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs || !ctx.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes {
+glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", ctx.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs, ctx.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes)
 return false, nil
 }
 if ctx.Cloud == nil {
@@ -156,27 +156,27 @@ func startRouteController(ctx ControllerContext) (bool, error) {
 glog.Warning("configure-cloud-routes is set, but cloud provider does not support routes. Will not configure cloud provider routes.")
 return false, nil
 }
-_, clusterCIDR, err := net.ParseCIDR(ctx.ComponentConfig.ClusterCIDR)
+_, clusterCIDR, err := net.ParseCIDR(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR)
 if err != nil {
-glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.ComponentConfig.ClusterCIDR, err)
+glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.ComponentConfig.KubeCloudShared.ClusterCIDR, err)
 }
-routeController := routecontroller.New(routes, ctx.ClientBuilder.ClientOrDie("route-controller"), ctx.InformerFactory.Core().V1().Nodes(), ctx.ComponentConfig.ClusterName, clusterCIDR)
-go routeController.Run(ctx.Stop, ctx.ComponentConfig.RouteReconciliationPeriod.Duration)
+routeController := routecontroller.New(routes, ctx.ClientBuilder.ClientOrDie("route-controller"), ctx.InformerFactory.Core().V1().Nodes(), ctx.ComponentConfig.KubeCloudShared.ClusterName, clusterCIDR)
+go routeController.Run(ctx.Stop, ctx.ComponentConfig.KubeCloudShared.RouteReconciliationPeriod.Duration)
 return true, nil
 }
 func startPersistentVolumeBinderController(ctx ControllerContext) (bool, error) {
 params := persistentvolumecontroller.ControllerParameters{
 KubeClient: ctx.ClientBuilder.ClientOrDie("persistent-volume-binder"),
-SyncPeriod: ctx.ComponentConfig.PVClaimBinderSyncPeriod.Duration,
-VolumePlugins: ProbeControllerVolumePlugins(ctx.Cloud, ctx.ComponentConfig.VolumeConfiguration),
+SyncPeriod: ctx.ComponentConfig.PersistentVolumeBinderController.PVClaimBinderSyncPeriod.Duration,
+VolumePlugins: ProbeControllerVolumePlugins(ctx.Cloud, ctx.ComponentConfig.PersistentVolumeBinderController.VolumeConfiguration),
 Cloud: ctx.Cloud,
-ClusterName: ctx.ComponentConfig.ClusterName,
+ClusterName: ctx.ComponentConfig.KubeCloudShared.ClusterName,
 VolumeInformer: ctx.InformerFactory.Core().V1().PersistentVolumes(),
 ClaimInformer: ctx.InformerFactory.Core().V1().PersistentVolumeClaims(),
 ClassInformer: ctx.InformerFactory.Storage().V1().StorageClasses(),
 PodInformer: ctx.InformerFactory.Core().V1().Pods(),
-EnableDynamicProvisioning: ctx.ComponentConfig.VolumeConfiguration.EnableDynamicProvisioning,
+EnableDynamicProvisioning: ctx.ComponentConfig.PersistentVolumeBinderController.VolumeConfiguration.EnableDynamicProvisioning,
 }
 volumeController, volumeControllerErr := persistentvolumecontroller.NewController(params)
 if volumeControllerErr != nil {
@@ -187,7 +187,7 @@ func startPersistentVolumeBinderController(ctx ControllerContext) (bool, error)
 }
 func startAttachDetachController(ctx ControllerContext) (bool, error) {
-if ctx.ComponentConfig.ReconcilerSyncLoopPeriod.Duration < time.Second {
+if ctx.ComponentConfig.AttachDetachController.ReconcilerSyncLoopPeriod.Duration < time.Second {
 return true, fmt.Errorf("Duration time must be greater than one second as set via command line option reconcile-sync-loop-period.")
 }
 attachDetachController, attachDetachControllerErr :=
@@ -199,9 +199,9 @@ func startAttachDetachController(ctx ControllerContext) (bool, error) {
 ctx.InformerFactory.Core().V1().PersistentVolumes(),
 ctx.Cloud,
 ProbeAttachableVolumePlugins(),
-GetDynamicPluginProber(ctx.ComponentConfig.VolumeConfiguration),
-ctx.ComponentConfig.DisableAttachDetachReconcilerSync,
-ctx.ComponentConfig.ReconcilerSyncLoopPeriod.Duration,
+GetDynamicPluginProber(ctx.ComponentConfig.PersistentVolumeBinderController.VolumeConfiguration),
+ctx.ComponentConfig.AttachDetachController.DisableAttachDetachReconcilerSync,
+ctx.ComponentConfig.AttachDetachController.ReconcilerSyncLoopPeriod.Duration,
 attachdetach.DefaultTimerConfig,
 )
 if attachDetachControllerErr != nil {
@@ -218,7 +218,7 @@ func startVolumeExpandController(ctx ControllerContext) (bool, error) {
 ctx.InformerFactory.Core().V1().PersistentVolumeClaims(),
 ctx.InformerFactory.Core().V1().PersistentVolumes(),
 ctx.Cloud,
-ProbeExpandableVolumePlugins(ctx.ComponentConfig.VolumeConfiguration))
+ProbeExpandableVolumePlugins(ctx.ComponentConfig.PersistentVolumeBinderController.VolumeConfiguration))
 if expandControllerErr != nil {
 return true, fmt.Errorf("Failed to start volume expand controller : %v", expandControllerErr)
@@ -235,7 +235,7 @@ func startEndpointController(ctx ControllerContext) (bool, error) {
 ctx.InformerFactory.Core().V1().Services(),
 ctx.InformerFactory.Core().V1().Endpoints(),
 ctx.ClientBuilder.ClientOrDie("endpoint-controller"),
-).Run(int(ctx.ComponentConfig.ConcurrentEndpointSyncs), ctx.Stop)
+).Run(int(ctx.ComponentConfig.EndPointController.ConcurrentEndpointSyncs), ctx.Stop)
 return true, nil
 }
@@ -245,7 +245,7 @@ func startReplicationController(ctx ControllerContext) (bool, error) {
 ctx.InformerFactory.Core().V1().ReplicationControllers(),
 ctx.ClientBuilder.ClientOrDie("replication-controller"),
 replicationcontroller.BurstReplicas,
-).Run(int(ctx.ComponentConfig.ConcurrentRCSyncs), ctx.Stop)
+).Run(int(ctx.ComponentConfig.ReplicationController.ConcurrentRCSyncs), ctx.Stop)
 return true, nil
 }
@@ -253,7 +253,7 @@ func startPodGCController(ctx ControllerContext) (bool, error) {
 go podgc.NewPodGC(
 ctx.ClientBuilder.ClientOrDie("pod-garbage-collector"),
 ctx.InformerFactory.Core().V1().Pods(),
-int(ctx.ComponentConfig.TerminatedPodGCThreshold),
+int(ctx.ComponentConfig.PodGCController.TerminatedPodGCThreshold),
 ).Run(ctx.Stop)
 return true, nil
 }
@@ -267,7 +267,7 @@ func startResourceQuotaController(ctx ControllerContext) (bool, error) {
 resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
 QuotaClient: resourceQuotaControllerClient.CoreV1(),
 ResourceQuotaInformer: ctx.InformerFactory.Core().V1().ResourceQuotas(),
-ResyncPeriod: controller.StaticResyncPeriodFunc(ctx.ComponentConfig.ResourceQuotaSyncPeriod.Duration),
+ResyncPeriod: controller.StaticResyncPeriodFunc(ctx.ComponentConfig.ResourceQuotaController.ResourceQuotaSyncPeriod.Duration),
 InformerFactory: ctx.InformerFactory,
 ReplenishmentResyncPeriod: ctx.ResyncPeriod,
 DiscoveryFunc: discoveryFunc,
@@ -285,7 +285,7 @@ func startResourceQuotaController(ctx ControllerContext) (bool, error) {
 if err != nil {
 return false, err
 }
-go resourceQuotaController.Run(int(ctx.ComponentConfig.ConcurrentResourceQuotaSyncs), ctx.Stop)
+go resourceQuotaController.Run(int(ctx.ComponentConfig.ResourceQuotaController.ConcurrentResourceQuotaSyncs), ctx.Stop)
 // Periodically the quota controller to detect new resource types
 go resourceQuotaController.Sync(discoveryFunc, 30*time.Second, ctx.Stop)
@@ -313,10 +313,10 @@ func startNamespaceController(ctx ControllerContext) (bool, error) {
 namespaceClientPool,
 discoverResourcesFn,
 ctx.InformerFactory.Core().V1().Namespaces(),
-ctx.ComponentConfig.NamespaceSyncPeriod.Duration,
+ctx.ComponentConfig.NamespaceController.NamespaceSyncPeriod.Duration,
 v1.FinalizerKubernetes,
 )
-go namespaceController.Run(int(ctx.ComponentConfig.ConcurrentNamespaceSyncs), ctx.Stop)
+go namespaceController.Run(int(ctx.ComponentConfig.NamespaceController.ConcurrentNamespaceSyncs), ctx.Stop)
 return true, nil
 }
@@ -344,7 +344,7 @@ func startTTLController(ctx ControllerContext) (bool, error) {
 }
 func startGarbageCollectorController(ctx ControllerContext) (bool, error) {
-if !ctx.ComponentConfig.EnableGarbageCollector {
+if !ctx.ComponentConfig.GarbageCollectorController.EnableGarbageCollector {
 return false, nil
 }
@@ -367,7 +367,7 @@ func startGarbageCollectorController(ctx ControllerContext) (bool, error) {
 // Get an initial set of deletable resources to prime the garbage collector.
 deletableResources := garbagecollector.GetDeletableResources(discoveryClient)
 ignoredResources := make(map[schema.GroupResource]struct{})
-for _, r := range ctx.ComponentConfig.GCIgnoredResources {
+for _, r := range ctx.ComponentConfig.GarbageCollectorController.GCIgnoredResources {
 ignoredResources[schema.GroupResource{Group: r.Group, Resource: r.Resource}] = struct{}{}
 }
 garbageCollector, err := garbagecollector.NewGarbageCollector(
@@ -384,7 +384,7 @@ func startGarbageCollectorController(ctx ControllerContext) (bool, error) {
 }
 // Start the garbage collector.
-workers := int(ctx.ComponentConfig.ConcurrentGCSyncs)
+workers := int(ctx.ComponentConfig.GarbageCollectorController.ConcurrentGCSyncs)
 go garbageCollector.Run(workers, ctx.Stop)
 // Periodically refresh the RESTMapper with new discovery information and sync

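Aside (not part of the patch): several of the start functions above parse KubeCloudShared.ClusterCIDR (and NodeIpamController.ServiceCIDR) with net.ParseCIDR, only warn on failure, and leave the *net.IPNet nil. A minimal standalone illustration of that behavior (the CIDR strings below are examples, not defaults):

package main

import (
	"fmt"
	"net"
	"strings"
)

// parseOptionalCIDR mirrors the pattern in startNodeIpamController and
// startRouteController: an empty string yields nil, a bad string yields nil
// plus a warning, and callers must tolerate a nil *net.IPNet.
func parseOptionalCIDR(s string) *net.IPNet {
	if len(strings.TrimSpace(s)) == 0 {
		return nil
	}
	_, cidr, err := net.ParseCIDR(s)
	if err != nil {
		fmt.Printf("Unsuccessful parsing of CIDR %v: %v\n", s, err)
		return nil
	}
	return cidr
}

func main() {
	fmt.Println(parseOptionalCIDR("10.244.0.0/16")) // example pod CIDR
	fmt.Println(parseOptionalCIDR(""))              // not configured
	fmt.Println(parseOptionalCIDR("not-a-cidr"))    // logged and ignored
}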
View File

@@ -40,6 +40,6 @@ func startDeploymentController(ctx ControllerContext) (bool, error) {
 if err != nil {
 return true, fmt.Errorf("error creating Deployment controller: %v", err)
 }
-go dc.Run(int(ctx.ComponentConfig.ConcurrentDeploymentSyncs), ctx.Stop)
+go dc.Run(int(ctx.ComponentConfig.DeploymentController.ConcurrentDeploymentSyncs), ctx.Stop)
 return true, nil
 }

View File

@ -164,13 +164,115 @@ type GroupResource struct {
type KubeControllerManagerConfiguration struct { type KubeControllerManagerConfiguration struct {
metav1.TypeMeta metav1.TypeMeta
// CloudProviderConfiguration holds configuration for CloudProvider related features.
CloudProvider CloudProviderConfiguration
// DebuggingConfiguration holds configuration for Debugging related features.
Debugging DebuggingConfiguration
// GenericComponentConfiguration holds configuration for GenericComponent
// related features both in cloud controller manager and kube-controller manager.
GenericComponent GenericComponentConfiguration
// KubeCloudSharedConfiguration holds configuration for shared related features
// both in cloud controller manager and kube-controller manager.
KubeCloudShared KubeCloudSharedConfiguration
// AttachDetachControllerConfiguration holds configuration for
// AttachDetachController related features.
AttachDetachController AttachDetachControllerConfiguration
// CSRSigningControllerConfiguration holds configuration for
// CSRSigningController related features.
CSRSigningController CSRSigningControllerConfiguration
// DaemonSetControllerConfiguration holds configuration for DaemonSetController
// related features.
DaemonSetController DaemonSetControllerConfiguration
// DeploymentControllerConfiguration holds configuration for
// DeploymentController related features.
DeploymentController DeploymentControllerConfiguration
// DeprecatedControllerConfiguration holds configuration for some deprecated
// features.
DeprecatedController DeprecatedControllerConfiguration
// EndPointControllerConfiguration holds configuration for EndPointController
// related features.
EndPointController EndPointControllerConfiguration
// GarbageCollectorControllerConfiguration holds configuration for
// GarbageCollectorController related features.
GarbageCollectorController GarbageCollectorControllerConfiguration
// HPAControllerConfiguration holds configuration for HPAController related features.
HPAController HPAControllerConfiguration
// JobControllerConfiguration holds configuration for JobController related features.
JobController JobControllerConfiguration
// NamespaceControllerConfiguration holds configuration for
// NamespaceController related features.
NamespaceController NamespaceControllerConfiguration
// NodeIpamControllerConfiguration holds configuration for NodeIpamController
// related features.
NodeIpamController NodeIpamControllerConfiguration
// NodeLifecycleControllerConfiguration holds configuration for
// NodeLifecycleController related features.
NodeLifecycleController NodeLifecycleControllerConfiguration
// PersistentVolumeBinderControllerConfiguration holds configuration for
// PersistentVolumeBinderController related features.
PersistentVolumeBinderController PersistentVolumeBinderControllerConfiguration
// PodGCControllerConfiguration holds configuration for PodGCController
// related features.
PodGCController PodGCControllerConfiguration
// ReplicaSetControllerConfiguration holds configuration for ReplicaSet related features.
ReplicaSetController ReplicaSetControllerConfiguration
// ReplicationControllerConfiguration holds configuration for
// ReplicationController related features.
ReplicationController ReplicationControllerConfiguration
// ResourceQuotaControllerConfiguration holds configuration for
// ResourceQuotaController related features.
ResourceQuotaController ResourceQuotaControllerConfiguration
// SAControllerConfiguration holds configuration for ServiceAccountController
// related features.
SAController SAControllerConfiguration
// ServiceControllerConfiguration holds configuration for ServiceController
// related features.
ServiceController ServiceControllerConfiguration
// Controllers is the list of controllers to enable or disable // Controllers is the list of controllers to enable or disable
// '*' means "all enabled by default controllers" // '*' means "all enabled by default controllers"
// 'foo' means "enable 'foo'" // 'foo' means "enable 'foo'"
// '-foo' means "disable 'foo'" // '-foo' means "disable 'foo'"
// first item for a particular name wins // first item for a particular name wins
Controllers []string Controllers []string
// externalCloudVolumePlugin specifies the plugin to use when cloudProvider is "external".
// It is currently used by the in repo cloud providers to handle node and volume control in the KCM.
ExternalCloudVolumePlugin string
}
type CloudProviderConfiguration struct {
// Name is the provider for cloud services.
Name string
// cloudConfigFile is the path to the cloud provider configuration file.
CloudConfigFile string
}
type DebuggingConfiguration struct {
// enableProfiling enables profiling via web interface host:port/debug/pprof/
EnableProfiling bool
// EnableContentionProfiling enables lock contention profiling, if
// EnableProfiling is true.
EnableContentionProfiling bool
}
type GenericComponentConfiguration struct {
// minResyncPeriod is the resync period in reflectors; will be random between
// minResyncPeriod and 2*minResyncPeriod.
MinResyncPeriod metav1.Duration
// contentType is contentType of requests sent to apiserver.
ContentType string
// kubeAPIQPS is the QPS to use while talking with kubernetes apiserver.
KubeAPIQPS float32
// kubeAPIBurst is the burst to use while talking with kubernetes apiserver.
KubeAPIBurst int32
// How long to wait between starting controller managers
ControllerStartInterval metav1.Duration
// leaderElection defines the configuration of leader election client.
LeaderElection LeaderElectionConfiguration
}
type KubeCloudSharedConfiguration struct {
// port is the port that the controller-manager's http service runs on. // port is the port that the controller-manager's http service runs on.
Port int32 Port int32
// address is the IP address to serve on (set to 0.0.0.0 for all interfaces). // address is the IP address to serve on (set to 0.0.0.0 for all interfaces).
@ -178,77 +280,103 @@ type KubeControllerManagerConfiguration struct {
// useServiceAccountCredentials indicates whether controllers should be run with // useServiceAccountCredentials indicates whether controllers should be run with
// individual service account credentials. // individual service account credentials.
UseServiceAccountCredentials bool UseServiceAccountCredentials bool
// cloudProvider is the provider for cloud services.
CloudProvider string
// cloudConfigFile is the path to the cloud provider configuration file.
CloudConfigFile string
// externalCloudVolumePlugin specifies the plugin to use when cloudProvider is "external".
// It is currently used by the in repo cloud providers to handle node and volume control in the KCM.
ExternalCloudVolumePlugin string
// run with untagged cloud instances // run with untagged cloud instances
AllowUntaggedCloud bool AllowUntaggedCloud bool
// concurrentEndpointSyncs is the number of endpoint syncing operations // routeReconciliationPeriod is the period for reconciling routes created for Nodes by cloud provider..
// that will be done concurrently. Larger number = faster endpoint updating, RouteReconciliationPeriod metav1.Duration
// but more CPU (and network) load. // nodeMonitorPeriod is the period for syncing NodeStatus in NodeController.
ConcurrentEndpointSyncs int32 NodeMonitorPeriod metav1.Duration
// concurrentRSSyncs is the number of replica sets that are allowed to sync // clusterName is the instance prefix for the cluster.
// concurrently. Larger number = more responsive replica management, but more ClusterName string
// CPU (and network) load. // clusterCIDR is CIDR Range for Pods in cluster.
ConcurrentRSSyncs int32 ClusterCIDR string
// concurrentRCSyncs is the number of replication controllers that are // AllocateNodeCIDRs enables CIDRs for Pods to be allocated and, if
// allowed to sync concurrently. Larger number = more responsive replica // ConfigureCloudRoutes is true, to be set on the cloud provider.
// management, but more CPU (and network) load. AllocateNodeCIDRs bool
ConcurrentRCSyncs int32 // CIDRAllocatorType determines what kind of pod CIDR allocator will be used.
// concurrentServiceSyncs is the number of services that are CIDRAllocatorType string
// allowed to sync concurrently. Larger number = more responsive service // configureCloudRoutes enables CIDRs allocated with allocateNodeCIDRs
// management, but more CPU (and network) load. // to be configured on the cloud provider.
ConcurrentServiceSyncs int32 ConfigureCloudRoutes bool
// concurrentResourceQuotaSyncs is the number of resource quotas that are // serviceAccountKeyFile is the filename containing a PEM-encoded private RSA key
// allowed to sync concurrently. Larger number = more responsive quota // used to sign service account tokens.
// management, but more CPU (and network) load. ServiceAccountKeyFile string
ConcurrentResourceQuotaSyncs int32
// concurrentDeploymentSyncs is the number of deployment objects that are
// allowed to sync concurrently. Larger number = more responsive deployments,
// but more CPU (and network) load.
ConcurrentDeploymentSyncs int32
// concurrentDaemonSetSyncs is the number of daemonset objects that are
// allowed to sync concurrently. Larger number = more responsive daemonset,
// but more CPU (and network) load.
ConcurrentDaemonSetSyncs int32
// concurrentJobSyncs is the number of job objects that are
// allowed to sync concurrently. Larger number = more responsive jobs,
// but more CPU (and network) load.
ConcurrentJobSyncs int32
// concurrentNamespaceSyncs is the number of namespace objects that are
// allowed to sync concurrently.
ConcurrentNamespaceSyncs int32
// concurrentSATokenSyncs is the number of service account token syncing operations
// that will be done concurrently.
ConcurrentSATokenSyncs int32
// lookupCacheSizeForRC is the size of lookup cache for replication controllers.
// Larger number = more responsive replica management, but more MEM load.
// nodeSyncPeriod is the period for syncing nodes from cloudprovider. Longer // nodeSyncPeriod is the period for syncing nodes from cloudprovider. Longer
// periods will result in fewer calls to cloud provider, but may delay addition // periods will result in fewer calls to cloud provider, but may delay addition
// of new nodes to cluster. // of new nodes to cluster.
NodeSyncPeriod metav1.Duration NodeSyncPeriod metav1.Duration
// routeReconciliationPeriod is the period for reconciling routes created for Nodes by cloud provider.. }
RouteReconciliationPeriod metav1.Duration
// resourceQuotaSyncPeriod is the period for syncing quota usage status type AttachDetachControllerConfiguration struct {
// in the system. // Reconciler runs a periodic loop to reconcile the desired state of the with
ResourceQuotaSyncPeriod metav1.Duration // the actual state of the world by triggering attach detach operations.
// namespaceSyncPeriod is the period for syncing namespace life-cycle // This flag enables or disables reconcile. Is false by default, and thus enabled.
// updates. DisableAttachDetachReconcilerSync bool
NamespaceSyncPeriod metav1.Duration // ReconcilerSyncLoopPeriod is the amount of time the reconciler sync states loop
// pvClaimBinderSyncPeriod is the period for syncing persistent volumes // wait between successive executions. Is set to 5 sec by default.
// and persistent volume claims. ReconcilerSyncLoopPeriod metav1.Duration
PVClaimBinderSyncPeriod metav1.Duration }
// minResyncPeriod is the resync period in reflectors; will be random between
// minResyncPeriod and 2*minResyncPeriod. type CSRSigningControllerConfiguration struct {
MinResyncPeriod metav1.Duration // clusterSigningCertFile is the filename containing a PEM-encoded
// terminatedPodGCThreshold is the number of terminated pods that can exist // X509 CA certificate used to issue cluster-scoped certificates
// before the terminated pod garbage collector starts deleting terminated pods. ClusterSigningCertFile string
// If <= 0, the terminated pod garbage collector is disabled. // clusterSigningCertFile is the filename containing a PEM-encoded
TerminatedPodGCThreshold int32 // RSA or ECDSA private key used to issue cluster-scoped certificates
ClusterSigningKeyFile string
// clusterSigningDuration is the length of duration signed certificates
// will be given.
ClusterSigningDuration metav1.Duration
}
type DaemonSetControllerConfiguration struct {
// concurrentDaemonSetSyncs is the number of daemonset objects that are
// allowed to sync concurrently. Larger number = more responsive daemonset,
// but more CPU (and network) load.
ConcurrentDaemonSetSyncs int32
}
type DeploymentControllerConfiguration struct {
// concurrentDeploymentSyncs is the number of deployment objects that are
// allowed to sync concurrently. Larger number = more responsive deployments,
// but more CPU (and network) load.
ConcurrentDeploymentSyncs int32
// deploymentControllerSyncPeriod is the period for syncing the deployments.
DeploymentControllerSyncPeriod metav1.Duration
}
type DeprecatedControllerConfiguration struct {
// DEPRECATED: deletingPodsQps is the number of nodes per second on which pods are deleted in
// case of node failure.
DeletingPodsQps float32
// DEPRECATED: deletingPodsBurst is the number of nodes on which pods are bursty deleted in
// case of node failure. For more details look into RateLimiter.
DeletingPodsBurst int32
// registerRetryCount is the number of retries for initial node registration.
// Retry interval equals node-sync-period.
RegisterRetryCount int32
}
type EndPointControllerConfiguration struct {
// concurrentEndpointSyncs is the number of endpoint syncing operations
// that will be done concurrently. Larger number = faster endpoint updating,
// but more CPU (and network) load.
ConcurrentEndpointSyncs int32
}
type GarbageCollectorControllerConfiguration struct {
// enables the generic garbage collector. MUST be synced with the
// corresponding flag of the kube-apiserver. WARNING: the generic garbage
// collector is an alpha feature.
EnableGarbageCollector bool
// concurrentGCSyncs is the number of garbage collector workers that are
// allowed to sync concurrently.
ConcurrentGCSyncs int32
// gcIgnoredResources is the list of GroupResources that garbage collection should ignore.
GCIgnoredResources []GroupResource
}
type HPAControllerConfiguration struct {
// horizontalPodAutoscalerSyncPeriod is the period for syncing the number of
// pods in horizontal pod autoscaler.
HorizontalPodAutoscalerSyncPeriod metav1.Duration
@ -259,108 +387,113 @@ type KubeControllerManagerConfiguration struct {
// horizontalPodAutoscalerTolerance is the tolerance for when
// resource usage suggests upscaling/downscaling
HorizontalPodAutoscalerTolerance float64
// deploymentControllerSyncPeriod is the period for syncing the deployments.
DeploymentControllerSyncPeriod metav1.Duration
// podEvictionTimeout is the grace period for deleting pods on failed nodes.
PodEvictionTimeout metav1.Duration
// DEPRECATED: deletingPodsQps is the number of nodes per second on which pods are deleted in
// case of node failure.
DeletingPodsQps float32
// DEPRECATED: deletingPodsBurst is the number of nodes on which pods are bursty deleted in
// case of node failure. For more details look into RateLimiter.
DeletingPodsBurst int32
// HorizontalPodAutoscalerUseRESTClients causes the HPA controller to use REST clients
// through the kube-aggregator when enabled, instead of using the legacy metrics client
// through the API server proxy.
HorizontalPodAutoscalerUseRESTClients bool
}
type JobControllerConfiguration struct {
// concurrentJobSyncs is the number of job objects that are
// allowed to sync concurrently. Larger number = more responsive jobs,
// but more CPU (and network) load.
ConcurrentJobSyncs int32
}
type NamespaceControllerConfiguration struct {
// namespaceSyncPeriod is the period for syncing namespace life-cycle
// updates.
NamespaceSyncPeriod metav1.Duration
// concurrentNamespaceSyncs is the number of namespace objects that are
// allowed to sync concurrently.
ConcurrentNamespaceSyncs int32
}
type NodeIpamControllerConfiguration struct {
// serviceCIDR is CIDR Range for Services in cluster.
ServiceCIDR string
// NodeCIDRMaskSize is the mask size for node cidr in cluster.
NodeCIDRMaskSize int32
}
type NodeLifecycleControllerConfiguration struct {
// If set to true enables NoExecute Taints and will evict all not-tolerating
// Pod running on Nodes tainted with this kind of Taints.
EnableTaintManager bool
// nodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is healthy
NodeEvictionRate float32
// secondaryNodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is unhealthy
SecondaryNodeEvictionRate float32
// nodeStartupGracePeriod is the amount of time which we allow starting a node to
// be unresponsive before marking it unhealthy.
NodeStartupGracePeriod metav1.Duration
// nodeMontiorGracePeriod is the amount of time which we allow a running node to be
// unresponsive before marking it unhealthy. Must be N times more than kubelet's
// nodeStatusUpdateFrequency, where N means number of retries allowed for kubelet
// to post node status.
NodeMonitorGracePeriod metav1.Duration
// podEvictionTimeout is the grace period for deleting pods on failed nodes.
PodEvictionTimeout metav1.Duration
// registerRetryCount is the number of retries for initial node registration.
// Retry interval equals node-sync-period.
RegisterRetryCount int32
// nodeStartupGracePeriod is the amount of time which we allow starting a node to
// be unresponsive before marking it unhealthy.
NodeStartupGracePeriod metav1.Duration
// nodeMonitorPeriod is the period for syncing NodeStatus in NodeController.
NodeMonitorPeriod metav1.Duration
// serviceAccountKeyFile is the filename containing a PEM-encoded private RSA key
// used to sign service account tokens.
ServiceAccountKeyFile string
// clusterSigningCertFile is the filename containing a PEM-encoded
// X509 CA certificate used to issue cluster-scoped certificates
ClusterSigningCertFile string
// clusterSigningCertFile is the filename containing a PEM-encoded
// RSA or ECDSA private key used to issue cluster-scoped certificates
ClusterSigningKeyFile string
// clusterSigningDuration is the length of duration signed certificates
// will be given.
ClusterSigningDuration metav1.Duration
// enableProfiling enables profiling via web interface host:port/debug/pprof/
EnableProfiling bool
// enableContentionProfiling enables lock contention profiling, if enableProfiling is true.
EnableContentionProfiling bool
// clusterName is the instance prefix for the cluster.
ClusterName string
// clusterCIDR is CIDR Range for Pods in cluster.
ClusterCIDR string
// serviceCIDR is CIDR Range for Services in cluster.
ServiceCIDR string
// NodeCIDRMaskSize is the mask size for node cidr in cluster.
NodeCIDRMaskSize int32
// AllocateNodeCIDRs enables CIDRs for Pods to be allocated and, if
// ConfigureCloudRoutes is true, to be set on the cloud provider.
AllocateNodeCIDRs bool
// CIDRAllocatorType determines what kind of pod CIDR allocator will be used.
CIDRAllocatorType string
// configureCloudRoutes enables CIDRs allocated with allocateNodeCIDRs
// to be configured on the cloud provider.
ConfigureCloudRoutes bool
// rootCAFile is the root certificate authority will be included in service
// account's token secret. This must be a valid PEM-encoded CA bundle.
RootCAFile string
// contentType is contentType of requests sent to apiserver.
ContentType string
// kubeAPIQPS is the QPS to use while talking with kubernetes apiserver.
KubeAPIQPS float32
// kubeAPIBurst is the burst to use while talking with kubernetes apiserver.
KubeAPIBurst int32
// leaderElection defines the configuration of leader election client.
LeaderElection LeaderElectionConfiguration
// volumeConfiguration holds configuration for volume related features.
VolumeConfiguration VolumeConfiguration
// How long to wait between starting controller managers
ControllerStartInterval metav1.Duration
// enables the generic garbage collector. MUST be synced with the
// corresponding flag of the kube-apiserver. WARNING: the generic garbage
// collector is an alpha feature.
EnableGarbageCollector bool
// concurrentGCSyncs is the number of garbage collector workers that are
// allowed to sync concurrently.
ConcurrentGCSyncs int32
// gcIgnoredResources is the list of GroupResources that garbage collection should ignore.
GCIgnoredResources []GroupResource
// nodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is healthy
NodeEvictionRate float32
// secondaryNodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is unhealthy
SecondaryNodeEvictionRate float32
// secondaryNodeEvictionRate is implicitly overridden to 0 for clusters smaller than or equal to largeClusterSizeThreshold
LargeClusterSizeThreshold int32
// Zone is treated as unhealthy in nodeEvictionRate and secondaryNodeEvictionRate when at least
// unhealthyZoneThreshold (no less than 3) of Nodes in the zone are NotReady
UnhealthyZoneThreshold float32
// Reconciler runs a periodic loop to reconcile the desired state of the with
// the actual state of the world by triggering attach detach operations.
// This flag enables or disables reconcile. Is false by default, and thus enabled.
DisableAttachDetachReconcilerSync bool
// ReconcilerSyncLoopPeriod is the amount of time the reconciler sync states loop
// wait between successive executions. Is set to 5 sec by default.
ReconcilerSyncLoopPeriod metav1.Duration
// If set to true enables NoExecute Taints and will evict all not-tolerating
// Pod running on Nodes tainted with this kind of Taints.
EnableTaintManager bool
// HorizontalPodAutoscalerUseRESTClients causes the HPA controller to use REST clients
// through the kube-aggregator when enabled, instead of using the legacy metrics client
// through the API server proxy.
HorizontalPodAutoscalerUseRESTClients bool
}
type PersistentVolumeBinderControllerConfiguration struct {
// pvClaimBinderSyncPeriod is the period for syncing persistent volumes
// and persistent volume claims.
PVClaimBinderSyncPeriod metav1.Duration
// volumeConfiguration holds configuration for volume related features.
VolumeConfiguration VolumeConfiguration
}
type PodGCControllerConfiguration struct {
// terminatedPodGCThreshold is the number of terminated pods that can exist
// before the terminated pod garbage collector starts deleting terminated pods.
// If <= 0, the terminated pod garbage collector is disabled.
TerminatedPodGCThreshold int32
}
type ReplicaSetControllerConfiguration struct {
// concurrentRSSyncs is the number of replica sets that are allowed to sync
// concurrently. Larger number = more responsive replica management, but more
// CPU (and network) load.
ConcurrentRSSyncs int32
}
type ReplicationControllerConfiguration struct {
// concurrentRCSyncs is the number of replication controllers that are
// allowed to sync concurrently. Larger number = more responsive replica
// management, but more CPU (and network) load.
ConcurrentRCSyncs int32
}
type ResourceQuotaControllerConfiguration struct {
// resourceQuotaSyncPeriod is the period for syncing quota usage status
// in the system.
ResourceQuotaSyncPeriod metav1.Duration
// concurrentResourceQuotaSyncs is the number of resource quotas that are
// allowed to sync concurrently. Larger number = more responsive quota
// management, but more CPU (and network) load.
ConcurrentResourceQuotaSyncs int32
}
type SAControllerConfiguration struct {
// concurrentSATokenSyncs is the number of service account token syncing operations
// that will be done concurrently.
ConcurrentSATokenSyncs int32
// rootCAFile is the root certificate authority will be included in service
// account's token secret. This must be a valid PEM-encoded CA bundle.
RootCAFile string
}
type ServiceControllerConfiguration struct {
// concurrentServiceSyncs is the number of services that are
// allowed to sync concurrently. Larger number = more responsive service
// management, but more CPU (and network) load.
ConcurrentServiceSyncs int32
}
// VolumeConfiguration contains *all* enumerated flags meant to configure all volume
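For code that consumes this configuration, the practical effect of the split is one extra level of nesting: flat lookups on the old KubeControllerManagerConfiguration become lookups through the per-controller sub-structs defined above. The following is a minimal, self-contained sketch of that pattern; the trimmed struct copies, the kubeControllerManagerConfiguration stand-in, and the example values are illustrative only and are not taken from this change:

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Trimmed copies of two of the sub-configurations introduced above,
// reduced to the fields used in this sketch.
type DeploymentControllerConfiguration struct {
	ConcurrentDeploymentSyncs      int32
	DeploymentControllerSyncPeriod metav1.Duration
}

type PodGCControllerConfiguration struct {
	TerminatedPodGCThreshold int32
}

// kubeControllerManagerConfiguration is a hypothetical stand-in for the real
// top-level struct: each controller now owns its own configuration block.
type kubeControllerManagerConfiguration struct {
	DeploymentController DeploymentControllerConfiguration
	PodGCController      PodGCControllerConfiguration
}

func main() {
	cfg := kubeControllerManagerConfiguration{
		DeploymentController: DeploymentControllerConfiguration{
			ConcurrentDeploymentSyncs:      5,
			DeploymentControllerSyncPeriod: metav1.Duration{Duration: 30 * time.Second},
		},
		PodGCController: PodGCControllerConfiguration{TerminatedPodGCThreshold: 12500},
	}

	// What used to be a flat lookup (cfg.ConcurrentDeploymentSyncs) is now nested.
	fmt.Println(cfg.DeploymentController.ConcurrentDeploymentSyncs)
	fmt.Println(cfg.DeploymentController.DeploymentControllerSyncPeriod.Duration)
	fmt.Println(cfg.PodGCController.TerminatedPodGCThreshold)
}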

View File

@ -206,13 +206,116 @@ type GroupResource struct {
type KubeControllerManagerConfiguration struct {
metav1.TypeMeta `json:",inline"`
// CloudProviderConfiguration holds configuration for CloudProvider related features.
CloudProvider CloudProviderConfiguration
// DebuggingConfiguration holds configuration for Debugging related features.
Debugging DebuggingConfiguration
// GenericComponentConfiguration holds configuration for GenericComponent
// related features both in cloud controller manager and kube-controller manager.
GenericComponent GenericComponentConfiguration
// KubeCloudSharedConfiguration holds configuration for shared related features
// both in cloud controller manager and kube-controller manager.
KubeCloudShared KubeCloudSharedConfiguration
// AttachDetachControllerConfiguration holds configuration for
// AttachDetachController related features.
AttachDetachController AttachDetachControllerConfiguration
// CSRSigningControllerConfiguration holds configuration for
// CSRSigningController related features.
CSRSigningController CSRSigningControllerConfiguration
// DaemonSetControllerConfiguration holds configuration for DaemonSetController
// related features.
DaemonSetController DaemonSetControllerConfiguration
// DeploymentControllerConfiguration holds configuration for
// DeploymentController related features.
DeploymentController DeploymentControllerConfiguration
// DeprecatedControllerConfiguration holds configuration for some deprecated
// features.
DeprecatedController DeprecatedControllerConfiguration
// EndPointControllerConfiguration holds configuration for EndPointController
// related features.
EndPointController EndPointControllerConfiguration
// GarbageCollectorControllerConfiguration holds configuration for
// GarbageCollectorController related features.
GarbageCollectorController GarbageCollectorControllerConfiguration
// HPAControllerConfiguration holds configuration for HPAController related features.
HPAController HPAControllerConfiguration
// JobControllerConfiguration holds configuration for JobController related features.
JobController JobControllerConfiguration
// NamespaceControllerConfiguration holds configuration for NamespaceController
// related features.
NamespaceController NamespaceControllerConfiguration
// NodeIpamControllerConfiguration holds configuration for NodeIpamController
// related features.
NodeIpamController NodeIpamControllerConfiguration
// NodeLifecycleControllerConfiguration holds configuration for
// NodeLifecycleController related features.
NodeLifecycleController NodeLifecycleControllerConfiguration
// PersistentVolumeBinderControllerConfiguration holds configuration for
// PersistentVolumeBinderController related features.
PersistentVolumeBinderController PersistentVolumeBinderControllerConfiguration
// PodGCControllerConfiguration holds configuration for PodGCController
// related features.
PodGCController PodGCControllerConfiguration
// ReplicaSetControllerConfiguration holds configuration for ReplicaSet related features.
ReplicaSetController ReplicaSetControllerConfiguration
// ReplicationControllerConfiguration holds configuration for
// ReplicationController related features.
ReplicationController ReplicationControllerConfiguration
// ResourceQuotaControllerConfiguration holds configuration for
// ResourceQuotaController related features.
ResourceQuotaController ResourceQuotaControllerConfiguration
// SAControllerConfiguration holds configuration for ServiceAccountController
// related features.
SAController SAControllerConfiguration
// ServiceControllerConfiguration holds configuration for ServiceController
// related features.
ServiceController ServiceControllerConfiguration
// Controllers is the list of controllers to enable or disable
// '*' means "all enabled by default controllers"
// 'foo' means "enable 'foo'"
// '-foo' means "disable 'foo'"
// first item for a particular name wins
Controllers []string `json:"controllers"`
// externalCloudVolumePlugin specifies the plugin to use when cloudProvider is "external".
// It is currently used by the in repo cloud providers to handle node and volume control in the KCM.
ExternalCloudVolumePlugin string `json:"externalCloudVolumePlugin"`
}
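The Controllers field above selects which control loops actually run. The sketch below is one plausible reading of the documented rules ('*', 'foo', '-foo', first match wins); it illustrates those semantics and is not the in-tree implementation:

package main

import "fmt"

// controllerEnabled sketches the selection rules documented on the Controllers
// field: the first entry naming the controller wins, "-name" disables it,
// "name" enables it, and "*" falls back to whether the controller is enabled
// by default.
func controllerEnabled(controllers []string, name string, enabledByDefault bool) bool {
	star := false
	for _, c := range controllers {
		switch c {
		case name:
			return true
		case "-" + name:
			return false
		case "*":
			star = true
		}
	}
	return star && enabledByDefault
}

func main() {
	fmt.Println(controllerEnabled([]string{"*", "-foo"}, "foo", true)) // false: the explicit "-foo" entry disables it
	fmt.Println(controllerEnabled([]string{"*"}, "bar", true))         // true: "*" keeps default-on controllers enabled
}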
type CloudProviderConfiguration struct {
// Name is the provider for cloud services.
Name string `json:"cloudProvider"`
// cloudConfigFile is the path to the cloud provider configuration file.
CloudConfigFile string `json:"cloudConfigFile"`
}
type DebuggingConfiguration struct {
// enableProfiling enables profiling via web interface host:port/debug/pprof/
EnableProfiling bool `json:"enableProfiling"`
// EnableContentionProfiling enables lock contention profiling, if
// EnableProfiling is true.
EnableContentionProfiling bool `json:"enableContentionProfiling"`
}
type GenericComponentConfiguration struct {
// minResyncPeriod is the resync period in reflectors; will be random between
// minResyncPeriod and 2*minResyncPeriod.
MinResyncPeriod metav1.Duration `json:"minResyncPeriod"`
// contentType is contentType of requests sent to apiserver.
ContentType string `json:"contentType"`
// kubeAPIQPS is the QPS to use while talking with kubernetes apiserver.
KubeAPIQPS float32 `json:"kubeAPIQPS"`
// kubeAPIBurst is the burst to use while talking with kubernetes apiserver.
KubeAPIBurst int32 `json:"kubeAPIBurst"`
// How long to wait between starting controller managers
ControllerStartInterval metav1.Duration `json:"controllerStartInterval"`
// leaderElection defines the configuration of leader election client.
LeaderElection LeaderElectionConfiguration `json:"leaderElection"`
}
type KubeCloudSharedConfiguration struct {
// port is the port that the controller-manager's http service runs on.
Port int32 `json:"port"`
// address is the IP address to serve on (set to 0.0.0.0 for all interfaces).
@ -220,77 +323,103 @@ type KubeControllerManagerConfiguration struct {
// useServiceAccountCredentials indicates whether controllers should be run with
// individual service account credentials.
UseServiceAccountCredentials bool `json:"useServiceAccountCredentials"`
// cloudProvider is the provider for cloud services.
CloudProvider string `json:"cloudProvider"`
// cloudConfigFile is the path to the cloud provider configuration file.
CloudConfigFile string `json:"cloudConfigFile"`
// externalCloudVolumePlugin specifies the plugin to use when cloudProvider is "external".
// It is currently used by the in repo cloud providers to handle node and volume control in the KCM.
ExternalCloudVolumePlugin string `json:"externalCloudVolumePlugin"`
// run with untagged cloud instances
AllowUntaggedCloud bool `json:"allowUntaggedCloud"`
// routeReconciliationPeriod is the period for reconciling routes created for Nodes by cloud provider..
RouteReconciliationPeriod metav1.Duration `json:"routeReconciliationPeriod"`
// nodeMonitorPeriod is the period for syncing NodeStatus in NodeController.
NodeMonitorPeriod metav1.Duration `json:"nodeMonitorPeriod"`
// clusterName is the instance prefix for the cluster.
ClusterName string `json:"clusterName"`
// clusterCIDR is CIDR Range for Pods in cluster.
ClusterCIDR string `json:"clusterCIDR"`
// AllocateNodeCIDRs enables CIDRs for Pods to be allocated and, if
// ConfigureCloudRoutes is true, to be set on the cloud provider.
AllocateNodeCIDRs bool `json:"allocateNodeCIDRs"`
// CIDRAllocatorType determines what kind of pod CIDR allocator will be used.
CIDRAllocatorType string `json:"cIDRAllocatorType"`
// configureCloudRoutes enables CIDRs allocated with allocateNodeCIDRs
// to be configured on the cloud provider.
ConfigureCloudRoutes *bool `json:"configureCloudRoutes"`
// serviceAccountKeyFile is the filename containing a PEM-encoded private RSA key
// used to sign service account tokens.
ServiceAccountKeyFile string `json:"serviceAccountKeyFile"`
// concurrentEndpointSyncs is the number of endpoint syncing operations
// that will be done concurrently. Larger number = faster endpoint updating,
// but more CPU (and network) load.
ConcurrentEndpointSyncs int32 `json:"concurrentEndpointSyncs"`
// concurrentRSSyncs is the number of replica sets that are allowed to sync
// concurrently. Larger number = more responsive replica management, but more
// CPU (and network) load.
ConcurrentRSSyncs int32 `json:"concurrentRSSyncs"`
// concurrentRCSyncs is the number of replication controllers that are
// allowed to sync concurrently. Larger number = more responsive replica
// management, but more CPU (and network) load.
ConcurrentRCSyncs int32 `json:"concurrentRCSyncs"`
// concurrentServiceSyncs is the number of services that are
// allowed to sync concurrently. Larger number = more responsive service
// management, but more CPU (and network) load.
ConcurrentServiceSyncs int32 `json:"concurrentServiceSyncs"`
// concurrentResourceQuotaSyncs is the number of resource quotas that are
// allowed to sync concurrently. Larger number = more responsive quota
// management, but more CPU (and network) load.
ConcurrentResourceQuotaSyncs int32 `json:"concurrentResourceQuotaSyncs"`
// concurrentDeploymentSyncs is the number of deployment objects that are
// allowed to sync concurrently. Larger number = more responsive deployments,
// but more CPU (and network) load.
ConcurrentDeploymentSyncs int32 `json:"concurrentDeploymentSyncs"`
// concurrentDaemonSetSyncs is the number of daemonset objects that are
// allowed to sync concurrently. Larger number = more responsive daemonset,
// but more CPU (and network) load.
ConcurrentDaemonSetSyncs int32 `json:"concurrentDaemonSetSyncs"`
// concurrentJobSyncs is the number of job objects that are
// allowed to sync concurrently. Larger number = more responsive jobs,
// but more CPU (and network) load.
ConcurrentJobSyncs int32 `json:"concurrentJobSyncs"`
// concurrentNamespaceSyncs is the number of namespace objects that are
// allowed to sync concurrently.
ConcurrentNamespaceSyncs int32 `json:"concurrentNamespaceSyncs"`
// concurrentSATokenSyncs is the number of service account token syncing operations
// that will be done concurrently.
ConcurrentSATokenSyncs int32 `json:"concurrentSATokenSyncs"`
// lookupCacheSizeForRC is the size of lookup cache for replication controllers.
// Larger number = more responsive replica management, but more MEM load.
// nodeSyncPeriod is the period for syncing nodes from cloudprovider. Longer
// periods will result in fewer calls to cloud provider, but may delay addition
// of new nodes to cluster.
NodeSyncPeriod metav1.Duration `json:"nodeSyncPeriod"`
// routeReconciliationPeriod is the period for reconciling routes created for Nodes by cloud provider..
RouteReconciliationPeriod metav1.Duration `json:"routeReconciliationPeriod"`
// resourceQuotaSyncPeriod is the period for syncing quota usage status
// in the system.
ResourceQuotaSyncPeriod metav1.Duration `json:"resourceQuotaSyncPeriod"`
// namespaceSyncPeriod is the period for syncing namespace life-cycle
// updates.
NamespaceSyncPeriod metav1.Duration `json:"namespaceSyncPeriod"`
// pvClaimBinderSyncPeriod is the period for syncing persistent volumes
// and persistent volume claims.
PVClaimBinderSyncPeriod metav1.Duration `json:"pVClaimBinderSyncPeriod"`
// minResyncPeriod is the resync period in reflectors; will be random between
// minResyncPeriod and 2*minResyncPeriod.
MinResyncPeriod metav1.Duration `json:"minResyncPeriod"`
// terminatedPodGCThreshold is the number of terminated pods that can exist
// before the terminated pod garbage collector starts deleting terminated pods.
// If <= 0, the terminated pod garbage collector is disabled.
TerminatedPodGCThreshold int32 `json:"terminatedPodGCThreshold"`
}
type AttachDetachControllerConfiguration struct {
// Reconciler runs a periodic loop to reconcile the desired state of the with
// the actual state of the world by triggering attach detach operations.
// This flag enables or disables reconcile. Is false by default, and thus enabled.
DisableAttachDetachReconcilerSync bool `json:"disableAttachDetachReconcilerSync"`
// ReconcilerSyncLoopPeriod is the amount of time the reconciler sync states loop
// wait between successive executions. Is set to 5 sec by default.
ReconcilerSyncLoopPeriod metav1.Duration `json:"reconcilerSyncLoopPeriod"`
}
type CSRSigningControllerConfiguration struct {
// clusterSigningCertFile is the filename containing a PEM-encoded
// X509 CA certificate used to issue cluster-scoped certificates
ClusterSigningCertFile string `json:"clusterSigningCertFile"`
// clusterSigningCertFile is the filename containing a PEM-encoded
// RSA or ECDSA private key used to issue cluster-scoped certificates
ClusterSigningKeyFile string `json:"clusterSigningKeyFile"`
// clusterSigningDuration is the length of duration signed certificates
// will be given.
ClusterSigningDuration metav1.Duration `json:"clusterSigningDuration"`
}
type DaemonSetControllerConfiguration struct {
// concurrentDaemonSetSyncs is the number of daemonset objects that are
// allowed to sync concurrently. Larger number = more responsive daemonset,
// but more CPU (and network) load.
ConcurrentDaemonSetSyncs int32 `json:"concurrentDaemonSetSyncs"`
}
type DeploymentControllerConfiguration struct {
// concurrentDeploymentSyncs is the number of deployment objects that are
// allowed to sync concurrently. Larger number = more responsive deployments,
// but more CPU (and network) load.
ConcurrentDeploymentSyncs int32 `json:"concurrentDeploymentSyncs"`
// deploymentControllerSyncPeriod is the period for syncing the deployments.
DeploymentControllerSyncPeriod metav1.Duration `json:"deploymentControllerSyncPeriod"`
}
type DeprecatedControllerConfiguration struct {
// DEPRECATED: deletingPodsQps is the number of nodes per second on which pods are deleted in
// case of node failure.
DeletingPodsQps float32 `json:"deletingPodsQps"`
// DEPRECATED: deletingPodsBurst is the number of nodes on which pods are bursty deleted in
// case of node failure. For more details look into RateLimiter.
DeletingPodsBurst int32 `json:"deletingPodsBurst"`
// registerRetryCount is the number of retries for initial node registration.
// Retry interval equals node-sync-period.
RegisterRetryCount int32 `json:"registerRetryCount"`
}
type EndPointControllerConfiguration struct {
// concurrentEndpointSyncs is the number of endpoint syncing operations
// that will be done concurrently. Larger number = faster endpoint updating,
// but more CPU (and network) load.
ConcurrentEndpointSyncs int32 `json:"concurrentEndpointSyncs"`
}
type GarbageCollectorControllerConfiguration struct {
// enables the generic garbage collector. MUST be synced with the
// corresponding flag of the kube-apiserver. WARNING: the generic garbage
// collector is an alpha feature.
EnableGarbageCollector *bool `json:"enableGarbageCollector"`
// concurrentGCSyncs is the number of garbage collector workers that are
// allowed to sync concurrently.
ConcurrentGCSyncs int32 `json:"concurrentGCSyncs"`
// gcIgnoredResources is the list of GroupResources that garbage collection should ignore.
GCIgnoredResources []GroupResource `json:"gCIgnoredResources"`
}
type HPAControllerConfiguration struct {
// horizontalPodAutoscalerSyncPeriod is the period for syncing the number of
// pods in horizontal pod autoscaler.
HorizontalPodAutoscalerSyncPeriod metav1.Duration `json:"horizontalPodAutoscalerSyncPeriod"`
@ -301,108 +430,113 @@ type KubeControllerManagerConfiguration struct {
// horizontalPodAutoscalerTolerance is the tolerance for when
// resource usage suggests upscaling/downscaling
HorizontalPodAutoscalerTolerance float64 `json:"horizontalPodAutoscalerTolerance"`
// deploymentControllerSyncPeriod is the period for syncing the deployments.
DeploymentControllerSyncPeriod metav1.Duration `json:"deploymentControllerSyncPeriod"`
// podEvictionTimeout is the grace period for deleting pods on failed nodes.
PodEvictionTimeout metav1.Duration `json:"podEvictionTimeout"`
// DEPRECATED: deletingPodsQps is the number of nodes per second on which pods are deleted in
// case of node failure.
DeletingPodsQps float32 `json:"deletingPodsQps"`
// DEPRECATED: deletingPodsBurst is the number of nodes on which pods are bursty deleted in
// case of node failure. For more details look into RateLimiter.
DeletingPodsBurst int32 `json:"deletingPodsBurst"`
// HorizontalPodAutoscalerUseRESTClients causes the HPA controller to use REST clients
// through the kube-aggregator when enabled, instead of using the legacy metrics client
// through the API server proxy.
HorizontalPodAutoscalerUseRESTClients *bool `json:"horizontalPodAutoscalerUseRESTClients"`
}
type JobControllerConfiguration struct {
// concurrentJobSyncs is the number of job objects that are
// allowed to sync concurrently. Larger number = more responsive jobs,
// but more CPU (and network) load.
ConcurrentJobSyncs int32
}
type NamespaceControllerConfiguration struct {
// namespaceSyncPeriod is the period for syncing namespace life-cycle
// updates.
NamespaceSyncPeriod metav1.Duration `json:"namespaceSyncPeriod"`
// concurrentNamespaceSyncs is the number of namespace objects that are
// allowed to sync concurrently.
ConcurrentNamespaceSyncs int32 `json:"concurrentNamespaceSyncs"`
}
type NodeIpamControllerConfiguration struct {
// serviceCIDR is CIDR Range for Services in cluster.
ServiceCIDR string `json:"serviceCIDR"`
// NodeCIDRMaskSize is the mask size for node cidr in cluster.
NodeCIDRMaskSize int32 `json:"nodeCIDRMaskSize"`
}
type NodeLifecycleControllerConfiguration struct {
// If set to true enables NoExecute Taints and will evict all not-tolerating
// Pod running on Nodes tainted with this kind of Taints.
EnableTaintManager *bool `json:"enableTaintManager"`
// nodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is healthy
NodeEvictionRate float32 `json:"nodeEvictionRate"`
// secondaryNodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is unhealthy
SecondaryNodeEvictionRate float32 `json:"secondaryNodeEvictionRate"`
// nodeStartupGracePeriod is the amount of time which we allow starting a node to
// be unresponsive before marking it unhealthy.
NodeStartupGracePeriod metav1.Duration `json:"nodeStartupGracePeriod"`
// nodeMontiorGracePeriod is the amount of time which we allow a running node to be
// unresponsive before marking it unhealthy. Must be N times more than kubelet's
// nodeStatusUpdateFrequency, where N means number of retries allowed for kubelet
// to post node status.
NodeMonitorGracePeriod metav1.Duration `json:"nodeMonitorGracePeriod"`
// podEvictionTimeout is the grace period for deleting pods on failed nodes.
PodEvictionTimeout metav1.Duration `json:"podEvictionTimeout"`
// registerRetryCount is the number of retries for initial node registration.
// Retry interval equals node-sync-period.
RegisterRetryCount int32 `json:"registerRetryCount"`
// nodeStartupGracePeriod is the amount of time which we allow starting a node to
// be unresponsive before marking it unhealthy.
NodeStartupGracePeriod metav1.Duration `json:"nodeStartupGracePeriod"`
// nodeMonitorPeriod is the period for syncing NodeStatus in NodeController.
NodeMonitorPeriod metav1.Duration `json:"nodeMonitorPeriod"`
// serviceAccountKeyFile is the filename containing a PEM-encoded private RSA key
// used to sign service account tokens.
ServiceAccountKeyFile string `json:"serviceAccountKeyFile"`
// clusterSigningCertFile is the filename containing a PEM-encoded
// X509 CA certificate used to issue cluster-scoped certificates
ClusterSigningCertFile string `json:"clusterSigningCertFile"`
// clusterSigningCertFile is the filename containing a PEM-encoded
// RSA or ECDSA private key used to issue cluster-scoped certificates
ClusterSigningKeyFile string `json:"clusterSigningKeyFile"`
// clusterSigningDuration is the length of duration signed certificates
// will be given.
ClusterSigningDuration metav1.Duration `json:"clusterSigningDuration"`
// enableProfiling enables profiling via web interface host:port/debug/pprof/
EnableProfiling bool `json:"enableProfiling"`
// enableContentionProfiling enables lock contention profiling, if enableProfiling is true.
EnableContentionProfiling bool `json:"enableContentionProfiling"`
// clusterName is the instance prefix for the cluster.
ClusterName string `json:"clusterName"`
// clusterCIDR is CIDR Range for Pods in cluster.
ClusterCIDR string `json:"clusterCIDR"`
// serviceCIDR is CIDR Range for Services in cluster.
ServiceCIDR string `json:"serviceCIDR"`
// NodeCIDRMaskSize is the mask size for node cidr in cluster.
NodeCIDRMaskSize int32 `json:"nodeCIDRMaskSize"`
// AllocateNodeCIDRs enables CIDRs for Pods to be allocated and, if
// ConfigureCloudRoutes is true, to be set on the cloud provider.
AllocateNodeCIDRs bool `json:"allocateNodeCIDRs"`
// CIDRAllocatorType determines what kind of pod CIDR allocator will be used.
CIDRAllocatorType string `json:"cIDRAllocatorType"`
// configureCloudRoutes enables CIDRs allocated with allocateNodeCIDRs
// to be configured on the cloud provider.
ConfigureCloudRoutes *bool `json:"configureCloudRoutes"`
// rootCAFile is the root certificate authority will be included in service
// account's token secret. This must be a valid PEM-encoded CA bundle.
RootCAFile string `json:"rootCAFile"`
// contentType is contentType of requests sent to apiserver.
ContentType string `json:"contentType"`
// kubeAPIQPS is the QPS to use while talking with kubernetes apiserver.
KubeAPIQPS float32 `json:"kubeAPIQPS"`
// kubeAPIBurst is the burst to use while talking with kubernetes apiserver.
KubeAPIBurst int32 `json:"kubeAPIBurst"`
// leaderElection defines the configuration of leader election client.
LeaderElection LeaderElectionConfiguration `json:"leaderElection"`
// volumeConfiguration holds configuration for volume related features.
VolumeConfiguration VolumeConfiguration `json:"volumeConfiguration"`
// How long to wait between starting controller managers
ControllerStartInterval metav1.Duration `json:"controllerStartInterval"`
// enables the generic garbage collector. MUST be synced with the
// corresponding flag of the kube-apiserver. WARNING: the generic garbage
// collector is an alpha feature.
EnableGarbageCollector *bool `json:"enableGarbageCollector"`
// concurrentGCSyncs is the number of garbage collector workers that are
// allowed to sync concurrently.
ConcurrentGCSyncs int32 `json:"concurrentGCSyncs"`
// gcIgnoredResources is the list of GroupResources that garbage collection should ignore.
GCIgnoredResources []GroupResource `json:"gCIgnoredResources"`
// nodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is healthy
NodeEvictionRate float32 `json:"nodeEvictionRate"`
// secondaryNodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is unhealthy
SecondaryNodeEvictionRate float32 `json:"secondaryNodeEvictionRate"`
// secondaryNodeEvictionRate is implicitly overridden to 0 for clusters smaller than or equal to largeClusterSizeThreshold
LargeClusterSizeThreshold int32 `json:"largeClusterSizeThreshold"`
// Zone is treated as unhealthy in nodeEvictionRate and secondaryNodeEvictionRate when at least
// unhealthyZoneThreshold (no less than 3) of Nodes in the zone are NotReady
UnhealthyZoneThreshold float32 `json:"unhealthyZoneThreshold"`
// Reconciler runs a periodic loop to reconcile the desired state of the with
// the actual state of the world by triggering attach detach operations.
// This flag enables or disables reconcile. Is false by default, and thus enabled.
DisableAttachDetachReconcilerSync bool `json:"disableAttachDetachReconcilerSync"`
// ReconcilerSyncLoopPeriod is the amount of time the reconciler sync states loop
// wait between successive executions. Is set to 5 sec by default.
ReconcilerSyncLoopPeriod metav1.Duration `json:"reconcilerSyncLoopPeriod"`
// If set to true enables NoExecute Taints and will evict all not-tolerating
// Pod running on Nodes tainted with this kind of Taints.
EnableTaintManager *bool `json:"enableTaintManager"`
// HorizontalPodAutoscalerUseRESTClients causes the HPA controller to use REST clients
// through the kube-aggregator when enabled, instead of using the legacy metrics client
// through the API server proxy.
HorizontalPodAutoscalerUseRESTClients *bool `json:"horizontalPodAutoscalerUseRESTClients"`
}
type PersistentVolumeBinderControllerConfiguration struct {
// pvClaimBinderSyncPeriod is the period for syncing persistent volumes
// and persistent volume claims.
PVClaimBinderSyncPeriod metav1.Duration `json:"pVClaimBinderSyncPeriod"`
// volumeConfiguration holds configuration for volume related features.
VolumeConfiguration VolumeConfiguration `json:"volumeConfiguration"`
}
type PodGCControllerConfiguration struct {
// terminatedPodGCThreshold is the number of terminated pods that can exist
// before the terminated pod garbage collector starts deleting terminated pods.
// If <= 0, the terminated pod garbage collector is disabled.
TerminatedPodGCThreshold int32 `json:"terminatedPodGCThreshold"`
}
type ReplicaSetControllerConfiguration struct {
// concurrentRSSyncs is the number of replica sets that are allowed to sync
// concurrently. Larger number = more responsive replica management, but more
// CPU (and network) load.
ConcurrentRSSyncs int32 `json:"concurrentRSSyncs"`
}
type ReplicationControllerConfiguration struct {
// concurrentRCSyncs is the number of replication controllers that are
// allowed to sync concurrently. Larger number = more responsive replica
// management, but more CPU (and network) load.
ConcurrentRCSyncs int32 `json:"concurrentRCSyncs"`
}
type ResourceQuotaControllerConfiguration struct {
// resourceQuotaSyncPeriod is the period for syncing quota usage status
// in the system.
ResourceQuotaSyncPeriod metav1.Duration `json:"resourceQuotaSyncPeriod"`
// concurrentResourceQuotaSyncs is the number of resource quotas that are
// allowed to sync concurrently. Larger number = more responsive quota
// management, but more CPU (and network) load.
ConcurrentResourceQuotaSyncs int32 `json:"concurrentResourceQuotaSyncs"`
}
type SAControllerConfiguration struct {
// concurrentSATokenSyncs is the number of service account token syncing operations
// that will be done concurrently.
ConcurrentSATokenSyncs int32 `json:"concurrentSATokenSyncs"`
// rootCAFile is the root certificate authority will be included in service
// account's token secret. This must be a valid PEM-encoded CA bundle.
RootCAFile string `json:"rootCAFile"`
}
type ServiceControllerConfiguration struct {
// concurrentServiceSyncs is the number of services that are
// allowed to sync concurrently. Larger number = more responsive service
// management, but more CPU (and network) load.
ConcurrentServiceSyncs int32 `json:"concurrentServiceSyncs"`
}
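Because these are the versioned (v1alpha1) types, optional fields such as EnableTaintManager, ConfigureCloudRoutes, and HorizontalPodAutoscalerUseRESTClients are pointers, so defaulting can tell an unset value apart from an explicit false, and every field carries a json tag. Below is a minimal round-trip sketch using plain encoding/json on a trimmed copy of NodeLifecycleControllerConfiguration, rather than the real apimachinery codec path; the input fragment and its values are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Trimmed copy of the versioned NodeLifecycleControllerConfiguration above,
// keeping only the two fields exercised here.
type NodeLifecycleControllerConfiguration struct {
	EnableTaintManager *bool           `json:"enableTaintManager"`
	PodEvictionTimeout metav1.Duration `json:"podEvictionTimeout"`
}

func main() {
	// A fragment a user might place in a component config file (illustrative values).
	raw := []byte(`{"enableTaintManager": true, "podEvictionTimeout": "5m0s"}`)

	var cfg NodeLifecycleControllerConfiguration
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}

	// A nil pointer here would mean "not set", letting defaulting fill it in.
	fmt.Println(*cfg.EnableTaintManager, cfg.PodEvictionTimeout.Duration)
}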
const (