From ec6399be53dde6c17e3da9f77c5070842881e5b7 Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Thu, 12 Apr 2018 11:12:10 +0800 Subject: [PATCH] split up the component config into smaller config --- .../app/controllermanager.go | 46 +- cmd/controller-manager/app/config.go | 1 - cmd/controller-manager/app/serve.go | 4 +- cmd/kube-controller-manager/app/apps.go | 4 +- .../app/autoscaling.go | 10 +- cmd/kube-controller-manager/app/batch.go | 2 +- .../app/certificates.go | 16 +- .../app/controllermanager.go | 38 +- cmd/kube-controller-manager/app/core.go | 92 ++-- cmd/kube-controller-manager/app/extensions.go | 2 +- pkg/apis/componentconfig/types.go | 447 +++++++++++------ pkg/apis/componentconfig/v1alpha1/types.go | 448 ++++++++++++------ 12 files changed, 688 insertions(+), 422 deletions(-) diff --git a/cmd/cloud-controller-manager/app/controllermanager.go b/cmd/cloud-controller-manager/app/controllermanager.go index 414b2f5a50..cc6624d483 100644 --- a/cmd/cloud-controller-manager/app/controllermanager.go +++ b/cmd/cloud-controller-manager/app/controllermanager.go @@ -85,13 +85,13 @@ the cloud specific control loops shipped with Kubernetes.`, func resyncPeriod(c *cloudcontrollerconfig.CompletedConfig) func() time.Duration { return func() time.Duration { factor := rand.Float64() + 1 - return time.Duration(float64(c.Generic.ComponentConfig.MinResyncPeriod.Nanoseconds()) * factor) + return time.Duration(float64(c.Generic.ComponentConfig.GenericComponent.MinResyncPeriod.Nanoseconds()) * factor) } } // Run runs the ExternalCMServer. This should never exit. func Run(c *cloudcontrollerconfig.CompletedConfig) error { - cloud, err := cloudprovider.InitCloudProvider(c.Generic.ComponentConfig.CloudProvider, c.Generic.ComponentConfig.CloudConfigFile) + cloud, err := cloudprovider.InitCloudProvider(c.Generic.ComponentConfig.CloudProvider.Name, c.Generic.ComponentConfig.CloudProvider.CloudConfigFile) if err != nil { glog.Fatalf("Cloud provider could not be initialized: %v", err) } @@ -100,7 +100,7 @@ func Run(c *cloudcontrollerconfig.CompletedConfig) error { } if cloud.HasClusterID() == false { - if c.Generic.ComponentConfig.AllowUntaggedCloud == true { + if c.Generic.ComponentConfig.KubeCloudShared.AllowUntaggedCloud == true { glog.Warning("detected a cluster without a ClusterID. A ClusterID will be required in the future. Please tag your cluster to avoid any future issues") } else { glog.Fatalf("no ClusterID found. A ClusterID is required for the cloud provider to function properly. 
This check can be bypassed by setting the allow-untagged-cloud option") @@ -132,7 +132,7 @@ func Run(c *cloudcontrollerconfig.CompletedConfig) error { ClientConfig: c.Generic.Kubeconfig, } var clientBuilder controller.ControllerClientBuilder - if c.Generic.ComponentConfig.UseServiceAccountCredentials { + if c.Generic.ComponentConfig.KubeCloudShared.UseServiceAccountCredentials { clientBuilder = controller.SAControllerClientBuilder{ ClientConfig: restclient.AnonymousClientConfig(c.Generic.Kubeconfig), CoreClient: c.Generic.Client.CoreV1(), @@ -148,7 +148,7 @@ func Run(c *cloudcontrollerconfig.CompletedConfig) error { } } - if !c.Generic.ComponentConfig.LeaderElection.LeaderElect { + if !c.Generic.ComponentConfig.GenericComponent.LeaderElection.LeaderElect { run(nil) panic("unreachable") } @@ -162,7 +162,7 @@ func Run(c *cloudcontrollerconfig.CompletedConfig) error { id = id + "_" + string(uuid.NewUUID()) // Lock required for leader election - rl, err := resourcelock.New(c.Generic.ComponentConfig.LeaderElection.ResourceLock, + rl, err := resourcelock.New(c.Generic.ComponentConfig.GenericComponent.LeaderElection.ResourceLock, "kube-system", "cloud-controller-manager", c.Generic.LeaderElectionClient.CoreV1(), @@ -177,9 +177,9 @@ func Run(c *cloudcontrollerconfig.CompletedConfig) error { // Try and become the leader and start cloud controller manager loops leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{ Lock: rl, - LeaseDuration: c.Generic.ComponentConfig.LeaderElection.LeaseDuration.Duration, - RenewDeadline: c.Generic.ComponentConfig.LeaderElection.RenewDeadline.Duration, - RetryPeriod: c.Generic.ComponentConfig.LeaderElection.RetryPeriod.Duration, + LeaseDuration: c.Generic.ComponentConfig.GenericComponent.LeaderElection.LeaseDuration.Duration, + RenewDeadline: c.Generic.ComponentConfig.GenericComponent.LeaderElection.RenewDeadline.Duration, + RetryPeriod: c.Generic.ComponentConfig.GenericComponent.LeaderElection.RetryPeriod.Duration, Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: run, OnStoppedLeading: func() { @@ -209,17 +209,17 @@ func startControllers(c *cloudcontrollerconfig.CompletedConfig, rootClientBuilde nodeController := cloudcontrollers.NewCloudNodeController( sharedInformers.Core().V1().Nodes(), client("cloud-node-controller"), cloud, - c.Generic.ComponentConfig.NodeMonitorPeriod.Duration, + c.Generic.ComponentConfig.KubeCloudShared.NodeMonitorPeriod.Duration, c.Extra.NodeStatusUpdateFrequency) nodeController.Run() - time.Sleep(wait.Jitter(c.Generic.ComponentConfig.ControllerStartInterval.Duration, ControllerStartJitter)) + time.Sleep(wait.Jitter(c.Generic.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter)) // Start the PersistentVolumeLabelController pvlController := cloudcontrollers.NewPersistentVolumeLabelController(client("pvl-controller"), cloud) threads := 5 go pvlController.Run(threads, stop) - time.Sleep(wait.Jitter(c.Generic.ComponentConfig.ControllerStartInterval.Duration, ControllerStartJitter)) + time.Sleep(wait.Jitter(c.Generic.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter)) // Start the service controller serviceController, err := servicecontroller.New( @@ -227,34 +227,34 @@ func startControllers(c *cloudcontrollerconfig.CompletedConfig, rootClientBuilde client("service-controller"), sharedInformers.Core().V1().Services(), sharedInformers.Core().V1().Nodes(), - c.Generic.ComponentConfig.ClusterName, + c.Generic.ComponentConfig.KubeCloudShared.ClusterName, ) 
if err != nil { glog.Errorf("Failed to start service controller: %v", err) } else { - go serviceController.Run(stop, int(c.Generic.ComponentConfig.ConcurrentServiceSyncs)) - time.Sleep(wait.Jitter(c.Generic.ComponentConfig.ControllerStartInterval.Duration, ControllerStartJitter)) + go serviceController.Run(stop, int(c.Generic.ComponentConfig.ServiceController.ConcurrentServiceSyncs)) + time.Sleep(wait.Jitter(c.Generic.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter)) } // If CIDRs should be allocated for pods and set on the CloudProvider, then start the route controller - if c.Generic.ComponentConfig.AllocateNodeCIDRs && c.Generic.ComponentConfig.ConfigureCloudRoutes { + if c.Generic.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs && c.Generic.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes { if routes, ok := cloud.Routes(); !ok { glog.Warning("configure-cloud-routes is set, but cloud provider does not support routes. Will not configure cloud provider routes.") } else { var clusterCIDR *net.IPNet - if len(strings.TrimSpace(c.Generic.ComponentConfig.ClusterCIDR)) != 0 { - _, clusterCIDR, err = net.ParseCIDR(c.Generic.ComponentConfig.ClusterCIDR) + if len(strings.TrimSpace(c.Generic.ComponentConfig.KubeCloudShared.ClusterCIDR)) != 0 { + _, clusterCIDR, err = net.ParseCIDR(c.Generic.ComponentConfig.KubeCloudShared.ClusterCIDR) if err != nil { - glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", c.Generic.ComponentConfig.ClusterCIDR, err) + glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", c.Generic.ComponentConfig.KubeCloudShared.ClusterCIDR, err) } } - routeController := routecontroller.New(routes, client("route-controller"), sharedInformers.Core().V1().Nodes(), c.Generic.ComponentConfig.ClusterName, clusterCIDR) - go routeController.Run(stop, c.Generic.ComponentConfig.RouteReconciliationPeriod.Duration) - time.Sleep(wait.Jitter(c.Generic.ComponentConfig.ControllerStartInterval.Duration, ControllerStartJitter)) + routeController := routecontroller.New(routes, client("route-controller"), sharedInformers.Core().V1().Nodes(), c.Generic.ComponentConfig.KubeCloudShared.ClusterName, clusterCIDR) + go routeController.Run(stop, c.Generic.ComponentConfig.KubeCloudShared.RouteReconciliationPeriod.Duration) + time.Sleep(wait.Jitter(c.Generic.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter)) } } else { - glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", c.Generic.ComponentConfig.AllocateNodeCIDRs, c.Generic.ComponentConfig.ConfigureCloudRoutes) + glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", c.Generic.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs, c.Generic.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes) } // If apiserver is not running we should wait for some time and fail only then. This is particularly diff --git a/cmd/controller-manager/app/config.go b/cmd/controller-manager/app/config.go index d67841562e..6a2fc592e1 100644 --- a/cmd/controller-manager/app/config.go +++ b/cmd/controller-manager/app/config.go @@ -26,7 +26,6 @@ import ( // Config is the main context object for the controller manager. type Config struct { - // TODO: split up the component config. This is not generic. 
ComponentConfig componentconfig.KubeControllerManagerConfiguration SecureServing *apiserver.SecureServingInfo diff --git a/cmd/controller-manager/app/serve.go b/cmd/controller-manager/app/serve.go index 7a9d36d114..8ab205de8b 100644 --- a/cmd/controller-manager/app/serve.go +++ b/cmd/controller-manager/app/serve.go @@ -40,9 +40,9 @@ type serveFunc func(handler http.Handler, shutdownTimeout time.Duration, stopCh func Serve(c *CompletedConfig, serveFunc serveFunc, stopCh <-chan struct{}) error { mux := mux.NewPathRecorderMux("controller-manager") healthz.InstallHandler(mux) - if c.ComponentConfig.EnableProfiling { + if c.ComponentConfig.Debugging.EnableProfiling { routes.Profiling{}.Install(mux) - if c.ComponentConfig.EnableContentionProfiling { + if c.ComponentConfig.Debugging.EnableContentionProfiling { goruntime.SetBlockProfileRate(1) } } diff --git a/cmd/kube-controller-manager/app/apps.go b/cmd/kube-controller-manager/app/apps.go index b54c9053b9..143367e3e9 100644 --- a/cmd/kube-controller-manager/app/apps.go +++ b/cmd/kube-controller-manager/app/apps.go @@ -43,7 +43,7 @@ func startDaemonSetController(ctx ControllerContext) (bool, error) { if err != nil { return true, fmt.Errorf("error creating DaemonSets controller: %v", err) } - go dsc.Run(int(ctx.ComponentConfig.ConcurrentDaemonSetSyncs), ctx.Stop) + go dsc.Run(int(ctx.ComponentConfig.DaemonSetController.ConcurrentDaemonSetSyncs), ctx.Stop) return true, nil } @@ -70,6 +70,6 @@ func startReplicaSetController(ctx ControllerContext) (bool, error) { ctx.InformerFactory.Core().V1().Pods(), ctx.ClientBuilder.ClientOrDie("replicaset-controller"), replicaset.BurstReplicas, - ).Run(int(ctx.ComponentConfig.ConcurrentRSSyncs), ctx.Stop) + ).Run(int(ctx.ComponentConfig.ReplicaSetController.ConcurrentRSSyncs), ctx.Stop) return true, nil } diff --git a/cmd/kube-controller-manager/app/autoscaling.go b/cmd/kube-controller-manager/app/autoscaling.go index e83ef05450..1fd7377a45 100644 --- a/cmd/kube-controller-manager/app/autoscaling.go +++ b/cmd/kube-controller-manager/app/autoscaling.go @@ -39,7 +39,7 @@ func startHPAController(ctx ControllerContext) (bool, error) { return false, nil } - if ctx.ComponentConfig.HorizontalPodAutoscalerUseRESTClients { + if ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerUseRESTClients { // use the new-style clients if support for custom metrics is enabled return startHPAControllerWithRESTClient(ctx) } @@ -90,7 +90,7 @@ func startHPAControllerWithMetricsClient(ctx ControllerContext, metricsClient me replicaCalc := podautoscaler.NewReplicaCalculator( metricsClient, hpaClient.CoreV1(), - ctx.ComponentConfig.HorizontalPodAutoscalerTolerance, + ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerTolerance, ) go podautoscaler.NewHorizontalController( hpaClientGoClient.CoreV1(), @@ -99,9 +99,9 @@ func startHPAControllerWithMetricsClient(ctx ControllerContext, metricsClient me restMapper, replicaCalc, ctx.InformerFactory.Autoscaling().V1().HorizontalPodAutoscalers(), - ctx.ComponentConfig.HorizontalPodAutoscalerSyncPeriod.Duration, - ctx.ComponentConfig.HorizontalPodAutoscalerUpscaleForbiddenWindow.Duration, - ctx.ComponentConfig.HorizontalPodAutoscalerDownscaleForbiddenWindow.Duration, + ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerSyncPeriod.Duration, + ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerUpscaleForbiddenWindow.Duration, + ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerDownscaleForbiddenWindow.Duration, ).Run(ctx.Stop) return true, nil } diff --git 
a/cmd/kube-controller-manager/app/batch.go b/cmd/kube-controller-manager/app/batch.go index 31340eadfd..24cf6e0fa7 100644 --- a/cmd/kube-controller-manager/app/batch.go +++ b/cmd/kube-controller-manager/app/batch.go @@ -36,7 +36,7 @@ func startJobController(ctx ControllerContext) (bool, error) { ctx.InformerFactory.Core().V1().Pods(), ctx.InformerFactory.Batch().V1().Jobs(), ctx.ClientBuilder.ClientOrDie("job-controller"), - ).Run(int(ctx.ComponentConfig.ConcurrentJobSyncs), ctx.Stop) + ).Run(int(ctx.ComponentConfig.JobController.ConcurrentJobSyncs), ctx.Stop) return true, nil } diff --git a/cmd/kube-controller-manager/app/certificates.go b/cmd/kube-controller-manager/app/certificates.go index 98e2f2ce49..9e6e70cf3f 100644 --- a/cmd/kube-controller-manager/app/certificates.go +++ b/cmd/kube-controller-manager/app/certificates.go @@ -37,7 +37,7 @@ func startCSRSigningController(ctx ControllerContext) (bool, error) { if !ctx.AvailableResources[schema.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"}] { return false, nil } - if ctx.ComponentConfig.ClusterSigningCertFile == "" || ctx.ComponentConfig.ClusterSigningKeyFile == "" { + if ctx.ComponentConfig.CSRSigningController.ClusterSigningCertFile == "" || ctx.ComponentConfig.CSRSigningController.ClusterSigningKeyFile == "" { return false, nil } @@ -52,15 +52,15 @@ func startCSRSigningController(ctx ControllerContext) (bool, error) { // bail out of startController without logging. var keyFileExists, keyUsesDefault, certFileExists, certUsesDefault bool - _, err := os.Stat(ctx.ComponentConfig.ClusterSigningCertFile) + _, err := os.Stat(ctx.ComponentConfig.CSRSigningController.ClusterSigningCertFile) certFileExists = !os.IsNotExist(err) - certUsesDefault = (ctx.ComponentConfig.ClusterSigningCertFile == cmoptions.DefaultClusterSigningCertFile) + certUsesDefault = (ctx.ComponentConfig.CSRSigningController.ClusterSigningCertFile == cmoptions.DefaultClusterSigningCertFile) - _, err = os.Stat(ctx.ComponentConfig.ClusterSigningKeyFile) + _, err = os.Stat(ctx.ComponentConfig.CSRSigningController.ClusterSigningKeyFile) keyFileExists = !os.IsNotExist(err) - keyUsesDefault = (ctx.ComponentConfig.ClusterSigningKeyFile == cmoptions.DefaultClusterSigningKeyFile) + keyUsesDefault = (ctx.ComponentConfig.CSRSigningController.ClusterSigningKeyFile == cmoptions.DefaultClusterSigningKeyFile) switch { case (keyFileExists && keyUsesDefault) || (certFileExists && certUsesDefault): @@ -84,9 +84,9 @@ func startCSRSigningController(ctx ControllerContext) (bool, error) { signer, err := signer.NewCSRSigningController( c, ctx.InformerFactory.Certificates().V1beta1().CertificateSigningRequests(), - ctx.ComponentConfig.ClusterSigningCertFile, - ctx.ComponentConfig.ClusterSigningKeyFile, - ctx.ComponentConfig.ClusterSigningDuration.Duration, + ctx.ComponentConfig.CSRSigningController.ClusterSigningCertFile, + ctx.ComponentConfig.CSRSigningController.ClusterSigningKeyFile, + ctx.ComponentConfig.CSRSigningController.ClusterSigningDuration.Duration, ) if err != nil { return false, fmt.Errorf("failed to start certificate controller: %v", err) diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index 7658f29e32..44d3f295aa 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -106,7 +106,7 @@ controller, and serviceaccounts controller.`, func ResyncPeriod(c *config.CompletedConfig) 
func() time.Duration { return func() time.Duration { factor := rand.Float64() + 1 - return time.Duration(float64(c.Generic.ComponentConfig.MinResyncPeriod.Nanoseconds()) * factor) + return time.Duration(float64(c.Generic.ComponentConfig.GenericComponent.MinResyncPeriod.Nanoseconds()) * factor) } } @@ -139,8 +139,8 @@ func Run(c *config.CompletedConfig) error { ClientConfig: c.Generic.Kubeconfig, } var clientBuilder controller.ControllerClientBuilder - if c.Generic.ComponentConfig.UseServiceAccountCredentials { - if len(c.Generic.ComponentConfig.ServiceAccountKeyFile) == 0 { + if c.Generic.ComponentConfig.KubeCloudShared.UseServiceAccountCredentials { + if len(c.Generic.ComponentConfig.KubeCloudShared.ServiceAccountKeyFile) == 0 { // It'c possible another controller process is creating the tokens for us. // If one isn't, we'll timeout and exit when our client builder is unable to create the tokens. glog.Warningf("--use-service-account-credentials was specified without providing a --service-account-private-key-file") @@ -170,7 +170,7 @@ func Run(c *config.CompletedConfig) error { select {} } - if !c.Generic.ComponentConfig.LeaderElection.LeaderElect { + if !c.Generic.ComponentConfig.GenericComponent.LeaderElection.LeaderElect { run(wait.NeverStop) panic("unreachable") } @@ -182,7 +182,7 @@ func Run(c *config.CompletedConfig) error { // add a uniquifier so that two processes on the same host don't accidentally both become active id = id + "_" + string(uuid.NewUUID()) - rl, err := resourcelock.New(c.Generic.ComponentConfig.LeaderElection.ResourceLock, + rl, err := resourcelock.New(c.Generic.ComponentConfig.GenericComponent.LeaderElection.ResourceLock, "kube-system", "kube-controller-manager", c.Generic.LeaderElectionClient.CoreV1(), @@ -196,9 +196,9 @@ func Run(c *config.CompletedConfig) error { leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{ Lock: rl, - LeaseDuration: c.Generic.ComponentConfig.LeaderElection.LeaseDuration.Duration, - RenewDeadline: c.Generic.ComponentConfig.LeaderElection.RenewDeadline.Duration, - RetryPeriod: c.Generic.ComponentConfig.LeaderElection.RetryPeriod.Duration, + LeaseDuration: c.Generic.ComponentConfig.GenericComponent.LeaderElection.LeaseDuration.Duration, + RenewDeadline: c.Generic.ComponentConfig.GenericComponent.LeaderElection.RenewDeadline.Duration, + RetryPeriod: c.Generic.ComponentConfig.GenericComponent.LeaderElection.RetryPeriod.Duration, Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: run, OnStoppedLeading: func() { @@ -216,7 +216,7 @@ type ControllerContext struct { // InformerFactory gives access to informers for the controller. 
InformerFactory informers.SharedInformerFactory - // Options provides access to init options for a given controller + // ComponentConfig provides access to init options for a given controller ComponentConfig componentconfig.KubeControllerManagerConfiguration // AvailableResources is a map listing currently available resources @@ -390,8 +390,8 @@ func CreateControllerContext(s *config.CompletedConfig, rootClientBuilder, clien return ControllerContext{}, err } - cloud, loopMode, err := createCloudProvider(s.Generic.ComponentConfig.CloudProvider, s.Generic.ComponentConfig.ExternalCloudVolumePlugin, - s.Generic.ComponentConfig.CloudConfigFile, s.Generic.ComponentConfig.AllowUntaggedCloud, sharedInformers) + cloud, loopMode, err := createCloudProvider(s.Generic.ComponentConfig.CloudProvider.Name, s.Generic.ComponentConfig.ExternalCloudVolumePlugin, + s.Generic.ComponentConfig.CloudProvider.CloudConfigFile, s.Generic.ComponentConfig.KubeCloudShared.AllowUntaggedCloud, sharedInformers) if err != nil { return ControllerContext{}, err } @@ -429,7 +429,7 @@ func StartControllers(ctx ControllerContext, startSATokenController InitFunc, co continue } - time.Sleep(wait.Jitter(ctx.ComponentConfig.ControllerStartInterval.Duration, ControllerStartJitter)) + time.Sleep(wait.Jitter(ctx.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter)) glog.V(1).Infof("Starting %q", controllerName) started, err := initFn(ctx) @@ -460,23 +460,23 @@ func (c serviceAccountTokenControllerStarter) startServiceAccountTokenController return false, nil } - if len(ctx.ComponentConfig.ServiceAccountKeyFile) == 0 { + if len(ctx.ComponentConfig.KubeCloudShared.ServiceAccountKeyFile) == 0 { glog.Warningf("%q is disabled because there is no private key", saTokenControllerName) return false, nil } - privateKey, err := certutil.PrivateKeyFromFile(ctx.ComponentConfig.ServiceAccountKeyFile) + privateKey, err := certutil.PrivateKeyFromFile(ctx.ComponentConfig.KubeCloudShared.ServiceAccountKeyFile) if err != nil { return true, fmt.Errorf("error reading key for service account token controller: %v", err) } var rootCA []byte - if ctx.ComponentConfig.RootCAFile != "" { - rootCA, err = ioutil.ReadFile(ctx.ComponentConfig.RootCAFile) + if ctx.ComponentConfig.SAController.RootCAFile != "" { + rootCA, err = ioutil.ReadFile(ctx.ComponentConfig.SAController.RootCAFile) if err != nil { - return true, fmt.Errorf("error reading root-ca-file at %s: %v", ctx.ComponentConfig.RootCAFile, err) + return true, fmt.Errorf("error reading root-ca-file at %s: %v", ctx.ComponentConfig.SAController.RootCAFile, err) } if _, err := certutil.ParseCertsPEM(rootCA); err != nil { - return true, fmt.Errorf("error parsing root-ca-file at %s: %v", ctx.ComponentConfig.RootCAFile, err) + return true, fmt.Errorf("error parsing root-ca-file at %s: %v", ctx.ComponentConfig.SAController.RootCAFile, err) } } else { rootCA = c.rootClientBuilder.ConfigOrDie("tokens-controller").CAData @@ -494,7 +494,7 @@ func (c serviceAccountTokenControllerStarter) startServiceAccountTokenController if err != nil { return true, fmt.Errorf("error creating Tokens controller: %v", err) } - go controller.Run(int(ctx.ComponentConfig.ConcurrentSATokenSyncs), ctx.Stop) + go controller.Run(int(ctx.ComponentConfig.SAController.ConcurrentSATokenSyncs), ctx.Stop) // start the first set of informers now so that other controllers can start ctx.InformerFactory.Start(ctx.Stop) diff --git a/cmd/kube-controller-manager/app/core.go b/cmd/kube-controller-manager/app/core.go index 
71b03ef86e..88e8b6d2e1 100644 --- a/cmd/kube-controller-manager/app/core.go +++ b/cmd/kube-controller-manager/app/core.go @@ -68,33 +68,33 @@ func startServiceController(ctx ControllerContext) (bool, error) { ctx.ClientBuilder.ClientOrDie("service-controller"), ctx.InformerFactory.Core().V1().Services(), ctx.InformerFactory.Core().V1().Nodes(), - ctx.ComponentConfig.ClusterName, + ctx.ComponentConfig.KubeCloudShared.ClusterName, ) if err != nil { // This error shouldn't fail. It lives like this as a legacy. glog.Errorf("Failed to start service controller: %v", err) return false, nil } - go serviceController.Run(ctx.Stop, int(ctx.ComponentConfig.ConcurrentServiceSyncs)) + go serviceController.Run(ctx.Stop, int(ctx.ComponentConfig.ServiceController.ConcurrentServiceSyncs)) return true, nil } func startNodeIpamController(ctx ControllerContext) (bool, error) { var clusterCIDR *net.IPNet = nil var serviceCIDR *net.IPNet = nil - if ctx.ComponentConfig.AllocateNodeCIDRs { + if ctx.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs { var err error - if len(strings.TrimSpace(ctx.ComponentConfig.ClusterCIDR)) != 0 { - _, clusterCIDR, err = net.ParseCIDR(ctx.ComponentConfig.ClusterCIDR) + if len(strings.TrimSpace(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR)) != 0 { + _, clusterCIDR, err = net.ParseCIDR(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR) if err != nil { - glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.ComponentConfig.ClusterCIDR, err) + glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.ComponentConfig.KubeCloudShared.ClusterCIDR, err) } } - if len(strings.TrimSpace(ctx.ComponentConfig.ServiceCIDR)) != 0 { - _, serviceCIDR, err = net.ParseCIDR(ctx.ComponentConfig.ServiceCIDR) + if len(strings.TrimSpace(ctx.ComponentConfig.NodeIpamController.ServiceCIDR)) != 0 { + _, serviceCIDR, err = net.ParseCIDR(ctx.ComponentConfig.NodeIpamController.ServiceCIDR) if err != nil { - glog.Warningf("Unsuccessful parsing of service CIDR %v: %v", ctx.ComponentConfig.ServiceCIDR, err) + glog.Warningf("Unsuccessful parsing of service CIDR %v: %v", ctx.ComponentConfig.NodeIpamController.ServiceCIDR, err) } } } @@ -105,9 +105,9 @@ func startNodeIpamController(ctx ControllerContext) (bool, error) { ctx.ClientBuilder.ClientOrDie("node-controller"), clusterCIDR, serviceCIDR, - int(ctx.ComponentConfig.NodeCIDRMaskSize), - ctx.ComponentConfig.AllocateNodeCIDRs, - ipam.CIDRAllocatorType(ctx.ComponentConfig.CIDRAllocatorType), + int(ctx.ComponentConfig.NodeIpamController.NodeCIDRMaskSize), + ctx.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs, + ipam.CIDRAllocatorType(ctx.ComponentConfig.KubeCloudShared.CIDRAllocatorType), ) if err != nil { return true, err @@ -123,15 +123,15 @@ func startNodeLifecycleController(ctx ControllerContext) (bool, error) { ctx.InformerFactory.Extensions().V1beta1().DaemonSets(), ctx.Cloud, ctx.ClientBuilder.ClientOrDie("node-controller"), - ctx.ComponentConfig.NodeMonitorPeriod.Duration, - ctx.ComponentConfig.NodeStartupGracePeriod.Duration, - ctx.ComponentConfig.NodeMonitorGracePeriod.Duration, - ctx.ComponentConfig.PodEvictionTimeout.Duration, - ctx.ComponentConfig.NodeEvictionRate, - ctx.ComponentConfig.SecondaryNodeEvictionRate, - ctx.ComponentConfig.LargeClusterSizeThreshold, - ctx.ComponentConfig.UnhealthyZoneThreshold, - ctx.ComponentConfig.EnableTaintManager, + ctx.ComponentConfig.KubeCloudShared.NodeMonitorPeriod.Duration, + ctx.ComponentConfig.NodeLifecycleController.NodeStartupGracePeriod.Duration, + 
ctx.ComponentConfig.NodeLifecycleController.NodeMonitorGracePeriod.Duration, + ctx.ComponentConfig.NodeLifecycleController.PodEvictionTimeout.Duration, + ctx.ComponentConfig.NodeLifecycleController.NodeEvictionRate, + ctx.ComponentConfig.NodeLifecycleController.SecondaryNodeEvictionRate, + ctx.ComponentConfig.NodeLifecycleController.LargeClusterSizeThreshold, + ctx.ComponentConfig.NodeLifecycleController.UnhealthyZoneThreshold, + ctx.ComponentConfig.NodeLifecycleController.EnableTaintManager, utilfeature.DefaultFeatureGate.Enabled(features.TaintBasedEvictions), utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition), ) @@ -143,8 +143,8 @@ func startNodeLifecycleController(ctx ControllerContext) (bool, error) { } func startRouteController(ctx ControllerContext) (bool, error) { - if !ctx.ComponentConfig.AllocateNodeCIDRs || !ctx.ComponentConfig.ConfigureCloudRoutes { - glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", ctx.ComponentConfig.AllocateNodeCIDRs, ctx.ComponentConfig.ConfigureCloudRoutes) + if !ctx.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs || !ctx.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes { + glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", ctx.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs, ctx.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes) return false, nil } if ctx.Cloud == nil { @@ -156,27 +156,27 @@ func startRouteController(ctx ControllerContext) (bool, error) { glog.Warning("configure-cloud-routes is set, but cloud provider does not support routes. Will not configure cloud provider routes.") return false, nil } - _, clusterCIDR, err := net.ParseCIDR(ctx.ComponentConfig.ClusterCIDR) + _, clusterCIDR, err := net.ParseCIDR(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR) if err != nil { - glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.ComponentConfig.ClusterCIDR, err) + glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.ComponentConfig.KubeCloudShared.ClusterCIDR, err) } - routeController := routecontroller.New(routes, ctx.ClientBuilder.ClientOrDie("route-controller"), ctx.InformerFactory.Core().V1().Nodes(), ctx.ComponentConfig.ClusterName, clusterCIDR) - go routeController.Run(ctx.Stop, ctx.ComponentConfig.RouteReconciliationPeriod.Duration) + routeController := routecontroller.New(routes, ctx.ClientBuilder.ClientOrDie("route-controller"), ctx.InformerFactory.Core().V1().Nodes(), ctx.ComponentConfig.KubeCloudShared.ClusterName, clusterCIDR) + go routeController.Run(ctx.Stop, ctx.ComponentConfig.KubeCloudShared.RouteReconciliationPeriod.Duration) return true, nil } func startPersistentVolumeBinderController(ctx ControllerContext) (bool, error) { params := persistentvolumecontroller.ControllerParameters{ KubeClient: ctx.ClientBuilder.ClientOrDie("persistent-volume-binder"), - SyncPeriod: ctx.ComponentConfig.PVClaimBinderSyncPeriod.Duration, - VolumePlugins: ProbeControllerVolumePlugins(ctx.Cloud, ctx.ComponentConfig.VolumeConfiguration), + SyncPeriod: ctx.ComponentConfig.PersistentVolumeBinderController.PVClaimBinderSyncPeriod.Duration, + VolumePlugins: ProbeControllerVolumePlugins(ctx.Cloud, ctx.ComponentConfig.PersistentVolumeBinderController.VolumeConfiguration), Cloud: ctx.Cloud, - ClusterName: ctx.ComponentConfig.ClusterName, + ClusterName: ctx.ComponentConfig.KubeCloudShared.ClusterName, VolumeInformer: ctx.InformerFactory.Core().V1().PersistentVolumes(), ClaimInformer: 
ctx.InformerFactory.Core().V1().PersistentVolumeClaims(), ClassInformer: ctx.InformerFactory.Storage().V1().StorageClasses(), PodInformer: ctx.InformerFactory.Core().V1().Pods(), - EnableDynamicProvisioning: ctx.ComponentConfig.VolumeConfiguration.EnableDynamicProvisioning, + EnableDynamicProvisioning: ctx.ComponentConfig.PersistentVolumeBinderController.VolumeConfiguration.EnableDynamicProvisioning, } volumeController, volumeControllerErr := persistentvolumecontroller.NewController(params) if volumeControllerErr != nil { @@ -187,7 +187,7 @@ func startPersistentVolumeBinderController(ctx ControllerContext) (bool, error) } func startAttachDetachController(ctx ControllerContext) (bool, error) { - if ctx.ComponentConfig.ReconcilerSyncLoopPeriod.Duration < time.Second { + if ctx.ComponentConfig.AttachDetachController.ReconcilerSyncLoopPeriod.Duration < time.Second { return true, fmt.Errorf("Duration time must be greater than one second as set via command line option reconcile-sync-loop-period.") } attachDetachController, attachDetachControllerErr := @@ -199,9 +199,9 @@ func startAttachDetachController(ctx ControllerContext) (bool, error) { ctx.InformerFactory.Core().V1().PersistentVolumes(), ctx.Cloud, ProbeAttachableVolumePlugins(), - GetDynamicPluginProber(ctx.ComponentConfig.VolumeConfiguration), - ctx.ComponentConfig.DisableAttachDetachReconcilerSync, - ctx.ComponentConfig.ReconcilerSyncLoopPeriod.Duration, + GetDynamicPluginProber(ctx.ComponentConfig.PersistentVolumeBinderController.VolumeConfiguration), + ctx.ComponentConfig.AttachDetachController.DisableAttachDetachReconcilerSync, + ctx.ComponentConfig.AttachDetachController.ReconcilerSyncLoopPeriod.Duration, attachdetach.DefaultTimerConfig, ) if attachDetachControllerErr != nil { @@ -218,7 +218,7 @@ func startVolumeExpandController(ctx ControllerContext) (bool, error) { ctx.InformerFactory.Core().V1().PersistentVolumeClaims(), ctx.InformerFactory.Core().V1().PersistentVolumes(), ctx.Cloud, - ProbeExpandableVolumePlugins(ctx.ComponentConfig.VolumeConfiguration)) + ProbeExpandableVolumePlugins(ctx.ComponentConfig.PersistentVolumeBinderController.VolumeConfiguration)) if expandControllerErr != nil { return true, fmt.Errorf("Failed to start volume expand controller : %v", expandControllerErr) @@ -235,7 +235,7 @@ func startEndpointController(ctx ControllerContext) (bool, error) { ctx.InformerFactory.Core().V1().Services(), ctx.InformerFactory.Core().V1().Endpoints(), ctx.ClientBuilder.ClientOrDie("endpoint-controller"), - ).Run(int(ctx.ComponentConfig.ConcurrentEndpointSyncs), ctx.Stop) + ).Run(int(ctx.ComponentConfig.EndPointController.ConcurrentEndpointSyncs), ctx.Stop) return true, nil } @@ -245,7 +245,7 @@ func startReplicationController(ctx ControllerContext) (bool, error) { ctx.InformerFactory.Core().V1().ReplicationControllers(), ctx.ClientBuilder.ClientOrDie("replication-controller"), replicationcontroller.BurstReplicas, - ).Run(int(ctx.ComponentConfig.ConcurrentRCSyncs), ctx.Stop) + ).Run(int(ctx.ComponentConfig.ReplicationController.ConcurrentRCSyncs), ctx.Stop) return true, nil } @@ -253,7 +253,7 @@ func startPodGCController(ctx ControllerContext) (bool, error) { go podgc.NewPodGC( ctx.ClientBuilder.ClientOrDie("pod-garbage-collector"), ctx.InformerFactory.Core().V1().Pods(), - int(ctx.ComponentConfig.TerminatedPodGCThreshold), + int(ctx.ComponentConfig.PodGCController.TerminatedPodGCThreshold), ).Run(ctx.Stop) return true, nil } @@ -267,7 +267,7 @@ func startResourceQuotaController(ctx ControllerContext) (bool, error) { 
resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{ QuotaClient: resourceQuotaControllerClient.CoreV1(), ResourceQuotaInformer: ctx.InformerFactory.Core().V1().ResourceQuotas(), - ResyncPeriod: controller.StaticResyncPeriodFunc(ctx.ComponentConfig.ResourceQuotaSyncPeriod.Duration), + ResyncPeriod: controller.StaticResyncPeriodFunc(ctx.ComponentConfig.ResourceQuotaController.ResourceQuotaSyncPeriod.Duration), InformerFactory: ctx.InformerFactory, ReplenishmentResyncPeriod: ctx.ResyncPeriod, DiscoveryFunc: discoveryFunc, @@ -285,7 +285,7 @@ func startResourceQuotaController(ctx ControllerContext) (bool, error) { if err != nil { return false, err } - go resourceQuotaController.Run(int(ctx.ComponentConfig.ConcurrentResourceQuotaSyncs), ctx.Stop) + go resourceQuotaController.Run(int(ctx.ComponentConfig.ResourceQuotaController.ConcurrentResourceQuotaSyncs), ctx.Stop) // Periodically the quota controller to detect new resource types go resourceQuotaController.Sync(discoveryFunc, 30*time.Second, ctx.Stop) @@ -313,10 +313,10 @@ func startNamespaceController(ctx ControllerContext) (bool, error) { namespaceClientPool, discoverResourcesFn, ctx.InformerFactory.Core().V1().Namespaces(), - ctx.ComponentConfig.NamespaceSyncPeriod.Duration, + ctx.ComponentConfig.NamespaceController.NamespaceSyncPeriod.Duration, v1.FinalizerKubernetes, ) - go namespaceController.Run(int(ctx.ComponentConfig.ConcurrentNamespaceSyncs), ctx.Stop) + go namespaceController.Run(int(ctx.ComponentConfig.NamespaceController.ConcurrentNamespaceSyncs), ctx.Stop) return true, nil } @@ -344,7 +344,7 @@ func startTTLController(ctx ControllerContext) (bool, error) { } func startGarbageCollectorController(ctx ControllerContext) (bool, error) { - if !ctx.ComponentConfig.EnableGarbageCollector { + if !ctx.ComponentConfig.GarbageCollectorController.EnableGarbageCollector { return false, nil } @@ -367,7 +367,7 @@ func startGarbageCollectorController(ctx ControllerContext) (bool, error) { // Get an initial set of deletable resources to prime the garbage collector. deletableResources := garbagecollector.GetDeletableResources(discoveryClient) ignoredResources := make(map[schema.GroupResource]struct{}) - for _, r := range ctx.ComponentConfig.GCIgnoredResources { + for _, r := range ctx.ComponentConfig.GarbageCollectorController.GCIgnoredResources { ignoredResources[schema.GroupResource{Group: r.Group, Resource: r.Resource}] = struct{}{} } garbageCollector, err := garbagecollector.NewGarbageCollector( @@ -384,7 +384,7 @@ func startGarbageCollectorController(ctx ControllerContext) (bool, error) { } // Start the garbage collector. 
- workers := int(ctx.ComponentConfig.ConcurrentGCSyncs) + workers := int(ctx.ComponentConfig.GarbageCollectorController.ConcurrentGCSyncs) go garbageCollector.Run(workers, ctx.Stop) // Periodically refresh the RESTMapper with new discovery information and sync diff --git a/cmd/kube-controller-manager/app/extensions.go b/cmd/kube-controller-manager/app/extensions.go index fe2133f58e..6cca85bab6 100644 --- a/cmd/kube-controller-manager/app/extensions.go +++ b/cmd/kube-controller-manager/app/extensions.go @@ -40,6 +40,6 @@ func startDeploymentController(ctx ControllerContext) (bool, error) { if err != nil { return true, fmt.Errorf("error creating Deployment controller: %v", err) } - go dc.Run(int(ctx.ComponentConfig.ConcurrentDeploymentSyncs), ctx.Stop) + go dc.Run(int(ctx.ComponentConfig.DeploymentController.ConcurrentDeploymentSyncs), ctx.Stop) return true, nil } diff --git a/pkg/apis/componentconfig/types.go b/pkg/apis/componentconfig/types.go index 8a3c952f0b..5687ae25b8 100644 --- a/pkg/apis/componentconfig/types.go +++ b/pkg/apis/componentconfig/types.go @@ -164,13 +164,115 @@ type GroupResource struct { type KubeControllerManagerConfiguration struct { metav1.TypeMeta + // CloudProviderConfiguration holds configuration for CloudProvider related features. + CloudProvider CloudProviderConfiguration + // DebuggingConfiguration holds configuration for Debugging related features. + Debugging DebuggingConfiguration + // GenericComponentConfiguration holds configuration for GenericComponent + // related features both in cloud controller manager and kube-controller manager. + GenericComponent GenericComponentConfiguration + // KubeCloudSharedConfiguration holds configuration for shared related features + // both in cloud controller manager and kube-controller manager. + KubeCloudShared KubeCloudSharedConfiguration + + // AttachDetachControllerConfiguration holds configuration for + // AttachDetachController related features. + AttachDetachController AttachDetachControllerConfiguration + // CSRSigningControllerConfiguration holds configuration for + // CSRSigningController related features. + CSRSigningController CSRSigningControllerConfiguration + // DaemonSetControllerConfiguration holds configuration for DaemonSetController + // related features. + DaemonSetController DaemonSetControllerConfiguration + // DeploymentControllerConfiguration holds configuration for + // DeploymentController related features. + DeploymentController DeploymentControllerConfiguration + // DeprecatedControllerConfiguration holds configuration for some deprecated + // features. + DeprecatedController DeprecatedControllerConfiguration + // EndPointControllerConfiguration holds configuration for EndPointController + // related features. + EndPointController EndPointControllerConfiguration + // GarbageCollectorControllerConfiguration holds configuration for + // GarbageCollectorController related features. + GarbageCollectorController GarbageCollectorControllerConfiguration + // HPAControllerConfiguration holds configuration for HPAController related features. + HPAController HPAControllerConfiguration + // JobControllerConfiguration holds configuration for JobController related features. + JobController JobControllerConfiguration + // NamespaceControllerConfiguration holds configuration for + // NamespaceController related features. + NamespaceController NamespaceControllerConfiguration + // NodeIpamControllerConfiguration holds configuration for NodeIpamController + // related features. 
+ NodeIpamController NodeIpamControllerConfiguration + // NodeLifecycleControllerConfiguration holds configuration for + // NodeLifecycleController related features. + NodeLifecycleController NodeLifecycleControllerConfiguration + // PersistentVolumeBinderControllerConfiguration holds configuration for + // PersistentVolumeBinderController related features. + PersistentVolumeBinderController PersistentVolumeBinderControllerConfiguration + // PodGCControllerConfiguration holds configuration for PodGCController + // related features. + PodGCController PodGCControllerConfiguration + // ReplicaSetControllerConfiguration holds configuration for ReplicaSet related features. + ReplicaSetController ReplicaSetControllerConfiguration + // ReplicationControllerConfiguration holds configuration for + // ReplicationController related features. + ReplicationController ReplicationControllerConfiguration + // ResourceQuotaControllerConfiguration holds configuration for + // ResourceQuotaController related features. + ResourceQuotaController ResourceQuotaControllerConfiguration + // SAControllerConfiguration holds configuration for ServiceAccountController + // related features. + SAController SAControllerConfiguration + // ServiceControllerConfiguration holds configuration for ServiceController + // related features. + ServiceController ServiceControllerConfiguration + // Controllers is the list of controllers to enable or disable // '*' means "all enabled by default controllers" // 'foo' means "enable 'foo'" // '-foo' means "disable 'foo'" // first item for a particular name wins Controllers []string + // externalCloudVolumePlugin specifies the plugin to use when cloudProvider is "external". + // It is currently used by the in repo cloud providers to handle node and volume control in the KCM. + ExternalCloudVolumePlugin string +} +type CloudProviderConfiguration struct { + // Name is the provider for cloud services. + Name string + // cloudConfigFile is the path to the cloud provider configuration file. + CloudConfigFile string +} + +type DebuggingConfiguration struct { + // enableProfiling enables profiling via web interface host:port/debug/pprof/ + EnableProfiling bool + // EnableContentionProfiling enables lock contention profiling, if + // EnableProfiling is true. + EnableContentionProfiling bool +} + +type GenericComponentConfiguration struct { + // minResyncPeriod is the resync period in reflectors; will be random between + // minResyncPeriod and 2*minResyncPeriod. + MinResyncPeriod metav1.Duration + // contentType is contentType of requests sent to apiserver. + ContentType string + // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver. + KubeAPIQPS float32 + // kubeAPIBurst is the burst to use while talking with kubernetes apiserver. + KubeAPIBurst int32 + // How long to wait between starting controller managers + ControllerStartInterval metav1.Duration + // leaderElection defines the configuration of leader election client. + LeaderElection LeaderElectionConfiguration +} + +type KubeCloudSharedConfiguration struct { // port is the port that the controller-manager's http service runs on. Port int32 // address is the IP address to serve on (set to 0.0.0.0 for all interfaces). @@ -178,77 +280,103 @@ type KubeControllerManagerConfiguration struct { // useServiceAccountCredentials indicates whether controllers should be run with // individual service account credentials. UseServiceAccountCredentials bool - // cloudProvider is the provider for cloud services. 
- CloudProvider string - // cloudConfigFile is the path to the cloud provider configuration file. - CloudConfigFile string - // externalCloudVolumePlugin specifies the plugin to use when cloudProvider is "external". - // It is currently used by the in repo cloud providers to handle node and volume control in the KCM. - ExternalCloudVolumePlugin string // run with untagged cloud instances AllowUntaggedCloud bool - // concurrentEndpointSyncs is the number of endpoint syncing operations - // that will be done concurrently. Larger number = faster endpoint updating, - // but more CPU (and network) load. - ConcurrentEndpointSyncs int32 - // concurrentRSSyncs is the number of replica sets that are allowed to sync - // concurrently. Larger number = more responsive replica management, but more - // CPU (and network) load. - ConcurrentRSSyncs int32 - // concurrentRCSyncs is the number of replication controllers that are - // allowed to sync concurrently. Larger number = more responsive replica - // management, but more CPU (and network) load. - ConcurrentRCSyncs int32 - // concurrentServiceSyncs is the number of services that are - // allowed to sync concurrently. Larger number = more responsive service - // management, but more CPU (and network) load. - ConcurrentServiceSyncs int32 - // concurrentResourceQuotaSyncs is the number of resource quotas that are - // allowed to sync concurrently. Larger number = more responsive quota - // management, but more CPU (and network) load. - ConcurrentResourceQuotaSyncs int32 - // concurrentDeploymentSyncs is the number of deployment objects that are - // allowed to sync concurrently. Larger number = more responsive deployments, - // but more CPU (and network) load. - ConcurrentDeploymentSyncs int32 - // concurrentDaemonSetSyncs is the number of daemonset objects that are - // allowed to sync concurrently. Larger number = more responsive daemonset, - // but more CPU (and network) load. - ConcurrentDaemonSetSyncs int32 - // concurrentJobSyncs is the number of job objects that are - // allowed to sync concurrently. Larger number = more responsive jobs, - // but more CPU (and network) load. - ConcurrentJobSyncs int32 - // concurrentNamespaceSyncs is the number of namespace objects that are - // allowed to sync concurrently. - ConcurrentNamespaceSyncs int32 - // concurrentSATokenSyncs is the number of service account token syncing operations - // that will be done concurrently. - ConcurrentSATokenSyncs int32 - // lookupCacheSizeForRC is the size of lookup cache for replication controllers. - // Larger number = more responsive replica management, but more MEM load. + // routeReconciliationPeriod is the period for reconciling routes created for Nodes by cloud provider.. + RouteReconciliationPeriod metav1.Duration + // nodeMonitorPeriod is the period for syncing NodeStatus in NodeController. + NodeMonitorPeriod metav1.Duration + // clusterName is the instance prefix for the cluster. + ClusterName string + // clusterCIDR is CIDR Range for Pods in cluster. + ClusterCIDR string + // AllocateNodeCIDRs enables CIDRs for Pods to be allocated and, if + // ConfigureCloudRoutes is true, to be set on the cloud provider. + AllocateNodeCIDRs bool + // CIDRAllocatorType determines what kind of pod CIDR allocator will be used. + CIDRAllocatorType string + // configureCloudRoutes enables CIDRs allocated with allocateNodeCIDRs + // to be configured on the cloud provider. 
+ ConfigureCloudRoutes bool + // serviceAccountKeyFile is the filename containing a PEM-encoded private RSA key + // used to sign service account tokens. + ServiceAccountKeyFile string // nodeSyncPeriod is the period for syncing nodes from cloudprovider. Longer // periods will result in fewer calls to cloud provider, but may delay addition // of new nodes to cluster. NodeSyncPeriod metav1.Duration - // routeReconciliationPeriod is the period for reconciling routes created for Nodes by cloud provider.. - RouteReconciliationPeriod metav1.Duration - // resourceQuotaSyncPeriod is the period for syncing quota usage status - // in the system. - ResourceQuotaSyncPeriod metav1.Duration - // namespaceSyncPeriod is the period for syncing namespace life-cycle - // updates. - NamespaceSyncPeriod metav1.Duration - // pvClaimBinderSyncPeriod is the period for syncing persistent volumes - // and persistent volume claims. - PVClaimBinderSyncPeriod metav1.Duration - // minResyncPeriod is the resync period in reflectors; will be random between - // minResyncPeriod and 2*minResyncPeriod. - MinResyncPeriod metav1.Duration - // terminatedPodGCThreshold is the number of terminated pods that can exist - // before the terminated pod garbage collector starts deleting terminated pods. - // If <= 0, the terminated pod garbage collector is disabled. - TerminatedPodGCThreshold int32 +} + +type AttachDetachControllerConfiguration struct { + // Reconciler runs a periodic loop to reconcile the desired state of the world with + // the actual state of the world by triggering attach detach operations. + // This flag enables or disables reconcile. Is false by default, and thus enabled. + DisableAttachDetachReconcilerSync bool + // ReconcilerSyncLoopPeriod is the amount of time the reconciler sync states loop + // waits between successive executions. Is set to 5 sec by default. + ReconcilerSyncLoopPeriod metav1.Duration +} + +type CSRSigningControllerConfiguration struct { + // clusterSigningCertFile is the filename containing a PEM-encoded + // X509 CA certificate used to issue cluster-scoped certificates + ClusterSigningCertFile string + // clusterSigningKeyFile is the filename containing a PEM-encoded + // RSA or ECDSA private key used to issue cluster-scoped certificates + ClusterSigningKeyFile string + // clusterSigningDuration is the length of duration signed certificates + // will be given. + ClusterSigningDuration metav1.Duration +} + +type DaemonSetControllerConfiguration struct { + // concurrentDaemonSetSyncs is the number of daemonset objects that are + // allowed to sync concurrently. Larger number = more responsive daemonset, + // but more CPU (and network) load. + ConcurrentDaemonSetSyncs int32 +} + +type DeploymentControllerConfiguration struct { + // concurrentDeploymentSyncs is the number of deployment objects that are + // allowed to sync concurrently. Larger number = more responsive deployments, + // but more CPU (and network) load. + ConcurrentDeploymentSyncs int32 + // deploymentControllerSyncPeriod is the period for syncing the deployments. + DeploymentControllerSyncPeriod metav1.Duration +} + +type DeprecatedControllerConfiguration struct { + // DEPRECATED: deletingPodsQps is the number of nodes per second on which pods are deleted in + // case of node failure. + DeletingPodsQps float32 + // DEPRECATED: deletingPodsBurst is the number of nodes on which pods are bursty deleted in + // case of node failure. For more details look into RateLimiter.
+ DeletingPodsBurst int32 + // registerRetryCount is the number of retries for initial node registration. + // Retry interval equals node-sync-period. + RegisterRetryCount int32 +} + +type EndPointControllerConfiguration struct { + // concurrentEndpointSyncs is the number of endpoint syncing operations + // that will be done concurrently. Larger number = faster endpoint updating, + // but more CPU (and network) load. + ConcurrentEndpointSyncs int32 +} + +type GarbageCollectorControllerConfiguration struct { + // enables the generic garbage collector. MUST be synced with the + // corresponding flag of the kube-apiserver. WARNING: the generic garbage + // collector is an alpha feature. + EnableGarbageCollector bool + // concurrentGCSyncs is the number of garbage collector workers that are + // allowed to sync concurrently. + ConcurrentGCSyncs int32 + // gcIgnoredResources is the list of GroupResources that garbage collection should ignore. + GCIgnoredResources []GroupResource +} + +type HPAControllerConfiguration struct { // horizontalPodAutoscalerSyncPeriod is the period for syncing the number of // pods in horizontal pod autoscaler. HorizontalPodAutoscalerSyncPeriod metav1.Duration @@ -259,108 +387,113 @@ type KubeControllerManagerConfiguration struct { // horizontalPodAutoscalerTolerance is the tolerance for when // resource usage suggests upscaling/downscaling HorizontalPodAutoscalerTolerance float64 - // deploymentControllerSyncPeriod is the period for syncing the deployments. - DeploymentControllerSyncPeriod metav1.Duration - // podEvictionTimeout is the grace period for deleting pods on failed nodes. - PodEvictionTimeout metav1.Duration - // DEPRECATED: deletingPodsQps is the number of nodes per second on which pods are deleted in - // case of node failure. - DeletingPodsQps float32 - // DEPRECATED: deletingPodsBurst is the number of nodes on which pods are bursty deleted in - // case of node failure. For more details look into RateLimiter. - DeletingPodsBurst int32 + // HorizontalPodAutoscalerUseRESTClients causes the HPA controller to use REST clients + // through the kube-aggregator when enabled, instead of using the legacy metrics client + // through the API server proxy. + HorizontalPodAutoscalerUseRESTClients bool +} + +type JobControllerConfiguration struct { + // concurrentJobSyncs is the number of job objects that are + // allowed to sync concurrently. Larger number = more responsive jobs, + // but more CPU (and network) load. + ConcurrentJobSyncs int32 +} + +type NamespaceControllerConfiguration struct { + // namespaceSyncPeriod is the period for syncing namespace life-cycle + // updates. + NamespaceSyncPeriod metav1.Duration + // concurrentNamespaceSyncs is the number of namespace objects that are + // allowed to sync concurrently. + ConcurrentNamespaceSyncs int32 +} + +type NodeIpamControllerConfiguration struct { + // serviceCIDR is CIDR Range for Services in cluster. + ServiceCIDR string + // NodeCIDRMaskSize is the mask size for node cidr in cluster. + NodeCIDRMaskSize int32 +} + +type NodeLifecycleControllerConfiguration struct { + // If set to true enables NoExecute Taints and will evict all not-tolerating + // Pod running on Nodes tainted with this kind of Taints. 
+ EnableTaintManager bool + // nodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is healthy + NodeEvictionRate float32 + // secondaryNodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is unhealthy + SecondaryNodeEvictionRate float32 + // nodeStartupGracePeriod is the amount of time which we allow starting a node to + // be unresponsive before marking it unhealthy. + NodeStartupGracePeriod metav1.Duration // nodeMontiorGracePeriod is the amount of time which we allow a running node to be // unresponsive before marking it unhealthy. Must be N times more than kubelet's // nodeStatusUpdateFrequency, where N means number of retries allowed for kubelet // to post node status. NodeMonitorGracePeriod metav1.Duration - // registerRetryCount is the number of retries for initial node registration. - // Retry interval equals node-sync-period. - RegisterRetryCount int32 - // nodeStartupGracePeriod is the amount of time which we allow starting a node to - // be unresponsive before marking it unhealthy. - NodeStartupGracePeriod metav1.Duration - // nodeMonitorPeriod is the period for syncing NodeStatus in NodeController. - NodeMonitorPeriod metav1.Duration - // serviceAccountKeyFile is the filename containing a PEM-encoded private RSA key - // used to sign service account tokens. - ServiceAccountKeyFile string - // clusterSigningCertFile is the filename containing a PEM-encoded - // X509 CA certificate used to issue cluster-scoped certificates - ClusterSigningCertFile string - // clusterSigningCertFile is the filename containing a PEM-encoded - // RSA or ECDSA private key used to issue cluster-scoped certificates - ClusterSigningKeyFile string - // clusterSigningDuration is the length of duration signed certificates - // will be given. - ClusterSigningDuration metav1.Duration - // enableProfiling enables profiling via web interface host:port/debug/pprof/ - EnableProfiling bool - // enableContentionProfiling enables lock contention profiling, if enableProfiling is true. - EnableContentionProfiling bool - // clusterName is the instance prefix for the cluster. - ClusterName string - // clusterCIDR is CIDR Range for Pods in cluster. - ClusterCIDR string - // serviceCIDR is CIDR Range for Services in cluster. - ServiceCIDR string - // NodeCIDRMaskSize is the mask size for node cidr in cluster. - NodeCIDRMaskSize int32 - // AllocateNodeCIDRs enables CIDRs for Pods to be allocated and, if - // ConfigureCloudRoutes is true, to be set on the cloud provider. - AllocateNodeCIDRs bool - // CIDRAllocatorType determines what kind of pod CIDR allocator will be used. - CIDRAllocatorType string - // configureCloudRoutes enables CIDRs allocated with allocateNodeCIDRs - // to be configured on the cloud provider. - ConfigureCloudRoutes bool - // rootCAFile is the root certificate authority will be included in service - // account's token secret. This must be a valid PEM-encoded CA bundle. - RootCAFile string - // contentType is contentType of requests sent to apiserver. - ContentType string - // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver. - KubeAPIQPS float32 - // kubeAPIBurst is the burst to use while talking with kubernetes apiserver. - KubeAPIBurst int32 - // leaderElection defines the configuration of leader election client. - LeaderElection LeaderElectionConfiguration - // volumeConfiguration holds configuration for volume related features. 
- VolumeConfiguration VolumeConfiguration - // How long to wait between starting controller managers - ControllerStartInterval metav1.Duration - // enables the generic garbage collector. MUST be synced with the - // corresponding flag of the kube-apiserver. WARNING: the generic garbage - // collector is an alpha feature. - EnableGarbageCollector bool - // concurrentGCSyncs is the number of garbage collector workers that are - // allowed to sync concurrently. - ConcurrentGCSyncs int32 - // gcIgnoredResources is the list of GroupResources that garbage collection should ignore. - GCIgnoredResources []GroupResource - // nodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is healthy - NodeEvictionRate float32 - // secondaryNodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is unhealthy - SecondaryNodeEvictionRate float32 + // podEvictionTimeout is the grace period for deleting pods on failed nodes. + PodEvictionTimeout metav1.Duration // secondaryNodeEvictionRate is implicitly overridden to 0 for clusters smaller than or equal to largeClusterSizeThreshold LargeClusterSizeThreshold int32 // Zone is treated as unhealthy in nodeEvictionRate and secondaryNodeEvictionRate when at least // unhealthyZoneThreshold (no less than 3) of Nodes in the zone are NotReady UnhealthyZoneThreshold float32 - // Reconciler runs a periodic loop to reconcile the desired state of the with - // the actual state of the world by triggering attach detach operations. - // This flag enables or disables reconcile. Is false by default, and thus enabled. - DisableAttachDetachReconcilerSync bool - // ReconcilerSyncLoopPeriod is the amount of time the reconciler sync states loop - // wait between successive executions. Is set to 5 sec by default. - ReconcilerSyncLoopPeriod metav1.Duration - // If set to true enables NoExecute Taints and will evict all not-tolerating - // Pod running on Nodes tainted with this kind of Taints. - EnableTaintManager bool - // HorizontalPodAutoscalerUseRESTClients causes the HPA controller to use REST clients - // through the kube-aggregator when enabled, instead of using the legacy metrics client - // through the API server proxy. - HorizontalPodAutoscalerUseRESTClients bool +} + +type PersistentVolumeBinderControllerConfiguration struct { + // pvClaimBinderSyncPeriod is the period for syncing persistent volumes + // and persistent volume claims. + PVClaimBinderSyncPeriod metav1.Duration + // volumeConfiguration holds configuration for volume related features. + VolumeConfiguration VolumeConfiguration +} + +type PodGCControllerConfiguration struct { + // terminatedPodGCThreshold is the number of terminated pods that can exist + // before the terminated pod garbage collector starts deleting terminated pods. + // If <= 0, the terminated pod garbage collector is disabled. + TerminatedPodGCThreshold int32 +} + +type ReplicaSetControllerConfiguration struct { + // concurrentRSSyncs is the number of replica sets that are allowed to sync + // concurrently. Larger number = more responsive replica management, but more + // CPU (and network) load. + ConcurrentRSSyncs int32 +} + +type ReplicationControllerConfiguration struct { + // concurrentRCSyncs is the number of replication controllers that are + // allowed to sync concurrently. Larger number = more responsive replica + // management, but more CPU (and network) load. 
+ ConcurrentRCSyncs int32
+}
+
+type ResourceQuotaControllerConfiguration struct {
+ // resourceQuotaSyncPeriod is the period for syncing quota usage status
+ // in the system.
+ ResourceQuotaSyncPeriod metav1.Duration
+ // concurrentResourceQuotaSyncs is the number of resource quotas that are
+ // allowed to sync concurrently. Larger number = more responsive quota
+ // management, but more CPU (and network) load.
+ ConcurrentResourceQuotaSyncs int32
+}
+
+type SAControllerConfiguration struct {
+ // concurrentSATokenSyncs is the number of service account token syncing operations
+ // that will be done concurrently.
+ ConcurrentSATokenSyncs int32
+ // rootCAFile is the root certificate authority that will be included in the service
+ // account's token secret. This must be a valid PEM-encoded CA bundle.
+ RootCAFile string
+}
+
+type ServiceControllerConfiguration struct {
+ // concurrentServiceSyncs is the number of services that are
+ // allowed to sync concurrently. Larger number = more responsive service
+ // management, but more CPU (and network) load.
+ ConcurrentServiceSyncs int32
}

// VolumeConfiguration contains *all* enumerated flags meant to configure all volume
diff --git a/pkg/apis/componentconfig/v1alpha1/types.go b/pkg/apis/componentconfig/v1alpha1/types.go
index e694f193b2..3fd65fec65 100644
--- a/pkg/apis/componentconfig/v1alpha1/types.go
+++ b/pkg/apis/componentconfig/v1alpha1/types.go
@@ -206,13 +206,116 @@ type GroupResource struct {
type KubeControllerManagerConfiguration struct {
metav1.TypeMeta `json:",inline"`
+ // CloudProviderConfiguration holds configuration for CloudProvider related features.
+ CloudProvider CloudProviderConfiguration
+ // DebuggingConfiguration holds configuration for Debugging related features.
+ Debugging DebuggingConfiguration
+ // GenericComponentConfiguration holds configuration for GenericComponent
+ // related features both in cloud controller manager and kube-controller manager.
+ GenericComponent GenericComponentConfiguration
+ // KubeCloudSharedConfiguration holds configuration for shared related features
+ // both in cloud controller manager and kube-controller manager.
+ KubeCloudShared KubeCloudSharedConfiguration
+
+ // AttachDetachControllerConfiguration holds configuration for
+ // AttachDetachController related features.
+ AttachDetachController AttachDetachControllerConfiguration
+ // CSRSigningControllerConfiguration holds configuration for
+ // CSRSigningController related features.
+ CSRSigningController CSRSigningControllerConfiguration
+ // DaemonSetControllerConfiguration holds configuration for DaemonSetController
+ // related features.
+ DaemonSetController DaemonSetControllerConfiguration
+ // DeploymentControllerConfiguration holds configuration for
+ // DeploymentController related features.
+ DeploymentController DeploymentControllerConfiguration
+ // DeprecatedControllerConfiguration holds configuration for some deprecated
+ // features.
+ DeprecatedController DeprecatedControllerConfiguration
+ // EndPointControllerConfiguration holds configuration for EndPointController
+ // related features.
+ EndPointController EndPointControllerConfiguration
+ // GarbageCollectorControllerConfiguration holds configuration for
+ // GarbageCollectorController related features.
+ GarbageCollectorController GarbageCollectorControllerConfiguration
+ // HPAControllerConfiguration holds configuration for HPAController related features.
+ HPAController HPAControllerConfiguration + // JobControllerConfiguration holds configuration for JobController related features. + JobController JobControllerConfiguration + // NamespaceControllerConfiguration holds configuration for NamespaceController + // related features. + NamespaceController NamespaceControllerConfiguration + // NodeIpamControllerConfiguration holds configuration for NodeIpamController + // related features. + NodeIpamController NodeIpamControllerConfiguration + // NodeLifecycleControllerConfiguration holds configuration for + // NodeLifecycleController related features. + NodeLifecycleController NodeLifecycleControllerConfiguration + // PersistentVolumeBinderControllerConfiguration holds configuration for + // PersistentVolumeBinderController related features. + PersistentVolumeBinderController PersistentVolumeBinderControllerConfiguration + // PodGCControllerConfiguration holds configuration for PodGCController + // related features. + PodGCController PodGCControllerConfiguration + // ReplicaSetControllerConfiguration holds configuration for ReplicaSet related features. + ReplicaSetController ReplicaSetControllerConfiguration + // ReplicationControllerConfiguration holds configuration for + // ReplicationController related features. + ReplicationController ReplicationControllerConfiguration + // ResourceQuotaControllerConfiguration holds configuration for + // ResourceQuotaController related features. + ResourceQuotaController ResourceQuotaControllerConfiguration + // SAControllerConfiguration holds configuration for ServiceAccountController + // related features. + SAController SAControllerConfiguration + // ServiceControllerConfiguration holds configuration for ServiceController + // related features. + ServiceController ServiceControllerConfiguration + // Controllers is the list of controllers to enable or disable // '*' means "all enabled by default controllers" // 'foo' means "enable 'foo'" // '-foo' means "disable 'foo'" // first item for a particular name wins Controllers []string `json:"controllers"` + // externalCloudVolumePlugin specifies the plugin to use when cloudProvider is "external". + // It is currently used by the in repo cloud providers to handle node and volume control in the KCM. + ExternalCloudVolumePlugin string `json:"externalCloudVolumePlugin"` +} +type CloudProviderConfiguration struct { + // Name is the provider for cloud services. + Name string `json:"cloudProvider"` + // cloudConfigFile is the path to the cloud provider configuration file. + CloudConfigFile string `json:"cloudConfigFile"` +} + +type DebuggingConfiguration struct { + // enableProfiling enables profiling via web interface host:port/debug/pprof/ + EnableProfiling bool `json:"enableProfiling"` + // EnableContentionProfiling enables lock contention profiling, if + // EnableProfiling is true. + EnableContentionProfiling bool `json:"enableContentionProfiling"` +} + +type GenericComponentConfiguration struct { + + // minResyncPeriod is the resync period in reflectors; will be random between + // minResyncPeriod and 2*minResyncPeriod. + MinResyncPeriod metav1.Duration `json:"minResyncPeriod"` + // contentType is contentType of requests sent to apiserver. + ContentType string `json:"contentType"` + // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver. + KubeAPIQPS float32 `json:"kubeAPIQPS"` + // kubeAPIBurst is the burst to use while talking with kubernetes apiserver. 
+ KubeAPIBurst int32 `json:"kubeAPIBurst"` + // How long to wait between starting controller managers + ControllerStartInterval metav1.Duration `json:"controllerStartInterval"` + // leaderElection defines the configuration of leader election client. + LeaderElection LeaderElectionConfiguration `json:"leaderElection"` +} + +type KubeCloudSharedConfiguration struct { // port is the port that the controller-manager's http service runs on. Port int32 `json:"port"` // address is the IP address to serve on (set to 0.0.0.0 for all interfaces). @@ -220,77 +323,103 @@ type KubeControllerManagerConfiguration struct { // useServiceAccountCredentials indicates whether controllers should be run with // individual service account credentials. UseServiceAccountCredentials bool `json:"useServiceAccountCredentials"` - // cloudProvider is the provider for cloud services. - CloudProvider string `json:"cloudProvider"` - // cloudConfigFile is the path to the cloud provider configuration file. - CloudConfigFile string `json:"cloudConfigFile"` - // externalCloudVolumePlugin specifies the plugin to use when cloudProvider is "external". - // It is currently used by the in repo cloud providers to handle node and volume control in the KCM. - ExternalCloudVolumePlugin string `json:"externalCloudVolumePlugin"` // run with untagged cloud instances AllowUntaggedCloud bool `json:"allowUntaggedCloud"` - // concurrentEndpointSyncs is the number of endpoint syncing operations - // that will be done concurrently. Larger number = faster endpoint updating, - // but more CPU (and network) load. - ConcurrentEndpointSyncs int32 `json:"concurrentEndpointSyncs"` - // concurrentRSSyncs is the number of replica sets that are allowed to sync - // concurrently. Larger number = more responsive replica management, but more - // CPU (and network) load. - ConcurrentRSSyncs int32 `json:"concurrentRSSyncs"` - // concurrentRCSyncs is the number of replication controllers that are - // allowed to sync concurrently. Larger number = more responsive replica - // management, but more CPU (and network) load. - ConcurrentRCSyncs int32 `json:"concurrentRCSyncs"` - // concurrentServiceSyncs is the number of services that are - // allowed to sync concurrently. Larger number = more responsive service - // management, but more CPU (and network) load. - ConcurrentServiceSyncs int32 `json:"concurrentServiceSyncs"` - // concurrentResourceQuotaSyncs is the number of resource quotas that are - // allowed to sync concurrently. Larger number = more responsive quota - // management, but more CPU (and network) load. - ConcurrentResourceQuotaSyncs int32 `json:"concurrentResourceQuotaSyncs"` - // concurrentDeploymentSyncs is the number of deployment objects that are - // allowed to sync concurrently. Larger number = more responsive deployments, - // but more CPU (and network) load. - ConcurrentDeploymentSyncs int32 `json:"concurrentDeploymentSyncs"` - // concurrentDaemonSetSyncs is the number of daemonset objects that are - // allowed to sync concurrently. Larger number = more responsive daemonset, - // but more CPU (and network) load. - ConcurrentDaemonSetSyncs int32 `json:"concurrentDaemonSetSyncs"` - // concurrentJobSyncs is the number of job objects that are - // allowed to sync concurrently. Larger number = more responsive jobs, - // but more CPU (and network) load. - ConcurrentJobSyncs int32 `json:"concurrentJobSyncs"` - // concurrentNamespaceSyncs is the number of namespace objects that are - // allowed to sync concurrently. 
- ConcurrentNamespaceSyncs int32 `json:"concurrentNamespaceSyncs"`
- // concurrentSATokenSyncs is the number of service account token syncing operations
- // that will be done concurrently.
- ConcurrentSATokenSyncs int32 `json:"concurrentSATokenSyncs"`
- // lookupCacheSizeForRC is the size of lookup cache for replication controllers.
- // Larger number = more responsive replica management, but more MEM load.
+ // routeReconciliationPeriod is the period for reconciling routes created for Nodes by cloud provider.
+ RouteReconciliationPeriod metav1.Duration `json:"routeReconciliationPeriod"`
+ // nodeMonitorPeriod is the period for syncing NodeStatus in NodeController.
+ NodeMonitorPeriod metav1.Duration `json:"nodeMonitorPeriod"`
+ // clusterName is the instance prefix for the cluster.
+ ClusterName string `json:"clusterName"`
+ // clusterCIDR is CIDR Range for Pods in cluster.
+ ClusterCIDR string `json:"clusterCIDR"`
+ // AllocateNodeCIDRs enables CIDRs for Pods to be allocated and, if
+ // ConfigureCloudRoutes is true, to be set on the cloud provider.
+ AllocateNodeCIDRs bool `json:"allocateNodeCIDRs"`
+ // CIDRAllocatorType determines what kind of pod CIDR allocator will be used.
+ CIDRAllocatorType string `json:"cIDRAllocatorType"`
+ // configureCloudRoutes enables CIDRs allocated with allocateNodeCIDRs
+ // to be configured on the cloud provider.
+ ConfigureCloudRoutes *bool `json:"configureCloudRoutes"`
+ // serviceAccountKeyFile is the filename containing a PEM-encoded private RSA key
+ // used to sign service account tokens.
+ ServiceAccountKeyFile string `json:"serviceAccountKeyFile"`
// nodeSyncPeriod is the period for syncing nodes from cloudprovider. Longer
// periods will result in fewer calls to cloud provider, but may delay addition
// of new nodes to cluster.
NodeSyncPeriod metav1.Duration `json:"nodeSyncPeriod"`
- // routeReconciliationPeriod is the period for reconciling routes created for Nodes by cloud provider..
- RouteReconciliationPeriod metav1.Duration `json:"routeReconciliationPeriod"`
- // resourceQuotaSyncPeriod is the period for syncing quota usage status
- // in the system.
- ResourceQuotaSyncPeriod metav1.Duration `json:"resourceQuotaSyncPeriod"`
- // namespaceSyncPeriod is the period for syncing namespace life-cycle
- // updates.
- NamespaceSyncPeriod metav1.Duration `json:"namespaceSyncPeriod"`
- // pvClaimBinderSyncPeriod is the period for syncing persistent volumes
- // and persistent volume claims.
- PVClaimBinderSyncPeriod metav1.Duration `json:"pVClaimBinderSyncPeriod"`
- // minResyncPeriod is the resync period in reflectors; will be random between
- // minResyncPeriod and 2*minResyncPeriod.
- MinResyncPeriod metav1.Duration `json:"minResyncPeriod"`
- // terminatedPodGCThreshold is the number of terminated pods that can exist
- // before the terminated pod garbage collector starts deleting terminated pods.
- // If <= 0, the terminated pod garbage collector is disabled.
- TerminatedPodGCThreshold int32 `json:"terminatedPodGCThreshold"`
+}
+
+type AttachDetachControllerConfiguration struct {
+ // Reconciler runs a periodic loop to reconcile the desired state of the world with
+ // the actual state of the world by triggering attach detach operations.
+ // This flag enables or disables reconcile. Is false by default, and thus enabled.
+ DisableAttachDetachReconcilerSync bool `json:"disableAttachDetachReconcilerSync"`
+ // ReconcilerSyncLoopPeriod is the amount of time the reconciler sync states loop
+ // waits between successive executions. Is set to 5 sec by default.
+ ReconcilerSyncLoopPeriod metav1.Duration `json:"reconcilerSyncLoopPeriod"`
+}
+
+type CSRSigningControllerConfiguration struct {
+ // clusterSigningCertFile is the filename containing a PEM-encoded
+ // X509 CA certificate used to issue cluster-scoped certificates
+ ClusterSigningCertFile string `json:"clusterSigningCertFile"`
+ // clusterSigningKeyFile is the filename containing a PEM-encoded
+ // RSA or ECDSA private key used to issue cluster-scoped certificates
+ ClusterSigningKeyFile string `json:"clusterSigningKeyFile"`
+ // clusterSigningDuration is the length of duration signed certificates
+ // will be given.
+ ClusterSigningDuration metav1.Duration `json:"clusterSigningDuration"`
+}
+
+type DaemonSetControllerConfiguration struct {
+ // concurrentDaemonSetSyncs is the number of daemonset objects that are
+ // allowed to sync concurrently. Larger number = more responsive daemonset,
+ // but more CPU (and network) load.
+ ConcurrentDaemonSetSyncs int32 `json:"concurrentDaemonSetSyncs"`
+}
+
+type DeploymentControllerConfiguration struct {
+ // concurrentDeploymentSyncs is the number of deployment objects that are
+ // allowed to sync concurrently. Larger number = more responsive deployments,
+ // but more CPU (and network) load.
+ ConcurrentDeploymentSyncs int32 `json:"concurrentDeploymentSyncs"`
+ // deploymentControllerSyncPeriod is the period for syncing the deployments.
+ DeploymentControllerSyncPeriod metav1.Duration `json:"deploymentControllerSyncPeriod"`
+}
+
+type DeprecatedControllerConfiguration struct {
+ // DEPRECATED: deletingPodsQps is the number of nodes per second on which pods are deleted in
+ // case of node failure.
+ DeletingPodsQps float32 `json:"deletingPodsQps"`
+ // DEPRECATED: deletingPodsBurst is the number of nodes on which pods are bursty deleted in
+ // case of node failure. For more details look into RateLimiter.
+ DeletingPodsBurst int32 `json:"deletingPodsBurst"`
+ // registerRetryCount is the number of retries for initial node registration.
+ // Retry interval equals node-sync-period.
+ RegisterRetryCount int32 `json:"registerRetryCount"`
+}
+
+type EndPointControllerConfiguration struct {
+ // concurrentEndpointSyncs is the number of endpoint syncing operations
+ // that will be done concurrently. Larger number = faster endpoint updating,
+ // but more CPU (and network) load.
+ ConcurrentEndpointSyncs int32 `json:"concurrentEndpointSyncs"`
+}
+
+type GarbageCollectorControllerConfiguration struct {
+ // enables the generic garbage collector. MUST be synced with the
+ // corresponding flag of the kube-apiserver. WARNING: the generic garbage
+ // collector is an alpha feature.
+ EnableGarbageCollector *bool `json:"enableGarbageCollector"`
+ // concurrentGCSyncs is the number of garbage collector workers that are
+ // allowed to sync concurrently.
+ ConcurrentGCSyncs int32 `json:"concurrentGCSyncs"`
+ // gcIgnoredResources is the list of GroupResources that garbage collection should ignore.
+ GCIgnoredResources []GroupResource `json:"gCIgnoredResources"`
+}
+
+type HPAControllerConfiguration struct {
// horizontalPodAutoscalerSyncPeriod is the period for syncing the number of
// pods in horizontal pod autoscaler.
HorizontalPodAutoscalerSyncPeriod metav1.Duration `json:"horizontalPodAutoscalerSyncPeriod"` @@ -301,108 +430,113 @@ type KubeControllerManagerConfiguration struct { // horizontalPodAutoscalerTolerance is the tolerance for when // resource usage suggests upscaling/downscaling HorizontalPodAutoscalerTolerance float64 `json:"horizontalPodAutoscalerTolerance"` - // deploymentControllerSyncPeriod is the period for syncing the deployments. - DeploymentControllerSyncPeriod metav1.Duration `json:"deploymentControllerSyncPeriod"` - // podEvictionTimeout is the grace period for deleting pods on failed nodes. - PodEvictionTimeout metav1.Duration `json:"podEvictionTimeout"` - // DEPRECATED: deletingPodsQps is the number of nodes per second on which pods are deleted in - // case of node failure. - DeletingPodsQps float32 `json:"deletingPodsQps"` - // DEPRECATED: deletingPodsBurst is the number of nodes on which pods are bursty deleted in - // case of node failure. For more details look into RateLimiter. - DeletingPodsBurst int32 `json:"deletingPodsBurst"` + // HorizontalPodAutoscalerUseRESTClients causes the HPA controller to use REST clients + // through the kube-aggregator when enabled, instead of using the legacy metrics client + // through the API server proxy. + HorizontalPodAutoscalerUseRESTClients *bool `json:"horizontalPodAutoscalerUseRESTClients"` +} + +type JobControllerConfiguration struct { + // concurrentJobSyncs is the number of job objects that are + // allowed to sync concurrently. Larger number = more responsive jobs, + // but more CPU (and network) load. + ConcurrentJobSyncs int32 +} + +type NamespaceControllerConfiguration struct { + // namespaceSyncPeriod is the period for syncing namespace life-cycle + // updates. + NamespaceSyncPeriod metav1.Duration `json:"namespaceSyncPeriod"` + // concurrentNamespaceSyncs is the number of namespace objects that are + // allowed to sync concurrently. + ConcurrentNamespaceSyncs int32 `json:"concurrentNamespaceSyncs"` +} + +type NodeIpamControllerConfiguration struct { + // serviceCIDR is CIDR Range for Services in cluster. + ServiceCIDR string `json:"serviceCIDR"` + // NodeCIDRMaskSize is the mask size for node cidr in cluster. + NodeCIDRMaskSize int32 `json:"nodeCIDRMaskSize"` +} + +type NodeLifecycleControllerConfiguration struct { + // If set to true enables NoExecute Taints and will evict all not-tolerating + // Pod running on Nodes tainted with this kind of Taints. + EnableTaintManager *bool `json:"enableTaintManager"` + // nodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is healthy + NodeEvictionRate float32 `json:"nodeEvictionRate"` + // secondaryNodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is unhealthy + SecondaryNodeEvictionRate float32 `json:"secondaryNodeEvictionRate"` + // nodeStartupGracePeriod is the amount of time which we allow starting a node to + // be unresponsive before marking it unhealthy. + NodeStartupGracePeriod metav1.Duration `json:"nodeStartupGracePeriod"` // nodeMontiorGracePeriod is the amount of time which we allow a running node to be // unresponsive before marking it unhealthy. Must be N times more than kubelet's // nodeStatusUpdateFrequency, where N means number of retries allowed for kubelet // to post node status. NodeMonitorGracePeriod metav1.Duration `json:"nodeMonitorGracePeriod"` - // registerRetryCount is the number of retries for initial node registration. 
- // Retry interval equals node-sync-period. - RegisterRetryCount int32 `json:"registerRetryCount"` - // nodeStartupGracePeriod is the amount of time which we allow starting a node to - // be unresponsive before marking it unhealthy. - NodeStartupGracePeriod metav1.Duration `json:"nodeStartupGracePeriod"` - // nodeMonitorPeriod is the period for syncing NodeStatus in NodeController. - NodeMonitorPeriod metav1.Duration `json:"nodeMonitorPeriod"` - // serviceAccountKeyFile is the filename containing a PEM-encoded private RSA key - // used to sign service account tokens. - ServiceAccountKeyFile string `json:"serviceAccountKeyFile"` - // clusterSigningCertFile is the filename containing a PEM-encoded - // X509 CA certificate used to issue cluster-scoped certificates - ClusterSigningCertFile string `json:"clusterSigningCertFile"` - // clusterSigningCertFile is the filename containing a PEM-encoded - // RSA or ECDSA private key used to issue cluster-scoped certificates - ClusterSigningKeyFile string `json:"clusterSigningKeyFile"` - // clusterSigningDuration is the length of duration signed certificates - // will be given. - ClusterSigningDuration metav1.Duration `json:"clusterSigningDuration"` - // enableProfiling enables profiling via web interface host:port/debug/pprof/ - EnableProfiling bool `json:"enableProfiling"` - // enableContentionProfiling enables lock contention profiling, if enableProfiling is true. - EnableContentionProfiling bool `json:"enableContentionProfiling"` - // clusterName is the instance prefix for the cluster. - ClusterName string `json:"clusterName"` - // clusterCIDR is CIDR Range for Pods in cluster. - ClusterCIDR string `json:"clusterCIDR"` - // serviceCIDR is CIDR Range for Services in cluster. - ServiceCIDR string `json:"serviceCIDR"` - // NodeCIDRMaskSize is the mask size for node cidr in cluster. - NodeCIDRMaskSize int32 `json:"nodeCIDRMaskSize"` - // AllocateNodeCIDRs enables CIDRs for Pods to be allocated and, if - // ConfigureCloudRoutes is true, to be set on the cloud provider. - AllocateNodeCIDRs bool `json:"allocateNodeCIDRs"` - // CIDRAllocatorType determines what kind of pod CIDR allocator will be used. - CIDRAllocatorType string `json:"cIDRAllocatorType"` - // configureCloudRoutes enables CIDRs allocated with allocateNodeCIDRs - // to be configured on the cloud provider. - ConfigureCloudRoutes *bool `json:"configureCloudRoutes"` - // rootCAFile is the root certificate authority will be included in service - // account's token secret. This must be a valid PEM-encoded CA bundle. - RootCAFile string `json:"rootCAFile"` - // contentType is contentType of requests sent to apiserver. - ContentType string `json:"contentType"` - // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver. - KubeAPIQPS float32 `json:"kubeAPIQPS"` - // kubeAPIBurst is the burst to use while talking with kubernetes apiserver. - KubeAPIBurst int32 `json:"kubeAPIBurst"` - // leaderElection defines the configuration of leader election client. - LeaderElection LeaderElectionConfiguration `json:"leaderElection"` - // volumeConfiguration holds configuration for volume related features. - VolumeConfiguration VolumeConfiguration `json:"volumeConfiguration"` - // How long to wait between starting controller managers - ControllerStartInterval metav1.Duration `json:"controllerStartInterval"` - // enables the generic garbage collector. MUST be synced with the - // corresponding flag of the kube-apiserver. WARNING: the generic garbage - // collector is an alpha feature. 
- EnableGarbageCollector *bool `json:"enableGarbageCollector"` - // concurrentGCSyncs is the number of garbage collector workers that are - // allowed to sync concurrently. - ConcurrentGCSyncs int32 `json:"concurrentGCSyncs"` - // gcIgnoredResources is the list of GroupResources that garbage collection should ignore. - GCIgnoredResources []GroupResource `json:"gCIgnoredResources"` - // nodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is healthy - NodeEvictionRate float32 `json:"nodeEvictionRate"` - // secondaryNodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is unhealthy - SecondaryNodeEvictionRate float32 `json:"secondaryNodeEvictionRate"` + // podEvictionTimeout is the grace period for deleting pods on failed nodes. + PodEvictionTimeout metav1.Duration `json:"podEvictionTimeout"` // secondaryNodeEvictionRate is implicitly overridden to 0 for clusters smaller than or equal to largeClusterSizeThreshold LargeClusterSizeThreshold int32 `json:"largeClusterSizeThreshold"` // Zone is treated as unhealthy in nodeEvictionRate and secondaryNodeEvictionRate when at least // unhealthyZoneThreshold (no less than 3) of Nodes in the zone are NotReady UnhealthyZoneThreshold float32 `json:"unhealthyZoneThreshold"` - // Reconciler runs a periodic loop to reconcile the desired state of the with - // the actual state of the world by triggering attach detach operations. - // This flag enables or disables reconcile. Is false by default, and thus enabled. - DisableAttachDetachReconcilerSync bool `json:"disableAttachDetachReconcilerSync"` - // ReconcilerSyncLoopPeriod is the amount of time the reconciler sync states loop - // wait between successive executions. Is set to 5 sec by default. - ReconcilerSyncLoopPeriod metav1.Duration `json:"reconcilerSyncLoopPeriod"` - // If set to true enables NoExecute Taints and will evict all not-tolerating - // Pod running on Nodes tainted with this kind of Taints. - EnableTaintManager *bool `json:"enableTaintManager"` - // HorizontalPodAutoscalerUseRESTClients causes the HPA controller to use REST clients - // through the kube-aggregator when enabled, instead of using the legacy metrics client - // through the API server proxy. - HorizontalPodAutoscalerUseRESTClients *bool `json:"horizontalPodAutoscalerUseRESTClients"` +} + +type PersistentVolumeBinderControllerConfiguration struct { + // pvClaimBinderSyncPeriod is the period for syncing persistent volumes + // and persistent volume claims. + PVClaimBinderSyncPeriod metav1.Duration `json:"pVClaimBinderSyncPeriod"` + // volumeConfiguration holds configuration for volume related features. + VolumeConfiguration VolumeConfiguration `json:"volumeConfiguration"` +} + +type PodGCControllerConfiguration struct { + // terminatedPodGCThreshold is the number of terminated pods that can exist + // before the terminated pod garbage collector starts deleting terminated pods. + // If <= 0, the terminated pod garbage collector is disabled. + TerminatedPodGCThreshold int32 `json:"terminatedPodGCThreshold"` +} + +type ReplicaSetControllerConfiguration struct { + // concurrentRSSyncs is the number of replica sets that are allowed to sync + // concurrently. Larger number = more responsive replica management, but more + // CPU (and network) load. 
+ ConcurrentRSSyncs int32 `json:"concurrentRSSyncs"`
+}
+
+type ReplicationControllerConfiguration struct {
+ // concurrentRCSyncs is the number of replication controllers that are
+ // allowed to sync concurrently. Larger number = more responsive replica
+ // management, but more CPU (and network) load.
+ ConcurrentRCSyncs int32 `json:"concurrentRCSyncs"`
+}
+
+type ResourceQuotaControllerConfiguration struct {
+ // resourceQuotaSyncPeriod is the period for syncing quota usage status
+ // in the system.
+ ResourceQuotaSyncPeriod metav1.Duration `json:"resourceQuotaSyncPeriod"`
+ // concurrentResourceQuotaSyncs is the number of resource quotas that are
+ // allowed to sync concurrently. Larger number = more responsive quota
+ // management, but more CPU (and network) load.
+ ConcurrentResourceQuotaSyncs int32 `json:"concurrentResourceQuotaSyncs"`
+}
+
+type SAControllerConfiguration struct {
+ // concurrentSATokenSyncs is the number of service account token syncing operations
+ // that will be done concurrently.
+ ConcurrentSATokenSyncs int32 `json:"concurrentSATokenSyncs"`
+ // rootCAFile is the root certificate authority that will be included in the service
+ // account's token secret. This must be a valid PEM-encoded CA bundle.
+ RootCAFile string `json:"rootCAFile"`
+}
+
+type ServiceControllerConfiguration struct {
+ // concurrentServiceSyncs is the number of services that are
+ // allowed to sync concurrently. Larger number = more responsive service
+ // management, but more CPU (and network) load.
+ ConcurrentServiceSyncs int32 `json:"concurrentServiceSyncs"`
}

const (