move controller manager to component config

pull/6/head
Mike Danese 2016-02-08 13:33:20 -08:00
parent 5d66ee4f02
commit 31b8905da7
6 changed files with 278 additions and 171 deletions

View File

@@ -125,7 +125,7 @@ func Run(s *options.CMServer) error {
mux.Handle("/metrics", prometheus.Handler())
server := &http.Server{
Addr: net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
Addr: net.JoinHostPort(s.Address, strconv.Itoa(s.Port)),
Handler: mux,
}
glog.Fatal(server.ListenAndServe())
@@ -193,14 +193,16 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
glog.Fatalf("Cloud provider could not be initialized: %v", err)
}
// this cidr has been validated already
_, clusterCIDR, _ := net.ParseCIDR(s.ClusterCIDR)
nodeController := nodecontroller.NewNodeController(cloud, clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "node-controller")),
s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
s.PodEvictionTimeout.Duration, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, &s.ClusterCIDR, s.AllocateNodeCIDRs)
nodeController.Run(s.NodeSyncPeriod)
s.NodeMonitorGracePeriod.Duration, s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, s.AllocateNodeCIDRs)
nodeController.Run(s.NodeSyncPeriod.Duration)
serviceController := servicecontroller.New(cloud, clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "service-controller")), s.ClusterName)
if err := serviceController.Run(s.ServiceSyncPeriod, s.NodeSyncPeriod); err != nil {
if err := serviceController.Run(s.ServiceSyncPeriod.Duration, s.NodeSyncPeriod.Duration); err != nil {
glog.Errorf("Failed to start service controller: %v", err)
}
@@ -210,8 +212,8 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
} else if routes, ok := cloud.Routes(); !ok {
glog.Warning("allocate-node-cidrs is set, but cloud provider does not support routes. Will not manage routes.")
} else {
routeController := routecontroller.New(routes, clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "route-controller")), s.ClusterName, &s.ClusterCIDR)
routeController.Run(s.NodeSyncPeriod)
routeController := routecontroller.New(routes, clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "route-controller")), s.ClusterName, clusterCIDR)
routeController.Run(s.NodeSyncPeriod.Duration)
}
} else {
glog.Infof("allocate-node-cidrs set to %v, node controller not creating routes", s.AllocateNodeCIDRs)
@@ -219,7 +221,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
go resourcequotacontroller.NewResourceQuotaController(
clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "resourcequota-controller")),
controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod)).Run(s.ConcurrentResourceQuotaSyncs, wait.NeverStop)
controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod.Duration)).Run(s.ConcurrentResourceQuotaSyncs, wait.NeverStop)
// If apiserver is not running we should wait for some time and fail only then. This is particularly
// important when we start apiserver and controller manager at the same time.
@@ -241,7 +243,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
glog.Fatalf("Failed to get supported resources from server: %v", err)
}
namespaceController := namespacecontroller.NewNamespaceController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "namespace-controller")), versions, s.NamespaceSyncPeriod)
namespaceController := namespacecontroller.NewNamespaceController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "namespace-controller")), versions, s.NamespaceSyncPeriod.Duration)
go namespaceController.Run(s.ConcurrentNamespaceSyncs, wait.NeverStop)
groupVersion := "extensions/v1beta1"
@@ -260,13 +262,13 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
metrics.DefaultHeapsterPort,
)
podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient, metricsClient).
Run(s.HorizontalPodAutoscalerSyncPeriod)
Run(s.HorizontalPodAutoscalerSyncPeriod.Duration)
}
if containsResource(resources, "daemonsets") {
glog.Infof("Starting daemon set controller")
go daemon.NewDaemonSetsController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "daemon-set-controller")), ResyncPeriod(s)).
Run(s.ConcurrentDSCSyncs, wait.NeverStop)
Run(s.ConcurrentDaemonSetSyncs, wait.NeverStop)
}
if containsResource(resources, "jobs") {
@@ -288,20 +290,20 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
}
}
volumePlugins := ProbeRecyclableVolumePlugins(s.VolumeConfigFlags)
provisioner, err := NewVolumeProvisioner(cloud, s.VolumeConfigFlags)
volumePlugins := ProbeRecyclableVolumePlugins(s.VolumeConfiguration)
provisioner, err := NewVolumeProvisioner(cloud, s.VolumeConfiguration)
if err != nil {
glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.")
}
pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-binder")), s.PVClaimBinderSyncPeriod)
pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-binder")), s.PVClaimBinderSyncPeriod.Duration)
pvclaimBinder.Run()
pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(
clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-recycler")),
s.PVClaimBinderSyncPeriod,
s.VolumeConfigFlags.PersistentVolumeRecyclerMaximumRetry,
ProbeRecyclableVolumePlugins(s.VolumeConfigFlags),
s.PVClaimBinderSyncPeriod.Duration,
s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry,
ProbeRecyclableVolumePlugins(s.VolumeConfiguration),
cloud,
)
if err != nil {
@@ -310,7 +312,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
pvRecycler.Run()
if provisioner != nil {
pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-provisioner"))), s.PVClaimBinderSyncPeriod, volumePlugins, provisioner, cloud)
pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-provisioner"))), s.PVClaimBinderSyncPeriod.Duration, volumePlugins, provisioner, cloud)
if err != nil {
glog.Fatalf("Failed to start persistent volume provisioner controller: %+v", err)
}
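The pervasive .Duration suffix in these hunks comes from swapping raw time.Duration config fields for unversioned.Duration, a wrapper that round-trips through JSON/YAML as a human-readable string. Below is a minimal sketch of such a wrapper, assuming the real type in k8s.io/kubernetes/pkg/api/unversioned behaves this way:

package unversioned

import (
	"encoding/json"
	"time"
)

// Duration wraps time.Duration so component configs marshal durations
// as strings like "5m0s" instead of raw nanosecond integers. Callers
// reach the underlying value through the embedded field, which is why
// the diff above rewrites s.NodeSyncPeriod into s.NodeSyncPeriod.Duration.
type Duration struct {
	time.Duration
}

// UnmarshalJSON parses a quoted Go duration string such as "10s".
func (d *Duration) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	parsed, err := time.ParseDuration(s)
	if err != nil {
		return err
	}
	d.Duration = parsed
	return nil
}

// MarshalJSON renders the duration via time.Duration's String form.
func (d Duration) MarshalJSON() ([]byte, error) {
	return json.Marshal(d.Duration.String())
}

ClusterCIDR likewise becomes a plain string, which is why the call sites above now run net.ParseCIDR(s.ClusterCIDR) themselves and hand the resulting *net.IPNet to the node and route controllers.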

View File

@@ -21,9 +21,9 @@ limitations under the License.
package options
import (
"net"
"time"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/componentconfig"
"k8s.io/kubernetes/pkg/client/leaderelection"
"k8s.io/kubernetes/pkg/master/ports"
@@ -33,107 +33,55 @@ import (
// CMServer is the main context object for the controller manager.
type CMServer struct {
Port int
Address net.IP
CloudProvider string
CloudConfigFile string
ConcurrentEndpointSyncs int
ConcurrentRCSyncs int
ConcurrentRSSyncs int
ConcurrentDSCSyncs int
ConcurrentJobSyncs int
ConcurrentResourceQuotaSyncs int
ConcurrentDeploymentSyncs int
ConcurrentNamespaceSyncs int
ServiceSyncPeriod time.Duration
NodeSyncPeriod time.Duration
ResourceQuotaSyncPeriod time.Duration
NamespaceSyncPeriod time.Duration
PVClaimBinderSyncPeriod time.Duration
VolumeConfigFlags VolumeConfigFlags
TerminatedPodGCThreshold int
HorizontalPodAutoscalerSyncPeriod time.Duration
DeploymentControllerSyncPeriod time.Duration
MinResyncPeriod time.Duration
RegisterRetryCount int
NodeMonitorGracePeriod time.Duration
NodeStartupGracePeriod time.Duration
NodeMonitorPeriod time.Duration
NodeStatusUpdateRetry int
PodEvictionTimeout time.Duration
DeletingPodsQps float32
DeletingPodsBurst int
ServiceAccountKeyFile string
RootCAFile string
componentconfig.KubeControllerManagerConfiguration
ClusterName string
ClusterCIDR net.IPNet
AllocateNodeCIDRs bool
EnableProfiling bool
Master string
Kubeconfig string
KubeAPIQPS float32
KubeAPIBurst int
LeaderElection componentconfig.LeaderElectionConfiguration
}
// VolumeConfigFlags is used to bind CLI flags to variables. This top-level struct contains *all* enumerated
// CLI flags meant to configure all volume plugins. From this config, the binary will create many instances
// of volume.VolumeConfig which are then passed to the appropriate plugin. The ControllerManager binary is the only
// part of the code which knows what plugins are supported and which CLI flags correspond to each plugin.
type VolumeConfigFlags struct {
PersistentVolumeRecyclerMaximumRetry int
PersistentVolumeRecyclerMinimumTimeoutNFS int
PersistentVolumeRecyclerPodTemplateFilePathNFS string
PersistentVolumeRecyclerIncrementTimeoutNFS int
PersistentVolumeRecyclerPodTemplateFilePathHostPath string
PersistentVolumeRecyclerMinimumTimeoutHostPath int
PersistentVolumeRecyclerIncrementTimeoutHostPath int
EnableHostPathProvisioning bool
Master string
Kubeconfig string
}
// NewCMServer creates a new CMServer with a default config.
func NewCMServer() *CMServer {
s := CMServer{
Port: ports.ControllerManagerPort,
Address: net.ParseIP("0.0.0.0"),
ConcurrentEndpointSyncs: 5,
ConcurrentRCSyncs: 5,
ConcurrentRSSyncs: 5,
ConcurrentDSCSyncs: 2,
ConcurrentJobSyncs: 5,
ConcurrentResourceQuotaSyncs: 5,
ConcurrentDeploymentSyncs: 5,
ConcurrentNamespaceSyncs: 2,
ServiceSyncPeriod: 5 * time.Minute,
NodeSyncPeriod: 10 * time.Second,
ResourceQuotaSyncPeriod: 5 * time.Minute,
NamespaceSyncPeriod: 5 * time.Minute,
PVClaimBinderSyncPeriod: 10 * time.Minute,
HorizontalPodAutoscalerSyncPeriod: 30 * time.Second,
DeploymentControllerSyncPeriod: 30 * time.Second,
MinResyncPeriod: 12 * time.Hour,
RegisterRetryCount: 10,
PodEvictionTimeout: 5 * time.Minute,
NodeMonitorGracePeriod: 40 * time.Second,
NodeStartupGracePeriod: 60 * time.Second,
NodeMonitorPeriod: 5 * time.Second,
ClusterName: "kubernetes",
TerminatedPodGCThreshold: 12500,
VolumeConfigFlags: VolumeConfigFlags{
// default values here
PersistentVolumeRecyclerMaximumRetry: 3,
PersistentVolumeRecyclerMinimumTimeoutNFS: 300,
PersistentVolumeRecyclerIncrementTimeoutNFS: 30,
PersistentVolumeRecyclerMinimumTimeoutHostPath: 60,
PersistentVolumeRecyclerIncrementTimeoutHostPath: 30,
EnableHostPathProvisioning: false,
KubeControllerManagerConfiguration: componentconfig.KubeControllerManagerConfiguration{
Port: ports.ControllerManagerPort,
Address: "0.0.0.0",
ConcurrentEndpointSyncs: 5,
ConcurrentRCSyncs: 5,
ConcurrentRSSyncs: 5,
ConcurrentDaemonSetSyncs: 2,
ConcurrentJobSyncs: 5,
ConcurrentResourceQuotaSyncs: 5,
ConcurrentDeploymentSyncs: 5,
ConcurrentNamespaceSyncs: 2,
ServiceSyncPeriod: unversioned.Duration{5 * time.Minute},
NodeSyncPeriod: unversioned.Duration{10 * time.Second},
ResourceQuotaSyncPeriod: unversioned.Duration{5 * time.Minute},
NamespaceSyncPeriod: unversioned.Duration{5 * time.Minute},
PVClaimBinderSyncPeriod: unversioned.Duration{10 * time.Minute},
HorizontalPodAutoscalerSyncPeriod: unversioned.Duration{30 * time.Second},
DeploymentControllerSyncPeriod: unversioned.Duration{30 * time.Second},
MinResyncPeriod: unversioned.Duration{12 * time.Hour},
RegisterRetryCount: 10,
PodEvictionTimeout: unversioned.Duration{5 * time.Minute},
NodeMonitorGracePeriod: unversioned.Duration{40 * time.Second},
NodeStartupGracePeriod: unversioned.Duration{60 * time.Second},
NodeMonitorPeriod: unversioned.Duration{5 * time.Second},
ClusterName: "kubernetes",
TerminatedPodGCThreshold: 12500,
VolumeConfiguration: componentconfig.VolumeConfiguration{
EnableHostPathProvisioning: false,
PersistentVolumeRecyclerConfiguration: componentconfig.PersistentVolumeRecyclerConfiguration{
MaximumRetry: 3,
MinimumTimeoutNFS: 300,
IncrementTimeoutNFS: 30,
MinimumTimeoutHostPath: 60,
IncrementTimeoutHostPath: 30,
},
},
KubeAPIQPS: 20.0,
KubeAPIBurst: 30,
LeaderElection: leaderelection.DefaultLeaderElectionConfiguration(),
},
KubeAPIQPS: 20.0,
KubeAPIBurst: 30,
LeaderElection: leaderelection.DefaultLeaderElectionConfiguration(),
}
return &s
}
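Because CMServer now embeds componentconfig.KubeControllerManagerConfiguration, Go field promotion keeps expressions such as s.Port and s.ClusterName compiling unchanged even though those fields moved into the embedded struct. A self-contained sketch with stand-in types (not the real ones) that shows the mechanism:

package main

import "fmt"

// Stand-ins for the real types: the outer server struct embeds the
// serializable configuration, so the config's fields are promoted.
type KubeControllerManagerConfiguration struct {
	Port    int
	Address string
}

type CMServer struct {
	KubeControllerManagerConfiguration
	// Flag-only settings that should not serialize stay on the outer
	// struct, as Master and Kubeconfig do in this commit.
	Master     string
	Kubeconfig string
}

func main() {
	var s CMServer
	s.Port = 10252 // promoted: same storage as the embedded field
	fmt.Println(s.KubeControllerManagerConfiguration.Port) // prints 10252
}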
@@ -141,7 +89,7 @@ func NewCMServer() *CMServer {
// AddFlags adds flags for a specific CMServer to the specified FlagSet
func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
fs.IntVar(&s.Port, "port", s.Port, "The port that the controller-manager's http service runs on")
fs.IPVar(&s.Address, "address", s.Address, "The IP address to serve on (set to 0.0.0.0 for all interfaces)")
fs.Var(componentconfig.IPVar{&s.Address}, "address", "The IP address to serve on (set to 0.0.0.0 for all interfaces)")
fs.StringVar(&s.CloudProvider, "cloud-provider", s.CloudProvider, "The provider for cloud services. Empty string for no provider.")
fs.StringVar(&s.CloudConfigFile, "cloud-config", s.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.")
fs.IntVar(&s.ConcurrentEndpointSyncs, "concurrent-endpoint-syncs", s.ConcurrentEndpointSyncs, "The number of endpoint syncing operations that will be done concurrently. Larger number = faster endpoint updating, but more CPU (and network) load")
@@ -150,42 +98,42 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
fs.IntVar(&s.ConcurrentResourceQuotaSyncs, "concurrent-resource-quota-syncs", s.ConcurrentResourceQuotaSyncs, "The number of resource quotas that are allowed to sync concurrently. Larger number = more responsive quota management, but more CPU (and network) load")
fs.IntVar(&s.ConcurrentDeploymentSyncs, "concurrent-deployment-syncs", s.ConcurrentDeploymentSyncs, "The number of deployment objects that are allowed to sync concurrently. Larger number = more responsive deployments, but more CPU (and network) load")
fs.IntVar(&s.ConcurrentNamespaceSyncs, "concurrent-namespace-syncs", s.ConcurrentNamespaceSyncs, "The number of namespace objects that are allowed to sync concurrently. Larger number = more responsive namespace termination, but more CPU (and network) load")
fs.DurationVar(&s.ServiceSyncPeriod, "service-sync-period", s.ServiceSyncPeriod, "The period for syncing services with their external load balancers")
fs.DurationVar(&s.NodeSyncPeriod, "node-sync-period", s.NodeSyncPeriod, ""+
fs.DurationVar(&s.ServiceSyncPeriod.Duration, "service-sync-period", s.ServiceSyncPeriod.Duration, "The period for syncing services with their external load balancers")
fs.DurationVar(&s.NodeSyncPeriod.Duration, "node-sync-period", s.NodeSyncPeriod.Duration, ""+
"The period for syncing nodes from cloudprovider. Longer periods will result in "+
"fewer calls to cloud provider, but may delay addition of new nodes to cluster.")
fs.DurationVar(&s.ResourceQuotaSyncPeriod, "resource-quota-sync-period", s.ResourceQuotaSyncPeriod, "The period for syncing quota usage status in the system")
fs.DurationVar(&s.NamespaceSyncPeriod, "namespace-sync-period", s.NamespaceSyncPeriod, "The period for syncing namespace life-cycle updates")
fs.DurationVar(&s.PVClaimBinderSyncPeriod, "pvclaimbinder-sync-period", s.PVClaimBinderSyncPeriod, "The period for syncing persistent volumes and persistent volume claims")
fs.DurationVar(&s.MinResyncPeriod, "min-resync-period", s.MinResyncPeriod, "The resync period in reflectors will be random between MinResyncPeriod and 2*MinResyncPeriod")
fs.StringVar(&s.VolumeConfigFlags.PersistentVolumeRecyclerPodTemplateFilePathNFS, "pv-recycler-pod-template-filepath-nfs", s.VolumeConfigFlags.PersistentVolumeRecyclerPodTemplateFilePathNFS, "The file path to a pod definition used as a template for NFS persistent volume recycling")
fs.IntVar(&s.VolumeConfigFlags.PersistentVolumeRecyclerMinimumTimeoutNFS, "pv-recycler-minimum-timeout-nfs", s.VolumeConfigFlags.PersistentVolumeRecyclerMinimumTimeoutNFS, "The minimum ActiveDeadlineSeconds to use for an NFS Recycler pod")
fs.IntVar(&s.VolumeConfigFlags.PersistentVolumeRecyclerIncrementTimeoutNFS, "pv-recycler-increment-timeout-nfs", s.VolumeConfigFlags.PersistentVolumeRecyclerIncrementTimeoutNFS, "the increment of time added per Gi to ActiveDeadlineSeconds for an NFS scrubber pod")
fs.StringVar(&s.VolumeConfigFlags.PersistentVolumeRecyclerPodTemplateFilePathHostPath, "pv-recycler-pod-template-filepath-hostpath", s.VolumeConfigFlags.PersistentVolumeRecyclerPodTemplateFilePathHostPath, "The file path to a pod definition used as a template for HostPath persistent volume recycling. This is for development and testing only and will not work in a multi-node cluster.")
fs.IntVar(&s.VolumeConfigFlags.PersistentVolumeRecyclerMinimumTimeoutHostPath, "pv-recycler-minimum-timeout-hostpath", s.VolumeConfigFlags.PersistentVolumeRecyclerMinimumTimeoutHostPath, "The minimum ActiveDeadlineSeconds to use for a HostPath Recycler pod. This is for development and testing only and will not work in a multi-node cluster.")
fs.IntVar(&s.VolumeConfigFlags.PersistentVolumeRecyclerIncrementTimeoutHostPath, "pv-recycler-timeout-increment-hostpath", s.VolumeConfigFlags.PersistentVolumeRecyclerIncrementTimeoutHostPath, "the increment of time added per Gi to ActiveDeadlineSeconds for a HostPath scrubber pod. This is for development and testing only and will not work in a multi-node cluster.")
fs.BoolVar(&s.VolumeConfigFlags.EnableHostPathProvisioning, "enable-hostpath-provisioner", s.VolumeConfigFlags.EnableHostPathProvisioning, "Enable HostPath PV provisioning when running without a cloud provider. This allows testing and development of provisioning features. HostPath provisioning is not supported in any way, won't work in a multi-node cluster, and should not be used for anything other than testing or development.")
fs.DurationVar(&s.ResourceQuotaSyncPeriod.Duration, "resource-quota-sync-period", s.ResourceQuotaSyncPeriod.Duration, "The period for syncing quota usage status in the system")
fs.DurationVar(&s.NamespaceSyncPeriod.Duration, "namespace-sync-period", s.NamespaceSyncPeriod.Duration, "The period for syncing namespace life-cycle updates")
fs.DurationVar(&s.PVClaimBinderSyncPeriod.Duration, "pvclaimbinder-sync-period", s.PVClaimBinderSyncPeriod.Duration, "The period for syncing persistent volumes and persistent volume claims")
fs.DurationVar(&s.MinResyncPeriod.Duration, "min-resync-period", s.MinResyncPeriod.Duration, "The resync period in reflectors will be random between MinResyncPeriod and 2*MinResyncPeriod")
fs.StringVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, "pv-recycler-pod-template-filepath-nfs", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, "The file path to a pod definition used as a template for NFS persistent volume recycling")
fs.IntVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS, "pv-recycler-minimum-timeout-nfs", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS, "The minimum ActiveDeadlineSeconds to use for an NFS Recycler pod")
fs.IntVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS, "pv-recycler-increment-timeout-nfs", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS, "the increment of time added per Gi to ActiveDeadlineSeconds for an NFS scrubber pod")
fs.StringVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, "pv-recycler-pod-template-filepath-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, "The file path to a pod definition used as a template for HostPath persistent volume recycling. This is for development and testing only and will not work in a multi-node cluster.")
fs.IntVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath, "pv-recycler-minimum-timeout-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath, "The minimum ActiveDeadlineSeconds to use for a HostPath Recycler pod. This is for development and testing only and will not work in a multi-node cluster.")
fs.IntVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath, "pv-recycler-timeout-increment-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath, "the increment of time added per Gi to ActiveDeadlineSeconds for a HostPath scrubber pod. This is for development and testing only and will not work in a multi-node cluster.")
fs.BoolVar(&s.VolumeConfiguration.EnableHostPathProvisioning, "enable-hostpath-provisioner", s.VolumeConfiguration.EnableHostPathProvisioning, "Enable HostPath PV provisioning when running without a cloud provider. This allows testing and development of provisioning features. HostPath provisioning is not supported in any way, won't work in a multi-node cluster, and should not be used for anything other than testing or development.")
fs.IntVar(&s.TerminatedPodGCThreshold, "terminated-pod-gc-threshold", s.TerminatedPodGCThreshold, "Number of terminated pods that can exist before the terminated pod garbage collector starts deleting terminated pods. If <= 0, the terminated pod garbage collector is disabled.")
fs.DurationVar(&s.HorizontalPodAutoscalerSyncPeriod, "horizontal-pod-autoscaler-sync-period", s.HorizontalPodAutoscalerSyncPeriod, "The period for syncing the number of pods in horizontal pod autoscaler.")
fs.DurationVar(&s.DeploymentControllerSyncPeriod, "deployment-controller-sync-period", s.DeploymentControllerSyncPeriod, "Period for syncing the deployments.")
fs.DurationVar(&s.PodEvictionTimeout, "pod-eviction-timeout", s.PodEvictionTimeout, "The grace period for deleting pods on failed nodes.")
fs.DurationVar(&s.HorizontalPodAutoscalerSyncPeriod.Duration, "horizontal-pod-autoscaler-sync-period", s.HorizontalPodAutoscalerSyncPeriod.Duration, "The period for syncing the number of pods in horizontal pod autoscaler.")
fs.DurationVar(&s.DeploymentControllerSyncPeriod.Duration, "deployment-controller-sync-period", s.DeploymentControllerSyncPeriod.Duration, "Period for syncing the deployments.")
fs.DurationVar(&s.PodEvictionTimeout.Duration, "pod-eviction-timeout", s.PodEvictionTimeout.Duration, "The grace period for deleting pods on failed nodes.")
fs.Float32Var(&s.DeletingPodsQps, "deleting-pods-qps", 0.1, "Number of nodes per second on which pods are deleted in case of node failure.")
fs.IntVar(&s.DeletingPodsBurst, "deleting-pods-burst", 10, "Number of nodes on which pods are deleted in bursts in case of node failure. For more details look into RateLimiter.")
fs.IntVar(&s.RegisterRetryCount, "register-retry-count", s.RegisterRetryCount, ""+
"The number of retries for initial node registration. Retry interval equals node-sync-period.")
fs.MarkDeprecated("register-retry-count", "This flag is currently no-op and will be deleted.")
fs.DurationVar(&s.NodeMonitorGracePeriod, "node-monitor-grace-period", s.NodeMonitorGracePeriod,
fs.DurationVar(&s.NodeMonitorGracePeriod.Duration, "node-monitor-grace-period", s.NodeMonitorGracePeriod.Duration,
"Amount of time which we allow running Node to be unresponsive before marking it unhealty. "+
"Must be N times more than kubelet's nodeStatusUpdateFrequency, "+
"where N means number of retries allowed for kubelet to post node status.")
fs.DurationVar(&s.NodeStartupGracePeriod, "node-startup-grace-period", s.NodeStartupGracePeriod,
fs.DurationVar(&s.NodeStartupGracePeriod.Duration, "node-startup-grace-period", s.NodeStartupGracePeriod.Duration,
"Amount of time which we allow starting Node to be unresponsive before marking it unhealty.")
fs.DurationVar(&s.NodeMonitorPeriod, "node-monitor-period", s.NodeMonitorPeriod,
fs.DurationVar(&s.NodeMonitorPeriod.Duration, "node-monitor-period", s.NodeMonitorPeriod.Duration,
"The period for syncing NodeStatus in NodeController.")
fs.StringVar(&s.ServiceAccountKeyFile, "service-account-private-key-file", s.ServiceAccountKeyFile, "Filename containing a PEM-encoded private RSA key used to sign service account tokens.")
fs.BoolVar(&s.EnableProfiling, "profiling", true, "Enable profiling via web interface host:port/debug/pprof/")
fs.StringVar(&s.ClusterName, "cluster-name", s.ClusterName, "The instance prefix for the cluster")
fs.IPNetVar(&s.ClusterCIDR, "cluster-cidr", s.ClusterCIDR, "CIDR Range for Pods in cluster.")
fs.StringVar(&s.ClusterCIDR, "cluster-cidr", s.ClusterCIDR, "CIDR Range for Pods in cluster.")
fs.BoolVar(&s.AllocateNodeCIDRs, "allocate-node-cidrs", false, "Should CIDRs for Pods be allocated and set on the cloud provider.")
fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information.")
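With Address now a string so it can serialize, the typed fs.IPVar binding gives way to fs.Var with componentconfig.IPVar, a pflag.Value adapter that still validates the input as an IP at parse time. A sketch of such an adapter; the method bodies are assumptions shaped by pflag's Value interface rather than a copy of the real helper:

package componentconfig

import (
	"fmt"
	"net"
)

// IPVar exposes a string config field as a pflag.Value so --address
// keeps rejecting non-IP input even though the field is now a plain
// string for serializability.
type IPVar struct {
	Val *string
}

// Set validates the argument as an IP address before storing it.
func (v IPVar) Set(s string) error {
	if net.ParseIP(s) == nil {
		return fmt.Errorf("%q is not a valid IP address", s)
	}
	*v.Val = s
	return nil
}

// String reports the current value for defaulting and usage text.
func (v IPVar) String() string {
	if v.Val == nil {
		return ""
	}
	return *v.Val
}

// Type names the flag's value type in help output.
func (v IPVar) Type() string {
	return "ip"
}

The registration in the hunk above then reads fs.Var(componentconfig.IPVar{&s.Address}, "address", ...), keeping CLI validation while the stored value stays serializable.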

View File

@@ -23,9 +23,8 @@ import (
"fmt"
"k8s.io/kubernetes/cmd/kube-controller-manager/app/options"
// Cloud providers
"k8s.io/kubernetes/pkg/apis/componentconfig"
_ "k8s.io/kubernetes/pkg/cloudprovider/providers"
// Volume plugins
@@ -45,7 +44,7 @@ import (
)
// ProbeRecyclableVolumePlugins collects all persistent volume plugins into an easy to use list.
func ProbeRecyclableVolumePlugins(flags options.VolumeConfigFlags) []volume.VolumePlugin {
func ProbeRecyclableVolumePlugins(config componentconfig.VolumeConfiguration) []volume.VolumePlugin {
allPlugins := []volume.VolumePlugin{}
// The list of plugins to probe is decided by this binary, not
@@ -53,27 +52,27 @@ func ProbeRecyclableVolumePlugins(flags options.VolumeConfigFlags) []volume.Volu
// initialized later.
// Each plugin can make use of VolumeConfig. The single arg to this func contains *all* enumerated
// CLI flags meant to configure volume plugins. From that single config, create an instance of volume.VolumeConfig
// options meant to configure volume plugins. From that single config, create an instance of volume.VolumeConfig
// for a specific plugin and pass that instance to the plugin's ProbeVolumePlugins(config) func.
// HostPath recycling is for testing and development purposes only!
hostPathConfig := volume.VolumeConfig{
RecyclerMinimumTimeout: flags.PersistentVolumeRecyclerMinimumTimeoutHostPath,
RecyclerTimeoutIncrement: flags.PersistentVolumeRecyclerIncrementTimeoutHostPath,
RecyclerMinimumTimeout: config.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath,
RecyclerTimeoutIncrement: config.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath,
RecyclerPodTemplate: volume.NewPersistentVolumeRecyclerPodTemplate(),
}
if err := AttemptToLoadRecycler(flags.PersistentVolumeRecyclerPodTemplateFilePathHostPath, &hostPathConfig); err != nil {
glog.Fatalf("Could not create hostpath recycler pod from file %s: %+v", flags.PersistentVolumeRecyclerPodTemplateFilePathHostPath, err)
if err := AttemptToLoadRecycler(config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, &hostPathConfig); err != nil {
glog.Fatalf("Could not create hostpath recycler pod from file %s: %+v", config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, err)
}
allPlugins = append(allPlugins, host_path.ProbeVolumePlugins(hostPathConfig)...)
nfsConfig := volume.VolumeConfig{
RecyclerMinimumTimeout: flags.PersistentVolumeRecyclerMinimumTimeoutNFS,
RecyclerTimeoutIncrement: flags.PersistentVolumeRecyclerIncrementTimeoutNFS,
RecyclerMinimumTimeout: config.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS,
RecyclerTimeoutIncrement: config.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS,
RecyclerPodTemplate: volume.NewPersistentVolumeRecyclerPodTemplate(),
}
if err := AttemptToLoadRecycler(flags.PersistentVolumeRecyclerPodTemplateFilePathNFS, &nfsConfig); err != nil {
glog.Fatalf("Could not create NFS recycler pod from file %s: %+v", flags.PersistentVolumeRecyclerPodTemplateFilePathNFS, err)
if err := AttemptToLoadRecycler(config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, &nfsConfig); err != nil {
glog.Fatalf("Could not create NFS recycler pod from file %s: %+v", config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, err)
}
allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(nfsConfig)...)
@@ -88,9 +87,9 @@ func ProbeRecyclableVolumePlugins(flags options.VolumeConfigFlags) []volume.Volu
// The beta implementation of provisioning allows 1 implied provisioner per cloud, until we allow configuration of many.
// We explicitly map clouds to volume plugins here which allows us to configure many later without backwards compatibility issues.
// Not all cloudproviders have provisioning capability, which is the reason for the bool in the return to tell the caller to expect one or not.
func NewVolumeProvisioner(cloud cloudprovider.Interface, flags options.VolumeConfigFlags) (volume.ProvisionableVolumePlugin, error) {
func NewVolumeProvisioner(cloud cloudprovider.Interface, config componentconfig.VolumeConfiguration) (volume.ProvisionableVolumePlugin, error) {
switch {
case cloud == nil && flags.EnableHostPathProvisioning:
case cloud == nil && config.EnableHostPathProvisioning:
return getProvisionablePluginFromVolumePlugins(host_path.ProbeVolumePlugins(volume.VolumeConfig{}))
case cloud != nil && aws.ProviderName == cloud.ProviderName():
return getProvisionablePluginFromVolumePlugins(aws_ebs.ProbeVolumePlugins())

View File

@@ -122,7 +122,7 @@ func (s *CMServer) Run(_ []string) error {
}
mux.Handle("/metrics", prometheus.Handler())
server := &http.Server{
Addr: net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
Addr: net.JoinHostPort(s.Address, strconv.Itoa(s.Port)),
Handler: mux,
}
glog.Fatal(server.ListenAndServe())
@@ -147,20 +147,20 @@ func (s *CMServer) Run(_ []string) error {
if err != nil {
glog.Fatalf("Cloud provider could not be initialized: %v", err)
}
_, clusterCIDR, _ := net.ParseCIDR(s.ClusterCIDR)
nodeController := nodecontroller.NewNodeController(cloud, clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "node-controller")),
s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
s.PodEvictionTimeout.Duration, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, (*net.IPNet)(&s.ClusterCIDR), s.AllocateNodeCIDRs)
nodeController.Run(s.NodeSyncPeriod)
s.NodeMonitorGracePeriod.Duration, s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, s.AllocateNodeCIDRs)
nodeController.Run(s.NodeSyncPeriod.Duration)
nodeStatusUpdaterController := node.NewStatusUpdater(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "node-status-controller")), s.NodeMonitorPeriod, time.Now)
nodeStatusUpdaterController := node.NewStatusUpdater(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "node-status-controller")), s.NodeMonitorPeriod.Duration, time.Now)
if err := nodeStatusUpdaterController.Run(wait.NeverStop); err != nil {
glog.Fatalf("Failed to start node status update controller: %v", err)
}
serviceController := servicecontroller.New(cloud, clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "service-controller")), s.ClusterName)
if err := serviceController.Run(s.ServiceSyncPeriod, s.NodeSyncPeriod); err != nil {
if err := serviceController.Run(s.ServiceSyncPeriod.Duration, s.NodeSyncPeriod.Duration); err != nil {
glog.Errorf("Failed to start service controller: %v", err)
}
@@ -169,12 +169,12 @@ func (s *CMServer) Run(_ []string) error {
if !ok {
glog.Fatal("Cloud provider must support routes if allocate-node-cidrs is set")
}
routeController := routecontroller.New(routes, clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "route-controller")), s.ClusterName, (*net.IPNet)(&s.ClusterCIDR))
routeController.Run(s.NodeSyncPeriod)
routeController := routecontroller.New(routes, clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "route-controller")), s.ClusterName, clusterCIDR)
routeController.Run(s.NodeSyncPeriod.Duration)
}
go resourcequotacontroller.NewResourceQuotaController(
clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "resource-quota-controller")), controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod)).Run(s.ConcurrentResourceQuotaSyncs, wait.NeverStop)
clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "resource-quota-controller")), controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod.Duration)).Run(s.ConcurrentResourceQuotaSyncs, wait.NeverStop)
// If apiserver is not running we should wait for some time and fail only then. This is particularly
// important when we start apiserver and controller manager at the same time.
@@ -196,7 +196,7 @@ func (s *CMServer) Run(_ []string) error {
glog.Fatalf("Failed to get supported resources from server: %v", err)
}
namespaceController := namespacecontroller.NewNamespaceController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "namespace-controller")), versions, s.NamespaceSyncPeriod)
namespaceController := namespacecontroller.NewNamespaceController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "namespace-controller")), versions, s.NamespaceSyncPeriod.Duration)
go namespaceController.Run(s.ConcurrentNamespaceSyncs, wait.NeverStop)
groupVersion := "extensions/v1beta1"
@@ -215,13 +215,13 @@ func (s *CMServer) Run(_ []string) error {
metrics.DefaultHeapsterPort,
)
podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient, metricsClient).
Run(s.HorizontalPodAutoscalerSyncPeriod)
Run(s.HorizontalPodAutoscalerSyncPeriod.Duration)
}
if containsResource(resources, "daemonsets") {
glog.Infof("Starting daemon set controller")
go daemon.NewDaemonSetsController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "daemon-set-controller")), s.resyncPeriod).
Run(s.ConcurrentDSCSyncs, wait.NeverStop)
Run(s.ConcurrentDaemonSetSyncs, wait.NeverStop)
}
if containsResource(resources, "jobs") {
@@ -243,23 +243,23 @@ func (s *CMServer) Run(_ []string) error {
}
}
volumePlugins := kubecontrollermanager.ProbeRecyclableVolumePlugins(s.VolumeConfigFlags)
provisioner, err := kubecontrollermanager.NewVolumeProvisioner(cloud, s.VolumeConfigFlags)
volumePlugins := kubecontrollermanager.ProbeRecyclableVolumePlugins(s.VolumeConfiguration)
provisioner, err := kubecontrollermanager.NewVolumeProvisioner(cloud, s.VolumeConfiguration)
if err != nil {
glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.")
}
pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(
clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-binder")),
s.PVClaimBinderSyncPeriod,
s.PVClaimBinderSyncPeriod.Duration,
)
pvclaimBinder.Run()
pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(
clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-recycler")),
s.PVClaimBinderSyncPeriod,
s.VolumeConfigFlags.PersistentVolumeRecyclerMaximumRetry,
kubecontrollermanager.ProbeRecyclableVolumePlugins(s.VolumeConfigFlags),
s.PVClaimBinderSyncPeriod.Duration,
s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry,
kubecontrollermanager.ProbeRecyclableVolumePlugins(s.VolumeConfiguration),
cloud,
)
if err != nil {
@@ -268,7 +268,7 @@ func (s *CMServer) Run(_ []string) error {
pvRecycler.Run()
if provisioner != nil {
pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-controller"))), s.PVClaimBinderSyncPeriod, volumePlugins, provisioner, cloud)
pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-controller"))), s.PVClaimBinderSyncPeriod.Duration, volumePlugins, provisioner, cloud)
if err != nil {
glog.Fatalf("Failed to start persistent volume provisioner controller: %+v", err)
}

View File

@@ -48,15 +48,15 @@ func Test_nodeWithUpdatedStatus(t *testing.T) {
cm := cmoptions.NewCMServer()
kubecfg := kubeletoptions.NewKubeletServer()
assert.True(t, kubecfg.NodeStatusUpdateFrequency.Duration*3 < cm.NodeMonitorGracePeriod) // sanity check for defaults
assert.True(t, kubecfg.NodeStatusUpdateFrequency.Duration*3 < cm.NodeMonitorGracePeriod.Duration) // sanity check for defaults
n := testNode(0, api.ConditionTrue, "KubeletReady")
su := NewStatusUpdater(nil, cm.NodeMonitorPeriod, func() time.Time { return now })
su := NewStatusUpdater(nil, cm.NodeMonitorPeriod.Duration, func() time.Time { return now })
_, updated, err := su.nodeWithUpdatedStatus(n)
assert.NoError(t, err)
assert.False(t, updated, "no update expected b/c kubelet updated heartbeat just now")
n = testNode(-cm.NodeMonitorGracePeriod, api.ConditionTrue, "KubeletReady")
n = testNode(-cm.NodeMonitorGracePeriod.Duration, api.ConditionTrue, "KubeletReady")
n2, updated, err := su.nodeWithUpdatedStatus(n)
assert.NoError(t, err)
assert.True(t, updated, "update expected b/c kubelet's update is older than DefaultNodeMonitorGracePeriod")

View File

@@ -356,3 +356,161 @@ type LeaderElectionConfiguration struct {
// leader election is enabled.
RetryPeriod unversioned.Duration `json:"retryPeriod"`
}
type KubeControllerManagerConfiguration struct {
unversioned.TypeMeta
// port is the port that the controller-manager's http service runs on.
Port int `json:"port"`
// address is the IP address to serve on (set to 0.0.0.0 for all interfaces).
Address string `json:"address"`
// cloudProvider is the provider for cloud services.
CloudProvider string `json:"cloudProvider"`
// cloudConfigFile is the path to the cloud provider configuration file.
CloudConfigFile string `json:"cloudConfigFile"`
// concurrentEndpointSyncs is the number of endpoint syncing operations
// that will be done concurrently. Larger number = faster endpoint updating,
// but more CPU (and network) load.
ConcurrentEndpointSyncs int `json:"concurrentEndpointSyncs"`
// concurrentRSSyncs is the number of replica sets that are allowed to sync
// concurrently. Larger number = more responsive replica management, but more
// CPU (and network) load.
ConcurrentRSSyncs int `json:"concurrentRSSyncs"`
// concurrentRCSyncs is the number of replication controllers that are
// allowed to sync concurrently. Larger number = more responsive replica
// management, but more CPU (and network) load.
ConcurrentRCSyncs int `json:"concurrentRCSyncs"`
// concurrentResourceQuotaSyncs is the number of resource quotas that are
// allowed to sync concurrently. Larger number = more responsive quota
// management, but more CPU (and network) load.
ConcurrentResourceQuotaSyncs int `json:"concurrentResourceQuotaSyncs"`
// concurrentDeploymentSyncs is the number of deployment objects that are
// allowed to sync concurrently. Larger number = more responsive deployments,
// but more CPU (and network) load.
ConcurrentDeploymentSyncs int `json:"concurrentDeploymentSyncs"`
// concurrentDaemonSetSyncs is the number of daemonset objects that are
// allowed to sync concurrently. Larger number = more responsive DaemonSets,
// but more CPU (and network) load.
ConcurrentDaemonSetSyncs int `json:"concurrentDaemonSetSyncs"`
// concurrentJobSyncs is the number of job objects that are
// allowed to sync concurrently. Larger number = more responsive jobs,
// but more CPU (and network) load.
ConcurrentJobSyncs int `json:"concurrentJobSyncs"`
// concurrentNamespaceSyncs is the number of namespace objects that are
// allowed to sync concurrently.
ConcurrentNamespaceSyncs int `json:"concurrentNamespaceSyncs"`
// serviceSyncPeriod is the period for syncing services with their external
// load balancers.
ServiceSyncPeriod unversioned.Duration `json:"serviceSyncPeriod"`
// nodeSyncPeriod is the period for syncing nodes from cloudprovider. Longer
// periods will result in fewer calls to cloud provider, but may delay addition
// of new nodes to cluster.
NodeSyncPeriod unversioned.Duration `json:"nodeSyncPeriod"`
// resourceQuotaSyncPeriod is the period for syncing quota usage status
// in the system.
ResourceQuotaSyncPeriod unversioned.Duration `json:"resourceQuotaSyncPeriod"`
// namespaceSyncPeriod is the period for syncing namespace life-cycle
// updates.
NamespaceSyncPeriod unversioned.Duration `json:"namespaceSyncPeriod"`
// pvClaimBinderSyncPeriod is the period for syncing persistent volumes
// and persistent volume claims.
PVClaimBinderSyncPeriod unversioned.Duration `json:"pvClaimBinderSyncPeriod"`
// minResyncPeriod is the resync period in reflectors; will be random between
// minResyncPeriod and 2*minResyncPeriod.
MinResyncPeriod unversioned.Duration `json:"minResyncPeriod"`
// terminatedPodGCThreshold is the number of terminated pods that can exist
// before the terminated pod garbage collector starts deleting terminated pods.
// If <= 0, the terminated pod garbage collector is disabled.
TerminatedPodGCThreshold int `json:"terminatedPodGCThreshold"`
// horizontalPodAutoscalerSyncPeriod is the period for syncing the number of
// pods in horizontal pod autoscaler.
HorizontalPodAutoscalerSyncPeriod unversioned.Duration `json:"horizontalPodAutoscalerSyncPeriod"`
// deploymentControllerSyncPeriod is the period for syncing the deployments.
DeploymentControllerSyncPeriod unversioned.Duration `json:"deploymentControllerSyncPeriod"`
// podEvictionTimeout is the grace period for deleting pods on failed nodes.
PodEvictionTimeout unversioned.Duration `json:"podEvictionTimeout"`
// deletingPodsQps is the number of nodes per second on which pods are deleted in
// case of node failure.
DeletingPodsQps float32 `json:"deletingPodsQps"`
// deletingPodsBurst is the number of nodes on which pods are deleted in bursts
// in case of node failure. For more details look into RateLimiter.
DeletingPodsBurst int `json:"deletingPodsBurst"`
// nodeMonitorGracePeriod is the amount of time which we allow a running node to be
// unresponsive before marking it unhealthy. Must be N times more than kubelet's
// nodeStatusUpdateFrequency, where N means number of retries allowed for kubelet
// to post node status.
NodeMonitorGracePeriod unversioned.Duration `json:"nodeMonitorGracePeriod"`
// registerRetryCount is the number of retries for initial node registration.
// Retry interval equals node-sync-period.
RegisterRetryCount int `json:"registerRetryCount"`
// nodeStartupGracePeriod is the amount of time which we allow starting a node to
// be unresponsive before marking it unhealthy.
NodeStartupGracePeriod unversioned.Duration `json:"nodeStartupGracePeriod"`
// nodeMonitorPeriod is the period for syncing NodeStatus in NodeController.
NodeMonitorPeriod unversioned.Duration `json:"nodeMonitorPeriod"`
// serviceAccountKeyFile is the filename containing a PEM-encoded private RSA key
// used to sign service account tokens.
ServiceAccountKeyFile string `json:"serviceAccountKeyFile"`
// enableProfiling enables profiling via web interface host:port/debug/pprof/
EnableProfiling bool `json:"enableProfiling"`
// clusterName is the instance prefix for the cluster.
ClusterName string `json:"clusterName"`
// clusterCIDR is CIDR Range for Pods in cluster.
ClusterCIDR string `json:"clusterCIDR"`
// allocateNodeCIDRs enables CIDRs for Pods to be allocated and set on the
// cloud provider.
AllocateNodeCIDRs bool `json:"allocateNodeCIDRs"`
// rootCAFile is the root certificate authority that will be included in service
// account's token secret. This must be a valid PEM-encoded CA bundle.
RootCAFile string `json:"rootCAFile"`
// kubeAPIQPS is the QPS to use while talking with kubernetes apiserver.
KubeAPIQPS float32 `json:"kubeAPIQPS"`
// kubeAPIBurst is the burst to use while talking with kubernetes apiserver.
KubeAPIBurst int `json:"kubeAPIBurst"`
// leaderElection defines the configuration of leader election client.
LeaderElection LeaderElectionConfiguration `json:"leaderElection"`
// volumeConfiguration holds configuration for volume-related features.
VolumeConfiguration VolumeConfiguration `json:"volumeConfiguration"`
}
// VolumeConfiguration contains *all* enumerated flags meant to configure all volume
// plugins. From this config, the controller-manager binary will create many instances of
// volume.VolumeConfig, each containing only the configuration needed for that plugin which
// are then passed to the appropriate plugin. The ControllerManager binary is the only part
// of the code which knows what plugins are supported and which flags correspond to each plugin.
type VolumeConfiguration struct {
// enableHostPathProvisioning enables HostPath PV provisioning when running without a
// cloud provider. This allows testing and development of provisioning features. HostPath
// provisioning is not supported in any way, won't work in a multi-node cluster, and
// should not be used for anything other than testing or development.
EnableHostPathProvisioning bool `json:"enableHostPathProvisioning"`
// persistentVolumeRecyclerConfiguration holds configuration for persistent volume plugins.
PersistentVolumeRecyclerConfiguration PersistentVolumeRecyclerConfiguration `json:"persitentVolumeRecyclerConfiguration"`
}
type PersistentVolumeRecyclerConfiguration struct {
// maximumRetry is the number of retries the PV recycler will execute on failure to recycle
// a PV.
MaximumRetry int `json:"maximumRetry"`
// minimumTimeoutNFS is the minimum ActiveDeadlineSeconds to use for an NFS Recycler
// pod.
MinimumTimeoutNFS int `json:"minimumTimeoutNFS"`
// podTemplateFilePathNFS is the file path to a pod definition used as a template for
// NFS persistent volume recycling.
PodTemplateFilePathNFS string `json:"podTemplateFilePathNFS"`
// incrementTimeoutNFS is the increment of time added per Gi to ActiveDeadlineSeconds
// for an NFS scrubber pod.
IncrementTimeoutNFS int `json:"incrementTimeoutNFS"`
// podTemplateFilePathHostPath is the file path to a pod definition used as a template for
// HostPath persistent volume recycling. This is for development and testing only and
// will not work in a multi-node cluster.
PodTemplateFilePathHostPath string `json:"podTemplateFilePathHostPath"`
// minimumTimeoutHostPath is the minimum ActiveDeadlineSeconds to use for a HostPath
// Recycler pod. This is for development and testing only and will not work in a multi-node
// cluster.
MinimumTimeoutHostPath int `json:"minimumTimeoutHostPath"`
// incrementTimeoutHostPath is the increment of time added per Gi to ActiveDeadlineSeconds
// for a HostPath scrubber pod. This is for development and testing only and will not work
// in a multi-node cluster.
IncrementTimeoutHostPath int `json:"incrementTimeoutHostPath"`
}
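One payoff of the move to component config: the controller manager's defaults are now an API object that can be dumped and reloaded as a config file. A hedged round-trip sketch, assuming NewCMServer and the embedded configuration behave as in this commit:

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kubernetes/cmd/kube-controller-manager/app/options"
)

func main() {
	s := options.NewCMServer()

	// The embedded KubeControllerManagerConfiguration carries the json
	// tags, so it serializes directly; durations come out as strings
	// (e.g. "nodeSyncPeriod": "10s") if unversioned.Duration marshals
	// via its String form, as sketched earlier.
	out, err := json.MarshalIndent(s.KubeControllerManagerConfiguration, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}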