mirror of https://github.com/k3s-io/k3s
simplify run method of kube-scheduler cmd
parent a8e8e891f2
commit 564ee22123
@@ -81,6 +81,21 @@ constraints, affinity and anti-affinity specifications, data locality, inter-wor
 interference, deadlines, and so on. Workload-specific requirements will be exposed
 through the API as necessary.`,
 		Run: func(cmd *cobra.Command, args []string) {
+			if err := run(cmd, args, opts); err != nil {
+				fmt.Fprintf(os.Stderr, "%v\n", err)
+				os.Exit(1)
+			}
+		},
+	}
+
+	opts.AddFlags(cmd.Flags())
+	cmd.MarkFlagFilename("config", "yaml", "yml", "json")
+
+	return cmd
+}
+
+// run runs the scheduler.
+func run(cmd *cobra.Command, args []string, opts *options.Options) error {
 	verflag.PrintAndExitIfRequested()
 	utilflag.PrintFlags(cmd.Flags())

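The hunk above moves the command body into a run() helper that returns an error, leaving the cobra Run callback as a thin wrapper that prints the error and exits. A minimal, self-contained sketch of that pattern follows; the command name and the run body are placeholders, not the scheduler's:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

// newCommand builds a cobra command whose Run callback only forwards to run().
func newCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use: "example-scheduler",
		Run: func(cmd *cobra.Command, args []string) {
			if err := run(cmd, args); err != nil {
				fmt.Fprintf(os.Stderr, "%v\n", err)
				os.Exit(1)
			}
		},
	}
	return cmd
}

// run holds the real logic and reports failures as errors instead of exiting,
// which keeps it callable from tests.
func run(cmd *cobra.Command, args []string) error {
	fmt.Println("running with args:", args)
	return nil
}

func main() {
	if err := newCommand().Execute(); err != nil {
		os.Exit(1)
	}
}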
@@ -99,7 +114,6 @@ through the API as necessary.`,
 			os.Exit(1)
 		}
 		glog.Infof("Wrote configuration to: %s\n", opts.WriteConfigTo)
-		return
 	}

 	c, err := opts.Config()
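For context, the block this hunk touches handles the write-config-to path: the effective component configuration is written to a file and logged. A hedged sketch of that idea in isolation, using sigs.k8s.io/yaml and an invented config type rather than the scheduler's options package:

package main

import (
	"log"
	"os"

	"sigs.k8s.io/yaml"
)

// componentConfig is a stand-in for the scheduler's component configuration.
type componentConfig struct {
	SchedulerName string `json:"schedulerName"`
}

// writeConfigTo marshals the config to YAML and writes it to path, mirroring
// the "Wrote configuration to: ..." log line in the hunk above.
func writeConfigTo(path string, cfg *componentConfig) error {
	data, err := yaml.Marshal(cfg)
	if err != nil {
		return err
	}
	if err := os.WriteFile(path, data, 0o644); err != nil {
		return err
	}
	log.Printf("Wrote configuration to: %s", path)
	return nil
}

func main() {
	if err := writeConfigTo("/tmp/scheduler.yaml", &componentConfig{SchedulerName: "default-scheduler"}); err != nil {
		log.Fatal(err)
	}
}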
@@ -109,21 +123,10 @@ through the API as necessary.`,
 	}

 	stopCh := make(chan struct{})
-			if err := Run(c.Complete(), stopCh); err != nil {
-				fmt.Fprintf(os.Stderr, "%v\n", err)
-				os.Exit(1)
-			}
-		},
-	}
-
-	opts.AddFlags(cmd.Flags())
-	cmd.MarkFlagFilename("config", "yaml", "yml", "json")
-
-	return cmd
-}
-
-// Run runs the Scheduler.
-func Run(c schedulerserverconfig.CompletedConfig, stopCh <-chan struct{}) error {
+
+	// Get the completed config
+	cc := c.Complete()
+
 	// To help debugging, immediately log version
 	glog.Infof("Version: %+v", version.Get())

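The key change in this hunk is cc := c.Complete(): the raw config returned by opts.Config() is completed before anything is started, and the rest of the former Run function now operates on cc inside run(). A rough sketch of the Config/CompletedConfig convention used across Kubernetes components; the field and default below are invented for illustration, the real scheduler types live elsewhere:

package main

import "fmt"

// Config holds raw, possibly incomplete settings.
type Config struct {
	Address string
}

// CompletedConfig wraps a Config whose defaults have been filled in.
type CompletedConfig struct {
	*Config
}

// Complete fills in defaults and returns a config that is safe to run with.
func (c *Config) Complete() CompletedConfig {
	if c.Address == "" {
		c.Address = "127.0.0.1:10251" // hypothetical default, not from this file
	}
	return CompletedConfig{c}
}

func main() {
	c := &Config{}
	cc := c.Complete()
	fmt.Println("serving on", cc.Address)
}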
@@ -139,7 +142,7 @@ func Run(c schedulerserverconfig.CompletedConfig, stopCh <-chan struct{}) error
 	}

 	// Build a scheduler config from the provided algorithm source.
-	schedulerConfig, err := NewSchedulerConfig(c)
+	schedulerConfig, err := NewSchedulerConfig(cc)
 	if err != nil {
 		return err
 	}
@@ -148,39 +151,39 @@ func Run(c schedulerserverconfig.CompletedConfig, stopCh <-chan struct{}) error
 	sched := scheduler.NewFromConfig(schedulerConfig)

 	// Prepare the event broadcaster.
-	if c.Broadcaster != nil && c.EventClient != nil {
-		c.Broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.EventClient.Events("")})
+	if cc.Broadcaster != nil && cc.EventClient != nil {
+		cc.Broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: cc.EventClient.Events("")})
 	}

 	// Start up the healthz server.
-	if c.InsecureServing != nil {
-		separateMetrics := c.InsecureMetricsServing != nil
-		handler := buildHandlerChain(newHealthzHandler(&c.ComponentConfig, separateMetrics), nil, nil)
-		if err := c.InsecureServing.Serve(handler, 0, stopCh); err != nil {
+	if cc.InsecureServing != nil {
+		separateMetrics := cc.InsecureMetricsServing != nil
+		handler := buildHandlerChain(newHealthzHandler(&cc.ComponentConfig, separateMetrics), nil, nil)
+		if err := cc.InsecureServing.Serve(handler, 0, stopCh); err != nil {
 			return fmt.Errorf("failed to start healthz server: %v", err)
 		}
 	}
-	if c.InsecureMetricsServing != nil {
-		handler := buildHandlerChain(newMetricsHandler(&c.ComponentConfig), nil, nil)
-		if err := c.InsecureMetricsServing.Serve(handler, 0, stopCh); err != nil {
+	if cc.InsecureMetricsServing != nil {
+		handler := buildHandlerChain(newMetricsHandler(&cc.ComponentConfig), nil, nil)
+		if err := cc.InsecureMetricsServing.Serve(handler, 0, stopCh); err != nil {
 			return fmt.Errorf("failed to start metrics server: %v", err)
 		}
 	}
-	if c.SecureServing != nil {
-		handler := buildHandlerChain(newHealthzHandler(&c.ComponentConfig, false), c.Authentication.Authenticator, c.Authorization.Authorizer)
-		if err := c.SecureServing.Serve(handler, 0, stopCh); err != nil {
+	if cc.SecureServing != nil {
+		handler := buildHandlerChain(newHealthzHandler(&cc.ComponentConfig, false), cc.Authentication.Authenticator, cc.Authorization.Authorizer)
+		if err := cc.SecureServing.Serve(handler, 0, stopCh); err != nil {
 			// fail early for secure handlers, removing the old error loop from above
 			return fmt.Errorf("failed to start healthz server: %v", err)
 		}
 	}

 	// Start all informers.
-	go c.PodInformer.Informer().Run(stopCh)
-	c.InformerFactory.Start(stopCh)
+	go cc.PodInformer.Informer().Run(stopCh)
+	cc.InformerFactory.Start(stopCh)

 	// Wait for all caches to sync before scheduling.
-	c.InformerFactory.WaitForCacheSync(stopCh)
-	controller.WaitForCacheSync("scheduler", stopCh, c.PodInformer.Informer().HasSynced)
+	cc.InformerFactory.WaitForCacheSync(stopCh)
+	controller.WaitForCacheSync("scheduler", stopCh, cc.PodInformer.Informer().HasSynced)

 	// Prepare a reusable run function.
 	run := func(ctx context.Context) {
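The informer lines in this hunk only rename c to cc, but the ordering is the part worth noting: informers are started first, and scheduling work waits until their caches have synced. A small client-go sketch of that ordering; the kubeconfig loading and the 30-second resync period are assumptions for the sketch, not taken from this file:

package main

import (
	"log"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load a kubeconfig from the default location (assumption for this sketch).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	stopCh := make(chan struct{})
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	podInformer := factory.Core().V1().Pods().Informer()

	// Start all informers, then block until the pod cache is warm,
	// analogous to InformerFactory.Start + WaitForCacheSync above.
	factory.Start(stopCh)
	if !cache.WaitForCacheSync(stopCh, podInformer.HasSynced) {
		log.Fatal("timed out waiting for caches to sync")
	}
	log.Println("caches synced; ready to schedule")
}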
@@ -200,14 +203,14 @@ func Run(c schedulerserverconfig.CompletedConfig, stopCh <-chan struct{}) error
 	}()

 	// If leader election is enabled, run via LeaderElector until done and exit.
-	if c.LeaderElection != nil {
-		c.LeaderElection.Callbacks = leaderelection.LeaderCallbacks{
+	if cc.LeaderElection != nil {
+		cc.LeaderElection.Callbacks = leaderelection.LeaderCallbacks{
 			OnStartedLeading: run,
 			OnStoppedLeading: func() {
 				utilruntime.HandleError(fmt.Errorf("lost master"))
 			},
 		}
-		leaderElector, err := leaderelection.NewLeaderElector(*c.LeaderElection)
+		leaderElector, err := leaderelection.NewLeaderElector(*cc.LeaderElection)
 		if err != nil {
 			return fmt.Errorf("couldn't create leader elector: %v", err)
 		}
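The final hunk keeps the leader-election wiring intact and only renames c to cc: callbacks are attached to the leader-election config, a LeaderElector is built with leaderelection.NewLeaderElector, and the elected instance runs the scheduler. A sketch of the same wiring against the current client-go API; the lease lock, namespace, identity, and timings below are illustrative, and the client-go vendored in this commit predates some of these types:

package main

import (
	"context"
	"log"
	"os"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	hostname, _ := os.Hostname()
	lock := &resourcelock.LeaseLock{
		LeaseMeta:  metav1.ObjectMeta{Name: "example-scheduler", Namespace: "kube-system"},
		Client:     client.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{Identity: hostname},
	}

	elector, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 15 * time.Second,
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			// OnStartedLeading is where the real work (the scheduler loop) would go.
			OnStartedLeading: func(ctx context.Context) { log.Println("started leading") },
			OnStoppedLeading: func() { log.Println("lost leadership") },
		},
	})
	if err != nil {
		log.Fatalf("couldn't create leader elector: %v", err)
	}

	// Blocks until leadership is lost or the context is cancelled.
	elector.Run(context.Background())
}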