mirror of https://github.com/k3s-io/k3s
Merge pull request #38572 from hongchaodeng/sr
Automatic merge from submit-queue (batch tested with PRs 38315, 38624, 38572, 38544)

scheduler: refactor main entry Run()

kube-scheduler/app.Run() is the main entry point of the scheduler program, and it had grown enormous. This PR cleans it up by splitting it into smaller helpers so that it is more modular and readable.
commit 65238bc390
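For readers skimming the diff, the shape of the refactor is: the monolithic Run() now delegates to createClient, createConfig, and startHTTP, each returning an error (or owning its own fatal logging) instead of calling glog.Fatal inline, and main() decides how to report a failed Run(). The self-contained sketch below illustrates that structure only; the options, client, and config types are stand-ins invented for this example, not the Kubernetes APIs touched by the actual change.

```go
package main

import (
	"errors"
	"fmt"
	"log"
	"net/http"
)

// Stand-in types: the real code uses options.SchedulerServer, clientset.Clientset
// and scheduler.Config from the Kubernetes tree.
type options struct{ Master, Kubeconfig, Address string }
type client struct{ master string }
type config struct{ schedulerName string }

// createClient builds the API client from flags and returns an error instead of
// logging fatally, so the caller stays in charge of process exit.
func createClient(o *options) (*client, error) {
	if o.Master == "" && o.Kubeconfig == "" {
		return nil, errors.New("neither --master nor --kubeconfig was provided")
	}
	return &client{master: o.Master}, nil
}

// createConfig turns flags plus a client into the scheduler configuration.
func createConfig(o *options, c *client) (*config, error) {
	if c == nil {
		return nil, errors.New("nil client")
	}
	return &config{schedulerName: "default-scheduler"}, nil
}

// startHTTP owns the health/debug endpoints that previously lived inline in Run().
func startHTTP(o *options) {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprintln(w, "ok")
	})
	log.Fatal(http.ListenAndServe(o.Address, mux))
}

// Run is now a short pipeline over the helpers above; the real version goes on
// to call scheduler.New and run leader election with the returned config.
func Run(o *options) error {
	cli, err := createClient(o)
	if err != nil {
		return fmt.Errorf("unable to create kube client: %v", err)
	}
	cfg, err := createConfig(o, cli)
	if err != nil {
		return fmt.Errorf("failed to create scheduler configuration: %v", err)
	}
	go startHTTP(o)
	log.Printf("running scheduler %q", cfg.schedulerName)
	select {} // block forever, mirroring the "should never exit" contract
}

func main() {
	o := &options{Master: "https://127.0.0.1:6443", Address: "127.0.0.1:10251"}
	if err := Run(o); err != nil {
		log.Fatalf("scheduler app failed to run: %v", err)
	}
}
```

The hunks below show the same split applied to the real kube-scheduler types; the error strings in the sketch are taken verbatim from the new code.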
@@ -17,6 +17,7 @@ go_binary(
         "//pkg/version/verflag:go_default_library",
         "//plugin/cmd/kube-scheduler/app:go_default_library",
         "//plugin/cmd/kube-scheduler/app/options:go_default_library",
+        "//vendor:github.com/golang/glog",
         "//vendor:github.com/spf13/pflag",
     ],
 )

@@ -73,65 +73,18 @@ through the API as necessary.`,
 // Run runs the specified SchedulerServer. This should never exit.
 func Run(s *options.SchedulerServer) error {
-	if c, err := configz.New("componentconfig"); err == nil {
-		c.Set(s.KubeSchedulerConfiguration)
-	} else {
-		glog.Errorf("unable to register configz: %s", err)
-	}
-	kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
+	kubecli, err := createClient(s)
 	if err != nil {
-		glog.Errorf("unable to build config from flags: %v", err)
-		return err
+		return fmt.Errorf("unable to create kube client: %v", err)
 	}
 
-	kubeconfig.ContentType = s.ContentType
-	// Override kubeconfig qps/burst settings from flags
-	kubeconfig.QPS = s.KubeAPIQPS
-	kubeconfig.Burst = int(s.KubeAPIBurst)
-
+	config, err := createConfig(s, kubecli)
 	if err != nil {
-		glog.Fatalf("Invalid API configuration: %v", err)
+		return fmt.Errorf("failed to create scheduler configuration: %v", err)
 	}
-	leaderElectionClient, err := clientset.NewForConfig(restclient.AddUserAgent(kubeconfig, "leader-election"))
-	if err != nil {
-		glog.Fatalf("Invalid API configuration: %v", err)
-	}
-
-	go func() {
-		mux := http.NewServeMux()
-		healthz.InstallHandler(mux)
-		if s.EnableProfiling {
-			mux.HandleFunc("/debug/pprof/", pprof.Index)
-			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
-			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
-			if s.EnableContentionProfiling {
-				goruntime.SetBlockProfileRate(1)
-			}
-		}
-		configz.InstallHandler(mux)
-		mux.Handle("/metrics", prometheus.Handler())
-
-		server := &http.Server{
-			Addr: net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))),
-			Handler: mux,
-		}
-		glog.Fatal(server.ListenAndServe())
-	}()
-
-	configFactory := factory.NewConfigFactory(leaderElectionClient, s.SchedulerName, s.HardPodAffinitySymmetricWeight, s.FailureDomains)
-	config, err := createConfig(s, configFactory)
-
-	if err != nil {
-		glog.Fatalf("Failed to create scheduler configuration: %v", err)
-	}
-
-	eventBroadcaster := record.NewBroadcaster()
-	config.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: s.SchedulerName})
-	eventBroadcaster.StartLogging(glog.Infof)
-	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: leaderElectionClient.Core().Events("")})
-
 	sched := scheduler.New(config)
 
+	go startHTTP(s)
+
 	run := func(_ <-chan struct{}) {
 		sched.Run()
 		select {}
@@ -139,31 +92,27 @@ func Run(s *options.SchedulerServer) error {
 
 	if !s.LeaderElection.LeaderElect {
 		run(nil)
-		glog.Fatal("this statement is unreachable")
 		panic("unreachable")
 	}
 
 	id, err := os.Hostname()
 	if err != nil {
-		glog.Errorf("unable to get hostname: %v", err)
-		return err
+		return fmt.Errorf("unable to get hostname: %v", err)
 	}
 
 	// TODO: enable other lock types
-	rl := resourcelock.EndpointsLock{
+	rl := &resourcelock.EndpointsLock{
 		EndpointsMeta: v1.ObjectMeta{
 			Namespace: "kube-system",
 			Name: "kube-scheduler",
 		},
-		Client: leaderElectionClient,
+		Client: kubecli,
 		LockConfig: resourcelock.ResourceLockConfig{
 			Identity: id,
 			EventRecorder: config.Recorder,
 		},
 	}
 
 	leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{
-		Lock: &rl,
+		Lock: rl,
 		LeaseDuration: s.LeaderElection.LeaseDuration.Duration,
 		RenewDeadline: s.LeaderElection.RenewDeadline.Duration,
 		RetryPeriod: s.LeaderElection.RetryPeriod.Duration,
@@ -174,12 +123,55 @@ func Run(s *options.SchedulerServer) error {
 			},
 		},
 	})
 
-	glog.Fatal("this statement is unreachable")
 	panic("unreachable")
 }
 
-func createConfig(s *options.SchedulerServer, configFactory *factory.ConfigFactory) (*scheduler.Config, error) {
+func startHTTP(s *options.SchedulerServer) {
+	mux := http.NewServeMux()
+	healthz.InstallHandler(mux)
+	if s.EnableProfiling {
+		mux.HandleFunc("/debug/pprof/", pprof.Index)
+		mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
+		mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
+		if s.EnableContentionProfiling {
+			goruntime.SetBlockProfileRate(1)
+		}
+	}
+	if c, err := configz.New("componentconfig"); err == nil {
+		c.Set(s.KubeSchedulerConfiguration)
+	} else {
+		glog.Errorf("unable to register configz: %s", err)
+	}
+	configz.InstallHandler(mux)
+	mux.Handle("/metrics", prometheus.Handler())
+
+	server := &http.Server{
+		Addr: net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))),
+		Handler: mux,
+	}
+	glog.Fatal(server.ListenAndServe())
+}
+
+func createClient(s *options.SchedulerServer) (*clientset.Clientset, error) {
+	kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
+	if err != nil {
+		return nil, fmt.Errorf("unable to build config from flags: %v", err)
+	}
+
+	kubeconfig.ContentType = s.ContentType
+	// Override kubeconfig qps/burst settings from flags
+	kubeconfig.QPS = s.KubeAPIQPS
+	kubeconfig.Burst = int(s.KubeAPIBurst)
+
+	cli, err := clientset.NewForConfig(restclient.AddUserAgent(kubeconfig, "leader-election"))
+	if err != nil {
+		return nil, fmt.Errorf("invalid API configuration: %v", err)
+	}
+	return cli, nil
+}
+
+func createConfig(s *options.SchedulerServer, kubecli *clientset.Clientset) (*scheduler.Config, error) {
+	configFactory := factory.NewConfigFactory(kubecli, s.SchedulerName, s.HardPodAffinitySymmetricWeight, s.FailureDomains)
 	if _, err := os.Stat(s.PolicyConfigFile); err == nil {
 		var (
 			policy schedulerapi.Policy
@@ -196,5 +188,15 @@ func createConfig(s *options.SchedulerServer, configFactory *factory.ConfigFacto
 	}
 
 	// if the config file isn't provided, use the specified (or default) provider
-	return configFactory.CreateFromProvider(s.AlgorithmProvider)
+	config, err := configFactory.CreateFromProvider(s.AlgorithmProvider)
+	if err != nil {
+		return nil, err
+	}
+
+	eventBroadcaster := record.NewBroadcaster()
+	config.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: s.SchedulerName})
+	eventBroadcaster.StartLogging(glog.Infof)
+	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubecli.Core().Events("")})
+
+	return config, nil
 }

@@ -23,6 +23,7 @@ import (
 	"k8s.io/kubernetes/plugin/cmd/kube-scheduler/app"
 	"k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/options"
 
+	"github.com/golang/glog"
 	"github.com/spf13/pflag"
 )
 
@@ -36,5 +37,7 @@ func main() {
 
 	verflag.PrintAndExitIfRequested()
 
-	app.Run(s)
+	if err := app.Run(s); err != nil {
+		glog.Fatalf("scheduler app failed to run: %v", err)
+	}
 }