Rename AgentReady to ContainerRuntimeReady for better clarity

Signed-off-by: Derek Nola <derek.nola@suse.com>
Branch: pull/9578/head
Author: Derek Nola (9 months ago)
Parent: 80baec697f
Commit: 9c0e5a5ff8

@@ -143,12 +143,12 @@ func run(ctx context.Context, cfg cmds.Agent, proxy proxy.Proxy) error {
 			return err
 		}
 	}
-	// the agent runtime is ready to host workloads when containerd is up and the airgap
+	// the container runtime is ready to host workloads when containerd is up and the airgap
 	// images have finished loading, as that portion of startup may block for an arbitrary
 	// amount of time depending on how long it takes to import whatever the user has placed
 	// in the images directory.
-	if cfg.AgentReady != nil {
-		close(cfg.AgentReady)
+	if cfg.ContainerRuntimeReady != nil {
+		close(cfg.ContainerRuntimeReady)
 	}
 
 	notifySocket := os.Getenv("NOTIFY_SOCKET")
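For readers skimming the diff: the channel here is a one-shot readiness broadcast. Closing a channel unblocks every current and future receive on it, which is why the agent closes cfg.ContainerRuntimeReady exactly once instead of sending a value per waiter. A minimal sketch of the pattern, independent of the k3s types:

package main

import (
	"fmt"
	"time"
)

func main() {
	containerRuntimeReady := make(chan struct{})

	// Several goroutines park on the channel until readiness is signalled.
	for i := 0; i < 3; i++ {
		go func(id int) {
			<-containerRuntimeReady // unblocks for every waiter once the channel is closed
			fmt.Printf("waiter %d: container runtime is ready\n", id)
		}(i)
	}

	// Stand-in for containerd coming up and the airgap image import finishing.
	time.Sleep(50 * time.Millisecond)
	close(containerRuntimeReady) // one close wakes all current and future receivers

	time.Sleep(50 * time.Millisecond) // give the waiters a moment to print
}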
@@ -258,8 +258,8 @@ func RunStandalone(ctx context.Context, cfg cmds.Agent) error {
 		return err
 	}
 
-	if cfg.AgentReady != nil {
-		close(cfg.AgentReady)
+	if cfg.ContainerRuntimeReady != nil {
+		close(cfg.ContainerRuntimeReady)
 	}
 
 	if err := tunnelSetup(ctx, nodeConfig, cfg, proxy); err != nil {

@@ -51,7 +51,7 @@ type Agent struct {
 	Taints cli.StringSlice
 	ImageCredProvBinDir string
 	ImageCredProvConfig string
-	AgentReady chan<- struct{}
+	ContainerRuntimeReady chan<- struct{}
 	AgentShared
 }
 

@@ -110,11 +110,11 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont
 		}
 	}
 
-	agentReady := make(chan struct{})
+	containerRuntimeReady := make(chan struct{})
 
 	serverConfig := server.Config{}
 	serverConfig.DisableAgent = cfg.DisableAgent
-	serverConfig.ControlConfig.Runtime = config.NewRuntime(agentReady)
+	serverConfig.ControlConfig.Runtime = config.NewRuntime(containerRuntimeReady)
 	serverConfig.ControlConfig.Token = cfg.Token
 	serverConfig.ControlConfig.AgentToken = cfg.AgentToken
 	serverConfig.ControlConfig.JoinURL = cfg.ServerURL
@@ -516,7 +516,7 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont
 	}
 
 	agentConfig := cmds.AgentConfig
-	agentConfig.AgentReady = agentReady
+	agentConfig.ContainerRuntimeReady = containerRuntimeReady
 	agentConfig.Debug = app.GlobalBool("debug")
 	agentConfig.DataDir = filepath.Dir(serverConfig.ControlConfig.DataDir)
 	agentConfig.ServerURL = url
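Worth noting in these hunks: the server CLI makes one bidirectional channel and hands out its two directed views. cmds.Agent gets the send-only side (chan<- struct{}), so only the agent can signal readiness, while ControlRuntime (next hunks) gets the receive-only side (<-chan struct{}), so server-side code can only wait. A hypothetical, self-contained sketch of that split, with simplified stand-in types:

package main

import "fmt"

// Simplified stand-ins for cmds.Agent and config.ControlRuntime:
// the agent holds the send/close end, the server runtime the receive end.
type agent struct {
	ContainerRuntimeReady chan<- struct{}
}

type controlRuntime struct {
	ContainerRuntimeReady <-chan struct{}
}

func main() {
	containerRuntimeReady := make(chan struct{})

	a := agent{ContainerRuntimeReady: containerRuntimeReady}
	r := controlRuntime{ContainerRuntimeReady: containerRuntimeReady}

	// Agent side: broadcast readiness exactly once.
	close(a.ContainerRuntimeReady)

	// Server side: the receive completes immediately after the close.
	<-r.ContainerRuntimeReady
	fmt.Println("server observed container runtime readiness")
}

The directed types make misuse a compile error: the server cannot accidentally close the channel, and the agent cannot consume the signal it is supposed to produce.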

@@ -291,7 +291,7 @@ type ControlRuntime struct {
 	HTTPBootstrap bool
 
 	APIServerReady <-chan struct{}
-	AgentReady <-chan struct{}
+	ContainerRuntimeReady <-chan struct{}
 	ETCDReady <-chan struct{}
 	StartupHooksWg *sync.WaitGroup
 	ClusterControllerStarts map[string]leader.Callback
@@ -357,9 +357,9 @@ type ControlRuntime struct {
 	EtcdConfig endpoint.ETCDConfig
 }
 
-func NewRuntime(agentReady <-chan struct{}) *ControlRuntime {
+func NewRuntime(containerRuntimeReady <-chan struct{}) *ControlRuntime {
 	return &ControlRuntime{
-		AgentReady: agentReady,
+		ContainerRuntimeReady: containerRuntimeReady,
 		ClusterControllerStarts: map[string]leader.Callback{},
 		LeaderElectedClusterControllerStarts: map[string]leader.Callback{},
 	}

@@ -308,7 +308,7 @@ func (e *ETCD) IsInitialized() (bool, error) {
 func (e *ETCD) Reset(ctx context.Context, rebootstrap func() error) error {
 	// Wait for etcd to come up as a new single-node cluster, then exit
 	go func() {
-		<-e.config.Runtime.AgentReady
+		<-e.config.Runtime.ContainerRuntimeReady
 		t := time.NewTicker(5 * time.Second)
 		defer t.Stop()
 		for range t.C {
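The Reset path shows the consumer side of the broadcast: the goroutine parks on the channel first, so its 5-second poll cannot start while containerd is still importing images. A reduced block-then-poll sketch, with the actual etcd membership check replaced by a stand-in:

package main

import (
	"fmt"
	"time"
)

// resetWatcher mimics the Reset goroutine: park until the container
// runtime is ready, then poll on a ticker. The check body is a stand-in.
func resetWatcher(ready <-chan struct{}, done chan<- struct{}) {
	<-ready // nothing runs while containerd is still importing images
	t := time.NewTicker(5 * time.Second)
	defer t.Stop()
	for range t.C {
		fmt.Println("checking whether the single-node cluster is up") // stand-in check
		close(done)
		return
	}
}

func main() {
	ready := make(chan struct{})
	done := make(chan struct{})
	go resetWatcher(ready, done)

	close(ready) // simulate the agent signalling readiness
	<-done
}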
@@ -450,8 +450,8 @@ func (e *ETCD) Start(ctx context.Context, clientAccessInfo *clientaccess.Info) e
 		for {
 			select {
 			case <-time.After(30 * time.Second):
-				logrus.Infof("Waiting for agent to become ready before joining etcd cluster")
-			case <-e.config.Runtime.AgentReady:
+				logrus.Infof("Waiting for container runtime to become ready before joining etcd cluster")
+			case <-e.config.Runtime.ContainerRuntimeReady:
 				if err := wait.PollImmediateUntilWithContext(ctx, time.Second, func(ctx context.Context) (bool, error) {
 					if err := e.join(ctx, clientAccessInfo); err != nil {
 						// Retry the join if waiting for another member to be promoted, or waiting for peers to connect after promotion
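The join path adds a liveness log to the same wait: the select either unblocks on the readiness channel or falls through a 30-second timer case purely to report that startup is still stalled on the runtime. A stripped-down version of that loop (logrus swapped for the standard library here):

package main

import (
	"log"
	"time"
)

// waitForRuntime blocks until ready is closed, logging every 30 seconds
// so operators can tell the join is stalled on the container runtime.
func waitForRuntime(ready <-chan struct{}) {
	for {
		select {
		case <-time.After(30 * time.Second):
			log.Print("Waiting for container runtime to become ready before joining etcd cluster")
		case <-ready:
			return // broadcast received; proceed with the cluster join
		}
	}
}

func main() {
	ready := make(chan struct{})
	go func() {
		time.Sleep(time.Second) // pretend containerd startup takes a moment
		close(ready)
	}()
	waitForRuntime(ready)
	log.Print("joining etcd cluster")
}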
@@ -1040,7 +1040,7 @@ func (e *ETCD) RemovePeer(ctx context.Context, name, address string, allowSelfRe
 // being promoted to full voting member. The checks only run on the cluster member that is
 // the etcd leader.
 func (e *ETCD) manageLearners(ctx context.Context) {
-	<-e.config.Runtime.AgentReady
+	<-e.config.Runtime.ContainerRuntimeReady
 	t := time.NewTicker(manageTickerTime)
 	defer t.Stop()

@@ -27,8 +27,8 @@ func mustGetAddress() string {
 }
 
 func generateTestConfig() *config.Control {
-	agentReady := make(chan struct{})
-	close(agentReady)
+	containerRuntimeReady := make(chan struct{})
+	close(containerRuntimeReady)
 	criticalControlArgs := config.CriticalControlArgs{
 		ClusterDomain: "cluster.local",
 		ClusterDNS: net.ParseIP("10.43.0.10"),
@@ -37,7 +37,7 @@ func generateTestConfig() *config.Control {
 		ServiceIPRange: testutil.ServiceIPNet(),
 	}
 	return &config.Control{
-		Runtime: config.NewRuntime(agentReady),
+		Runtime: config.NewRuntime(containerRuntimeReady),
 		HTTPSPort: 6443,
 		SupervisorPort: 6443,
 		AdvertisePort: 6443,
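The test helper closes the channel before passing it to config.NewRuntime, which makes every wait on Runtime.ContainerRuntimeReady in the code under test return immediately. A tiny illustration of why a pre-closed channel behaves that way:

package main

import "fmt"

func main() {
	containerRuntimeReady := make(chan struct{})
	close(containerRuntimeReady) // closed before anything ever waits on it

	// Receives on a closed channel never block: each one returns the
	// zero value immediately, so code under test that waits on
	// readiness proceeds instead of deadlocking.
	<-containerRuntimeReady
	<-containerRuntimeReady // still fine; a closed channel stays closed
	fmt.Println("test runtime is treated as already ready")
}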
