Move some things around to allow for license updating via config reload

The bulk of this commit is moving the LeaderRoutineManager from the agent/consul package into its own package: lib/routine. It was also renamed to routine.Manager, and its Start method now requires a context, which meant updating a whole bunch of call sites throughout the code.
pull/10270/head
Matt Keeler 2021-05-20 10:07:23 -04:00 committed by Matt Keeler
parent f2eed912b2
commit da31e0449e
17 changed files with 148 additions and 103 deletions
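For orientation, here is a minimal sketch of how the relocated manager is driven after this change, based on the lib/routine code shown in the diff below. The routine name "license-watcher" and the logger setup are illustrative only, not taken from the commit.

```go
package main

import (
	"context"

	"github.com/hashicorp/consul/lib/routine"
	"github.com/hashicorp/go-hclog"
)

func main() {
	mgr := routine.NewManager(hclog.Default())

	// Start now takes the parent context up front, replacing the old
	// Start/StartWithContext pair; cancelling ctx stops the routine.
	ctx, cancel := context.WithCancel(context.Background())
	mgr.Start(ctx, "license-watcher", func(ctx context.Context) error {
		<-ctx.Done() // run until cancelled
		return nil
	})

	cancel()
	// Stop returns a channel that is closed once the routine has exited.
	<-mgr.Stop("license-watcher")
}
```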

.changelog/10267.txt (new file)

@@ -0,0 +1,2 @@
+```release-note:improvement
+licensing: **(Enterprise Only)** Consul Enterprise has gained the ability to update its license via a configuration reload. The same environment variables and configurations will be used to determine the new license.```


@@ -49,6 +49,7 @@ import (
     "github.com/hashicorp/consul/lib"
     "github.com/hashicorp/consul/lib/file"
     "github.com/hashicorp/consul/lib/mutex"
+    "github.com/hashicorp/consul/lib/routine"
     "github.com/hashicorp/consul/logging"
     "github.com/hashicorp/consul/tlsutil"
     "github.com/hashicorp/consul/types"
@@ -327,6 +328,10 @@ type Agent struct {
     // into Agent, which will allow us to remove this field.
     rpcClientHealth *health.Client

+    // routineManager is responsible for managing longer running go routines
+    // run by the Agent
+    routineManager *routine.Manager
+
     // enterpriseAgent embeds fields that we only access in consul-enterprise builds
     enterpriseAgent
 }
@@ -371,6 +376,7 @@ func New(bd BaseDeps) (*Agent, error) {
     tlsConfigurator: bd.TLSConfigurator,
     config:          bd.RuntimeConfig,
     cache:           bd.Cache,
+    routineManager:  routine.NewManager(bd.Logger),
 }

 // TODO: create rpcClientHealth in BaseDeps once NetRPC is available without Agent


@@ -290,6 +290,12 @@ type Config struct {
     SegmentName *string `mapstructure:"segment"`
     // Enterprise Only
     Segments []Segment `mapstructure:"segments"`
+
+    // Enterprise Only - not user configurable
+    LicensePollBaseTime   *string `mapstructure:"license_poll_base_time"`
+    LicensePollMaxTime    *string `mapstructure:"license_poll_max_time"`
+    LicenseUpdateBaseTime *string `mapstructure:"license_update_base_time"`
+    LicenseUpdateMaxTime  *string `mapstructure:"license_update_max_time"`
 }

 type GossipLANConfig struct {
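These new fields arrive as *string values (duration strings in mapstructure form); the enterprise code that consumes them is not part of this diff. As a hedged sketch of the kind of parsing presumably involved, the helper below is hypothetical:

```go
package config

import "time"

// durationValOrDefault is a hypothetical helper, not from this commit: it
// turns an optional duration string like the license poll/update fields
// above into a time.Duration, falling back to a default when unset.
func durationValOrDefault(raw *string, def time.Duration) (time.Duration, error) {
	if raw == nil || *raw == "" {
		return def, nil
	}
	return time.ParseDuration(*raw)
}
```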


@@ -30,7 +30,7 @@ func (s *Server) reapExpiredTokens(ctx context.Context) error {
     }
 }

-func (s *Server) startACLTokenReaping() {
+func (s *Server) startACLTokenReaping(ctx context.Context) {
     // Do a quick check for config settings that would imply the goroutine
     // below will just spin forever.
     //
@@ -41,7 +41,7 @@ func (s *Server) startACLTokenReaping() {
         return
     }

-    s.leaderRoutineManager.Start(aclTokenReapingRoutineName, s.reapExpiredTokens)
+    s.leaderRoutineManager.Start(ctx, aclTokenReapingRoutineName, s.reapExpiredTokens)
 }

 func (s *Server) stopACLTokenReaping() {


@@ -3,6 +3,7 @@
 package consul

 import (
+    "context"
     "errors"
     "net"
     "strings"
@@ -46,7 +47,7 @@ func (s *Server) handleEnterpriseLeave() {
     return
 }

-func (s *Server) establishEnterpriseLeadership() error {
+func (s *Server) establishEnterpriseLeadership(_ context.Context) error {
     return nil
 }


@@ -115,7 +115,7 @@ func (s *Server) monitorLeadership() {
     if canUpgrade := s.canUpgradeToNewACLs(weAreLeaderCh != nil); canUpgrade {
         if weAreLeaderCh != nil {
-            if err := s.initializeACLs(true); err != nil {
+            if err := s.initializeACLs(&lib.StopChannelContext{StopCh: weAreLeaderCh}, true); err != nil {
                 s.logger.Error("error transitioning to using new ACLs", "error", err)
                 continue
             }
@@ -308,12 +308,12 @@ func (s *Server) establishLeadership(ctx context.Context) error {
     // check for the upgrade here - this helps us transition to new ACLs much
     // quicker if this is a new cluster or this is a test agent
     if canUpgrade := s.canUpgradeToNewACLs(true); canUpgrade {
-        if err := s.initializeACLs(true); err != nil {
+        if err := s.initializeACLs(ctx, true); err != nil {
             return err
         }
         atomic.StoreInt32(&s.useNewACLs, 1)
         s.updateACLAdvertisement()
-    } else if err := s.initializeACLs(false); err != nil {
+    } else if err := s.initializeACLs(ctx, false); err != nil {
         return err
     }
@@ -337,20 +337,20 @@ func (s *Server) establishLeadership(ctx context.Context) error {
         return err
     }

-    if err := s.establishEnterpriseLeadership(); err != nil {
+    if err := s.establishEnterpriseLeadership(ctx); err != nil {
         return err
     }

     s.getOrCreateAutopilotConfig()
     s.autopilot.Start(ctx)

-    s.startConfigReplication()
+    s.startConfigReplication(ctx)

-    s.startFederationStateReplication()
+    s.startFederationStateReplication(ctx)

-    s.startFederationStateAntiEntropy()
+    s.startFederationStateAntiEntropy(ctx)

-    if err := s.startConnectLeader(); err != nil {
+    if err := s.startConnectLeader(ctx); err != nil {
         return err
     }
@@ -499,7 +499,7 @@ func (s *Server) initializeLegacyACL() error {
 // initializeACLs is used to setup the ACLs if we are the leader
 // and need to do this.
-func (s *Server) initializeACLs(upgrade bool) error {
+func (s *Server) initializeACLs(ctx context.Context, upgrade bool) error {
     if !s.config.ACLsEnabled {
         return nil
     }
@@ -673,11 +673,11 @@ func (s *Server) initializeACLs(upgrade bool) error {
         }
     }
     // launch the upgrade go routine to generate accessors for everything
-    s.startACLUpgrade()
+    s.startACLUpgrade(ctx)
 } else {
     if s.UseLegacyACLs() && !upgrade {
         if s.IsACLReplicationEnabled() {
-            s.startLegacyACLReplication()
+            s.startLegacyACLReplication(ctx)
         }
         // return early as we don't want to start new ACL replication
         // or ACL token reaping as these are new ACL features.
@@ -689,10 +689,10 @@ func (s *Server) initializeACLs(upgrade bool) error {
     }

     // ACL replication is now mandatory
-    s.startACLReplication()
+    s.startACLReplication(ctx)
 }

-    s.startACLTokenReaping()
+    s.startACLTokenReaping(ctx)

 return nil
 }
@@ -771,13 +771,13 @@ func (s *Server) legacyACLTokenUpgrade(ctx context.Context) error {
     }
 }

-func (s *Server) startACLUpgrade() {
+func (s *Server) startACLUpgrade(ctx context.Context) {
     if s.config.PrimaryDatacenter != s.config.Datacenter {
         // token upgrades should only run in the primary
         return
     }

-    s.leaderRoutineManager.Start(aclUpgradeRoutineName, s.legacyACLTokenUpgrade)
+    s.leaderRoutineManager.Start(ctx, aclUpgradeRoutineName, s.legacyACLTokenUpgrade)
 }

 func (s *Server) stopACLUpgrade() {
@@ -826,7 +826,7 @@ func (s *Server) runLegacyACLReplication(ctx context.Context) error {
     }
 }

-func (s *Server) startLegacyACLReplication() {
+func (s *Server) startLegacyACLReplication(ctx context.Context) {
     if s.InACLDatacenter() {
         return
     }
@@ -840,12 +840,12 @@ func (s *Server) startLegacyACLReplication() {
     s.initReplicationStatus()

-    s.leaderRoutineManager.Start(legacyACLReplicationRoutineName, s.runLegacyACLReplication)
+    s.leaderRoutineManager.Start(ctx, legacyACLReplicationRoutineName, s.runLegacyACLReplication)
     s.logger.Info("started legacy ACL replication")
     s.updateACLReplicationStatusRunning(structs.ACLReplicateLegacy)
 }

-func (s *Server) startACLReplication() {
+func (s *Server) startACLReplication(ctx context.Context) {
     if s.InACLDatacenter() {
         return
     }
@@ -858,11 +858,11 @@ func (s *Server) startACLReplication() {
     }

     s.initReplicationStatus()

-    s.leaderRoutineManager.Start(aclPolicyReplicationRoutineName, s.runACLPolicyReplicator)
-    s.leaderRoutineManager.Start(aclRoleReplicationRoutineName, s.runACLRoleReplicator)
+    s.leaderRoutineManager.Start(ctx, aclPolicyReplicationRoutineName, s.runACLPolicyReplicator)
+    s.leaderRoutineManager.Start(ctx, aclRoleReplicationRoutineName, s.runACLRoleReplicator)
     if s.config.ACLTokenReplication {
-        s.leaderRoutineManager.Start(aclTokenReplicationRoutineName, s.runACLTokenReplicator)
+        s.leaderRoutineManager.Start(ctx, aclTokenReplicationRoutineName, s.runACLTokenReplicator)
         s.updateACLReplicationStatusRunning(structs.ACLReplicateTokens)
     } else {
         s.updateACLReplicationStatusRunning(structs.ACLReplicatePolicies)
@@ -973,13 +973,13 @@ func (s *Server) stopACLReplication() {
     s.leaderRoutineManager.Stop(aclTokenReplicationRoutineName)
 }

-func (s *Server) startConfigReplication() {
+func (s *Server) startConfigReplication(ctx context.Context) {
     if s.config.PrimaryDatacenter == "" || s.config.PrimaryDatacenter == s.config.Datacenter {
         // replication shouldn't run in the primary DC
         return
     }

-    s.leaderRoutineManager.Start(configReplicationRoutineName, s.configReplicator.Run)
+    s.leaderRoutineManager.Start(ctx, configReplicationRoutineName, s.configReplicator.Run)
 }

 func (s *Server) stopConfigReplication() {
@@ -987,7 +987,7 @@ func (s *Server) stopConfigReplication() {
     s.leaderRoutineManager.Stop(configReplicationRoutineName)
 }

-func (s *Server) startFederationStateReplication() {
+func (s *Server) startFederationStateReplication(ctx context.Context) {
     if s.config.PrimaryDatacenter == "" || s.config.PrimaryDatacenter == s.config.Datacenter {
         // replication shouldn't run in the primary DC
         return
@@ -998,7 +998,7 @@ func (s *Server) startFederationStateReplication() {
         s.gatewayLocator.SetLastFederationStateReplicationError(nil, false)
     }

-    s.leaderRoutineManager.Start(federationStateReplicationRoutineName, s.federationStateReplicator.Run)
+    s.leaderRoutineManager.Start(ctx, federationStateReplicationRoutineName, s.federationStateReplicator.Run)
 }

 func (s *Server) stopFederationStateReplication() {


@@ -29,15 +29,15 @@ var (
 )

 // startConnectLeader starts multi-dc connect leader routines.
-func (s *Server) startConnectLeader() error {
+func (s *Server) startConnectLeader(ctx context.Context) error {
     if !s.config.ConnectEnabled {
         return nil
     }

-    s.caManager.Start()
-    s.leaderRoutineManager.Start(caRootPruningRoutineName, s.runCARootPruning)
+    s.caManager.Start(ctx)
+    s.leaderRoutineManager.Start(ctx, caRootPruningRoutineName, s.runCARootPruning)

-    return s.startIntentionConfigEntryMigration()
+    return s.startIntentionConfigEntryMigration(ctx)
 }

 // stopConnectLeader stops connect specific leader functions.


@@ -13,6 +13,7 @@ import (
     "github.com/hashicorp/consul/agent/connect/ca"
     "github.com/hashicorp/consul/agent/consul/state"
     "github.com/hashicorp/consul/agent/structs"
+    "github.com/hashicorp/consul/lib/routine"
     "github.com/hashicorp/go-hclog"
     uuid "github.com/hashicorp/go-uuid"
 )
@@ -65,7 +66,7 @@ type CAManager struct {
     primaryRoots      structs.IndexedCARoots // The most recently seen state of the root CAs from the primary datacenter.
     actingSecondaryCA bool                   // True if this datacenter has been initialized as a secondary CA.

-    leaderRoutineManager *LeaderRoutineManager
+    leaderRoutineManager *routine.Manager
 }

 type caDelegateWithState struct {
@@ -76,7 +77,7 @@ func (c *caDelegateWithState) State() *state.Store {
     return c.fsm.State()
 }

-func NewCAManager(delegate caServerDelegate, leaderRoutineManager *LeaderRoutineManager, logger hclog.Logger, config *Config) *CAManager {
+func NewCAManager(delegate caServerDelegate, leaderRoutineManager *routine.Manager, logger hclog.Logger, config *Config) *CAManager {
     return &CAManager{
         delegate: delegate,
         logger:   logger,
@@ -247,7 +248,7 @@ func (c *CAManager) setCAProvider(newProvider ca.Provider, root *structs.CARoot)
     c.providerLock.Unlock()
 }

-func (c *CAManager) Start() {
+func (c *CAManager) Start(ctx context.Context) {
     // Attempt to initialize the Connect CA now. This will
     // happen during leader establishment and it would be great
     // if the CA was ready to go once that process was finished.
@@ -257,11 +258,11 @@
         // we failed to fully initialize the CA so we need to spawn a
         // go routine to retry this process until it succeeds or we lose
         // leadership and the go routine gets stopped.
-        c.leaderRoutineManager.Start(backgroundCAInitializationRoutineName, c.backgroundCAInitialization)
+        c.leaderRoutineManager.Start(ctx, backgroundCAInitializationRoutineName, c.backgroundCAInitialization)
     } else {
         // We only start these if CA initialization was successful. If not the completion of the
         // background CA initialization will start these routines.
-        c.startPostInitializeRoutines()
+        c.startPostInitializeRoutines(ctx)
     }
 }
@@ -271,13 +272,13 @@ func (c *CAManager) Stop() {
     c.leaderRoutineManager.Stop(backgroundCAInitializationRoutineName)
 }

-func (c *CAManager) startPostInitializeRoutines() {
+func (c *CAManager) startPostInitializeRoutines(ctx context.Context) {
     // Start the Connect secondary DC actions if enabled.
     if c.serverConf.Datacenter != c.serverConf.PrimaryDatacenter {
-        c.leaderRoutineManager.Start(secondaryCARootWatchRoutineName, c.secondaryCARootWatch)
+        c.leaderRoutineManager.Start(ctx, secondaryCARootWatchRoutineName, c.secondaryCARootWatch)
     }

-    c.leaderRoutineManager.Start(intermediateCertRenewWatchRoutineName, c.intermediateCertRenewalWatch)
+    c.leaderRoutineManager.Start(ctx, intermediateCertRenewWatchRoutineName, c.intermediateCertRenewalWatch)
 }

 func (c *CAManager) backgroundCAInitialization(ctx context.Context) error {
@@ -294,7 +295,7 @@ func (c *CAManager) backgroundCAInitialization(ctx context.Context) error {
     c.logger.Info("Successfully initialized the Connect CA")

-    c.startPostInitializeRoutines()
+    c.startPostInitializeRoutines(ctx)
     return nil
 }


@@ -17,7 +17,7 @@ const (
     federationStatePruneInterval = time.Hour
 )

-func (s *Server) startFederationStateAntiEntropy() {
+func (s *Server) startFederationStateAntiEntropy(ctx context.Context) {
     // Check to see if we can skip waiting for serf feature detection below.
     if !s.DatacenterSupportsFederationStates() {
         _, fedStates, err := s.fsm.State().FederationStateList(nil)
@@ -31,12 +31,12 @@ func (s *Server) startFederationStateAntiEntropy() {
     if s.config.DisableFederationStateAntiEntropy {
         return
     }
-    s.leaderRoutineManager.Start(federationStateAntiEntropyRoutineName, s.federationStateAntiEntropySync)
+    s.leaderRoutineManager.Start(ctx, federationStateAntiEntropyRoutineName, s.federationStateAntiEntropySync)

     // If this is the primary, then also prune any stale datacenters from the
     // list of federation states.
     if s.config.PrimaryDatacenter == s.config.Datacenter {
-        s.leaderRoutineManager.Start(federationStatePruningRoutineName, s.federationStatePruning)
+        s.leaderRoutineManager.Start(ctx, federationStatePruningRoutineName, s.federationStatePruning)
     }
 }


@@ -15,7 +15,7 @@ const (
     maxIntentionTxnSize = raftWarnSize / 4
 )

-func (s *Server) startIntentionConfigEntryMigration() error {
+func (s *Server) startIntentionConfigEntryMigration(ctx context.Context) error {
     if !s.config.ConnectEnabled {
         return nil
     }
@@ -56,14 +56,14 @@
         }

         // When running in the primary we do all of the real work.
-        s.leaderRoutineManager.Start(intentionMigrationRoutineName, s.legacyIntentionMigration)
+        s.leaderRoutineManager.Start(ctx, intentionMigrationRoutineName, s.legacyIntentionMigration)
     } else {
         // When running in the secondary we mostly just wait until the
         // primary finishes, and then wait until we're pretty sure the main
         // config entry replication thread has seen all of the
         // migration-related config entry edits before zeroing OUR copy of
         // the old intentions table.
-        s.leaderRoutineManager.Start(intentionMigrationRoutineName, s.legacyIntentionMigrationInSecondaryDC)
+        s.leaderRoutineManager.Start(ctx, intentionMigrationRoutineName, s.legacyIntentionMigrationInSecondaryDC)
     }

     return nil


@@ -5,6 +5,7 @@ import (
     "fmt"
     "testing"

+    "github.com/hashicorp/consul/lib/routine"
     "github.com/hashicorp/consul/sdk/testutil"
     "github.com/hashicorp/go-hclog"
     "github.com/stretchr/testify/mock"
@@ -12,7 +13,7 @@ import (
 )

 func TestReplicationRestart(t *testing.T) {
-    mgr := NewLeaderRoutineManager(testutil.Logger(t))
+    mgr := routine.NewManager(testutil.Logger(t))

     config := ReplicatorConfig{
         Name: "mock",
@@ -30,9 +31,9 @@ func TestReplicationRestart(t *testing.T) {
     repl, err := NewReplicator(&config)
     require.NoError(t, err)

-    mgr.Start("mock", repl.Run)
+    mgr.Start(context.Background(), "mock", repl.Run)
     mgr.Stop("mock")
-    mgr.Start("mock", repl.Run)
+    mgr.Start(context.Background(), "mock", repl.Run)
     // Previously this would have segfaulted
     mgr.Stop("mock")
 }


@@ -43,6 +43,7 @@ import (
     "github.com/hashicorp/consul/agent/structs"
     "github.com/hashicorp/consul/agent/token"
     "github.com/hashicorp/consul/lib"
+    "github.com/hashicorp/consul/lib/routine"
    "github.com/hashicorp/consul/logging"
     "github.com/hashicorp/consul/proto/pbsubscribe"
     "github.com/hashicorp/consul/tlsutil"
@@ -298,7 +299,7 @@ type Server struct {
     dcSupportsIntentionsAsConfigEntries int32

     // Manager to handle starting/stopping go routines when establishing/revoking raft leadership
-    leaderRoutineManager *LeaderRoutineManager
+    leaderRoutineManager *routine.Manager

     // embedded struct to hold all the enterprise specific data
     EnterpriseServer
@@ -375,7 +376,7 @@ func NewServer(config *Config, flat Deps) (*Server, error) {
     tombstoneGC:             gc,
     serverLookup:            NewServerLookup(),
     shutdownCh:              shutdownCh,
-    leaderRoutineManager:    NewLeaderRoutineManager(logger),
+    leaderRoutineManager:    routine.NewManager(logger.Named(logging.Leader)),
     aclAuthMethodValidators: authmethod.NewCache(),
     fsm:                     newFSMFromConfig(flat.Logger, gc, config),
 }
@@ -1319,6 +1320,10 @@ func (s *Server) RegisterEndpoint(name string, handler interface{}) error {
     return s.rpcServer.RegisterName(name, handler)
 }

+func (s *Server) FSM() *fsm.FSM {
+    return s.fsm
+}
+
 // Stats is used to return statistics for debugging and insight
 // for various sub-systems
 func (s *Server) Stats() map[string]map[string]string {


@@ -125,7 +125,7 @@ func NewBaseDeps(configLoader ConfigLoader, logOut io.Writer) (BaseDeps, error)
     TLSConfigurator:  d.TLSConfigurator,
     Cache:            d.Cache,
     Tokens:           d.Tokens,
-    EnterpriseConfig: initEnterpriseAutoConfig(d.EnterpriseDeps),
+    EnterpriseConfig: initEnterpriseAutoConfig(d.EnterpriseDeps, cfg),
 }

 d.AutoConfig, err = autoconf.New(acConf)


@@ -15,6 +15,6 @@ func initEnterpriseBaseDeps(d BaseDeps, _ *config.RuntimeConfig) (BaseDeps, erro
 }

 // initEnterpriseAutoConfig is responsible for setting up auto-config for enterprise
-func initEnterpriseAutoConfig(_ consul.EnterpriseDeps) autoconf.EnterpriseConfig {
+func initEnterpriseAutoConfig(_ consul.EnterpriseDeps, _ *config.RuntimeConfig) autoconf.EnterpriseConfig {
     return autoconf.EnterpriseConfig{}
 }


@@ -86,7 +86,9 @@ type TestAgent struct {
 // The caller is responsible for calling Shutdown() to stop the agent and remove
 // temporary directories.
 func NewTestAgent(t *testing.T, hcl string) *TestAgent {
-    return StartTestAgent(t, TestAgent{HCL: hcl})
+    a := StartTestAgent(t, TestAgent{HCL: hcl})
+    t.Cleanup(func() { a.Shutdown() })
+    return a
 }

 // StartTestAgent and wait for it to become available. If the agent fails to
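With Shutdown now registered via t.Cleanup, callers of NewTestAgent can drop their explicit deferred shutdowns. A hypothetical caller for illustration (TestExample is not part of this diff):

```go
package agent

import "testing"

// Hypothetical test: NewTestAgent's t.Cleanup hook now shuts the agent down
// automatically when the test (and its subtests) finish.
func TestExample(t *testing.T) {
	a := NewTestAgent(t, `node_name = "test-example"`)
	_ = a // exercise the agent; no `defer a.Shutdown()` needed anymore
}
```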


@@ -1,22 +1,21 @@
-package consul
+package routine

 import (
     "context"
     "os"
     "sync"

-    "github.com/hashicorp/consul/logging"
     "github.com/hashicorp/go-hclog"
 )

-type LeaderRoutine func(ctx context.Context) error
+type Routine func(ctx context.Context) error

-type leaderRoutine struct {
+type routineTracker struct {
     cancel    context.CancelFunc
     stoppedCh chan struct{} // closed when no longer running
 }

-func (r *leaderRoutine) running() bool {
+func (r *routineTracker) running() bool {
     select {
     case <-r.stoppedCh:
         return false
@@ -25,27 +24,27 @@ func (r *leaderRoutine) running() bool {
     }
 }

-type LeaderRoutineManager struct {
+type Manager struct {
     lock   sync.RWMutex
     logger hclog.Logger

-    routines map[string]*leaderRoutine
+    routines map[string]*routineTracker
 }

-func NewLeaderRoutineManager(logger hclog.Logger) *LeaderRoutineManager {
+func NewManager(logger hclog.Logger) *Manager {
     if logger == nil {
         logger = hclog.New(&hclog.LoggerOptions{
             Output: os.Stderr,
         })
     }

-    return &LeaderRoutineManager{
-        logger:   logger.Named(logging.Leader),
-        routines: make(map[string]*leaderRoutine),
+    return &Manager{
+        logger:   logger,
+        routines: make(map[string]*routineTracker),
     }
 }

-func (m *LeaderRoutineManager) IsRunning(name string) bool {
+func (m *Manager) IsRunning(name string) bool {
     m.lock.Lock()
     defer m.lock.Unlock()
@@ -56,11 +55,7 @@ func (m *LeaderRoutineManager) IsRunning(name string) bool {
     return false
 }

-func (m *LeaderRoutineManager) Start(name string, routine LeaderRoutine) error {
-    return m.StartWithContext(context.TODO(), name, routine)
-}
-
-func (m *LeaderRoutineManager) StartWithContext(parentCtx context.Context, name string, routine LeaderRoutine) error {
+func (m *Manager) Start(ctx context.Context, name string, routine Routine) error {
     m.lock.Lock()
     defer m.lock.Unlock()
@@ -68,38 +63,41 @@ func (m *LeaderRoutineManager) StartWithContext(parentCtx context.Context, name
     return nil
 }

-    if parentCtx == nil {
-        parentCtx = context.Background()
+    if ctx == nil {
+        ctx = context.Background()
     }

-    ctx, cancel := context.WithCancel(parentCtx)
-    instance := &leaderRoutine{
+    rtCtx, cancel := context.WithCancel(ctx)
+    instance := &routineTracker{
         cancel:    cancel,
         stoppedCh: make(chan struct{}),
     }

-    go func() {
-        defer func() {
-            close(instance.stoppedCh)
-        }()
-
-        err := routine(ctx)
-        if err != nil && err != context.DeadlineExceeded && err != context.Canceled {
-            m.logger.Error("routine exited with error",
-                "routine", name,
-                "error", err,
-            )
-        } else {
-            m.logger.Debug("stopped routine", "routine", name)
-        }
-    }()
+    go m.execute(rtCtx, name, routine, instance.stoppedCh)

     m.routines[name] = instance
     m.logger.Info("started routine", "routine", name)
     return nil
 }

+// execute will run the given routine in the foreground and close the given channel when its done executing
+func (m *Manager) execute(ctx context.Context, name string, routine Routine, done chan struct{}) {
+    defer func() {
+        close(done)
+    }()
+
+    err := routine(ctx)
+    if err != nil && err != context.DeadlineExceeded && err != context.Canceled {
+        m.logger.Error("routine exited with error",
+            "routine", name,
+            "error", err,
+        )
+    } else {
+        m.logger.Debug("stopped routine", "routine", name)
+    }
+}
+
-func (m *LeaderRoutineManager) Stop(name string) <-chan struct{} {
+func (m *Manager) Stop(name string) <-chan struct{} {
     instance := m.stopInstance(name)
     if instance == nil {
         // Fabricate a closed channel so it won't block forever.
@@ -111,7 +109,7 @@ func (m *LeaderRoutineManager) Stop(name string) <-chan struct{} {
     return instance.stoppedCh
 }

-func (m *LeaderRoutineManager) stopInstance(name string) *leaderRoutine {
+func (m *Manager) stopInstance(name string) *routineTracker {
     m.lock.Lock()
     defer m.lock.Unlock()
@@ -133,7 +131,7 @@ func (m *LeaderRoutineManager) stopInstance(name string) *leaderRoutine {
     return instance
 }

-func (m *LeaderRoutineManager) StopAll() {
+func (m *Manager) StopAll() {
     m.lock.Lock()
     defer m.lock.Unlock()
@@ -146,5 +144,5 @@ func (m *LeaderRoutineManager) StopAll() {
     }

     // just wipe out the entire map
-    m.routines = make(map[string]*leaderRoutine)
+    m.routines = make(map[string]*routineTracker)
 }


@@ -1,4 +1,4 @@
-package consul
+package routine

 import (
     "context"
@@ -10,13 +10,11 @@ import (
     "github.com/stretchr/testify/require"
 )

-func TestLeaderRoutineManager(t *testing.T) {
+func TestManager(t *testing.T) {
     t.Parallel()

     var runs uint32
     var running uint32
-    // tlog := testutil.NewCancellableTestLogger(t)
-    // defer tlog.Cancel()
-    mgr := NewLeaderRoutineManager(testutil.Logger(t))
+    mgr := NewManager(testutil.Logger(t))

     run := func(ctx context.Context) error {
         atomic.StoreUint32(&running, 1)
@@ -30,7 +28,7 @@ func TestLeaderRoutineManager(t *testing.T) {
     require.False(t, mgr.IsRunning("not-found"))

     // start
-    require.NoError(t, mgr.Start("run", run))
+    require.NoError(t, mgr.Start(context.Background(), "run", run))
     require.True(t, mgr.IsRunning("run"))
     retry.Run(t, func(r *retry.R) {
         require.Equal(r, uint32(1), atomic.LoadUint32(&runs))
@@ -47,7 +45,7 @@ func TestLeaderRoutineManager(t *testing.T) {
     })

     // restart and stop
-    require.NoError(t, mgr.Start("run", run))
+    require.NoError(t, mgr.Start(context.Background(), "run", run))
     retry.Run(t, func(r *retry.R) {
         require.Equal(r, uint32(2), atomic.LoadUint32(&runs))
         require.Equal(r, uint32(1), atomic.LoadUint32(&running))
@@ -63,7 +61,7 @@ func TestLeaderRoutineManager(t *testing.T) {
     // start with a context
     ctx, cancel := context.WithCancel(context.Background())
-    require.NoError(t, mgr.StartWithContext(ctx, "run", run))
+    require.NoError(t, mgr.Start(ctx, "run", run))
     cancel()

     // The function should exit of its own accord due to the parent
@@ -76,3 +74,28 @@ func TestLeaderRoutineManager(t *testing.T) {
         require.False(r, mgr.IsRunning("run"))
     })
 }
+
+func TestManager_StopAll(t *testing.T) {
+    t.Parallel()
+
+    var runs uint32
+    var running uint32
+    mgr := NewManager(testutil.Logger(t))
+
+    run := func(ctx context.Context) error {
+        atomic.StoreUint32(&running, 1)
+        defer atomic.StoreUint32(&running, 0)
+        atomic.AddUint32(&runs, 1)
+        <-ctx.Done()
+        return nil
+    }
+
+    require.NoError(t, mgr.Start(context.Background(), "run1", run))
+    require.NoError(t, mgr.Start(context.Background(), "run2", run))
+
+    mgr.StopAll()
+
+    retry.Run(t, func(r *retry.R) {
+        require.False(r, mgr.IsRunning("run1"))
+        require.False(r, mgr.IsRunning("run2"))
+    })
+}