
GH-20889 - put conditionals around hcp initialization for consul server (#20926)

* put conditionals around hcp initialization for consul server

* put more things behind configuration flags

* add changelog

* TestServer_hcpManager

* fix TestAgent_scadaProvider
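
Taken together, these commits wrap every HCP touchpoint in a check on the agent's cloud configuration, so servers that never set a cloud stanza skip HCP initialization entirely (which is what produced the "$HOME is not defined" SCADA error noted in the changelog). A minimal sketch of the pattern follows; the CloudConfig type and IsConfigured predicate here are stand-ins inferred from the diff, not Consul's actual definitions.

```go
package main

import "fmt"

// CloudConfig is a stand-in for the HCP cloud settings seen in this diff
// (resource_id, client_id, client_secret); it is not Consul's actual type.
type CloudConfig struct {
	ResourceID   string
	ClientID     string
	ClientSecret string
}

// IsConfigured reports whether enough cloud settings are present to talk to
// HCP. Assumed predicate; the real check lives in Consul's hcp config package.
func (c CloudConfig) IsConfigured() bool {
	return c.ResourceID != "" && c.ClientID != "" && c.ClientSecret != ""
}

// hcpManager stands in for the HCP component that used to be built unconditionally.
type hcpManager struct{}

type Server struct {
	cloud      CloudConfig
	hcpManager *hcpManager // left nil when cloud is not configured
}

func NewServer(cloud CloudConfig) *Server {
	s := &Server{cloud: cloud}
	// The pattern applied throughout this commit: only construct HCP machinery
	// when the agent is actually configured for HCP.
	if s.cloud.IsConfigured() {
		s.hcpManager = &hcpManager{}
	}
	return s
}

func main() {
	fmt.Println(NewServer(CloudConfig{}).hcpManager != nil) // false: HCP init skipped
	fmt.Println(NewServer(CloudConfig{ResourceID: "r", ClientID: "c", ClientSecret: "s"}).hcpManager != nil) // true
}
```

Because the manager may now be nil, the call sites later in the diff get the same guard.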
John Murret committed 39112c7a98

8 changed files:

  1. .changelog/20926.txt (3 lines changed)
  2. agent/agent.go (2 lines changed)
  3. agent/agent_test.go (1 line changed)
  4. agent/config/builder.go (4 lines changed)
  5. agent/consul/server.go (110 lines changed)
  6. agent/consul/server_test.go (3 lines changed)
  7. agent/setup.go (18 lines changed)
  8. command/agent/agent.go (7 lines changed)

.changelog/20926.txt (3 lines changed)

@@ -0,0 +1,3 @@
+```release-note:bug
+error running consul server in 1.18.0: failed to configure SCADA provider user's home directory path: $HOME is not defined
+```

agent/agent.go (2 lines changed)

@@ -1320,7 +1320,7 @@ func (a *Agent) listenHTTP() ([]apiServer, error) {
     }
     httpAddrs := a.config.HTTPAddrs
-    if a.scadaProvider != nil {
+    if a.config.IsCloudEnabled() && a.scadaProvider != nil {
         httpAddrs = append(httpAddrs, scada.CAPCoreAPI)
     }
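
With this check, the SCADA capability address only joins the HTTP listen addresses when the agent has cloud configuration as well as a SCADA provider. A rough before/after illustration with stand-in names (listenAddrs and the "scada" entry are illustrative, not Consul's identifiers):

```go
package main

import "fmt"

// listenAddrs mirrors the shape of the gating above: cloudEnabled stands in for
// a.config.IsCloudEnabled(), providerPresent for a.scadaProvider != nil, and
// the "scada" entry for scada.CAPCoreAPI.
func listenAddrs(configured []string, cloudEnabled, providerPresent bool) []string {
	addrs := append([]string{}, configured...)
	// Before this commit only providerPresent was checked; now both must hold.
	if cloudEnabled && providerPresent {
		addrs = append(addrs, "scada")
	}
	return addrs
}

func main() {
	fmt.Println(listenAddrs([]string{"127.0.0.1:8500"}, false, true)) // [127.0.0.1:8500]
	fmt.Println(listenAddrs([]string{"127.0.0.1:8500"}, true, true))  // [127.0.0.1:8500 scada]
}
```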

agent/agent_test.go (1 line changed)

@@ -6341,6 +6341,7 @@ func TestAgent_scadaProvider(t *testing.T) {
     pvd.EXPECT().Listen(scada.CAPCoreAPI.Capability()).Return(l, nil).Once()
     pvd.EXPECT().Stop().Return(nil).Once()
     a := TestAgent{
+        HCL:          `cloud = { resource_id = "test-resource-id" client_id = "test-client-id" client_secret = "test-client-secret" }`,
         OverrideDeps: func(deps *BaseDeps) {
             deps.HCP.Provider = pvd
         },

agent/config/builder.go (4 lines changed)

@@ -1114,8 +1114,8 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
         LocalProxyConfigResyncInterval: 30 * time.Second,
     }
-    // host metrics are enabled by default to support HashiCorp Cloud Platform integration
-    rt.Telemetry.EnableHostMetrics = boolValWithDefault(c.Telemetry.EnableHostMetrics, true)
+    // host metrics are enabled if consul is configured with HashiCorp Cloud Platform integration
+    rt.Telemetry.EnableHostMetrics = boolValWithDefault(c.Telemetry.EnableHostMetrics, rt.IsCloudEnabled())
     rt.TLS, err = b.buildTLSConfig(rt, c.TLS)
     if err != nil {
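
The default for host metrics now tracks whether cloud is configured instead of being unconditionally true, while an explicit telemetry.enable_host_metrics setting still wins. A sketch of how a boolValWithDefault-style helper resolves that (assumed shape; Consul's helper may differ in detail):

```go
package main

import "fmt"

// boolValWithDefault-style helper: use the caller's value when set, else the default.
func boolValWithDefault(v *bool, defaultVal bool) bool {
	if v != nil {
		return *v
	}
	return defaultVal
}

func main() {
	cloudEnabled := true // stands in for rt.IsCloudEnabled()

	var unset *bool
	explicitOff := false

	fmt.Println(boolValWithDefault(unset, cloudEnabled))        // unset: default follows cloud config -> true
	fmt.Println(boolValWithDefault(&explicitOff, cloudEnabled)) // explicit user setting wins -> false
}
```

So agents without HCP configuration no longer collect host metrics by default.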

agent/consul/server.go (110 lines changed)

@@ -595,32 +595,34 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server,
         StorageBackend: s.raftStorageBackend,
     })
-    s.hcpManager = hcp.NewManager(hcp.ManagerConfig{
-        CloudConfig:       flat.HCP.Config,
-        StatusFn:          s.hcpServerStatus(flat),
-        Logger:            logger.Named("hcp_manager"),
-        SCADAProvider:     flat.HCP.Provider,
-        TelemetryProvider: flat.HCP.TelemetryProvider,
-        ManagementTokenUpserterFn: func(name, secretId string) error {
-            // Check the state of the server before attempting to upsert the token. Otherwise,
-            // the upsert will fail and log errors that do not require action from the user.
-            if s.config.ACLsEnabled && s.IsLeader() && s.InPrimaryDatacenter() {
-                // Idea for improvement: Upsert a token with a well-known accessorId here instead
-                // of a randomly generated one. This would prevent any possible insertion collision between
-                // this and the insertion that happens during the ACL initialization process (initializeACLs function)
-                return s.upsertManagementToken(name, secretId)
-            }
-            return nil
-        },
-        ManagementTokenDeleterFn: func(secretId string) error {
-            // Check the state of the server before attempting to delete the token.Otherwise,
-            // the delete will fail and log errors that do not require action from the user.
-            if s.config.ACLsEnabled && s.IsLeader() && s.InPrimaryDatacenter() {
-                return s.deleteManagementToken(secretId)
-            }
-            return nil
-        },
-    })
+    if s.config.Cloud.IsConfigured() {
+        s.hcpManager = hcp.NewManager(hcp.ManagerConfig{
+            CloudConfig:       flat.HCP.Config,
+            StatusFn:          s.hcpServerStatus(flat),
+            Logger:            logger.Named("hcp_manager"),
+            SCADAProvider:     flat.HCP.Provider,
+            TelemetryProvider: flat.HCP.TelemetryProvider,
+            ManagementTokenUpserterFn: func(name, secretId string) error {
+                // Check the state of the server before attempting to upsert the token. Otherwise,
+                // the upsert will fail and log errors that do not require action from the user.
+                if s.config.ACLsEnabled && s.IsLeader() && s.InPrimaryDatacenter() {
+                    // Idea for improvement: Upsert a token with a well-known accessorId here instead
+                    // of a randomly generated one. This would prevent any possible insertion collision between
+                    // this and the insertion that happens during the ACL initialization process (initializeACLs function)
+                    return s.upsertManagementToken(name, secretId)
+                }
+                return nil
+            },
+            ManagementTokenDeleterFn: func(secretId string) error {
+                // Check the state of the server before attempting to delete the token.Otherwise,
+                // the delete will fail and log errors that do not require action from the user.
+                if s.config.ACLsEnabled && s.IsLeader() && s.InPrimaryDatacenter() {
+                    return s.deleteManagementToken(secretId)
+                }
+                return nil
+            },
+        })
+    }
     var recorder *middleware.RequestRecorder
     if flat.NewRequestRecorderFunc != nil {
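
Because the manager is now built only inside this guard, s.hcpManager stays nil on servers without cloud configuration, and an unguarded call that dereferences it would typically panic; that is why the trackLeaderChanges hunk further down wraps SendUpdate in the same check. A compact illustration (the manager type and SendUpdate body here are assumptions, not Consul's implementation):

```go
package main

import "fmt"

type manager struct {
	status string
}

// SendUpdate dereferences its receiver, so calling it on a nil *manager panics.
func (m *manager) SendUpdate() { fmt.Println("sending", m.status) }

func main() {
	var m *manager // what s.hcpManager looks like when cloud is not configured
	cloudConfigured := false

	// Unguarded, m.SendUpdate() would panic with a nil pointer dereference.
	// Guarded, as in the trackLeaderChanges hunk below, it is simply skipped.
	if cloudConfigured {
		m.SendUpdate()
	}
	fmt.Println("no HCP update sent")
}
```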
@@ -890,22 +892,24 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server,
     // to enable RPC forwarding.
     s.grpcLeaderForwarder = flat.LeaderForwarder
-    // Start watching HCP Link resource. This needs to be created after
-    // the GRPC services are set up in order for the resource service client to
-    // function. This uses the insecure grpc channel so that it doesn't need to
-    // present a valid ACL token.
-    go hcp.RunHCPLinkWatcher(
-        &lib.StopChannelContext{StopCh: shutdownCh},
-        logger.Named("hcp-link-watcher"),
-        pbresource.NewResourceServiceClient(s.insecureSafeGRPCChan),
-        hcp.HCPManagerLifecycleFn(
-            s.hcpManager,
-            hcpclient.NewClient,
-            bootstrap.LoadManagementToken,
-            flat.HCP.Config,
-            flat.HCP.DataDir,
-        ),
-    )
+    if s.config.Cloud.IsConfigured() {
+        // Start watching HCP Link resource. This needs to be created after
+        // the GRPC services are set up in order for the resource service client to
+        // function. This uses the insecure grpc channel so that it doesn't need to
+        // present a valid ACL token.
+        go hcp.RunHCPLinkWatcher(
+            &lib.StopChannelContext{StopCh: shutdownCh},
+            logger.Named("hcp-link-watcher"),
+            pbresource.NewResourceServiceClient(s.insecureSafeGRPCChan),
+            hcp.HCPManagerLifecycleFn(
+                s.hcpManager,
+                hcpclient.NewClient,
+                bootstrap.LoadManagementToken,
+                flat.HCP.Config,
+                flat.HCP.DataDir,
+            ),
+        )
+    }
     s.controllerManager = controller.NewManager(
         // Usage of the insecure + unsafe grpc chan is required for the controller
@@ -1008,13 +1012,15 @@ func isV1CatalogRequest(rpcName string) bool {
 }
 func (s *Server) registerControllers(deps Deps, proxyUpdater ProxyUpdater) error {
-    hcpctl.RegisterControllers(
-        s.controllerManager, hcpctl.ControllerDependencies{
-            ResourceApisEnabled:    s.useV2Resources,
-            HCPAllowV2ResourceApis: s.hcpAllowV2Resources,
-            CloudConfig:            deps.HCP.Config,
-        },
-    )
+    if s.config.Cloud.IsConfigured() {
+        hcpctl.RegisterControllers(
+            s.controllerManager, hcpctl.ControllerDependencies{
+                ResourceApisEnabled:    s.useV2Resources,
+                HCPAllowV2ResourceApis: s.hcpAllowV2Resources,
+                CloudConfig:            deps.HCP.Config,
+            },
+        )
+    }
     // When not enabled, the v1 tenancy bridge is used by default.
     if s.useV2Tenancy {
@@ -2075,8 +2081,10 @@ func (s *Server) trackLeaderChanges() {
             s.raftStorageBackend.LeaderChanged()
             s.controllerManager.SetRaftLeader(s.IsLeader())
-            // Trigger sending an update to HCP status
-            s.hcpManager.SendUpdate()
+            if s.config.Cloud.IsConfigured() {
+                // Trigger sending an update to HCP status
+                s.hcpManager.SendUpdate()
+            }
         case <-s.shutdownCh:
             s.raft.DeregisterObserver(observer)
             return

agent/consul/server_test.go (3 lines changed)

@@ -2100,6 +2100,9 @@ func TestServer_hcpManager(t *testing.T) {
     // Configure the server for the StatusFn
     conf1.BootstrapExpect = 1
     conf1.RPCAdvertise = &net.TCPAddr{IP: []byte{127, 0, 0, 2}, Port: conf1.RPCAddr.Port}
+    conf1.Cloud.ClientID = "test-client-id"
+    conf1.Cloud.ResourceID = "test-resource-id"
+    conf1.Cloud.ClientSecret = "test-client-secret"
     hcp1 := hcpclient.NewMockClient(t)
     hcp1.EXPECT().PushServerStatus(mock.Anything, mock.MatchedBy(func(status *hcpclient.ServerStatus) bool {
         return status.ID == string(conf1.NodeID)

agent/setup.go (18 lines changed)

@@ -138,15 +138,17 @@ func NewBaseDeps(configLoader ConfigLoader, logOut io.Writer, providedLogger hcl
     cfg.Telemetry.PrometheusOpts.SummaryDefinitions = summaries
     var extraSinks []metrics.MetricSink
-    // This values is set late within newNodeIDFromConfig above
-    cfg.Cloud.NodeID = cfg.NodeID
+    if cfg.IsCloudEnabled() {
+        // This values is set late within newNodeIDFromConfig above
+        cfg.Cloud.NodeID = cfg.NodeID
-    d.HCP, err = hcp.NewDeps(cfg.Cloud, d.Logger.Named("hcp"), cfg.DataDir)
-    if err != nil {
-        return d, err
-    }
-    if d.HCP.Sink != nil {
-        extraSinks = append(extraSinks, d.HCP.Sink)
+        d.HCP, err = hcp.NewDeps(cfg.Cloud, d.Logger.Named("hcp"), cfg.DataDir)
+        if err != nil {
+            return d, err
+        }
+        if d.HCP.Sink != nil {
+            extraSinks = append(extraSinks, d.HCP.Sink)
+        }
     }
     d.MetricsConfig, err = lib.InitTelemetry(cfg.Telemetry, d.Logger, extraSinks...)
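
Here the HCP dependencies, and with them the optional HCP metrics sink, are only created when cloud is enabled; telemetry initialization then receives whatever extra sinks were gathered. A simplified sketch of that flow with stand-in types (newHCPDeps and initTelemetry are illustrative, not Consul's signatures):

```go
package main

import "fmt"

type sink string

type hcpDeps struct {
	Sink *sink // may be nil even when cloud is enabled
}

// newHCPDeps stands in for hcp.NewDeps: only called once cloud config is present.
func newHCPDeps() (hcpDeps, error) {
	s := sink("hcp-metrics-sink")
	return hcpDeps{Sink: &s}, nil
}

// initTelemetry stands in for lib.InitTelemetry and just reports the extra sinks.
func initTelemetry(extraSinks ...sink) {
	fmt.Println("telemetry sinks:", extraSinks)
}

func setup(cloudEnabled bool) error {
	var extraSinks []sink
	if cloudEnabled {
		deps, err := newHCPDeps()
		if err != nil {
			return err
		}
		if deps.Sink != nil {
			extraSinks = append(extraSinks, *deps.Sink)
		}
	}
	initTelemetry(extraSinks...)
	return nil
}

func main() {
	_ = setup(false) // telemetry sinks: []
	_ = setup(true)  // telemetry sinks: [hcp-metrics-sink]
}
```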

command/agent/agent.go (7 lines changed)

@@ -181,12 +181,9 @@ func (c *cmd) run(args []string) int {
             ui.Error(err.Error())
             return 1
         }
-    }
-    // We unconditionally add an Access Control header to our config in order to allow the HCP UI to work.
-    // We do this unconditionally because the cluster can be linked to HCP at any time (not just at startup) and this
-    // is simpler than selectively reloading parts of config at runtime.
-    loader = hcpbootstrap.AddAclPolicyAccessControlHeader(loader)
+        loader = hcpbootstrap.AddAclPolicyAccessControlHeader(loader)
+    }
     bd, err := agent.NewBaseDeps(loader, logGate, nil)
     if err != nil {
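
The access-control header is applied by wrapping the config loader, and after this change the wrapper is only installed on the cloud-enabled path; the removed comment had explained why it used to be unconditional (a cluster can be linked to HCP after startup). A sketch of the loader-decorator idea, with invented names (Config, withExtraHeader, X-Example-Header) standing in for the real hcpbootstrap.AddAclPolicyAccessControlHeader:

```go
package main

import "fmt"

type Config struct {
	AllowedHeaders []string
}

type loader func() (Config, error)

// withExtraHeader wraps a loader so every config it produces also allows the
// given header, mirroring the shape (not the details) of
// hcpbootstrap.AddAclPolicyAccessControlHeader.
func withExtraHeader(next loader, header string) loader {
	return func() (Config, error) {
		cfg, err := next()
		if err != nil {
			return cfg, err
		}
		cfg.AllowedHeaders = append(cfg.AllowedHeaders, header)
		return cfg, nil
	}
}

func main() {
	var active loader = func() (Config, error) {
		return Config{AllowedHeaders: []string{"Content-Type"}}, nil
	}

	cloudEnabled := true
	if cloudEnabled { // after this commit the wrapper is only added on this path
		active = withExtraHeader(active, "X-Example-Header")
	}

	cfg, _ := active()
	fmt.Println(cfg.AllowedHeaders)
}
```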
