diff --git a/.changelog/20926.txt b/.changelog/20926.txt
new file mode 100644
index 0000000000..9778a1a71e
--- /dev/null
+++ b/.changelog/20926.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+error running consul server in 1.18.0: failed to configure SCADA provider user's home directory path: $HOME is not defined
+```
\ No newline at end of file
diff --git a/agent/agent.go b/agent/agent.go
index 696f53f001..d066b4cba6 100644
--- a/agent/agent.go
+++ b/agent/agent.go
@@ -1320,7 +1320,7 @@ func (a *Agent) listenHTTP() ([]apiServer, error) {
 	}
 
 	httpAddrs := a.config.HTTPAddrs
-	if a.scadaProvider != nil {
+	if a.config.IsCloudEnabled() && a.scadaProvider != nil {
 		httpAddrs = append(httpAddrs, scada.CAPCoreAPI)
 	}
 
diff --git a/agent/agent_test.go b/agent/agent_test.go
index e41d836876..b448ec432a 100644
--- a/agent/agent_test.go
+++ b/agent/agent_test.go
@@ -6341,6 +6341,7 @@ func TestAgent_scadaProvider(t *testing.T) {
 	pvd.EXPECT().Listen(scada.CAPCoreAPI.Capability()).Return(l, nil).Once()
 	pvd.EXPECT().Stop().Return(nil).Once()
 	a := TestAgent{
+		HCL: `cloud = { resource_id = "test-resource-id" client_id = "test-client-id" client_secret = "test-client-secret" }`,
 		OverrideDeps: func(deps *BaseDeps) {
 			deps.HCP.Provider = pvd
 		},
diff --git a/agent/config/builder.go b/agent/config/builder.go
index 883be2b748..44784fcc61 100644
--- a/agent/config/builder.go
+++ b/agent/config/builder.go
@@ -1114,8 +1114,8 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
 		LocalProxyConfigResyncInterval: 30 * time.Second,
 	}
 
-	// host metrics are enabled by default to support HashiCorp Cloud Platform integration
-	rt.Telemetry.EnableHostMetrics = boolValWithDefault(c.Telemetry.EnableHostMetrics, true)
+	// host metrics are enabled if consul is configured with HashiCorp Cloud Platform integration
+	rt.Telemetry.EnableHostMetrics = boolValWithDefault(c.Telemetry.EnableHostMetrics, rt.IsCloudEnabled())
 
 	rt.TLS, err = b.buildTLSConfig(rt, c.TLS)
 	if err != nil {
diff --git a/agent/consul/server.go b/agent/consul/server.go
index c403c0400c..1aea2df125 100644
--- a/agent/consul/server.go
+++ b/agent/consul/server.go
@@ -595,32 +595,34 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server,
 		StorageBackend: s.raftStorageBackend,
 	})
 
-	s.hcpManager = hcp.NewManager(hcp.ManagerConfig{
-		CloudConfig:       flat.HCP.Config,
-		StatusFn:          s.hcpServerStatus(flat),
-		Logger:            logger.Named("hcp_manager"),
-		SCADAProvider:     flat.HCP.Provider,
-		TelemetryProvider: flat.HCP.TelemetryProvider,
-		ManagementTokenUpserterFn: func(name, secretId string) error {
-			// Check the state of the server before attempting to upsert the token. Otherwise,
-			// the upsert will fail and log errors that do not require action from the user.
-			if s.config.ACLsEnabled && s.IsLeader() && s.InPrimaryDatacenter() {
-				// Idea for improvement: Upsert a token with a well-known accessorId here instead
-				// of a randomly generated one. This would prevent any possible insertion collision between
-				// this and the insertion that happens during the ACL initialization process (initializeACLs function)
-				return s.upsertManagementToken(name, secretId)
-			}
-			return nil
-		},
-		ManagementTokenDeleterFn: func(secretId string) error {
-			// Check the state of the server before attempting to delete the token.Otherwise,
-			// the delete will fail and log errors that do not require action from the user.
-			if s.config.ACLsEnabled && s.IsLeader() && s.InPrimaryDatacenter() {
-				return s.deleteManagementToken(secretId)
-			}
-			return nil
-		},
-	})
+	if s.config.Cloud.IsConfigured() {
+		s.hcpManager = hcp.NewManager(hcp.ManagerConfig{
+			CloudConfig:       flat.HCP.Config,
+			StatusFn:          s.hcpServerStatus(flat),
+			Logger:            logger.Named("hcp_manager"),
+			SCADAProvider:     flat.HCP.Provider,
+			TelemetryProvider: flat.HCP.TelemetryProvider,
+			ManagementTokenUpserterFn: func(name, secretId string) error {
+				// Check the state of the server before attempting to upsert the token. Otherwise,
+				// the upsert will fail and log errors that do not require action from the user.
+				if s.config.ACLsEnabled && s.IsLeader() && s.InPrimaryDatacenter() {
+					// Idea for improvement: Upsert a token with a well-known accessorId here instead
+					// of a randomly generated one. This would prevent any possible insertion collision between
+					// this and the insertion that happens during the ACL initialization process (initializeACLs function)
+					return s.upsertManagementToken(name, secretId)
+				}
+				return nil
+			},
+			ManagementTokenDeleterFn: func(secretId string) error {
+				// Check the state of the server before attempting to delete the token. Otherwise,
+				// the delete will fail and log errors that do not require action from the user.
+				if s.config.ACLsEnabled && s.IsLeader() && s.InPrimaryDatacenter() {
+					return s.deleteManagementToken(secretId)
+				}
+				return nil
+			},
+		})
+	}
 
 	var recorder *middleware.RequestRecorder
 	if flat.NewRequestRecorderFunc != nil {
@@ -890,22 +892,24 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server,
 	// to enable RPC forwarding.
 	s.grpcLeaderForwarder = flat.LeaderForwarder
 
-	// Start watching HCP Link resource. This needs to be created after
-	// the GRPC services are set up in order for the resource service client to
-	// function. This uses the insecure grpc channel so that it doesn't need to
-	// present a valid ACL token.
-	go hcp.RunHCPLinkWatcher(
-		&lib.StopChannelContext{StopCh: shutdownCh},
-		logger.Named("hcp-link-watcher"),
-		pbresource.NewResourceServiceClient(s.insecureSafeGRPCChan),
-		hcp.HCPManagerLifecycleFn(
-			s.hcpManager,
-			hcpclient.NewClient,
-			bootstrap.LoadManagementToken,
-			flat.HCP.Config,
-			flat.HCP.DataDir,
-		),
-	)
+	if s.config.Cloud.IsConfigured() {
+		// Start watching HCP Link resource. This needs to be created after
+		// the GRPC services are set up in order for the resource service client to
+		// function. This uses the insecure grpc channel so that it doesn't need to
+		// present a valid ACL token.
+		go hcp.RunHCPLinkWatcher(
+			&lib.StopChannelContext{StopCh: shutdownCh},
+			logger.Named("hcp-link-watcher"),
+			pbresource.NewResourceServiceClient(s.insecureSafeGRPCChan),
+			hcp.HCPManagerLifecycleFn(
+				s.hcpManager,
+				hcpclient.NewClient,
+				bootstrap.LoadManagementToken,
+				flat.HCP.Config,
+				flat.HCP.DataDir,
+			),
+		)
+	}
 
 	s.controllerManager = controller.NewManager(
 		// Usage of the insecure + unsafe grpc chan is required for the controller
@@ -1008,13 +1012,15 @@ func isV1CatalogRequest(rpcName string) bool {
 }
 
 func (s *Server) registerControllers(deps Deps, proxyUpdater ProxyUpdater) error {
-	hcpctl.RegisterControllers(
-		s.controllerManager, hcpctl.ControllerDependencies{
-			ResourceApisEnabled:    s.useV2Resources,
-			HCPAllowV2ResourceApis: s.hcpAllowV2Resources,
-			CloudConfig:            deps.HCP.Config,
-		},
-	)
+	if s.config.Cloud.IsConfigured() {
+		hcpctl.RegisterControllers(
+			s.controllerManager, hcpctl.ControllerDependencies{
+				ResourceApisEnabled:    s.useV2Resources,
+				HCPAllowV2ResourceApis: s.hcpAllowV2Resources,
+				CloudConfig:            deps.HCP.Config,
+			},
+		)
+	}
 
 	// When not enabled, the v1 tenancy bridge is used by default.
 	if s.useV2Tenancy {
@@ -2075,8 +2081,10 @@ func (s *Server) trackLeaderChanges() {
 			s.raftStorageBackend.LeaderChanged()
 			s.controllerManager.SetRaftLeader(s.IsLeader())
 
-			// Trigger sending an update to HCP status
-			s.hcpManager.SendUpdate()
+			if s.config.Cloud.IsConfigured() {
+				// Trigger sending an update to HCP status
+				s.hcpManager.SendUpdate()
+			}
 		case <-s.shutdownCh:
 			s.raft.DeregisterObserver(observer)
 			return
diff --git a/agent/consul/server_test.go b/agent/consul/server_test.go
index 19cbd56849..e685f25ca4 100644
--- a/agent/consul/server_test.go
+++ b/agent/consul/server_test.go
@@ -2100,6 +2100,9 @@ func TestServer_hcpManager(t *testing.T) {
 	// Configure the server for the StatusFn
 	conf1.BootstrapExpect = 1
 	conf1.RPCAdvertise = &net.TCPAddr{IP: []byte{127, 0, 0, 2}, Port: conf1.RPCAddr.Port}
+	conf1.Cloud.ClientID = "test-client-id"
+	conf1.Cloud.ResourceID = "test-resource-id"
+	conf1.Cloud.ClientSecret = "test-client-secret"
 	hcp1 := hcpclient.NewMockClient(t)
 	hcp1.EXPECT().PushServerStatus(mock.Anything, mock.MatchedBy(func(status *hcpclient.ServerStatus) bool {
 		return status.ID == string(conf1.NodeID)
diff --git a/agent/setup.go b/agent/setup.go
index 772cdfbcd5..e9103fb75a 100644
--- a/agent/setup.go
+++ b/agent/setup.go
@@ -138,15 +138,17 @@ func NewBaseDeps(configLoader ConfigLoader, logOut io.Writer, providedLogger hcl
 	cfg.Telemetry.PrometheusOpts.SummaryDefinitions = summaries
 
 	var extraSinks []metrics.MetricSink
-	// This values is set late within newNodeIDFromConfig above
-	cfg.Cloud.NodeID = cfg.NodeID
+	if cfg.IsCloudEnabled() {
+		// This value is set late within newNodeIDFromConfig above
+		cfg.Cloud.NodeID = cfg.NodeID
 
-	d.HCP, err = hcp.NewDeps(cfg.Cloud, d.Logger.Named("hcp"), cfg.DataDir)
-	if err != nil {
-		return d, err
-	}
-	if d.HCP.Sink != nil {
-		extraSinks = append(extraSinks, d.HCP.Sink)
+		d.HCP, err = hcp.NewDeps(cfg.Cloud, d.Logger.Named("hcp"), cfg.DataDir)
+		if err != nil {
+			return d, err
+		}
+		if d.HCP.Sink != nil {
+			extraSinks = append(extraSinks, d.HCP.Sink)
+		}
 	}
 
 	d.MetricsConfig, err = lib.InitTelemetry(cfg.Telemetry, d.Logger, extraSinks...)
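The hunks above all apply the same pattern: HCP components (SCADA listener, HCP manager, link watcher, controllers, HCP deps) are only wired up when a cloud stanza is present. Below is a minimal, self-contained Go sketch of that gating idea. The `CloudConfig` struct, its field set, the body of `IsConfigured`, and the `setupHCP` helper are illustrative assumptions only; the diff does not show how Consul's real `IsCloudEnabled` / `Cloud.IsConfigured` checks are implemented.

```go
package main

import "fmt"

// CloudConfig loosely mirrors the shape of an HCP cloud stanza. The field set
// and the body of IsConfigured are assumptions for illustration, not Consul's
// actual implementation.
type CloudConfig struct {
	ResourceID   string
	ClientID     string
	ClientSecret string
}

// IsConfigured reports whether a cloud stanza was supplied at all.
func (c CloudConfig) IsConfigured() bool {
	return c.ResourceID != ""
}

// setupHCP stands in for the gated wiring in the diff: SCADA listener,
// HCP manager, link watcher, and controllers are only set up when the
// configuration check passes.
func setupHCP(cfg CloudConfig) {
	if !cfg.IsConfigured() {
		// No cloud stanza: skip all HCP-related setup.
		return
	}
	fmt.Println("initializing HCP integration for", cfg.ResourceID)
}

func main() {
	setupHCP(CloudConfig{})                               // no-op
	setupHCP(CloudConfig{ResourceID: "test-resource-id"}) // performs HCP setup
}
```

This is also why the tests in agent_test.go and server_test.go now populate cloud credentials: without them, the gated code paths under test would simply be skipped.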
diff --git a/command/agent/agent.go b/command/agent/agent.go
index f1e0541145..b280fbf506 100644
--- a/command/agent/agent.go
+++ b/command/agent/agent.go
@@ -181,12 +181,9 @@ func (c *cmd) run(args []string) int {
 			ui.Error(err.Error())
 			return 1
 		}
-	}
 
-	// We unconditionally add an Access Control header to our config in order to allow the HCP UI to work.
-	// We do this unconditionally because the cluster can be linked to HCP at any time (not just at startup) and this
-	// is simpler than selectively reloading parts of config at runtime.
-	loader = hcpbootstrap.AddAclPolicyAccessControlHeader(loader)
+		loader = hcpbootstrap.AddAclPolicyAccessControlHeader(loader)
+	}
 
 	bd, err := agent.NewBaseDeps(loader, logGate, nil)
 	if err != nil {
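A consequence of gating construction is that the HCP manager can now be nil on servers without a cloud stanza, which is why the `trackLeaderChanges` hunk wraps `SendUpdate` in the same configuration check that guards construction. The sketch below illustrates that guard-at-every-use-site pattern; the `Manager` and `Server` types and their fields are hypothetical stand-ins, not Consul's actual definitions.

```go
package main

import "fmt"

// Manager stands in for the HCP manager. In the diff it is only constructed
// when the cloud configuration check passes, so it can be nil otherwise.
type Manager struct{}

func (m *Manager) SendUpdate() { fmt.Println("sent HCP status update") }

// Server is a hypothetical stand-in with only the fields needed here.
type Server struct {
	cloudConfigured bool
	hcpManager      *Manager // nil when no cloud stanza is configured
}

// onLeaderChange mirrors the gated call in trackLeaderChanges: every use of
// hcpManager is wrapped in the same check that guards its construction, so an
// unconfigured server never dereferences a nil manager.
func (s *Server) onLeaderChange() {
	if s.cloudConfigured {
		s.hcpManager.SendUpdate()
	}
}

func main() {
	(&Server{}).onLeaderChange() // no-op, no nil dereference
	(&Server{cloudConfigured: true, hcpManager: &Manager{}}).onLeaderChange()
}
```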