v2: remove HCP Link integration (#21883)

Also prevent de-registered, retired v2 types from being restored from a
snapshot, such as these HCP resources. Without this guard, anyone with
any of these types in their state store would retain them forever with no
way to remove them.
R.B. Boyer authored 2 weeks ago; committed by GitHub
parent 32515c77f2
commit a2e69236a2

@@ -0,0 +1,3 @@
```release-note:feature
v2: remove HCP Link integration
```
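The snapshot-restore guard described in the commit message is not part of the hunks shown below. As a rough illustration only, here is a minimal Go sketch of the idea, with every identifier (`Registry`, `restoreResources`, and so on) invented for the example rather than taken from Consul's actual FSM or resource-registry code: on restore, resources whose type is no longer registered are skipped instead of being written back into the state store.

```go
// Hypothetical sketch of skipping de-registered v2 types during a
// snapshot restore. None of these names exist in Consul; they only
// illustrate the behavior described in the commit message.
package main

import "fmt"

// Type identifies a v2 resource type (simplified stand-in for pbresource.Type).
type Type struct {
	Group string
	Kind  string
}

// Registry knows which types are still registered (stand-in for resource.Registry).
type Registry struct {
	known map[Type]bool
}

func (r *Registry) Registered(t Type) bool { return r.known[t] }

// Resource is a simplified stand-in for a stored v2 resource.
type Resource struct {
	Type Type
	Name string
}

// restoreResources copies snapshot resources into the new store, skipping
// any whose type has been de-registered (e.g. retired hcp/v2 types), so
// they are not resurrected with no way to remove them.
func restoreResources(reg *Registry, snapshot []Resource) []Resource {
	var restored []Resource
	for _, res := range snapshot {
		if !reg.Registered(res.Type) {
			fmt.Printf("skipping de-registered type %s/%s: %s\n",
				res.Type.Group, res.Type.Kind, res.Name)
			continue
		}
		restored = append(restored, res)
	}
	return restored
}

func main() {
	reg := &Registry{known: map[Type]bool{
		{Group: "demo", Kind: "Artist"}: true,
	}}
	snapshot := []Resource{
		{Type: Type{Group: "demo", Kind: "Artist"}, Name: "ok"},
		{Type: Type{Group: "hcp", Kind: "Link"}, Name: "global"},
	}
	fmt.Println(restoreResources(reg, snapshot))
}
```

In the real server the same effect would presumably come from consulting the live type registry during restore, so retired types such as the hcp/v2 Link and TelemetryState simply never re-enter the state store.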

@@ -53,7 +53,6 @@ import (
"github.com/hashicorp/consul/agent/consul/xdscapacity"
"github.com/hashicorp/consul/agent/grpc-external/services/peerstream"
"github.com/hashicorp/consul/agent/hcp"
-"github.com/hashicorp/consul/agent/hcp/bootstrap"
hcpclient "github.com/hashicorp/consul/agent/hcp/client"
logdrop "github.com/hashicorp/consul/agent/log-drop"
"github.com/hashicorp/consul/agent/metadata"
@@ -65,7 +64,6 @@ import (
"github.com/hashicorp/consul/agent/token"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/gossip/librtt"
-hcpctl "github.com/hashicorp/consul/internal/hcp"
"github.com/hashicorp/consul/internal/multicluster"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/demo"
@@ -838,25 +836,6 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server,
// to enable RPC forwarding.
s.grpcLeaderForwarder = flat.LeaderForwarder
-if s.config.Cloud.IsConfigured() {
-// Start watching HCP Link resource. This needs to be created after
-// the GRPC services are set up in order for the resource service client to
-// function. This uses the insecure grpc channel so that it doesn't need to
-// present a valid ACL token.
-go hcp.RunHCPLinkWatcher(
-&lib.StopChannelContext{StopCh: shutdownCh},
-logger.Named("hcp-link-watcher"),
-pbresource.NewResourceServiceClient(s.insecureSafeGRPCChan),
-hcp.HCPManagerLifecycleFn(
-s.hcpManager,
-hcpclient.NewClient,
-bootstrap.LoadManagementToken,
-flat.HCP.Config,
-flat.HCP.DataDir,
-),
-)
-}
s.controllerManager = controller.NewManager(
// Usage of the insecure + unsafe grpc chan is required for the controller
// manager. It must be unauthorized so that controllers do not need to
@@ -928,15 +907,7 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server,
return s, nil
}
-func (s *Server) registerControllers(deps Deps) error {
+func (s *Server) registerControllers(_ Deps) error {
-if s.config.Cloud.IsConfigured() {
-hcpctl.RegisterControllers(
-s.controllerManager, hcpctl.ControllerDependencies{
-CloudConfig: deps.HCP.Config,
-},
-)
-}
shim := NewExportedServicesShim(s)
multicluster.RegisterCompatControllers(s.controllerManager, multicluster.DefaultCompatControllerDependencies(shim))

@@ -7,8 +7,6 @@ flowchart TD
demo/v1/recordlabel
demo/v2/album
demo/v2/artist
-hcp/v2/link
-hcp/v2/telemetrystate
internal/v1/tombstone
multicluster/v2/computedexportedservices --> multicluster/v2/exportedservices
multicluster/v2/computedexportedservices --> multicluster/v2/namespaceexportedservices

@@ -4,7 +4,6 @@
package consul
import (
-"github.com/hashicorp/consul/internal/hcp"
"github.com/hashicorp/consul/internal/multicluster"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/demo"
@@ -22,7 +21,6 @@ func NewTypeRegistry() resource.Registry {
demo.RegisterTypes(registry)
multicluster.RegisterTypes(registry)
-hcp.RegisterTypes(registry)
return registry
}

@@ -1,68 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package hcp
import (
"context"
"time"
"github.com/hashicorp/go-hclog"
hcpctl "github.com/hashicorp/consul/internal/hcp"
"github.com/hashicorp/consul/lib/retry"
pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
"github.com/hashicorp/consul/proto-public/pbresource"
)
type LinkEventHandler = func(context.Context, hclog.Logger, *pbresource.WatchEvent)
func handleLinkEvents(ctx context.Context, logger hclog.Logger, watchClient pbresource.ResourceService_WatchListClient, linkEventHandler LinkEventHandler) {
for {
select {
case <-ctx.Done():
logger.Debug("context canceled, exiting")
return
default:
watchEvent, err := watchClient.Recv()
if err != nil {
logger.Error("error receiving link watch event", "error", err)
return
}
linkEventHandler(ctx, logger, watchEvent)
}
}
}
func RunHCPLinkWatcher(
ctx context.Context, logger hclog.Logger, client pbresource.ResourceServiceClient, linkEventHandler LinkEventHandler,
) {
errorBackoff := &retry.Waiter{
MinFailures: 10,
MinWait: 0,
MaxWait: 1 * time.Minute,
}
for {
select {
case <-ctx.Done():
logger.Debug("context canceled, exiting")
return
default:
watchClient, err := client.WatchList(
ctx, &pbresource.WatchListRequest{
Type: pbhcp.LinkType,
NamePrefix: hcpctl.LinkName,
},
)
if err != nil {
logger.Error("failed to create watch on Link", "error", err)
errorBackoff.Wait(ctx)
continue
}
errorBackoff.Reset()
handleLinkEvents(ctx, logger, watchClient, linkEventHandler)
}
}
}

@@ -1,101 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package hcp
import (
"context"
"errors"
"testing"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"github.com/hashicorp/go-hclog"
mockpbresource "github.com/hashicorp/consul/grpcmocks/proto-public/pbresource"
hcpctl "github.com/hashicorp/consul/internal/hcp"
pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
"github.com/hashicorp/consul/proto-public/pbresource"
)
// This tests that when we get a watch event from the Recv call, we get that same event on the
// output channel, then we
func TestLinkWatcher_Ok(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)
testWatchEvent := &pbresource.WatchEvent{}
mockWatchListClient := mockpbresource.NewResourceService_WatchListClient(t)
mockWatchListClient.EXPECT().Recv().Return(testWatchEvent, nil)
eventCh := make(chan *pbresource.WatchEvent)
mockLinkHandler := func(_ context.Context, _ hclog.Logger, event *pbresource.WatchEvent) {
eventCh <- event
}
client := mockpbresource.NewResourceServiceClient(t)
client.EXPECT().WatchList(mock.Anything, &pbresource.WatchListRequest{
Type: pbhcp.LinkType,
NamePrefix: hcpctl.LinkName,
}).Return(mockWatchListClient, nil)
go RunHCPLinkWatcher(ctx, hclog.Default(), client, mockLinkHandler)
// Assert that the link handler is called with the testWatchEvent
receivedWatchEvent := <-eventCh
require.Equal(t, testWatchEvent, receivedWatchEvent)
}
func TestLinkWatcher_RecvError(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
// Our mock WatchListClient will simulate 5 errors, then will cancel the context.
// We expect RunHCPLinkWatcher to attempt to create the WatchListClient 6 times (initial attempt plus 5 retries)
// before exiting due to context cancellation.
mockWatchListClient := mockpbresource.NewResourceService_WatchListClient(t)
numFailures := 5
failures := 0
mockWatchListClient.EXPECT().Recv().RunAndReturn(func() (*pbresource.WatchEvent, error) {
if failures < numFailures {
failures++
return nil, errors.New("unexpectedError")
}
defer cancel()
return &pbresource.WatchEvent{}, nil
})
client := mockpbresource.NewResourceServiceClient(t)
client.EXPECT().WatchList(mock.Anything, &pbresource.WatchListRequest{
Type: pbhcp.LinkType,
NamePrefix: hcpctl.LinkName,
}).Return(mockWatchListClient, nil).Times(numFailures + 1)
RunHCPLinkWatcher(ctx, hclog.Default(), client, func(_ context.Context, _ hclog.Logger, _ *pbresource.WatchEvent) {})
}
func TestLinkWatcher_WatchListError(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
// Our mock WatchList will simulate 5 errors, then will cancel the context.
// We expect RunHCPLinkWatcher to attempt to create the WatchListClient 6 times (initial attempt plus 5 retries)
// before exiting due to context cancellation.
numFailures := 5
failures := 0
client := mockpbresource.NewResourceServiceClient(t)
client.EXPECT().WatchList(mock.Anything, &pbresource.WatchListRequest{
Type: pbhcp.LinkType,
NamePrefix: hcpctl.LinkName,
}).RunAndReturn(func(_ context.Context, _ *pbresource.WatchListRequest, _ ...grpc.CallOption) (pbresource.ResourceService_WatchListClient, error) {
if failures < numFailures {
failures++
return nil, errors.New("unexpectedError")
}
defer cancel()
return mockpbresource.NewResourceService_WatchListClient(t), nil
}).Times(numFailures + 1)
RunHCPLinkWatcher(ctx, hclog.Default(), client, func(_ context.Context, _ hclog.Logger, _ *pbresource.WatchEvent) {})
}

@@ -1,107 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package hcp
import (
"context"
"os"
"path/filepath"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/consul/agent/hcp/bootstrap/constants"
hcpclient "github.com/hashicorp/consul/agent/hcp/client"
"github.com/hashicorp/consul/agent/hcp/config"
hcpctl "github.com/hashicorp/consul/internal/hcp"
pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
"github.com/hashicorp/consul/proto-public/pbresource"
)
// HCPManagerLifecycleFn returns a LinkEventHandler function which will appropriately
// Start and Stop the HCP Manager based on the Link event received. If a link is upserted,
// the HCP Manager is started, and if a link is deleted, the HCP manager is stopped.
func HCPManagerLifecycleFn(
m Manager,
hcpClientFn func(cfg config.CloudConfig) (hcpclient.Client, error),
loadMgmtTokenFn func(
ctx context.Context, logger hclog.Logger, hcpClient hcpclient.Client, dataDir string,
) (string, error),
cloudConfig config.CloudConfig,
dataDir string,
) LinkEventHandler {
return func(ctx context.Context, logger hclog.Logger, watchEvent *pbresource.WatchEvent) {
// This indicates that a Link was deleted
if watchEvent.GetDelete() != nil {
logger.Debug("HCP Link deleted, stopping HCP manager")
if dataDir != "" {
hcpConfigDir := filepath.Join(dataDir, constants.SubDir)
logger.Debug("deleting hcp-config dir", "dir", hcpConfigDir)
err := os.RemoveAll(hcpConfigDir)
if err != nil {
logger.Error("failed to delete hcp-config dir", "dir", hcpConfigDir, "err", err)
}
}
err := m.Stop()
if err != nil {
logger.Error("error stopping HCP manager", "error", err)
}
return
}
// This indicates that a Link was either created or updated
if watchEvent.GetUpsert() != nil {
logger.Debug("HCP Link upserted, starting manager if not already started")
res := watchEvent.GetUpsert().GetResource()
var link pbhcp.Link
if err := res.GetData().UnmarshalTo(&link); err != nil {
logger.Error("error unmarshalling link data", "error", err)
return
}
if validated, reason := hcpctl.IsValidated(res); !validated {
logger.Debug("HCP Link not validated, not starting manager", "reason", reason)
return
}
// Update the HCP manager configuration with the link values
// Merge the link data with the existing cloud config so that we only overwrite the
// fields that are provided by the link. This ensures that:
// 1. The HCP configuration (i.e., how to connect to HCP) is preserved
// 2. The Consul agent's node ID and node name are preserved
newCfg := config.CloudConfig{
ResourceID: link.ResourceId,
ClientID: link.ClientId,
ClientSecret: link.ClientSecret,
}
mergedCfg := config.Merge(cloudConfig, newCfg)
hcpClient, err := hcpClientFn(mergedCfg)
if err != nil {
logger.Error("error creating HCP client", "error", err)
return
}
// Load the management token if access is set to read-write. Read-only clusters
// will not have a management token provided by HCP.
var token string
if link.GetAccessLevel() == pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_WRITE {
token, err = loadMgmtTokenFn(ctx, logger, hcpClient, dataDir)
if err != nil {
logger.Error("error loading management token", "error", err)
return
}
}
mergedCfg.ManagementToken = token
m.UpdateConfig(hcpClient, mergedCfg)
err = m.Start(ctx)
if err != nil {
logger.Error("error starting HCP manager", "error", err)
}
}
}
}

@@ -1,236 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package hcp
import (
"context"
"errors"
"io"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/types/known/anypb"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/consul/agent/hcp/bootstrap/constants"
hcpclient "github.com/hashicorp/consul/agent/hcp/client"
"github.com/hashicorp/consul/agent/hcp/config"
hcpctl "github.com/hashicorp/consul/internal/hcp"
pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/sdk/testutil"
)
func TestHCPManagerLifecycleFn(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)
logger := hclog.New(&hclog.LoggerOptions{Output: io.Discard})
mockHCPClient := hcpclient.NewMockClient(t)
mockHcpClientFn := func(_ config.CloudConfig) (hcpclient.Client, error) {
return mockHCPClient, nil
}
mockLoadMgmtTokenFn := func(ctx context.Context, logger hclog.Logger, hcpClient hcpclient.Client, dataDir string) (string, error) {
return "test-mgmt-token", nil
}
dataDir := testutil.TempDir(t, "test-link-controller")
err := os.Mkdir(filepath.Join(dataDir, constants.SubDir), os.ModeDir)
require.NoError(t, err)
existingCfg := config.CloudConfig{
AuthURL: "test.com",
}
type testCase struct {
mutateLink func(*pbhcp.Link)
mutateUpsertEvent func(*pbresource.WatchEvent_Upsert)
applyMocksAndAssertions func(*testing.T, *MockManager, *pbhcp.Link)
hcpClientFn func(config.CloudConfig) (hcpclient.Client, error)
loadMgmtTokenFn func(context.Context, hclog.Logger, hcpclient.Client, string) (string, error)
}
testCases := map[string]testCase{
// HCP manager should be started when link is created and stopped when link is deleted
"Ok": {
applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) {
mgr.EXPECT().Start(mock.Anything).Return(nil).Once()
expectedCfg := config.CloudConfig{
ResourceID: link.ResourceId,
ClientID: link.ClientId,
ClientSecret: link.ClientSecret,
AuthURL: "test.com",
ManagementToken: "test-mgmt-token",
}
mgr.EXPECT().UpdateConfig(mockHCPClient, expectedCfg).Once()
mgr.EXPECT().Stop().Return(nil).Once()
},
},
// HCP manager should not be updated with management token
"ReadOnly": {
mutateLink: func(link *pbhcp.Link) {
link.AccessLevel = pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_ONLY
},
applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) {
mgr.EXPECT().Start(mock.Anything).Return(nil).Once()
expectedCfg := config.CloudConfig{
ResourceID: link.ResourceId,
ClientID: link.ClientId,
ClientSecret: link.ClientSecret,
AuthURL: "test.com",
ManagementToken: "",
}
mgr.EXPECT().UpdateConfig(mockHCPClient, expectedCfg).Once()
mgr.EXPECT().Stop().Return(nil).Once()
},
},
// HCP manager should not be started or updated if link is not validated
"ValidationError": {
mutateUpsertEvent: func(upsert *pbresource.WatchEvent_Upsert) {
upsert.Resource.Status = map[string]*pbresource.Status{
hcpctl.StatusKey: {
Conditions: []*pbresource.Condition{hcpctl.ConditionValidatedFailed},
},
}
},
applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) {
mgr.AssertNotCalled(t, "Start", mock.Anything)
mgr.AssertNotCalled(t, "UpdateConfig", mock.Anything, mock.Anything)
mgr.EXPECT().Stop().Return(nil).Once()
},
},
"Error_InvalidLink": {
mutateUpsertEvent: func(upsert *pbresource.WatchEvent_Upsert) {
upsert.Resource = nil
},
applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) {
mgr.AssertNotCalled(t, "Start", mock.Anything)
mgr.AssertNotCalled(t, "UpdateConfig", mock.Anything, mock.Anything)
mgr.EXPECT().Stop().Return(nil).Once()
},
},
"Error_HCPManagerStop": {
applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) {
mgr.EXPECT().Start(mock.Anything).Return(nil).Once()
mgr.EXPECT().UpdateConfig(mock.Anything, mock.Anything).Return().Once()
mgr.EXPECT().Stop().Return(errors.New("could not stop HCP manager")).Once()
},
},
"Error_CreatingHCPClient": {
applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) {
mgr.AssertNotCalled(t, "Start", mock.Anything)
mgr.AssertNotCalled(t, "UpdateConfig", mock.Anything, mock.Anything)
mgr.EXPECT().Stop().Return(nil).Once()
},
hcpClientFn: func(_ config.CloudConfig) (hcpclient.Client, error) {
return nil, errors.New("could not create HCP client")
},
},
// This should result in the HCP manager not being started
"Error_LoadMgmtToken": {
applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) {
mgr.AssertNotCalled(t, "Start", mock.Anything)
mgr.AssertNotCalled(t, "UpdateConfig", mock.Anything, mock.Anything)
mgr.EXPECT().Stop().Return(nil).Once()
},
loadMgmtTokenFn: func(ctx context.Context, logger hclog.Logger, hcpClient hcpclient.Client, dataDir string) (string, error) {
return "", errors.New("could not load management token")
},
},
"Error_HCPManagerStart": {
applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) {
mgr.EXPECT().Start(mock.Anything).Return(errors.New("could not start HCP manager")).Once()
mgr.EXPECT().UpdateConfig(mock.Anything, mock.Anything).Return().Once()
mgr.EXPECT().Stop().Return(nil).Once()
},
},
}
for name, test := range testCases {
t.Run(name, func(t2 *testing.T) {
mgr := NewMockManager(t2)
// Set up a link
link := pbhcp.Link{
ResourceId: "abc",
ClientId: "def",
ClientSecret: "ghi",
AccessLevel: pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_WRITE,
}
if test.mutateLink != nil {
test.mutateLink(&link)
}
linkResource, err := anypb.New(&link)
require.NoError(t2, err)
if test.applyMocksAndAssertions != nil {
test.applyMocksAndAssertions(t2, mgr, &link)
}
testHcpClientFn := mockHcpClientFn
if test.hcpClientFn != nil {
testHcpClientFn = test.hcpClientFn
}
testLoadMgmtToken := mockLoadMgmtTokenFn
if test.loadMgmtTokenFn != nil {
testLoadMgmtToken = test.loadMgmtTokenFn
}
updateManagerLifecycle := HCPManagerLifecycleFn(
mgr, testHcpClientFn,
testLoadMgmtToken, existingCfg, dataDir,
)
upsertEvent := &pbresource.WatchEvent_Upsert{
Resource: &pbresource.Resource{
Id: &pbresource.ID{
Name: "global",
Type: pbhcp.LinkType,
},
Status: map[string]*pbresource.Status{
hcpctl.StatusKey: {
Conditions: []*pbresource.Condition{hcpctl.ConditionValidatedSuccess},
},
},
Data: linkResource,
},
}
if test.mutateUpsertEvent != nil {
test.mutateUpsertEvent(upsertEvent)
}
// Handle upsert event
updateManagerLifecycle(ctx, logger, &pbresource.WatchEvent{
Event: &pbresource.WatchEvent_Upsert_{
Upsert: upsertEvent,
},
})
// Handle delete event. This should stop HCP manager
updateManagerLifecycle(ctx, logger, &pbresource.WatchEvent{
Event: &pbresource.WatchEvent_Delete_{
Delete: &pbresource.WatchEvent_Delete{},
},
})
// Ensure hcp-config directory is removed
file := filepath.Join(dataDir, constants.SubDir)
if _, err := os.Stat(file); err == nil || !os.IsNotExist(err) {
require.Fail(t2, "should have removed hcp-config directory")
}
})
}
}

@@ -12,75 +12,15 @@ import (
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
-"google.golang.org/protobuf/types/known/anypb"
"github.com/hashicorp/go-hclog"
hcpclient "github.com/hashicorp/consul/agent/hcp/client"
"github.com/hashicorp/consul/agent/hcp/config"
"github.com/hashicorp/consul/agent/hcp/scada"
-hcpctl "github.com/hashicorp/consul/internal/hcp"
-pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
-"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/sdk/testutil"
)
-func TestManager_MonitorHCPLink(t *testing.T) {
-ctx, cancel := context.WithCancel(context.Background())
-t.Cleanup(cancel)
-logger := hclog.New(&hclog.LoggerOptions{Output: io.Discard})
-mgr := NewManager(
-ManagerConfig{
-Logger: hclog.New(&hclog.LoggerOptions{Output: io.Discard}),
-},
-)
-mockHCPClient := hcpclient.NewMockClient(t)
-mockHcpClientFn := func(_ config.CloudConfig) (hcpclient.Client, error) {
-return mockHCPClient, nil
-}
-loadMgmtTokenFn := func(ctx context.Context, logger hclog.Logger, hcpClient hcpclient.Client, dataDir string) (string, error) {
-return "test-mgmt-token", nil
-}
-require.False(t, mgr.isRunning())
-updateManagerLifecycle := HCPManagerLifecycleFn(
-mgr, mockHcpClientFn,
-loadMgmtTokenFn, config.CloudConfig{}, "",
-)
-// Set up a link
-link := pbhcp.Link{
-ResourceId: "abc",
-ClientId: "def",
-ClientSecret: "ghi",
-AccessLevel: pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_WRITE,
-}
-linkResource, err := anypb.New(&link)
-require.NoError(t, err)
-updateManagerLifecycle(ctx, logger, &pbresource.WatchEvent{
-Event: &pbresource.WatchEvent_Upsert_{
-Upsert: &pbresource.WatchEvent_Upsert{
-Resource: &pbresource.Resource{
-Id: &pbresource.ID{
-Name: "global",
-Type: pbhcp.LinkType,
-},
-Status: map[string]*pbresource.Status{
-hcpctl.StatusKey: {
-Conditions: []*pbresource.Condition{hcpctl.ConditionValidatedSuccess},
-},
-},
-Data: linkResource,
-},
-},
-},
-})
-// Validate that the HCP manager is started
-require.True(t, mgr.isRunning())
-}
func TestManager_Start(t *testing.T) {
client := hcpclient.NewMockClient(t)
statusF := func(ctx context.Context) (hcpclient.ServerStatus, error) {

@@ -1,34 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package hcp
import (
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/hcp/internal/controllers"
"github.com/hashicorp/consul/internal/hcp/internal/controllers/link"
"github.com/hashicorp/consul/internal/hcp/internal/types"
"github.com/hashicorp/consul/internal/resource"
)
// RegisterTypes adds all resource types within the "hcp" API group
// to the given type registry
func RegisterTypes(r resource.Registry) {
types.Register(r)
}
type ControllerDependencies = controllers.Dependencies
var IsValidated = link.IsValidated
var LinkName = types.LinkName
// RegisterControllers registers controllers for the catalog types with
// the given controller Manager.
func RegisterControllers(mgr *controller.Manager, deps ControllerDependencies) {
controllers.Register(mgr, deps)
}
// Needed for testing
var StatusKey = link.StatusKey
var ConditionValidatedSuccess = link.ConditionValidatedSuccess
var ConditionValidatedFailed = link.ConditionValidatedFailed

@@ -1,234 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package link
import (
"context"
"crypto/tls"
"strings"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/anypb"
gnmmod "github.com/hashicorp/hcp-sdk-go/clients/cloud-global-network-manager-service/preview/2022-02-15/models"
hcpclient "github.com/hashicorp/consul/agent/hcp/client"
"github.com/hashicorp/consul/agent/hcp/config"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/hcp/internal/types"
"github.com/hashicorp/consul/internal/storage"
pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
"github.com/hashicorp/consul/proto-public/pbresource"
)
// HCPClientFn is a function that can be used to create an HCP client from a Link object.
// This function type should be passed to a LinkController in order to tell it how to make a client from
// a Link. For normal use, DefaultHCPClientFn should be used, but tests can substitute in a function that creates a
// mock client.
type HCPClientFn func(config.CloudConfig) (hcpclient.Client, error)
var DefaultHCPClientFn HCPClientFn = func(cfg config.CloudConfig) (hcpclient.Client, error) {
hcpClient, err := hcpclient.NewClient(cfg)
if err != nil {
return nil, err
}
return hcpClient, nil
}
func LinkController(
hcpClientFn HCPClientFn,
cfg config.CloudConfig,
) *controller.Controller {
return controller.NewController("link", pbhcp.LinkType).
WithInitializer(
&linkInitializer{
cloudConfig: cfg,
},
).
WithReconciler(
&linkReconciler{
hcpClientFn: hcpClientFn,
cloudConfig: cfg,
},
)
}
type linkReconciler struct {
hcpClientFn HCPClientFn
cloudConfig config.CloudConfig
}
func hcpAccessLevelToConsul(level *gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevel) pbhcp.AccessLevel {
if level == nil {
return pbhcp.AccessLevel_ACCESS_LEVEL_UNSPECIFIED
}
switch *level {
case gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevelCONSULACCESSLEVELUNSPECIFIED:
return pbhcp.AccessLevel_ACCESS_LEVEL_UNSPECIFIED
case gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevelCONSULACCESSLEVELGLOBALREADWRITE:
return pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_WRITE
case gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevelCONSULACCESSLEVELGLOBALREADONLY:
return pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_ONLY
default:
return pbhcp.AccessLevel_ACCESS_LEVEL_UNSPECIFIED
}
}
func (r *linkReconciler) Reconcile(ctx context.Context, rt controller.Runtime, req controller.Request) error {
// The runtime is passed by value so replacing it here for the remainder of this
// reconciliation request processing will not affect future invocations.
rt.Logger = rt.Logger.With("resource-id", req.ID, "controller", StatusKey)
rt.Logger.Trace("reconciling link")
rsp, err := rt.Client.Read(ctx, &pbresource.ReadRequest{Id: req.ID})
switch {
case status.Code(err) == codes.NotFound:
rt.Logger.Trace("link has been deleted")
return nil
case err != nil:
rt.Logger.Error("the resource service has returned an unexpected error", "error", err)
return err
}
res := rsp.Resource
var link pbhcp.Link
if err := res.Data.UnmarshalTo(&link); err != nil {
rt.Logger.Error("error unmarshalling link data", "error", err)
return err
}
newStatus := &pbresource.Status{
ObservedGeneration: res.Generation,
Conditions: []*pbresource.Condition{},
}
defer writeStatusIfNotEqual(ctx, rt, res, newStatus)
newStatus.Conditions = append(newStatus.Conditions, ConditionValidatedSuccess)
// Merge the link data with the existing cloud config so that we only overwrite the
// fields that are provided by the link. This ensures that:
// 1. The HCP configuration (i.e., how to connect to HCP) is preserved
// 2. The Consul agent's node ID and node name are preserved
newCfg := CloudConfigFromLink(&link)
cfg := config.Merge(r.cloudConfig, newCfg)
hcpClient, err := r.hcpClientFn(cfg)
if err != nil {
rt.Logger.Error("error creating HCP client", "error", err)
return err
}
// Sync cluster data from HCP
cluster, err := hcpClient.GetCluster(ctx)
if err != nil {
rt.Logger.Error("error querying HCP for cluster", "error", err)
condition := linkingFailedCondition(err)
newStatus.Conditions = append(newStatus.Conditions, condition)
return err
}
accessLevel := hcpAccessLevelToConsul(cluster.AccessLevel)
if link.HcpClusterUrl != cluster.HCPPortalURL ||
link.AccessLevel != accessLevel {
link.HcpClusterUrl = cluster.HCPPortalURL
link.AccessLevel = accessLevel
updatedData, err := anypb.New(&link)
if err != nil {
rt.Logger.Error("error marshalling link data", "error", err)
return err
}
_, err = rt.Client.Write(
ctx, &pbresource.WriteRequest{Resource: &pbresource.Resource{
Id: &pbresource.ID{
Name: types.LinkName,
Type: pbhcp.LinkType,
},
Metadata: res.Metadata,
Data: updatedData,
}},
)
if err != nil {
rt.Logger.Error("error updating link", "error", err)
return err
}
}
newStatus.Conditions = append(newStatus.Conditions, ConditionLinked(link.ResourceId))
return writeStatusIfNotEqual(ctx, rt, res, newStatus)
}
type linkInitializer struct {
cloudConfig config.CloudConfig
}
func (i *linkInitializer) Initialize(ctx context.Context, rt controller.Runtime) error {
if !i.cloudConfig.IsConfigured() {
return nil
}
// Construct a link resource to reflect the configuration
data, err := anypb.New(
&pbhcp.Link{
ResourceId: i.cloudConfig.ResourceID,
ClientId: i.cloudConfig.ClientID,
ClientSecret: i.cloudConfig.ClientSecret,
},
)
if err != nil {
return err
}
// Create the link resource for a configuration-based link
_, err = rt.Client.Write(
ctx,
&pbresource.WriteRequest{
Resource: &pbresource.Resource{
Id: &pbresource.ID{
Name: types.LinkName,
Type: pbhcp.LinkType,
},
Metadata: map[string]string{
types.MetadataSourceKey: types.MetadataSourceConfig,
},
Data: data,
},
},
)
if err != nil {
if strings.Contains(err.Error(), storage.ErrWrongUid.Error()) ||
strings.Contains(err.Error(), "leader unknown") {
// If the error is likely ignorable and could eventually resolve itself,
// log it as TRACE rather than ERROR.
rt.Logger.Trace("error initializing controller", "error", err)
} else {
rt.Logger.Error("error initializing controller", "error", err)
}
return err
}
return nil
}
func CloudConfigFromLink(link *pbhcp.Link) config.CloudConfig {
var cfg config.CloudConfig
if link == nil {
return cfg
}
cfg = config.CloudConfig{
ResourceID: link.GetResourceId(),
ClientID: link.GetClientId(),
ClientSecret: link.GetClientSecret(),
}
if link.GetHcpConfig() != nil {
cfg.AuthURL = link.GetHcpConfig().GetAuthUrl()
cfg.ScadaAddress = link.GetHcpConfig().GetScadaAddress()
cfg.Hostname = link.GetHcpConfig().GetApiAddress()
cfg.TLSConfig = &tls.Config{InsecureSkipVerify: link.GetHcpConfig().GetTlsInsecureSkipVerify()}
}
return cfg
}

@@ -1,248 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package link
import (
"context"
"fmt"
"testing"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
gnmmod "github.com/hashicorp/hcp-sdk-go/clients/cloud-global-network-manager-service/preview/2022-02-15/models"
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
hcpclient "github.com/hashicorp/consul/agent/hcp/client"
"github.com/hashicorp/consul/agent/hcp/config"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/hcp/internal/types"
rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/sdk/testutil"
)
type controllerSuite struct {
suite.Suite
ctx context.Context
client *rtest.Client
rt controller.Runtime
tenancies []*pbresource.Tenancy
}
func mockHcpClientFn(t *testing.T) (*hcpclient.MockClient, HCPClientFn) {
mockClient := hcpclient.NewMockClient(t)
mockClientFunc := func(config config.CloudConfig) (hcpclient.Client, error) {
return mockClient, nil
}
return mockClient, mockClientFunc
}
func (suite *controllerSuite) SetupTest() {
suite.ctx = testutil.TestContext(suite.T())
suite.tenancies = rtest.TestTenancies()
client := svctest.NewResourceServiceBuilder().
WithRegisterFns(types.Register).
WithTenancies(suite.tenancies...).
Run(suite.T())
suite.rt = controller.Runtime{
Client: client,
Logger: testutil.Logger(suite.T()),
}
suite.client = rtest.NewClient(client)
}
func TestLinkController(t *testing.T) {
suite.Run(t, new(controllerSuite))
}
func (suite *controllerSuite) deleteResourceFunc(id *pbresource.ID) func() {
return func() {
suite.client.MustDelete(suite.T(), id)
suite.client.WaitForDeletion(suite.T(), id)
}
}
func (suite *controllerSuite) TestController_Ok() {
// Run the controller manager
mgr := controller.NewManager(suite.client, suite.rt.Logger)
mockClient, mockClientFn := mockHcpClientFn(suite.T())
readWrite := gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevelCONSULACCESSLEVELGLOBALREADWRITE
mockClient.EXPECT().GetCluster(mock.Anything).Return(&hcpclient.Cluster{
HCPPortalURL: "http://test.com",
AccessLevel: &readWrite,
}, nil)
mgr.Register(LinkController(
mockClientFn,
config.CloudConfig{},
))
mgr.SetRaftLeader(true)
go mgr.Run(suite.ctx)
linkData := &pbhcp.Link{
ClientId: "abc",
ClientSecret: "abc",
ResourceId: types.GenerateTestResourceID(suite.T()),
}
link := rtest.Resource(pbhcp.LinkType, "global").
WithData(suite.T(), linkData).
Write(suite.T(), suite.client)
suite.T().Cleanup(suite.deleteResourceFunc(link.Id))
suite.client.WaitForStatusCondition(suite.T(), link.Id, StatusKey, ConditionLinked(linkData.ResourceId))
var updatedLink pbhcp.Link
updatedLinkResource := suite.client.WaitForNewVersion(suite.T(), link.Id, link.Version)
require.NoError(suite.T(), updatedLinkResource.Data.UnmarshalTo(&updatedLink))
require.Equal(suite.T(), "http://test.com", updatedLink.HcpClusterUrl)
require.Equal(suite.T(), pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_WRITE, updatedLink.AccessLevel)
}
func (suite *controllerSuite) TestController_Initialize() {
// Run the controller manager with a configured link
mgr := controller.NewManager(suite.client, suite.rt.Logger)
mockClient, mockClientFn := mockHcpClientFn(suite.T())
readOnly := gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevelCONSULACCESSLEVELGLOBALREADONLY
mockClient.EXPECT().GetCluster(mock.Anything).Return(&hcpclient.Cluster{
HCPPortalURL: "http://test.com",
AccessLevel: &readOnly,
}, nil)
cloudCfg := config.CloudConfig{
ClientID: "client-id-abc",
ClientSecret: "client-secret-abc",
ResourceID: types.GenerateTestResourceID(suite.T()),
}
mgr.Register(LinkController(
mockClientFn,
cloudCfg,
))
mgr.SetRaftLeader(true)
go mgr.Run(suite.ctx)
// Wait for link to be created by initializer
id := &pbresource.ID{
Type: pbhcp.LinkType,
Name: types.LinkName,
}
suite.T().Cleanup(suite.deleteResourceFunc(id))
r := suite.client.WaitForResourceExists(suite.T(), id)
// Check that created link has expected values
var link pbhcp.Link
err := r.Data.UnmarshalTo(&link)
require.NoError(suite.T(), err)
require.Equal(suite.T(), cloudCfg.ResourceID, link.ResourceId)
require.Equal(suite.T(), cloudCfg.ClientID, link.ClientId)
require.Equal(suite.T(), cloudCfg.ClientSecret, link.ClientSecret)
require.Equal(suite.T(), types.MetadataSourceConfig, r.Metadata[types.MetadataSourceKey])
// Wait for link to be connected successfully
suite.client.WaitForStatusCondition(suite.T(), id, StatusKey, ConditionLinked(link.ResourceId))
}
func (suite *controllerSuite) TestController_GetClusterError() {
type testCase struct {
expectErr error
expectCondition *pbresource.Condition
}
tt := map[string]testCase{
"unexpected": {
expectErr: fmt.Errorf("error"),
expectCondition: ConditionFailed,
},
"unauthorized": {
expectErr: hcpclient.ErrUnauthorized,
expectCondition: ConditionUnauthorized,
},
"forbidden": {
expectErr: hcpclient.ErrForbidden,
expectCondition: ConditionForbidden,
},
}
for name, tc := range tt {
suite.T().Run(name, func(t *testing.T) {
// Run the controller manager
mgr := controller.NewManager(suite.client, suite.rt.Logger)
mockClient, mockClientFunc := mockHcpClientFn(t)
mockClient.EXPECT().GetCluster(mock.Anything).Return(nil, tc.expectErr)
mgr.Register(LinkController(
mockClientFunc,
config.CloudConfig{},
))
mgr.SetRaftLeader(true)
ctx, cancel := context.WithCancel(suite.ctx)
t.Cleanup(cancel)
go mgr.Run(ctx)
linkData := &pbhcp.Link{
ClientId: "abc",
ClientSecret: "abc",
ResourceId: types.GenerateTestResourceID(t),
}
link := rtest.Resource(pbhcp.LinkType, "global").
WithData(t, linkData).
Write(t, suite.client)
t.Cleanup(suite.deleteResourceFunc(link.Id))
suite.client.WaitForStatusCondition(t, link.Id, StatusKey, tc.expectCondition)
})
}
}
func Test_hcpAccessModeToConsul(t *testing.T) {
type testCase struct {
hcpAccessLevel *gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevel
consulAccessLevel pbhcp.AccessLevel
}
tt := map[string]testCase{
"unspecified": {
hcpAccessLevel: func() *gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevel {
t := gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevelCONSULACCESSLEVELUNSPECIFIED
return &t
}(),
consulAccessLevel: pbhcp.AccessLevel_ACCESS_LEVEL_UNSPECIFIED,
},
"invalid": {
hcpAccessLevel: nil,
consulAccessLevel: pbhcp.AccessLevel_ACCESS_LEVEL_UNSPECIFIED,
},
"read_only": {
hcpAccessLevel: func() *gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevel {
t := gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevelCONSULACCESSLEVELGLOBALREADONLY
return &t
}(),
consulAccessLevel: pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_ONLY,
},
"read_write": {
hcpAccessLevel: func() *gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevel {
t := gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevelCONSULACCESSLEVELGLOBALREADWRITE
return &t
}(),
consulAccessLevel: pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_WRITE,
},
}
for name, tc := range tt {
t.Run(name, func(t *testing.T) {
accessLevel := hcpAccessLevelToConsul(tc.hcpAccessLevel)
require.Equal(t, tc.consulAccessLevel, accessLevel)
})
}
}

@@ -1,142 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package link
import (
"context"
"errors"
"fmt"
"github.com/hashicorp/consul/agent/hcp/client"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/resource"
pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
"github.com/hashicorp/consul/proto-public/pbresource"
)
const (
StatusKey = "consul.io/hcp/link"
// Statuses
StatusLinked = "linked"
StatusValidated = "validated"
LinkedSuccessReason = "SUCCESS"
LinkedFailedReason = "FAILED"
LinkedDisabledReasonV2ResourcesUnsupportedReason = "DISABLED_V2_RESOURCES_UNSUPPORTED"
LinkedUnauthorizedReason = "UNAUTHORIZED"
LinkedForbiddenReason = "FORBIDDEN"
ValidatedSuccessReason = "SUCCESS"
ValidatedFailedV2ResourcesReason = "V2_RESOURCES_UNSUPPORTED"
LinkedMessageFormat = "Successfully linked to cluster '%s'"
FailedMessage = "Failed to link to HCP due to unexpected error"
DisabledResourceAPIsEnabledMessage = "Link is disabled because resource-apis are enabled"
UnauthorizedMessage = "Access denied, check client_id and client_secret"
ForbiddenMessage = "Access denied, check the resource_id"
ValidatedSuccessMessage = "Successfully validated link"
ValidatedFailedV2ResourcesMessage = "Link is disabled because resource-apis are enabled"
)
var (
ConditionDisabled = &pbresource.Condition{
Type: StatusLinked,
State: pbresource.Condition_STATE_FALSE,
Reason: LinkedDisabledReasonV2ResourcesUnsupportedReason,
Message: DisabledResourceAPIsEnabledMessage,
}
ConditionFailed = &pbresource.Condition{
Type: StatusLinked,
State: pbresource.Condition_STATE_FALSE,
Reason: LinkedFailedReason,
Message: FailedMessage,
}
ConditionUnauthorized = &pbresource.Condition{
Type: StatusLinked,
State: pbresource.Condition_STATE_FALSE,
Reason: LinkedUnauthorizedReason,
Message: UnauthorizedMessage,
}
ConditionForbidden = &pbresource.Condition{
Type: StatusLinked,
State: pbresource.Condition_STATE_FALSE,
Reason: LinkedForbiddenReason,
Message: ForbiddenMessage,
}
ConditionValidatedSuccess = &pbresource.Condition{
Type: StatusValidated,
State: pbresource.Condition_STATE_TRUE,
Reason: ValidatedSuccessReason,
Message: ValidatedSuccessMessage,
}
ConditionValidatedFailed = &pbresource.Condition{
Type: StatusValidated,
State: pbresource.Condition_STATE_FALSE,
Reason: ValidatedFailedV2ResourcesReason,
Message: ValidatedFailedV2ResourcesMessage,
}
)
func ConditionLinked(resourceId string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusLinked,
State: pbresource.Condition_STATE_TRUE,
Reason: LinkedSuccessReason,
Message: fmt.Sprintf(LinkedMessageFormat, resourceId),
}
}
func writeStatusIfNotEqual(ctx context.Context, rt controller.Runtime, res *pbresource.Resource, status *pbresource.Status) error {
if resource.EqualStatus(res.Status[StatusKey], status, false) {
return nil
}
_, err := rt.Client.WriteStatus(
ctx, &pbresource.WriteStatusRequest{
Id: res.Id,
Key: StatusKey,
Status: status,
},
)
if err != nil {
rt.Logger.Error("error writing link status", "error", err)
}
return err
}
func linkingFailedCondition(err error) *pbresource.Condition {
switch {
case errors.Is(err, client.ErrUnauthorized):
return ConditionUnauthorized
case errors.Is(err, client.ErrForbidden):
return ConditionForbidden
default:
return ConditionFailed
}
}
func IsLinked(res *pbresource.Resource) (linked bool, reason string) {
return isConditionTrue(res, StatusLinked)
}
func IsValidated(res *pbresource.Resource) (linked bool, reason string) {
return isConditionTrue(res, StatusValidated)
}
func isConditionTrue(res *pbresource.Resource, statusType string) (bool, string) {
if !resource.EqualType(res.GetId().GetType(), pbhcp.LinkType) {
return false, "resource is not hcp.Link type"
}
linkStatus, ok := res.GetStatus()[StatusKey]
if !ok {
return false, "link status not set"
}
for _, cond := range linkStatus.GetConditions() {
if cond.Type == statusType && cond.GetState() == pbresource.Condition_STATE_TRUE {
return true, ""
}
}
return false, fmt.Sprintf("link status does not include positive %s condition", statusType)
}

@@ -1,26 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package controllers
import (
"github.com/hashicorp/consul/agent/hcp/config"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/hcp/internal/controllers/link"
"github.com/hashicorp/consul/internal/hcp/internal/controllers/telemetrystate"
)
type Dependencies struct {
CloudConfig config.CloudConfig
}
func Register(mgr *controller.Manager, deps Dependencies) {
mgr.Register(
link.LinkController(
link.DefaultHCPClientFn,
deps.CloudConfig,
),
)
mgr.Register(telemetrystate.TelemetryStateController(link.DefaultHCPClientFn))
}

@@ -1,203 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package telemetrystate
import (
"context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/controller/dependency"
"github.com/hashicorp/consul/internal/hcp/internal/controllers/link"
"github.com/hashicorp/consul/internal/hcp/internal/types"
"github.com/hashicorp/consul/internal/resource"
pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
"github.com/hashicorp/consul/proto-public/pbresource"
)
var (
globalID = &pbresource.ID{
Name: "global",
Type: pbhcp.TelemetryStateType,
Tenancy: &pbresource.Tenancy{},
}
)
const MetaKeyDebugSkipDeletion = StatusKey + "/debug/skip-deletion"
func TelemetryStateController(hcpClientFn link.HCPClientFn) *controller.Controller {
return controller.NewController(StatusKey, pbhcp.TelemetryStateType).
WithWatch(pbhcp.LinkType, dependency.ReplaceType(pbhcp.TelemetryStateType)).
WithReconciler(&telemetryStateReconciler{
hcpClientFn: hcpClientFn,
})
}
type telemetryStateReconciler struct {
hcpClientFn link.HCPClientFn
}
func (r *telemetryStateReconciler) Reconcile(ctx context.Context, rt controller.Runtime, req controller.Request) error {
// The runtime is passed by value so replacing it here for the remainder of this
// reconciliation request processing will not affect future invocations.
rt.Logger = rt.Logger.With("resource-id", req.ID, "controller", StatusKey)
rt.Logger.Trace("reconciling telemetry-state")
// First get the link resource in order to build a hcp client. If the link resource
// doesn't exist then the telemetry-state should not exist either.
res, err := getLinkResource(ctx, rt)
if err != nil {
rt.Logger.Error("failed to lookup Link resource", "error", err)
return err
}
if res == nil {
return ensureTelemetryStateDeleted(ctx, rt)
}
// Check that the link resource indicates the cluster is linked
// If the cluster is not linked, the telemetry-state resource should not exist
if linked, reason := link.IsLinked(res.GetResource()); !linked {
rt.Logger.Trace("cluster is not linked", "reason", reason)
return ensureTelemetryStateDeleted(ctx, rt)
}
hcpClient, err := r.hcpClientFn(link.CloudConfigFromLink(res.GetData()))
if err != nil {
rt.Logger.Error("error creating HCP Client", "error", err)
return err
}
// Get the telemetry configuration and observability scoped credentials from hcp
tCfg, err := hcpClient.FetchTelemetryConfig(ctx)
if err != nil {
rt.Logger.Error("error requesting telemetry config", "error", err)
return err
}
clientID, clientSecret, err := hcpClient.GetObservabilitySecret(ctx)
if err != nil {
rt.Logger.Error("error requesting telemetry credentials", "error", err)
return nil
}
// TODO allow hcp client config override from hcp TelemetryConfig
hcpCfg := res.GetData().GetHcpConfig()
// TODO implement proxy options from hcp
proxyCfg := &pbhcp.ProxyConfig{}
state := &pbhcp.TelemetryState{
ResourceId: res.GetData().ResourceId,
ClientId: clientID,
ClientSecret: clientSecret,
HcpConfig: hcpCfg,
Proxy: proxyCfg,
Metrics: &pbhcp.MetricsConfig{
Labels: tCfg.MetricsConfig.Labels,
Disabled: tCfg.MetricsConfig.Disabled,
},
}
if tCfg.MetricsConfig.Endpoint != nil {
state.Metrics.Endpoint = tCfg.MetricsConfig.Endpoint.String()
}
if tCfg.MetricsConfig.Filters != nil {
state.Metrics.IncludeList = []string{tCfg.MetricsConfig.Filters.String()}
}
if err := writeTelemetryStateIfUpdated(ctx, rt, state); err != nil {
rt.Logger.Error("error updating telemetry-state", "error", err)
return err
}
return nil
}
func ensureTelemetryStateDeleted(ctx context.Context, rt controller.Runtime) error {
resp, err := rt.Client.Read(ctx, &pbresource.ReadRequest{Id: &pbresource.ID{Name: "global", Type: pbhcp.TelemetryStateType}})
switch {
case status.Code(err) == codes.NotFound:
return nil
case err != nil:
rt.Logger.Error("the resource service has returned an unexpected error", "error", err)
return err
}
rt.Logger.Trace("deleting telemetry-state")
if _, ok := resp.GetResource().Metadata[MetaKeyDebugSkipDeletion]; ok {
rt.Logger.Debug("skip-deletion metadata key found, skipping deletion of telemetry-state resource")
return nil
}
if _, err := rt.Client.Delete(ctx, &pbresource.DeleteRequest{Id: resp.GetResource().GetId()}); err != nil {
rt.Logger.Error("error deleting telemetry-state resource", "error", err)
return err
}
return nil
}
func writeTelemetryStateIfUpdated(ctx context.Context, rt controller.Runtime, state *pbhcp.TelemetryState) error {
currentState, err := getTelemetryStateResource(ctx, rt)
if err != nil {
return err
}
if currentState != nil && proto.Equal(currentState.GetData(), state) {
return nil
}
stateData, err := anypb.New(state)
if err != nil {
return err
}
_, err = rt.Client.Write(ctx, &pbresource.WriteRequest{Resource: &pbresource.Resource{
Id: &pbresource.ID{
Name: "global",
Type: pbhcp.TelemetryStateType,
},
Data: stateData,
}})
return err
}
func getGlobalResource(ctx context.Context, rt controller.Runtime, t *pbresource.Type) (*pbresource.Resource, error) {
resp, err := rt.Client.Read(ctx, &pbresource.ReadRequest{Id: &pbresource.ID{Name: "global", Type: t}})
switch {
case status.Code(err) == codes.NotFound:
return nil, nil
case err != nil:
return nil, err
}
return resp.GetResource(), nil
}
// getLinkResource returns the cluster scoped pbhcp.Link resource. If the resource is not found a nil
// pointer and no error will be returned.
func getLinkResource(ctx context.Context, rt controller.Runtime) (*types.DecodedLink, error) {
res, err := getGlobalResource(ctx, rt, pbhcp.LinkType)
if err != nil {
return nil, err
}
if res == nil {
return nil, nil
}
return resource.Decode[*pbhcp.Link](res)
}
func getTelemetryStateResource(ctx context.Context, rt controller.Runtime) (*types.DecodedTelemetryState, error) {
res, err := getGlobalResource(ctx, rt, pbhcp.TelemetryStateType)
if err != nil {
return nil, err
}
if res == nil {
return nil, nil
}
return resource.Decode[*pbhcp.TelemetryState](res)
}

@@ -1,174 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package telemetrystate
import (
"context"
"net/url"
"regexp"
"testing"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
hcpclient "github.com/hashicorp/consul/agent/hcp/client"
"github.com/hashicorp/consul/agent/hcp/config"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/hcp/internal/controllers/link"
"github.com/hashicorp/consul/internal/hcp/internal/types"
"github.com/hashicorp/consul/internal/resource"
rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/sdk/testutil"
)
type controllerSuite struct {
suite.Suite
ctx context.Context
client *rtest.Client
rt controller.Runtime
ctl *controller.TestController
tenancies []*pbresource.Tenancy
hcpMock *hcpclient.MockClient
}
func mockHcpClientFn(t *testing.T) (*hcpclient.MockClient, link.HCPClientFn) {
mockClient := hcpclient.NewMockClient(t)
mockClientFunc := func(link config.CloudConfig) (hcpclient.Client, error) {
return mockClient, nil
}
return mockClient, mockClientFunc
}
func (suite *controllerSuite) SetupTest() {
suite.ctx = testutil.TestContext(suite.T())
suite.tenancies = rtest.TestTenancies()
client := svctest.NewResourceServiceBuilder().
WithRegisterFns(types.Register).
WithTenancies(suite.tenancies...).
Run(suite.T())
hcpMock, hcpClientFn := mockHcpClientFn(suite.T())
suite.hcpMock = hcpMock
suite.ctl = controller.NewTestController(TelemetryStateController(hcpClientFn), client).
WithLogger(testutil.Logger(suite.T()))
suite.rt = suite.ctl.Runtime()
suite.client = rtest.NewClient(client)
}
func TestTelemetryStateController(t *testing.T) {
suite.Run(t, new(controllerSuite))
}
func (suite *controllerSuite) deleteResourceFunc(id *pbresource.ID) func() {
return func() {
suite.client.MustDelete(suite.T(), id)
}
}
func (suite *controllerSuite) TestController_Ok() {
// Run the controller manager
mgr := controller.NewManager(suite.client, suite.rt.Logger)
mockClient, mockClientFn := mockHcpClientFn(suite.T())
mockClient.EXPECT().FetchTelemetryConfig(mock.Anything).Return(&hcpclient.TelemetryConfig{
MetricsConfig: &hcpclient.MetricsConfig{
Endpoint: &url.URL{
Scheme: "http",
Host: "localhost",
Path: "/test",
},
Labels: map[string]string{"foo": "bar"},
Filters: regexp.MustCompile(".*"),
},
RefreshConfig: &hcpclient.RefreshConfig{},
}, nil)
mockClient.EXPECT().GetObservabilitySecret(mock.Anything).Return("xxx", "yyy", nil)
mgr.Register(TelemetryStateController(mockClientFn))
mgr.SetRaftLeader(true)
go mgr.Run(suite.ctx)
link := suite.writeLinkResource()
tsRes := suite.client.WaitForResourceExists(suite.T(), &pbresource.ID{Name: "global", Type: pbhcp.TelemetryStateType})
decodedState, err := resource.Decode[*pbhcp.TelemetryState](tsRes)
require.NoError(suite.T(), err)
require.Equal(suite.T(), link.GetData().GetResourceId(), decodedState.GetData().ResourceId)
require.Equal(suite.T(), "xxx", decodedState.GetData().ClientId)
require.Equal(suite.T(), "http://localhost/test", decodedState.GetData().Metrics.Endpoint)
suite.client.MustDelete(suite.T(), link.Id)
suite.client.WaitForDeletion(suite.T(), tsRes.Id)
}
func (suite *controllerSuite) TestReconcile_AvoidReconciliationWriteLoop() {
suite.hcpMock.EXPECT().FetchTelemetryConfig(mock.Anything).Return(&hcpclient.TelemetryConfig{
MetricsConfig: &hcpclient.MetricsConfig{
Endpoint: &url.URL{
Scheme: "http",
Host: "localhost",
Path: "/test",
},
Labels: map[string]string{"foo": "bar"},
Filters: regexp.MustCompile(".*"),
},
RefreshConfig: &hcpclient.RefreshConfig{},
}, nil)
link := suite.writeLinkResource()
suite.hcpMock.EXPECT().GetObservabilitySecret(mock.Anything).Return("xxx", "yyy", nil)
suite.NoError(suite.ctl.Reconcile(context.Background(), controller.Request{ID: link.Id}))
tsRes := suite.client.WaitForResourceExists(suite.T(), &pbresource.ID{Name: "global", Type: pbhcp.TelemetryStateType})
suite.NoError(suite.ctl.Reconcile(context.Background(), controller.Request{ID: tsRes.Id}))
suite.client.RequireVersionUnchanged(suite.T(), tsRes.Id, tsRes.Version)
}
func (suite *controllerSuite) TestController_LinkingDisabled() {
// Run the controller manager
mgr := controller.NewManager(suite.client, suite.rt.Logger)
_, mockClientFn := mockHcpClientFn(suite.T())
mgr.Register(TelemetryStateController(mockClientFn))
mgr.SetRaftLeader(true)
go mgr.Run(suite.ctx)
linkData := &pbhcp.Link{
ClientId: "abc",
ClientSecret: "abc",
ResourceId: types.GenerateTestResourceID(suite.T()),
}
rtest.Resource(pbhcp.LinkType, "global").
WithData(suite.T(), linkData).
WithStatus(link.StatusKey, &pbresource.Status{Conditions: []*pbresource.Condition{link.ConditionDisabled}}).
Write(suite.T(), suite.client)
suite.client.WaitForDeletion(suite.T(), &pbresource.ID{Name: "global", Type: pbhcp.TelemetryStateType})
}
func (suite *controllerSuite) writeLinkResource() *types.DecodedLink {
suite.T().Helper()
linkData := &pbhcp.Link{
ClientId: "abc",
ClientSecret: "abc",
ResourceId: types.GenerateTestResourceID(suite.T()),
}
res := rtest.Resource(pbhcp.LinkType, "global").
WithData(suite.T(), linkData).
WithStatus(link.StatusKey, &pbresource.Status{Conditions: []*pbresource.Condition{link.ConditionLinked(linkData.ResourceId)}}).
Write(suite.T(), suite.client)
suite.T().Cleanup(suite.deleteResourceFunc(res.Id))
link, err := resource.Decode[*pbhcp.Link](res)
require.NoError(suite.T(), err)
return link
}

@@ -1,8 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package telemetrystate
const (
StatusKey = "consul.io/hcp/telemetry-state"
)

@@ -1,117 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package types
import (
"errors"
"github.com/hashicorp/go-multierror"
hcpresource "github.com/hashicorp/hcp-sdk-go/resource"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/internal/resource"
pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
"github.com/hashicorp/consul/proto-public/pbresource"
)
type DecodedLink = resource.DecodedResource[*pbhcp.Link]
const (
LinkName = "global"
MetadataSourceKey = "source"
MetadataSourceConfig = "config"
)
var (
errLinkConfigurationName = errors.New("only a single Link resource is allowed and it must be named global")
errInvalidHCPResourceID = errors.New("could not parse, invalid format")
)
func RegisterLink(r resource.Registry) {
r.Register(resource.Registration{
Type: pbhcp.LinkType,
Proto: &pbhcp.Link{},
Scope: resource.ScopeCluster,
Validate: ValidateLink,
ACLs: &resource.ACLHooks{
Read: aclReadHookLink,
Write: aclWriteHookLink,
List: aclListHookLink,
},
})
}
func aclReadHookLink(authorizer acl.Authorizer, authzContext *acl.AuthorizerContext, _ *pbresource.ID, _ *pbresource.Resource) error {
err := authorizer.ToAllowAuthorizer().OperatorReadAllowed(authzContext)
if err != nil {
return err
}
return nil
}
func aclWriteHookLink(authorizer acl.Authorizer, authzContext *acl.AuthorizerContext, _ *pbresource.Resource) error {
err := authorizer.ToAllowAuthorizer().OperatorWriteAllowed(authzContext)
if err != nil {
return err
}
err = authorizer.ToAllowAuthorizer().ACLWriteAllowed(authzContext)
if err != nil {
return err
}
return nil
}
func aclListHookLink(authorizer acl.Authorizer, authzContext *acl.AuthorizerContext) error {
err := authorizer.ToAllowAuthorizer().OperatorReadAllowed(authzContext)
if err != nil {
return err
}
return nil
}
var ValidateLink = resource.DecodeAndValidate(validateLink)
func validateLink(res *DecodedLink) error {
var err error
if res.Id.Name != LinkName {
err = multierror.Append(err, resource.ErrInvalidField{
Name: "name",
Wrapped: errLinkConfigurationName,
})
}
if res.Data.ClientId == "" {
err = multierror.Append(err, resource.ErrInvalidField{
Name: "client_id",
Wrapped: resource.ErrMissing,
})
}
if res.Data.ClientSecret == "" {
err = multierror.Append(err, resource.ErrInvalidField{
Name: "client_secret",
Wrapped: resource.ErrMissing,
})
}
if res.Data.ResourceId == "" {
err = multierror.Append(err, resource.ErrInvalidField{
Name: "resource_id",
Wrapped: resource.ErrMissing,
})
} else {
_, parseErr := hcpresource.FromString(res.Data.ResourceId)
if parseErr != nil {
err = multierror.Append(err, resource.ErrInvalidField{
Name: "resource_id",
Wrapped: errInvalidHCPResourceID,
})
}
}
return err
}

@ -1,205 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package types
import (
"testing"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/known/anypb"
"github.com/hashicorp/consul/internal/resource"
rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
"github.com/hashicorp/consul/proto-public/pbresource"
)
func createCloudLinkResource(t *testing.T, data protoreflect.ProtoMessage) *pbresource.Resource {
res := &pbresource.Resource{
Id: &pbresource.ID{
Type: pbhcp.LinkType,
Name: "global",
},
}
var err error
res.Data, err = anypb.New(data)
require.NoError(t, err)
return res
}
func TestValidateLink_Ok(t *testing.T) {
data := &pbhcp.Link{
ClientId: "abc",
ClientSecret: "abc",
ResourceId: GenerateTestResourceID(t),
}
res := createCloudLinkResource(t, data)
err := ValidateLink(res)
require.NoError(t, err)
}
func TestValidateLink_ParseError(t *testing.T) {
// Any type other than the Link type would work
// to cause the error we are expecting
data := &pbresource.Type{Group: "a", GroupVersion: "b", Kind: "c"}
res := createCloudLinkResource(t, data)
err := ValidateLink(res)
require.Error(t, err)
require.ErrorAs(t, err, &resource.ErrDataParse{})
}
func TestValidateLink_InvalidName(t *testing.T) {
data := &pbhcp.Link{
ClientId: "abc",
ClientSecret: "abc",
ResourceId: GenerateTestResourceID(t),
}
res := createCloudLinkResource(t, data)
res.Id.Name = "default"
err := ValidateLink(res)
expected := resource.ErrInvalidField{
Name: "name",
Wrapped: errLinkConfigurationName,
}
var actual resource.ErrInvalidField
require.ErrorAs(t, err, &actual)
require.Equal(t, expected, actual)
}
func TestValidateLink_MissingClientId(t *testing.T) {
data := &pbhcp.Link{
ClientId: "",
ClientSecret: "abc",
ResourceId: GenerateTestResourceID(t),
}
res := createCloudLinkResource(t, data)
err := ValidateLink(res)
expected := resource.ErrInvalidField{
Name: "client_id",
Wrapped: resource.ErrMissing,
}
var actual resource.ErrInvalidField
require.ErrorAs(t, err, &actual)
require.Equal(t, expected, actual)
}
func TestValidateLink_MissingClientSecret(t *testing.T) {
data := &pbhcp.Link{
ClientId: "abc",
ClientSecret: "",
ResourceId: GenerateTestResourceID(t),
}
res := createCloudLinkResource(t, data)
err := ValidateLink(res)
expected := resource.ErrInvalidField{
Name: "client_secret",
Wrapped: resource.ErrMissing,
}
var actual resource.ErrInvalidField
require.ErrorAs(t, err, &actual)
require.Equal(t, expected, actual)
}
func TestValidateLink_MissingResourceId(t *testing.T) {
data := &pbhcp.Link{
ClientId: "abc",
ClientSecret: "abc",
ResourceId: "",
}
res := createCloudLinkResource(t, data)
err := ValidateLink(res)
expected := resource.ErrInvalidField{
Name: "resource_id",
Wrapped: resource.ErrMissing,
}
var actual resource.ErrInvalidField
require.ErrorAs(t, err, &actual)
require.Equal(t, expected, actual)
}
func TestValidateLink_InvalidResourceId(t *testing.T) {
data := &pbhcp.Link{
ClientId: "abc",
ClientSecret: "abc",
ResourceId: "abc",
}
res := createCloudLinkResource(t, data)
err := ValidateLink(res)
expected := resource.ErrInvalidField{
Name: "resource_id",
Wrapped: errInvalidHCPResourceID,
}
var actual resource.ErrInvalidField
require.ErrorAs(t, err, &actual)
require.Equal(t, expected, actual)
}
// Currently, we have no specific ACLs configured so the default `operator` permissions are required
func TestLinkACLs(t *testing.T) {
registry := resource.NewRegistry()
RegisterLink(registry)
data := &pbhcp.Link{
ClientId: "abc",
ClientSecret: "abc",
ResourceId: GenerateTestResourceID(t),
}
link := createCloudLinkResource(t, data)
cases := map[string]rtest.ACLTestCase{
"no rules": {
Rules: ``,
Res: link,
ReadOK: rtest.DENY,
WriteOK: rtest.DENY,
ListOK: rtest.DENY,
},
"link test read and list": {
Rules: `{"operator": "read"}`,
Res: link,
ReadOK: rtest.ALLOW,
WriteOK: rtest.DENY,
ListOK: rtest.ALLOW,
},
"link test write": {
Rules: `{"operator": "write", "acl": "write"}`,
Res: link,
ReadOK: rtest.ALLOW,
WriteOK: rtest.ALLOW,
ListOK: rtest.ALLOW,
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
rtest.RunACLTestCase(t, tc, registry)
})
}
}

@ -1,85 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package types
import (
"errors"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/consul/internal/resource"
pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
)
type DecodedTelemetryState = resource.DecodedResource[*pbhcp.TelemetryState]
var (
telemetryStateConfigurationNameError = errors.New("only a single Telemetry resource is allowed and it must be named global")
)
func RegisterTelemetryState(r resource.Registry) {
r.Register(resource.Registration{
Type: pbhcp.TelemetryStateType,
Proto: &pbhcp.TelemetryState{},
Scope: resource.ScopeCluster,
Validate: ValidateTelemetryState,
})
}
var ValidateTelemetryState = resource.DecodeAndValidate(validateTelemetryState)
func validateTelemetryState(res *DecodedTelemetryState) error {
var err error
if res.GetId().GetName() != "global" {
err = multierror.Append(err, resource.ErrInvalidField{
Name: "name",
Wrapped: telemetryStateConfigurationNameError,
})
}
if res.GetData().GetClientId() == "" {
err = multierror.Append(err, resource.ErrInvalidField{
Name: "client_id",
Wrapped: resource.ErrMissing,
})
}
if res.GetData().GetClientSecret() == "" {
err = multierror.Append(err, resource.ErrInvalidField{
Name: "client_secret",
Wrapped: resource.ErrMissing,
})
}
if res.GetData().GetResourceId() == "" {
err = multierror.Append(err, resource.ErrInvalidField{
Name: "resource_id",
Wrapped: resource.ErrMissing,
})
}
if res.GetData().GetMetrics().GetEndpoint() == "" {
err = multierror.Append(err, resource.ErrInvalidField{
Name: "metrics.endpoint",
Wrapped: resource.ErrMissing,
})
}
if res.GetData().GetMetrics().GetIncludeList() == nil {
err = multierror.Append(err, resource.ErrInvalidField{
Name: "metrics.include_list",
Wrapped: resource.ErrMissing,
})
}
if res.GetData().GetMetrics().GetLabels() == nil {
err = multierror.Append(err, resource.ErrInvalidField{
Name: "metrics.labels",
Wrapped: resource.ErrMissing,
})
}
return err
}

@ -1,23 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package types
import (
"fmt"
"testing"
"github.com/hashicorp/go-uuid"
"github.com/stretchr/testify/require"
)
func GenerateTestResourceID(t *testing.T) string {
orgID, err := uuid.GenerateUUID()
require.NoError(t, err)
projectID, err := uuid.GenerateUUID()
require.NoError(t, err)
template := "organization/%s/project/%s/hashicorp.consul.global-network-manager.cluster/test-cluster"
return fmt.Sprintf(template, orgID, projectID)
}

@ -1,11 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package types
import "github.com/hashicorp/consul/internal/resource"
func Register(r resource.Registry) {
RegisterLink(r)
RegisterTelemetryState(r)
}

@@ -14,10 +14,10 @@ import (
 	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/consul/internal/resource"
 	"github.com/hashicorp/consul/internal/storage"
 	"github.com/hashicorp/consul/internal/storage/inmem"
 	"github.com/hashicorp/consul/proto-public/pbresource"
 	pbstorage "github.com/hashicorp/consul/proto/private/pbstorage"
 )
@@ -53,7 +53,7 @@ func NewBackend(h Handle, l hclog.Logger) (*Backend, error) {
 	if err != nil {
 		return nil, err
 	}
-	b := &Backend{handle: h, store: s}
+	b := &Backend{handle: h, store: s, logger: l}
 	b.forwardingServer = newForwardingServer(b)
 	b.forwardingClient = newForwardingClient(h, l)
 	return b, nil
@@ -80,6 +80,7 @@ type Handle interface {
 type Backend struct {
 	handle Handle
 	store *inmem.Store
+	logger hclog.Logger
 	forwardingServer *forwardingServer
 	forwardingClient *forwardingClient
@@ -225,6 +226,24 @@ func (b *Backend) ListByOwner(_ context.Context, id *pbresource.ID) ([]*pbresour
 	return b.store.ListByOwner(id)
 }
+// isRetiredType ensures that types that have been formally retired (deprecated
+// and deleted) do not sneak back in during a snapshot restore.
+func isRetiredType(typ *pbresource.Type) bool {
+	switch typ.GetGroupVersion() {
+	case "v2":
+		switch typ.GetGroup() {
+		case "hcp":
+			return true
+		}
+	case "v2beta1":
+		switch typ.GetGroup() {
+		case "auth", "catalog", "mesh", "multicluster", "tenancy":
+			return true
+		}
+	}
+	return false
+}
 // Apply is called by the FSM with the bytes of a Raft log entry, with Consul's
 // envelope (i.e. type prefix and msgpack wrapper) stripped off.
 func (b *Backend) Apply(buf []byte, idx uint64) any {
@@ -239,8 +258,18 @@ func (b *Backend) Apply(buf []byte, idx uint64) any {
 	oldVsn := res.Version
 	res.Version = strconv.Itoa(int(idx))
-	if err := b.store.WriteCAS(res, oldVsn); err != nil {
-		return err
+	if isRetiredType(res.GetId().GetType()) {
+		// When a type is retired, the caller should think that the write
+		// was applied, but we should simply skip loading it. This means
+		// that retired types will not linger in the database indefinitely.
+		b.logger.Warn("ignoring operation for retired type",
+			"operation", "apply",
+			"type", resource.ToGVK(res.GetId().GetType()),
+		)
+	} else {
+		if err := b.store.WriteCAS(res, oldVsn); err != nil {
+			return err
+		}
 	}
 	return &pbstorage.LogResponse{
@@ -250,8 +279,19 @@ func (b *Backend) Apply(buf []byte, idx uint64) any {
 	}
 case pbstorage.LogType_LOG_TYPE_DELETE:
 	req := req.GetDelete()
-	if err := b.store.DeleteCAS(req.Id, req.Version); err != nil {
-		return err
+	if isRetiredType(req.GetId().GetType()) {
+		// When a type is retired, the caller should think that the write
+		// was applied, but we should simply skip loading it. This means
+		// that retired types will not linger in the database indefinitely.
+		b.logger.Warn("ignoring operation for retired type",
+			"operation", "delete",
+			"type", resource.ToGVK(req.GetId().GetType()),
+		)
+	} else {
+		if err := b.store.DeleteCAS(req.Id, req.Version); err != nil {
+			return err
+		}
 	}
 	return &pbstorage.LogResponse{
 		Response: &pbstorage.LogResponse_Delete{},
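A minimal sketch of how the new guard classifies types, assuming it lives in the same storage backend package as isRetiredType (so pbresource is already imported); the exampleRetiredCheck name is illustrative only, and the expected results mirror the TestIsRetiredType cases added below.

```go
// Illustrative only: classifies a retired hcp/v2 type and a still-registered
// multicluster/v2 type using the isRetiredType guard introduced above.
func exampleRetiredCheck() {
	linkType := &pbresource.Type{Group: "hcp", GroupVersion: "v2", Kind: "Link"}
	exportedType := &pbresource.Type{Group: "multicluster", GroupVersion: "v2", Kind: "ExportedServices"}

	_ = isRetiredType(linkType)     // true: the Raft entry is acknowledged, a warning is logged, nothing is stored
	_ = isRetiredType(exportedType) // false: the operation proceeds through store.WriteCAS / store.DeleteCAS as before
}
```

Retired writes and deletes are still acknowledged to Raft, so log replay and snapshot restore succeed; the retired resources simply never land in the in-memory store.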

@ -0,0 +1,392 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package raft
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/proto-public/pbresource"
)
func TestIsRetiredType(t *testing.T) {
var retired []*pbresource.Type
{
const (
GroupName = "hcp"
Version = "v2"
LinkKind = "Link"
TelemetryStateKind = "TelemetryState"
)
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: LinkKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: TelemetryStateKind,
})
}
{
const (
GroupName = "tenancy"
Version = "v2beta1"
NamespaceKind = "Namespace"
PartitionKind = "Partition"
)
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: NamespaceKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: PartitionKind,
})
}
{
const (
GroupName = "multicluster"
Version = "v2beta1"
SamenessGroupKind = "SamenessGroup"
)
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: SamenessGroupKind,
})
}
{
const (
GroupName = "mesh"
Version = "v2beta1"
APIGatewayKind = "APIGateway"
ComputedExplicitDestinationsKind = "ComputedExplicitDestinations"
ComputedGatewayRoutesKind = "ComputedGatewayRoutes"
ComputedImplicitDestinationsKind = "ComputedImplicitDestinations"
ComputedProxyConfigurationKind = "ComputedProxyConfiguration"
ComputedRoutesKind = "ComputedRoutes"
DestinationPolicyKind = "DestinationPolicy"
DestinationsKind = "Destinations"
DestinationsConfigurationKind = "DestinationsConfiguration"
GRPCRouteKind = "GRPCRoute"
HTTPRouteKind = "HTTPRoute"
MeshConfigurationKind = "MeshConfiguration"
MeshGatewayKind = "MeshGateway"
ProxyConfigurationKind = "ProxyConfiguration"
ProxyStateTemplateKind = "ProxyStateTemplate"
TCPRouteKind = "TCPRoute"
)
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: APIGatewayKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: ComputedExplicitDestinationsKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: ComputedGatewayRoutesKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: ComputedImplicitDestinationsKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: ComputedProxyConfigurationKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: ComputedRoutesKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: DestinationPolicyKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: DestinationsKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: DestinationsConfigurationKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: GRPCRouteKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: HTTPRouteKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: MeshConfigurationKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: MeshGatewayKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: ProxyConfigurationKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: ProxyStateTemplateKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: TCPRouteKind,
})
}
{
const (
GroupName = "auth"
Version = "v2beta1"
ComputedTrafficPermissionsKind = "ComputedTrafficPermissions"
NamespaceTrafficPermissionsKind = "NamespaceTrafficPermissions"
PartitionTrafficPermissionsKind = "PartitionTrafficPermissions"
TrafficPermissionsKind = "TrafficPermissions"
WorkloadIdentityKind = "WorkloadIdentity"
)
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: ComputedTrafficPermissionsKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: NamespaceTrafficPermissionsKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: PartitionTrafficPermissionsKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: TrafficPermissionsKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: WorkloadIdentityKind,
})
}
{
const (
GroupName = "catalog"
Version = "v2beta1"
ComputedFailoverPolicyKind = "ComputedFailoverPolicy"
FailoverPolicyKind = "FailoverPolicy"
HealthChecksKind = "HealthChecks"
HealthStatusKind = "HealthStatus"
NodeKind = "Node"
NodeHealthStatusKind = "NodeHealthStatus"
ServiceKind = "Service"
ServiceEndpointsKind = "ServiceEndpoints"
VirtualIPsKind = "VirtualIPs"
WorkloadKind = "Workload"
)
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: ComputedFailoverPolicyKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: FailoverPolicyKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: HealthChecksKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: HealthStatusKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: NodeKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: NodeHealthStatusKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: ServiceKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: ServiceEndpointsKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: VirtualIPsKind,
})
retired = append(retired, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: WorkloadKind,
})
}
// The remaining types are still registered; isRetiredType must report them as retained.
var retained []*pbresource.Type
{
const (
GroupName = "demo"
Version = "v2"
AlbumKind = "Album"
ArtistKind = "Artist"
FestivalKind = "Festival"
)
retained = append(retained, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: AlbumKind,
})
retained = append(retained, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: ArtistKind,
})
retained = append(retained, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: FestivalKind,
})
}
{
const (
GroupName = "demo"
Version = "v1"
AlbumKind = "Album"
ArtistKind = "Artist"
ConceptKind = "Concept"
ExecutiveKind = "Executive"
RecordLabelKind = "RecordLabel"
)
retained = append(retained, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: AlbumKind,
})
retained = append(retained, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: ArtistKind,
})
retained = append(retained, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: ConceptKind,
})
retained = append(retained, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: ExecutiveKind,
})
retained = append(retained, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: RecordLabelKind,
})
}
{
const (
GroupName = "multicluster"
Version = "v2"
ComputedExportedServicesKind = "ComputedExportedServices"
ExportedServicesKind = "ExportedServices"
NamespaceExportedServicesKind = "NamespaceExportedServices"
PartitionExportedServicesKind = "PartitionExportedServices"
)
retained = append(retained, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: ComputedExportedServicesKind,
})
retained = append(retained, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: ExportedServicesKind,
})
retained = append(retained, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: NamespaceExportedServicesKind,
})
retained = append(retained, &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: PartitionExportedServicesKind,
})
}
for _, typ := range retired {
t.Run("gone - "+resource.ToGVK(typ), func(t *testing.T) {
require.True(t, isRetiredType(typ))
})
}
for _, typ := range retained {
t.Run("allowed - "+resource.ToGVK(typ), func(t *testing.T) {
require.False(t, isRetiredType(typ))
})
}
}
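The retired/retained tables above repeat the same three-field literal many times; a small constructor would shrink them considerably. The sketch below is only an alternative layout, and the mkType helper is hypothetical, not part of this commit.

```go
// mkType is a hypothetical helper that would shorten the retired/retained
// tables in TestIsRetiredType; it is not part of this change.
func mkType(group, version, kind string) *pbresource.Type {
	return &pbresource.Type{Group: group, GroupVersion: version, Kind: kind}
}

// Example usage inside TestIsRetiredType:
//
//	retired = append(retired,
//		mkType("hcp", "v2", "Link"),
//		mkType("hcp", "v2", "TelemetryState"),
//	)
//	retained = append(retained,
//		mkType("multicluster", "v2", "ExportedServices"),
//	)
```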

@ -1,18 +0,0 @@
// Code generated by protoc-gen-go-binary. DO NOT EDIT.
// source: pbhcp/v2/hcp_config.proto
package hcpv2
import (
"google.golang.org/protobuf/proto"
)
// MarshalBinary implements encoding.BinaryMarshaler
func (msg *HCPConfig) MarshalBinary() ([]byte, error) {
return proto.Marshal(msg)
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler
func (msg *HCPConfig) UnmarshalBinary(b []byte) error {
return proto.Unmarshal(b, msg)
}

@ -1,199 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.33.0
// protoc (unknown)
// source: pbhcp/v2/hcp_config.proto
package hcpv2
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// HCPConfig is used to configure the HCP SDK for communicating with
// the HashiCorp Cloud Platform. All configuration is optional with default
// values provided by the SDK.
type HCPConfig struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// AuthUrl is the URL which will be used to authenticate.
AuthUrl string `protobuf:"bytes,1,opt,name=auth_url,json=authUrl,proto3" json:"auth_url,omitempty"`
// ApiAddress is the address (<hostname>[:port]) of the HCP api.
ApiAddress string `protobuf:"bytes,2,opt,name=api_address,json=apiAddress,proto3" json:"api_address,omitempty"`
// ScadaAddress is the address (<hostname>[:port]) of the HCP SCADA endpoint.
ScadaAddress string `protobuf:"bytes,3,opt,name=scada_address,json=scadaAddress,proto3" json:"scada_address,omitempty"`
// TlsInsecureSkipVerify if true will ignore server name verification when making HTTPS requests
TlsInsecureSkipVerify bool `protobuf:"varint,4,opt,name=tls_insecure_skip_verify,json=tlsInsecureSkipVerify,proto3" json:"tls_insecure_skip_verify,omitempty"`
}
func (x *HCPConfig) Reset() {
*x = HCPConfig{}
if protoimpl.UnsafeEnabled {
mi := &file_pbhcp_v2_hcp_config_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *HCPConfig) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*HCPConfig) ProtoMessage() {}
func (x *HCPConfig) ProtoReflect() protoreflect.Message {
mi := &file_pbhcp_v2_hcp_config_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use HCPConfig.ProtoReflect.Descriptor instead.
func (*HCPConfig) Descriptor() ([]byte, []int) {
return file_pbhcp_v2_hcp_config_proto_rawDescGZIP(), []int{0}
}
func (x *HCPConfig) GetAuthUrl() string {
if x != nil {
return x.AuthUrl
}
return ""
}
func (x *HCPConfig) GetApiAddress() string {
if x != nil {
return x.ApiAddress
}
return ""
}
func (x *HCPConfig) GetScadaAddress() string {
if x != nil {
return x.ScadaAddress
}
return ""
}
func (x *HCPConfig) GetTlsInsecureSkipVerify() bool {
if x != nil {
return x.TlsInsecureSkipVerify
}
return false
}
var File_pbhcp_v2_hcp_config_proto protoreflect.FileDescriptor
var file_pbhcp_v2_hcp_config_proto_rawDesc = []byte{
0x0a, 0x19, 0x70, 0x62, 0x68, 0x63, 0x70, 0x2f, 0x76, 0x32, 0x2f, 0x68, 0x63, 0x70, 0x5f, 0x63,
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x68, 0x61, 0x73,
0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63,
0x70, 0x2e, 0x76, 0x32, 0x22, 0xa5, 0x01, 0x0a, 0x09, 0x48, 0x43, 0x50, 0x43, 0x6f, 0x6e, 0x66,
0x69, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x75, 0x74, 0x68, 0x55, 0x72, 0x6c, 0x12, 0x1f, 0x0a,
0x0b, 0x61, 0x70, 0x69, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x23,
0x0a, 0x0d, 0x73, 0x63, 0x61, 0x64, 0x61, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18,
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x63, 0x61, 0x64, 0x61, 0x41, 0x64, 0x64, 0x72,
0x65, 0x73, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x74, 0x6c, 0x73, 0x5f, 0x69, 0x6e, 0x73, 0x65, 0x63,
0x75, 0x72, 0x65, 0x5f, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x18,
0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x74, 0x6c, 0x73, 0x49, 0x6e, 0x73, 0x65, 0x63, 0x75,
0x72, 0x65, 0x53, 0x6b, 0x69, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x42, 0xe5, 0x01, 0x0a,
0x1b, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63,
0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70, 0x2e, 0x76, 0x32, 0x42, 0x0e, 0x48, 0x63,
0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37,
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69,
0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x68, 0x63, 0x70, 0x2f, 0x76,
0x32, 0x3b, 0x68, 0x63, 0x70, 0x76, 0x32, 0xa2, 0x02, 0x03, 0x48, 0x43, 0x48, 0xaa, 0x02, 0x17,
0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c,
0x2e, 0x48, 0x63, 0x70, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x17, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63,
0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x48, 0x63, 0x70, 0x5c, 0x56,
0x32, 0xe2, 0x02, 0x23, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f,
0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x48, 0x63, 0x70, 0x5c, 0x56, 0x32, 0x5c, 0x47, 0x50, 0x42, 0x4d,
0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1a, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63,
0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x48, 0x63, 0x70,
0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_pbhcp_v2_hcp_config_proto_rawDescOnce sync.Once
file_pbhcp_v2_hcp_config_proto_rawDescData = file_pbhcp_v2_hcp_config_proto_rawDesc
)
func file_pbhcp_v2_hcp_config_proto_rawDescGZIP() []byte {
file_pbhcp_v2_hcp_config_proto_rawDescOnce.Do(func() {
file_pbhcp_v2_hcp_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_pbhcp_v2_hcp_config_proto_rawDescData)
})
return file_pbhcp_v2_hcp_config_proto_rawDescData
}
var file_pbhcp_v2_hcp_config_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_pbhcp_v2_hcp_config_proto_goTypes = []interface{}{
(*HCPConfig)(nil), // 0: hashicorp.consul.hcp.v2.HCPConfig
}
var file_pbhcp_v2_hcp_config_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_pbhcp_v2_hcp_config_proto_init() }
func file_pbhcp_v2_hcp_config_proto_init() {
if File_pbhcp_v2_hcp_config_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_pbhcp_v2_hcp_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*HCPConfig); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_pbhcp_v2_hcp_config_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_pbhcp_v2_hcp_config_proto_goTypes,
DependencyIndexes: file_pbhcp_v2_hcp_config_proto_depIdxs,
MessageInfos: file_pbhcp_v2_hcp_config_proto_msgTypes,
}.Build()
File_pbhcp_v2_hcp_config_proto = out.File
file_pbhcp_v2_hcp_config_proto_rawDesc = nil
file_pbhcp_v2_hcp_config_proto_goTypes = nil
file_pbhcp_v2_hcp_config_proto_depIdxs = nil
}

@ -1,23 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
syntax = "proto3";
package hashicorp.consul.hcp.v2;
// HCPConfig is used to configure the HCP SDK for communicating with
// the HashiCorp Cloud Platform. All configuration is optional with default
// values provided by the SDK.
message HCPConfig {
// AuthUrl is the URL which will be used to authenticate.
string auth_url = 1;
// ApiAddress is the address (<hostname>[:port]) of the HCP api.
string api_address = 2;
// ScadaAddress is the address (<hostname>[:port]) of the HCP SCADA endpoint.
string scada_address = 3;
// TlsInsecureSkipVerify if true will ignore server name verification when making HTTPS requests
bool tls_insecure_skip_verify = 4;
}

@ -1,27 +0,0 @@
// Code generated by protoc-gen-deepcopy. DO NOT EDIT.
package hcpv2
import (
proto "google.golang.org/protobuf/proto"
)
// DeepCopyInto supports using HCPConfig within kubernetes types, where deepcopy-gen is used.
func (in *HCPConfig) DeepCopyInto(out *HCPConfig) {
proto.Reset(out)
proto.Merge(out, proto.Clone(in))
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HCPConfig. Required by controller-gen.
func (in *HCPConfig) DeepCopy() *HCPConfig {
if in == nil {
return nil
}
out := new(HCPConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new HCPConfig. Required by controller-gen.
func (in *HCPConfig) DeepCopyInterface() interface{} {
return in.DeepCopy()
}

@ -1,22 +0,0 @@
// Code generated by protoc-json-shim. DO NOT EDIT.
package hcpv2
import (
protojson "google.golang.org/protobuf/encoding/protojson"
)
// MarshalJSON is a custom marshaler for HCPConfig
func (this *HCPConfig) MarshalJSON() ([]byte, error) {
str, err := HcpConfigMarshaler.Marshal(this)
return []byte(str), err
}
// UnmarshalJSON is a custom unmarshaler for HCPConfig
func (this *HCPConfig) UnmarshalJSON(b []byte) error {
return HcpConfigUnmarshaler.Unmarshal(b, this)
}
var (
HcpConfigMarshaler = &protojson.MarshalOptions{}
HcpConfigUnmarshaler = &protojson.UnmarshalOptions{DiscardUnknown: false}
)

@ -1,18 +0,0 @@
// Code generated by protoc-gen-go-binary. DO NOT EDIT.
// source: pbhcp/v2/link.proto
package hcpv2
import (
"google.golang.org/protobuf/proto"
)
// MarshalBinary implements encoding.BinaryMarshaler
func (msg *Link) MarshalBinary() ([]byte, error) {
return proto.Marshal(msg)
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler
func (msg *Link) UnmarshalBinary(b []byte) error {
return proto.Unmarshal(b, msg)
}

@ -1,283 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.33.0
// protoc (unknown)
// source: pbhcp/v2/link.proto
package hcpv2
import (
_ "github.com/hashicorp/consul/proto-public/pbresource"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type AccessLevel int32
const (
AccessLevel_ACCESS_LEVEL_UNSPECIFIED AccessLevel = 0
AccessLevel_ACCESS_LEVEL_GLOBAL_READ_WRITE AccessLevel = 1
AccessLevel_ACCESS_LEVEL_GLOBAL_READ_ONLY AccessLevel = 2
)
// Enum value maps for AccessLevel.
var (
AccessLevel_name = map[int32]string{
0: "ACCESS_LEVEL_UNSPECIFIED",
1: "ACCESS_LEVEL_GLOBAL_READ_WRITE",
2: "ACCESS_LEVEL_GLOBAL_READ_ONLY",
}
AccessLevel_value = map[string]int32{
"ACCESS_LEVEL_UNSPECIFIED": 0,
"ACCESS_LEVEL_GLOBAL_READ_WRITE": 1,
"ACCESS_LEVEL_GLOBAL_READ_ONLY": 2,
}
)
func (x AccessLevel) Enum() *AccessLevel {
p := new(AccessLevel)
*p = x
return p
}
func (x AccessLevel) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (AccessLevel) Descriptor() protoreflect.EnumDescriptor {
return file_pbhcp_v2_link_proto_enumTypes[0].Descriptor()
}
func (AccessLevel) Type() protoreflect.EnumType {
return &file_pbhcp_v2_link_proto_enumTypes[0]
}
func (x AccessLevel) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use AccessLevel.Descriptor instead.
func (AccessLevel) EnumDescriptor() ([]byte, []int) {
return file_pbhcp_v2_link_proto_rawDescGZIP(), []int{0}
}
type Link struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
ResourceId string `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"`
ClientId string `protobuf:"bytes,2,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"`
ClientSecret string `protobuf:"bytes,3,opt,name=client_secret,json=clientSecret,proto3" json:"client_secret,omitempty"`
HcpClusterUrl string `protobuf:"bytes,4,opt,name=hcp_cluster_url,json=hcpClusterUrl,proto3" json:"hcp_cluster_url,omitempty"`
AccessLevel AccessLevel `protobuf:"varint,5,opt,name=access_level,json=accessLevel,proto3,enum=hashicorp.consul.hcp.v2.AccessLevel" json:"access_level,omitempty"`
HcpConfig *HCPConfig `protobuf:"bytes,6,opt,name=hcp_config,json=hcpConfig,proto3" json:"hcp_config,omitempty"`
}
func (x *Link) Reset() {
*x = Link{}
if protoimpl.UnsafeEnabled {
mi := &file_pbhcp_v2_link_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Link) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Link) ProtoMessage() {}
func (x *Link) ProtoReflect() protoreflect.Message {
mi := &file_pbhcp_v2_link_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Link.ProtoReflect.Descriptor instead.
func (*Link) Descriptor() ([]byte, []int) {
return file_pbhcp_v2_link_proto_rawDescGZIP(), []int{0}
}
func (x *Link) GetResourceId() string {
if x != nil {
return x.ResourceId
}
return ""
}
func (x *Link) GetClientId() string {
if x != nil {
return x.ClientId
}
return ""
}
func (x *Link) GetClientSecret() string {
if x != nil {
return x.ClientSecret
}
return ""
}
func (x *Link) GetHcpClusterUrl() string {
if x != nil {
return x.HcpClusterUrl
}
return ""
}
func (x *Link) GetAccessLevel() AccessLevel {
if x != nil {
return x.AccessLevel
}
return AccessLevel_ACCESS_LEVEL_UNSPECIFIED
}
func (x *Link) GetHcpConfig() *HCPConfig {
if x != nil {
return x.HcpConfig
}
return nil
}
var File_pbhcp_v2_link_proto protoreflect.FileDescriptor
var file_pbhcp_v2_link_proto_rawDesc = []byte{
0x0a, 0x13, 0x70, 0x62, 0x68, 0x63, 0x70, 0x2f, 0x76, 0x32, 0x2f, 0x6c, 0x69, 0x6e, 0x6b, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70,
0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70, 0x2e, 0x76, 0x32, 0x1a, 0x19,
0x70, 0x62, 0x68, 0x63, 0x70, 0x2f, 0x76, 0x32, 0x2f, 0x68, 0x63, 0x70, 0x5f, 0x63, 0x6f, 0x6e,
0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x70, 0x62, 0x72, 0x65, 0x73,
0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa5, 0x02, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b,
0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49,
0x64, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02,
0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x23,
0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18,
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63,
0x72, 0x65, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x68, 0x63, 0x70, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74,
0x65, 0x72, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x68, 0x63,
0x70, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x55, 0x72, 0x6c, 0x12, 0x47, 0x0a, 0x0c, 0x61,
0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28,
0x0e, 0x32, 0x24, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f,
0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x63, 0x63, 0x65,
0x73, 0x73, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c,
0x65, 0x76, 0x65, 0x6c, 0x12, 0x41, 0x0a, 0x0a, 0x68, 0x63, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x66,
0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69,
0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70, 0x2e,
0x76, 0x32, 0x2e, 0x48, 0x43, 0x50, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x68, 0x63,
0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x06, 0xa2, 0x93, 0x04, 0x02, 0x08, 0x01, 0x2a,
0x72, 0x0a, 0x0b, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x1c,
0x0a, 0x18, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55,
0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x22, 0x0a, 0x1e,
0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x47, 0x4c, 0x4f,
0x42, 0x41, 0x4c, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x01,
0x12, 0x21, 0x0a, 0x1d, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c,
0x5f, 0x47, 0x4c, 0x4f, 0x42, 0x41, 0x4c, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x4f, 0x4e, 0x4c,
0x59, 0x10, 0x02, 0x42, 0xe0, 0x01, 0x0a, 0x1b, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68,
0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70,
0x2e, 0x76, 0x32, 0x42, 0x09, 0x4c, 0x69, 0x6e, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73,
0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x68, 0x63, 0x70,
0x2f, 0x76, 0x32, 0x3b, 0x68, 0x63, 0x70, 0x76, 0x32, 0xa2, 0x02, 0x03, 0x48, 0x43, 0x48, 0xaa,
0x02, 0x17, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73,
0x75, 0x6c, 0x2e, 0x48, 0x63, 0x70, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x17, 0x48, 0x61, 0x73, 0x68,
0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x48, 0x63, 0x70,
0x5c, 0x56, 0x32, 0xe2, 0x02, 0x23, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c,
0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x48, 0x63, 0x70, 0x5c, 0x56, 0x32, 0x5c, 0x47, 0x50,
0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1a, 0x48, 0x61, 0x73, 0x68,
0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x48,
0x63, 0x70, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_pbhcp_v2_link_proto_rawDescOnce sync.Once
file_pbhcp_v2_link_proto_rawDescData = file_pbhcp_v2_link_proto_rawDesc
)
func file_pbhcp_v2_link_proto_rawDescGZIP() []byte {
file_pbhcp_v2_link_proto_rawDescOnce.Do(func() {
file_pbhcp_v2_link_proto_rawDescData = protoimpl.X.CompressGZIP(file_pbhcp_v2_link_proto_rawDescData)
})
return file_pbhcp_v2_link_proto_rawDescData
}
var file_pbhcp_v2_link_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_pbhcp_v2_link_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_pbhcp_v2_link_proto_goTypes = []interface{}{
(AccessLevel)(0), // 0: hashicorp.consul.hcp.v2.AccessLevel
(*Link)(nil), // 1: hashicorp.consul.hcp.v2.Link
(*HCPConfig)(nil), // 2: hashicorp.consul.hcp.v2.HCPConfig
}
var file_pbhcp_v2_link_proto_depIdxs = []int32{
0, // 0: hashicorp.consul.hcp.v2.Link.access_level:type_name -> hashicorp.consul.hcp.v2.AccessLevel
2, // 1: hashicorp.consul.hcp.v2.Link.hcp_config:type_name -> hashicorp.consul.hcp.v2.HCPConfig
2, // [2:2] is the sub-list for method output_type
2, // [2:2] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
}
func init() { file_pbhcp_v2_link_proto_init() }
func file_pbhcp_v2_link_proto_init() {
if File_pbhcp_v2_link_proto != nil {
return
}
file_pbhcp_v2_hcp_config_proto_init()
if !protoimpl.UnsafeEnabled {
file_pbhcp_v2_link_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Link); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_pbhcp_v2_link_proto_rawDesc,
NumEnums: 1,
NumMessages: 1,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_pbhcp_v2_link_proto_goTypes,
DependencyIndexes: file_pbhcp_v2_link_proto_depIdxs,
EnumInfos: file_pbhcp_v2_link_proto_enumTypes,
MessageInfos: file_pbhcp_v2_link_proto_msgTypes,
}.Build()
File_pbhcp_v2_link_proto = out.File
file_pbhcp_v2_link_proto_rawDesc = nil
file_pbhcp_v2_link_proto_goTypes = nil
file_pbhcp_v2_link_proto_depIdxs = nil
}

@ -1,26 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
syntax = "proto3";
package hashicorp.consul.hcp.v2;
import "pbhcp/v2/hcp_config.proto";
import "pbresource/annotations.proto";
enum AccessLevel {
ACCESS_LEVEL_UNSPECIFIED = 0;
ACCESS_LEVEL_GLOBAL_READ_WRITE = 1;
ACCESS_LEVEL_GLOBAL_READ_ONLY = 2;
}
message Link {
option (hashicorp.consul.resource.spec) = {scope: SCOPE_CLUSTER};
string resource_id = 1;
string client_id = 2;
string client_secret = 3;
string hcp_cluster_url = 4;
AccessLevel access_level = 5;
HCPConfig hcp_config = 6;
}

@ -1,27 +0,0 @@
// Code generated by protoc-gen-deepcopy. DO NOT EDIT.
package hcpv2
import (
proto "google.golang.org/protobuf/proto"
)
// DeepCopyInto supports using Link within kubernetes types, where deepcopy-gen is used.
func (in *Link) DeepCopyInto(out *Link) {
proto.Reset(out)
proto.Merge(out, proto.Clone(in))
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Link. Required by controller-gen.
func (in *Link) DeepCopy() *Link {
if in == nil {
return nil
}
out := new(Link)
in.DeepCopyInto(out)
return out
}
// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new Link. Required by controller-gen.
func (in *Link) DeepCopyInterface() interface{} {
return in.DeepCopy()
}

@ -1,22 +0,0 @@
// Code generated by protoc-json-shim. DO NOT EDIT.
package hcpv2
import (
protojson "google.golang.org/protobuf/encoding/protojson"
)
// MarshalJSON is a custom marshaler for Link
func (this *Link) MarshalJSON() ([]byte, error) {
str, err := LinkMarshaler.Marshal(this)
return []byte(str), err
}
// UnmarshalJSON is a custom unmarshaler for Link
func (this *Link) UnmarshalJSON(b []byte) error {
return LinkUnmarshaler.Unmarshal(b, this)
}
var (
LinkMarshaler = &protojson.MarshalOptions{}
LinkUnmarshaler = &protojson.UnmarshalOptions{DiscardUnknown: false}
)

@ -1,29 +0,0 @@
// Code generated by protoc-gen-resource-types. DO NOT EDIT.
package hcpv2
import (
"github.com/hashicorp/consul/proto-public/pbresource"
)
const (
GroupName = "hcp"
Version = "v2"
LinkKind = "Link"
TelemetryStateKind = "TelemetryState"
)
var (
LinkType = &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: LinkKind,
}
TelemetryStateType = &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: TelemetryStateKind,
}
)

@ -1,38 +0,0 @@
// Code generated by protoc-gen-go-binary. DO NOT EDIT.
// source: pbhcp/v2/telemetry_state.proto
package hcpv2
import (
"google.golang.org/protobuf/proto"
)
// MarshalBinary implements encoding.BinaryMarshaler
func (msg *TelemetryState) MarshalBinary() ([]byte, error) {
return proto.Marshal(msg)
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler
func (msg *TelemetryState) UnmarshalBinary(b []byte) error {
return proto.Unmarshal(b, msg)
}
// MarshalBinary implements encoding.BinaryMarshaler
func (msg *MetricsConfig) MarshalBinary() ([]byte, error) {
return proto.Marshal(msg)
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler
func (msg *MetricsConfig) UnmarshalBinary(b []byte) error {
return proto.Unmarshal(b, msg)
}
// MarshalBinary implements encoding.BinaryMarshaler
func (msg *ProxyConfig) MarshalBinary() ([]byte, error) {
return proto.Marshal(msg)
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler
func (msg *ProxyConfig) UnmarshalBinary(b []byte) error {
return proto.Unmarshal(b, msg)
}

@ -1,426 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.33.0
// protoc (unknown)
// source: pbhcp/v2/telemetry_state.proto
package hcpv2
import (
_ "github.com/hashicorp/consul/proto-public/pbresource"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// TelemetryState describes configuration required to forward telemetry to the HashiCorp Cloud Platform.
// This resource is managed internally and is only written if the cluster is linked to HCP. Any
// manual changes to the resource will be reconciled and overwritten with the internally computed
// state.
type TelemetryState struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// ResourceId is the identifier for the cluster linked with HCP.
ResourceId string `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"`
// ClientId is the oauth client identifier for cluster.
// This client has capabilities limited to writing telemetry data for this cluster.
ClientId string `protobuf:"bytes,2,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"`
// ClientSecret is the oauth secret used to authenticate requests to send telemetry data to HCP.
ClientSecret string `protobuf:"bytes,3,opt,name=client_secret,json=clientSecret,proto3" json:"client_secret,omitempty"`
HcpConfig *HCPConfig `protobuf:"bytes,4,opt,name=hcp_config,json=hcpConfig,proto3" json:"hcp_config,omitempty"`
Proxy *ProxyConfig `protobuf:"bytes,5,opt,name=proxy,proto3" json:"proxy,omitempty"`
Metrics *MetricsConfig `protobuf:"bytes,6,opt,name=metrics,proto3" json:"metrics,omitempty"`
}
func (x *TelemetryState) Reset() {
*x = TelemetryState{}
if protoimpl.UnsafeEnabled {
mi := &file_pbhcp_v2_telemetry_state_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *TelemetryState) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TelemetryState) ProtoMessage() {}
func (x *TelemetryState) ProtoReflect() protoreflect.Message {
mi := &file_pbhcp_v2_telemetry_state_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TelemetryState.ProtoReflect.Descriptor instead.
func (*TelemetryState) Descriptor() ([]byte, []int) {
return file_pbhcp_v2_telemetry_state_proto_rawDescGZIP(), []int{0}
}
func (x *TelemetryState) GetResourceId() string {
if x != nil {
return x.ResourceId
}
return ""
}
func (x *TelemetryState) GetClientId() string {
if x != nil {
return x.ClientId
}
return ""
}
func (x *TelemetryState) GetClientSecret() string {
if x != nil {
return x.ClientSecret
}
return ""
}
func (x *TelemetryState) GetHcpConfig() *HCPConfig {
if x != nil {
return x.HcpConfig
}
return nil
}
func (x *TelemetryState) GetProxy() *ProxyConfig {
if x != nil {
return x.Proxy
}
return nil
}
func (x *TelemetryState) GetMetrics() *MetricsConfig {
if x != nil {
return x.Metrics
}
return nil
}
// MetricsConfig configures metric specific collection details
type MetricsConfig struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Endpoint is the HTTPS address and path to forward metrics to
Endpoint string `protobuf:"bytes,1,opt,name=endpoint,proto3" json:"endpoint,omitempty"`
// IncludeList contains patterns to match against metric names. Only matched metrics are forwarded.
IncludeList []string `protobuf:"bytes,2,rep,name=include_list,json=includeList,proto3" json:"include_list,omitempty"`
// Labels contains key-value pairs that are associated with all metrics collected and forwarded.
Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Disabled toggles metric forwarding. If true, metric forwarding will stop until disabled is set to false.
Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"`
}
func (x *MetricsConfig) Reset() {
*x = MetricsConfig{}
if protoimpl.UnsafeEnabled {
mi := &file_pbhcp_v2_telemetry_state_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *MetricsConfig) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MetricsConfig) ProtoMessage() {}
func (x *MetricsConfig) ProtoReflect() protoreflect.Message {
mi := &file_pbhcp_v2_telemetry_state_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MetricsConfig.ProtoReflect.Descriptor instead.
func (*MetricsConfig) Descriptor() ([]byte, []int) {
return file_pbhcp_v2_telemetry_state_proto_rawDescGZIP(), []int{1}
}
func (x *MetricsConfig) GetEndpoint() string {
if x != nil {
return x.Endpoint
}
return ""
}
func (x *MetricsConfig) GetIncludeList() []string {
if x != nil {
return x.IncludeList
}
return nil
}
func (x *MetricsConfig) GetLabels() map[string]string {
if x != nil {
return x.Labels
}
return nil
}
func (x *MetricsConfig) GetDisabled() bool {
if x != nil {
return x.Disabled
}
return false
}
// ProxyConfig describes configuration for forwarding requests through an http proxy
type ProxyConfig struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// HttpProxy configures the http proxy to use for HTTP (non-TLS) requests.
HttpProxy string `protobuf:"bytes,1,opt,name=http_proxy,json=httpProxy,proto3" json:"http_proxy,omitempty"`
// HttpsProxy configures the http proxy to use for HTTPS (TLS) requests.
HttpsProxy string `protobuf:"bytes,2,opt,name=https_proxy,json=httpsProxy,proto3" json:"https_proxy,omitempty"`
// NoProxy can be configured to include domains which should NOT be forwarded through the configured http proxy
NoProxy []string `protobuf:"bytes,3,rep,name=no_proxy,json=noProxy,proto3" json:"no_proxy,omitempty"`
}
func (x *ProxyConfig) Reset() {
*x = ProxyConfig{}
if protoimpl.UnsafeEnabled {
mi := &file_pbhcp_v2_telemetry_state_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ProxyConfig) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ProxyConfig) ProtoMessage() {}
func (x *ProxyConfig) ProtoReflect() protoreflect.Message {
mi := &file_pbhcp_v2_telemetry_state_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ProxyConfig.ProtoReflect.Descriptor instead.
func (*ProxyConfig) Descriptor() ([]byte, []int) {
return file_pbhcp_v2_telemetry_state_proto_rawDescGZIP(), []int{2}
}
func (x *ProxyConfig) GetHttpProxy() string {
if x != nil {
return x.HttpProxy
}
return ""
}
func (x *ProxyConfig) GetHttpsProxy() string {
if x != nil {
return x.HttpsProxy
}
return ""
}
func (x *ProxyConfig) GetNoProxy() []string {
if x != nil {
return x.NoProxy
}
return nil
}
var File_pbhcp_v2_telemetry_state_proto protoreflect.FileDescriptor
var file_pbhcp_v2_telemetry_state_proto_rawDesc = []byte{
0x0a, 0x1e, 0x70, 0x62, 0x68, 0x63, 0x70, 0x2f, 0x76, 0x32, 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x6d,
0x65, 0x74, 0x72, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x12, 0x17, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73,
0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70, 0x2e, 0x76, 0x32, 0x1a, 0x19, 0x70, 0x62, 0x68, 0x63, 0x70,
0x2f, 0x76, 0x32, 0x2f, 0x68, 0x63, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x70, 0x62, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x22, 0xbc, 0x02, 0x0a, 0x0e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79,
0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x6f,
0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e,
0x74, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65,
0x63, 0x72, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65,
0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x41, 0x0a, 0x0a, 0x68, 0x63, 0x70, 0x5f,
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x68,
0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e,
0x68, 0x63, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x43, 0x50, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x52, 0x09, 0x68, 0x63, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3a, 0x0a, 0x05, 0x70,
0x72, 0x6f, 0x78, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x68, 0x61, 0x73,
0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63,
0x70, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x52, 0x05, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x12, 0x40, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69,
0x63, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69,
0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70, 0x2e,
0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x3a, 0x06, 0xa2, 0x93, 0x04, 0x02, 0x08,
0x01, 0x22, 0xf1, 0x01, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x43, 0x6f, 0x6e,
0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12,
0x21, 0x0a, 0x0c, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18,
0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4c, 0x69,
0x73, 0x74, 0x12, 0x4a, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x32, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63,
0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74,
0x72, 0x69, 0x63, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c,
0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x1a,
0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08,
0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61,
0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x68, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x70, 0x72, 0x6f,
0x78, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x68, 0x74, 0x74, 0x70, 0x50, 0x72,
0x6f, 0x78, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x74, 0x74, 0x70, 0x73, 0x5f, 0x70, 0x72, 0x6f,
0x78, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x73, 0x50,
0x72, 0x6f, 0x78, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x78, 0x79,
0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x6f, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x42,
0xea, 0x01, 0x0a, 0x1b, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72,
0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70, 0x2e, 0x76, 0x32, 0x42,
0x13, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x50,
0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e,
0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63,
0x2f, 0x70, 0x62, 0x68, 0x63, 0x70, 0x2f, 0x76, 0x32, 0x3b, 0x68, 0x63, 0x70, 0x76, 0x32, 0xa2,
0x02, 0x03, 0x48, 0x43, 0x48, 0xaa, 0x02, 0x17, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72,
0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x48, 0x63, 0x70, 0x2e, 0x56, 0x32, 0xca,
0x02, 0x17, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73,
0x75, 0x6c, 0x5c, 0x48, 0x63, 0x70, 0x5c, 0x56, 0x32, 0xe2, 0x02, 0x23, 0x48, 0x61, 0x73, 0x68,
0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x48, 0x63, 0x70,
0x5c, 0x56, 0x32, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea,
0x02, 0x1a, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e,
0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x48, 0x63, 0x70, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
}
var (
	file_pbhcp_v2_telemetry_state_proto_rawDescOnce sync.Once
	file_pbhcp_v2_telemetry_state_proto_rawDescData = file_pbhcp_v2_telemetry_state_proto_rawDesc
)

func file_pbhcp_v2_telemetry_state_proto_rawDescGZIP() []byte {
	file_pbhcp_v2_telemetry_state_proto_rawDescOnce.Do(func() {
		file_pbhcp_v2_telemetry_state_proto_rawDescData = protoimpl.X.CompressGZIP(file_pbhcp_v2_telemetry_state_proto_rawDescData)
	})
	return file_pbhcp_v2_telemetry_state_proto_rawDescData
}
var file_pbhcp_v2_telemetry_state_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_pbhcp_v2_telemetry_state_proto_goTypes = []interface{}{
	(*TelemetryState)(nil), // 0: hashicorp.consul.hcp.v2.TelemetryState
	(*MetricsConfig)(nil),  // 1: hashicorp.consul.hcp.v2.MetricsConfig
	(*ProxyConfig)(nil),    // 2: hashicorp.consul.hcp.v2.ProxyConfig
	nil,                    // 3: hashicorp.consul.hcp.v2.MetricsConfig.LabelsEntry
	(*HCPConfig)(nil),      // 4: hashicorp.consul.hcp.v2.HCPConfig
}
var file_pbhcp_v2_telemetry_state_proto_depIdxs = []int32{
	4, // 0: hashicorp.consul.hcp.v2.TelemetryState.hcp_config:type_name -> hashicorp.consul.hcp.v2.HCPConfig
	2, // 1: hashicorp.consul.hcp.v2.TelemetryState.proxy:type_name -> hashicorp.consul.hcp.v2.ProxyConfig
	1, // 2: hashicorp.consul.hcp.v2.TelemetryState.metrics:type_name -> hashicorp.consul.hcp.v2.MetricsConfig
	3, // 3: hashicorp.consul.hcp.v2.MetricsConfig.labels:type_name -> hashicorp.consul.hcp.v2.MetricsConfig.LabelsEntry
	4, // [4:4] is the sub-list for method output_type
	4, // [4:4] is the sub-list for method input_type
	4, // [4:4] is the sub-list for extension type_name
	4, // [4:4] is the sub-list for extension extendee
	0, // [0:4] is the sub-list for field type_name
}
func init() { file_pbhcp_v2_telemetry_state_proto_init() }
func file_pbhcp_v2_telemetry_state_proto_init() {
	if File_pbhcp_v2_telemetry_state_proto != nil {
		return
	}
	file_pbhcp_v2_hcp_config_proto_init()
	if !protoimpl.UnsafeEnabled {
		file_pbhcp_v2_telemetry_state_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*TelemetryState); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pbhcp_v2_telemetry_state_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*MetricsConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pbhcp_v2_telemetry_state_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ProxyConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_pbhcp_v2_telemetry_state_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   4,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_pbhcp_v2_telemetry_state_proto_goTypes,
		DependencyIndexes: file_pbhcp_v2_telemetry_state_proto_depIdxs,
		MessageInfos:      file_pbhcp_v2_telemetry_state_proto_msgTypes,
	}.Build()
	File_pbhcp_v2_telemetry_state_proto = out.File
	file_pbhcp_v2_telemetry_state_proto_rawDesc = nil
	file_pbhcp_v2_telemetry_state_proto_goTypes = nil
	file_pbhcp_v2_telemetry_state_proto_depIdxs = nil
}

@ -1,55 +0,0 @@
syntax = "proto3";
package hashicorp.consul.hcp.v2;
import "pbhcp/v2/hcp_config.proto";
import "pbresource/annotations.proto";
// TelemetryState describes configuration required to forward telemetry to the HashiCorp Cloud Platform.
// This resource is managed internally and is only written if the cluster is linked to HCP. Any
// manual changes to the resource will be reconciled and overwritten with the internally computed
// state.
message TelemetryState {
option (hashicorp.consul.resource.spec) = {scope: SCOPE_CLUSTER};
// ResourceId is the identifier for the cluster linked with HCP.
string resource_id = 1;
// ClientId is the oauth client identifier for cluster.
// This client has capabilities limited to writing telemetry data for this cluster.
string client_id = 2;
// ClientSecret is the oauth secret used to authenticate requests to send telemetry data to HCP.
string client_secret = 3;
HCPConfig hcp_config = 4;
ProxyConfig proxy = 5;
MetricsConfig metrics = 6;
}
// MetricsConfig configures metric specific collection details
message MetricsConfig {
// Endpoint is the HTTPS address and path to forward metrics to
string endpoint = 1;
// IncludeList contains patterns to match against metric names. Only matched metrics are forwarded.
repeated string include_list = 2;
// Labels contains key value pairs that are associated with all metrics collected and fowarded.
map<string, string> labels = 3;
// Disabled toggles metric forwarding. If true, metric forwarding will stop until disabled is set to false.
bool disabled = 4;
}
// ProxyConfig describes configuration for forwarding requests through an http proxy
message ProxyConfig {
// HttpProxy configures the http proxy to use for HTTP (non-TLS) requests.
string http_proxy = 1;
// HttpsProxy configures the http proxy to use for HTTPS (TLS) requests.
string https_proxy = 2;
// NoProxy can be configured to include domains which should NOT be forwarded through the configured http proxy
repeated string no_proxy = 3;
}
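For context, the generated Go structs removed by this change mirror these messages field-for-field. A minimal sketch of how they could be populated, assuming the pre-removal import path taken from the file's go_package option; every literal value is an illustrative placeholder:

```go
package main

import (
	"fmt"

	hcpv2 "github.com/hashicorp/consul/proto-public/pbhcp/v2"
)

func main() {
	// Build a TelemetryState using the (now-removed) generated types.
	// Field names follow the proto definitions above; HcpConfig, which comes
	// from pbhcp/v2/hcp_config.proto, is omitted here for brevity.
	state := &hcpv2.TelemetryState{
		ResourceId:   "example-hcp-cluster-resource-id",
		ClientId:     "example-client-id",
		ClientSecret: "example-client-secret",
		Metrics: &hcpv2.MetricsConfig{
			Endpoint:    "https://telemetry.example/v1/metrics",
			IncludeList: []string{"consul.raft"},
			Labels:      map[string]string{"cluster": "dc1"},
		},
		Proxy: &hcpv2.ProxyConfig{
			HttpProxy:  "http://proxy.internal:3128",
			HttpsProxy: "http://proxy.internal:3128",
			NoProxy:    []string{"localhost", "127.0.0.1"},
		},
	}

	fmt.Println(state.GetMetrics().GetEndpoint())
}
```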

@ -1,69 +0,0 @@
// Code generated by protoc-gen-deepcopy. DO NOT EDIT.
package hcpv2

import (
	proto "google.golang.org/protobuf/proto"
)

// DeepCopyInto supports using TelemetryState within kubernetes types, where deepcopy-gen is used.
func (in *TelemetryState) DeepCopyInto(out *TelemetryState) {
	proto.Reset(out)
	proto.Merge(out, proto.Clone(in))
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TelemetryState. Required by controller-gen.
func (in *TelemetryState) DeepCopy() *TelemetryState {
	if in == nil {
		return nil
	}
	out := new(TelemetryState)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new TelemetryState. Required by controller-gen.
func (in *TelemetryState) DeepCopyInterface() interface{} {
	return in.DeepCopy()
}

// DeepCopyInto supports using MetricsConfig within kubernetes types, where deepcopy-gen is used.
func (in *MetricsConfig) DeepCopyInto(out *MetricsConfig) {
	proto.Reset(out)
	proto.Merge(out, proto.Clone(in))
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsConfig. Required by controller-gen.
func (in *MetricsConfig) DeepCopy() *MetricsConfig {
	if in == nil {
		return nil
	}
	out := new(MetricsConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new MetricsConfig. Required by controller-gen.
func (in *MetricsConfig) DeepCopyInterface() interface{} {
	return in.DeepCopy()
}

// DeepCopyInto supports using ProxyConfig within kubernetes types, where deepcopy-gen is used.
func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) {
	proto.Reset(out)
	proto.Merge(out, proto.Clone(in))
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig. Required by controller-gen.
func (in *ProxyConfig) DeepCopy() *ProxyConfig {
	if in == nil {
		return nil
	}
	out := new(ProxyConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig. Required by controller-gen.
func (in *ProxyConfig) DeepCopyInterface() interface{} {
	return in.DeepCopy()
}
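These helpers simply wrap proto.Clone, so a copy is fully independent of its source. A minimal sketch of how callers would use them, assuming the same pre-removal package; values are placeholders:

```go
package main

import (
	"fmt"

	hcpv2 "github.com/hashicorp/consul/proto-public/pbhcp/v2"
)

func main() {
	orig := &hcpv2.MetricsConfig{
		Endpoint: "https://telemetry.example/v1/metrics",
		Labels:   map[string]string{"cluster": "dc1"},
	}

	// DeepCopy returns an independent clone (backed by proto.Clone),
	// so mutating the copy does not touch the original.
	copied := orig.DeepCopy()
	copied.Labels["cluster"] = "dc2"

	fmt.Println(orig.Labels["cluster"])   // dc1
	fmt.Println(copied.Labels["cluster"]) // dc2
}
```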

@ -1,44 +0,0 @@
// Code generated by protoc-json-shim. DO NOT EDIT.
package hcpv2

import (
	protojson "google.golang.org/protobuf/encoding/protojson"
)

// MarshalJSON is a custom marshaler for TelemetryState
func (this *TelemetryState) MarshalJSON() ([]byte, error) {
	str, err := TelemetryStateMarshaler.Marshal(this)
	return []byte(str), err
}

// UnmarshalJSON is a custom unmarshaler for TelemetryState
func (this *TelemetryState) UnmarshalJSON(b []byte) error {
	return TelemetryStateUnmarshaler.Unmarshal(b, this)
}

// MarshalJSON is a custom marshaler for MetricsConfig
func (this *MetricsConfig) MarshalJSON() ([]byte, error) {
	str, err := TelemetryStateMarshaler.Marshal(this)
	return []byte(str), err
}

// UnmarshalJSON is a custom unmarshaler for MetricsConfig
func (this *MetricsConfig) UnmarshalJSON(b []byte) error {
	return TelemetryStateUnmarshaler.Unmarshal(b, this)
}

// MarshalJSON is a custom marshaler for ProxyConfig
func (this *ProxyConfig) MarshalJSON() ([]byte, error) {
	str, err := TelemetryStateMarshaler.Marshal(this)
	return []byte(str), err
}

// UnmarshalJSON is a custom unmarshaler for ProxyConfig
func (this *ProxyConfig) UnmarshalJSON(b []byte) error {
	return TelemetryStateUnmarshaler.Unmarshal(b, this)
}

var (
	TelemetryStateMarshaler   = &protojson.MarshalOptions{}
	TelemetryStateUnmarshaler = &protojson.UnmarshalOptions{DiscardUnknown: false}
)
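Because these methods delegate to protojson, the messages round-trip through encoding/json using protobuf JSON field names (camelCase). A minimal hedged sketch, again assuming the pre-removal package; values are placeholders:

```go
package main

import (
	"encoding/json"
	"fmt"

	hcpv2 "github.com/hashicorp/consul/proto-public/pbhcp/v2"
)

func main() {
	in := &hcpv2.ProxyConfig{
		HttpProxy: "http://proxy.internal:3128",
		NoProxy:   []string{"localhost"},
	}

	// encoding/json picks up the custom MarshalJSON, which delegates to protojson.
	b, err := json.Marshal(in)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // e.g. {"httpProxy":"http://proxy.internal:3128","noProxy":["localhost"]}

	// UnmarshalJSON delegates to protojson as well; unknown fields are rejected
	// because DiscardUnknown is false above.
	out := &hcpv2.ProxyConfig{}
	if err := json.Unmarshal(b, out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetHttpProxy())
}
```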