diff --git a/.changelog/21655.txt b/.changelog/21655.txt new file mode 100644 index 0000000000..3ef13bdf88 --- /dev/null +++ b/.changelog/21655.txt @@ -0,0 +1,3 @@ +```release-note:improvement +xds: configures Envoy to load balance over all instances of an external service configured with hostnames when "envoy_dns_discovery_type" is set to "STRICT_DNS" +``` diff --git a/.changelog/21795.txt b/.changelog/21795.txt new file mode 100644 index 0000000000..82382255bb --- /dev/null +++ b/.changelog/21795.txt @@ -0,0 +1,3 @@ +```release-note:feature +docs: add documentation for the Grafana dashboards +``` diff --git a/.changelog/21871.txt b/.changelog/21871.txt new file mode 100644 index 0000000000..425ba2b5e2 --- /dev/null +++ b/.changelog/21871.txt @@ -0,0 +1,3 @@ +```release-note:bug +proxycfg: fix a bug where peered upstream watches were canceled even when another target still needed them. +``` diff --git a/.changelog/21883.txt b/.changelog/21883.txt new file mode 100644 index 0000000000..44eb78fc43 --- /dev/null +++ b/.changelog/21883.txt @@ -0,0 +1,3 @@ +```release-note:feature +v2: remove HCP Link integration +``` diff --git a/.changelog/21908.txt b/.changelog/21908.txt new file mode 100644 index 0000000000..16668cf7eb --- /dev/null +++ b/.changelog/21908.txt @@ -0,0 +1,3 @@ +```release-note:security +Resolved an issue where HCL would allow duplicate keys in ACL policy configuration. +``` \ No newline at end of file diff --git a/.changelog/21909.txt b/.changelog/21909.txt new file mode 100644 index 0000000000..b49562f137 --- /dev/null +++ b/.changelog/21909.txt @@ -0,0 +1,3 @@ +```release-note:bug +state: ensure that identical manual virtual IP updates do not bump the modify indexes +``` diff --git a/.changelog/21930.txt b/.changelog/21930.txt new file mode 100644 index 0000000000..bfcf2748f0 --- /dev/null +++ b/.changelog/21930.txt @@ -0,0 +1,3 @@ +```release-note:security +api: Enforce strict Content-Type header validation to protect against an XSS vulnerability. +``` \ No newline at end of file diff --git a/.changelog/21950.txt b/.changelog/21950.txt new file mode 100644 index 0000000000..e15f9d6c80 --- /dev/null +++ b/.changelog/21950.txt @@ -0,0 +1,3 @@ +```release-note:security +Removed the ability to use bexpr to filter endpoint results the token does not have ACL read access to +``` \ No newline at end of file diff --git a/.changelog/21951.txt b/.changelog/21951.txt new file mode 100644 index 0000000000..89796b0e37 --- /dev/null +++ b/.changelog/21951.txt @@ -0,0 +1,3 @@ +```release-note:security +Update `github.com/golang-jwt/jwt/v4` to v4.5.1 to address [GHSA-29wx-vh33-7x7r](https://github.com/golang-jwt/jwt/security/advisories/GHSA-29wx-vh33-7x7r). +``` \ No newline at end of file diff --git a/.changelog/22001.txt b/.changelog/22001.txt new file mode 100644 index 0000000000..04b211c9ed --- /dev/null +++ b/.changelog/22001.txt @@ -0,0 +1,3 @@ +```release-note:security +Update `golang.org/x/crypto` to v0.31.0 to address [GO-2024-3321](https://pkg.go.dev/vuln/GO-2024-3321).
+``` \ No newline at end of file diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index d9af3f042a..da52ae11b7 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,3 +1,5 @@ +* @hashicorp/consul-selfmanage-maintainers + # Technical Writer Review /website/content/docs/ @hashicorp/consul-docs @@ -6,8 +8,8 @@ # release configuration -/.release/ @hashicorp/release-engineering @hashicorp/github-consul-core -/.github/workflows/build.yml @hashicorp/release-engineering @hashicorp/github-consul-core +/.release/ @hashicorp/team-selfmanaged-releng @hashicorp/consul-selfmanage-maintainers +/.github/workflows/build.yml @hashicorp/team-selfmanaged-releng @hashicorp/consul-selfmanage-maintainers # Staff Engineer Review (protocol buffer definitions) diff --git a/.github/workflows/nightly-test-integrations.yml b/.github/workflows/nightly-test-integrations.yml index cfaa253030..184937c093 100644 --- a/.github/workflows/nightly-test-integrations.yml +++ b/.github/workflows/nightly-test-integrations.yml @@ -217,7 +217,7 @@ jobs: # matrix.consul-version (i.e. whenever the highest common Envoy version across active # Consul versions changes). The minor Envoy version does not necessarily need to be # kept current for the purpose of these tests, but the major (1.N) version should be. - ENVOY_VERSION: 1.27.6 + ENVOY_VERSION: 1.28.7 steps: - name: Checkout code uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 diff --git a/.release/security-scan.hcl b/.release/security-scan.hcl index 3edf2cac82..7bb8f179f9 100644 --- a/.release/security-scan.hcl +++ b/.release/security-scan.hcl @@ -14,7 +14,7 @@ container { dependencies = true - alpine_secdb = true + osv = true secrets { matchers { @@ -36,9 +36,14 @@ container { # periodically cleaned up to remove items that are no longer found by the scanner. triage { suppress { - # N.b. `vulnerabilites` is the correct spelling for this tool. - vulnerabilites = [ + vulnerabilities = [ "CVE-2024-8096", # curl@8.9.1-r2, + "CVE-2024-9143", # openssl@3.3.2-r0, + "CVE-2024-3596", # openssl@3.3.2-r0, + "CVE-2024-2236", # openssl@3.3.2-r0, + "CVE-2024-26458", # openssl@3.3.2-r0, + "CVE-2024-2511", # openssl@3.3.2-r0, + # The above can be removed once they are resolved in the Alpine base image. ] paths = [ "internal/tools/proto-gen-rpc-glue/e2e/consul/*", @@ -78,8 +83,8 @@ binary { # periodically cleaned up to remove items that are no longer found by the scanner. triage { suppress { - # N.b. `vulnerabilites` is the correct spelling for this tool. - vulnerabilites = [ + vulnerabilities = [ + "GO-2022-0635", // github.com/aws/aws-sdk-go@v1.55.5 ] paths = [ "internal/tools/proto-gen-rpc-glue/e2e/consul/*", diff --git a/CHANGELOG.md b/CHANGELOG.md index 675b8d5bf5..69978221de 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,19 @@ +## 1.20.1 (October 29, 2024) +BREAKING CHANGES: + +* mesh: Enable Envoy `HttpConnectionManager.normalize_path` by default on inbound traffic to mesh proxies. This resolves [CVE-2024-10005](https://nvd.nist.gov/vuln/detail/CVE-2024-10005). [[GH-21816](https://github.com/hashicorp/consul/issues/21816)] + +SECURITY: + +* mesh: Add `contains` and `ignoreCase` to L7 Intentions HTTP header matching criteria to support configuration resilient to variable casing and multiple values. This resolves [CVE-2024-10006](https://nvd.nist.gov/vuln/detail/CVE-2024-10006).
[[GH-21816](https://github.com/hashicorp/consul/issues/21816)] +* mesh: Add `http.incoming.requestNormalization` to Mesh configuration entry to support inbound service traffic request normalization. This resolves [CVE-2024-10005](https://nvd.nist.gov/vuln/detail/CVE-2024-10005) and [CVE-2024-10006](https://nvd.nist.gov/vuln/detail/CVE-2024-10006). [[GH-21816](https://github.com/hashicorp/consul/issues/21816)] + +IMPROVEMENTS: + +* api: remove dependency on proto-public, protobuf, and grpc [[GH-21780](https://github.com/hashicorp/consul/issues/21780)] +* snapshot agent: **(Enterprise only)** Implement Service Principal Auth for snapshot agent on Azure. +* xds: configures Envoy to load balance over all instances of an external service configured with hostnames when "envoy_dns_discovery_type" is set to "STRICT_DNS" [[GH-21655](https://github.com/hashicorp/consul/issues/21655)] + ## 1.20.0 (October 14, 2024) SECURITY: @@ -59,6 +75,38 @@ BUG FIXES: * jwt-provider: change dns lookup family from the default of AUTO which would prefer ipv6 to ALL if LOGICAL_DNS is used or PREFER_IPV4 if STRICT_DNS is used to gracefully handle transitions to ipv6. [[GH-21703](https://github.com/hashicorp/consul/issues/21703)] +## 1.19.3 Enterprise (October 29, 2024) +BREAKING CHANGES: + +* mesh: **(Enterprise Only)** Enable Envoy `HttpConnectionManager.normalize_path` by default on inbound traffic to mesh proxies. This resolves [CVE-2024-10005](https://nvd.nist.gov/vuln/detail/CVE-2024-10005). + +SECURITY: + +* Explicitly set 'Content-Type' header to mitigate XSS vulnerability. [[GH-21704](https://github.com/hashicorp/consul/issues/21704)] +* Implement HTML sanitization for user-generated content to prevent XSS attacks in the UI. [[GH-21711](https://github.com/hashicorp/consul/issues/21711)] +* UI: Remove codemirror linting due to package dependency [[GH-21726](https://github.com/hashicorp/consul/issues/21726)] +* Upgrade Go to use 1.22.7. This addresses CVE +[CVE-2024-34155](https://nvd.nist.gov/vuln/detail/CVE-2024-34155) [[GH-21705](https://github.com/hashicorp/consul/issues/21705)] +* Upgrade to support aws/aws-sdk-go `v1.55.5 or higher`. This resolves CVEs +[CVE-2020-8911](https://nvd.nist.gov/vuln/detail/cve-2020-8911) and +[CVE-2020-8912](https://nvd.nist.gov/vuln/detail/cve-2020-8912). [[GH-21684](https://github.com/hashicorp/consul/issues/21684)] +* mesh: **(Enterprise Only)** Add `contains` and `ignoreCase` to L7 Intentions HTTP header matching criteria to support configuration resilient to variable casing and multiple values. This resolves [CVE-2024-10006](https://nvd.nist.gov/vuln/detail/CVE-2024-10006). +* mesh: **(Enterprise Only)** Add `http.incoming.requestNormalization` to Mesh configuration entry to support inbound service traffic request normalization. This resolves [CVE-2024-10005](https://nvd.nist.gov/vuln/detail/CVE-2024-10005) and [CVE-2024-10006](https://nvd.nist.gov/vuln/detail/CVE-2024-10006).
+* ui: Pin a newer resolution of Braces [[GH-21710](https://github.com/hashicorp/consul/issues/21710)] +* ui: Pin a newer resolution of Codemirror [[GH-21715](https://github.com/hashicorp/consul/issues/21715)] +* ui: Pin a newer resolution of Markdown-it [[GH-21717](https://github.com/hashicorp/consul/issues/21717)] +* ui: Pin a newer resolution of ansi-html [[GH-21735](https://github.com/hashicorp/consul/issues/21735)] + +IMPROVEMENTS: + +* security: upgrade ubi base image to 9.4 [[GH-21750](https://github.com/hashicorp/consul/issues/21750)] +* api: remove dependency on proto-public, protobuf, and grpc [[GH-21780](https://github.com/hashicorp/consul/issues/21780)] +* xds: configures Envoy to load balance over all instances of an external service configured with hostnames when "envoy_dns_discovery_type" is set to "STRICT_DNS" [[GH-21655](https://github.com/hashicorp/consul/issues/21655)] + +BUG FIXES: + +* jwt-provider: change dns lookup family from the default of AUTO which would prefer ipv6 to ALL if LOGICAL_DNS is used or PREFER_IPV4 if STRICT_DNS is used to gracefully handle transitions to ipv6. [[GH-21703](https://github.com/hashicorp/consul/issues/21703)] + ## 1.19.2 (August 26, 2024) SECURITY: @@ -73,6 +121,39 @@ BUG FIXES: * api-gateway: **(Enterprise only)** ensure clusters are properly created for JWT providers with a remote URI for the JWKS endpoint [[GH-21604](https://github.com/hashicorp/consul/issues/21604)] +## 1.18.5 Enterprise (October 29, 2024) + +Enterprise LTS: Consul Enterprise 1.18 is a Long-Term Support (LTS) release. +BREAKING CHANGES: + +* mesh: **(Enterprise Only)** Enable Envoy `HttpConnectionManager.normalize_path` by default on inbound traffic to mesh proxies. This resolves [CVE-2024-10005](https://nvd.nist.gov/vuln/detail/CVE-2024-10005). + +SECURITY: + +* Explicitly set 'Content-Type' header to mitigate XSS vulnerability. [[GH-21704](https://github.com/hashicorp/consul/issues/21704)] +* Implement HTML sanitization for user-generated content to prevent XSS attacks in the UI. [[GH-21711](https://github.com/hashicorp/consul/issues/21711)] +* Upgrade Go to use 1.22.7. This addresses CVE +[CVE-2024-34155](https://nvd.nist.gov/vuln/detail/CVE-2024-34155) [[GH-21705](https://github.com/hashicorp/consul/issues/21705)] +* Upgrade to support aws/aws-sdk-go `v1.55.5 or higher`. This resolves CVEs +[CVE-2020-8911](https://nvd.nist.gov/vuln/detail/cve-2020-8911) and +[CVE-2020-8912](https://nvd.nist.gov/vuln/detail/cve-2020-8912). [[GH-21684](https://github.com/hashicorp/consul/issues/21684)] +* mesh: **(Enterprise Only)** Add `contains` and `ignoreCase` to L7 Intentions HTTP header matching criteria to support configuration resilient to variable casing and multiple values. This resolves [CVE-2024-10006](https://nvd.nist.gov/vuln/detail/CVE-2024-10006). +* mesh: **(Enterprise Only)** Add `http.incoming.requestNormalization` to Mesh configuration entry to support inbound service traffic request normalization. This resolves [CVE-2024-10005](https://nvd.nist.gov/vuln/detail/CVE-2024-10005) and [CVE-2024-10006](https://nvd.nist.gov/vuln/detail/CVE-2024-10006). 
+* ui: Pin a newer resolution of Braces [[GH-21710](https://github.com/hashicorp/consul/issues/21710)] +* ui: Pin a newer resolution of Codemirror [[GH-21715](https://github.com/hashicorp/consul/issues/21715)] +* ui: Pin a newer resolution of Markdown-it [[GH-21717](https://github.com/hashicorp/consul/issues/21717)] +* ui: Pin a newer resolution of ansi-html [[GH-21735](https://github.com/hashicorp/consul/issues/21735)] + +IMPROVEMENTS: + +* security: upgrade ubi base image to 9.4 [[GH-21750](https://github.com/hashicorp/consul/issues/21750)] +* api: remove dependency on proto-public, protobuf, and grpc [[GH-21780](https://github.com/hashicorp/consul/issues/21780)] +* xds: configures Envoy to load balance over all instances of an external service configured with hostnames when "envoy_dns_discovery_type" is set to "STRICT_DNS" [[GH-21655](https://github.com/hashicorp/consul/issues/21655)] + +BUG FIXES: + +* jwt-provider: change dns lookup family from the default of AUTO which would prefer ipv6 to ALL if LOGICAL_DNS is used or PREFER_IPV4 if STRICT_DNS is used to gracefully handle transitions to ipv6. [[GH-21703](https://github.com/hashicorp/consul/issues/21703)] + ## 1.18.4 Enterprise (August 26, 2024) Enterprise LTS: Consul Enterprise 1.18 is a Long-Term Support (LTS) release. @@ -93,6 +174,35 @@ IMPROVEMENTS: * Use Envoy's default for a route's validate_clusters option, which is false. This fixes a case where non-existent clusters could cause a route to no longer route to any of its backends, including existing ones. [[GH-21587](https://github.com/hashicorp/consul/issues/21587)] +## 1.15.15 Enterprise (October 29, 2024) + +Enterprise LTS: Consul Enterprise 1.15 is a Long-Term Support (LTS) release. +BREAKING CHANGES: + +* mesh: **(Enterprise Only)** Enable Envoy `HttpConnectionManager.normalize_path` by default on inbound traffic to mesh proxies. This resolves [CVE-2024-10005](https://nvd.nist.gov/vuln/detail/CVE-2024-10005). + +SECURITY: + +* Explicitly set 'Content-Type' header to mitigate XSS vulnerability. [[GH-21704](https://github.com/hashicorp/consul/issues/21704)] +* Implement HTML sanitization for user-generated content to prevent XSS attacks in the UI. [[GH-21711](https://github.com/hashicorp/consul/issues/21711)] +* UI: Remove codemirror linting due to package dependency [[GH-21726](https://github.com/hashicorp/consul/issues/21726)] +* Upgrade Go to use 1.22.7. This addresses CVE +[CVE-2024-34155](https://nvd.nist.gov/vuln/detail/CVE-2024-34155) [[GH-21705](https://github.com/hashicorp/consul/issues/21705)] +* Upgrade to support aws/aws-sdk-go `v1.55.5 or higher`. This resolves CVEs +[CVE-2020-8911](https://nvd.nist.gov/vuln/detail/cve-2020-8911) and +[CVE-2020-8912](https://nvd.nist.gov/vuln/detail/cve-2020-8912). [[GH-21684](https://github.com/hashicorp/consul/issues/21684)] +* mesh: **(Enterprise Only)** Add `contains` and `ignoreCase` to L7 Intentions HTTP header matching criteria to support configuration resilient to variable casing and multiple values. This resolves [CVE-2024-10006](https://nvd.nist.gov/vuln/detail/CVE-2024-10006). +* mesh: **(Enterprise Only)** Add `http.incoming.requestNormalization` to Mesh configuration entry to support inbound service traffic request normalization. This resolves [CVE-2024-10005](https://nvd.nist.gov/vuln/detail/CVE-2024-10005) and [CVE-2024-10006](https://nvd.nist.gov/vuln/detail/CVE-2024-10006). 
+* ui: Pin a newer resolution of Braces [[GH-21710](https://github.com/hashicorp/consul/issues/21710)] +* ui: Pin a newer resolution of Codemirror [[GH-21715](https://github.com/hashicorp/consul/issues/21715)] +* ui: Pin a newer resolution of Markdown-it [[GH-21717](https://github.com/hashicorp/consul/issues/21717)] +* ui: Pin a newer resolution of ansi-html [[GH-21735](https://github.com/hashicorp/consul/issues/21735)] + +IMPROVEMENTS: + +* security: upgrade ubi base image to 9.4 [[GH-21750](https://github.com/hashicorp/consul/issues/21750)] +* xds: configures Envoy to load balance over all instances of an external service configured with hostnames when "envoy_dns_discovery_type" is set to "STRICT_DNS" [[GH-21655](https://github.com/hashicorp/consul/issues/21655)] + ## 1.15.14 Enterprise (August 26, 2024) Enterprise LTS: Consul Enterprise 1.15 is a Long-Term Support (LTS) release. diff --git a/acl/acl.go b/acl/acl.go index 753db01516..035aa06db3 100644 --- a/acl/acl.go +++ b/acl/acl.go @@ -22,6 +22,9 @@ type Config struct { // WildcardName is the string that represents a request to authorize a wildcard permission WildcardName string + // WarnOnDuplicateKey makes a duplicate key in policy rules log a warning instead of erroring; duplicates error by default, but in certain instances we need to maintain backwards compatibility + WarnOnDuplicateKey bool + // embedded enterprise configuration EnterpriseConfig } diff --git a/acl/policy.go b/acl/policy.go index 86c9e83cfc..54eb4e6587 100644 --- a/acl/policy.go +++ b/acl/policy.go @@ -310,8 +310,8 @@ func (pr *PolicyRules) Validate(conf *Config) error { return nil } -func parse(rules string, conf *Config, meta *EnterprisePolicyMeta) (*Policy, error) { - p, err := decodeRules(rules, conf, meta) +func parse(rules string, warnOnDuplicateKey bool, conf *Config, meta *EnterprisePolicyMeta) (*Policy, error) { + p, err := decodeRules(rules, warnOnDuplicateKey, conf, meta) if err != nil { return nil, err } @@ -338,7 +338,11 @@ func NewPolicyFromSource(rules string, conf *Config, meta *EnterprisePolicyMeta) var policy *Policy var err error - policy, err = parse(rules, conf, meta) + warnOnDuplicateKey := false + if conf != nil { + warnOnDuplicateKey = conf.WarnOnDuplicateKey + } + policy, err = parse(rules, warnOnDuplicateKey, conf, meta) return policy, err } diff --git a/acl/policy_ce.go b/acl/policy_ce.go index fe139ef7ab..457563f048 100644 --- a/acl/policy_ce.go +++ b/acl/policy_ce.go @@ -7,8 +7,9 @@ package acl import ( "fmt" - + "github.com/hashicorp/go-hclog" "github.com/hashicorp/hcl" + "strings" ) // EnterprisePolicyMeta stub @@ -30,12 +31,28 @@ func (r *EnterprisePolicyRules) Validate(*Config) error { return nil } -func decodeRules(rules string, _ *Config, _ *EnterprisePolicyMeta) (*Policy, error) { +func decodeRules(rules string, warnOnDuplicateKey bool, _ *Config, _ *EnterprisePolicyMeta) (*Policy, error) { p := &Policy{} - if err := hcl.Decode(p, rules); err != nil { + err := hcl.DecodeErrorOnDuplicates(p, rules) + + if errIsDuplicateKey(err) && warnOnDuplicateKey { + // Because the snapshot saves the unparsed rules, we have to assume some snapshots + // exist that contain duplicates and shouldn't fail. + if err := hcl.Decode(p, rules); err != nil { + return nil, fmt.Errorf("Failed to parse ACL rules: %v", err) + } + hclog.Default().Warn("Duplicate key in ACL policy ignored", "errorMessage", err.Error()) + } else if err != nil { return nil, fmt.Errorf("Failed to parse ACL rules: %v", err) } return p, nil } + +func errIsDuplicateKey(err error) bool { + if err == nil { + return false + } + return strings.Contains(err.Error(), "was 
already set. Each argument can only be defined once") +} diff --git a/acl/policy_test.go b/acl/policy_test.go index 2ce0b32892..e09ae535e1 100644 --- a/acl/policy_test.go +++ b/acl/policy_test.go @@ -342,6 +342,12 @@ func TestPolicySourceParse(t *testing.T) { RulesJSON: `{ "acl": "list" }`, // there is no list policy but this helps to exercise another check in isPolicyValid Err: "Invalid acl policy", }, + { + Name: "Bad Policy - Duplicate ACL Key", + Rules: `acl="read" + acl="write"`, + Err: "Failed to parse ACL rules: The argument \"acl\" at", + }, { Name: "Bad Policy - Agent", Rules: `agent "foo" { policy = "nope" }`, diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 996212c97e..0a4402ffcb 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -380,16 +380,14 @@ func (s *HTTPHandlers) AgentServices(resp http.ResponseWriter, req *http.Request return nil, err } - raw, err := filter.Execute(agentSvcs) - if err != nil { - return nil, err - } - agentSvcs = raw.(map[string]*api.AgentService) - - // Note: we filter the results with ACLs *after* applying the user-supplied - // bexpr filter, to ensure total (and the filter-by-acls header we set below) - // do not include results that would be filtered out even if the user did have - // permission. + // Note: we filter the results with ACLs *before* applying the user-supplied + // bexpr filter to ensure that the user can only run expressions on data that + // they have access to. This is a security measure to prevent users from + // running arbitrary expressions on data they don't have access to. + // QueryMeta.ResultsFilteredByACLs being true already indicates to the user + // that results they don't have access to have been removed. If they were + // also allowed to run the bexpr filter on the data, they could potentially + // infer the specific attributes of data they don't have access to. total := len(agentSvcs) if err := s.agent.filterServicesWithAuthorizer(authz, agentSvcs); err != nil { return nil, err @@ -407,6 +405,12 @@ func (s *HTTPHandlers) AgentServices(resp http.ResponseWriter, req *http.Request setResultsFilteredByACLs(resp, total != len(agentSvcs)) } + raw, err := filter.Execute(agentSvcs) + if err != nil { + return nil, err + } + agentSvcs = raw.(map[string]*api.AgentService) + return agentSvcs, nil } @@ -540,16 +544,14 @@ func (s *HTTPHandlers) AgentChecks(resp http.ResponseWriter, req *http.Request) } } - raw, err := filter.Execute(agentChecks) - if err != nil { - return nil, err - } - agentChecks = raw.(map[types.CheckID]*structs.HealthCheck) - - // Note: we filter the results with ACLs *after* applying the user-supplied - // bexpr filter, to ensure total (and the filter-by-acls header we set below) - // do not include results that would be filtered out even if the user did have - // permission. + // Note: we filter the results with ACLs *before* applying the user-supplied + // bexpr filter to ensure that the user can only run expressions on data that + // they have access to. This is a security measure to prevent users from + // running arbitrary expressions on data they don't have access to. + // QueryMeta.ResultsFilteredByACLs being true already indicates to the user + // that results they don't have access to have been removed. If they were + // also allowed to run the bexpr filter on the data, they could potentially + // infer the specific attributes of data they don't have access to. 
total := len(agentChecks) if err := s.agent.filterChecksWithAuthorizer(authz, agentChecks); err != nil { return nil, err @@ -567,6 +569,12 @@ func (s *HTTPHandlers) AgentChecks(resp http.ResponseWriter, req *http.Request) setResultsFilteredByACLs(resp, total != len(agentChecks)) } + raw, err := filter.Execute(agentChecks) + if err != nil { + return nil, err + } + agentChecks = raw.(map[types.CheckID]*structs.HealthCheck) + return agentChecks, nil } @@ -623,21 +631,14 @@ func (s *HTTPHandlers) AgentMembers(resp http.ResponseWriter, req *http.Request) } } - // filter the members by parsed filter expression - var filterExpression string - s.parseFilter(req, &filterExpression) - if filterExpression != "" { - filter, err := bexpr.CreateFilter(filterExpression, nil, members) - if err != nil { - return nil, err - } - raw, err := filter.Execute(members) - if err != nil { - return nil, err - } - members = raw.([]serf.Member) - } - + // Note: we filter the results with ACLs *before* applying the user-supplied + // bexpr filter to ensure that the user can only run expressions on data that + // they have access to. This is a security measure to prevent users from + // running arbitrary expressions on data they don't have access to. + // QueryMeta.ResultsFilteredByACLs being true already indicates to the user + // that results they don't have access to have been removed. If they were + // also allowed to run the bexpr filter on the data, they could potentially + // infer the specific attributes of data they don't have access to. total := len(members) if err := s.agent.filterMembers(token, &members); err != nil { return nil, err @@ -655,6 +656,21 @@ func (s *HTTPHandlers) AgentMembers(resp http.ResponseWriter, req *http.Request) setResultsFilteredByACLs(resp, total != len(members)) } + // filter the members by parsed filter expression + var filterExpression string + s.parseFilter(req, &filterExpression) + if filterExpression != "" { + filter, err := bexpr.CreateFilter(filterExpression, nil, members) + if err != nil { + return nil, err + } + raw, err := filter.Execute(members) + if err != nil { + return nil, err + } + members = raw.([]serf.Member) + } + return members, nil } diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 69551d7c36..451c412b4b 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -433,6 +433,60 @@ func TestAgent_Services_ACLFilter(t *testing.T) { require.Len(t, val, 2) require.Empty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) }) + + // ensure ACL filtering occurs before bexpr filtering. 
+ const bexprMatchingUserTokenPermissions = "Service matches `web.*`" + const bexprNotMatchingUserTokenPermissions = "Service matches `api.*`" + + tokenWithWebRead := testCreateToken(t, a, ` + service "web" { + policy = "read" + } + `) + + t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) { + req, _ := http.NewRequest("GET", "/v1/agent/services?filter="+url.QueryEscape(bexprMatchingUserTokenPermissions), nil) + req.Header.Add("X-Consul-Token", tokenWithWebRead) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + var val map[string]*api.AgentService + err := dec.Decode(&val) + if err != nil { + t.Fatalf("Err: %v", err) + } + require.Len(t, val, 1) + require.NotEmpty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) + }) + + t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) { + req, _ := http.NewRequest("GET", "/v1/agent/services?filter="+url.QueryEscape(bexprNotMatchingUserTokenPermissions), nil) + req.Header.Add("X-Consul-Token", tokenWithWebRead) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + var val map[string]*api.AgentService + err := dec.Decode(&val) + if err != nil { + t.Fatalf("Err: %v", err) + } + require.Len(t, val, 0) + require.NotEmpty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) + }) + + t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) { + req, _ := http.NewRequest("GET", "/v1/agent/services?filter="+url.QueryEscape(bexprNotMatchingUserTokenPermissions), nil) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + var val map[string]*api.AgentService + err := dec.Decode(&val) + if err != nil { + t.Fatalf("Err: %v", err) + } + require.Len(t, val, 0) + require.Empty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) + }) } func TestAgent_Service(t *testing.T) { @@ -1432,6 +1486,57 @@ func TestAgent_Checks_ACLFilter(t *testing.T) { require.Len(t, val, 2) require.Empty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) }) + + // ensure ACL filtering occurs before bexpr filtering. 
+ const bexprMatchingUserTokenPermissions = "ServiceName matches `web.*`" + const bexprNotMatchingUserTokenPermissions = "ServiceName matches `api.*`" + + tokenWithWebRead := testCreateToken(t, a, ` + service "web" { + policy = "read" + } + `) + + t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) { + req, _ := http.NewRequest("GET", "/v1/agent/checks?filter="+url.QueryEscape(bexprMatchingUserTokenPermissions), nil) + req.Header.Add("X-Consul-Token", tokenWithWebRead) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + val := make(map[types.CheckID]*structs.HealthCheck) + if err := dec.Decode(&val); err != nil { + t.Fatalf("Err: %v", err) + } + require.Len(t, val, 1) + require.NotEmpty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) + }) + + t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) { + req, _ := http.NewRequest("GET", "/v1/agent/checks?filter="+url.QueryEscape(bexprNotMatchingUserTokenPermissions), nil) + req.Header.Add("X-Consul-Token", tokenWithWebRead) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + val := make(map[types.CheckID]*structs.HealthCheck) + if err := dec.Decode(&val); err != nil { + t.Fatalf("Err: %v", err) + } + require.Len(t, val, 0) + require.NotEmpty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) + }) + + t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) { + req, _ := http.NewRequest("GET", "/v1/agent/checks?filter="+url.QueryEscape(bexprNotMatchingUserTokenPermissions), nil) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + val := make(map[types.CheckID]*structs.HealthCheck) + if err := dec.Decode(&val); err != nil { + t.Fatalf("Err: %v", err) + } + require.Len(t, val, 0) + require.Empty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) + }) } func TestAgent_Self(t *testing.T) { @@ -2110,6 +2215,57 @@ func TestAgent_Members_ACLFilter(t *testing.T) { require.Len(t, val, 2) require.Empty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) }) + + // ensure ACL filtering occurs before bexpr filtering. 
+ bexprMatchingUserTokenPermissions := fmt.Sprintf("Name matches `%s.*`", b.Config.NodeName) + bexprNotMatchingUserTokenPermissions := fmt.Sprintf("Name matches `%s.*`", a.Config.NodeName) + + tokenWithReadOnMemberB := testCreateToken(t, a, fmt.Sprintf(` + node "%s" { + policy = "read" + } + `, b.Config.NodeName)) + + t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) { + req, _ := http.NewRequest("GET", "/v1/agent/members?filter="+url.QueryEscape(bexprMatchingUserTokenPermissions), nil) + req.Header.Add("X-Consul-Token", tokenWithReadOnMemberB) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + val := make([]serf.Member, 0) + if err := dec.Decode(&val); err != nil { + t.Fatalf("Err: %v", err) + } + require.Len(t, val, 1) + require.NotEmpty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) + }) + + t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) { + req, _ := http.NewRequest("GET", "/v1/agent/members?filter="+url.QueryEscape(bexprNotMatchingUserTokenPermissions), nil) + req.Header.Add("X-Consul-Token", tokenWithReadOnMemberB) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + val := make([]serf.Member, 0) + if err := dec.Decode(&val); err != nil { + t.Fatalf("Err: %v", err) + } + require.Len(t, val, 0) + require.NotEmpty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) + }) + + t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) { + req, _ := http.NewRequest("GET", "/v1/agent/members?filter="+url.QueryEscape(bexprNotMatchingUserTokenPermissions), nil) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + val := make([]serf.Member, 0) + if err := dec.Decode(&val); err != nil { + t.Fatalf("Err: %v", err) + } + require.Len(t, val, 0) + require.Empty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) + }) } func TestAgent_Join(t *testing.T) { diff --git a/agent/catalog_endpoint_test.go b/agent/catalog_endpoint_test.go index c06efc748c..af1f2d36e3 100644 --- a/agent/catalog_endpoint_test.go +++ b/agent/catalog_endpoint_test.go @@ -122,7 +122,7 @@ func TestCatalogDeregister(t *testing.T) { a := NewTestAgent(t, "") defer a.Shutdown() - // Register node + // Deregister node args := &structs.DeregisterRequest{Node: "foo"} req, _ := http.NewRequest("PUT", "/v1/catalog/deregister", jsonReader(args)) obj, err := a.srv.CatalogDeregister(nil, req) diff --git a/agent/consul/acl_endpoint_test.go b/agent/consul/acl_endpoint_test.go index 4ed159a8aa..2b00aae6d5 100644 --- a/agent/consul/acl_endpoint_test.go +++ b/agent/consul/acl_endpoint_test.go @@ -2169,6 +2169,22 @@ func TestACLEndpoint_PolicySet(t *testing.T) { require.Error(t, err) }) + t.Run("Key Dup", func(t *testing.T) { + req := structs.ACLPolicySetRequest{ + Datacenter: "dc1", + Policy: structs.ACLPolicy{ + Description: "foobar", + Name: "baz2", + Rules: "service \"\" { policy = \"read\" policy = \"write\" }", + }, + WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken}, + } + resp := structs.ACLPolicy{} + + err := aclEp.PolicySet(&req, &resp) + require.Error(t, err) + }) + t.Run("Update it", func(t *testing.T) { req := structs.ACLPolicySetRequest{ Datacenter: "dc1", 
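(Reviewer note: a minimal sketch of how the new duplicate-key handling behaves from a caller's perspective; the rule string and `Config` values below are illustrative, not taken from this change.)

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/acl"
)

func main() {
	// The same key defined twice inside one block.
	rules := `service "web" { policy = "read" policy = "write" }`

	// Default behavior after this change: a duplicate key is a hard parse error.
	if _, err := acl.NewPolicyFromSource(rules, &acl.Config{}, nil); err != nil {
		fmt.Println("rejected:", err)
	}

	// Backwards-compatible mode, for rules that may come from old snapshots:
	// the policy is re-decoded leniently and a warning is logged instead.
	lenient := &acl.Config{WarnOnDuplicateKey: true}
	if p, err := acl.NewPolicyFromSource(rules, lenient, nil); err == nil && p != nil {
		fmt.Println("accepted with a logged warning")
	}
}
```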
diff --git a/agent/consul/catalog_endpoint.go b/agent/consul/catalog_endpoint.go index 36426afe25..09c23d90c5 100644 --- a/agent/consul/catalog_endpoint.go +++ b/agent/consul/catalog_endpoint.go @@ -533,19 +533,24 @@ func (c *Catalog) ListNodes(args *structs.DCSpecificRequest, reply *structs.Inde return nil } + // Note: we filter the results with ACLs *before* applying the user-supplied + // bexpr filter to ensure that the user can only run expressions on data that + // they have access to. This is a security measure to prevent users from + // running arbitrary expressions on data they don't have access to. + // QueryMeta.ResultsFilteredByACLs being true already indicates to the user + // that results they don't have access to have been removed. If they were + // also allowed to run the bexpr filter on the data, they could potentially + // infer the specific attributes of data they don't have access to. + if err := c.srv.filterACL(args.Token, reply); err != nil { + return err + } + raw, err := filter.Execute(reply.Nodes) if err != nil { return err } reply.Nodes = raw.(structs.Nodes) - // Note: we filter the results with ACLs *after* applying the user-supplied - // bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include - // results that would be filtered out even if the user did have permission. - if err := c.srv.filterACL(args.Token, reply); err != nil { - return err - } - return c.srv.sortNodesByDistanceFrom(args.Source, reply.Nodes) }) } @@ -607,14 +612,25 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I return nil } - raw, err := filter.Execute(serviceNodes) + // We need to temporarily create an IndexedServiceNodes so that the ACL filter can be + // applied to the service nodes, and then re-use those same nodes to run the filter expression. + idxServiceNodeReply := &structs.IndexedServiceNodes{ + ServiceNodes: serviceNodes, + QueryMeta: reply.QueryMeta, + } + + // enforce ACLs + c.srv.filterACLWithAuthorizer(authz, idxServiceNodeReply) + + // run the filter expression + raw, err := filter.Execute(idxServiceNodeReply.ServiceNodes) if err != nil { return err } + // convert the result back to the original type reply.Services = servicesTagsByName(raw.(structs.ServiceNodes)) - - c.srv.filterACLWithAuthorizer(authz, reply) + reply.QueryMeta = idxServiceNodeReply.QueryMeta return nil }) @@ -813,6 +829,18 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru reply.ServiceNodes = filtered } + // Note: we filter the results with ACLs *before* applying the user-supplied + // bexpr filter to ensure that the user can only run expressions on data that + // they have access to. This is a security measure to prevent users from + // running arbitrary expressions on data they don't have access to. + // QueryMeta.ResultsFilteredByACLs being true already indicates to the user + // that results they don't have access to have been removed. If they were + // also allowed to run the bexpr filter on the data, they could potentially + // infer the specific attributes of data they don't have access to. 
+ if err := c.srv.filterACL(args.Token, reply); err != nil { + return err + } + // This is safe to do even when the filter is nil - its just a no-op then raw, err := filter.Execute(reply.ServiceNodes) if err != nil { @@ -820,13 +848,6 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru } reply.ServiceNodes = raw.(structs.ServiceNodes) - // Note: we filter the results with ACLs *after* applying the user-supplied - // bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include - // results that would be filtered out even if the user did have permission. - if err := c.srv.filterACL(args.Token, reply); err != nil { - return err - } - return c.srv.sortNodesByDistanceFrom(args.Source, reply.ServiceNodes) }) @@ -904,6 +925,18 @@ func (c *Catalog) NodeServices(args *structs.NodeSpecificRequest, reply *structs } reply.Index, reply.NodeServices = index, services + // Note: we filter the results with ACLs *before* applying the user-supplied + // bexpr filter to ensure that the user can only run expressions on data that + // they have access to. This is a security measure to prevent users from + // running arbitrary expressions on data they don't have access to. + // QueryMeta.ResultsFilteredByACLs being true already indicates to the user + // that results they don't have access to have been removed. If they were + // also allowed to run the bexpr filter on the data, they could potentially + // infer the specific attributes of data they don't have access to. + if err := c.srv.filterACL(args.Token, reply); err != nil { + return err + } + if reply.NodeServices != nil { raw, err := filter.Execute(reply.NodeServices.Services) if err != nil { @@ -912,13 +945,6 @@ func (c *Catalog) NodeServices(args *structs.NodeSpecificRequest, reply *structs reply.NodeServices.Services = raw.(map[string]*structs.NodeService) } - // Note: we filter the results with ACLs *after* applying the user-supplied - // bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include - // results that would be filtered out even if the user did have permission. - if err := c.srv.filterACL(args.Token, reply); err != nil { - return err - } - return nil }) } @@ -1009,21 +1035,26 @@ func (c *Catalog) NodeServiceList(args *structs.NodeSpecificRequest, reply *stru if mergedServices != nil { reply.NodeServices = *mergedServices - - raw, err := filter.Execute(reply.NodeServices.Services) - if err != nil { - return err - } - reply.NodeServices.Services = raw.([]*structs.NodeService) } - // Note: we filter the results with ACLs *after* applying the user-supplied - // bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include - // results that would be filtered out even if the user did have permission. + // Note: we filter the results with ACLs *before* applying the user-supplied + // bexpr filter to ensure that the user can only run expressions on data that + // they have access to. This is a security measure to prevent users from + // running arbitrary expressions on data they don't have access to. + // QueryMeta.ResultsFilteredByACLs being true already indicates to the user + // that results they don't have access to have been removed. If they were + // also allowed to run the bexpr filter on the data, they could potentially + // infer the specific attributes of data they don't have access to. 
if err := c.srv.filterACL(args.Token, reply); err != nil { return err } + raw, err := filter.Execute(reply.NodeServices.Services) + if err != nil { + return err + } + reply.NodeServices.Services = raw.([]*structs.NodeService) + return nil }) } diff --git a/agent/consul/catalog_endpoint_test.go b/agent/consul/catalog_endpoint_test.go index 18827dc981..1f3311b0cd 100644 --- a/agent/consul/catalog_endpoint_test.go +++ b/agent/consul/catalog_endpoint_test.go @@ -984,6 +984,63 @@ func TestCatalog_RPC_Filter(t *testing.T) { require.Equal(t, "baz", out.Nodes[0].Node) }) + t.Run("ListServices", func(t *testing.T) { + args := structs.ServiceSpecificRequest{ + Datacenter: "dc1", + ServiceName: "redis", + QueryOptions: structs.QueryOptions{Filter: "ServiceMeta.version == 1"}, + } + + out := new(structs.IndexedServices) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, &out)) + require.Len(t, out.Services, 2) + require.Len(t, out.Services["redis"], 1) + require.Len(t, out.Services["web"], 2) + + args.Filter = "ServiceMeta.version == 2" + out = new(structs.IndexedServices) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, &out)) + require.Len(t, out.Services, 4) + require.Len(t, out.Services["redis"], 1) + require.Len(t, out.Services["web"], 2) + require.Len(t, out.Services["critical"], 1) + require.Len(t, out.Services["warning"], 1) + }) + + t.Run("NodeServices", func(t *testing.T) { + args := structs.NodeSpecificRequest{ + Datacenter: "dc1", + Node: "baz", + QueryOptions: structs.QueryOptions{Filter: "Service == web"}, + } + + out := new(structs.IndexedNodeServices) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &args, &out)) + require.Len(t, out.NodeServices.Services, 2) + + args.Filter = "Service == web and Meta.version == 2" + out = new(structs.IndexedNodeServices) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &args, &out)) + require.Len(t, out.NodeServices.Services, 1) + }) + + t.Run("NodeServiceList", func(t *testing.T) { + args := structs.NodeSpecificRequest{ + Datacenter: "dc1", + Node: "baz", + QueryOptions: structs.QueryOptions{Filter: "Service == web"}, + } + + out := new(structs.IndexedNodeServiceList) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.NodeServiceList", &args, &out)) + require.Len(t, out.NodeServices.Services, 2) + + args.Filter = "Service == web and Meta.version == 2" + out = new(structs.IndexedNodeServiceList) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.NodeServiceList", &args, &out)) + require.Len(t, out.NodeServices.Services, 1) + }) + t.Run("ServiceNodes", func(t *testing.T) { args := structs.ServiceSpecificRequest{ Datacenter: "dc1", @@ -1006,22 +1063,6 @@ func TestCatalog_RPC_Filter(t *testing.T) { require.Equal(t, "foo", out.ServiceNodes[0].Node) }) - t.Run("NodeServices", func(t *testing.T) { - args := structs.NodeSpecificRequest{ - Datacenter: "dc1", - Node: "baz", - QueryOptions: structs.QueryOptions{Filter: "Service == web"}, - } - - out := new(structs.IndexedNodeServices) - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &args, &out)) - require.Len(t, out.NodeServices.Services, 2) - - args.Filter = "Service == web and Meta.version == 2" - out = new(structs.IndexedNodeServices) - require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &args, &out)) - require.Len(t, out.NodeServices.Services, 1) - }) } func TestCatalog_ListNodes_StaleRead(t *testing.T) { @@ 
-1332,6 +1373,7 @@ func TestCatalog_ListNodes_ACLFilter(t *testing.T) { Datacenter: "dc1", } + readToken := token("read") t.Run("deny", func(t *testing.T) { args.Token = token("deny") @@ -1348,7 +1390,7 @@ func TestCatalog_ListNodes_ACLFilter(t *testing.T) { }) t.Run("allow", func(t *testing.T) { - args.Token = token("read") + args.Token = readToken var reply structs.IndexedNodes if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &reply); err != nil { @@ -1361,6 +1403,67 @@ func TestCatalog_ListNodes_ACLFilter(t *testing.T) { t.Fatal("ResultsFilteredByACLs should not true") } }) + + // Register additional node + regArgs := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + WriteRequest: structs.WriteRequest{ + Token: "root", + }, + } + + var out struct{} + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", regArgs, &out)) + + bexprMatchingUserTokenPermissions := fmt.Sprintf("Node matches `%s.*`", s1.config.NodeName) + const bexpNotMatchingUserTokenPermissions = "Node matches `node-deny.*`" + + t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) { + var reply structs.IndexedNodes + args = structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{ + Token: readToken, + Filter: bexprMatchingUserTokenPermissions, + }, + } + reply = structs.IndexedNodes{} + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &reply)) + require.Equal(t, 1, len(reply.Nodes)) + require.True(t, reply.ResultsFilteredByACLs) + }) + + t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) { + var reply structs.IndexedNodes + args = structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{ + Token: readToken, + Filter: bexpNotMatchingUserTokenPermissions, + }, + } + reply = structs.IndexedNodes{} + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &reply)) + require.Empty(t, reply.Nodes) + require.True(t, reply.ResultsFilteredByACLs) + }) + + t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) { + var reply structs.IndexedNodes + args = structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{ + Token: "", // no token + Filter: bexpNotMatchingUserTokenPermissions, + }, + } + reply = structs.IndexedNodes{} + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &reply)) + require.Empty(t, reply.Nodes) + require.False(t, reply.ResultsFilteredByACLs) + }) } func Benchmark_Catalog_ListNodes(t *testing.B) { @@ -2758,6 +2861,14 @@ service "foo" { node_prefix "" { policy = "read" } + +node "node-deny" { + policy = "deny" +} + +service "service-deny" { + policy = "deny" +} ` token = createToken(t, codec, rules) @@ -2915,23 +3026,76 @@ func TestCatalog_ListServices_FilterACL(t *testing.T) { defer codec.Close() testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken("root")) - opt := structs.DCSpecificRequest{ - Datacenter: "dc1", - QueryOptions: structs.QueryOptions{Token: token}, - } - reply := structs.IndexedServices{} - if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &opt, &reply); err != nil { - t.Fatalf("err: %s", err) - } - if _, ok := reply.Services["foo"]; !ok { - t.Fatalf("bad: %#v", 
reply.Services) - } - if _, ok := reply.Services["bar"]; ok { - t.Fatalf("bad: %#v", reply.Services) - } - if !reply.QueryMeta.ResultsFilteredByACLs { - t.Fatal("ResultsFilteredByACLs should be true") - } + t.Run("request with user token without filter param sets ResultsFilteredByACLs equal to true", func(t *testing.T) { + req := structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{Token: token}, + } + reply := structs.IndexedServices{} + if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &req, &reply); err != nil { + t.Fatalf("err: %s", err) + } + if _, ok := reply.Services["foo"]; !ok { + t.Fatalf("bad: %#v", reply.Services) + } + if _, ok := reply.Services["bar"]; ok { + t.Fatalf("bad: %#v", reply.Services) + } + if !reply.QueryMeta.ResultsFilteredByACLs { + t.Fatal("ResultsFilteredByACLs should be true") + } + }) + + const bexprMatchingUserTokenPermissions = "ServiceName matches `f.*`" + const bexpNotMatchingUserTokenPermissions = "ServiceName matches `b.*`" + + t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) { + req := structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{ + Token: token, + Filter: bexprMatchingUserTokenPermissions, + }, + } + reply := structs.IndexedServices{} + if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &req, &reply); err != nil { + t.Fatalf("err: %s", err) + } + require.Equal(t, 1, len(reply.Services)) + require.True(t, reply.ResultsFilteredByACLs) + }) + + t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) { + req := structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{ + Token: token, + Filter: bexpNotMatchingUserTokenPermissions, + }, + } + reply := structs.IndexedServices{} + if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &req, &reply); err != nil { + t.Fatalf("err: %s", err) + } + require.Zero(t, len(reply.Services)) + require.True(t, reply.ResultsFilteredByACLs) + }) + + t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) { + req := structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{ + Token: "", // no token + Filter: bexprMatchingUserTokenPermissions, + }, + } + reply := structs.IndexedServices{} + if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &req, &reply); err != nil { + t.Fatalf("err: %s", err) + } + require.Zero(t, len(reply.Services)) + require.False(t, reply.ResultsFilteredByACLs) + }) } func TestCatalog_ServiceNodes_FilterACL(t *testing.T) { @@ -2982,11 +3146,80 @@ func TestCatalog_ServiceNodes_FilterACL(t *testing.T) { } require.True(t, reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") - // We've already proven that we call the ACL filtering function so we - // test node filtering down in acl.go for node cases. This also proves - // that we respect the version 8 ACL flag, since the test server sets - // that to false (the regression value of *not* changing this is better - // for now until we change the sense of the version 8 ACL flag). 
+ bexprMatchingUserTokenPermissions := fmt.Sprintf("Node matches `%s.*`", srv.config.NodeName) + const bexpNotMatchingUserTokenPermissions = "Node matches `node-deny.*`" + + // Register a service of the same name on the denied node + regArg := structs.RegisterRequest{ + Datacenter: "dc1", + Node: "node-deny", + Address: "127.0.0.1", + Service: &structs.NodeService{ + ID: "foo", + Service: "foo", + }, + Check: &structs.HealthCheck{ + CheckID: "service:foo", + Name: "service:foo", + ServiceID: "foo", + Status: api.HealthPassing, + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", ®Arg, nil); err != nil { + t.Fatalf("err: %s", err) + } + + t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) { + opt = structs.ServiceSpecificRequest{ + Datacenter: "dc1", + ServiceName: "foo", + QueryOptions: structs.QueryOptions{ + Token: token, + Filter: bexprMatchingUserTokenPermissions, + }, + } + reply = structs.IndexedServiceNodes{} + if err := msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &opt, &reply); err != nil { + t.Fatalf("err: %s", err) + } + require.Equal(t, 1, len(reply.ServiceNodes)) + require.True(t, reply.ResultsFilteredByACLs) + }) + + t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) { + opt = structs.ServiceSpecificRequest{ + Datacenter: "dc1", + ServiceName: "foo", + QueryOptions: structs.QueryOptions{ + Token: token, + Filter: bexpNotMatchingUserTokenPermissions, + }, + } + reply = structs.IndexedServiceNodes{} + if err := msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &opt, &reply); err != nil { + t.Fatalf("err: %s", err) + } + require.Zero(t, len(reply.ServiceNodes)) + require.True(t, reply.ResultsFilteredByACLs) + }) + + t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) { + opt = structs.ServiceSpecificRequest{ + Datacenter: "dc1", + ServiceName: "foo", + QueryOptions: structs.QueryOptions{ + Token: "", // no token + Filter: bexpNotMatchingUserTokenPermissions, + }, + } + reply = structs.IndexedServiceNodes{} + if err := msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &opt, &reply); err != nil { + t.Fatalf("err: %s", err) + } + require.Zero(t, len(reply.ServiceNodes)) + require.False(t, reply.ResultsFilteredByACLs) + }) } func TestCatalog_NodeServices_ACL(t *testing.T) { @@ -3075,6 +3308,139 @@ func TestCatalog_NodeServices_FilterACL(t *testing.T) { svc, ok := reply.NodeServices.Services["foo"] require.True(t, ok) require.Equal(t, "foo", svc.ID) + + const bexprMatchingUserTokenPermissions = "Service matches `f.*`" + const bexpNotMatchingUserTokenPermissions = "Service matches `b.*`" + + t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) { + req := structs.NodeSpecificRequest{ + Datacenter: "dc1", + Node: srv.config.NodeName, + QueryOptions: structs.QueryOptions{ + Token: token, + Filter: bexprMatchingUserTokenPermissions, + }, + } + reply = structs.IndexedNodeServices{} + if err := msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &req, &reply); err != nil { + t.Fatalf("err: %s", err) + } + require.Equal(t, 1, len(reply.NodeServices.Services)) + require.True(t, reply.ResultsFilteredByACLs) + }) + + t.Run("request with 
filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) { + req := structs.NodeSpecificRequest{ + Datacenter: "dc1", + Node: srv.config.NodeName, + QueryOptions: structs.QueryOptions{ + Token: token, + Filter: bexpNotMatchingUserTokenPermissions, + }, + } + reply = structs.IndexedNodeServices{} + if err := msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &req, &reply); err != nil { + t.Fatalf("err: %s", err) + } + require.Zero(t, len(reply.NodeServices.Services)) + require.True(t, reply.ResultsFilteredByACLs) + }) + + t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) { + req := structs.NodeSpecificRequest{ + Datacenter: "dc1", + Node: srv.config.NodeName, + QueryOptions: structs.QueryOptions{ + Token: "", // no token + Filter: bexprMatchingUserTokenPermissions, + }, + } + reply = structs.IndexedNodeServices{} + if err := msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &req, &reply); err != nil { + t.Fatalf("err: %s", err) + } + require.Nil(t, reply.NodeServices) + require.False(t, reply.ResultsFilteredByACLs) + }) +} + +func TestCatalog_NodeServicesList_FilterACL(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + dir, token, srv, codec := testACLFilterServer(t) + defer os.RemoveAll(dir) + defer srv.Shutdown() + defer codec.Close() + testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken("root")) + + opt := structs.NodeSpecificRequest{ + Datacenter: "dc1", + Node: srv.config.NodeName, + QueryOptions: structs.QueryOptions{Token: token}, + } + + var reply structs.IndexedNodeServiceList + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.NodeServiceList", &opt, &reply)) + + require.NotNil(t, reply.NodeServices) + require.Len(t, reply.NodeServices.Services, 1) + + const bexprMatchingUserTokenPermissions = "Service matches `f.*`" + const bexpNotMatchingUserTokenPermissions = "Service matches `b.*`" + + t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) { + req := structs.NodeSpecificRequest{ + Datacenter: "dc1", + Node: srv.config.NodeName, + QueryOptions: structs.QueryOptions{ + Token: token, + Filter: bexprMatchingUserTokenPermissions, + }, + } + reply = structs.IndexedNodeServiceList{} + if err := msgpackrpc.CallWithCodec(codec, "Catalog.NodeServiceList", &req, &reply); err != nil { + t.Fatalf("err: %s", err) + } + require.Equal(t, 1, len(reply.NodeServices.Services)) + require.True(t, reply.ResultsFilteredByACLs) + }) + + t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) { + req := structs.NodeSpecificRequest{ + Datacenter: "dc1", + Node: srv.config.NodeName, + QueryOptions: structs.QueryOptions{ + Token: token, + Filter: bexpNotMatchingUserTokenPermissions, + }, + } + reply = structs.IndexedNodeServiceList{} + if err := msgpackrpc.CallWithCodec(codec, "Catalog.NodeServiceList", &req, &reply); err != nil { + t.Fatalf("err: %s", err) + } + require.Zero(t, len(reply.NodeServices.Services)) + require.True(t, reply.ResultsFilteredByACLs) + }) + + t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) { + req := structs.NodeSpecificRequest{ + Datacenter: "dc1", + 
Node: srv.config.NodeName, + QueryOptions: structs.QueryOptions{ + Token: "", // no token + Filter: bexprMatchingUserTokenPermissions, + }, + } + reply = structs.IndexedNodeServiceList{} + if err := msgpackrpc.CallWithCodec(codec, "Catalog.NodeServiceList", &req, &reply); err != nil { + t.Fatalf("err: %s", err) + } + require.Empty(t, reply.NodeServices.Services) + require.False(t, reply.ResultsFilteredByACLs) + }) } func TestCatalog_GatewayServices_TerminatingGateway(t *testing.T) { diff --git a/agent/consul/config_endpoint.go b/agent/consul/config_endpoint.go index 96906dac68..a0d78cc6d2 100644 --- a/agent/consul/config_endpoint.go +++ b/agent/consul/config_endpoint.go @@ -9,7 +9,6 @@ import ( "time" metrics "github.com/armon/go-metrics" - "github.com/armon/go-metrics/prometheus" hashstructure_v2 "github.com/mitchellh/hashstructure/v2" "github.com/hashicorp/go-bexpr" @@ -22,33 +21,6 @@ import ( "github.com/hashicorp/consul/agent/structs" ) -var ConfigSummaries = []prometheus.SummaryDefinition{ - { - Name: []string{"config_entry", "apply"}, - Help: "", - }, - { - Name: []string{"config_entry", "get"}, - Help: "", - }, - { - Name: []string{"config_entry", "list"}, - Help: "", - }, - { - Name: []string{"config_entry", "listAll"}, - Help: "", - }, - { - Name: []string{"config_entry", "delete"}, - Help: "", - }, - { - Name: []string{"config_entry", "resolve_service_config"}, - Help: "", - }, -} - // The ConfigEntry endpoint is used to query centralized config information type ConfigEntry struct { srv *Server @@ -280,7 +252,14 @@ func (c *ConfigEntry) List(args *structs.ConfigEntryQuery, reply *structs.Indexe return err } - // Filter the entries returned by ACL permissions. + // Note: we filter the results with ACLs *before* applying the user-supplied + // bexpr filter to ensure that the user can only run expressions on data that + // they have access to. This is a security measure to prevent users from + // running arbitrary expressions on data they don't have access to. + // QueryMeta.ResultsFilteredByACLs being true already indicates to the user + // that results they don't have access to have been removed. If they were + // also allowed to run the bexpr filter on the data, they could potentially + // infer the specific attributes of data they don't have access to. filteredEntries := make([]structs.ConfigEntry, 0, len(entries)) for _, entry := range entries { if err := entry.CanRead(authz); err != nil { diff --git a/agent/consul/config_endpoint_test.go b/agent/consul/config_endpoint_test.go index 49a10dce21..cf503ee525 100644 --- a/agent/consul/config_endpoint_test.go +++ b/agent/consul/config_endpoint_test.go @@ -783,7 +783,7 @@ service "foo" { } operator = "read" ` - id := createToken(t, codec, rules) + token := createToken(t, codec, rules) // Create some dummy service/proxy configs to be looked up. state := s1.fsm.State() @@ -804,7 +804,7 @@ operator = "read" args := structs.ConfigEntryQuery{ Kind: structs.ServiceDefaults, Datacenter: s1.config.Datacenter, - QueryOptions: structs.QueryOptions{Token: id}, + QueryOptions: structs.QueryOptions{Token: token}, } var out structs.IndexedConfigEntries err := msgpackrpc.CallWithCodec(codec, "ConfigEntry.List", &args, &out) @@ -828,6 +828,58 @@ operator = "read" require.Equal(t, structs.ProxyConfigGlobal, proxyConf.Name) require.Equal(t, structs.ProxyDefaults, proxyConf.Kind) require.False(t, out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + + // ensure ACL filtering occurs before bexpr filtering. 
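The `Filter` strings exercised throughout these tests are go-bexpr expressions that the server evaluates via `filter.Execute`, as in the hunks above. A minimal, self-contained sketch of that filtering behavior, using the public `github.com/hashicorp/go-bexpr` API (the `check` struct and values here are illustrative, not Consul types):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-bexpr"
)

// check stands in for whatever struct type is being filtered.
type check struct {
	ServiceName string
}

func main() {
	checks := []check{{ServiceName: "foo"}, {ServiceName: "bar"}}

	// Build a filter from a user-supplied expression, then run it over the
	// result slice -- the same CreateFilter/Execute shape used above.
	flt, err := bexpr.CreateFilter("ServiceName matches `f.*`", nil, checks)
	if err != nil {
		panic(err)
	}
	raw, err := flt.Execute(checks)
	if err != nil {
		panic(err)
	}
	fmt.Println(raw.([]check)) // [{foo}]: only the matching entry survives
}
```

Because the ACL pass now runs first, an expression like this can only ever be evaluated against entries the caller was already authorized to read.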
+ const bexprMatchingUserTokenPermissions = "Name matches `f.*`" + const bexprNotMatchingUserTokenPermissions = "Name matches `db.*`" + + t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) { + args = structs.ConfigEntryQuery{ + Kind: structs.ServiceDefaults, + Datacenter: s1.config.Datacenter, + QueryOptions: structs.QueryOptions{ + Token: token, + Filter: bexprMatchingUserTokenPermissions, + }, + } + var reply structs.IndexedConfigEntries + err = msgpackrpc.CallWithCodec(codec, "ConfigEntry.List", &args, &reply) + require.NoError(t, err) + require.Equal(t, 1, len(reply.Entries)) + require.True(t, reply.ResultsFilteredByACLs) + }) + + t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) { + args = structs.ConfigEntryQuery{ + Kind: structs.ServiceDefaults, + Datacenter: s1.config.Datacenter, + QueryOptions: structs.QueryOptions{ + Token: token, + Filter: bexprNotMatchingUserTokenPermissions, + }, + } + var reply structs.IndexedConfigEntries + err = msgpackrpc.CallWithCodec(codec, "ConfigEntry.List", &args, &reply) + require.NoError(t, err) + require.Zero(t, len(reply.Entries)) + require.True(t, reply.ResultsFilteredByACLs) + }) + + t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) { + args = structs.ConfigEntryQuery{ + Kind: structs.ServiceDefaults, + Datacenter: s1.config.Datacenter, + QueryOptions: structs.QueryOptions{ + Token: "", // no token + Filter: bexprNotMatchingUserTokenPermissions, + }, + } + var reply structs.IndexedConfigEntries + err = msgpackrpc.CallWithCodec(codec, "ConfigEntry.List", &args, &reply) + require.NoError(t, err) + require.Zero(t, len(reply.Entries)) + require.False(t, reply.ResultsFilteredByACLs) + }) } func TestConfigEntry_ListAll_ACLDeny(t *testing.T) { diff --git a/agent/consul/health_endpoint.go b/agent/consul/health_endpoint.go index 6f00ec4b08..4ded41bcce 100644 --- a/agent/consul/health_endpoint.go +++ b/agent/consul/health_endpoint.go @@ -63,19 +63,24 @@ func (h *Health) ChecksInState(args *structs.ChecksInStateRequest, } reply.Index, reply.HealthChecks = index, checks + // Note: we filter the results with ACLs *before* applying the user-supplied + // bexpr filter to ensure that the user can only run expressions on data that + // they have access to. This is a security measure to prevent users from + // running arbitrary expressions on data they don't have access to. + // QueryMeta.ResultsFilteredByACLs being true already indicates to the user + // that results they don't have access to have been removed. If they were + // also allowed to run the bexpr filter on the data, they could potentially + // infer the specific attributes of data they don't have access to. + if err := h.srv.filterACL(args.Token, reply); err != nil { + return err + } + raw, err := filter.Execute(reply.HealthChecks) if err != nil { return err } reply.HealthChecks = raw.(structs.HealthChecks) - // Note: we filter the results with ACLs *after* applying the user-supplied - // bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include - // results that would be filtered out even if the user did have permission. 
- if err := h.srv.filterACL(args.Token, reply); err != nil { - return err - } - return h.srv.sortNodesByDistanceFrom(args.Source, reply.HealthChecks) }) } @@ -111,19 +116,24 @@ func (h *Health) NodeChecks(args *structs.NodeSpecificRequest, } reply.Index, reply.HealthChecks = index, checks + // Note: we filter the results with ACLs *before* applying the user-supplied + // bexpr filter to ensure that the user can only run expressions on data that + // they have access to. This is a security measure to prevent users from + // running arbitrary expressions on data they don't have access to. + // QueryMeta.ResultsFilteredByACLs being true already indicates to the user + // that results they don't have access to have been removed. If they were + // also allowed to run the bexpr filter on the data, they could potentially + // infer the specific attributes of data they don't have access to. + if err := h.srv.filterACL(args.Token, reply); err != nil { + return err + } + raw, err := filter.Execute(reply.HealthChecks) if err != nil { return err } reply.HealthChecks = raw.(structs.HealthChecks) - // Note: we filter the results with ACLs *after* applying the user-supplied - // bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include - // results that would be filtered out even if the user did have permission. - if err := h.srv.filterACL(args.Token, reply); err != nil { - return err - } - return nil }) } @@ -303,6 +313,18 @@ func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *struc thisReply.Nodes = nodeMetaFilter(arg.NodeMetaFilters, thisReply.Nodes) } + // Note: we filter the results with ACLs *before* applying the user-supplied + // bexpr filter to ensure that the user can only run expressions on data that + // they have access to. This is a security measure to prevent users from + // running arbitrary expressions on data they don't have access to. + // QueryMeta.ResultsFilteredByACLs being true already indicates to the user + // that results they don't have access to have been removed. If they were + // also allowed to run the bexpr filter on the data, they could potentially + // infer the specific attributes of data they don't have access to. + if err := h.srv.filterACL(arg.Token, &thisReply); err != nil { + return err + } + raw, err := filter.Execute(thisReply.Nodes) if err != nil { return err @@ -310,13 +332,6 @@ func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *struc filteredNodes := raw.(structs.CheckServiceNodes) thisReply.Nodes = filteredNodes.Filter(structs.CheckServiceNodeFilterOptions{FilterType: arg.HealthFilterType}) - // Note: we filter the results with ACLs *after* applying the user-supplied - // bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include - // results that would be filtered out even if the user did have permission. 
- if err := h.srv.filterACL(arg.Token, &thisReply); err != nil { - return err - } - if err := h.srv.sortNodesByDistanceFrom(arg.Source, thisReply.Nodes); err != nil { return err } diff --git a/agent/consul/health_endpoint_test.go b/agent/consul/health_endpoint_test.go index 07f23cc2e0..fef8d285a6 100644 --- a/agent/consul/health_endpoint_test.go +++ b/agent/consul/health_endpoint_test.go @@ -1527,11 +1527,62 @@ func TestHealth_NodeChecks_FilterACL(t *testing.T) { require.True(t, found, "bad: %#v", reply.HealthChecks) require.True(t, reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") - // We've already proven that we call the ACL filtering function so we - // test node filtering down in acl.go for node cases. This also proves - // that we respect the version 8 ACL flag, since the test server sets - // that to false (the regression value of *not* changing this is better - // for now until we change the sense of the version 8 ACL flag). + const bexprMatchingUserTokenPermissions = "ServiceName matches `f.*`" + const bexprNotMatchingUserTokenPermissions = "ServiceName matches `b.*`" + + t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) { + opt := structs.NodeSpecificRequest{ + Datacenter: "dc1", + Node: srv.config.NodeName, + QueryOptions: structs.QueryOptions{ + Token: token, + Filter: bexprMatchingUserTokenPermissions, + }, + } + reply := structs.IndexedHealthChecks{} + + if err := msgpackrpc.CallWithCodec(codec, "Health.NodeChecks", &opt, &reply); err != nil { + t.Fatalf("err: %s", err) + } + require.Equal(t, 1, len(reply.HealthChecks)) + require.True(t, reply.ResultsFilteredByACLs) + }) + + t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) { + opt := structs.NodeSpecificRequest{ + Datacenter: "dc1", + Node: srv.config.NodeName, + QueryOptions: structs.QueryOptions{ + Token: token, + Filter: bexprNotMatchingUserTokenPermissions, + }, + } + reply := structs.IndexedHealthChecks{} + + if err := msgpackrpc.CallWithCodec(codec, "Health.NodeChecks", &opt, &reply); err != nil { + t.Fatalf("err: %s", err) + } + require.Zero(t, len(reply.HealthChecks)) + require.True(t, reply.ResultsFilteredByACLs) + }) + + t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) { + opt := structs.NodeSpecificRequest{ + Datacenter: "dc1", + Node: srv.config.NodeName, + QueryOptions: structs.QueryOptions{ + Token: "", // no token + Filter: bexprNotMatchingUserTokenPermissions, + }, + } + reply := structs.IndexedHealthChecks{} + + if err := msgpackrpc.CallWithCodec(codec, "Health.NodeChecks", &opt, &reply); err != nil { + t.Fatalf("err: %s", err) + } + require.Zero(t, len(reply.HealthChecks)) + require.False(t, reply.ResultsFilteredByACLs) + }) } func TestHealth_ServiceChecks_FilterACL(t *testing.T) { @@ -1571,11 +1622,77 @@ func TestHealth_ServiceChecks_FilterACL(t *testing.T) { require.Empty(t, reply.HealthChecks) require.True(t, reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") - // We've already proven that we call the ACL filtering function so we - // test node filtering down in acl.go for node cases. 
This also proves - // that we respect the version 8 ACL flag, since the test server sets - // that to false (the regression value of *not* changing this is better - // for now until we change the sense of the version 8 ACL flag). + // Register a service of the same name on the denied node + regArg := structs.RegisterRequest{ + Datacenter: "dc1", + Node: "node-deny", + Address: "127.0.0.1", + Service: &structs.NodeService{ + ID: "foo", + Service: "foo", + }, + Check: &structs.HealthCheck{ + CheckID: "service:foo", + Name: "service:foo", + ServiceID: "foo", + Status: api.HealthPassing, + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &regArg, nil); err != nil { + t.Fatalf("err: %s", err) + } + const bexprMatchingUserTokenPermissions = "ServiceName matches `f.*`" + const bexprNotMatchingUserTokenPermissions = "Node matches `node-deny.*`" + + t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) { + opt := structs.ServiceSpecificRequest{ + Datacenter: "dc1", + ServiceName: "foo", + QueryOptions: structs.QueryOptions{ + Token: token, + Filter: bexprMatchingUserTokenPermissions, + }, + } + reply := structs.IndexedHealthChecks{} + err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &opt, &reply) + require.NoError(t, err) + + require.Equal(t, 1, len(reply.HealthChecks)) + require.True(t, reply.ResultsFilteredByACLs) + }) + + t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) { + opt := structs.ServiceSpecificRequest{ + Datacenter: "dc1", + ServiceName: "foo", + QueryOptions: structs.QueryOptions{ + Token: token, + Filter: bexprNotMatchingUserTokenPermissions, + }, + } + reply := structs.IndexedHealthChecks{} + err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &opt, &reply) + require.NoError(t, err) + require.Zero(t, len(reply.HealthChecks)) + require.True(t, reply.ResultsFilteredByACLs) + }) + + t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) { + opt := structs.ServiceSpecificRequest{ + Datacenter: "dc1", + ServiceName: "foo", + QueryOptions: structs.QueryOptions{ + Token: "", // no token + Filter: bexprMatchingUserTokenPermissions, + }, + } + reply := structs.IndexedHealthChecks{} + err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &opt, &reply) + require.NoError(t, err) + require.Zero(t, len(reply.HealthChecks)) + require.False(t, reply.ResultsFilteredByACLs) + }) } func TestHealth_ServiceNodes_FilterACL(t *testing.T) { @@ -1607,11 +1724,77 @@ func TestHealth_ServiceNodes_FilterACL(t *testing.T) { require.Empty(t, reply.Nodes) require.True(t, reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") - // We've already proven that we call the ACL filtering function so we - // test node filtering down in acl.go for node cases. This also proves - // that we respect the version 8 ACL flag, since the test server sets - // that to false (the regression value of *not* changing this is better - // for now until we change the sense of the version 8 ACL flag). 
+ // Register a service of the same name on the denied node + regArg := structs.RegisterRequest{ + Datacenter: "dc1", + Node: "node-deny", + Address: "127.0.0.1", + Service: &structs.NodeService{ + ID: "foo", + Service: "foo", + }, + Check: &structs.HealthCheck{ + CheckID: "service:foo", + Name: "service:foo", + ServiceID: "foo", + Status: api.HealthPassing, + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &regArg, nil); err != nil { + t.Fatalf("err: %s", err) + } + const bexprMatchingUserTokenPermissions = "Service.Service matches `f.*`" + const bexprNotMatchingUserTokenPermissions = "Node.Node matches `node-deny.*`" + + t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) { + opt := structs.ServiceSpecificRequest{ + Datacenter: "dc1", + ServiceName: "foo", + QueryOptions: structs.QueryOptions{ + Token: token, + Filter: bexprMatchingUserTokenPermissions, + }, + } + reply := structs.IndexedCheckServiceNodes{} + err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &opt, &reply) + require.NoError(t, err) + + require.Equal(t, 1, len(reply.Nodes)) + require.True(t, reply.ResultsFilteredByACLs) + }) + + t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) { + opt := structs.ServiceSpecificRequest{ + Datacenter: "dc1", + ServiceName: "foo", + QueryOptions: structs.QueryOptions{ + Token: token, + Filter: bexprNotMatchingUserTokenPermissions, + }, + } + reply := structs.IndexedCheckServiceNodes{} + err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &opt, &reply) + require.NoError(t, err) + require.Zero(t, len(reply.Nodes)) + require.True(t, reply.ResultsFilteredByACLs) + }) + + t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) { + opt := structs.ServiceSpecificRequest{ + Datacenter: "dc1", + ServiceName: "foo", + QueryOptions: structs.QueryOptions{ + Token: "", // no token + Filter: bexprMatchingUserTokenPermissions, + }, + } + reply := structs.IndexedCheckServiceNodes{} + err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &opt, &reply) + require.NoError(t, err) + require.Zero(t, len(reply.Nodes)) + require.False(t, reply.ResultsFilteredByACLs) + }) } func TestHealth_ChecksInState_FilterACL(t *testing.T) { @@ -1647,11 +1830,59 @@ func TestHealth_ChecksInState_FilterACL(t *testing.T) { require.True(t, found, "missing service 'foo': %#v", reply.HealthChecks) require.True(t, reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") - // We've already proven that we call the ACL filtering function so we - // test node filtering down in acl.go for node cases. This also proves - // that we respect the version 8 ACL flag, since the test server sets - // that to false (the regression value of *not* changing this is better - // for now until we change the sense of the version 8 ACL flag). 
+ const bexprMatchingUserTokenPermissions = "ServiceName matches `f.*`" + const bexprNotMatchingUserTokenPermissions = "ServiceName matches `b.*`" + + t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) { + req := structs.ChecksInStateRequest{ + Datacenter: "dc1", + State: api.HealthPassing, + QueryOptions: structs.QueryOptions{ + Token: token, + Filter: bexprMatchingUserTokenPermissions, + }, + } + reply := structs.IndexedHealthChecks{} + if err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &req, &reply); err != nil { + t.Fatalf("err: %s", err) + } + require.Equal(t, 1, len(reply.HealthChecks)) + require.True(t, reply.ResultsFilteredByACLs) + }) + + t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) { + req := structs.ChecksInStateRequest{ + Datacenter: "dc1", + State: api.HealthPassing, + QueryOptions: structs.QueryOptions{ + Token: token, + Filter: bexprNotMatchingUserTokenPermissions, + }, + } + reply := structs.IndexedHealthChecks{} + if err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &req, &reply); err != nil { + t.Fatalf("err: %s", err) + } + require.Zero(t, len(reply.HealthChecks)) + require.True(t, reply.ResultsFilteredByACLs) + }) + + t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) { + req := structs.ChecksInStateRequest{ + Datacenter: "dc1", + State: api.HealthPassing, + QueryOptions: structs.QueryOptions{ + Token: "", // no token + Filter: bexprNotMatchingUserTokenPermissions, + }, + } + reply := structs.IndexedHealthChecks{} + if err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &req, &reply); err != nil { + t.Fatalf("err: %s", err) + } + require.Zero(t, len(reply.HealthChecks)) + require.False(t, reply.ResultsFilteredByACLs) + }) } func TestHealth_RPC_Filter(t *testing.T) { diff --git a/agent/consul/intention_endpoint.go b/agent/consul/intention_endpoint.go index df05428145..6fc7f8132a 100644 --- a/agent/consul/intention_endpoint.go +++ b/agent/consul/intention_endpoint.go @@ -550,19 +550,25 @@ func (s *Intention) List(args *structs.IntentionListRequest, reply *structs.Inde } else { reply.DataOrigin = structs.IntentionDataOriginLegacy } + + // Note: we filter the results with ACLs *before* applying the user-supplied + // bexpr filter to ensure that the user can only run expressions on data that + // they have access to. This is a security measure to prevent users from + // running arbitrary expressions on data they don't have access to. + // QueryMeta.ResultsFilteredByACLs being true already indicates to the user + // that results they don't have access to have been removed. If they were + // also allowed to run the bexpr filter on the data, they could potentially + // infer the specific attributes of data they don't have access to. + if err := s.srv.filterACL(args.Token, reply); err != nil { + return err + } + raw, err := filter.Execute(reply.Intentions) if err != nil { return err } reply.Intentions = raw.(structs.Intentions) - // Note: we filter the results with ACLs *after* applying the user-supplied - // bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include - // results that would be filtered out even if the user did have permission. 
- if err := s.srv.filterACL(args.Token, reply); err != nil { - return err - } - return nil }, ) diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index 08480501d7..d1a70fd5bc 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -1639,6 +1639,11 @@ func TestIntentionList_acl(t *testing.T) { token, err := upsertTestTokenWithPolicyRules(codec, TestDefaultInitialManagementToken, "dc1", `service_prefix "foo" { policy = "write" }`) require.NoError(t, err) + const ( + bexprMatch = "DestinationName matches `f.*`" + bexprNoMatch = "DestinationName matches `nomatch.*`" + ) + // Create a few records for _, name := range []string{"foobar", "bar", "baz"} { ixn := structs.IntentionRequest{ @@ -1691,12 +1696,29 @@ func TestIntentionList_acl(t *testing.T) { require.True(t, resp.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) - t.Run("filtered", func(t *testing.T) { + // maskResultsFilteredByACLs() in rpc.go sets ResultsFilteredByACLs back to false if the token is an empty string, + // even after filterACL() has determined resp.QueryMeta.ResultsFilteredByACLs to be true. + t.Run("filtered with no token should return no results and ResultsFilteredByACLs equal to false", func(t *testing.T) { + req := &structs.IntentionListRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{ + Filter: bexprMatch, + }, + } + + var resp structs.IndexedIntentions + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp)) + require.Len(t, resp.Intentions, 0) + require.False(t, resp.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) + + // has access to everything + t.Run("filtered with initial management token should return 1 and ResultsFilteredByACLs equal to false", func(t *testing.T) { req := &structs.IntentionListRequest{ Datacenter: "dc1", QueryOptions: structs.QueryOptions{ Token: TestDefaultInitialManagementToken, - Filter: "DestinationName == foobar", + Filter: bexprMatch, }, } @@ -1705,6 +1727,54 @@ func TestIntentionList_acl(t *testing.T) { require.Len(t, resp.Intentions, 1) require.False(t, resp.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") }) + + // ResultsFilteredByACLs should reflect that the user does not have access to read all intentions but does have access to some. + t.Run("filtered with user token whose permissions match filter should return 1 and ResultsFilteredByACLs equal to true", func(t *testing.T) { + req := &structs.IntentionListRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{ + Token: token.SecretID, + Filter: bexprMatch, + }, + } + + var resp structs.IndexedIntentions + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp)) + require.Len(t, resp.Intentions, 1) + require.True(t, resp.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + // ResultsFilteredByACLs must be determined as though no bexpr filter was applied. 
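+ // The expression below matches nothing, so the bexpr filter alone would already + // yield zero results; the flag must still report that ACL filtering removed + // intentions the token cannot read.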
+ t.Run("filtered with user token whose permissions do match filter should return 0 and ResultsFilteredByACLs equal to true", func(t *testing.T) { + req := &structs.IntentionListRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{ + Token: token.SecretID, + Filter: bexprNoMatch, + }, + } + + var resp structs.IndexedIntentions + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp)) + require.Len(t, resp.Intentions, 0) + require.True(t, resp.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + // ResultsFilteredByACLs should reflect user does not have access to read any intentions + t.Run("filtered with anonymous token should return 0 and ResultsFilteredByACLs equal to true", func(t *testing.T) { + req := &structs.IntentionListRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{ + Token: "anonymous", + Filter: bexprMatch, + }, + } + + var resp structs.IndexedIntentions + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp)) + require.Len(t, resp.Intentions, 0) + require.True(t, resp.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) } // Test basic matching. We don't need to exhaustively test inputs since this diff --git a/agent/consul/internal_endpoint.go b/agent/consul/internal_endpoint.go index af27842d20..6afb8405f3 100644 --- a/agent/consul/internal_endpoint.go +++ b/agent/consul/internal_endpoint.go @@ -7,15 +7,18 @@ import ( "fmt" "net" + hashstructure_v2 "github.com/mitchellh/hashstructure/v2" + "golang.org/x/exp/maps" + "github.com/hashicorp/go-bexpr" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" "github.com/hashicorp/serf/serf" - hashstructure_v2 "github.com/mitchellh/hashstructure/v2" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/lib/stringslice" ) const MaximumManualVIPsPerService = 8 @@ -117,6 +120,18 @@ func (m *Internal) NodeDump(args *structs.DCSpecificRequest, } reply.Index = maxIndex + // Note: we filter the results with ACLs *before* applying the user-supplied + // bexpr filter to ensure that the user can only run expressions on data that + // they have access to. This is a security measure to prevent users from + // running arbitrary expressions on data they don't have access to. + // QueryMeta.ResultsFilteredByACLs being true already indicates to the user + // that results they don't have access to have been removed. If they were + // also allowed to run the bexpr filter on the data, they could potentially + // infer the specific attributes of data they don't have access to. + if err := m.srv.filterACL(args.Token, reply); err != nil { + return err + } + raw, err := filter.Execute(reply.Dump) if err != nil { return fmt.Errorf("could not filter local node dump: %w", err) @@ -129,13 +144,6 @@ func (m *Internal) NodeDump(args *structs.DCSpecificRequest, } reply.ImportedDump = importedRaw.(structs.NodeDump) - // Note: we filter the results with ACLs *after* applying the user-supplied - // bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include - // results that would be filtered out even if the user did have permission. - if err := m.srv.filterACL(args.Token, reply); err != nil { - return err - } - return nil }) } @@ -235,13 +243,26 @@ func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs. 
} } reply.Index = maxIndex - raw, err := filter.Execute(reply.Nodes) - if err != nil { - return fmt.Errorf("could not filter local service dump: %w", err) - } - reply.Nodes = raw.(structs.CheckServiceNodes) } + // Note: we filter the results with ACLs *before* applying the user-supplied + // bexpr filter to ensure that the user can only run expressions on data that + // they have access to. This is a security measure to prevent users from + // running arbitrary expressions on data they don't have access to. + // QueryMeta.ResultsFilteredByACLs being true already indicates to the user + // that results they don't have access to have been removed. If they were + // also allowed to run the bexpr filter on the data, they could potentially + // infer the specific attributes of data they don't have access to. + if err := m.srv.filterACL(args.Token, reply); err != nil { + return err + } + + raw, err := filter.Execute(reply.Nodes) + if err != nil { + return fmt.Errorf("could not filter local service dump: %w", err) + } + reply.Nodes = raw.(structs.CheckServiceNodes) + if !args.NodesOnly { importedRaw, err := filter.Execute(reply.ImportedNodes) if err != nil { @@ -249,12 +270,6 @@ func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs. } reply.ImportedNodes = importedRaw.(structs.CheckServiceNodes) } - // Note: we filter the results with ACLs *after* applying the user-supplied - // bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include - // results that would be filtered out even if the user did have permission. - if err := m.srv.filterACL(args.Token, reply); err != nil { - return err - } return nil }) @@ -770,17 +785,38 @@ func (m *Internal) AssignManualServiceVIPs(args *structs.AssignServiceManualVIPs return fmt.Errorf("cannot associate more than %d manual virtual IPs with the same service", MaximumManualVIPsPerService) } + vipMap := make(map[string]struct{}) for _, ip := range args.ManualVIPs { parsedIP := net.ParseIP(ip) if parsedIP == nil || parsedIP.To4() == nil { return fmt.Errorf("%q is not a valid IPv4 address", parsedIP.String()) } + vipMap[ip] = struct{}{} + } + // Silently ignore duplicates. + args.ManualVIPs = maps.Keys(vipMap) + + psn := structs.PeeredServiceName{ + ServiceName: structs.NewServiceName(args.Service, &args.EnterpriseMeta), + } + + // Check to see if we can skip the raft apply entirely. 
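+ // The requested IPs were deduplicated into vipMap above. If the state store + // already holds exactly that set for this service, answer Found without going + // through raft at all, so the entry's modify index is not bumped.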
+ { + existingIPs, err := m.srv.fsm.State().ServiceManualVIPs(psn) + if err != nil { + return fmt.Errorf("error checking for existing manual ips for service: %w", err) + } + if existingIPs != nil && stringslice.EqualMapKeys(existingIPs.ManualIPs, vipMap) { + *reply = structs.AssignServiceManualVIPsResponse{ + Found: true, + UnassignedFrom: nil, + } + return nil + } } req := state.ServiceVirtualIP{ - Service: structs.PeeredServiceName{ - ServiceName: structs.NewServiceName(args.Service, &args.EnterpriseMeta), - }, + Service: psn, ManualIPs: args.ManualVIPs, } resp, err := m.srv.raftApplyMsgpack(structs.UpdateVirtualIPRequestType, req) diff --git a/agent/consul/internal_endpoint_test.go b/agent/consul/internal_endpoint_test.go index e4b9a14b70..2a46c85e25 100644 --- a/agent/consul/internal_endpoint_test.go +++ b/agent/consul/internal_endpoint_test.go @@ -12,11 +12,11 @@ import ( "testing" "time" - "github.com/hashicorp/consul-net-rpc/net/rpc" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" + "github.com/hashicorp/consul-net-rpc/net/rpc" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" @@ -656,11 +656,73 @@ func TestInternal_NodeDump_FilterACL(t *testing.T) { t.Fatal("ResultsFilteredByACLs should be true") } - // We've already proven that we call the ACL filtering function so we - // test node filtering down in acl.go for node cases. This also proves - // that we respect the version 8 ACL flag, since the test server sets - // that to false (the regression value of *not* changing this is better - // for now until we change the sense of the version 8 ACL flag). + // need to ensure that ACLs are filtered prior to bexprFiltering + // Register additional node + regArgs := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + WriteRequest: structs.WriteRequest{ + Token: "root", + }, + } + + var out struct{} + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", regArgs, &out)) + + bexprMatchingUserTokenPermissions := fmt.Sprintf("Node matches `%s.*`", srv.config.NodeName) + const bexpNotMatchingUserTokenPermissions = "Node matches `node-deny.*`" + + t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) { + req := structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{ + Token: token, + Filter: bexprMatchingUserTokenPermissions, + }, + } + + reply = structs.IndexedNodeDump{} + if err := msgpackrpc.CallWithCodec(codec, "Internal.NodeDump", &req, &reply); err != nil { + t.Fatalf("err: %s", err) + } + require.Equal(t, 1, len(reply.Dump)) + require.True(t, reply.ResultsFilteredByACLs) + }) + + t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) { + req := structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{ + Token: token, + Filter: bexpNotMatchingUserTokenPermissions, + }, + } + + reply = structs.IndexedNodeDump{} + if err := msgpackrpc.CallWithCodec(codec, "Internal.NodeDump", &req, &reply); err != nil { + t.Fatalf("err: %s", err) + } + require.Zero(t, len(reply.Dump)) + require.True(t, reply.ResultsFilteredByACLs) + }) + + t.Run("request with filter that would match only record without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) { + req := 
structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{ + Token: "", + Filter: bexprMatchingUserTokenPermissions, + }, + } + + reply = structs.IndexedNodeDump{} + if err := msgpackrpc.CallWithCodec(codec, "Internal.NodeDump", &req, &reply); err != nil { + t.Fatalf("err: %s", err) + } + require.Empty(t, reply.Dump) + require.False(t, reply.ResultsFilteredByACLs) + }) } func TestInternal_EventFire_Token(t *testing.T) { @@ -1064,6 +1126,113 @@ func TestInternal_ServiceDump_ACL(t *testing.T) { require.Empty(t, out.Gateways) require.True(t, out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) + + // need to ensure that ACLs are filtered prior to bexprFiltering + // Register additional node + regArgs := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "node-deny", + ID: types.NodeID("e0155642-135d-4739-9853-b1ee6c9f945b"), + Address: "192.18.1.2", + Service: &structs.NodeService{ + Kind: structs.ServiceKindTypical, + ID: "memcached", + Service: "memcached", + Port: 5678, + }, + Check: &structs.HealthCheck{ + Name: "memcached check", + Status: api.HealthPassing, + ServiceID: "memcached", + }, + WriteRequest: structs.WriteRequest{ + Token: "root", + }, + } + + var out struct{} + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", regArgs, &out)) + + const ( + bexprMatchingUserTokenPermissions = "Service.Service matches `redis.*`" + bexpNotMatchingUserTokenPermissions = "Node.Node matches `node-deny.*`" + ) + + t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) { + token := tokenWithRules(t, ` + node "node-deny" { + policy = "deny" + } + node "node1" { + policy = "read" + } + service "redis" { + policy = "read" + } + `) + var reply structs.IndexedNodesWithGateways + req := structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{ + Token: token, + Filter: bexprMatchingUserTokenPermissions, + }, + } + + reply = structs.IndexedNodesWithGateways{} + if err := msgpackrpc.CallWithCodec(codec, "Internal.ServiceDump", &req, &reply); err != nil { + t.Fatalf("err: %s", err) + } + require.Equal(t, 1, len(reply.Nodes)) + require.True(t, reply.ResultsFilteredByACLs) + }) + + t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) { + token := tokenWithRules(t, ` + node "node-deny" { + policy = "deny" + } + node "node1" { + policy = "read" + } + service "redis" { + policy = "read" + } + `) + var reply structs.IndexedNodesWithGateways + req := structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{ + Token: token, + Filter: bexpNotMatchingUserTokenPermissions, + }, + } + + reply = structs.IndexedNodesWithGateways{} + if err := msgpackrpc.CallWithCodec(codec, "Internal.ServiceDump", &req, &reply); err != nil { + t.Fatalf("err: %s", err) + } + require.Zero(t, len(reply.Nodes)) + require.True(t, reply.ResultsFilteredByACLs) + }) + + t.Run("request with filter that would match only record without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) { + var reply structs.IndexedNodesWithGateways + req := structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{ + Token: "", // no token + Filter: bexpNotMatchingUserTokenPermissions, + }, + } + + reply = structs.IndexedNodesWithGateways{} + if err := msgpackrpc.CallWithCodec(codec, 
"Internal.ServiceDump", &req, &reply); err != nil { + t.Fatalf("err: %s", err) + } + require.Empty(t, reply.Nodes) + require.False(t, reply.ResultsFilteredByACLs) + }) } func TestInternal_GatewayServiceDump_Terminating(t *testing.T) { @@ -3716,21 +3885,41 @@ func TestInternal_AssignManualServiceVIPs(t *testing.T) { require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.AssignManualServiceVIPs", req, &resp)) type testcase struct { - name string - req structs.AssignServiceManualVIPsRequest - expect structs.AssignServiceManualVIPsResponse - expectErr string + name string + req structs.AssignServiceManualVIPsRequest + expect structs.AssignServiceManualVIPsResponse + expectAgain structs.AssignServiceManualVIPsResponse + expectErr string + expectIPs []string } - run := func(t *testing.T, tc testcase) { - var resp structs.AssignServiceManualVIPsResponse - err := msgpackrpc.CallWithCodec(codec, "Internal.AssignManualServiceVIPs", tc.req, &resp) - if tc.expectErr != "" { - require.Error(t, err) - require.Contains(t, err.Error(), tc.expectErr) - return + + run := func(t *testing.T, tc testcase, again bool) { + if tc.expectErr != "" && again { + return // we don't retest known errors + } + + var resp structs.AssignServiceManualVIPsResponse + idx1 := s1.raft.CommitIndex() + err := msgpackrpc.CallWithCodec(codec, "Internal.AssignManualServiceVIPs", tc.req, &resp) + idx2 := s1.raft.CommitIndex() + if tc.expectErr != "" { + testutil.RequireErrorContains(t, err, tc.expectErr) + } else { + if again { + require.Equal(t, tc.expectAgain, resp) + require.Equal(t, idx1, idx2, "no raft operations occurred") + } else { + require.Equal(t, tc.expect, resp) + } + + psn := structs.PeeredServiceName{ServiceName: structs.NewServiceName(tc.req.Service, nil)} + got, err := s1.fsm.State().ServiceManualVIPs(psn) + require.NoError(t, err) + require.NotNil(t, got) + require.Equal(t, tc.expectIPs, got.ManualIPs) } - require.Equal(t, tc.expect, resp) } + tcs := []testcase{ { name: "successful manual ip assignment", @@ -3738,7 +3927,19 @@ func TestInternal_AssignManualServiceVIPs(t *testing.T) { Service: "web", ManualVIPs: []string{"1.1.1.1", "2.2.2.2"}, }, - expect: structs.AssignServiceManualVIPsResponse{Found: true}, + expectIPs: []string{"1.1.1.1", "2.2.2.2"}, + expect: structs.AssignServiceManualVIPsResponse{Found: true}, + expectAgain: structs.AssignServiceManualVIPsResponse{Found: true}, + }, + { + name: "successfully ignoring duplicates", + req: structs.AssignServiceManualVIPsRequest{ + Service: "web", + ManualVIPs: []string{"1.2.3.4", "5.6.7.8", "1.2.3.4", "5.6.7.8"}, + }, + expectIPs: []string{"1.2.3.4", "5.6.7.8"}, + expect: structs.AssignServiceManualVIPsResponse{Found: true}, + expectAgain: structs.AssignServiceManualVIPsResponse{Found: true}, }, { name: "reassign existing ip", @@ -3746,6 +3947,7 @@ func TestInternal_AssignManualServiceVIPs(t *testing.T) { Service: "web", ManualVIPs: []string{"8.8.8.8"}, }, + expectIPs: []string{"8.8.8.8"}, expect: structs.AssignServiceManualVIPsResponse{ Found: true, UnassignedFrom: []structs.PeeredServiceName{ @@ -3754,6 +3956,8 @@ func TestInternal_AssignManualServiceVIPs(t *testing.T) { }, }, }, + // When we repeat this operation the second time it's a no-op. 
+ expectAgain: structs.AssignServiceManualVIPsResponse{Found: true}, }, { name: "invalid ip", req: structs.AssignServiceManualVIPsRequest{ Service: "web", ManualVIPs: []string{"3.3.3.3", "invalid"}, }, - expect: structs.AssignServiceManualVIPsResponse{}, expectErr: "not a valid", }, } for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { - run(t, tc) + t.Run("initial", func(t *testing.T) { + run(t, tc, false) + }) + if tc.expectErr == "" { + t.Run("repeat", func(t *testing.T) { + run(t, tc, true) // only repeat a write if it isn't a known error + }) + } }) } } diff --git a/agent/consul/server.go b/agent/consul/server.go index 979d9e3cd4..e33fd493ae 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -53,7 +53,6 @@ import ( "github.com/hashicorp/consul/agent/consul/xdscapacity" "github.com/hashicorp/consul/agent/grpc-external/services/peerstream" "github.com/hashicorp/consul/agent/hcp" - "github.com/hashicorp/consul/agent/hcp/bootstrap" hcpclient "github.com/hashicorp/consul/agent/hcp/client" logdrop "github.com/hashicorp/consul/agent/log-drop" "github.com/hashicorp/consul/agent/metadata" @@ -65,7 +64,6 @@ import ( "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/internal/controller" "github.com/hashicorp/consul/internal/gossip/librtt" - hcpctl "github.com/hashicorp/consul/internal/hcp" "github.com/hashicorp/consul/internal/multicluster" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/demo" @@ -838,25 +836,6 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, // to enable RPC forwarding. s.grpcLeaderForwarder = flat.LeaderForwarder - if s.config.Cloud.IsConfigured() { - // Start watching HCP Link resource. This needs to be created after - // the GRPC services are set up in order for the resource service client to - // function. This uses the insecure grpc channel so that it doesn't need to - // present a valid ACL token. - go hcp.RunHCPLinkWatcher( - &lib.StopChannelContext{StopCh: shutdownCh}, - logger.Named("hcp-link-watcher"), - pbresource.NewResourceServiceClient(s.insecureSafeGRPCChan), - hcp.HCPManagerLifecycleFn( - s.hcpManager, - hcpclient.NewClient, - bootstrap.LoadManagementToken, - flat.HCP.Config, - flat.HCP.DataDir, - ), - ) - } - s.controllerManager = controller.NewManager( // Usage of the insecure + unsafe grpc chan is required for the controller // manager. 
It must be unauthorized so that controllers do not need to @@ -928,15 +907,7 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, return s, nil } -func (s *Server) registerControllers(deps Deps) error { - if s.config.Cloud.IsConfigured() { - hcpctl.RegisterControllers( - s.controllerManager, hcpctl.ControllerDependencies{ - CloudConfig: deps.HCP.Config, - }, - ) - } - +func (s *Server) registerControllers(_ Deps) error { shim := NewExportedServicesShim(s) multicluster.RegisterCompatControllers(s.controllerManager, multicluster.DefaultCompatControllerDependencies(shim)) diff --git a/agent/consul/state/catalog.go b/agent/consul/state/catalog.go index dcfe4ec91f..b8588f17cc 100644 --- a/agent/consul/state/catalog.go +++ b/agent/consul/state/catalog.go @@ -8,6 +8,8 @@ import ( "fmt" "net" "reflect" + "slices" + "sort" "strings" "github.com/hashicorp/go-memdb" @@ -18,6 +20,7 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/lib/maps" + "github.com/hashicorp/consul/lib/stringslice" "github.com/hashicorp/consul/types" ) @@ -1106,6 +1109,9 @@ func (s *Store) AssignManualServiceVIPs(idx uint64, psn structs.PeeredServiceNam for _, ip := range ips { assignedIPs[ip] = struct{}{} } + + txnNeedsCommit := false + modifiedEntries := make(map[structs.PeeredServiceName]struct{}) for ip := range assignedIPs { entry, err := tx.First(tableServiceVirtualIPs, indexManualVIPs, psn.ServiceName.PartitionOrDefault(), ip) @@ -1118,7 +1124,13 @@ func (s *Store) AssignManualServiceVIPs(idx uint64, psn structs.PeeredServiceNam } newEntry := entry.(ServiceVirtualIP) - if newEntry.Service.ServiceName.Matches(psn.ServiceName) { + + var ( + thisServiceName = newEntry.Service.ServiceName + thisPeer = newEntry.Service.Peer + ) + + if thisServiceName.Matches(psn.ServiceName) && thisPeer == psn.Peer { continue } @@ -1130,6 +1142,7 @@ func (s *Store) AssignManualServiceVIPs(idx uint64, psn structs.PeeredServiceNam filteredIPs = append(filteredIPs, existingIP) } } + sort.Strings(filteredIPs) newEntry.ManualIPs = filteredIPs newEntry.ModifyIndex = idx @@ -1137,6 +1150,12 @@ func (s *Store) AssignManualServiceVIPs(idx uint64, psn structs.PeeredServiceNam return false, nil, fmt.Errorf("failed inserting service virtual IP entry: %s", err) } modifiedEntries[newEntry.Service] = struct{}{} + + if err := updateVirtualIPMaxIndexes(tx, idx, thisServiceName.PartitionOrDefault(), thisPeer); err != nil { + return false, nil, err + } + + txnNeedsCommit = true } entry, err := tx.First(tableServiceVirtualIPs, indexID, psn) @@ -1149,23 +1168,37 @@ func (s *Store) AssignManualServiceVIPs(idx uint64, psn structs.PeeredServiceNam } newEntry := entry.(ServiceVirtualIP) - newEntry.ManualIPs = ips - newEntry.ModifyIndex = idx - if err := tx.Insert(tableServiceVirtualIPs, newEntry); err != nil { - return false, nil, fmt.Errorf("failed inserting service virtual IP entry: %s", err) + // Check to see if the slice already contains the same ips. 
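+ // stringslice.EqualMapKeys compares the stored slice against the requested set + // independent of order; when they are equal the entry is left untouched, so + // neither its ModifyIndex nor the table max indexes move.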
+ if !stringslice.EqualMapKeys(newEntry.ManualIPs, assignedIPs) { + newEntry.ManualIPs = slices.Clone(ips) + newEntry.ModifyIndex = idx + + sort.Strings(newEntry.ManualIPs) + + if err := tx.Insert(tableServiceVirtualIPs, newEntry); err != nil { + return false, nil, fmt.Errorf("failed inserting service virtual IP entry: %s", err) + } + if err := updateVirtualIPMaxIndexes(tx, idx, psn.ServiceName.PartitionOrDefault(), psn.Peer); err != nil { + return false, nil, err + } + txnNeedsCommit = true } - if err := updateVirtualIPMaxIndexes(tx, idx, psn.ServiceName.PartitionOrDefault(), psn.Peer); err != nil { - return false, nil, err - } - if err = tx.Commit(); err != nil { - return false, nil, err + + if txnNeedsCommit { + if err = tx.Commit(); err != nil { + return false, nil, err + } } return true, maps.SliceOfKeys(modifiedEntries), nil } func updateVirtualIPMaxIndexes(txn WriteTxn, idx uint64, partition, peerName string) error { + // update global max index (for snapshots) + if err := indexUpdateMaxTxn(txn, idx, tableServiceVirtualIPs); err != nil { + return fmt.Errorf("failed while updating index: %w", err) + } // update per-partition max index if err := indexUpdateMaxTxn(txn, idx, partitionedIndexEntryName(tableServiceVirtualIPs, partition)); err != nil { return fmt.Errorf("failed while updating partitioned index: %w", err) @@ -3086,6 +3119,7 @@ func servicesVirtualIPsTxn(tx ReadTxn, ws memdb.WatchSet) (uint64, []ServiceVirt vips = append(vips, vip) } + // Pull from the global one idx := maxIndexWatchTxn(tx, nil, tableServiceVirtualIPs) return idx, vips, nil diff --git a/agent/consul/state/catalog_test.go b/agent/consul/state/catalog_test.go index cef608bc1c..8445acf987 100644 --- a/agent/consul/state/catalog_test.go +++ b/agent/consul/state/catalog_test.go @@ -13,15 +13,15 @@ import ( "testing" "time" - "github.com/hashicorp/consul/acl" - "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - "github.com/hashicorp/go-memdb" - "github.com/hashicorp/go-uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/hashicorp/go-memdb" + "github.com/hashicorp/go-uuid" + + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib/stringslice" @@ -1963,81 +1963,289 @@ func TestStateStore_AssignManualVirtualIPs(t *testing.T) { s := testStateStore(t) setVirtualIPFlags(t, s) - // Attempt to assign manual virtual IPs to a service that doesn't exist - should be a no-op. - psn := structs.PeeredServiceName{ServiceName: structs.ServiceName{Name: "foo", EnterpriseMeta: *acl.DefaultEnterpriseMeta()}} - found, svcs, err := s.AssignManualServiceVIPs(0, psn, []string{"7.7.7.7", "8.8.8.8"}) - require.NoError(t, err) - require.False(t, found) - require.Empty(t, svcs) - serviceVIP, err := s.ServiceManualVIPs(psn) - require.NoError(t, err) - require.Nil(t, serviceVIP) - - // Create the service registration. - entMeta := structs.DefaultEnterpriseMetaInDefaultPartition() - ns1 := &structs.NodeService{ - ID: "foo", - Service: "foo", - Address: "1.1.1.1", - Port: 1111, - Connect: structs.ServiceConnect{Native: true}, - EnterpriseMeta: *entMeta, + newPSN := func(name, peer string) structs.PeeredServiceName { + return structs.PeeredServiceName{ + ServiceName: structs.ServiceName{ + Name: name, + EnterpriseMeta: *acl.DefaultEnterpriseMeta(), + }, + Peer: peer, + } } - // Service successfully registers into the state store. 
- testRegisterNode(t, s, 0, "node1") - require.NoError(t, s.EnsureService(1, "node1", ns1)) + checkMaxIndexes := func(t *testing.T, expect, expectImported uint64) { + t.Helper() + tx := s.db.Txn(false) + defer tx.Abort() - // Make sure there's a virtual IP for the foo service. - vip, err := s.VirtualIPForService(psn) - require.NoError(t, err) - assert.Equal(t, "240.0.0.1", vip) + idx := maxIndexWatchTxn(tx, nil, tableServiceVirtualIPs) + require.Equal(t, expect, idx) - // No manual IP should be set yet. - serviceVIP, err = s.ServiceManualVIPs(psn) - require.NoError(t, err) - require.Equal(t, "0.0.0.1", serviceVIP.IP.String()) - require.Empty(t, serviceVIP.ManualIPs) + entMeta := acl.DefaultEnterpriseMeta() - // Attempt to assign manual virtual IPs again. - found, svcs, err = s.AssignManualServiceVIPs(2, psn, []string{"7.7.7.7", "8.8.8.8"}) - require.NoError(t, err) - require.True(t, found) - require.Empty(t, svcs) - serviceVIP, err = s.ServiceManualVIPs(psn) - require.NoError(t, err) - require.Equal(t, "0.0.0.1", serviceVIP.IP.String()) - require.Equal(t, serviceVIP.ManualIPs, []string{"7.7.7.7", "8.8.8.8"}) + importedIdx := maxIndexTxn(tx, partitionedIndexEntryName(tableServiceVirtualIPs+".imported", entMeta.PartitionOrDefault())) + require.Equal(t, expectImported, importedIdx) + } - // Register another service via config entry. - s.EnsureConfigEntry(3, &structs.ServiceResolverConfigEntry{ - Kind: structs.ServiceResolver, - Name: "bar", + assignManual := func( + t *testing.T, + idx uint64, + psn structs.PeeredServiceName, + ips []string, + modified ...structs.PeeredServiceName, + ) { + t.Helper() + found, svcs, err := s.AssignManualServiceVIPs(idx, psn, ips) + require.NoError(t, err) + require.True(t, found) + if len(modified) == 0 { + require.Empty(t, svcs) + } else { + require.ElementsMatch(t, modified, svcs) + } + } + + checkVIP := func( + t *testing.T, + psn structs.PeeredServiceName, + expectVIP string, + ) { + t.Helper() + // Make sure there's a virtual IP for the foo service. + vip, err := s.VirtualIPForService(psn) + require.NoError(t, err) + assert.Equal(t, expectVIP, vip) + } + + checkManualVIP := func( + t *testing.T, + psn structs.PeeredServiceName, + expectIP string, + expectManual []string, + expectIndex uint64, + ) { + t.Helper() + serviceVIP, err := s.ServiceManualVIPs(psn) + require.NoError(t, err) + require.Equal(t, expectIP, serviceVIP.IP.String()) + if len(expectManual) == 0 { + require.Empty(t, serviceVIP.ManualIPs) + } else { + require.Equal(t, expectManual, serviceVIP.ManualIPs) + } + require.Equal(t, expectIndex, serviceVIP.ModifyIndex) + } + + psn := newPSN("foo", "") + + lastIndex := uint64(0) + nextIndex := func() uint64 { + lastIndex++ + return lastIndex + } + + testutil.RunStep(t, "assign to nonexistent service is noop", func(t *testing.T) { + useIdx := nextIndex() + + // Attempt to assign manual virtual IPs to a service that doesn't exist - should be a no-op. + found, svcs, err := s.AssignManualServiceVIPs(useIdx, psn, []string{"7.7.7.7", "8.8.8.8"}) + require.NoError(t, err) + require.False(t, found) + require.Empty(t, svcs) + + serviceVIP, err := s.ServiceManualVIPs(psn) + require.NoError(t, err) + require.Nil(t, serviceVIP) + + checkMaxIndexes(t, 0, 0) }) - psn2 := structs.PeeredServiceName{ServiceName: structs.ServiceName{Name: "bar"}} - vip, err = s.VirtualIPForService(psn2) - require.NoError(t, err) - assert.Equal(t, "240.0.0.2", vip) + // Create the service registration. 
+ var regIndex1 uint64 + testutil.RunStep(t, "create service 1", func(t *testing.T) { + useIdx := nextIndex() + regIndex1 = useIdx + + entMeta := acl.DefaultEnterpriseMeta() + ns1 := &structs.NodeService{ + ID: "foo", + Service: "foo", + Address: "1.1.1.1", + Port: 1111, + Connect: structs.ServiceConnect{Native: true}, + EnterpriseMeta: *entMeta, + } + + // Service successfully registers into the state store. + testRegisterNode(t, s, useIdx, "node1") + require.NoError(t, s.EnsureService(useIdx, "node1", ns1)) + + // Make sure there's a virtual IP for the foo service. + checkVIP(t, psn, "240.0.0.1") + + // No manual IP should be set yet. + checkManualVIP(t, psn, "0.0.0.1", []string{}, regIndex1) + + checkMaxIndexes(t, regIndex1, 0) + }) + + // Attempt to assign manual virtual IPs again. + var assignIndex1 uint64 + testutil.RunStep(t, "assign to existent service does something", func(t *testing.T) { + useIdx := nextIndex() + assignIndex1 = useIdx + + // inserting in the wrong order to test the string sort + assignManual(t, useIdx, psn, []string{"7.7.7.7", "8.8.8.8", "6.6.6.6"}) + + checkManualVIP(t, psn, "0.0.0.1", []string{ + "6.6.6.6", "7.7.7.7", "8.8.8.8", + }, assignIndex1) + + checkMaxIndexes(t, assignIndex1, 0) + }) + + psn2 := newPSN("bar", "") + + var regIndex2 uint64 + testutil.RunStep(t, "create service 2", func(t *testing.T) { + useIdx := nextIndex() + regIndex2 = useIdx + + // Register another service via config entry. + s.EnsureConfigEntry(useIdx, &structs.ServiceResolverConfigEntry{ + Kind: structs.ServiceResolver, + Name: "bar", + }) + + checkVIP(t, psn2, "240.0.0.2") + + // No manual IP should be set yet. + checkManualVIP(t, psn2, "0.0.0.2", []string{}, regIndex2) + + checkMaxIndexes(t, regIndex2, 0) + }) // Attempt to assign manual virtual IPs for bar, with one IP overlapping with foo. // This should cause the ip to be removed from foo's list of manual IPs. - found, svcs, err = s.AssignManualServiceVIPs(4, psn2, []string{"7.7.7.7", "9.9.9.9"}) - require.NoError(t, err) - require.True(t, found) - require.ElementsMatch(t, svcs, []structs.PeeredServiceName{psn}) + var assignIndex2 uint64 + testutil.RunStep(t, "assign to existent service and ip is removed from another", func(t *testing.T) { + useIdx := nextIndex() + assignIndex2 = useIdx - serviceVIP, err = s.ServiceManualVIPs(psn) - require.NoError(t, err) - require.Equal(t, "0.0.0.1", serviceVIP.IP.String()) - require.Equal(t, []string{"8.8.8.8"}, serviceVIP.ManualIPs) - require.Equal(t, uint64(4), serviceVIP.ModifyIndex) + assignManual(t, useIdx, psn2, []string{"7.7.7.7", "9.9.9.9"}, psn) - serviceVIP, err = s.ServiceManualVIPs(psn2) - require.NoError(t, err) - require.Equal(t, "0.0.0.2", serviceVIP.IP.String()) - require.Equal(t, []string{"7.7.7.7", "9.9.9.9"}, serviceVIP.ManualIPs) - require.Equal(t, uint64(4), serviceVIP.ModifyIndex) + checkManualVIP(t, psn, "0.0.0.1", []string{ + "6.6.6.6", "8.8.8.8", // 7.7.7.7 was stolen by psn2 + }, assignIndex2) + checkManualVIP(t, psn2, "0.0.0.2", []string{ + "7.7.7.7", "9.9.9.9", + }, assignIndex2) + + checkMaxIndexes(t, assignIndex2, 0) + }) + + psn3 := newPSN("gir", "peer1") + + var regIndex3 uint64 + testutil.RunStep(t, "create peered service 1", func(t *testing.T) { + useIdx := nextIndex() + regIndex3 = useIdx + + // Create the service registration. 
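+ // This service is registered under peer "peer1", exercising the Peer field of + // PeeredServiceName; the steps below show that manual IPs are still claimed + // from one shared pool, so assignments can steal IPs across peers.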
+ entMetaPeer := acl.DefaultEnterpriseMeta() + nsPeer1 := &structs.NodeService{ + ID: "gir", + Service: "gir", + Address: "9.9.9.9", + Port: 2222, + PeerName: "peer1", + Connect: structs.ServiceConnect{Native: true}, + EnterpriseMeta: *entMetaPeer, + } + + // Service successfully registers into the state store. + testRegisterPeering(t, s, useIdx, "peer1") + testRegisterNodeOpts(t, s, useIdx, "node9", func(n *structs.Node) error { + n.PeerName = "peer1" + return nil + }) + require.NoError(t, s.EnsureService(useIdx, "node9", nsPeer1)) + + checkVIP(t, psn3, "240.0.0.3") + + // No manual IP should be set yet. + checkManualVIP(t, psn3, "0.0.0.3", []string{}, regIndex3) + + checkMaxIndexes(t, regIndex3, regIndex3) + }) + + // Assign manual virtual IPs to the peered service. + var assignIndex3 uint64 + testutil.RunStep(t, "assign to peered service and steal from non-peered", func(t *testing.T) { + useIdx := nextIndex() + assignIndex3 = useIdx + + // 6.6.6.6 is stolen from psn + assignManual(t, useIdx, psn3, []string{"5.5.5.5", "6.6.6.6"}, psn) + + checkManualVIP(t, psn, "0.0.0.1", []string{ + "8.8.8.8", // 6.6.6.6 was stolen by psn3 + }, assignIndex3) + checkManualVIP(t, psn2, "0.0.0.2", []string{ + "7.7.7.7", "9.9.9.9", + }, assignIndex2) + checkManualVIP(t, psn3, "0.0.0.3", []string{ + "5.5.5.5", "6.6.6.6", + }, assignIndex3) + + checkMaxIndexes(t, assignIndex3, assignIndex3) + }) + + var assignIndex4 uint64 + testutil.RunStep(t, "assign to non-peered service and steal from peered", func(t *testing.T) { + useIdx := nextIndex() + assignIndex4 = useIdx + + // 6.6.6.6 is stolen from psn3 + assignManual(t, useIdx, psn2, []string{ + "7.7.7.7", "9.9.9.9", "6.6.6.6", + }, psn3) + + checkManualVIP(t, psn, "0.0.0.1", []string{ + "8.8.8.8", // 6.6.6.6 was stolen by psn3 earlier + }, assignIndex3) + checkManualVIP(t, psn2, "0.0.0.2", []string{ + "6.6.6.6", "7.7.7.7", "9.9.9.9", + }, assignIndex4) + checkManualVIP(t, psn3, "0.0.0.3", []string{ + "5.5.5.5", + }, assignIndex4) + + checkMaxIndexes(t, assignIndex4, assignIndex4) + }) + + testutil.RunStep(t, "repeat the last write and no indexes should be bumped", func(t *testing.T) { + useIdx := nextIndex() + + assignManual(t, useIdx, psn2, []string{ + "7.7.7.7", "9.9.9.9", "6.6.6.6", + }) // no services modified this time + + // no changes + checkManualVIP(t, psn, "0.0.0.1", []string{ + "8.8.8.8", + }, assignIndex3) + checkManualVIP(t, psn2, "0.0.0.2", []string{ + "6.6.6.6", "7.7.7.7", "9.9.9.9", + }, assignIndex4) + checkManualVIP(t, psn3, "0.0.0.3", []string{ + "5.5.5.5", + }, assignIndex4) + + // no change + checkMaxIndexes(t, assignIndex4, assignIndex4) + }) } func TestStateStore_EnsureService_ReassignFreedVIPs(t *testing.T) { diff --git a/agent/consul/testdata/v2-resource-dependencies.md b/agent/consul/testdata/v2-resource-dependencies.md index 7bcb0d55c4..b69cb967f9 100644 --- a/agent/consul/testdata/v2-resource-dependencies.md +++ b/agent/consul/testdata/v2-resource-dependencies.md @@ -7,8 +7,6 @@ flowchart TD demo/v1/recordlabel demo/v2/album demo/v2/artist - hcp/v2/link - hcp/v2/telemetrystate internal/v1/tombstone multicluster/v2/computedexportedservices --> multicluster/v2/exportedservices multicluster/v2/computedexportedservices --> multicluster/v2/namespaceexportedservices @@ -16,4 +14,4 @@ flowchart TD multicluster/v2/exportedservices multicluster/v2/namespaceexportedservices multicluster/v2/partitionexportedservices -``` \ No newline at end of file +``` diff --git a/agent/consul/type_registry.go b/agent/consul/type_registry.go index cd2087e48f..8ab91f7946 100644
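The steps above pin down the contract of `AssignManualServiceVIPs`: manual IPs are stored sorted, assigning an IP that another service already owns moves it and reports that previous owner as modified, and repeating an identical assignment must not bump any modify index. A minimal sketch of those semantics over plain maps — `vipTable` and its helpers are illustrative only, not the memdb-backed state store:

```go
package main

import (
	"fmt"
	"reflect"
	"sort"
)

// vipTable mimics the bookkeeping the test drives: which service owns each
// manual IP, each service's sorted IP list, and a per-service modify index.
type vipTable struct {
	owners map[string]string   // manual IP -> owning service
	ips    map[string][]string // service -> sorted manual IPs
	index  map[string]uint64   // service -> modify index
}

func newVIPTable() *vipTable {
	return &vipTable{
		owners: map[string]string{},
		ips:    map[string][]string{},
		index:  map[string]uint64{},
	}
}

func without(xs []string, drop string) []string {
	var out []string
	for _, x := range xs {
		if x != drop {
			out = append(out, x)
		}
	}
	return out
}

func has(xs []string, want string) bool {
	for _, x := range xs {
		if x == want {
			return true
		}
	}
	return false
}

// assign stores ips for svc at index idx and returns the services whose IP
// lists were modified as a side effect. An identical repeat write is a no-op.
func (t *vipTable) assign(idx uint64, svc string, ips []string) []string {
	sorted := append([]string(nil), ips...)
	sort.Strings(sorted)
	if reflect.DeepEqual(t.ips[svc], sorted) {
		return nil // repeat write: no index is bumped anywhere
	}
	var modified []string
	for _, ip := range sorted {
		if prev, ok := t.owners[ip]; ok && prev != svc {
			// Steal the IP from its previous owner and bump that owner too.
			t.ips[prev] = without(t.ips[prev], ip)
			t.index[prev] = idx
			modified = append(modified, prev)
		}
		t.owners[ip] = svc
	}
	for _, ip := range t.ips[svc] { // release IPs svc no longer claims
		if !has(sorted, ip) {
			delete(t.owners, ip)
		}
	}
	t.ips[svc] = sorted
	t.index[svc] = idx
	return modified
}

func main() {
	t := newVIPTable()
	t.assign(1, "foo", []string{"7.7.7.7", "8.8.8.8", "6.6.6.6"})
	fmt.Println(t.assign(2, "bar", []string{"7.7.7.7", "9.9.9.9"})) // [foo]: 7.7.7.7 was stolen
	fmt.Println(t.ips["foo"], t.index["foo"])                       // [6.6.6.6 8.8.8.8] 2
	fmt.Println(t.assign(3, "bar", []string{"9.9.9.9", "7.7.7.7"})) // []: identical write, no bump
}
```

Running it prints the steal, the updated owner state, and the empty result for the repeated write — the same three behaviors the test steps assert.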
--- a/agent/consul/type_registry.go +++ b/agent/consul/type_registry.go @@ -4,7 +4,6 @@ package consul import ( - "github.com/hashicorp/consul/internal/hcp" "github.com/hashicorp/consul/internal/multicluster" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/demo" @@ -22,7 +21,6 @@ func NewTypeRegistry() resource.Registry { demo.RegisterTypes(registry) multicluster.RegisterTypes(registry) - hcp.RegisterTypes(registry) return registry } diff --git a/agent/dns_test.go b/agent/dns_test.go index c5a8c1db2c..56c549cb6b 100644 --- a/agent/dns_test.go +++ b/agent/dns_test.go @@ -2367,7 +2367,7 @@ func TestDNS_trimUDPResponse_NoTrim(t *testing.T) { }, } - cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy" `) + cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" `) if trimmed := trimUDPResponse(req, resp, cfg.DNSUDPAnswerLimit); trimmed { t.Fatalf("Bad %#v", *resp) } @@ -2400,7 +2400,7 @@ func TestDNS_trimUDPResponse_NoTrim(t *testing.T) { } func TestDNS_trimUDPResponse_TrimLimit(t *testing.T) { - cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy" `) + cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" `) req, resp, expected := &dns.Msg{}, &dns.Msg{}, &dns.Msg{} for i := 0; i < cfg.DNSUDPAnswerLimit+1; i++ { @@ -2439,7 +2439,7 @@ func TestDNS_trimUDPResponse_TrimLimit(t *testing.T) { } func TestDNS_trimUDPResponse_TrimLimitWithNS(t *testing.T) { - cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy" `) + cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" `) req, resp, expected := &dns.Msg{}, &dns.Msg{}, &dns.Msg{} for i := 0; i < cfg.DNSUDPAnswerLimit+1; i++ { @@ -2486,7 +2486,7 @@ func TestDNS_trimUDPResponse_TrimLimitWithNS(t *testing.T) { } func TestDNS_trimTCPResponse_TrimLimitWithNS(t *testing.T) { - cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy" `) + cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" `) req, resp, expected := &dns.Msg{}, &dns.Msg{}, &dns.Msg{} for i := 0; i < 5000; i++ { @@ -2542,7 +2542,7 @@ func loadRuntimeConfig(t *testing.T, hcl string) *config.RuntimeConfig { } func TestDNS_trimUDPResponse_TrimSize(t *testing.T) { - cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy" `) + cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" `) req, resp := &dns.Msg{}, &dns.Msg{} for i := 0; i < 100; i++ { @@ -2594,7 +2594,7 @@ func TestDNS_trimUDPResponse_TrimSize(t *testing.T) { } func TestDNS_trimUDPResponse_TrimSizeEDNS(t *testing.T) { - cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy" `) + cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" `) req, resp := &dns.Msg{}, &dns.Msg{} @@ -2672,7 +2672,7 @@ func TestDNS_trimUDPResponse_TrimSizeEDNS(t *testing.T) { } func TestDNS_trimUDPResponse_TrimSizeMaxSize(t *testing.T) { - cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy" `) + cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" `) resp := &dns.Msg{} diff --git a/agent/hcp/link_watch.go b/agent/hcp/link_watch.go 
deleted file mode 100644 index b89ba942e4..0000000000 --- a/agent/hcp/link_watch.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package hcp - -import ( - "context" - "time" - - "github.com/hashicorp/go-hclog" - - hcpctl "github.com/hashicorp/consul/internal/hcp" - "github.com/hashicorp/consul/lib/retry" - pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2" - "github.com/hashicorp/consul/proto-public/pbresource" -) - -type LinkEventHandler = func(context.Context, hclog.Logger, *pbresource.WatchEvent) - -func handleLinkEvents(ctx context.Context, logger hclog.Logger, watchClient pbresource.ResourceService_WatchListClient, linkEventHandler LinkEventHandler) { - for { - select { - case <-ctx.Done(): - logger.Debug("context canceled, exiting") - return - default: - watchEvent, err := watchClient.Recv() - - if err != nil { - logger.Error("error receiving link watch event", "error", err) - return - } - - linkEventHandler(ctx, logger, watchEvent) - } - } -} - -func RunHCPLinkWatcher( - ctx context.Context, logger hclog.Logger, client pbresource.ResourceServiceClient, linkEventHandler LinkEventHandler, -) { - errorBackoff := &retry.Waiter{ - MinFailures: 10, - MinWait: 0, - MaxWait: 1 * time.Minute, - } - for { - select { - case <-ctx.Done(): - logger.Debug("context canceled, exiting") - return - default: - watchClient, err := client.WatchList( - ctx, &pbresource.WatchListRequest{ - Type: pbhcp.LinkType, - NamePrefix: hcpctl.LinkName, - }, - ) - if err != nil { - logger.Error("failed to create watch on Link", "error", err) - errorBackoff.Wait(ctx) - continue - } - errorBackoff.Reset() - handleLinkEvents(ctx, logger, watchClient, linkEventHandler) - } - } -} diff --git a/agent/hcp/link_watch_test.go b/agent/hcp/link_watch_test.go deleted file mode 100644 index 22d2204a81..0000000000 --- a/agent/hcp/link_watch_test.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package hcp - -import ( - "context" - "errors" - "testing" - - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - - "github.com/hashicorp/go-hclog" - - mockpbresource "github.com/hashicorp/consul/grpcmocks/proto-public/pbresource" - hcpctl "github.com/hashicorp/consul/internal/hcp" - pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2" - "github.com/hashicorp/consul/proto-public/pbresource" -) - -// This tests that when we get a watch event from the Recv call, we get that same event on the -// output channel, then we -func TestLinkWatcher_Ok(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - testWatchEvent := &pbresource.WatchEvent{} - mockWatchListClient := mockpbresource.NewResourceService_WatchListClient(t) - mockWatchListClient.EXPECT().Recv().Return(testWatchEvent, nil) - - eventCh := make(chan *pbresource.WatchEvent) - mockLinkHandler := func(_ context.Context, _ hclog.Logger, event *pbresource.WatchEvent) { - eventCh <- event - } - - client := mockpbresource.NewResourceServiceClient(t) - client.EXPECT().WatchList(mock.Anything, &pbresource.WatchListRequest{ - Type: pbhcp.LinkType, - NamePrefix: hcpctl.LinkName, - }).Return(mockWatchListClient, nil) - - go RunHCPLinkWatcher(ctx, hclog.Default(), client, mockLinkHandler) - - // Assert that the link handler is called with the testWatchEvent - receivedWatchEvent := <-eventCh - require.Equal(t, testWatchEvent, receivedWatchEvent) -} - -func TestLinkWatcher_RecvError(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - - // Our mock WatchListClient will simulate 5 errors, then will cancel the context. - // We expect RunHCPLinkWatcher to attempt to create the WatchListClient 6 times (initial attempt plus 5 retries) - // before exiting due to context cancellation. - mockWatchListClient := mockpbresource.NewResourceService_WatchListClient(t) - numFailures := 5 - failures := 0 - mockWatchListClient.EXPECT().Recv().RunAndReturn(func() (*pbresource.WatchEvent, error) { - if failures < numFailures { - failures++ - return nil, errors.New("unexpectedError") - } - defer cancel() - return &pbresource.WatchEvent{}, nil - }) - - client := mockpbresource.NewResourceServiceClient(t) - client.EXPECT().WatchList(mock.Anything, &pbresource.WatchListRequest{ - Type: pbhcp.LinkType, - NamePrefix: hcpctl.LinkName, - }).Return(mockWatchListClient, nil).Times(numFailures + 1) - - RunHCPLinkWatcher(ctx, hclog.Default(), client, func(_ context.Context, _ hclog.Logger, _ *pbresource.WatchEvent) {}) -} - -func TestLinkWatcher_WatchListError(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - - // Our mock WatchList will simulate 5 errors, then will cancel the context. - // We expect RunHCPLinkWatcher to attempt to create the WatchListClient 6 times (initial attempt plus 5 retries) - // before exiting due to context cancellation. 
- numFailures := 5 - failures := 0 - - client := mockpbresource.NewResourceServiceClient(t) - client.EXPECT().WatchList(mock.Anything, &pbresource.WatchListRequest{ - Type: pbhcp.LinkType, - NamePrefix: hcpctl.LinkName, - }).RunAndReturn(func(_ context.Context, _ *pbresource.WatchListRequest, _ ...grpc.CallOption) (pbresource.ResourceService_WatchListClient, error) { - if failures < numFailures { - failures++ - return nil, errors.New("unexpectedError") - } - defer cancel() - return mockpbresource.NewResourceService_WatchListClient(t), nil - }).Times(numFailures + 1) - - RunHCPLinkWatcher(ctx, hclog.Default(), client, func(_ context.Context, _ hclog.Logger, _ *pbresource.WatchEvent) {}) -} diff --git a/agent/hcp/manager_lifecycle.go b/agent/hcp/manager_lifecycle.go deleted file mode 100644 index 6b7b6a46dc..0000000000 --- a/agent/hcp/manager_lifecycle.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package hcp - -import ( - "context" - "os" - "path/filepath" - - "github.com/hashicorp/go-hclog" - - "github.com/hashicorp/consul/agent/hcp/bootstrap/constants" - hcpclient "github.com/hashicorp/consul/agent/hcp/client" - "github.com/hashicorp/consul/agent/hcp/config" - hcpctl "github.com/hashicorp/consul/internal/hcp" - pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2" - "github.com/hashicorp/consul/proto-public/pbresource" -) - -// HCPManagerLifecycleFn returns a LinkEventHandler function which will appropriately -// Start and Stop the HCP Manager based on the Link event received. If a link is upserted, -// the HCP Manager is started, and if a link is deleted, the HCP manager is stopped. -func HCPManagerLifecycleFn( - m Manager, - hcpClientFn func(cfg config.CloudConfig) (hcpclient.Client, error), - loadMgmtTokenFn func( - ctx context.Context, logger hclog.Logger, hcpClient hcpclient.Client, dataDir string, - ) (string, error), - cloudConfig config.CloudConfig, - dataDir string, -) LinkEventHandler { - return func(ctx context.Context, logger hclog.Logger, watchEvent *pbresource.WatchEvent) { - // This indicates that a Link was deleted - if watchEvent.GetDelete() != nil { - logger.Debug("HCP Link deleted, stopping HCP manager") - - if dataDir != "" { - hcpConfigDir := filepath.Join(dataDir, constants.SubDir) - logger.Debug("deleting hcp-config dir", "dir", hcpConfigDir) - err := os.RemoveAll(hcpConfigDir) - if err != nil { - logger.Error("failed to delete hcp-config dir", "dir", hcpConfigDir, "err", err) - } - } - - err := m.Stop() - if err != nil { - logger.Error("error stopping HCP manager", "error", err) - } - return - } - - // This indicates that a Link was either created or updated - if watchEvent.GetUpsert() != nil { - logger.Debug("HCP Link upserted, starting manager if not already started") - - res := watchEvent.GetUpsert().GetResource() - var link pbhcp.Link - if err := res.GetData().UnmarshalTo(&link); err != nil { - logger.Error("error unmarshalling link data", "error", err) - return - } - - if validated, reason := hcpctl.IsValidated(res); !validated { - logger.Debug("HCP Link not validated, not starting manager", "reason", reason) - return - } - - // Update the HCP manager configuration with the link values - // Merge the link data with the existing cloud config so that we only overwrite the - // fields that are provided by the link. This ensures that: - // 1. The HCP configuration (i.e., how to connect to HCP) is preserved - // 2. 
The Consul agent's node ID and node name are preserved - newCfg := config.CloudConfig{ - ResourceID: link.ResourceId, - ClientID: link.ClientId, - ClientSecret: link.ClientSecret, - } - mergedCfg := config.Merge(cloudConfig, newCfg) - hcpClient, err := hcpClientFn(mergedCfg) - if err != nil { - logger.Error("error creating HCP client", "error", err) - return - } - - // Load the management token if access is set to read-write. Read-only clusters - // will not have a management token provided by HCP. - var token string - if link.GetAccessLevel() == pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_WRITE { - token, err = loadMgmtTokenFn(ctx, logger, hcpClient, dataDir) - if err != nil { - logger.Error("error loading management token", "error", err) - return - } - } - - mergedCfg.ManagementToken = token - m.UpdateConfig(hcpClient, mergedCfg) - - err = m.Start(ctx) - if err != nil { - logger.Error("error starting HCP manager", "error", err) - } - } - } -} diff --git a/agent/hcp/manager_lifecycle_test.go b/agent/hcp/manager_lifecycle_test.go deleted file mode 100644 index b40a772ab4..0000000000 --- a/agent/hcp/manager_lifecycle_test.go +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package hcp - -import ( - "context" - "errors" - "io" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "google.golang.org/protobuf/types/known/anypb" - - "github.com/hashicorp/go-hclog" - - "github.com/hashicorp/consul/agent/hcp/bootstrap/constants" - hcpclient "github.com/hashicorp/consul/agent/hcp/client" - "github.com/hashicorp/consul/agent/hcp/config" - hcpctl "github.com/hashicorp/consul/internal/hcp" - pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2" - "github.com/hashicorp/consul/proto-public/pbresource" - "github.com/hashicorp/consul/sdk/testutil" -) - -func TestHCPManagerLifecycleFn(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - logger := hclog.New(&hclog.LoggerOptions{Output: io.Discard}) - - mockHCPClient := hcpclient.NewMockClient(t) - mockHcpClientFn := func(_ config.CloudConfig) (hcpclient.Client, error) { - return mockHCPClient, nil - } - - mockLoadMgmtTokenFn := func(ctx context.Context, logger hclog.Logger, hcpClient hcpclient.Client, dataDir string) (string, error) { - return "test-mgmt-token", nil - } - - dataDir := testutil.TempDir(t, "test-link-controller") - err := os.Mkdir(filepath.Join(dataDir, constants.SubDir), os.ModeDir) - require.NoError(t, err) - existingCfg := config.CloudConfig{ - AuthURL: "test.com", - } - - type testCase struct { - mutateLink func(*pbhcp.Link) - mutateUpsertEvent func(*pbresource.WatchEvent_Upsert) - applyMocksAndAssertions func(*testing.T, *MockManager, *pbhcp.Link) - hcpClientFn func(config.CloudConfig) (hcpclient.Client, error) - loadMgmtTokenFn func(context.Context, hclog.Logger, hcpclient.Client, string) (string, error) - } - - testCases := map[string]testCase{ - // HCP manager should be started when link is created and stopped when link is deleted - "Ok": { - applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) { - mgr.EXPECT().Start(mock.Anything).Return(nil).Once() - - expectedCfg := config.CloudConfig{ - ResourceID: link.ResourceId, - ClientID: link.ClientId, - ClientSecret: link.ClientSecret, - AuthURL: "test.com", - ManagementToken: "test-mgmt-token", - } - mgr.EXPECT().UpdateConfig(mockHCPClient, expectedCfg).Once() - - 
mgr.EXPECT().Stop().Return(nil).Once() - }, - }, - // HCP manager should not be updated with management token - "ReadOnly": { - mutateLink: func(link *pbhcp.Link) { - link.AccessLevel = pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_ONLY - }, - applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) { - mgr.EXPECT().Start(mock.Anything).Return(nil).Once() - - expectedCfg := config.CloudConfig{ - ResourceID: link.ResourceId, - ClientID: link.ClientId, - ClientSecret: link.ClientSecret, - AuthURL: "test.com", - ManagementToken: "", - } - mgr.EXPECT().UpdateConfig(mockHCPClient, expectedCfg).Once() - - mgr.EXPECT().Stop().Return(nil).Once() - }, - }, - // HCP manager should not be started or updated if link is not validated - "ValidationError": { - mutateUpsertEvent: func(upsert *pbresource.WatchEvent_Upsert) { - upsert.Resource.Status = map[string]*pbresource.Status{ - hcpctl.StatusKey: { - Conditions: []*pbresource.Condition{hcpctl.ConditionValidatedFailed}, - }, - } - }, - applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) { - mgr.AssertNotCalled(t, "Start", mock.Anything) - mgr.AssertNotCalled(t, "UpdateConfig", mock.Anything, mock.Anything) - mgr.EXPECT().Stop().Return(nil).Once() - }, - }, - "Error_InvalidLink": { - mutateUpsertEvent: func(upsert *pbresource.WatchEvent_Upsert) { - upsert.Resource = nil - }, - applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) { - mgr.AssertNotCalled(t, "Start", mock.Anything) - mgr.AssertNotCalled(t, "UpdateConfig", mock.Anything, mock.Anything) - mgr.EXPECT().Stop().Return(nil).Once() - }, - }, - "Error_HCPManagerStop": { - applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) { - mgr.EXPECT().Start(mock.Anything).Return(nil).Once() - mgr.EXPECT().UpdateConfig(mock.Anything, mock.Anything).Return().Once() - mgr.EXPECT().Stop().Return(errors.New("could not stop HCP manager")).Once() - }, - }, - "Error_CreatingHCPClient": { - applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) { - mgr.AssertNotCalled(t, "Start", mock.Anything) - mgr.AssertNotCalled(t, "UpdateConfig", mock.Anything, mock.Anything) - mgr.EXPECT().Stop().Return(nil).Once() - }, - hcpClientFn: func(_ config.CloudConfig) (hcpclient.Client, error) { - return nil, errors.New("could not create HCP client") - }, - }, - // This should result in the HCP manager not being started - "Error_LoadMgmtToken": { - applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) { - mgr.AssertNotCalled(t, "Start", mock.Anything) - mgr.AssertNotCalled(t, "UpdateConfig", mock.Anything, mock.Anything) - mgr.EXPECT().Stop().Return(nil).Once() - }, - loadMgmtTokenFn: func(ctx context.Context, logger hclog.Logger, hcpClient hcpclient.Client, dataDir string) (string, error) { - return "", errors.New("could not load management token") - }, - }, - "Error_HCPManagerStart": { - applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) { - mgr.EXPECT().Start(mock.Anything).Return(errors.New("could not start HCP manager")).Once() - mgr.EXPECT().UpdateConfig(mock.Anything, mock.Anything).Return().Once() - mgr.EXPECT().Stop().Return(nil).Once() - }, - }, - } - - for name, test := range testCases { - t.Run(name, func(t2 *testing.T) { - mgr := NewMockManager(t2) - - // Set up a link - link := pbhcp.Link{ - ResourceId: "abc", - ClientId: "def", - ClientSecret: "ghi", - AccessLevel: pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_WRITE, - } - - if test.mutateLink 
!= nil { - test.mutateLink(&link) - } - - linkResource, err := anypb.New(&link) - require.NoError(t2, err) - - if test.applyMocksAndAssertions != nil { - test.applyMocksAndAssertions(t2, mgr, &link) - } - - testHcpClientFn := mockHcpClientFn - if test.hcpClientFn != nil { - testHcpClientFn = test.hcpClientFn - } - - testLoadMgmtToken := mockLoadMgmtTokenFn - if test.loadMgmtTokenFn != nil { - testLoadMgmtToken = test.loadMgmtTokenFn - } - - updateManagerLifecycle := HCPManagerLifecycleFn( - mgr, testHcpClientFn, - testLoadMgmtToken, existingCfg, dataDir, - ) - - upsertEvent := &pbresource.WatchEvent_Upsert{ - Resource: &pbresource.Resource{ - Id: &pbresource.ID{ - Name: "global", - Type: pbhcp.LinkType, - }, - Status: map[string]*pbresource.Status{ - hcpctl.StatusKey: { - Conditions: []*pbresource.Condition{hcpctl.ConditionValidatedSuccess}, - }, - }, - Data: linkResource, - }, - } - if test.mutateUpsertEvent != nil { - test.mutateUpsertEvent(upsertEvent) - } - - // Handle upsert event - updateManagerLifecycle(ctx, logger, &pbresource.WatchEvent{ - Event: &pbresource.WatchEvent_Upsert_{ - Upsert: upsertEvent, - }, - }) - - // Handle delete event. This should stop HCP manager - updateManagerLifecycle(ctx, logger, &pbresource.WatchEvent{ - Event: &pbresource.WatchEvent_Delete_{ - Delete: &pbresource.WatchEvent_Delete{}, - }, - }) - - // Ensure hcp-config directory is removed - file := filepath.Join(dataDir, constants.SubDir) - if _, err := os.Stat(file); err == nil || !os.IsNotExist(err) { - require.Fail(t2, "should have removed hcp-config directory") - } - }) - } -} diff --git a/agent/hcp/manager_test.go b/agent/hcp/manager_test.go index 8377379172..75e1e3d284 100644 --- a/agent/hcp/manager_test.go +++ b/agent/hcp/manager_test.go @@ -12,75 +12,15 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "golang.org/x/net/context" - "google.golang.org/protobuf/types/known/anypb" "github.com/hashicorp/go-hclog" hcpclient "github.com/hashicorp/consul/agent/hcp/client" "github.com/hashicorp/consul/agent/hcp/config" "github.com/hashicorp/consul/agent/hcp/scada" - hcpctl "github.com/hashicorp/consul/internal/hcp" - pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2" - "github.com/hashicorp/consul/proto-public/pbresource" "github.com/hashicorp/consul/sdk/testutil" ) -func TestManager_MonitorHCPLink(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - logger := hclog.New(&hclog.LoggerOptions{Output: io.Discard}) - - mgr := NewManager( - ManagerConfig{ - Logger: hclog.New(&hclog.LoggerOptions{Output: io.Discard}), - }, - ) - mockHCPClient := hcpclient.NewMockClient(t) - mockHcpClientFn := func(_ config.CloudConfig) (hcpclient.Client, error) { - return mockHCPClient, nil - } - loadMgmtTokenFn := func(ctx context.Context, logger hclog.Logger, hcpClient hcpclient.Client, dataDir string) (string, error) { - return "test-mgmt-token", nil - } - - require.False(t, mgr.isRunning()) - updateManagerLifecycle := HCPManagerLifecycleFn( - mgr, mockHcpClientFn, - loadMgmtTokenFn, config.CloudConfig{}, "", - ) - - // Set up a link - link := pbhcp.Link{ - ResourceId: "abc", - ClientId: "def", - ClientSecret: "ghi", - AccessLevel: pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_WRITE, - } - linkResource, err := anypb.New(&link) - require.NoError(t, err) - updateManagerLifecycle(ctx, logger, &pbresource.WatchEvent{ - Event: &pbresource.WatchEvent_Upsert_{ - Upsert: &pbresource.WatchEvent_Upsert{ - Resource: &pbresource.Resource{ - Id: 
&pbresource.ID{ - Name: "global", - Type: pbhcp.LinkType, - }, - Status: map[string]*pbresource.Status{ - hcpctl.StatusKey: { - Conditions: []*pbresource.Condition{hcpctl.ConditionValidatedSuccess}, - }, - }, - Data: linkResource, - }, - }, - }, - }) - - // Validate that the HCP manager is started - require.True(t, mgr.isRunning()) -} - func TestManager_Start(t *testing.T) { client := hcpclient.NewMockClient(t) statusF := func(ctx context.Context) (hcpclient.ServerStatus, error) { diff --git a/agent/http.go b/agent/http.go index 506377074a..eb6e186cd8 100644 --- a/agent/http.go +++ b/agent/http.go @@ -6,7 +6,6 @@ package agent import ( "encoding/json" "fmt" - "github.com/hashicorp/go-hclog" "io" "net" "net/http" @@ -20,6 +19,8 @@ import ( "sync/atomic" "time" + "github.com/hashicorp/go-hclog" + "github.com/NYTimes/gziphandler" "github.com/armon/go-metrics" "github.com/armon/go-metrics/prometheus" @@ -348,16 +349,24 @@ func withRemoteAddrHandler(next http.Handler) http.Handler { }) } -// Injects content type explicitly if not already set into response to prevent XSS +// ensureContentTypeHeader injects content-type explicitly if not already set into response to prevent XSS func ensureContentTypeHeader(next http.Handler, logger hclog.Logger) http.Handler { - return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { next.ServeHTTP(resp, req) - val := resp.Header().Get(contentTypeHeader) - if val == "" { - resp.Header().Set(contentTypeHeader, plainContentType) - logger.Debug("warning: content-type header not explicitly set.", "request-path", req.URL) + contentType := api.GetContentType(req) + + if req != nil { + logger.Debug("warning: request content-type is not supported", "request-path", req.URL) + req.Header.Set(contentTypeHeader, contentType) + } + + if resp != nil { + respContentType := resp.Header().Get(contentTypeHeader) + if respContentType == "" || respContentType != contentType { + logger.Debug("warning: response content-type header not explicitly set.", "request-path", req.URL) + resp.Header().Set(contentTypeHeader, contentType) + } } }) } diff --git a/agent/http_test.go b/agent/http_test.go index 497789f689..73a599546a 100644 --- a/agent/http_test.go +++ b/agent/http_test.go @@ -617,7 +617,6 @@ func TestHTTPAPI_DefaultACLPolicy(t *testing.T) { }) } } - func TestHTTPAPIResponseHeaders(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") @@ -646,6 +645,87 @@ func TestHTTPAPIResponseHeaders(t *testing.T) { requireHasHeadersSet(t, a, "/", "text/plain; charset=utf-8") } +func TestHTTPAPIValidateContentTypeHeaders(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + type testcase struct { + name string + endpoint string + method string + requestBody io.Reader + expectedContentType string + } + + cases := []testcase{ + { + name: "snapshot endpoint expect non-default content type", + method: http.MethodPut, + endpoint: "/v1/snapshot", + requestBody: bytes.NewBuffer([]byte("test")), + expectedContentType: "application/octet-stream", + }, + { + name: "kv endpoint expect non-default content type", + method: http.MethodPut, + endpoint: "/v1/kv", + requestBody: bytes.NewBuffer([]byte("test")), + expectedContentType: "application/octet-stream", + }, + { + name: "event/fire endpoint expect non-default content type", + method: http.MethodPut, + endpoint: "/v1/event/fire", + requestBody: bytes.NewBuffer([]byte("test")), + expectedContentType: "application/octet-stream", + }, + { + name: "peering/token endpoint expect
default content type", + method: http.MethodPost, + endpoint: "/v1/peering/token", + requestBody: bytes.NewBuffer([]byte("test")), + expectedContentType: "application/json", + }, + } + + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + a := NewTestAgent(t, "") + defer a.Shutdown() + + requireContentTypeHeadersSet(t, a, tc.method, tc.endpoint, tc.requestBody, tc.expectedContentType) + }) + } +} + +func requireContentTypeHeadersSet(t *testing.T, a *TestAgent, method, path string, body io.Reader, contentType string) { + t.Helper() + + resp := httptest.NewRecorder() + req, _ := http.NewRequest(method, path, body) + a.enableDebug.Store(true) + + a.srv.handler().ServeHTTP(resp, req) + + reqHdrs := req.Header + respHdrs := resp.Header() + + // require request content-type + require.NotEmpty(t, reqHdrs.Get("Content-Type")) + require.Equal(t, contentType, reqHdrs.Get("Content-Type"), + "Request Header Content-Type value incorrect") + + // require response content-type + require.NotEmpty(t, respHdrs.Get("Content-Type")) + require.Equal(t, contentType, respHdrs.Get("Content-Type"), + "Response Header Content-Type value incorrect") +} + func requireHasHeadersSet(t *testing.T, a *TestAgent, path string, contentType string) { t.Helper() @@ -663,7 +743,7 @@ func requireHasHeadersSet(t *testing.T, a *TestAgent, path string, contentType s "X-XSS-Protection header value incorrect") require.Equal(t, contentType, hdrs.Get("Content-Type"), - "") + "Response Content-Type header value incorrect") } func TestUIResponseHeaders(t *testing.T) { @@ -704,7 +784,7 @@ func TestErrorContentTypeHeaderSet(t *testing.T) { `) defer a.Shutdown() - requireHasHeadersSet(t, a, "/fake-path-doesn't-exist", "text/plain; charset=utf-8") + requireHasHeadersSet(t, a, "/fake-path-doesn't-exist", "application/json") } func TestAcceptEncodingGzip(t *testing.T) { diff --git a/agent/proxycfg-glue/internal_service_dump.go b/agent/proxycfg-glue/internal_service_dump.go index d1c701083d..dd8293f781 100644 --- a/agent/proxycfg-glue/internal_service_dump.go +++ b/agent/proxycfg-glue/internal_service_dump.go @@ -81,19 +81,21 @@ func (s *serverInternalServiceDump) Notify(ctx context.Context, req *structs.Ser return 0, nil, err } + totalNodeLength := len(nodes) + aclfilter.New(authz, s.deps.Logger).Filter(&nodes) + raw, err := filter.Execute(nodes) if err != nil { return 0, nil, fmt.Errorf("could not filter local service dump: %w", err) } nodes = raw.(structs.CheckServiceNodes) - aclfilter.New(authz, s.deps.Logger).Filter(&nodes) - return idx, &structs.IndexedCheckServiceNodes{ Nodes: nodes, QueryMeta: structs.QueryMeta{ - Index: idx, - Backend: structs.QueryBackendBlocking, + Index: idx, + Backend: structs.QueryBackendBlocking, + ResultsFilteredByACLs: totalNodeLength != len(nodes), }, }, nil }, diff --git a/agent/proxycfg-glue/internal_service_dump_test.go b/agent/proxycfg-glue/internal_service_dump_test.go index 1eba4c0438..e3d65ac6ae 100644 --- a/agent/proxycfg-glue/internal_service_dump_test.go +++ b/agent/proxycfg-glue/internal_service_dump_test.go @@ -55,6 +55,10 @@ func TestServerInternalServiceDump(t *testing.T) { Service: "web", Kind: structs.ServiceKindTypical, }, + { + Service: "web-deny", + Kind: structs.ServiceKindTypical, + }, { Service: "db", Kind: structs.ServiceKindTypical, @@ -67,14 +71,14 @@ func TestServerInternalServiceDump(t *testing.T) { })) } - authz := newStaticResolver( - policyAuthorizer(t, ` + policyAuth := policyAuthorizer(t, ` service "mgw" { policy = "read" } service 
"web" { policy = "read" } + service "web-deny" { policy = "deny" } service "db" { policy = "read" } node_prefix "node-" { policy = "read" } - `), - ) + `) + authz := newStaticResolver(policyAuth) dataSource := ServerInternalServiceDump(ServerDataSourceDeps{ GetStore: func() Store { return store }, @@ -121,6 +125,42 @@ func TestServerInternalServiceDump(t *testing.T) { result := getEventResult[*structs.IndexedCheckServiceNodes](t, eventCh) require.Empty(t, result.Nodes) }) + + const ( + bexprMatchingUserTokenPermissions = "Service.Service matches `web.*`" + bexpNotMatchingUserTokenPermissions = "Service.Service matches `mgw.*`" + ) + + authz.SwapAuthorizer(policyAuthorizer(t, ` + service "mgw" { policy = "deny" } + service "web" { policy = "read" } + service "web-deny" { policy = "deny" } + service "db" { policy = "read" } + node_prefix "node-" { policy = "read" } + `)) + + t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) { + eventCh := make(chan proxycfg.UpdateEvent) + require.NoError(t, dataSource.Notify(ctx, &structs.ServiceDumpRequest{ + QueryOptions: structs.QueryOptions{Filter: bexprMatchingUserTokenPermissions}, + }, "", eventCh)) + + result := getEventResult[*structs.IndexedCheckServiceNodes](t, eventCh) + require.Len(t, result.Nodes, 1) + require.Equal(t, "web", result.Nodes[0].Service.Service) + require.True(t, result.ResultsFilteredByACLs) + }) + + t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) { + eventCh := make(chan proxycfg.UpdateEvent) + require.NoError(t, dataSource.Notify(ctx, &structs.ServiceDumpRequest{ + QueryOptions: structs.QueryOptions{Filter: bexpNotMatchingUserTokenPermissions}, + }, "", eventCh)) + + result := getEventResult[*structs.IndexedCheckServiceNodes](t, eventCh) + require.Len(t, result.Nodes, 0) + require.True(t, result.ResultsFilteredByACLs) + }) }) } diff --git a/agent/proxycfg/api_gateway.go b/agent/proxycfg/api_gateway.go index eb5d246462..0c44e5bfd4 100644 --- a/agent/proxycfg/api_gateway.go +++ b/agent/proxycfg/api_gateway.go @@ -476,16 +476,12 @@ func (h *handlerAPIGateway) handleRouteConfigUpdate(ctx context.Context, u Updat cancelUpstream() delete(snap.APIGateway.WatchedUpstreams[upstreamID], targetID) delete(snap.APIGateway.WatchedUpstreamEndpoints[upstreamID], targetID) - - if targetUID := NewUpstreamIDFromTargetID(targetID); targetUID.Peer != "" { - snap.APIGateway.PeerUpstreamEndpoints.CancelWatch(targetUID) - snap.APIGateway.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer) - } } cancelDiscoChain() delete(snap.APIGateway.WatchedDiscoveryChains, upstreamID) } + reconcilePeeringWatches(snap.APIGateway.DiscoveryChain, snap.APIGateway.UpstreamConfig, snap.APIGateway.PeeredUpstreams, snap.APIGateway.PeerUpstreamEndpoints, snap.APIGateway.UpstreamPeerTrustBundles) return nil } diff --git a/agent/proxycfg/connect_proxy.go b/agent/proxycfg/connect_proxy.go index 0a8c173792..35c0462cb3 100644 --- a/agent/proxycfg/connect_proxy.go +++ b/agent/proxycfg/connect_proxy.go @@ -380,49 +380,7 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s // // Clean up data // - - peeredChainTargets := make(map[UpstreamID]struct{}) - for _, discoChain := range snap.ConnectProxy.DiscoveryChain { - for _, target := range discoChain.Targets { - if target.Peer == "" { - continue - } - uid := NewUpstreamIDFromTargetID(target.ID) - peeredChainTargets[uid] = 
struct{}{} - } - } - - validPeerNames := make(map[string]struct{}) - - // Iterate through all known endpoints and remove references to upstream IDs that weren't in the update - snap.ConnectProxy.PeerUpstreamEndpoints.ForEachKey(func(uid UpstreamID) bool { - // Peered upstream is explicitly defined in upstream config - if _, ok := snap.ConnectProxy.UpstreamConfig[uid]; ok { - validPeerNames[uid.Peer] = struct{}{} - return true - } - // Peered upstream came from dynamic source of imported services - if _, ok := seenUpstreams[uid]; ok { - validPeerNames[uid.Peer] = struct{}{} - return true - } - // Peered upstream came from a discovery chain target - if _, ok := peeredChainTargets[uid]; ok { - validPeerNames[uid.Peer] = struct{}{} - return true - } - snap.ConnectProxy.PeerUpstreamEndpoints.CancelWatch(uid) - return true - }) - - // Iterate through all known trust bundles and remove references to any unseen peer names - snap.ConnectProxy.UpstreamPeerTrustBundles.ForEachKey(func(peerName PeerName) bool { - if _, ok := validPeerNames[peerName]; !ok { - snap.ConnectProxy.UpstreamPeerTrustBundles.CancelWatch(peerName) - } - return true - }) - + reconcilePeeringWatches(snap.ConnectProxy.DiscoveryChain, snap.ConnectProxy.UpstreamConfig, snap.ConnectProxy.PeeredUpstreams, snap.ConnectProxy.PeerUpstreamEndpoints, snap.ConnectProxy.UpstreamPeerTrustBundles) case u.CorrelationID == intentionUpstreamsID: resp, ok := u.Result.(*structs.IndexedServiceList) if !ok { @@ -490,18 +448,13 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s continue } if _, ok := seenUpstreams[uid]; !ok { - for targetID, cancelFn := range targets { + for _, cancelFn := range targets { cancelFn() - - targetUID := NewUpstreamIDFromTargetID(targetID) - if targetUID.Peer != "" { - snap.ConnectProxy.PeerUpstreamEndpoints.CancelWatch(targetUID) - snap.ConnectProxy.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer) - } } delete(snap.ConnectProxy.WatchedUpstreams, uid) } } + reconcilePeeringWatches(snap.ConnectProxy.DiscoveryChain, snap.ConnectProxy.UpstreamConfig, snap.ConnectProxy.PeeredUpstreams, snap.ConnectProxy.PeerUpstreamEndpoints, snap.ConnectProxy.UpstreamPeerTrustBundles) for uid := range snap.ConnectProxy.WatchedUpstreamEndpoints { if upstream, ok := snap.ConnectProxy.UpstreamConfig[uid]; ok && !upstream.CentrallyConfigured { continue diff --git a/agent/proxycfg/ingress_gateway.go b/agent/proxycfg/ingress_gateway.go index 3ab5828add..0262ffcb37 100644 --- a/agent/proxycfg/ingress_gateway.go +++ b/agent/proxycfg/ingress_gateway.go @@ -171,18 +171,13 @@ func (s *handlerIngressGateway) handleUpdate(ctx context.Context, u UpdateEvent, delete(snap.IngressGateway.WatchedUpstreams[uid], targetID) delete(snap.IngressGateway.WatchedUpstreamEndpoints[uid], targetID) cancelUpstreamFn() - - targetUID := NewUpstreamIDFromTargetID(targetID) - if targetUID.Peer != "" { - snap.IngressGateway.PeerUpstreamEndpoints.CancelWatch(targetUID) - snap.IngressGateway.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer) - } } cancelFn() delete(snap.IngressGateway.WatchedDiscoveryChains, uid) } } + reconcilePeeringWatches(snap.IngressGateway.DiscoveryChain, snap.IngressGateway.UpstreamConfig, snap.IngressGateway.PeeredUpstreams, snap.IngressGateway.PeerUpstreamEndpoints, snap.IngressGateway.UpstreamPeerTrustBundles) if err := s.watchIngressLeafCert(ctx, snap); err != nil { return err diff --git a/agent/proxycfg/state.go b/agent/proxycfg/state.go index b6b9c78f32..d0ae44fbab 100644 --- a/agent/proxycfg/state.go +++ 
b/agent/proxycfg/state.go @@ -13,12 +13,15 @@ import ( "sync/atomic" "time" - "github.com/hashicorp/go-hclog" "golang.org/x/time/rate" + "github.com/hashicorp/go-hclog" + cachetype "github.com/hashicorp/consul/agent/cache-types" + "github.com/hashicorp/consul/agent/proxycfg/internal/watch" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/logging" + "github.com/hashicorp/consul/proto/private/pbpeering" ) const ( @@ -551,3 +554,48 @@ func watchMeshGateway(ctx context.Context, opts gatewayWatchOpts) error { EnterpriseMeta: *structs.DefaultEnterpriseMetaInPartition(opts.key.Partition), }, correlationId, opts.notifyCh) } + +func reconcilePeeringWatches(compiledDiscoveryChains map[UpstreamID]*structs.CompiledDiscoveryChain, upstreams map[UpstreamID]*structs.Upstream, peeredUpstreams map[UpstreamID]struct{}, peerUpstreamEndpoints watch.Map[UpstreamID, structs.CheckServiceNodes], upstreamPeerTrustBundles watch.Map[PeerName, *pbpeering.PeeringTrustBundle]) { + + peeredChainTargets := make(map[UpstreamID]struct{}) + for _, discoChain := range compiledDiscoveryChains { + for _, target := range discoChain.Targets { + if target.Peer == "" { + continue + } + uid := NewUpstreamIDFromTargetID(target.ID) + peeredChainTargets[uid] = struct{}{} + } + } + + validPeerNames := make(map[string]struct{}) + + // Iterate through all known endpoints and remove references to upstream IDs that weren't in the update + peerUpstreamEndpoints.ForEachKey(func(uid UpstreamID) bool { + // Peered upstream is explicitly defined in upstream config + if _, ok := upstreams[uid]; ok { + validPeerNames[uid.Peer] = struct{}{} + return true + } + // Peered upstream came from dynamic source of imported services + if _, ok := peeredUpstreams[uid]; ok { + validPeerNames[uid.Peer] = struct{}{} + return true + } + // Peered upstream came from a discovery chain target + if _, ok := peeredChainTargets[uid]; ok { + validPeerNames[uid.Peer] = struct{}{} + return true + } + peerUpstreamEndpoints.CancelWatch(uid) + return true + }) + + // Iterate through all known trust bundles and remove references to any unseen peer names + upstreamPeerTrustBundles.ForEachKey(func(peerName PeerName) bool { + if _, ok := validPeerNames[peerName]; !ok { + upstreamPeerTrustBundles.CancelWatch(peerName) + } + return true + }) +} diff --git a/agent/proxycfg/upstreams.go b/agent/proxycfg/upstreams.go index 209a3446d9..052e91eb10 100644 --- a/agent/proxycfg/upstreams.go +++ b/agent/proxycfg/upstreams.go @@ -102,6 +102,7 @@ func (s *handlerUpstreams) handleUpdateUpstreams(ctx context.Context, u UpdateEv if err := s.resetWatchesFromChain(ctx, uid, resp.Chain, upstreamsSnapshot); err != nil { return err } + reconcilePeeringWatches(upstreamsSnapshot.DiscoveryChain, upstreamsSnapshot.UpstreamConfig, upstreamsSnapshot.PeeredUpstreams, upstreamsSnapshot.PeerUpstreamEndpoints, upstreamsSnapshot.UpstreamPeerTrustBundles) case strings.HasPrefix(u.CorrelationID, upstreamPeerWatchIDPrefix): resp, ok := u.Result.(*structs.IndexedCheckServiceNodes) @@ -301,12 +302,6 @@ func (s *handlerUpstreams) resetWatchesFromChain( delete(snap.WatchedUpstreams[uid], targetID) delete(snap.WatchedUpstreamEndpoints[uid], targetID) cancelFn() - - targetUID := NewUpstreamIDFromTargetID(targetID) - if targetUID.Peer != "" { - snap.PeerUpstreamEndpoints.CancelWatch(targetUID) - snap.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer) - } } var ( @@ -479,8 +474,8 @@ func (s *handlerUpstreams) watchUpstreamTarget(ctx context.Context, snap *Config var entMeta acl.EnterpriseMeta 
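The new `reconcilePeeringWatches` centralizes logic that was previously duplicated across the connect-proxy, ingress-gateway, and API-gateway handlers: peer endpoint and trust-bundle watches are canceled only after recomputing, from every source (explicit upstream config, imported peered upstreams, and discovery-chain targets), which keys are still referenced. A rough generic sketch of the watch-map pattern this relies on — `watchMap` here is a simplified stand-in, not Consul's `watch.Map`:

```go
package main

import (
	"context"
	"fmt"
)

// watchMap associates each watched key with the cancel func for its watch,
// mirroring the InitWatch/CancelWatch surface used in the handlers above.
type watchMap[K comparable] struct {
	cancels map[K]context.CancelFunc
}

func newWatchMap[K comparable]() *watchMap[K] {
	return &watchMap[K]{cancels: map[K]context.CancelFunc{}}
}

func (m *watchMap[K]) InitWatch(k K, cancel context.CancelFunc) {
	m.cancels[k] = cancel
}

func (m *watchMap[K]) CancelWatch(k K) {
	if cancel, ok := m.cancels[k]; ok {
		cancel()
		delete(m.cancels, k)
	}
}

// reconcile cancels every watch whose key is absent from the valid set,
// where validity is recomputed from all references on each call.
func (m *watchMap[K]) reconcile(valid map[K]struct{}) {
	for k := range m.cancels {
		if _, ok := valid[k]; !ok {
			m.CancelWatch(k)
		}
	}
}

func main() {
	m := newWatchMap[string]()
	_, cancel1 := context.WithCancel(context.Background())
	_, cancel2 := context.WithCancel(context.Background())
	m.InitWatch("peer1", cancel1)
	m.InitWatch("peer2", cancel2)

	// Only peer1 is still referenced by some upstream or chain target.
	m.reconcile(map[string]struct{}{"peer1": {}})
	fmt.Println(len(m.cancels)) // 1: peer2's watch canceled, peer1's kept
}
```

Because validity is recomputed over all sources on every reconcile, a watch shared by two targets survives when only one of them goes away — the peered-upstream cancellation bug this change fixes.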
entMeta.Merge(opts.entMeta) - ctx, cancel := context.WithCancel(ctx) - err := s.dataSources.Health.Notify(ctx, &structs.ServiceSpecificRequest{ + peerCtx, cancel := context.WithCancel(ctx) + err := s.dataSources.Health.Notify(peerCtx, &structs.ServiceSpecificRequest{ PeerName: opts.peer, Datacenter: opts.datacenter, QueryOptions: structs.QueryOptions{ @@ -506,25 +501,25 @@ func (s *handlerUpstreams) watchUpstreamTarget(ctx context.Context, snap *Config return nil } - if ok := snap.PeerUpstreamEndpoints.IsWatched(uid); !ok { + if !snap.PeerUpstreamEndpoints.IsWatched(uid) { snap.PeerUpstreamEndpoints.InitWatch(uid, cancel) } - // Check whether a watch for this peer exists to avoid duplicates. - if ok := snap.UpstreamPeerTrustBundles.IsWatched(uid.Peer); !ok { - peerCtx, cancel := context.WithCancel(ctx) - if err := s.dataSources.TrustBundle.Notify(peerCtx, &cachetype.TrustBundleReadRequest{ + + if !snap.UpstreamPeerTrustBundles.IsWatched(uid.Peer) { + peerCtx2, cancel2 := context.WithCancel(ctx) + if err := s.dataSources.TrustBundle.Notify(peerCtx2, &cachetype.TrustBundleReadRequest{ Request: &pbpeering.TrustBundleReadRequest{ Name: uid.Peer, Partition: uid.PartitionOrDefault(), }, QueryOptions: structs.QueryOptions{Token: s.token}, }, peerTrustBundleIDPrefix+uid.Peer, s.ch); err != nil { - cancel() + cancel2() return fmt.Errorf("error while watching trust bundle for peer %q: %w", uid.Peer, err) } - snap.UpstreamPeerTrustBundles.InitWatch(uid.Peer, cancel) + snap.UpstreamPeerTrustBundles.InitWatch(uid.Peer, cancel2) } return nil diff --git a/agent/routine-leak-checker/leak_test.go b/agent/routine-leak-checker/leak_test.go index f6b3c2a749..53e1e1ea42 100644 --- a/agent/routine-leak-checker/leak_test.go +++ b/agent/routine-leak-checker/leak_test.go @@ -64,9 +64,7 @@ func setupPrimaryServer(t *testing.T) *agent.TestAgent { config := ` server = true - datacenter = "primary" - primary_datacenter = "primary" - + datacenter = "primary" connect { enabled = true } diff --git a/agent/structs/acl.go b/agent/structs/acl.go index 579e8d231e..e4ced5e6c1 100644 --- a/agent/structs/acl.go +++ b/agent/structs/acl.go @@ -800,6 +800,11 @@ func (policies ACLPolicies) resolveWithCache(cache *ACLCaches, entConf *acl.Conf continue } + //pulling from the cache, we don't want to break any rules that are already in the cache + if entConf == nil { + entConf = &acl.Config{} + } + entConf.WarnOnDuplicateKey = true p, err := acl.NewPolicyFromSource(policy.Rules, entConf, policy.EnterprisePolicyMeta()) if err != nil { return nil, fmt.Errorf("failed to parse %q: %v", policy.Name, err) diff --git a/agent/structs/acl_test.go b/agent/structs/acl_test.go index e1fb35263b..0e6878e612 100644 --- a/agent/structs/acl_test.go +++ b/agent/structs/acl_test.go @@ -403,7 +403,7 @@ func TestStructs_ACLPolicies_resolveWithCache(t *testing.T) { ID: "5d5653a1-2c2b-4b36-b083-fc9f1398eb7b", Name: "policy1", Description: "policy1", - Rules: `node_prefix "" { policy = "read" }`, + Rules: `node_prefix "" { policy = "read", policy = "read", },`, RaftIndex: RaftIndex{ CreateIndex: 1, ModifyIndex: 2, @@ -413,7 +413,7 @@ func TestStructs_ACLPolicies_resolveWithCache(t *testing.T) { ID: "b35541f0-a88a-48da-bc66-43553c60b628", Name: "policy2", Description: "policy2", - Rules: `agent_prefix "" { policy = "read" }`, + Rules: `agent_prefix "" { policy = "read" } `, RaftIndex: RaftIndex{ CreateIndex: 3, ModifyIndex: 4, @@ -433,7 +433,8 @@ func TestStructs_ACLPolicies_resolveWithCache(t *testing.T) { ID: "8bf38965-95e5-4e86-9be7-f6070cc0708b", Name: 
"policy4", Description: "policy4", - Rules: `service_prefix "" { policy = "read" }`, + //test should still pass even with the duplicate key since its resolving the cache + Rules: `service_prefix "" { policy = "read" policy = "read" }`, RaftIndex: RaftIndex{ CreateIndex: 7, ModifyIndex: 8, diff --git a/agent/xds/clusters.go b/agent/xds/clusters.go index 1d69f804c7..244585dfdf 100644 --- a/agent/xds/clusters.go +++ b/agent/xds/clusters.go @@ -1824,13 +1824,15 @@ func configureClusterWithHostnames( cluster.DnsRefreshRate = durationpb.New(rate) cluster.DnsLookupFamily = envoy_cluster_v3.Cluster_V4_ONLY + envoyMaxEndpoints := 1 discoveryType := envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_LOGICAL_DNS} if dnsDiscoveryType == "strict_dns" { discoveryType.Type = envoy_cluster_v3.Cluster_STRICT_DNS + envoyMaxEndpoints = len(hostnameEndpoints) } cluster.ClusterDiscoveryType = &discoveryType - endpoints := make([]*envoy_endpoint_v3.LbEndpoint, 0, 1) + endpoints := make([]*envoy_endpoint_v3.LbEndpoint, 0, envoyMaxEndpoints) uniqueHostnames := make(map[string]bool) var ( @@ -1848,12 +1850,15 @@ func configureClusterWithHostnames( continue } - if len(endpoints) == 0 { + if len(endpoints) < envoyMaxEndpoints { endpoints = append(endpoints, makeLbEndpoint(addr, port, health, weight)) hostname = addr idx = i - break + + if len(endpoints) == envoyMaxEndpoints { + break + } } } @@ -1867,8 +1872,8 @@ func configureClusterWithHostnames( endpoints = append(endpoints, fallback) } - if len(uniqueHostnames) > 1 { - logger.Warn(fmt.Sprintf("service contains instances with more than one unique hostname; only %q be resolved by Envoy", hostname), + if len(uniqueHostnames) > 1 && envoyMaxEndpoints == 1 { + logger.Warn(fmt.Sprintf("service contains instances with more than one unique hostname; only %q will be resolved by Envoy", hostname), "dc", dc, "service", service.String()) } diff --git a/api/api.go b/api/api.go index d4d853d5d4..27af1ea569 100644 --- a/api/api.go +++ b/api/api.go @@ -1087,8 +1087,23 @@ func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) { if err != nil { return 0, nil, err } + + contentType := GetContentType(req) + + if req != nil { + req.Header.Set(contentTypeHeader, contentType) + } + start := time.Now() resp, err := c.config.HttpClient.Do(req) + + if resp != nil { + respContentType := resp.Header.Get(contentTypeHeader) + if respContentType == "" || respContentType != contentType { + resp.Header.Set(contentTypeHeader, contentType) + } + } + diff := time.Since(start) return diff, resp, err } diff --git a/api/api_test.go b/api/api_test.go index e8a03f7218..9a3ed7374c 100644 --- a/api/api_test.go +++ b/api/api_test.go @@ -935,11 +935,11 @@ func TestAPI_Headers(t *testing.T) { _, _, err = kv.Get("test-headers", nil) require.NoError(t, err) - require.Equal(t, "", request.Header.Get("Content-Type")) + require.Equal(t, "application/json", request.Header.Get("Content-Type")) _, err = kv.Delete("test-headers", nil) require.NoError(t, err) - require.Equal(t, "", request.Header.Get("Content-Type")) + require.Equal(t, "application/json", request.Header.Get("Content-Type")) err = c.Snapshot().Restore(nil, strings.NewReader("foo")) require.Error(t, err) diff --git a/api/content_type.go b/api/content_type.go new file mode 100644 index 0000000000..37c8cf60aa --- /dev/null +++ b/api/content_type.go @@ -0,0 +1,81 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "net/http" + "strings" +) + +const ( + contentTypeHeader = "Content-Type" + plainContentType = "text/plain; charset=utf-8" + octetStream = "application/octet-stream" + jsonContentType = "application/json" // Default content type +) + +// ContentTypeRule defines a rule for determining the content type of an HTTP request. +// This rule is based on the combination of the HTTP path, method, and the desired content type. +type ContentTypeRule struct { + path string + httpMethod string + contentType string +} + +var ContentTypeRules = []ContentTypeRule{ + { + path: "/v1/snapshot", + httpMethod: http.MethodPut, + contentType: octetStream, + }, + { + path: "/v1/kv", + httpMethod: http.MethodPut, + contentType: octetStream, + }, + { + path: "/v1/event/fire", + httpMethod: http.MethodPut, + contentType: octetStream, + }, +} + +// GetContentType returns the content type for a request. +// This function is used as routing logic or middleware to determine and enforce +// the appropriate content type for HTTP requests. +func GetContentType(req *http.Request) string { + reqContentType := req.Header.Get(contentTypeHeader) + + if isIndexPage(req) { + return plainContentType + } + + // For GET, DELETE, or internal API paths, ensure a valid Content-Type is returned. + if req.Method == http.MethodGet || req.Method == http.MethodDelete || strings.HasPrefix(req.URL.Path, "/v1/internal") { + if reqContentType == "" { + // Default to JSON Content-Type if no Content-Type is provided. + return jsonContentType + } + // Return the provided Content-Type if it exists. + return reqContentType + } + + for _, rule := range ContentTypeRules { + if matchesRule(req, rule) { + return rule.contentType + } + } + return jsonContentType +} + +// matchesRule checks if a request matches a content type rule +func matchesRule(req *http.Request, rule ContentTypeRule) bool { + return strings.HasPrefix(req.URL.Path, rule.path) && + (rule.httpMethod == "" || req.Method == rule.httpMethod) +} + +// isIndexPage checks if the request is for the index page +func isIndexPage(req *http.Request) bool { + return req.URL.Path == "/" || req.URL.Path == "/ui" +} diff --git a/api/go.mod b/api/go.mod index 715a594dcf..8df6a0aaa1 100644 --- a/api/go.mod +++ b/api/go.mod @@ -5,6 +5,7 @@ go 1.19 replace github.com/hashicorp/consul/sdk => ../sdk retract ( + v1.29.5 // cut from incorrect branch v1.28.0 // tag was mutated v1.27.1 // tag was mutated v1.21.2 // tag was mutated diff --git a/envoyextensions/xdscommon/ENVOY_VERSIONS b/envoyextensions/xdscommon/ENVOY_VERSIONS index 884f305732..b1ad88432c 100644 --- a/envoyextensions/xdscommon/ENVOY_VERSIONS +++ b/envoyextensions/xdscommon/ENVOY_VERSIONS @@ -8,7 +8,7 @@ # # See https://www.consul.io/docs/connect/proxies/envoy#supported-versions for more information on Consul's Envoy # version support.
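Since `GetContentType` is exported from the `api` package, its routing behavior can be exercised directly. A small usage sketch against the function added above (the endpoints and address are just examples):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/hashicorp/consul/api"
)

func main() {
	// A KV write matches the /v1/kv PUT rule and maps to octet-stream.
	put, _ := http.NewRequest(http.MethodPut, "http://127.0.0.1:8500/v1/kv/web/config", nil)
	// An untyped GET falls through to the JSON default.
	get, _ := http.NewRequest(http.MethodGet, "http://127.0.0.1:8500/v1/catalog/services", nil)

	fmt.Println(api.GetContentType(put)) // application/octet-stream
	fmt.Println(api.GetContentType(get)) // application/json
}
```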
-1.31.2 -1.30.6 -1.29.9 +1.31.4 +1.30.8 +1.29.11 1.28.7 diff --git a/go.mod b/go.mod index 2cdc8bbaab..1e436fb00c 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( github.com/go-jose/go-jose/v3 v3.0.3 github.com/go-openapi/runtime v0.26.2 github.com/go-openapi/strfmt v0.21.10 - github.com/google/go-cmp v0.5.9 + github.com/google/go-cmp v0.6.0 github.com/google/gofuzz v1.2.0 github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 github.com/google/tcpproxy v0.0.0-20180808230851-dfa16c61dad2 @@ -68,7 +68,7 @@ require ( github.com/hashicorp/go-version v1.2.1 github.com/hashicorp/golang-lru v0.5.4 github.com/hashicorp/hcdiag v0.5.1 - github.com/hashicorp/hcl v1.0.0 + github.com/hashicorp/hcl v1.0.1-vault-7 github.com/hashicorp/hcl/v2 v2.14.1 github.com/hashicorp/hcp-scada-provider v0.2.4 github.com/hashicorp/hcp-sdk-go v0.80.0 @@ -114,12 +114,12 @@ require ( go.opentelemetry.io/otel/sdk/metric v0.39.0 go.opentelemetry.io/proto/otlp v1.0.0 go.uber.org/goleak v1.1.10 - golang.org/x/crypto v0.22.0 + golang.org/x/crypto v0.31.0 golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 - golang.org/x/net v0.24.0 + golang.org/x/net v0.25.0 golang.org/x/oauth2 v0.15.0 - golang.org/x/sync v0.4.0 - golang.org/x/sys v0.20.0 + golang.org/x/sync v0.10.0 + golang.org/x/sys v0.28.0 golang.org/x/time v0.3.0 google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 google.golang.org/grpc v1.58.3 @@ -186,7 +186,7 @@ require ( github.com/go-openapi/validate v0.22.4 // indirect github.com/go-ozzo/ozzo-validation v3.6.0+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/golang-jwt/jwt/v4 v4.5.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.4 // indirect @@ -263,10 +263,10 @@ require ( go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/otel/trace v1.17.0 // indirect golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect - golang.org/x/mod v0.13.0 // indirect - golang.org/x/term v0.19.0 // indirect - golang.org/x/text v0.14.0 // indirect - golang.org/x/tools v0.14.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect google.golang.org/api v0.126.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 // indirect diff --git a/go.sum b/go.sum index fd2246257f..26849a802d 100644 --- a/go.sum +++ b/go.sum @@ -287,8 +287,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= +github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 
github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= @@ -348,8 +348,9 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -488,8 +489,9 @@ github.com/hashicorp/golang-lru/v2 v2.0.0 h1:Lf+9eD8m5pncvHAOCQj49GSN6aQI8XGfI5O github.com/hashicorp/golang-lru/v2 v2.0.0/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcdiag v0.5.1 h1:KZcx9xzRfEOQ2OMbwPxVvHyXwLLRqYpSHxCEOtHfQ6w= github.com/hashicorp/hcdiag v0.5.1/go.mod h1:RMC2KkffN9uJ+5mFSaL67ZFVj4CDeetPF2d/53XpwXo= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I= +github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= github.com/hashicorp/hcl/v2 v2.14.1 h1:x0BpjfZ+CYdbiz+8yZTQ+gdLO7IXvOut7Da+XJayx34= github.com/hashicorp/hcl/v2 v2.14.1/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0= github.com/hashicorp/hcp-scada-provider v0.2.4 h1:XvctVEd4VqWVlqN1VA4vIhJANstZrc4gd2oCfrFLWZc= @@ -919,8 +921,8 @@ golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -961,8 +963,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= 
-golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= -golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180611182652-db08ff08e862/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1013,8 +1015,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1045,8 +1047,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= -golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1126,15 +1128,15 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1147,8 +1149,9 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1214,8 +1217,8 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/internal/hcp/exports.go b/internal/hcp/exports.go deleted file mode 100644 index 18ede30d27..0000000000 --- a/internal/hcp/exports.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package hcp - -import ( - "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/internal/hcp/internal/controllers" - "github.com/hashicorp/consul/internal/hcp/internal/controllers/link" - "github.com/hashicorp/consul/internal/hcp/internal/types" - "github.com/hashicorp/consul/internal/resource" -) - -// RegisterTypes adds all resource types within the "hcp" API group -// to the given type registry -func RegisterTypes(r resource.Registry) { - types.Register(r) -} - -type ControllerDependencies = controllers.Dependencies - -var IsValidated = link.IsValidated -var LinkName = types.LinkName - -// RegisterControllers registers controllers for the catalog types with -// the given controller Manager. -func RegisterControllers(mgr *controller.Manager, deps ControllerDependencies) { - controllers.Register(mgr, deps) -} - -// Needed for testing -var StatusKey = link.StatusKey -var ConditionValidatedSuccess = link.ConditionValidatedSuccess -var ConditionValidatedFailed = link.ConditionValidatedFailed diff --git a/internal/hcp/internal/controllers/link/controller.go b/internal/hcp/internal/controllers/link/controller.go deleted file mode 100644 index 77c439f5cb..0000000000 --- a/internal/hcp/internal/controllers/link/controller.go +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package link - -import ( - "context" - "crypto/tls" - "strings" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/types/known/anypb" - - gnmmod "github.com/hashicorp/hcp-sdk-go/clients/cloud-global-network-manager-service/preview/2022-02-15/models" - - hcpclient "github.com/hashicorp/consul/agent/hcp/client" - "github.com/hashicorp/consul/agent/hcp/config" - "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/internal/hcp/internal/types" - "github.com/hashicorp/consul/internal/storage" - pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2" - "github.com/hashicorp/consul/proto-public/pbresource" -) - -// HCPClientFn is a function that can be used to create an HCP client from a Link object. -// This function type should be passed to a LinkController in order to tell it how to make a client from -// a Link. For normal use, DefaultHCPClientFn should be used, but tests can substitute in a function that creates a -// mock client. -type HCPClientFn func(config.CloudConfig) (hcpclient.Client, error) - -var DefaultHCPClientFn HCPClientFn = func(cfg config.CloudConfig) (hcpclient.Client, error) { - hcpClient, err := hcpclient.NewClient(cfg) - if err != nil { - return nil, err - } - return hcpClient, nil -} - -func LinkController( - hcpClientFn HCPClientFn, - cfg config.CloudConfig, -) *controller.Controller { - return controller.NewController("link", pbhcp.LinkType). - WithInitializer( - &linkInitializer{ - cloudConfig: cfg, - }, - ). 
- WithReconciler( - &linkReconciler{ - hcpClientFn: hcpClientFn, - cloudConfig: cfg, - }, - ) -} - -type linkReconciler struct { - hcpClientFn HCPClientFn - cloudConfig config.CloudConfig -} - -func hcpAccessLevelToConsul(level *gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevel) pbhcp.AccessLevel { - if level == nil { - return pbhcp.AccessLevel_ACCESS_LEVEL_UNSPECIFIED - } - - switch *level { - case gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevelCONSULACCESSLEVELUNSPECIFIED: - return pbhcp.AccessLevel_ACCESS_LEVEL_UNSPECIFIED - case gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevelCONSULACCESSLEVELGLOBALREADWRITE: - return pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_WRITE - case gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevelCONSULACCESSLEVELGLOBALREADONLY: - return pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_ONLY - default: - return pbhcp.AccessLevel_ACCESS_LEVEL_UNSPECIFIED - } -} - -func (r *linkReconciler) Reconcile(ctx context.Context, rt controller.Runtime, req controller.Request) error { - // The runtime is passed by value so replacing it here for the remainder of this - // reconciliation request processing will not affect future invocations. - rt.Logger = rt.Logger.With("resource-id", req.ID, "controller", StatusKey) - - rt.Logger.Trace("reconciling link") - - rsp, err := rt.Client.Read(ctx, &pbresource.ReadRequest{Id: req.ID}) - switch { - case status.Code(err) == codes.NotFound: - rt.Logger.Trace("link has been deleted") - return nil - case err != nil: - rt.Logger.Error("the resource service has returned an unexpected error", "error", err) - return err - } - - res := rsp.Resource - var link pbhcp.Link - if err := res.Data.UnmarshalTo(&link); err != nil { - rt.Logger.Error("error unmarshalling link data", "error", err) - return err - } - - newStatus := &pbresource.Status{ - ObservedGeneration: res.Generation, - Conditions: []*pbresource.Condition{}, - } - defer writeStatusIfNotEqual(ctx, rt, res, newStatus) - newStatus.Conditions = append(newStatus.Conditions, ConditionValidatedSuccess) - - // Merge the link data with the existing cloud config so that we only overwrite the - // fields that are provided by the link. This ensures that: - // 1. The HCP configuration (i.e., how to connect to HCP) is preserved - // 2. 
The Consul agent's node ID and node name are preserved - newCfg := CloudConfigFromLink(&link) - cfg := config.Merge(r.cloudConfig, newCfg) - hcpClient, err := r.hcpClientFn(cfg) - if err != nil { - rt.Logger.Error("error creating HCP client", "error", err) - return err - } - - // Sync cluster data from HCP - cluster, err := hcpClient.GetCluster(ctx) - if err != nil { - rt.Logger.Error("error querying HCP for cluster", "error", err) - condition := linkingFailedCondition(err) - newStatus.Conditions = append(newStatus.Conditions, condition) - return err - } - accessLevel := hcpAccessLevelToConsul(cluster.AccessLevel) - - if link.HcpClusterUrl != cluster.HCPPortalURL || - link.AccessLevel != accessLevel { - - link.HcpClusterUrl = cluster.HCPPortalURL - link.AccessLevel = accessLevel - - updatedData, err := anypb.New(&link) - if err != nil { - rt.Logger.Error("error marshalling link data", "error", err) - return err - } - _, err = rt.Client.Write( - ctx, &pbresource.WriteRequest{Resource: &pbresource.Resource{ - Id: &pbresource.ID{ - Name: types.LinkName, - Type: pbhcp.LinkType, - }, - Metadata: res.Metadata, - Data: updatedData, - }}, - ) - if err != nil { - rt.Logger.Error("error updating link", "error", err) - return err - } - } - - newStatus.Conditions = append(newStatus.Conditions, ConditionLinked(link.ResourceId)) - - return writeStatusIfNotEqual(ctx, rt, res, newStatus) -} - -type linkInitializer struct { - cloudConfig config.CloudConfig -} - -func (i *linkInitializer) Initialize(ctx context.Context, rt controller.Runtime) error { - if !i.cloudConfig.IsConfigured() { - return nil - } - - // Construct a link resource to reflect the configuration - data, err := anypb.New( - &pbhcp.Link{ - ResourceId: i.cloudConfig.ResourceID, - ClientId: i.cloudConfig.ClientID, - ClientSecret: i.cloudConfig.ClientSecret, - }, - ) - if err != nil { - return err - } - - // Create the link resource for a configuration-based link - _, err = rt.Client.Write( - ctx, - &pbresource.WriteRequest{ - Resource: &pbresource.Resource{ - Id: &pbresource.ID{ - Name: types.LinkName, - Type: pbhcp.LinkType, - }, - Metadata: map[string]string{ - types.MetadataSourceKey: types.MetadataSourceConfig, - }, - Data: data, - }, - }, - ) - if err != nil { - if strings.Contains(err.Error(), storage.ErrWrongUid.Error()) || - strings.Contains(err.Error(), "leader unknown") { - // If the error is likely ignorable and could eventually resolve itself, - // log it as TRACE rather than ERROR. 
- rt.Logger.Trace("error initializing controller", "error", err) - } else { - rt.Logger.Error("error initializing controller", "error", err) - } - return err - } - - return nil -} - -func CloudConfigFromLink(link *pbhcp.Link) config.CloudConfig { - var cfg config.CloudConfig - if link == nil { - return cfg - } - cfg = config.CloudConfig{ - ResourceID: link.GetResourceId(), - ClientID: link.GetClientId(), - ClientSecret: link.GetClientSecret(), - } - if link.GetHcpConfig() != nil { - cfg.AuthURL = link.GetHcpConfig().GetAuthUrl() - cfg.ScadaAddress = link.GetHcpConfig().GetScadaAddress() - cfg.Hostname = link.GetHcpConfig().GetApiAddress() - cfg.TLSConfig = &tls.Config{InsecureSkipVerify: link.GetHcpConfig().GetTlsInsecureSkipVerify()} - } - return cfg -} diff --git a/internal/hcp/internal/controllers/link/controller_test.go b/internal/hcp/internal/controllers/link/controller_test.go deleted file mode 100644 index fc23c3c627..0000000000 --- a/internal/hcp/internal/controllers/link/controller_test.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package link - -import ( - "context" - "fmt" - "testing" - - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - gnmmod "github.com/hashicorp/hcp-sdk-go/clients/cloud-global-network-manager-service/preview/2022-02-15/models" - - svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" - hcpclient "github.com/hashicorp/consul/agent/hcp/client" - "github.com/hashicorp/consul/agent/hcp/config" - "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/internal/hcp/internal/types" - rtest "github.com/hashicorp/consul/internal/resource/resourcetest" - pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2" - "github.com/hashicorp/consul/proto-public/pbresource" - "github.com/hashicorp/consul/sdk/testutil" -) - -type controllerSuite struct { - suite.Suite - - ctx context.Context - client *rtest.Client - rt controller.Runtime - - tenancies []*pbresource.Tenancy -} - -func mockHcpClientFn(t *testing.T) (*hcpclient.MockClient, HCPClientFn) { - mockClient := hcpclient.NewMockClient(t) - - mockClientFunc := func(config config.CloudConfig) (hcpclient.Client, error) { - return mockClient, nil - } - - return mockClient, mockClientFunc -} - -func (suite *controllerSuite) SetupTest() { - suite.ctx = testutil.TestContext(suite.T()) - suite.tenancies = rtest.TestTenancies() - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(types.Register). - WithTenancies(suite.tenancies...). 
- Run(suite.T()) - - suite.rt = controller.Runtime{ - Client: client, - Logger: testutil.Logger(suite.T()), - } - suite.client = rtest.NewClient(client) -} - -func TestLinkController(t *testing.T) { - suite.Run(t, new(controllerSuite)) -} - -func (suite *controllerSuite) deleteResourceFunc(id *pbresource.ID) func() { - return func() { - suite.client.MustDelete(suite.T(), id) - suite.client.WaitForDeletion(suite.T(), id) - } -} - -func (suite *controllerSuite) TestController_Ok() { - // Run the controller manager - mgr := controller.NewManager(suite.client, suite.rt.Logger) - mockClient, mockClientFn := mockHcpClientFn(suite.T()) - readWrite := gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevelCONSULACCESSLEVELGLOBALREADWRITE - mockClient.EXPECT().GetCluster(mock.Anything).Return(&hcpclient.Cluster{ - HCPPortalURL: "http://test.com", - AccessLevel: &readWrite, - }, nil) - - mgr.Register(LinkController( - mockClientFn, - config.CloudConfig{}, - )) - mgr.SetRaftLeader(true) - go mgr.Run(suite.ctx) - - linkData := &pbhcp.Link{ - ClientId: "abc", - ClientSecret: "abc", - ResourceId: types.GenerateTestResourceID(suite.T()), - } - - link := rtest.Resource(pbhcp.LinkType, "global"). - WithData(suite.T(), linkData). - Write(suite.T(), suite.client) - - suite.T().Cleanup(suite.deleteResourceFunc(link.Id)) - - suite.client.WaitForStatusCondition(suite.T(), link.Id, StatusKey, ConditionLinked(linkData.ResourceId)) - var updatedLink pbhcp.Link - updatedLinkResource := suite.client.WaitForNewVersion(suite.T(), link.Id, link.Version) - require.NoError(suite.T(), updatedLinkResource.Data.UnmarshalTo(&updatedLink)) - require.Equal(suite.T(), "http://test.com", updatedLink.HcpClusterUrl) - require.Equal(suite.T(), pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_WRITE, updatedLink.AccessLevel) -} - -func (suite *controllerSuite) TestController_Initialize() { - // Run the controller manager with a configured link - mgr := controller.NewManager(suite.client, suite.rt.Logger) - - mockClient, mockClientFn := mockHcpClientFn(suite.T()) - readOnly := gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevelCONSULACCESSLEVELGLOBALREADONLY - mockClient.EXPECT().GetCluster(mock.Anything).Return(&hcpclient.Cluster{ - HCPPortalURL: "http://test.com", - AccessLevel: &readOnly, - }, nil) - - cloudCfg := config.CloudConfig{ - ClientID: "client-id-abc", - ClientSecret: "client-secret-abc", - ResourceID: types.GenerateTestResourceID(suite.T()), - } - - mgr.Register(LinkController( - mockClientFn, - cloudCfg, - )) - mgr.SetRaftLeader(true) - go mgr.Run(suite.ctx) - - // Wait for link to be created by initializer - id := &pbresource.ID{ - Type: pbhcp.LinkType, - Name: types.LinkName, - } - suite.T().Cleanup(suite.deleteResourceFunc(id)) - r := suite.client.WaitForResourceExists(suite.T(), id) - - // Check that created link has expected values - var link pbhcp.Link - err := r.Data.UnmarshalTo(&link) - require.NoError(suite.T(), err) - - require.Equal(suite.T(), cloudCfg.ResourceID, link.ResourceId) - require.Equal(suite.T(), cloudCfg.ClientID, link.ClientId) - require.Equal(suite.T(), cloudCfg.ClientSecret, link.ClientSecret) - require.Equal(suite.T(), types.MetadataSourceConfig, r.Metadata[types.MetadataSourceKey]) - - // Wait for link to be connected successfully - suite.client.WaitForStatusCondition(suite.T(), id, StatusKey, ConditionLinked(link.ResourceId)) -} - -func (suite *controllerSuite) TestController_GetClusterError() { - type testCase struct { - expectErr error - expectCondition 
*pbresource.Condition - } - tt := map[string]testCase{ - "unexpected": { - expectErr: fmt.Errorf("error"), - expectCondition: ConditionFailed, - }, - "unauthorized": { - expectErr: hcpclient.ErrUnauthorized, - expectCondition: ConditionUnauthorized, - }, - "forbidden": { - expectErr: hcpclient.ErrForbidden, - expectCondition: ConditionForbidden, - }, - } - - for name, tc := range tt { - suite.T().Run(name, func(t *testing.T) { - // Run the controller manager - mgr := controller.NewManager(suite.client, suite.rt.Logger) - mockClient, mockClientFunc := mockHcpClientFn(t) - mockClient.EXPECT().GetCluster(mock.Anything).Return(nil, tc.expectErr) - - mgr.Register(LinkController( - mockClientFunc, - config.CloudConfig{}, - )) - - mgr.SetRaftLeader(true) - ctx, cancel := context.WithCancel(suite.ctx) - t.Cleanup(cancel) - go mgr.Run(ctx) - - linkData := &pbhcp.Link{ - ClientId: "abc", - ClientSecret: "abc", - ResourceId: types.GenerateTestResourceID(t), - } - link := rtest.Resource(pbhcp.LinkType, "global"). - WithData(t, linkData). - Write(t, suite.client) - - t.Cleanup(suite.deleteResourceFunc(link.Id)) - - suite.client.WaitForStatusCondition(t, link.Id, StatusKey, tc.expectCondition) - }) - } -} - -func Test_hcpAccessModeToConsul(t *testing.T) { - type testCase struct { - hcpAccessLevel *gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevel - consulAccessLevel pbhcp.AccessLevel - } - tt := map[string]testCase{ - "unspecified": { - hcpAccessLevel: func() *gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevel { - t := gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevelCONSULACCESSLEVELUNSPECIFIED - return &t - }(), - consulAccessLevel: pbhcp.AccessLevel_ACCESS_LEVEL_UNSPECIFIED, - }, - "invalid": { - hcpAccessLevel: nil, - consulAccessLevel: pbhcp.AccessLevel_ACCESS_LEVEL_UNSPECIFIED, - }, - "read_only": { - hcpAccessLevel: func() *gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevel { - t := gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevelCONSULACCESSLEVELGLOBALREADONLY - return &t - }(), - consulAccessLevel: pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_ONLY, - }, - "read_write": { - hcpAccessLevel: func() *gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevel { - t := gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevelCONSULACCESSLEVELGLOBALREADWRITE - return &t - }(), - consulAccessLevel: pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_WRITE, - }, - } - for name, tc := range tt { - t.Run(name, func(t *testing.T) { - accessLevel := hcpAccessLevelToConsul(tc.hcpAccessLevel) - require.Equal(t, tc.consulAccessLevel, accessLevel) - }) - } -} diff --git a/internal/hcp/internal/controllers/link/status.go b/internal/hcp/internal/controllers/link/status.go deleted file mode 100644 index 88210c4a49..0000000000 --- a/internal/hcp/internal/controllers/link/status.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package link - -import ( - "context" - "errors" - "fmt" - - "github.com/hashicorp/consul/agent/hcp/client" - "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/internal/resource" - pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2" - "github.com/hashicorp/consul/proto-public/pbresource" -) - -const ( - StatusKey = "consul.io/hcp/link" - - // Statuses - StatusLinked = "linked" - StatusValidated = "validated" - - LinkedSuccessReason = "SUCCESS" - LinkedFailedReason = "FAILED" - LinkedDisabledReasonV2ResourcesUnsupportedReason = "DISABLED_V2_RESOURCES_UNSUPPORTED" - LinkedUnauthorizedReason = "UNAUTHORIZED" - LinkedForbiddenReason = "FORBIDDEN" - ValidatedSuccessReason = "SUCCESS" - ValidatedFailedV2ResourcesReason = "V2_RESOURCES_UNSUPPORTED" - - LinkedMessageFormat = "Successfully linked to cluster '%s'" - FailedMessage = "Failed to link to HCP due to unexpected error" - DisabledResourceAPIsEnabledMessage = "Link is disabled because resource-apis are enabled" - UnauthorizedMessage = "Access denied, check client_id and client_secret" - ForbiddenMessage = "Access denied, check the resource_id" - ValidatedSuccessMessage = "Successfully validated link" - ValidatedFailedV2ResourcesMessage = "Link is disabled because resource-apis are enabled" -) - -var ( - ConditionDisabled = &pbresource.Condition{ - Type: StatusLinked, - State: pbresource.Condition_STATE_FALSE, - Reason: LinkedDisabledReasonV2ResourcesUnsupportedReason, - Message: DisabledResourceAPIsEnabledMessage, - } - ConditionFailed = &pbresource.Condition{ - Type: StatusLinked, - State: pbresource.Condition_STATE_FALSE, - Reason: LinkedFailedReason, - Message: FailedMessage, - } - ConditionUnauthorized = &pbresource.Condition{ - Type: StatusLinked, - State: pbresource.Condition_STATE_FALSE, - Reason: LinkedUnauthorizedReason, - Message: UnauthorizedMessage, - } - ConditionForbidden = &pbresource.Condition{ - Type: StatusLinked, - State: pbresource.Condition_STATE_FALSE, - Reason: LinkedForbiddenReason, - Message: ForbiddenMessage, - } - ConditionValidatedSuccess = &pbresource.Condition{ - Type: StatusValidated, - State: pbresource.Condition_STATE_TRUE, - Reason: ValidatedSuccessReason, - Message: ValidatedSuccessMessage, - } - ConditionValidatedFailed = &pbresource.Condition{ - Type: StatusValidated, - State: pbresource.Condition_STATE_FALSE, - Reason: ValidatedFailedV2ResourcesReason, - Message: ValidatedFailedV2ResourcesMessage, - } -) - -func ConditionLinked(resourceId string) *pbresource.Condition { - return &pbresource.Condition{ - Type: StatusLinked, - State: pbresource.Condition_STATE_TRUE, - Reason: LinkedSuccessReason, - Message: fmt.Sprintf(LinkedMessageFormat, resourceId), - } -} - -func writeStatusIfNotEqual(ctx context.Context, rt controller.Runtime, res *pbresource.Resource, status *pbresource.Status) error { - if resource.EqualStatus(res.Status[StatusKey], status, false) { - return nil - } - _, err := rt.Client.WriteStatus( - ctx, &pbresource.WriteStatusRequest{ - Id: res.Id, - Key: StatusKey, - Status: status, - }, - ) - if err != nil { - rt.Logger.Error("error writing link status", "error", err) - } - return err -} - -func linkingFailedCondition(err error) *pbresource.Condition { - switch { - case errors.Is(err, client.ErrUnauthorized): - return ConditionUnauthorized - case errors.Is(err, client.ErrForbidden): - return ConditionForbidden - default: - return ConditionFailed - } -} - -func IsLinked(res *pbresource.Resource) (linked bool, reason 
string) { - return isConditionTrue(res, StatusLinked) -} - -func IsValidated(res *pbresource.Resource) (linked bool, reason string) { - return isConditionTrue(res, StatusValidated) -} - -func isConditionTrue(res *pbresource.Resource, statusType string) (bool, string) { - if !resource.EqualType(res.GetId().GetType(), pbhcp.LinkType) { - return false, "resource is not hcp.Link type" - } - - linkStatus, ok := res.GetStatus()[StatusKey] - if !ok { - return false, "link status not set" - } - - for _, cond := range linkStatus.GetConditions() { - if cond.Type == statusType && cond.GetState() == pbresource.Condition_STATE_TRUE { - return true, "" - } - } - return false, fmt.Sprintf("link status does not include positive %s condition", statusType) -} diff --git a/internal/hcp/internal/controllers/register.go b/internal/hcp/internal/controllers/register.go deleted file mode 100644 index 05203f525c..0000000000 --- a/internal/hcp/internal/controllers/register.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package controllers - -import ( - "github.com/hashicorp/consul/agent/hcp/config" - "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/internal/hcp/internal/controllers/link" - "github.com/hashicorp/consul/internal/hcp/internal/controllers/telemetrystate" -) - -type Dependencies struct { - CloudConfig config.CloudConfig -} - -func Register(mgr *controller.Manager, deps Dependencies) { - mgr.Register( - link.LinkController( - link.DefaultHCPClientFn, - deps.CloudConfig, - ), - ) - - mgr.Register(telemetrystate.TelemetryStateController(link.DefaultHCPClientFn)) -} diff --git a/internal/hcp/internal/controllers/telemetrystate/controller.go b/internal/hcp/internal/controllers/telemetrystate/controller.go deleted file mode 100644 index e21c85685e..0000000000 --- a/internal/hcp/internal/controllers/telemetrystate/controller.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package telemetrystate - -import ( - "context" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/anypb" - - "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/internal/controller/dependency" - "github.com/hashicorp/consul/internal/hcp/internal/controllers/link" - "github.com/hashicorp/consul/internal/hcp/internal/types" - "github.com/hashicorp/consul/internal/resource" - pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2" - "github.com/hashicorp/consul/proto-public/pbresource" -) - -var ( - globalID = &pbresource.ID{ - Name: "global", - Type: pbhcp.TelemetryStateType, - Tenancy: &pbresource.Tenancy{}, - } -) - -const MetaKeyDebugSkipDeletion = StatusKey + "/debug/skip-deletion" - -func TelemetryStateController(hcpClientFn link.HCPClientFn) *controller.Controller { - return controller.NewController(StatusKey, pbhcp.TelemetryStateType). - WithWatch(pbhcp.LinkType, dependency.ReplaceType(pbhcp.TelemetryStateType)). - WithReconciler(&telemetryStateReconciler{ - hcpClientFn: hcpClientFn, - }) -} - -type telemetryStateReconciler struct { - hcpClientFn link.HCPClientFn -} - -func (r *telemetryStateReconciler) Reconcile(ctx context.Context, rt controller.Runtime, req controller.Request) error { - // The runtime is passed by value so replacing it here for the remainder of this - // reconciliation request processing will not affect future invocations. 
- rt.Logger = rt.Logger.With("resource-id", req.ID, "controller", StatusKey) - - rt.Logger.Trace("reconciling telemetry-state") - - // First get the link resource in order to build a hcp client. If the link resource - // doesn't exist then the telemetry-state should not exist either. - res, err := getLinkResource(ctx, rt) - if err != nil { - rt.Logger.Error("failed to lookup Link resource", "error", err) - return err - } - if res == nil { - return ensureTelemetryStateDeleted(ctx, rt) - } - - // Check that the link resource indicates the cluster is linked - // If the cluster is not linked, the telemetry-state resource should not exist - if linked, reason := link.IsLinked(res.GetResource()); !linked { - rt.Logger.Trace("cluster is not linked", "reason", reason) - return ensureTelemetryStateDeleted(ctx, rt) - } - - hcpClient, err := r.hcpClientFn(link.CloudConfigFromLink(res.GetData())) - if err != nil { - rt.Logger.Error("error creating HCP Client", "error", err) - return err - } - - // Get the telemetry configuration and observability scoped credentials from hcp - tCfg, err := hcpClient.FetchTelemetryConfig(ctx) - if err != nil { - rt.Logger.Error("error requesting telemetry config", "error", err) - return err - } - clientID, clientSecret, err := hcpClient.GetObservabilitySecret(ctx) - if err != nil { - rt.Logger.Error("error requesting telemetry credentials", "error", err) - return nil - } - - // TODO allow hcp client config override from hcp TelemetryConfig - hcpCfg := res.GetData().GetHcpConfig() - - // TODO implement proxy options from hcp - proxyCfg := &pbhcp.ProxyConfig{} - - state := &pbhcp.TelemetryState{ - ResourceId: res.GetData().ResourceId, - ClientId: clientID, - ClientSecret: clientSecret, - HcpConfig: hcpCfg, - Proxy: proxyCfg, - Metrics: &pbhcp.MetricsConfig{ - Labels: tCfg.MetricsConfig.Labels, - Disabled: tCfg.MetricsConfig.Disabled, - }, - } - - if tCfg.MetricsConfig.Endpoint != nil { - state.Metrics.Endpoint = tCfg.MetricsConfig.Endpoint.String() - } - if tCfg.MetricsConfig.Filters != nil { - state.Metrics.IncludeList = []string{tCfg.MetricsConfig.Filters.String()} - } - - if err := writeTelemetryStateIfUpdated(ctx, rt, state); err != nil { - rt.Logger.Error("error updating telemetry-state", "error", err) - return err - } - - return nil -} - -func ensureTelemetryStateDeleted(ctx context.Context, rt controller.Runtime) error { - resp, err := rt.Client.Read(ctx, &pbresource.ReadRequest{Id: &pbresource.ID{Name: "global", Type: pbhcp.TelemetryStateType}}) - switch { - case status.Code(err) == codes.NotFound: - return nil - case err != nil: - rt.Logger.Error("the resource service has returned an unexpected error", "error", err) - return err - } - - rt.Logger.Trace("deleting telemetry-state") - if _, ok := resp.GetResource().Metadata[MetaKeyDebugSkipDeletion]; ok { - rt.Logger.Debug("skip-deletion metadata key found, skipping deletion of telemetry-state resource") - return nil - } - - if _, err := rt.Client.Delete(ctx, &pbresource.DeleteRequest{Id: resp.GetResource().GetId()}); err != nil { - rt.Logger.Error("error deleting telemetry-state resource", "error", err) - return err - } - return nil -} - -func writeTelemetryStateIfUpdated(ctx context.Context, rt controller.Runtime, state *pbhcp.TelemetryState) error { - currentState, err := getTelemetryStateResource(ctx, rt) - if err != nil { - return err - } - - if currentState != nil && proto.Equal(currentState.GetData(), state) { - return nil - } - - stateData, err := anypb.New(state) - if err != nil { - return err - } - - _, err 
= rt.Client.Write(ctx, &pbresource.WriteRequest{Resource: &pbresource.Resource{ - Id: &pbresource.ID{ - Name: "global", - Type: pbhcp.TelemetryStateType, - }, - Data: stateData, - }}) - return err -} - -func getGlobalResource(ctx context.Context, rt controller.Runtime, t *pbresource.Type) (*pbresource.Resource, error) { - resp, err := rt.Client.Read(ctx, &pbresource.ReadRequest{Id: &pbresource.ID{Name: "global", Type: t}}) - switch { - case status.Code(err) == codes.NotFound: - return nil, nil - case err != nil: - return nil, err - } - - return resp.GetResource(), nil -} - -// getLinkResource returns the cluster scoped pbhcp.Link resource. If the resource is not found a nil -// pointer and no error will be returned. -func getLinkResource(ctx context.Context, rt controller.Runtime) (*types.DecodedLink, error) { - res, err := getGlobalResource(ctx, rt, pbhcp.LinkType) - if err != nil { - return nil, err - } - if res == nil { - return nil, nil - } - return resource.Decode[*pbhcp.Link](res) -} - -func getTelemetryStateResource(ctx context.Context, rt controller.Runtime) (*types.DecodedTelemetryState, error) { - res, err := getGlobalResource(ctx, rt, pbhcp.TelemetryStateType) - if err != nil { - return nil, err - } - if res == nil { - return nil, nil - } - return resource.Decode[*pbhcp.TelemetryState](res) -} diff --git a/internal/hcp/internal/controllers/telemetrystate/controller_test.go b/internal/hcp/internal/controllers/telemetrystate/controller_test.go deleted file mode 100644 index e11c1e3063..0000000000 --- a/internal/hcp/internal/controllers/telemetrystate/controller_test.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package telemetrystate - -import ( - "context" - "net/url" - "regexp" - "testing" - - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" - hcpclient "github.com/hashicorp/consul/agent/hcp/client" - "github.com/hashicorp/consul/agent/hcp/config" - "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/internal/hcp/internal/controllers/link" - "github.com/hashicorp/consul/internal/hcp/internal/types" - "github.com/hashicorp/consul/internal/resource" - rtest "github.com/hashicorp/consul/internal/resource/resourcetest" - pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2" - "github.com/hashicorp/consul/proto-public/pbresource" - "github.com/hashicorp/consul/sdk/testutil" -) - -type controllerSuite struct { - suite.Suite - - ctx context.Context - client *rtest.Client - rt controller.Runtime - - ctl *controller.TestController - tenancies []*pbresource.Tenancy - - hcpMock *hcpclient.MockClient -} - -func mockHcpClientFn(t *testing.T) (*hcpclient.MockClient, link.HCPClientFn) { - mockClient := hcpclient.NewMockClient(t) - - mockClientFunc := func(link config.CloudConfig) (hcpclient.Client, error) { - return mockClient, nil - } - - return mockClient, mockClientFunc -} - -func (suite *controllerSuite) SetupTest() { - suite.ctx = testutil.TestContext(suite.T()) - suite.tenancies = rtest.TestTenancies() - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(types.Register). - WithTenancies(suite.tenancies...). - Run(suite.T()) - - hcpMock, hcpClientFn := mockHcpClientFn(suite.T()) - suite.hcpMock = hcpMock - suite.ctl = controller.NewTestController(TelemetryStateController(hcpClientFn), client). 
- WithLogger(testutil.Logger(suite.T())) - - suite.rt = suite.ctl.Runtime() - suite.client = rtest.NewClient(client) -} - -func TestTelemetryStateController(t *testing.T) { - suite.Run(t, new(controllerSuite)) -} - -func (suite *controllerSuite) deleteResourceFunc(id *pbresource.ID) func() { - return func() { - suite.client.MustDelete(suite.T(), id) - } -} - -func (suite *controllerSuite) TestController_Ok() { - // Run the controller manager - mgr := controller.NewManager(suite.client, suite.rt.Logger) - mockClient, mockClientFn := mockHcpClientFn(suite.T()) - mockClient.EXPECT().FetchTelemetryConfig(mock.Anything).Return(&hcpclient.TelemetryConfig{ - MetricsConfig: &hcpclient.MetricsConfig{ - Endpoint: &url.URL{ - Scheme: "http", - Host: "localhost", - Path: "/test", - }, - Labels: map[string]string{"foo": "bar"}, - Filters: regexp.MustCompile(".*"), - }, - RefreshConfig: &hcpclient.RefreshConfig{}, - }, nil) - mockClient.EXPECT().GetObservabilitySecret(mock.Anything).Return("xxx", "yyy", nil) - mgr.Register(TelemetryStateController(mockClientFn)) - mgr.SetRaftLeader(true) - go mgr.Run(suite.ctx) - - link := suite.writeLinkResource() - - tsRes := suite.client.WaitForResourceExists(suite.T(), &pbresource.ID{Name: "global", Type: pbhcp.TelemetryStateType}) - decodedState, err := resource.Decode[*pbhcp.TelemetryState](tsRes) - require.NoError(suite.T(), err) - require.Equal(suite.T(), link.GetData().GetResourceId(), decodedState.GetData().ResourceId) - require.Equal(suite.T(), "xxx", decodedState.GetData().ClientId) - require.Equal(suite.T(), "http://localhost/test", decodedState.GetData().Metrics.Endpoint) - - suite.client.MustDelete(suite.T(), link.Id) - suite.client.WaitForDeletion(suite.T(), tsRes.Id) -} - -func (suite *controllerSuite) TestReconcile_AvoidReconciliationWriteLoop() { - suite.hcpMock.EXPECT().FetchTelemetryConfig(mock.Anything).Return(&hcpclient.TelemetryConfig{ - MetricsConfig: &hcpclient.MetricsConfig{ - Endpoint: &url.URL{ - Scheme: "http", - Host: "localhost", - Path: "/test", - }, - Labels: map[string]string{"foo": "bar"}, - Filters: regexp.MustCompile(".*"), - }, - RefreshConfig: &hcpclient.RefreshConfig{}, - }, nil) - link := suite.writeLinkResource() - suite.hcpMock.EXPECT().GetObservabilitySecret(mock.Anything).Return("xxx", "yyy", nil) - suite.NoError(suite.ctl.Reconcile(context.Background(), controller.Request{ID: link.Id})) - tsRes := suite.client.WaitForResourceExists(suite.T(), &pbresource.ID{Name: "global", Type: pbhcp.TelemetryStateType}) - suite.NoError(suite.ctl.Reconcile(context.Background(), controller.Request{ID: tsRes.Id})) - suite.client.RequireVersionUnchanged(suite.T(), tsRes.Id, tsRes.Version) -} - -func (suite *controllerSuite) TestController_LinkingDisabled() { - // Run the controller manager - mgr := controller.NewManager(suite.client, suite.rt.Logger) - _, mockClientFn := mockHcpClientFn(suite.T()) - mgr.Register(TelemetryStateController(mockClientFn)) - mgr.SetRaftLeader(true) - go mgr.Run(suite.ctx) - - linkData := &pbhcp.Link{ - ClientId: "abc", - ClientSecret: "abc", - ResourceId: types.GenerateTestResourceID(suite.T()), - } - - rtest.Resource(pbhcp.LinkType, "global"). - WithData(suite.T(), linkData). - WithStatus(link.StatusKey, &pbresource.Status{Conditions: []*pbresource.Condition{link.ConditionDisabled}}). 
- Write(suite.T(), suite.client) - - suite.client.WaitForDeletion(suite.T(), &pbresource.ID{Name: "global", Type: pbhcp.TelemetryStateType}) -} - -func (suite *controllerSuite) writeLinkResource() *types.DecodedLink { - suite.T().Helper() - - linkData := &pbhcp.Link{ - ClientId: "abc", - ClientSecret: "abc", - ResourceId: types.GenerateTestResourceID(suite.T()), - } - - res := rtest.Resource(pbhcp.LinkType, "global"). - WithData(suite.T(), linkData). - WithStatus(link.StatusKey, &pbresource.Status{Conditions: []*pbresource.Condition{link.ConditionLinked(linkData.ResourceId)}}). - Write(suite.T(), suite.client) - - suite.T().Cleanup(suite.deleteResourceFunc(res.Id)) - link, err := resource.Decode[*pbhcp.Link](res) - require.NoError(suite.T(), err) - return link -} diff --git a/internal/hcp/internal/controllers/telemetrystate/status.go b/internal/hcp/internal/controllers/telemetrystate/status.go deleted file mode 100644 index d68873b6d9..0000000000 --- a/internal/hcp/internal/controllers/telemetrystate/status.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package telemetrystate - -const ( - StatusKey = "consul.io/hcp/telemetry-state" -) diff --git a/internal/hcp/internal/types/link.go b/internal/hcp/internal/types/link.go deleted file mode 100644 index 111f58a858..0000000000 --- a/internal/hcp/internal/types/link.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package types - -import ( - "errors" - - "github.com/hashicorp/go-multierror" - hcpresource "github.com/hashicorp/hcp-sdk-go/resource" - - "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/internal/resource" - pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2" - "github.com/hashicorp/consul/proto-public/pbresource" -) - -type DecodedLink = resource.DecodedResource[*pbhcp.Link] - -const ( - LinkName = "global" - MetadataSourceKey = "source" - MetadataSourceConfig = "config" -) - -var ( - errLinkConfigurationName = errors.New("only a single Link resource is allowed and it must be named global") - errInvalidHCPResourceID = errors.New("could not parse, invalid format") -) - -func RegisterLink(r resource.Registry) { - r.Register(resource.Registration{ - Type: pbhcp.LinkType, - Proto: &pbhcp.Link{}, - Scope: resource.ScopeCluster, - Validate: ValidateLink, - ACLs: &resource.ACLHooks{ - Read: aclReadHookLink, - Write: aclWriteHookLink, - List: aclListHookLink, - }, - }) -} - -func aclReadHookLink(authorizer acl.Authorizer, authzContext *acl.AuthorizerContext, _ *pbresource.ID, _ *pbresource.Resource) error { - err := authorizer.ToAllowAuthorizer().OperatorReadAllowed(authzContext) - if err != nil { - return err - } - return nil -} - -func aclWriteHookLink(authorizer acl.Authorizer, authzContext *acl.AuthorizerContext, _ *pbresource.Resource) error { - err := authorizer.ToAllowAuthorizer().OperatorWriteAllowed(authzContext) - if err != nil { - return err - } - - err = authorizer.ToAllowAuthorizer().ACLWriteAllowed(authzContext) - if err != nil { - return err - } - - return nil -} - -func aclListHookLink(authorizer acl.Authorizer, authzContext *acl.AuthorizerContext) error { - err := authorizer.ToAllowAuthorizer().OperatorReadAllowed(authzContext) - if err != nil { - return err - } - return nil -} - -var ValidateLink = resource.DecodeAndValidate(validateLink) - -func validateLink(res *DecodedLink) error { - var err error - - if res.Id.Name != LinkName { - err = multierror.Append(err, 
resource.ErrInvalidField{ - Name: "name", - Wrapped: errLinkConfigurationName, - }) - } - - if res.Data.ClientId == "" { - err = multierror.Append(err, resource.ErrInvalidField{ - Name: "client_id", - Wrapped: resource.ErrMissing, - }) - } - - if res.Data.ClientSecret == "" { - err = multierror.Append(err, resource.ErrInvalidField{ - Name: "client_secret", - Wrapped: resource.ErrMissing, - }) - } - - if res.Data.ResourceId == "" { - err = multierror.Append(err, resource.ErrInvalidField{ - Name: "resource_id", - Wrapped: resource.ErrMissing, - }) - } else { - _, parseErr := hcpresource.FromString(res.Data.ResourceId) - if parseErr != nil { - err = multierror.Append(err, resource.ErrInvalidField{ - Name: "resource_id", - Wrapped: errInvalidHCPResourceID, - }) - } - } - - return err -} diff --git a/internal/hcp/internal/types/link_test.go b/internal/hcp/internal/types/link_test.go deleted file mode 100644 index 2de25091d2..0000000000 --- a/internal/hcp/internal/types/link_test.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package types - -import ( - "testing" - - "github.com/stretchr/testify/require" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/types/known/anypb" - - "github.com/hashicorp/consul/internal/resource" - rtest "github.com/hashicorp/consul/internal/resource/resourcetest" - pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2" - "github.com/hashicorp/consul/proto-public/pbresource" -) - -func createCloudLinkResource(t *testing.T, data protoreflect.ProtoMessage) *pbresource.Resource { - res := &pbresource.Resource{ - Id: &pbresource.ID{ - Type: pbhcp.LinkType, - Name: "global", - }, - } - - var err error - res.Data, err = anypb.New(data) - require.NoError(t, err) - return res -} - -func TestValidateLink_Ok(t *testing.T) { - data := &pbhcp.Link{ - ClientId: "abc", - ClientSecret: "abc", - ResourceId: GenerateTestResourceID(t), - } - - res := createCloudLinkResource(t, data) - - err := ValidateLink(res) - require.NoError(t, err) -} - -func TestValidateLink_ParseError(t *testing.T) { - // Any type other than the Link type would work - // to cause the error we are expecting - data := &pbresource.Type{Group: "a", GroupVersion: "b", Kind: "c"} - - res := createCloudLinkResource(t, data) - - err := ValidateLink(res) - require.Error(t, err) - require.ErrorAs(t, err, &resource.ErrDataParse{}) -} - -func TestValidateLink_InvalidName(t *testing.T) { - data := &pbhcp.Link{ - ClientId: "abc", - ClientSecret: "abc", - ResourceId: GenerateTestResourceID(t), - } - - res := createCloudLinkResource(t, data) - res.Id.Name = "default" - - err := ValidateLink(res) - - expected := resource.ErrInvalidField{ - Name: "name", - Wrapped: errLinkConfigurationName, - } - - var actual resource.ErrInvalidField - require.ErrorAs(t, err, &actual) - require.Equal(t, expected, actual) -} - -func TestValidateLink_MissingClientId(t *testing.T) { - data := &pbhcp.Link{ - ClientId: "", - ClientSecret: "abc", - ResourceId: GenerateTestResourceID(t), - } - - res := createCloudLinkResource(t, data) - - err := ValidateLink(res) - - expected := resource.ErrInvalidField{ - Name: "client_id", - Wrapped: resource.ErrMissing, - } - - var actual resource.ErrInvalidField - require.ErrorAs(t, err, &actual) - require.Equal(t, expected, actual) -} - -func TestValidateLink_MissingClientSecret(t *testing.T) { - data := &pbhcp.Link{ - ClientId: "abc", - ClientSecret: "", - ResourceId: GenerateTestResourceID(t), - } - - res := 
createCloudLinkResource(t, data) - - err := ValidateLink(res) - - expected := resource.ErrInvalidField{ - Name: "client_secret", - Wrapped: resource.ErrMissing, - } - - var actual resource.ErrInvalidField - require.ErrorAs(t, err, &actual) - require.Equal(t, expected, actual) -} - -func TestValidateLink_MissingResourceId(t *testing.T) { - data := &pbhcp.Link{ - ClientId: "abc", - ClientSecret: "abc", - ResourceId: "", - } - - res := createCloudLinkResource(t, data) - - err := ValidateLink(res) - - expected := resource.ErrInvalidField{ - Name: "resource_id", - Wrapped: resource.ErrMissing, - } - - var actual resource.ErrInvalidField - require.ErrorAs(t, err, &actual) - require.Equal(t, expected, actual) -} - -func TestValidateLink_InvalidResourceId(t *testing.T) { - data := &pbhcp.Link{ - ClientId: "abc", - ClientSecret: "abc", - ResourceId: "abc", - } - - res := createCloudLinkResource(t, data) - - err := ValidateLink(res) - - expected := resource.ErrInvalidField{ - Name: "resource_id", - Wrapped: errInvalidHCPResourceID, - } - - var actual resource.ErrInvalidField - require.ErrorAs(t, err, &actual) - require.Equal(t, expected, actual) -} - -// Currently, we have no specific ACLs configured so the default `operator` permissions are required -func TestLinkACLs(t *testing.T) { - registry := resource.NewRegistry() - RegisterLink(registry) - - data := &pbhcp.Link{ - ClientId: "abc", - ClientSecret: "abc", - ResourceId: GenerateTestResourceID(t), - } - link := createCloudLinkResource(t, data) - - cases := map[string]rtest.ACLTestCase{ - "no rules": { - Rules: ``, - Res: link, - ReadOK: rtest.DENY, - WriteOK: rtest.DENY, - ListOK: rtest.DENY, - }, - "link test read and list": { - Rules: `{"operator": "read"}`, - Res: link, - ReadOK: rtest.ALLOW, - WriteOK: rtest.DENY, - ListOK: rtest.ALLOW, - }, - "link test write": { - Rules: `{"operator": "write", "acl": "write"}`, - Res: link, - ReadOK: rtest.ALLOW, - WriteOK: rtest.ALLOW, - ListOK: rtest.ALLOW, - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - rtest.RunACLTestCase(t, tc, registry) - }) - } -} diff --git a/internal/hcp/internal/types/telemetry_state.go b/internal/hcp/internal/types/telemetry_state.go deleted file mode 100644 index 7c6b3971cf..0000000000 --- a/internal/hcp/internal/types/telemetry_state.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package types - -import ( - "errors" - - "github.com/hashicorp/go-multierror" - - "github.com/hashicorp/consul/internal/resource" - pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2" -) - -type DecodedTelemetryState = resource.DecodedResource[*pbhcp.TelemetryState] - -var ( - telemetryStateConfigurationNameError = errors.New("only a single Telemetry resource is allowed and it must be named global") -) - -func RegisterTelemetryState(r resource.Registry) { - r.Register(resource.Registration{ - Type: pbhcp.TelemetryStateType, - Proto: &pbhcp.TelemetryState{}, - Scope: resource.ScopeCluster, - Validate: ValidateTelemetryState, - }) -} - -var ValidateTelemetryState = resource.DecodeAndValidate(validateTelemetryState) - -func validateTelemetryState(res *DecodedTelemetryState) error { - var err error - - if res.GetId().GetName() != "global" { - err = multierror.Append(err, resource.ErrInvalidField{ - Name: "name", - Wrapped: telemetryStateConfigurationNameError, - }) - } - - if res.GetData().GetClientId() == "" { - err = multierror.Append(err, resource.ErrInvalidField{ - Name: "client_id", - Wrapped: resource.ErrMissing, - }) - } - - if res.GetData().GetClientSecret() == "" { - err = multierror.Append(err, resource.ErrInvalidField{ - Name: "client_secret", - Wrapped: resource.ErrMissing, - }) - } - - if res.GetData().GetResourceId() == "" { - err = multierror.Append(err, resource.ErrInvalidField{ - Name: "resource_id", - Wrapped: resource.ErrMissing, - }) - } - - if res.GetData().GetMetrics().GetEndpoint() == "" { - err = multierror.Append(err, resource.ErrInvalidField{ - Name: "metrics.endpoint", - Wrapped: resource.ErrMissing, - }) - } - - if res.GetData().GetMetrics().GetIncludeList() == nil { - err = multierror.Append(err, resource.ErrInvalidField{ - Name: "metrics.include_list", - Wrapped: resource.ErrMissing, - }) - } - - if res.GetData().GetMetrics().GetLabels() == nil { - err = multierror.Append(err, resource.ErrInvalidField{ - Name: "metrics.labels", - Wrapped: resource.ErrMissing, - }) - } - - return err -} diff --git a/internal/hcp/internal/types/testing.go b/internal/hcp/internal/types/testing.go deleted file mode 100644 index 420f79e920..0000000000 --- a/internal/hcp/internal/types/testing.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package types - -import ( - "fmt" - "testing" - - "github.com/hashicorp/go-uuid" - "github.com/stretchr/testify/require" -) - -func GenerateTestResourceID(t *testing.T) string { - orgID, err := uuid.GenerateUUID() - require.NoError(t, err) - - projectID, err := uuid.GenerateUUID() - require.NoError(t, err) - - template := "organization/%s/project/%s/hashicorp.consul.global-network-manager.cluster/test-cluster" - return fmt.Sprintf(template, orgID, projectID) -} diff --git a/internal/hcp/internal/types/types.go b/internal/hcp/internal/types/types.go deleted file mode 100644 index 17b495fabf..0000000000 --- a/internal/hcp/internal/types/types.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package types - -import "github.com/hashicorp/consul/internal/resource" - -func Register(r resource.Registry) { - RegisterLink(r) - RegisterTelemetryState(r) -} diff --git a/internal/resource/filter.go b/internal/resource/filter.go deleted file mode 100644 index 09e251e218..0000000000 --- a/internal/resource/filter.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package resource - -import ( - "fmt" - - "github.com/hashicorp/go-bexpr" - - "github.com/hashicorp/consul/proto-public/pbresource" -) - -type MetadataFilterableResources interface { - GetMetadata() map[string]string -} - -// FilterResourcesByMetadata will use the provided go-bexpr based filter to -// retain matching items from the provided slice. -// -// The only variables usable in the expressions are the metadata keys prefixed -// by "metadata." -// -// If no filter is provided, then this does nothing and returns the input. -func FilterResourcesByMetadata[T MetadataFilterableResources](resources []T, filter string) ([]T, error) { - if filter == "" || len(resources) == 0 { - return resources, nil - } - - eval, err := createMetadataFilterEvaluator(filter) - if err != nil { - return nil, err - } - - filtered := make([]T, 0, len(resources)) - for _, res := range resources { - vars := &metadataFilterFieldDetails{ - Meta: res.GetMetadata(), - } - match, err := eval.Evaluate(vars) - if err != nil { - return nil, err - } - if match { - filtered = append(filtered, res) - } - } - if len(filtered) == 0 { - return nil, nil - } - return filtered, nil -} - -// FilterMatchesResourceMetadata will use the provided go-bexpr based filter to -// determine if the provided resource matches. -// -// The only variables usable in the expressions are the metadata keys prefixed -// by "metadata." -// -// If no filter is provided, then this returns true. -func FilterMatchesResourceMetadata(res *pbresource.Resource, filter string) (bool, error) { - if res == nil { - return false, nil - } else if filter == "" { - return true, nil - } - - eval, err := createMetadataFilterEvaluator(filter) - if err != nil { - return false, err - } - - vars := &metadataFilterFieldDetails{ - Meta: res.Metadata, - } - match, err := eval.Evaluate(vars) - if err != nil { - return false, err - } - return match, nil -} - -// ValidateMetadataFilter will validate that the provided filter is going to be -// a valid input to the FilterResourcesByMetadata function. -// -// This is best called from a Validate hook. -func ValidateMetadataFilter(filter string) error { - if filter == "" { - return nil - } - - _, err := createMetadataFilterEvaluator(filter) - return err -} - -func createMetadataFilterEvaluator(filter string) (*bexpr.Evaluator, error) { - sampleVars := &metadataFilterFieldDetails{ - Meta: make(map[string]string), - } - eval, err := bexpr.CreateEvaluatorForType(filter, nil, sampleVars) - if err != nil { - return nil, fmt.Errorf("filter %q is invalid: %w", filter, err) - } - return eval, nil -} - -type metadataFilterFieldDetails struct { - Meta map[string]string `bexpr:"metadata"` -} diff --git a/internal/resource/filter_test.go b/internal/resource/filter_test.go deleted file mode 100644 index e15ec08030..0000000000 --- a/internal/resource/filter_test.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package resource - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/hashicorp/consul/proto-public/pbresource" - "github.com/hashicorp/consul/proto/private/prototest" - "github.com/hashicorp/consul/sdk/testutil" -) - -func TestFilterResourcesByMetadata(t *testing.T) { - type testcase struct { - in []*pbresource.Resource - filter string - expect []*pbresource.Resource - expectErr string - } - - create := func(name string, kvs ...string) *pbresource.Resource { - require.True(t, len(kvs)%2 == 0) - - meta := make(map[string]string) - for i := 0; i < len(kvs); i += 2 { - meta[kvs[i]] = kvs[i+1] - } - - return &pbresource.Resource{ - Id: &pbresource.ID{ - Name: name, - }, - Metadata: meta, - } - } - - run := func(t *testing.T, tc testcase) { - got, err := FilterResourcesByMetadata(tc.in, tc.filter) - if tc.expectErr != "" { - require.Error(t, err) - testutil.RequireErrorContains(t, err, tc.expectErr) - } else { - require.NoError(t, err) - prototest.AssertDeepEqual(t, tc.expect, got) - } - } - - cases := map[string]testcase{ - "nil input": {}, - "no filter": { - in: []*pbresource.Resource{ - create("one"), - create("two"), - create("three"), - create("four"), - }, - filter: "", - expect: []*pbresource.Resource{ - create("one"), - create("two"), - create("three"), - create("four"), - }, - }, - "bad filter": { - in: []*pbresource.Resource{ - create("one"), - create("two"), - create("three"), - create("four"), - }, - filter: "garbage.value == zzz", - expectErr: `Selector "garbage" is not valid`, - }, - "filter everything out": { - in: []*pbresource.Resource{ - create("one"), - create("two"), - create("three"), - create("four"), - }, - filter: "metadata.foo == bar", - }, - "filter simply": { - in: []*pbresource.Resource{ - create("one", "foo", "bar"), - create("two", "foo", "baz"), - create("three", "zim", "gir"), - create("four", "zim", "gaz", "foo", "bar"), - }, - filter: "metadata.foo == bar", - expect: []*pbresource.Resource{ - create("one", "foo", "bar"), - create("four", "zim", "gaz", "foo", "bar"), - }, - }, - "filter prefix": { - in: []*pbresource.Resource{ - create("one", "foo", "bar"), - create("two", "foo", "baz"), - create("three", "zim", "gir"), - create("four", "zim", "gaz", "foo", "bar"), - create("four", "zim", "zzz"), - }, - filter: "(zim in metadata) and (metadata.zim matches `^g.`)", - expect: []*pbresource.Resource{ - create("three", "zim", "gir"), - create("four", "zim", "gaz", "foo", "bar"), - }, - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - run(t, tc) - }) - } -} - -func TestFilterMatchesResourceMetadata(t *testing.T) { - type testcase struct { - res *pbresource.Resource - filter string - expect bool - expectErr string - } - - create := func(name string, kvs ...string) *pbresource.Resource { - require.True(t, len(kvs)%2 == 0) - - meta := make(map[string]string) - for i := 0; i < len(kvs); i += 2 { - meta[kvs[i]] = kvs[i+1] - } - - return &pbresource.Resource{ - Id: &pbresource.ID{ - Name: name, - }, - Metadata: meta, - } - } - - run := func(t *testing.T, tc testcase) { - got, err := FilterMatchesResourceMetadata(tc.res, tc.filter) - if tc.expectErr != "" { - require.Error(t, err) - testutil.RequireErrorContains(t, err, tc.expectErr) - } else { - require.NoError(t, err) - require.Equal(t, tc.expect, got) - } - } - - cases := map[string]testcase{ - "nil input": {}, - "no filter": { - res: create("one"), - filter: "", - expect: true, - }, - "bad filter": { - res: create("one"), 
- filter: "garbage.value == zzz", - expectErr: `Selector "garbage" is not valid`, - }, - "no match": { - res: create("one"), - filter: "metadata.foo == bar", - }, - "match simply": { - res: create("one", "foo", "bar"), - filter: "metadata.foo == bar", - expect: true, - }, - "match via prefix": { - res: create("four", "zim", "gaz", "foo", "bar"), - filter: "(zim in metadata) and (metadata.zim matches `^g.`)", - expect: true, - }, - "no match via prefix": { - res: create("four", "zim", "zzz", "foo", "bar"), - filter: "(zim in metadata) and (metadata.zim matches `^g.`)", - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - run(t, tc) - }) - } -} diff --git a/internal/storage/raft/backend.go b/internal/storage/raft/backend.go index 8a7a973c3e..3db72f2546 100644 --- a/internal/storage/raft/backend.go +++ b/internal/storage/raft/backend.go @@ -14,10 +14,10 @@ import ( "github.com/hashicorp/go-hclog" + "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/storage" "github.com/hashicorp/consul/internal/storage/inmem" "github.com/hashicorp/consul/proto-public/pbresource" - pbstorage "github.com/hashicorp/consul/proto/private/pbstorage" ) @@ -53,7 +53,7 @@ func NewBackend(h Handle, l hclog.Logger) (*Backend, error) { if err != nil { return nil, err } - b := &Backend{handle: h, store: s} + b := &Backend{handle: h, store: s, logger: l} b.forwardingServer = newForwardingServer(b) b.forwardingClient = newForwardingClient(h, l) return b, nil @@ -80,6 +80,7 @@ type Handle interface { type Backend struct { handle Handle store *inmem.Store + logger hclog.Logger forwardingServer *forwardingServer forwardingClient *forwardingClient @@ -225,6 +226,24 @@ func (b *Backend) ListByOwner(_ context.Context, id *pbresource.ID) ([]*pbresour return b.store.ListByOwner(id) } +// isRetiredType ensures that types that have been formally retired (deprecated +// and deleted) do not sneak back in during a snapshot restore. +func isRetiredType(typ *pbresource.Type) bool { + switch typ.GetGroupVersion() { + case "v2": + switch typ.GetGroup() { + case "hcp": + return true + } + case "v2beta1": + switch typ.GetGroup() { + case "auth", "catalog", "mesh", "multicluster", "tenancy": + return true + } + } + return false +} + // Apply is called by the FSM with the bytes of a Raft log entry, with Consul's // envelope (i.e. type prefix and msgpack wrapper) stripped off. func (b *Backend) Apply(buf []byte, idx uint64) any { @@ -239,8 +258,18 @@ func (b *Backend) Apply(buf []byte, idx uint64) any { oldVsn := res.Version res.Version = strconv.Itoa(int(idx)) - if err := b.store.WriteCAS(res, oldVsn); err != nil { - return err + if isRetiredType(res.GetId().GetType()) { + // When a type is retired, the caller should think that the write + // was applied, but we should simply skip loading it. This means + // that retired types will not linger in the database indefinitely. 
+ b.logger.Warn("ignoring operation for retired type", + "operation", "apply", + "type", resource.ToGVK(res.GetId().GetType()), + ) + } else { + if err := b.store.WriteCAS(res, oldVsn); err != nil { + return err + } } return &pbstorage.LogResponse{ @@ -250,8 +279,19 @@ } case pbstorage.LogType_LOG_TYPE_DELETE: req := req.GetDelete() - if err := b.store.DeleteCAS(req.Id, req.Version); err != nil { - return err + + if isRetiredType(req.GetId().GetType()) { + // When a type is retired, the caller should still see the delete as + // applied, but we simply skip it. This means that retired types + // will not linger in the database indefinitely. + b.logger.Warn("ignoring operation for retired type", + "operation", "delete", + "type", resource.ToGVK(req.GetId().GetType()), + ) + } else { + if err := b.store.DeleteCAS(req.Id, req.Version); err != nil { + return err + } } return &pbstorage.LogResponse{ Response: &pbstorage.LogResponse_Delete{}, diff --git a/internal/storage/raft/backend_test.go b/internal/storage/raft/backend_test.go new file mode 100644 index 0000000000..fb959c8cf1 --- /dev/null +++ b/internal/storage/raft/backend_test.go @@ -0,0 +1,392 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package raft + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/proto-public/pbresource" +) + +func TestIsRetiredType(t *testing.T) { + var retired []*pbresource.Type + { + const ( + GroupName = "hcp" + Version = "v2" + + LinkKind = "Link" + TelemetryStateKind = "TelemetryState" + ) + + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: LinkKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: TelemetryStateKind, + }) + } + { + const ( + GroupName = "tenancy" + Version = "v2beta1" + + NamespaceKind = "Namespace" + PartitionKind = "Partition" + ) + + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: NamespaceKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: PartitionKind, + }) + } + { + const ( + GroupName = "multicluster" + Version = "v2beta1" + + SamenessGroupKind = "SamenessGroup" + ) + + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: SamenessGroupKind, + }) + } + { + const ( + GroupName = "mesh" + Version = "v2beta1" + + APIGatewayKind = "APIGateway" + ComputedExplicitDestinationsKind = "ComputedExplicitDestinations" + ComputedGatewayRoutesKind = "ComputedGatewayRoutes" + ComputedImplicitDestinationsKind = "ComputedImplicitDestinations" + ComputedProxyConfigurationKind = "ComputedProxyConfiguration" + ComputedRoutesKind = "ComputedRoutes" + DestinationPolicyKind = "DestinationPolicy" + DestinationsKind = "Destinations" + DestinationsConfigurationKind = "DestinationsConfiguration" + GRPCRouteKind = "GRPCRoute" + HTTPRouteKind = "HTTPRoute" + MeshConfigurationKind = "MeshConfiguration" + MeshGatewayKind = "MeshGateway" + ProxyConfigurationKind = "ProxyConfiguration" + ProxyStateTemplateKind = "ProxyStateTemplate" + TCPRouteKind = "TCPRoute" + ) + + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: APIGatewayKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion:
Version, + Kind: ComputedExplicitDestinationsKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: ComputedGatewayRoutesKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: ComputedImplicitDestinationsKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: ComputedProxyConfigurationKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: ComputedRoutesKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: DestinationPolicyKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: DestinationsKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: DestinationsConfigurationKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: GRPCRouteKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: HTTPRouteKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: MeshConfigurationKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: MeshGatewayKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: ProxyConfigurationKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: ProxyStateTemplateKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: TCPRouteKind, + }) + } + { + const ( + GroupName = "auth" + Version = "v2beta1" + + ComputedTrafficPermissionsKind = "ComputedTrafficPermissions" + NamespaceTrafficPermissionsKind = "NamespaceTrafficPermissions" + PartitionTrafficPermissionsKind = "PartitionTrafficPermissions" + TrafficPermissionsKind = "TrafficPermissions" + WorkloadIdentityKind = "WorkloadIdentity" + ) + + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: ComputedTrafficPermissionsKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: NamespaceTrafficPermissionsKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: PartitionTrafficPermissionsKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: TrafficPermissionsKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: WorkloadIdentityKind, + }) + } + { + const ( + GroupName = "catalog" + Version = "v2beta1" + + ComputedFailoverPolicyKind = "ComputedFailoverPolicy" + FailoverPolicyKind = "FailoverPolicy" + HealthChecksKind = "HealthChecks" + HealthStatusKind = "HealthStatus" + NodeKind = "Node" + NodeHealthStatusKind = "NodeHealthStatus" + ServiceKind = "Service" + ServiceEndpointsKind = "ServiceEndpoints" + VirtualIPsKind = "VirtualIPs" + WorkloadKind = "Workload" + ) + + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: ComputedFailoverPolicyKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: FailoverPolicyKind, + }) + retired = 
append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: HealthChecksKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: HealthStatusKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: NodeKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: NodeHealthStatusKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: ServiceKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: ServiceEndpointsKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: VirtualIPsKind, + }) + retired = append(retired, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: WorkloadKind, + }) + } + // The following types are still registered; they must be retained + // (not skipped) across a snapshot restore. + + var retained []*pbresource.Type + { + const ( + GroupName = "demo" + Version = "v2" + + AlbumKind = "Album" + ArtistKind = "Artist" + FestivalKind = "Festival" + ) + + retained = append(retained, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: AlbumKind, + }) + retained = append(retained, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: ArtistKind, + }) + retained = append(retained, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: FestivalKind, + }) + } + { + const ( + GroupName = "demo" + Version = "v1" + + AlbumKind = "Album" + ArtistKind = "Artist" + ConceptKind = "Concept" + ExecutiveKind = "Executive" + RecordLabelKind = "RecordLabel" + ) + + retained = append(retained, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: AlbumKind, + }) + retained = append(retained, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: ArtistKind, + }) + retained = append(retained, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: ConceptKind, + }) + retained = append(retained, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: ExecutiveKind, + }) + retained = append(retained, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: RecordLabelKind, + }) + } + { + const ( + GroupName = "multicluster" + Version = "v2" + + ComputedExportedServicesKind = "ComputedExportedServices" + ExportedServicesKind = "ExportedServices" + NamespaceExportedServicesKind = "NamespaceExportedServices" + PartitionExportedServicesKind = "PartitionExportedServices" + ) + + retained = append(retained, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: ComputedExportedServicesKind, + }) + retained = append(retained, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: ExportedServicesKind, + }) + retained = append(retained, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: NamespaceExportedServicesKind, + }) + retained = append(retained, &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: PartitionExportedServicesKind, + }) + } + + for _, typ := range retired { + t.Run("gone - "+resource.ToGVK(typ), func(t *testing.T) { + require.True(t, isRetiredType(typ)) + }) + } + for _, typ := range retained { + t.Run("allowed - "+resource.ToGVK(typ), func(t *testing.T) { + require.False(t, isRetiredType(typ)) + }) + } +} diff --git a/lib/stringslice/stringslice.go b/lib/stringslice/stringslice.go index 7c32864b94..71e90dba2a 100644 ---
a/lib/stringslice/stringslice.go +++ b/lib/stringslice/stringslice.go @@ -80,3 +80,18 @@ func CloneStringSlice(s []string) []string { copy(out, s) return out } + +// EqualMapKeys returns true if the slice contains exactly the keys of +// the map, ignoring any ordering. It assumes the slice does not contain +// duplicate entries. +func EqualMapKeys[V any](a []string, b map[string]V) bool { + if len(a) != len(b) { + return false + } + for _, ip := range a { + if _, ok := b[ip]; !ok { + return false + } + } + return true +} diff --git a/lib/stringslice/stringslice_test.go b/lib/stringslice/stringslice_test.go index dd25071757..b861d9c38a 100644 --- a/lib/stringslice/stringslice_test.go +++ b/lib/stringslice/stringslice_test.go @@ -63,3 +63,27 @@ func TestMergeSorted(t *testing.T) { }) } } + +func TestEqualMapKeys(t *testing.T) { + for _, tc := range []struct { + a []string + b map[string]int + same bool + }{ + // same + {nil, nil, true}, + {[]string{}, nil, true}, + {nil, map[string]int{}, true}, + {[]string{}, map[string]int{}, true}, + {[]string{"a"}, map[string]int{"a": 1}, true}, + {[]string{"b", "a"}, map[string]int{"a": 1, "b": 1}, true}, + // different + {[]string{"a"}, map[string]int{}, false}, + {[]string{}, map[string]int{"a": 1}, false}, + {[]string{"b", "a"}, map[string]int{"c": 1, "a": 1, "b": 1}, false}, + {[]string{"b", "a", "c"}, map[string]int{"a": 1, "b": 1}, false}, + } { + got := EqualMapKeys(tc.a, tc.b) + require.Equal(t, tc.same, got) + } +} diff --git a/proto-public/pbhcp/v2/hcp_config.pb.binary.go b/proto-public/pbhcp/v2/hcp_config.pb.binary.go deleted file mode 100644 index 66197e55b6..0000000000 --- a/proto-public/pbhcp/v2/hcp_config.pb.binary.go +++ /dev/null @@ -1,18 +0,0 @@ -// Code generated by protoc-gen-go-binary. DO NOT EDIT. -// source: pbhcp/v2/hcp_config.proto - -package hcpv2 - -import ( - "google.golang.org/protobuf/proto" -) - -// MarshalBinary implements encoding.BinaryMarshaler -func (msg *HCPConfig) MarshalBinary() ([]byte, error) { - return proto.Marshal(msg) -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler -func (msg *HCPConfig) UnmarshalBinary(b []byte) error { - return proto.Unmarshal(b, msg) -} diff --git a/proto-public/pbhcp/v2/hcp_config.pb.go b/proto-public/pbhcp/v2/hcp_config.pb.go deleted file mode 100644 index 66e6318792..0000000000 --- a/proto-public/pbhcp/v2/hcp_config.pb.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.33.0 -// protoc (unknown) -// source: pbhcp/v2/hcp_config.proto - -package hcpv2 - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// HCPConfig is used to configure the HCP SDK for communicating with -// the HashiCorp Cloud Platform. All configuration is optional with default -// values provided by the SDK. -type HCPConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // AuthUrl is the URL which will be used to authenticate.
- AuthUrl string `protobuf:"bytes,1,opt,name=auth_url,json=authUrl,proto3" json:"auth_url,omitempty"` - // ApiAddress is the address ([:port]) of the HCP api. - ApiAddress string `protobuf:"bytes,2,opt,name=api_address,json=apiAddress,proto3" json:"api_address,omitempty"` - // ScadaAddress is the address ([:port]) of the HCP SCADA endpoint. - ScadaAddress string `protobuf:"bytes,3,opt,name=scada_address,json=scadaAddress,proto3" json:"scada_address,omitempty"` - // TlsInsecureSkipVerify if true will ignore server name verification when making HTTPS requests - TlsInsecureSkipVerify bool `protobuf:"varint,4,opt,name=tls_insecure_skip_verify,json=tlsInsecureSkipVerify,proto3" json:"tls_insecure_skip_verify,omitempty"` -} - -func (x *HCPConfig) Reset() { - *x = HCPConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_pbhcp_v2_hcp_config_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HCPConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HCPConfig) ProtoMessage() {} - -func (x *HCPConfig) ProtoReflect() protoreflect.Message { - mi := &file_pbhcp_v2_hcp_config_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HCPConfig.ProtoReflect.Descriptor instead. -func (*HCPConfig) Descriptor() ([]byte, []int) { - return file_pbhcp_v2_hcp_config_proto_rawDescGZIP(), []int{0} -} - -func (x *HCPConfig) GetAuthUrl() string { - if x != nil { - return x.AuthUrl - } - return "" -} - -func (x *HCPConfig) GetApiAddress() string { - if x != nil { - return x.ApiAddress - } - return "" -} - -func (x *HCPConfig) GetScadaAddress() string { - if x != nil { - return x.ScadaAddress - } - return "" -} - -func (x *HCPConfig) GetTlsInsecureSkipVerify() bool { - if x != nil { - return x.TlsInsecureSkipVerify - } - return false -} - -var File_pbhcp_v2_hcp_config_proto protoreflect.FileDescriptor - -var file_pbhcp_v2_hcp_config_proto_rawDesc = []byte{ - 0x0a, 0x19, 0x70, 0x62, 0x68, 0x63, 0x70, 0x2f, 0x76, 0x32, 0x2f, 0x68, 0x63, 0x70, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x68, 0x61, 0x73, - 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, - 0x70, 0x2e, 0x76, 0x32, 0x22, 0xa5, 0x01, 0x0a, 0x09, 0x48, 0x43, 0x50, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x75, 0x74, 0x68, 0x55, 0x72, 0x6c, 0x12, 0x1f, 0x0a, - 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x23, - 0x0a, 0x0d, 0x73, 0x63, 0x61, 0x64, 0x61, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x63, 0x61, 0x64, 0x61, 0x41, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x74, 0x6c, 0x73, 0x5f, 0x69, 0x6e, 0x73, 0x65, 0x63, - 0x75, 0x72, 0x65, 0x5f, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x74, 0x6c, 0x73, 0x49, 0x6e, 0x73, 0x65, 0x63, 0x75, - 0x72, 0x65, 0x53, 0x6b, 0x69, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x42, 0xe5, 0x01, 0x0a, - 0x1b, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 
0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, - 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70, 0x2e, 0x76, 0x32, 0x42, 0x0e, 0x48, 0x63, - 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, - 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x68, 0x63, 0x70, 0x2f, 0x76, - 0x32, 0x3b, 0x68, 0x63, 0x70, 0x76, 0x32, 0xa2, 0x02, 0x03, 0x48, 0x43, 0x48, 0xaa, 0x02, 0x17, - 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, - 0x2e, 0x48, 0x63, 0x70, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x17, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, - 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x48, 0x63, 0x70, 0x5c, 0x56, - 0x32, 0xe2, 0x02, 0x23, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, - 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x48, 0x63, 0x70, 0x5c, 0x56, 0x32, 0x5c, 0x47, 0x50, 0x42, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1a, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, - 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x48, 0x63, 0x70, - 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_pbhcp_v2_hcp_config_proto_rawDescOnce sync.Once - file_pbhcp_v2_hcp_config_proto_rawDescData = file_pbhcp_v2_hcp_config_proto_rawDesc -) - -func file_pbhcp_v2_hcp_config_proto_rawDescGZIP() []byte { - file_pbhcp_v2_hcp_config_proto_rawDescOnce.Do(func() { - file_pbhcp_v2_hcp_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_pbhcp_v2_hcp_config_proto_rawDescData) - }) - return file_pbhcp_v2_hcp_config_proto_rawDescData -} - -var file_pbhcp_v2_hcp_config_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_pbhcp_v2_hcp_config_proto_goTypes = []interface{}{ - (*HCPConfig)(nil), // 0: hashicorp.consul.hcp.v2.HCPConfig -} -var file_pbhcp_v2_hcp_config_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_pbhcp_v2_hcp_config_proto_init() } -func file_pbhcp_v2_hcp_config_proto_init() { - if File_pbhcp_v2_hcp_config_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_pbhcp_v2_hcp_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HCPConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_pbhcp_v2_hcp_config_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_pbhcp_v2_hcp_config_proto_goTypes, - DependencyIndexes: file_pbhcp_v2_hcp_config_proto_depIdxs, - MessageInfos: file_pbhcp_v2_hcp_config_proto_msgTypes, - }.Build() - File_pbhcp_v2_hcp_config_proto = out.File - file_pbhcp_v2_hcp_config_proto_rawDesc = nil - file_pbhcp_v2_hcp_config_proto_goTypes = nil - file_pbhcp_v2_hcp_config_proto_depIdxs = nil -} diff --git a/proto-public/pbhcp/v2/hcp_config.proto 
b/proto-public/pbhcp/v2/hcp_config.proto deleted file mode 100644 index a61585a3d2..0000000000 --- a/proto-public/pbhcp/v2/hcp_config.proto +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -syntax = "proto3"; - -package hashicorp.consul.hcp.v2; - -// HCPConfig is used to configure the HCP SDK for communicating with -// the HashiCorp Cloud Platform. All configuration is optional with default -// values provided by the SDK. -message HCPConfig { - // AuthUrl is the URL which will be used to authenticate. - string auth_url = 1; - - // ApiAddress is the address ([:port]) of the HCP api. - string api_address = 2; - - // ScadaAddress is the address ([:port]) of the HCP SCADA endpoint. - string scada_address = 3; - - // TlsInsecureSkipVerify if true will ignore server name verification when making HTTPS requests - bool tls_insecure_skip_verify = 4; -} diff --git a/proto-public/pbhcp/v2/hcp_config_deepcopy.gen.go b/proto-public/pbhcp/v2/hcp_config_deepcopy.gen.go deleted file mode 100644 index 56e40830df..0000000000 --- a/proto-public/pbhcp/v2/hcp_config_deepcopy.gen.go +++ /dev/null @@ -1,27 +0,0 @@ -// Code generated by protoc-gen-deepcopy. DO NOT EDIT. -package hcpv2 - -import ( - proto "google.golang.org/protobuf/proto" -) - -// DeepCopyInto supports using HCPConfig within kubernetes types, where deepcopy-gen is used. -func (in *HCPConfig) DeepCopyInto(out *HCPConfig) { - proto.Reset(out) - proto.Merge(out, proto.Clone(in)) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HCPConfig. Required by controller-gen. -func (in *HCPConfig) DeepCopy() *HCPConfig { - if in == nil { - return nil - } - out := new(HCPConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new HCPConfig. Required by controller-gen. -func (in *HCPConfig) DeepCopyInterface() interface{} { - return in.DeepCopy() -} diff --git a/proto-public/pbhcp/v2/hcp_config_json.gen.go b/proto-public/pbhcp/v2/hcp_config_json.gen.go deleted file mode 100644 index efe1d13b85..0000000000 --- a/proto-public/pbhcp/v2/hcp_config_json.gen.go +++ /dev/null @@ -1,22 +0,0 @@ -// Code generated by protoc-json-shim. DO NOT EDIT. -package hcpv2 - -import ( - protojson "google.golang.org/protobuf/encoding/protojson" -) - -// MarshalJSON is a custom marshaler for HCPConfig -func (this *HCPConfig) MarshalJSON() ([]byte, error) { - str, err := HcpConfigMarshaler.Marshal(this) - return []byte(str), err -} - -// UnmarshalJSON is a custom unmarshaler for HCPConfig -func (this *HCPConfig) UnmarshalJSON(b []byte) error { - return HcpConfigUnmarshaler.Unmarshal(b, this) -} - -var ( - HcpConfigMarshaler = &protojson.MarshalOptions{} - HcpConfigUnmarshaler = &protojson.UnmarshalOptions{DiscardUnknown: false} -) diff --git a/proto-public/pbhcp/v2/link.pb.binary.go b/proto-public/pbhcp/v2/link.pb.binary.go deleted file mode 100644 index 7dbfbebae5..0000000000 --- a/proto-public/pbhcp/v2/link.pb.binary.go +++ /dev/null @@ -1,18 +0,0 @@ -// Code generated by protoc-gen-go-binary. DO NOT EDIT. 
-// source: pbhcp/v2/link.proto - -package hcpv2 - -import ( - "google.golang.org/protobuf/proto" -) - -// MarshalBinary implements encoding.BinaryMarshaler -func (msg *Link) MarshalBinary() ([]byte, error) { - return proto.Marshal(msg) -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler -func (msg *Link) UnmarshalBinary(b []byte) error { - return proto.Unmarshal(b, msg) -} diff --git a/proto-public/pbhcp/v2/link.pb.go b/proto-public/pbhcp/v2/link.pb.go deleted file mode 100644 index 25b2478458..0000000000 --- a/proto-public/pbhcp/v2/link.pb.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.33.0 -// protoc (unknown) -// source: pbhcp/v2/link.proto - -package hcpv2 - -import ( - _ "github.com/hashicorp/consul/proto-public/pbresource" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type AccessLevel int32 - -const ( - AccessLevel_ACCESS_LEVEL_UNSPECIFIED AccessLevel = 0 - AccessLevel_ACCESS_LEVEL_GLOBAL_READ_WRITE AccessLevel = 1 - AccessLevel_ACCESS_LEVEL_GLOBAL_READ_ONLY AccessLevel = 2 -) - -// Enum value maps for AccessLevel. -var ( - AccessLevel_name = map[int32]string{ - 0: "ACCESS_LEVEL_UNSPECIFIED", - 1: "ACCESS_LEVEL_GLOBAL_READ_WRITE", - 2: "ACCESS_LEVEL_GLOBAL_READ_ONLY", - } - AccessLevel_value = map[string]int32{ - "ACCESS_LEVEL_UNSPECIFIED": 0, - "ACCESS_LEVEL_GLOBAL_READ_WRITE": 1, - "ACCESS_LEVEL_GLOBAL_READ_ONLY": 2, - } -) - -func (x AccessLevel) Enum() *AccessLevel { - p := new(AccessLevel) - *p = x - return p -} - -func (x AccessLevel) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (AccessLevel) Descriptor() protoreflect.EnumDescriptor { - return file_pbhcp_v2_link_proto_enumTypes[0].Descriptor() -} - -func (AccessLevel) Type() protoreflect.EnumType { - return &file_pbhcp_v2_link_proto_enumTypes[0] -} - -func (x AccessLevel) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use AccessLevel.Descriptor instead. 
-func (AccessLevel) EnumDescriptor() ([]byte, []int) { - return file_pbhcp_v2_link_proto_rawDescGZIP(), []int{0} -} - -type Link struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ResourceId string `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` - ClientId string `protobuf:"bytes,2,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` - ClientSecret string `protobuf:"bytes,3,opt,name=client_secret,json=clientSecret,proto3" json:"client_secret,omitempty"` - HcpClusterUrl string `protobuf:"bytes,4,opt,name=hcp_cluster_url,json=hcpClusterUrl,proto3" json:"hcp_cluster_url,omitempty"` - AccessLevel AccessLevel `protobuf:"varint,5,opt,name=access_level,json=accessLevel,proto3,enum=hashicorp.consul.hcp.v2.AccessLevel" json:"access_level,omitempty"` - HcpConfig *HCPConfig `protobuf:"bytes,6,opt,name=hcp_config,json=hcpConfig,proto3" json:"hcp_config,omitempty"` -} - -func (x *Link) Reset() { - *x = Link{} - if protoimpl.UnsafeEnabled { - mi := &file_pbhcp_v2_link_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Link) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Link) ProtoMessage() {} - -func (x *Link) ProtoReflect() protoreflect.Message { - mi := &file_pbhcp_v2_link_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Link.ProtoReflect.Descriptor instead. -func (*Link) Descriptor() ([]byte, []int) { - return file_pbhcp_v2_link_proto_rawDescGZIP(), []int{0} -} - -func (x *Link) GetResourceId() string { - if x != nil { - return x.ResourceId - } - return "" -} - -func (x *Link) GetClientId() string { - if x != nil { - return x.ClientId - } - return "" -} - -func (x *Link) GetClientSecret() string { - if x != nil { - return x.ClientSecret - } - return "" -} - -func (x *Link) GetHcpClusterUrl() string { - if x != nil { - return x.HcpClusterUrl - } - return "" -} - -func (x *Link) GetAccessLevel() AccessLevel { - if x != nil { - return x.AccessLevel - } - return AccessLevel_ACCESS_LEVEL_UNSPECIFIED -} - -func (x *Link) GetHcpConfig() *HCPConfig { - if x != nil { - return x.HcpConfig - } - return nil -} - -var File_pbhcp_v2_link_proto protoreflect.FileDescriptor - -var file_pbhcp_v2_link_proto_rawDesc = []byte{ - 0x0a, 0x13, 0x70, 0x62, 0x68, 0x63, 0x70, 0x2f, 0x76, 0x32, 0x2f, 0x6c, 0x69, 0x6e, 0x6b, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, - 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70, 0x2e, 0x76, 0x32, 0x1a, 0x19, - 0x70, 0x62, 0x68, 0x63, 0x70, 0x2f, 0x76, 0x32, 0x2f, 0x68, 0x63, 0x70, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x70, 0x62, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa5, 0x02, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, - 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, - 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 
0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x23, - 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, - 0x72, 0x65, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x68, 0x63, 0x70, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x68, 0x63, - 0x70, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x55, 0x72, 0x6c, 0x12, 0x47, 0x0a, 0x0c, 0x61, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x24, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, - 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, - 0x65, 0x76, 0x65, 0x6c, 0x12, 0x41, 0x0a, 0x0a, 0x68, 0x63, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, - 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70, 0x2e, - 0x76, 0x32, 0x2e, 0x48, 0x43, 0x50, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x68, 0x63, - 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x06, 0xa2, 0x93, 0x04, 0x02, 0x08, 0x01, 0x2a, - 0x72, 0x0a, 0x0b, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x1c, - 0x0a, 0x18, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, - 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x22, 0x0a, 0x1e, - 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x47, 0x4c, 0x4f, - 0x42, 0x41, 0x4c, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x01, - 0x12, 0x21, 0x0a, 0x1d, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, - 0x5f, 0x47, 0x4c, 0x4f, 0x42, 0x41, 0x4c, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x4f, 0x4e, 0x4c, - 0x59, 0x10, 0x02, 0x42, 0xe0, 0x01, 0x0a, 0x1b, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, - 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70, - 0x2e, 0x76, 0x32, 0x42, 0x09, 0x4c, 0x69, 0x6e, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, - 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x68, 0x63, 0x70, - 0x2f, 0x76, 0x32, 0x3b, 0x68, 0x63, 0x70, 0x76, 0x32, 0xa2, 0x02, 0x03, 0x48, 0x43, 0x48, 0xaa, - 0x02, 0x17, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, - 0x75, 0x6c, 0x2e, 0x48, 0x63, 0x70, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x17, 0x48, 0x61, 0x73, 0x68, - 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x48, 0x63, 0x70, - 0x5c, 0x56, 0x32, 0xe2, 0x02, 0x23, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, - 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x48, 0x63, 0x70, 0x5c, 0x56, 0x32, 0x5c, 0x47, 0x50, - 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1a, 0x48, 0x61, 0x73, 0x68, - 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x48, - 0x63, 0x70, 0x3a, 0x3a, 0x56, 0x32, 0x62, 
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_pbhcp_v2_link_proto_rawDescOnce sync.Once - file_pbhcp_v2_link_proto_rawDescData = file_pbhcp_v2_link_proto_rawDesc -) - -func file_pbhcp_v2_link_proto_rawDescGZIP() []byte { - file_pbhcp_v2_link_proto_rawDescOnce.Do(func() { - file_pbhcp_v2_link_proto_rawDescData = protoimpl.X.CompressGZIP(file_pbhcp_v2_link_proto_rawDescData) - }) - return file_pbhcp_v2_link_proto_rawDescData -} - -var file_pbhcp_v2_link_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_pbhcp_v2_link_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_pbhcp_v2_link_proto_goTypes = []interface{}{ - (AccessLevel)(0), // 0: hashicorp.consul.hcp.v2.AccessLevel - (*Link)(nil), // 1: hashicorp.consul.hcp.v2.Link - (*HCPConfig)(nil), // 2: hashicorp.consul.hcp.v2.HCPConfig -} -var file_pbhcp_v2_link_proto_depIdxs = []int32{ - 0, // 0: hashicorp.consul.hcp.v2.Link.access_level:type_name -> hashicorp.consul.hcp.v2.AccessLevel - 2, // 1: hashicorp.consul.hcp.v2.Link.hcp_config:type_name -> hashicorp.consul.hcp.v2.HCPConfig - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_pbhcp_v2_link_proto_init() } -func file_pbhcp_v2_link_proto_init() { - if File_pbhcp_v2_link_proto != nil { - return - } - file_pbhcp_v2_hcp_config_proto_init() - if !protoimpl.UnsafeEnabled { - file_pbhcp_v2_link_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Link); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_pbhcp_v2_link_proto_rawDesc, - NumEnums: 1, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_pbhcp_v2_link_proto_goTypes, - DependencyIndexes: file_pbhcp_v2_link_proto_depIdxs, - EnumInfos: file_pbhcp_v2_link_proto_enumTypes, - MessageInfos: file_pbhcp_v2_link_proto_msgTypes, - }.Build() - File_pbhcp_v2_link_proto = out.File - file_pbhcp_v2_link_proto_rawDesc = nil - file_pbhcp_v2_link_proto_goTypes = nil - file_pbhcp_v2_link_proto_depIdxs = nil -} diff --git a/proto-public/pbhcp/v2/link.proto b/proto-public/pbhcp/v2/link.proto deleted file mode 100644 index ac11ca34b5..0000000000 --- a/proto-public/pbhcp/v2/link.proto +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -syntax = "proto3"; - -package hashicorp.consul.hcp.v2; - -import "pbhcp/v2/hcp_config.proto"; -import "pbresource/annotations.proto"; - -enum AccessLevel { - ACCESS_LEVEL_UNSPECIFIED = 0; - ACCESS_LEVEL_GLOBAL_READ_WRITE = 1; - ACCESS_LEVEL_GLOBAL_READ_ONLY = 2; -} - -message Link { - option (hashicorp.consul.resource.spec) = {scope: SCOPE_CLUSTER}; - - string resource_id = 1; - string client_id = 2; - string client_secret = 3; - string hcp_cluster_url = 4; - AccessLevel access_level = 5; - HCPConfig hcp_config = 6; -} diff --git a/proto-public/pbhcp/v2/link_deepcopy.gen.go b/proto-public/pbhcp/v2/link_deepcopy.gen.go deleted file mode 100644 index 9432d81b1e..0000000000 --- a/proto-public/pbhcp/v2/link_deepcopy.gen.go +++ /dev/null @@ -1,27 +0,0 @@ -// Code generated by protoc-gen-deepcopy. DO NOT EDIT. 
-package hcpv2 - -import ( - proto "google.golang.org/protobuf/proto" -) - -// DeepCopyInto supports using Link within kubernetes types, where deepcopy-gen is used. -func (in *Link) DeepCopyInto(out *Link) { - proto.Reset(out) - proto.Merge(out, proto.Clone(in)) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Link. Required by controller-gen. -func (in *Link) DeepCopy() *Link { - if in == nil { - return nil - } - out := new(Link) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new Link. Required by controller-gen. -func (in *Link) DeepCopyInterface() interface{} { - return in.DeepCopy() -} diff --git a/proto-public/pbhcp/v2/link_json.gen.go b/proto-public/pbhcp/v2/link_json.gen.go deleted file mode 100644 index cd476920dd..0000000000 --- a/proto-public/pbhcp/v2/link_json.gen.go +++ /dev/null @@ -1,22 +0,0 @@ -// Code generated by protoc-json-shim. DO NOT EDIT. -package hcpv2 - -import ( - protojson "google.golang.org/protobuf/encoding/protojson" -) - -// MarshalJSON is a custom marshaler for Link -func (this *Link) MarshalJSON() ([]byte, error) { - str, err := LinkMarshaler.Marshal(this) - return []byte(str), err -} - -// UnmarshalJSON is a custom unmarshaler for Link -func (this *Link) UnmarshalJSON(b []byte) error { - return LinkUnmarshaler.Unmarshal(b, this) -} - -var ( - LinkMarshaler = &protojson.MarshalOptions{} - LinkUnmarshaler = &protojson.UnmarshalOptions{DiscardUnknown: false} -) diff --git a/proto-public/pbhcp/v2/resources.rtypes.go b/proto-public/pbhcp/v2/resources.rtypes.go deleted file mode 100644 index 7ef93d0ee2..0000000000 --- a/proto-public/pbhcp/v2/resources.rtypes.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by protoc-gen-resource-types. DO NOT EDIT. - -package hcpv2 - -import ( - "github.com/hashicorp/consul/proto-public/pbresource" -) - -const ( - GroupName = "hcp" - Version = "v2" - - LinkKind = "Link" - TelemetryStateKind = "TelemetryState" -) - -var ( - LinkType = &pbresource.Type{ - Group: GroupName, - GroupVersion: Version, - Kind: LinkKind, - } - - TelemetryStateType = &pbresource.Type{ - Group: GroupName, - GroupVersion: Version, - Kind: TelemetryStateKind, - } -) diff --git a/proto-public/pbhcp/v2/telemetry_state.pb.binary.go b/proto-public/pbhcp/v2/telemetry_state.pb.binary.go deleted file mode 100644 index 278fe7c2bd..0000000000 --- a/proto-public/pbhcp/v2/telemetry_state.pb.binary.go +++ /dev/null @@ -1,38 +0,0 @@ -// Code generated by protoc-gen-go-binary. DO NOT EDIT. 
-// source: pbhcp/v2/telemetry_state.proto - -package hcpv2 - -import ( - "google.golang.org/protobuf/proto" -) - -// MarshalBinary implements encoding.BinaryMarshaler -func (msg *TelemetryState) MarshalBinary() ([]byte, error) { - return proto.Marshal(msg) -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler -func (msg *TelemetryState) UnmarshalBinary(b []byte) error { - return proto.Unmarshal(b, msg) -} - -// MarshalBinary implements encoding.BinaryMarshaler -func (msg *MetricsConfig) MarshalBinary() ([]byte, error) { - return proto.Marshal(msg) -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler -func (msg *MetricsConfig) UnmarshalBinary(b []byte) error { - return proto.Unmarshal(b, msg) -} - -// MarshalBinary implements encoding.BinaryMarshaler -func (msg *ProxyConfig) MarshalBinary() ([]byte, error) { - return proto.Marshal(msg) -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler -func (msg *ProxyConfig) UnmarshalBinary(b []byte) error { - return proto.Unmarshal(b, msg) -} diff --git a/proto-public/pbhcp/v2/telemetry_state.pb.go b/proto-public/pbhcp/v2/telemetry_state.pb.go deleted file mode 100644 index 10c48ab9b6..0000000000 --- a/proto-public/pbhcp/v2/telemetry_state.pb.go +++ /dev/null @@ -1,426 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.33.0 -// protoc (unknown) -// source: pbhcp/v2/telemetry_state.proto - -package hcpv2 - -import ( - _ "github.com/hashicorp/consul/proto-public/pbresource" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// TelemetryState describes configuration required to forward telemetry to the HashiCorp Cloud Platform. -// This resource is managed internally and is only written if the cluster is linked to HCP. Any -// manual changes to the resource will be reconciled and overwritten with the internally computed -// state. -type TelemetryState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // ResourceId is the identifier for the cluster linked with HCP. - ResourceId string `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` - // ClientId is the oauth client identifier for cluster. - // This client has capabilities limited to writing telemetry data for this cluster. - ClientId string `protobuf:"bytes,2,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` - // ClientSecret is the oauth secret used to authenticate requests to send telemetry data to HCP. 
- ClientSecret string `protobuf:"bytes,3,opt,name=client_secret,json=clientSecret,proto3" json:"client_secret,omitempty"` - HcpConfig *HCPConfig `protobuf:"bytes,4,opt,name=hcp_config,json=hcpConfig,proto3" json:"hcp_config,omitempty"` - Proxy *ProxyConfig `protobuf:"bytes,5,opt,name=proxy,proto3" json:"proxy,omitempty"` - Metrics *MetricsConfig `protobuf:"bytes,6,opt,name=metrics,proto3" json:"metrics,omitempty"` -} - -func (x *TelemetryState) Reset() { - *x = TelemetryState{} - if protoimpl.UnsafeEnabled { - mi := &file_pbhcp_v2_telemetry_state_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TelemetryState) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TelemetryState) ProtoMessage() {} - -func (x *TelemetryState) ProtoReflect() protoreflect.Message { - mi := &file_pbhcp_v2_telemetry_state_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TelemetryState.ProtoReflect.Descriptor instead. -func (*TelemetryState) Descriptor() ([]byte, []int) { - return file_pbhcp_v2_telemetry_state_proto_rawDescGZIP(), []int{0} -} - -func (x *TelemetryState) GetResourceId() string { - if x != nil { - return x.ResourceId - } - return "" -} - -func (x *TelemetryState) GetClientId() string { - if x != nil { - return x.ClientId - } - return "" -} - -func (x *TelemetryState) GetClientSecret() string { - if x != nil { - return x.ClientSecret - } - return "" -} - -func (x *TelemetryState) GetHcpConfig() *HCPConfig { - if x != nil { - return x.HcpConfig - } - return nil -} - -func (x *TelemetryState) GetProxy() *ProxyConfig { - if x != nil { - return x.Proxy - } - return nil -} - -func (x *TelemetryState) GetMetrics() *MetricsConfig { - if x != nil { - return x.Metrics - } - return nil -} - -// MetricsConfig configures metric specific collection details -type MetricsConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Endpoint is the HTTPS address and path to forward metrics to - Endpoint string `protobuf:"bytes,1,opt,name=endpoint,proto3" json:"endpoint,omitempty"` - // IncludeList contains patterns to match against metric names. Only matched metrics are forwarded. - IncludeList []string `protobuf:"bytes,2,rep,name=include_list,json=includeList,proto3" json:"include_list,omitempty"` - // Labels contains key value pairs that are associated with all metrics collected and fowarded. - Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Disabled toggles metric forwarding. If true, metric forwarding will stop until disabled is set to false. 
- Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` -} - -func (x *MetricsConfig) Reset() { - *x = MetricsConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_pbhcp_v2_telemetry_state_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MetricsConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MetricsConfig) ProtoMessage() {} - -func (x *MetricsConfig) ProtoReflect() protoreflect.Message { - mi := &file_pbhcp_v2_telemetry_state_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MetricsConfig.ProtoReflect.Descriptor instead. -func (*MetricsConfig) Descriptor() ([]byte, []int) { - return file_pbhcp_v2_telemetry_state_proto_rawDescGZIP(), []int{1} -} - -func (x *MetricsConfig) GetEndpoint() string { - if x != nil { - return x.Endpoint - } - return "" -} - -func (x *MetricsConfig) GetIncludeList() []string { - if x != nil { - return x.IncludeList - } - return nil -} - -func (x *MetricsConfig) GetLabels() map[string]string { - if x != nil { - return x.Labels - } - return nil -} - -func (x *MetricsConfig) GetDisabled() bool { - if x != nil { - return x.Disabled - } - return false -} - -// ProxyConfig describes configuration for forwarding requests through an http proxy -type ProxyConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // HttpProxy configures the http proxy to use for HTTP (non-TLS) requests. - HttpProxy string `protobuf:"bytes,1,opt,name=http_proxy,json=httpProxy,proto3" json:"http_proxy,omitempty"` - // HttpsProxy configures the http proxy to use for HTTPS (TLS) requests. - HttpsProxy string `protobuf:"bytes,2,opt,name=https_proxy,json=httpsProxy,proto3" json:"https_proxy,omitempty"` - // NoProxy can be configured to include domains which should NOT be forwarded through the configured http proxy - NoProxy []string `protobuf:"bytes,3,rep,name=no_proxy,json=noProxy,proto3" json:"no_proxy,omitempty"` -} - -func (x *ProxyConfig) Reset() { - *x = ProxyConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_pbhcp_v2_telemetry_state_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ProxyConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProxyConfig) ProtoMessage() {} - -func (x *ProxyConfig) ProtoReflect() protoreflect.Message { - mi := &file_pbhcp_v2_telemetry_state_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProxyConfig.ProtoReflect.Descriptor instead. 
-func (*ProxyConfig) Descriptor() ([]byte, []int) { - return file_pbhcp_v2_telemetry_state_proto_rawDescGZIP(), []int{2} -} - -func (x *ProxyConfig) GetHttpProxy() string { - if x != nil { - return x.HttpProxy - } - return "" -} - -func (x *ProxyConfig) GetHttpsProxy() string { - if x != nil { - return x.HttpsProxy - } - return "" -} - -func (x *ProxyConfig) GetNoProxy() []string { - if x != nil { - return x.NoProxy - } - return nil -} - -var File_pbhcp_v2_telemetry_state_proto protoreflect.FileDescriptor - -var file_pbhcp_v2_telemetry_state_proto_rawDesc = []byte{ - 0x0a, 0x1e, 0x70, 0x62, 0x68, 0x63, 0x70, 0x2f, 0x76, 0x32, 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x6d, - 0x65, 0x74, 0x72, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x17, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, - 0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70, 0x2e, 0x76, 0x32, 0x1a, 0x19, 0x70, 0x62, 0x68, 0x63, 0x70, - 0x2f, 0x76, 0x32, 0x2f, 0x68, 0x63, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x70, 0x62, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0xbc, 0x02, 0x0a, 0x0e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, - 0x63, 0x72, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x41, 0x0a, 0x0a, 0x68, 0x63, 0x70, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x68, - 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, - 0x68, 0x63, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x43, 0x50, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x52, 0x09, 0x68, 0x63, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3a, 0x0a, 0x05, 0x70, - 0x72, 0x6f, 0x78, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x68, 0x61, 0x73, - 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, - 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x52, 0x05, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x12, 0x40, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, - 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70, 0x2e, - 0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x3a, 0x06, 0xa2, 0x93, 0x04, 0x02, 0x08, - 0x01, 0x22, 0xf1, 0x01, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, - 0x21, 0x0a, 0x0c, 0x69, 0x6e, 0x63, 
0x6c, 0x75, 0x64, 0x65, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4c, 0x69, - 0x73, 0x74, 0x12, 0x4a, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, - 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x1a, - 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, - 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x68, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x70, 0x72, 0x6f, - 0x78, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x68, 0x74, 0x74, 0x70, 0x50, 0x72, - 0x6f, 0x78, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x74, 0x74, 0x70, 0x73, 0x5f, 0x70, 0x72, 0x6f, - 0x78, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x73, 0x50, - 0x72, 0x6f, 0x78, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x78, 0x79, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x6f, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x42, - 0xea, 0x01, 0x0a, 0x1b, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, - 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70, 0x2e, 0x76, 0x32, 0x42, - 0x13, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, - 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, - 0x2f, 0x70, 0x62, 0x68, 0x63, 0x70, 0x2f, 0x76, 0x32, 0x3b, 0x68, 0x63, 0x70, 0x76, 0x32, 0xa2, - 0x02, 0x03, 0x48, 0x43, 0x48, 0xaa, 0x02, 0x17, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, - 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x48, 0x63, 0x70, 0x2e, 0x56, 0x32, 0xca, - 0x02, 0x17, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, - 0x75, 0x6c, 0x5c, 0x48, 0x63, 0x70, 0x5c, 0x56, 0x32, 0xe2, 0x02, 0x23, 0x48, 0x61, 0x73, 0x68, - 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x48, 0x63, 0x70, - 0x5c, 0x56, 0x32, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, - 0x02, 0x1a, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, - 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x48, 0x63, 0x70, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_pbhcp_v2_telemetry_state_proto_rawDescOnce sync.Once - file_pbhcp_v2_telemetry_state_proto_rawDescData = file_pbhcp_v2_telemetry_state_proto_rawDesc -) - -func file_pbhcp_v2_telemetry_state_proto_rawDescGZIP() 
[]byte { - file_pbhcp_v2_telemetry_state_proto_rawDescOnce.Do(func() { - file_pbhcp_v2_telemetry_state_proto_rawDescData = protoimpl.X.CompressGZIP(file_pbhcp_v2_telemetry_state_proto_rawDescData) - }) - return file_pbhcp_v2_telemetry_state_proto_rawDescData -} - -var file_pbhcp_v2_telemetry_state_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_pbhcp_v2_telemetry_state_proto_goTypes = []interface{}{ - (*TelemetryState)(nil), // 0: hashicorp.consul.hcp.v2.TelemetryState - (*MetricsConfig)(nil), // 1: hashicorp.consul.hcp.v2.MetricsConfig - (*ProxyConfig)(nil), // 2: hashicorp.consul.hcp.v2.ProxyConfig - nil, // 3: hashicorp.consul.hcp.v2.MetricsConfig.LabelsEntry - (*HCPConfig)(nil), // 4: hashicorp.consul.hcp.v2.HCPConfig -} -var file_pbhcp_v2_telemetry_state_proto_depIdxs = []int32{ - 4, // 0: hashicorp.consul.hcp.v2.TelemetryState.hcp_config:type_name -> hashicorp.consul.hcp.v2.HCPConfig - 2, // 1: hashicorp.consul.hcp.v2.TelemetryState.proxy:type_name -> hashicorp.consul.hcp.v2.ProxyConfig - 1, // 2: hashicorp.consul.hcp.v2.TelemetryState.metrics:type_name -> hashicorp.consul.hcp.v2.MetricsConfig - 3, // 3: hashicorp.consul.hcp.v2.MetricsConfig.labels:type_name -> hashicorp.consul.hcp.v2.MetricsConfig.LabelsEntry - 4, // [4:4] is the sub-list for method output_type - 4, // [4:4] is the sub-list for method input_type - 4, // [4:4] is the sub-list for extension type_name - 4, // [4:4] is the sub-list for extension extendee - 0, // [0:4] is the sub-list for field type_name -} - -func init() { file_pbhcp_v2_telemetry_state_proto_init() } -func file_pbhcp_v2_telemetry_state_proto_init() { - if File_pbhcp_v2_telemetry_state_proto != nil { - return - } - file_pbhcp_v2_hcp_config_proto_init() - if !protoimpl.UnsafeEnabled { - file_pbhcp_v2_telemetry_state_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TelemetryState); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pbhcp_v2_telemetry_state_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MetricsConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pbhcp_v2_telemetry_state_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProxyConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_pbhcp_v2_telemetry_state_proto_rawDesc, - NumEnums: 0, - NumMessages: 4, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_pbhcp_v2_telemetry_state_proto_goTypes, - DependencyIndexes: file_pbhcp_v2_telemetry_state_proto_depIdxs, - MessageInfos: file_pbhcp_v2_telemetry_state_proto_msgTypes, - }.Build() - File_pbhcp_v2_telemetry_state_proto = out.File - file_pbhcp_v2_telemetry_state_proto_rawDesc = nil - file_pbhcp_v2_telemetry_state_proto_goTypes = nil - file_pbhcp_v2_telemetry_state_proto_depIdxs = nil -} diff --git a/proto-public/pbhcp/v2/telemetry_state.proto b/proto-public/pbhcp/v2/telemetry_state.proto deleted file mode 100644 index bc9521a5ff..0000000000 --- a/proto-public/pbhcp/v2/telemetry_state.proto +++ /dev/null @@ -1,55 +0,0 @@ -syntax = "proto3"; - -package hashicorp.consul.hcp.v2; - -import 
"pbhcp/v2/hcp_config.proto"; -import "pbresource/annotations.proto"; - -// TelemetryState describes configuration required to forward telemetry to the HashiCorp Cloud Platform. -// This resource is managed internally and is only written if the cluster is linked to HCP. Any -// manual changes to the resource will be reconciled and overwritten with the internally computed -// state. -message TelemetryState { - option (hashicorp.consul.resource.spec) = {scope: SCOPE_CLUSTER}; - - // ResourceId is the identifier for the cluster linked with HCP. - string resource_id = 1; - - // ClientId is the oauth client identifier for cluster. - // This client has capabilities limited to writing telemetry data for this cluster. - string client_id = 2; - - // ClientSecret is the oauth secret used to authenticate requests to send telemetry data to HCP. - string client_secret = 3; - - HCPConfig hcp_config = 4; - ProxyConfig proxy = 5; - MetricsConfig metrics = 6; -} - -// MetricsConfig configures metric specific collection details -message MetricsConfig { - // Endpoint is the HTTPS address and path to forward metrics to - string endpoint = 1; - - // IncludeList contains patterns to match against metric names. Only matched metrics are forwarded. - repeated string include_list = 2; - - // Labels contains key value pairs that are associated with all metrics collected and fowarded. - map labels = 3; - - // Disabled toggles metric forwarding. If true, metric forwarding will stop until disabled is set to false. - bool disabled = 4; -} - -// ProxyConfig describes configuration for forwarding requests through an http proxy -message ProxyConfig { - // HttpProxy configures the http proxy to use for HTTP (non-TLS) requests. - string http_proxy = 1; - - // HttpsProxy configures the http proxy to use for HTTPS (TLS) requests. - string https_proxy = 2; - - // NoProxy can be configured to include domains which should NOT be forwarded through the configured http proxy - repeated string no_proxy = 3; -} diff --git a/proto-public/pbhcp/v2/telemetry_state_deepcopy.gen.go b/proto-public/pbhcp/v2/telemetry_state_deepcopy.gen.go deleted file mode 100644 index 7d71330de6..0000000000 --- a/proto-public/pbhcp/v2/telemetry_state_deepcopy.gen.go +++ /dev/null @@ -1,69 +0,0 @@ -// Code generated by protoc-gen-deepcopy. DO NOT EDIT. -package hcpv2 - -import ( - proto "google.golang.org/protobuf/proto" -) - -// DeepCopyInto supports using TelemetryState within kubernetes types, where deepcopy-gen is used. -func (in *TelemetryState) DeepCopyInto(out *TelemetryState) { - proto.Reset(out) - proto.Merge(out, proto.Clone(in)) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TelemetryState. Required by controller-gen. -func (in *TelemetryState) DeepCopy() *TelemetryState { - if in == nil { - return nil - } - out := new(TelemetryState) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new TelemetryState. Required by controller-gen. -func (in *TelemetryState) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using MetricsConfig within kubernetes types, where deepcopy-gen is used. -func (in *MetricsConfig) DeepCopyInto(out *MetricsConfig) { - proto.Reset(out) - proto.Merge(out, proto.Clone(in)) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsConfig. Required by controller-gen. 
-func (in *MetricsConfig) DeepCopy() *MetricsConfig { - if in == nil { - return nil - } - out := new(MetricsConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new MetricsConfig. Required by controller-gen. -func (in *MetricsConfig) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using ProxyConfig within kubernetes types, where deepcopy-gen is used. -func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) { - proto.Reset(out) - proto.Merge(out, proto.Clone(in)) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig. Required by controller-gen. -func (in *ProxyConfig) DeepCopy() *ProxyConfig { - if in == nil { - return nil - } - out := new(ProxyConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig. Required by controller-gen. -func (in *ProxyConfig) DeepCopyInterface() interface{} { - return in.DeepCopy() -} diff --git a/proto-public/pbhcp/v2/telemetry_state_json.gen.go b/proto-public/pbhcp/v2/telemetry_state_json.gen.go deleted file mode 100644 index a07647002f..0000000000 --- a/proto-public/pbhcp/v2/telemetry_state_json.gen.go +++ /dev/null @@ -1,44 +0,0 @@ -// Code generated by protoc-json-shim. DO NOT EDIT. -package hcpv2 - -import ( - protojson "google.golang.org/protobuf/encoding/protojson" -) - -// MarshalJSON is a custom marshaler for TelemetryState -func (this *TelemetryState) MarshalJSON() ([]byte, error) { - str, err := TelemetryStateMarshaler.Marshal(this) - return []byte(str), err -} - -// UnmarshalJSON is a custom unmarshaler for TelemetryState -func (this *TelemetryState) UnmarshalJSON(b []byte) error { - return TelemetryStateUnmarshaler.Unmarshal(b, this) -} - -// MarshalJSON is a custom marshaler for MetricsConfig -func (this *MetricsConfig) MarshalJSON() ([]byte, error) { - str, err := TelemetryStateMarshaler.Marshal(this) - return []byte(str), err -} - -// UnmarshalJSON is a custom unmarshaler for MetricsConfig -func (this *MetricsConfig) UnmarshalJSON(b []byte) error { - return TelemetryStateUnmarshaler.Unmarshal(b, this) -} - -// MarshalJSON is a custom marshaler for ProxyConfig -func (this *ProxyConfig) MarshalJSON() ([]byte, error) { - str, err := TelemetryStateMarshaler.Marshal(this) - return []byte(str), err -} - -// UnmarshalJSON is a custom unmarshaler for ProxyConfig -func (this *ProxyConfig) UnmarshalJSON(b []byte) error { - return TelemetryStateUnmarshaler.Unmarshal(b, this) -} - -var ( - TelemetryStateMarshaler = &protojson.MarshalOptions{} - TelemetryStateUnmarshaler = &protojson.UnmarshalOptions{DiscardUnknown: false} -) diff --git a/scan.hcl b/scan.hcl index 0da769efb4..f67bb4b24e 100644 --- a/scan.hcl +++ b/scan.hcl @@ -28,8 +28,7 @@ repository { # periodically cleaned up to remove items that are no longer found by the scanner. triage { suppress { - # N.b. `vulnerabilites` is the correct spelling for this tool. 
- vulnerabilites = [ + vulnerabilities = [ ] paths = [ "internal/tools/proto-gen-rpc-glue/e2e/consul/*", diff --git a/test-integ/go.mod b/test-integ/go.mod index d6293c4744..dcbb5ae90b 100644 --- a/test-integ/go.mod +++ b/test-integ/go.mod @@ -5,7 +5,7 @@ go 1.22 toolchain go1.22.5 require ( - github.com/google/go-cmp v0.5.9 + github.com/google/go-cmp v0.6.0 github.com/hashicorp/consul/api v1.29.4 github.com/hashicorp/consul/proto-public v0.6.2 github.com/hashicorp/consul/sdk v0.16.1 @@ -16,7 +16,7 @@ require ( github.com/mitchellh/copystructure v1.2.0 github.com/rboyer/blankspace v0.2.1 github.com/stretchr/testify v1.8.4 - golang.org/x/net v0.24.0 + golang.org/x/net v0.25.0 google.golang.org/grpc v1.58.3 ) @@ -64,7 +64,7 @@ require ( github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.2.1 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hashicorp/hcl v1.0.1-vault-7 // indirect github.com/hashicorp/hcl/v2 v2.16.2 // indirect github.com/hashicorp/memberlist v0.5.0 // indirect github.com/hashicorp/serf v0.10.1 // indirect @@ -99,12 +99,13 @@ require ( github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569 // indirect github.com/testcontainers/testcontainers-go v0.22.0 // indirect github.com/zclconf/go-cty v1.12.1 // indirect - golang.org/x/crypto v0.22.0 // indirect + golang.org/x/crypto v0.31.0 // indirect golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 // indirect - golang.org/x/mod v0.13.0 // indirect - golang.org/x/sys v0.20.0 // indirect - golang.org/x/text v0.14.0 // indirect - golang.org/x/tools v0.14.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/text v0.21.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/test-integ/go.sum b/test-integ/go.sum index 4e4c0e2d5e..ede92ac102 100644 --- a/test-integ/go.sum +++ b/test-integ/go.sum @@ -98,8 +98,9 @@ github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -141,8 +142,8 @@ github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0 
h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I= +github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= github.com/hashicorp/hcl/v2 v2.16.2 h1:mpkHZh/Tv+xet3sy3F9Ld4FyI2tUpWe9x3XtPx9f1a0= github.com/hashicorp/hcl/v2 v2.16.2/go.mod h1:JRmR89jycNkrrqnMmvPDMd56n1rQJ2Q6KocSLCMCXng= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= @@ -310,8 +311,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ= golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -319,8 +320,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= -golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -336,8 +337,8 @@ golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -346,8 +347,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= -golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -381,8 +382,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -395,8 +396,9 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -408,8 +410,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d 
h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/test/integration/consul-container/go.mod b/test/integration/consul-container/go.mod index aaebeb412a..4e3e4de0fd 100644 --- a/test/integration/consul-container/go.mod +++ b/test/integration/consul-container/go.mod @@ -20,7 +20,7 @@ require ( github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-uuid v1.0.3 github.com/hashicorp/go-version v1.2.1 - github.com/hashicorp/hcl v1.0.0 + github.com/hashicorp/hcl v1.0.1-vault-7 github.com/hashicorp/serf v0.10.1 github.com/itchyny/gojq v0.12.12 github.com/mitchellh/copystructure v1.2.0 @@ -30,7 +30,7 @@ require ( github.com/stretchr/testify v1.8.4 github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569 github.com/testcontainers/testcontainers-go v0.22.0 - golang.org/x/mod v0.13.0 + golang.org/x/mod v0.17.0 google.golang.org/grpc v1.58.3 ) @@ -58,7 +58,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.0.1 // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/uuid v1.4.0 // indirect github.com/hashicorp/consul-server-connection-manager v0.1.4 // indirect github.com/hashicorp/consul/proto-public v0.6.2 // indirect @@ -94,12 +94,13 @@ require ( github.com/prometheus/procfs v0.8.0 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - golang.org/x/crypto v0.22.0 // indirect + golang.org/x/crypto v0.31.0 // indirect golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 // indirect - golang.org/x/net v0.24.0 // indirect - golang.org/x/sys v0.20.0 // indirect - golang.org/x/text v0.14.0 // indirect - golang.org/x/tools v0.14.0 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/text v0.21.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5 // indirect google.golang.org/protobuf v1.33.0 // indirect diff --git a/test/integration/consul-container/go.sum b/test/integration/consul-container/go.sum index 2a7b3b8015..5975d9fcba 100644 --- a/test/integration/consul-container/go.sum +++ b/test/integration/consul-container/go.sum @@ -107,8 +107,9 @@ github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -150,8 +151,8 @@ github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I= +github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= @@ -306,8 +307,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ= golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8= @@ -319,8 +320,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= -golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -339,8 +340,8 @@ golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -351,8 +352,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= -golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -387,8 +388,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -401,8 +402,9 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= 
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -418,8 +420,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/website/content/api-docs/api-structure.mdx b/website/content/api-docs/api-structure.mdx index 32a8a2c17c..edfbcf3258 100644 --- a/website/content/api-docs/api-structure.mdx +++ b/website/content/api-docs/api-structure.mdx @@ -159,8 +159,3 @@ UUID-format identifiers generated by the Consul API use the These UUID-format strings are generated using high quality, purely random bytes. It is not intended to be RFC compliant, merely to use a well-understood string representation of a 128-bit value. - -## CORS HTTP Response Headers - -As of Consul 1.18, Consul adds an HTTP header `Access-Control-Expose-Headers: x-consul-default-acl-policy` to -all responses in order to support linking self-managed Enterprise clusters to HCP Consul Central. diff --git a/website/content/api-docs/hcp-link.mdx b/website/content/api-docs/hcp-link.mdx deleted file mode 100644 index 7f54306691..0000000000 --- a/website/content/api-docs/hcp-link.mdx +++ /dev/null @@ -1,190 +0,0 @@ ---- -layout: api -page_title: HCP Linking HTTP API -description: The Link resource allows for linking your cluster to HCP Consul Central. ---- - -# Link HTTP API - --> **1.18.0+:** The Link API is available in Consul versions 1.18.0 and newer. - --> **Note:** This endpoint does not use the `/v1/` prefix. - -The `/api/hcp/v2/link/global` endpoint allows you to link your Consul cluster to [HCP Consul Central](/hcp/docs/consul/concepts/consul-central). - -## Establish or update link to HCP Consul Central - -This endpoint creates or updates a Link, which establishes a connection with HCP Consul Central. - -| Method | Path | Produces | -| ------ | ----------------------------- | ------------------ | -| `PUT` | `/api/hcp/v2/link/global` | `application/json` | - -The table below shows this endpoint's support for -[blocking queries](/consul/api-docs/features/blocking), -[consistency modes](/consul/api-docs/features/consistency), -[agent caching](/consul/api-docs/features/caching), and -[required ACLs](/consul/api-docs/api-structure#authentication). 
- -| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | -| ---------------- | --------------------- | ------------- | ------------------------------ | -| `NO` | `stale`, `consistent` | `none` | `operator:write`, `acl:write` | - -### Link API vs. configuration-based linking - -The Link API described here is an alternative method to accomplish the same thing as [configuration-based linking](/consul/docs/agent/config/config-files#self-managed-hcp-parameters). You should generally only choose one method or the other for linking your cluster, not both. If you do use both methods, they interact in the following ways: - -* When Consul is started, values set in the `cloud` configuration will take precedence over what was previously set by the API or CLI. -* Clusters can only be unlinked from HCP Consul Central by the API or CLI, regardless of if they were established via configuration, API, or CLI. - -### JSON Request Body Schema - -- `data` `(object: <required>)` - - - `resourceId` `(string: <required>)` - The ID of the HCP Consul Central cluster to link to. Has the format of: - `organization/<organization-id>/project/<project-id>/hashicorp.consul.global-network-manager.cluster/<cluster-name>` - - - `clientId` `(string: <required>)` - The ID used to authenticate to HCP, which is returned as part of the HCP - Consul Central cluster creation. It can also be obtained by fetching the HCP Consul Central cluster secrets. - - - `clientSecret` `(string: <required>)` - The secret used to authenticate to HCP, which is returned as part of the - HCP Consul Central cluster creation. It can also be obtained by fetching the HCP Consul Central cluster secrets. - -### Sample Payload - -```json -{ - "data": { - "resourceId": "organization/c0bf7aac-7690-4905-a8aa-889df1510314/project/6e82a47b-79af-4920-ad42-c0f74421ab52/hashicorp.consul.global-network-manager.cluster/my-cluster", - "clientId": "3jz6zk2tlr802htzwquczlxlsrohlpm5", - "clientSecret": "07ywcvaqmg1f8ko9eq4julep2tfglnv4o18rz3py9dik4ywox0ytoscycn39o4vs" - } -} -``` - -### Sample Request - -```shell-session -$ curl --request PUT \ - --header "X-Consul-Token: 9cdjse6c-2dia-3720-81fe-5dae3k714a6e" \ - --data @payload.json \ - http://127.0.0.1:8500/api/hcp/v2/link/global -``` - -### Sample Response - -```json -{ - "data": { - "clientId": "3jz6zk2tlr802htzwquczlxlsrohlpm5", - "clientSecret": "07ywcvaqmg1f8ko9eq4julep2tfglnv4o18rz3py9dik4ywox0ytoscycn39o4vs", - "resourceId": "organization/c0bf7aac-7690-4905-a8aa-889df1510314/project/6e82a47b-79af-4920-ad42-c0f74421ab52/hashicorp.consul.global-network-manager.cluster/my-cluster" - }, - "generation": "01HMHTHZND8VJDXHHJBKDR4TTA", - "id": { - "name": "global", - "tenancy": { - "peerName": "local" - }, - "type": { - "group": "hcp", - "groupVersion": "v2", - "kind": "Link" - }, - "uid": "01HMHTHZND8VJDXHHJBGY1KG0F" - }, - "version": "60" -} -``` - -## Read Link - -This endpoint reads a Link so you can view information about your cluster's current linking status. - -| Method | Path | Produces | -| ------ | ----------------------------- | ------------------ | -| `GET` | `/api/hcp/v2/link/global` | `application/json` | - -The table below shows this endpoint's support for -[blocking queries](/consul/api-docs/features/blocking), -[consistency modes](/consul/api-docs/features/consistency), -[agent caching](/consul/api-docs/features/caching), and -[required ACLs](/consul/api-docs/api-structure#authentication).
- -| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | -| ---------------- | --------------------- | ------------- | ---------------- | -| `NO` | `stale`, `consistent` | `none` | `operator:read` | - -### Sample Request - -```shell-session -$ curl --header "X-Consul-Token: 5cdcae6c-0cce-4210-86fe-5dff3b984a6e" \ - http://127.0.0.1:8500/api/hcp/v2/link/global -``` - -### Sample Response -```json -{ - "data": { - "accessLevel": "ACCESS_LEVEL_GLOBAL_READ_WRITE", - "clientId": "3jz6zk2tlr802htzwquczlxlsrohlpm5", - "clientSecret": "07ywcvaqmg1f8ko9eq4julep2tfglnv4o18rz3py9dik4ywox0ytoscycn39o4vs", - "hcpClusterUrl": "https://portal.cloud.hashicorp.com/services/consul/clusters/self-managed/my-cluster?project_id=6e82a47b-79af-4920-ad42-c0f74421ab52", - "resourceId": "organization/c0bf7aac-7690-4905-a8aa-889df1510314/project/6e82a47b-79af-4920-ad42-c0f74421ab52/hashicorp.consul.global-network-manager.cluster/my-cluster" - }, - "generation": "01HMHTF4K5V27T91FMVHGFE87F", - "id": { - "name": "global", - "tenancy": { - "peerName": "local" - }, - "type": { - "group": "hcp", - "groupVersion": "v2", - "kind": "Link" - }, - "uid": "01HMHS7DJ9FEQH5XK1VD0W0536" - }, - "status": { - "consul.io/hcp/link": { - "conditions": [ - { - "message": "Successfully linked to cluster 'organization/c0bf7aac-7690-4905-a8aa-889df1510314/project/6e82a47b-79af-4920-ad42-c0f74421ab52/hashicorp.consul.global-network-manager.cluster/my-cluster'", - "reason": "SUCCESS", - "state": "STATE_TRUE", - "type": "linked" - } - ], - "observedGeneration": "01HMHS7DN1C417JXJF7DBVS79F", - "updatedAt": "2024-01-19T21:14:59.707744Z" - } - }, - "version": "53" -} -``` - -## Delete Link - -This endpoint deletes a Link, which removes the cluster's connection with HCP Consul Central. - -| Method | Path | Produces | -| --------- | ----------------------------- | ------------------ | -| `DELETE` | `/api/hcp/v2/link/global` | `application/json` | - -The table below shows this endpoint's support for -[blocking queries](/consul/api-docs/features/blocking), -[consistency modes](/consul/api-docs/features/consistency), -[agent caching](/consul/api-docs/features/caching), and -[required ACLs](/consul/api-docs/api-structure#authentication). - -| Blocking Queries | Consistency Modes | Agent Caching | ACL Required | -| ---------------- | ---------------------- | ------------- | -------------------------------- | -| `NO` | `stale`, `consistent` | `none` | `operator:write`, `acl:write` | - -### Sample Request - -```shell-session -$ curl --request DELETE \ - --header "X-Consul-Token: 5cdcae6c-0cce-4210-86fe-5dff3b984a6e" \ - http://127.0.0.1:8500/api/hcp/v2/link/global -``` diff --git a/website/content/api-docs/index.mdx b/website/content/api-docs/index.mdx index 1b972e4425..20d0021e30 100644 --- a/website/content/api-docs/index.mdx +++ b/website/content/api-docs/index.mdx @@ -49,7 +49,6 @@ The following API endpoints help you manage Consul operations. - [`/namespace`](/consul/api-docs/namespaces): Create and manage namespaces in Consul. Namespaces isolate groups of resources to lower operational overhead. - [`/snapshot`](/consul/api-docs/snapshot): Save and restore Consul server state in the event of a disaster. - [`/txn`](/consul/api-docs/txn): Apply multiple operations, such as updating the catalog and retrieving multiple KV entries, in a single transaction. -- [`/api/hcp/v2/link/global`](/consul/api-docs/hcp-link): Link cluster to [HCP Consul Central](/hcp/docs/consul/concepts/consul-central). 
HCP Consul Central is a management plane service hosted by HashiCorp that enables you to monitor and manage Consul clusters. ## Configure your services dynamically diff --git a/website/content/docs/connect/cluster-peering/index.mdx b/website/content/docs/connect/cluster-peering/index.mdx index 2aa5147235..83cc4b97e4 100644 --- a/website/content/docs/connect/cluster-peering/index.mdx +++ b/website/content/docs/connect/cluster-peering/index.mdx @@ -70,13 +70,6 @@ The following resources are available to help you use Consul's cluster peering f - [Manage L7 traffic with cluster peering on Kubernetes](/consul/docs/k8s/connect/cluster-peering/usage/l7-traffic) - [Create sameness groups on Kubernetes](/consul/docs/k8s/connect/cluster-peering/usage/create-sameness-groups) -### HCP Consul Central documentation - -- [Cluster peering](/hcp/docs/consul/usage/cluster-peering) -- [Cluster peering topologies](/hcp/docs/consul/usage/cluster-peering/topologies) -- [Establish cluster peering connections on HCP Consul Central](/hcp/docs/consul/usage/cluster-peering/create-connections) -- [Cluster peering with HCP Consul Central](/hcp/docs/extend/cluster-peering/establish) - ### Reference documentation - [Cluster peering technical specifications](/consul/docs/connect/cluster-peering/tech-specs) diff --git a/website/content/docs/connect/cluster-peering/usage/create-sameness-groups.mdx b/website/content/docs/connect/cluster-peering/usage/create-sameness-groups.mdx index 49b8141777..71537355eb 100644 --- a/website/content/docs/connect/cluster-peering/usage/create-sameness-groups.mdx +++ b/website/content/docs/connect/cluster-peering/usage/create-sameness-groups.mdx @@ -43,8 +43,6 @@ Mesh gateways are required for cluster peering connections and recommended to se You must establish connections with cluster peers before you can create a sameness group that includes them. A cluster peering connection exists between two admin partitions in different datacenters, and each connection between two partitions must be established separately with each peer. Refer to [establish cluster peering connections](/consul/docs/connect/cluster-peering/usage/establish-cluster-peering) for step-by-step instructions. -You can establish and manage cluster peering relationships between all of your self-managed clusters using [HCP Consul Central](/hcp/docs/consul/concepts/consul-central). For more information, refer to [cluster peering global view](/hcp/docs/consul/monitor/consul-central/global-views#cluster-peering) in the HCP documentation. - To establish cluster peering connections and define a group as part of the same workflow, follow instructions up to [Export services between clusters](/consul/docs/connect/cluster-peering/usage/establish-cluster-peering#export-services-between-clusters). You can use the same exported services and service intention configuration entries to establish the cluster peering connection and create the sameness group. 
## Create a sameness group diff --git a/website/content/docs/connect/cluster-peering/usage/establish-cluster-peering.mdx b/website/content/docs/connect/cluster-peering/usage/establish-cluster-peering.mdx index 4e0128bb3e..b2ed30f460 100644 --- a/website/content/docs/connect/cluster-peering/usage/establish-cluster-peering.mdx +++ b/website/content/docs/connect/cluster-peering/usage/establish-cluster-peering.mdx @@ -16,7 +16,7 @@ This page details the process for establishing a cluster peering connection betw Cluster peering between services cannot be established until all four steps are complete. If you want to establish cluster peering connections and create sameness groups at the same time, refer to the guidance in [create sameness groups](/consul/docs/connect/cluster-peering/usage/create-sameness-groups). -For Kubernetes guidance, refer to [Establish cluster peering connections on Kubernetes](/consul/docs/k8s/connect/cluster-peering/usage/establish-peering). For HCP Consul Central guidance, refer to [Establish cluster peering connections on HCP Consul Central](/hcp/docs/consul/usage/cluster-peering/create-connections). +For Kubernetes guidance, refer to [Establish cluster peering connections on Kubernetes](/consul/docs/k8s/connect/cluster-peering/usage/establish-peering). ## Requirements diff --git a/website/content/docs/connect/config-entries/mesh.mdx b/website/content/docs/connect/config-entries/mesh.mdx index b64062e518..cf75c4bfa4 100644 --- a/website/content/docs/connect/config-entries/mesh.mdx +++ b/website/content/docs/connect/config-entries/mesh.mdx @@ -268,6 +268,8 @@ Note that the Kubernetes example does not include a `partition` field. Configura Enable options under `HTTP.Incoming.RequestNormalization` to apply normalization to all inbound traffic to mesh proxies. +~> **Compatibility warning**: This feature is available as of Consul CE 1.20.1 and Consul Enterprise 1.20.1, 1.19.2, 1.18.3, and 1.15.15. We recommend upgrading to the latest version of Consul to take advantage of the latest features and improvements. + ```hcl diff --git a/website/content/docs/connect/config-entries/service-intentions.mdx b/website/content/docs/connect/config-entries/service-intentions.mdx index 4440b2a76c..47d980f9a8 100644 --- a/website/content/docs/connect/config-entries/service-intentions.mdx +++ b/website/content/docs/connect/config-entries/service-intentions.mdx @@ -48,7 +48,9 @@ The following outline shows how to format the service intentions configuration e - [`Exact`](#sources-permissions-http-header): string - [`Prefix`](#sources-permissions-http-header): string - [`Suffix`](#sources-permissions-http-header): string - - [`Regex`](#sources-permissions-http-header): string + - [`Contains`](#sources-permissions-http-header): string + - [`Regex`](#sources-permissions-http-header): string + - [`IgnoreCase`](#sources-permissions-http-header): boolean | `false` - [`Invert`](#sources-permissions-http-header): boolean | `false` - [`Precedence`](#sources-precedence): number - [`Type`](#sources-type): string | `consul` @@ -648,7 +650,9 @@ Each member of the `Header` list is a map that contains a `Name` field and at least one match criterion. | `Exact` | Specifies a value for the header key set in the `Name` field. If the request header value matches the `Exact` value, Consul applies the permission. Do not specify `Exact` if `Present`, `Prefix`, `Suffix`, or `Regex` are configured in the same `Header` configuration.
| string | optional | | `Prefix` | Specifies a prefix value for the header key set in the `Name` field. If the request header value starts with the `Prefix` value, Consul applies the permission. Do not specify `Prefix` if `Present`, `Exact`, `Suffix`, or `Regex` are configured in the same `Header` configuration. | string | optional | | `Suffix` | Specifies a suffix value for the header key set in the `Name` field. If the request header value ends with the `Suffix` value, Consul applies the permission. Do not specify `Suffix` if `Present`, `Exact`, `Prefix`, or `Regex` are configured in the same `Header` configuration. | string | optional | -| `Regex` | Specifies a regular expression pattern as the value for the header key set in the `Name` field. If the request header value matches the regex, Consul applies the permission. Do not specify `Regex` if `Present`, `Exact`, `Prefix`, or `Suffix` are configured in the same `Header` configuration. The regex syntax is proxy-specific. If using Envoy, refer to the [re2 documentation](https://github.com/google/re2/wiki/Syntax) for details. | string | optional | +| `Contains` | Specifies a contains value for the header key set in the `Name` field. If the request header value includes the `Contains` value, Consul applies the permission. Do not specify `Contains` if `Present`, `Exact`, `Prefix`, `Suffix`, or `Regex` are configured in the same `Header` configuration. | string | optional | +| `Regex` | Specifies a regular expression pattern as the value for the header key set in the `Name` field. If the request header value matches the regex, Consul applies the permission. Do not specify `Regex` if `Present`, `Exact`, `Prefix`, `Suffix`, or `Contains` are configured in the same `Header` configuration. The regex syntax is proxy-specific. If using Envoy, refer to the [re2 documentation](https://github.com/google/re2/wiki/Syntax) for details. | string | optional | +| `IgnoreCase` | Ignores the case of the provided header value when matching with `Exact`, `Prefix`, `Suffix`, or `Contains`. Default is `false`. | boolean | optional | | `Invert` | Inverts the matching logic configured in the `Header`. Default is `false`. | boolean | optional | ### `Sources[].Precedence` @@ -964,7 +968,7 @@ Specifies a set of criteria for matching HTTP request headers. The request heade Each member of the `header` list is a map that contains a `name` field and at least one match criterion. -~> **Warning**: If it is possible for a header to contain multiple values, we recommend using `contains` or `regex` rather than `exact`, `prefix`, or `suffix`. Envoy internally concatenates multiple header values into a single CSV value prior to applying match rules, which may result in match rules that depend on the beginning or end of a string vulnerable to circumvention. A more robust alternative is using `contains` or, if a stricter value match is required, configuring a regex pattern that is tolerant of comma-separated values. +~> **Warning**: If it is possible for a header to contain multiple values, we recommend using `contains` or `regex` rather than `exact`, `prefix`, or `suffix`. Envoy internally concatenates multiple header values into a single CSV value prior to applying match rules, which may result in match rules that depend on the beginning or end of a string vulnerable to circumvention. A more robust alternative is using `contains` or, if a stricter value match is required, configuring a regex pattern that is tolerant of comma-separated values.
 The following table describes the parameters that each member of the `header` list may contain:
diff --git a/website/content/docs/connect/dataplane/index.mdx b/website/content/docs/connect/dataplane/index.mdx
index 30996e3476..2f16014cac 100644
--- a/website/content/docs/connect/dataplane/index.mdx
+++ b/website/content/docs/connect/dataplane/index.mdx
@@ -121,7 +121,7 @@ Consul Dataplane on Kubernetes supports the following features:
 - Running Consul service mesh in AWS Fargate and GKE Autopilot is supported.
 - xDS load balancing is supported.
 - Servers running in Kubernetes and servers external to Kubernetes are both supported.
-- HCP Consul Dedicated and HCP Consul Central are supported.
+- HCP Consul Dedicated is supported.
 - Consul API Gateway
 
 Consul Dataplane on ECS support the following features:
diff --git a/website/content/docs/connect/gateways/api-gateway/configuration/routes.mdx b/website/content/docs/connect/gateways/api-gateway/configuration/routes.mdx
index 6723931290..9e2e8265ee 100644
--- a/website/content/docs/connect/gateways/api-gateway/configuration/routes.mdx
+++ b/website/content/docs/connect/gateways/api-gateway/configuration/routes.mdx
@@ -101,7 +101,7 @@ This field specifies backend services that the `Route` references. The following
 | Parameter | Description | Type | Required |
 | --- | --- | --- | --- |
-| `group` | Specifies the Kubernetes API Group of the referenced backend. You can specify the following values: <br/>  • `""`: Specifies the core Kubernetes API group. This value must be used when `kind` is set to `Service`. This is the default value if unspecified. <br/>  • `api-gateway.consul.hashicorp.com`: This value must be used when `kind` is set to `MeshService`. | String | Optional |
+| `group` | Specifies the Kubernetes API Group of the referenced backend. You can specify the following values: <br/>  • `""`: Specifies the core Kubernetes API group. This value must be used when `kind` is set to `Service`. This is the default value if unspecified. <br/>  • `consul.hashicorp.com`: This value must be used when `kind` is set to `MeshService`. | String | Optional |
 | `kind` | Specifies the Kubernetes Kind of the referenced backend. You can specify the following values: <br/>  • `Service` (default): Indicates that the `backendRef` references a Service in the Kubernetes cluster. <br/>  • `MeshService`: Indicates that the `backendRef` references a service in the Consul mesh. Refer to the `MeshService` [documentation](/consul/docs/connect/gateways/api-gateway/configuration/meshservice) for additional information. | String | Optional |
 | `name` | Specifies the name of the Kubernetes Service or Consul mesh service resource. | String | Required |
 | `namespace` | Specifies the Kubernetes namespace containing the Kubernetes Service or Consul mesh service resource. You must specify a value if the Service or Consul mesh service is defined in a different namespace from the `Route`. Defaults to the namespace of the `Route`. <br/> To create a route for a `backendRef` in a different namespace, you must also create a [ReferenceGrant](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.ReferenceGrant). Refer to the [example route](#example-cross-namespace-backendref) configured to reference across namespaces. | String | Optional |
diff --git a/website/content/docs/connect/gateways/api-gateway/define-routes/route-to-peered-services.mdx b/website/content/docs/connect/gateways/api-gateway/define-routes/route-to-peered-services.mdx
index 414ce45f53..e323f8ea9e 100644
--- a/website/content/docs/connect/gateways/api-gateway/define-routes/route-to-peered-services.mdx
+++ b/website/content/docs/connect/gateways/api-gateway/define-routes/route-to-peered-services.mdx
@@ -67,7 +67,7 @@ spec:
   ...
   rules:
   - backendRefs:
-    - group: api-gateway.consul.hashicorp.com
+    - group: consul.hashicorp.com
       kind: MeshService
       name: example-mesh-service
       port: 3000
diff --git a/website/content/docs/connect/observability/grafanadashboards/consuldataplanedashboard.mdx b/website/content/docs/connect/observability/grafanadashboards/consuldataplanedashboard.mdx
new file mode 100644
index 0000000000..24436828aa
--- /dev/null
+++ b/website/content/docs/connect/observability/grafanadashboards/consuldataplanedashboard.mdx
@@ -0,0 +1,133 @@
+---
+layout: docs
+page_title: Dashboard for Consul dataplane metrics
+description: >-
+  This Grafana dashboard provides Consul dataplane metrics on Kubernetes deployments. Learn about the Grafana queries that produce the metrics and visualizations in this dashboard.
+---
+
+# Consul dataplane monitoring dashboard
+
+This page provides reference information about the [Grafana dashboard configuration included in the `hashicorp/consul` GitHub repository](https://github.com/hashicorp/consul/blob/main/grafana/consuldataplanedashboard.json). The Consul dataplane dashboard provides a comprehensive view of the service health, performance, and resource utilization within the Consul service mesh. You can monitor key metrics at both the cluster and service levels with this dashboard. It can help you ensure service reliability and performance.
+
+![Preview of the Consul dataplane dashboard](/public/img/grafana/consul-dataplane-dashboard.png)
+
+This image provides an example of the dashboard's visual layout and contents.
+
+## Grafana queries overview
+
+The Consul dataplane dashboard provides the following information about service mesh operations.
+
+### Live service count
+
+**Description:** Displays the total number of live Envoy proxies currently running in the service mesh. It helps track the overall availability of services and identify any outages or other widespread issues in the service mesh.
+
+```promql
+sum(envoy_server_live{app=~"$service"})
+```
+
+### Total request success rate
+
+**Description:** Tracks the percentage of successful requests across the service mesh. It excludes 4xx and 5xx response codes to focus on operational success. Use it to monitor the overall reliability of your services.
+
+```promql
+sum(irate(envoy_cluster_upstream_rq_xx{envoy_response_code_class!~"5|4",consul_destination_service=~"$service"}[10m])) / sum(irate(envoy_cluster_upstream_rq_xx{consul_destination_service=~"$service"}[10m]))
+```
+
+### Total failed requests
+
+**Description:** This pie chart shows the total number of failed requests within the service mesh, categorized by service.
It provides a visual breakdown of where failures are occurring, allowing operators to focus on problematic services. + +```promql +sum(increase(envoy_cluster_upstream_rq_xx{envoy_response_code_class=~"4|5", consul_destination_service=~"$service"}[10m])) by (local_cluster) +``` + +### Requests per second + +**Description:** This metric shows the rate of incoming HTTP requests per second to the selected services. It helps operators understand the current load on services and how much traffic they are processing. + +```promql +sum(rate(envoy_http_downstream_rq_total{service=~"$service",envoy_http_conn_manager_prefix="public_listener"}[5m])) by (service) +``` + +### Unhealthy clusters + +**Description:** This metric tracks the number of unhealthy clusters in the mesh, helping operators identify services that are experiencing issues and need attention to ensure operational health. + +```promql +(sum(envoy_cluster_membership_healthy{app=~"$service",envoy_cluster_name=~"$cluster"}) - sum(envoy_cluster_membership_total{app=~"$service",envoy_cluster_name=~"$cluster"})) +``` + +### Heap size + +**Description:** This metric displays the total memory heap size of the Envoy proxies. Monitoring heap size is essential to detect memory issues and ensure that services are operating efficiently. + +```promql +SUM(envoy_server_memory_heap_size{app=~"$service"}) +``` + +### Allocated memory + +**Description:** This metric shows the amount of memory allocated by the Envoy proxies. It helps operators monitor the resource usage of services to prevent memory overuse and optimize performance. + +```promql +SUM(envoy_server_memory_allocated{app=~"$service"}) +``` + +### Avg uptime per node + +**Description:** This metric calculates the average uptime of Envoy proxies across all nodes. It helps operators monitor the stability of services and detect potential issues with service restarts or crashes. + +```promql +avg(envoy_server_uptime{app=~"$service"}) +``` + +### Cluster state + +**Description:** This metric indicates whether all clusters are healthy. It provides a quick overview of the cluster state to ensure that there are no issues affecting service performance. + +```promql +(sum(envoy_cluster_membership_total{app=~"$service",envoy_cluster_name=~"$cluster"})-sum(envoy_cluster_membership_healthy{app=~"$service",envoy_cluster_name=~"$cluster"})) == bool 0 +``` + +### CPU throttled seconds by namespace + +**Description:** This metric tracks the number of seconds during which CPU usage was throttled. Monitoring CPU throttling helps operators identify when services are exceeding their allocated CPU limits and may need optimization. + +```promql +rate(container_cpu_cfs_throttled_seconds_total{namespace=~"$namespace"}[5m]) +``` + +### Memory usage by pod limits + +**Description:** This metric shows memory usage as a percentage of the memory limit set for each pod. It helps operators ensure that services are staying within their allocated memory limits to avoid performance degradation. + +```promql +100 * max (container_memory_working_set_bytes{namespace=~"$namespace"} / on(container, pod) label_replace(kube_pod_container_resource_limits{resource="memory"}, "pod", "$1", "exported_pod", "(.+)")) by (pod) +``` + +### CPU usage by pod limits + +**Description:** This metric displays CPU usage as a percentage of the CPU limit set for each pod. Monitoring CPU usage helps operators optimize service performance and prevent CPU exhaustion. 
+
+```promql
+100 * max(
+  rate(container_cpu_usage_seconds_total{namespace=~"$namespace"}[5m]) /
+  on(container, pod) label_replace(kube_pod_container_resource_limits{resource="cpu"}, "pod", "$1", "exported_pod", "(.+)")
+) by (pod)
+```
+
+### Total active upstream connections
+
+**Description:** This metric tracks the total number of active upstream connections to other services in the mesh. It provides insight into service dependencies and network load.
+
+```promql
+sum(envoy_cluster_upstream_cx_active{app=~"$service",envoy_cluster_name=~"$cluster"}) by (app, envoy_cluster_name)
+```
+
+### Total active downstream connections
+
+**Description:** This metric tracks the total number of active downstream connections from services to clients. It helps operators monitor service load and ensure that services are able to handle the traffic effectively.
+
+```promql
+sum(envoy_http_downstream_cx_active{app=~"$service"})
+```
diff --git a/website/content/docs/connect/observability/grafanadashboards/consulk8sdashboard.mdx b/website/content/docs/connect/observability/grafanadashboards/consulk8sdashboard.mdx
new file mode 100644
index 0000000000..5dc35e40d4
--- /dev/null
+++ b/website/content/docs/connect/observability/grafanadashboards/consulk8sdashboard.mdx
@@ -0,0 +1,128 @@
+---
+layout: docs
+page_title: Dashboard for Consul k8s control plane metrics
+description: >-
+  This documentation provides an overview of the Consul on Kubernetes Grafana Dashboard. Learn about the metrics it displays and the queries that produce the metrics.
+---
+
+# Consul on Kubernetes control plane monitoring dashboard
+
+This page provides reference information about the [Grafana dashboard configuration included in the `hashicorp/consul` GitHub repository](https://github.com/hashicorp/consul/blob/main/grafana/consul-k8s-control-plane-monitoring.json).
+
+## Grafana queries overview
+
+This dashboard provides the following information about service mesh operations.
+
+### Number of Consul servers
+
+**Description:** Displays the number of Consul servers currently active. This metric provides insight into the cluster's health and the number of Consul nodes running in the environment.
+
+```promql
+consul_consul_server_0_consul_members_servers{pod="consul-server-0"}
+```
+
+### Number of connected Consul dataplanes
+
+**Description:** Tracks the number of connected Consul dataplanes. This metric helps operators understand how many Envoy sidecars are actively connected to the mesh.
+
+```promql
+count(consul_dataplane_envoy_connected)
+```
+
+### CPU usage in seconds (Consul servers)
+
+**Description:** This metric shows the CPU usage of the Consul servers over time, helping operators monitor resource consumption.
+
+```promql
+rate(container_cpu_usage_seconds_total{container="consul", pod=~"consul-server-.*"}[5m])
+```
+
+### Memory usage (Consul servers)
+
+**Description:** Displays the memory usage of the Consul servers. This metric helps ensure that the servers have sufficient memory resources for proper operation.
+
+```promql
+container_memory_working_set_bytes{container="consul", pod=~"consul-server-.*"}
+```
+
+### Disk read/write total per 5 minutes (Consul servers)
+
+**Description:** Tracks the total disk bytes read and written by Consul servers within a 5 minute window. This metric helps assess the disk I/O load on Consul nodes.
+ +```promql +sum(rate(container_fs_writes_bytes_total{pod=~"consul-server-.*", container="consul"}[5m])) by (pod, device) +``` + +```promql +sum(rate(container_fs_reads_bytes_total{pod=~"consul-server-.*", container="consul"}[5m])) by (pod, device) +``` + +### Received bytes total per 5 minutes (Consul servers) + +**Description:** Tracks the total network bytes received by Consul servers within a 5 minute window. This metric helps assess the network load on Consul nodes. + +```promql +sum(rate(container_network_receive_bytes_total{pod=~"consul-server-.*"}[5m])) by (pod) +``` + +### Memory limit (Consul servers) + +**Description:** Displays the memory limit for Consul servers. This metric ensures that memory usage stays within the defined limits for each Consul server. + +```promql +kube_pod_container_resource_limits{resource="memory", pod="consul-server-0"} +``` + +### CPU limit in seconds (Consul servers) + +**Description:** Displays the CPU limit for Consul servers. Monitoring CPU limits helps operators ensure that the services are not constrained by resource limitations. + +```promql +kube_pod_container_resource_limits{resource="cpu", pod="consul-server-0"} +``` + +### Disk usage (Consul servers) + +**Description:** Shows the amount of filesystem storage used by Consul servers. This metric helps operators track disk usage and plan for capacity. + +```promql +sum(container_fs_usage_bytes{}) by (pod) +``` + +```promql +sum(container_fs_usage_bytes{pod="consul-server-0"}) +``` + +### CPU usage in seconds (Connect injector) + +**Description:** Tracks the CPU usage of the Connect injector, which is responsible for injecting Envoy sidecars and other operations within the mesh. Monitoring this helps ensure that Connect injector has adequate CPU resources. + +```promql +rate(container_cpu_usage_seconds_total{pod=~".*-connect-injector-.*", container="sidecar-injector"}[5m]) +``` + +### CPU limit in seconds (Connect injector) + +**Description:** Displays the CPU limit for the Connect injector. Monitoring the CPU limits ensures that Connect injector is not constrained by resource limitations. + +```promql +max(kube_pod_container_resource_limits{resource="cpu", container="sidecar-injector"}) +``` + +### Memory usage (Connect injector) + +**Description:** Tracks the memory usage of the Connect injector. Monitoring this helps ensure the Connect injector has sufficient memory resources. + +```promql +container_memory_working_set_bytes{pod=~".*-connect-injector-.*", container="sidecar-injector"} +``` + +### Memory limit (Connect injector) + +**Description:** Displays the memory limit for the Connect injector, helping to monitor if the service is nearing its resource limits. + +```promql +max(kube_pod_container_resource_limits{resource="memory", container="sidecar-injector"}) +``` + + diff --git a/website/content/docs/connect/observability/grafanadashboards/consulserverdashboard.mdx b/website/content/docs/connect/observability/grafanadashboards/consulserverdashboard.mdx new file mode 100644 index 0000000000..e3e2ff49bf --- /dev/null +++ b/website/content/docs/connect/observability/grafanadashboards/consulserverdashboard.mdx @@ -0,0 +1,164 @@ +--- +layout: docs +page_title: Dashboard for Consul server metrics +description: >- + This documentation provides an overview of the Consul Server Dashboard. Learn about the metrics it displays and the queries that produce the metrics. 
+--- + +# Consul server monitoring dashboard + +This page provides reference information about the [Grafana dashboard configuration included in the `hashicorp/consul` GitHub repository](https://github.com/hashicorp/consul/blob/main/grafana/consul-server-monitoring.json). + +## Grafana queries overview + +This dashboard provides the following information about service mesh operations. + +### Raft commit time + +**Description:** This metric measures the time it takes to commit Raft log entries. Stable values are expected for a healthy cluster. High values can indicate issues with resources such as memory, CPU, or disk space. + +```promql +consul_raft_commitTime +``` + +### Raft commits per 5 minutes + +**Description:** This metric tracks the rate of Raft log commits emitted by the leader, showing how quickly changes are being applied across the cluster. + +```promql +rate(consul_raft_apply[5m]) +``` + +### Last contacted leader + +**Description:** Measures the duration since the last contact with the Raft leader. Spikes in this metric can indicate network issues or an unavailable leader, which may affect cluster stability. + +```promql +consul_raft_leader_lastContact != 0 +``` + +### Election events + +**Description:** Tracks Raft state transitions, which indicate leadership elections. Frequent transitions might suggest cluster instability and require investigation. + +```promql +rate(consul_raft_state_candidate[1m]) +``` + +```promql +rate(consul_raft_state_leader[1m]) +``` + +### Autopilot health + +**Description:** A boolean metric that shows a value of 1 when Autopilot is healthy and 0 when issues are detected. Ensures that the cluster has sufficient resources and an operational leader. + +```promql +consul_autopilot_healthy +``` + +### DNS queries per 5 minutes + +**Description:** This metric tracks the rate of DNS queries per node, bucketed into 5 minute intervals. It helps monitor the query load on Consul’s DNS service. + +```promql +rate(consul_dns_domain_query_count[5m]) +``` + +### DNS domain query time + +**Description:** Measures the time spent handling DNS domain queries. Spikes in this metric may indicate high contention in the catalog or too many concurrent queries. + +```promql +consul_dns_domain_query +``` + +### DNS reverse query time + +**Description:** Tracks the time spent processing reverse DNS queries. Spikes in query time may indicate performance bottlenecks or increased workload. + +```promql +consul_dns_ptr_query +``` + +### KV applies per 5 minutes + +**Description:** This metric tracks the rate of key-value store applies over 5 minute intervals, indicating the operational load on Consul’s KV store. + +```promql +rate(consul_kvs_apply_count[5m]) +``` + +### KV apply time + +**Description:** Measures the time taken to apply updates to the key-value store. Spikes in this metric might suggest resource contention or client overload. + +```promql +consul_kvs_apply +``` + +### Transaction apply time + +**Description:** Tracks the time spent applying transaction operations in Consul, providing insights into potential bottlenecks in transaction operations. + +```promql +consul_txn_apply +``` + +### ACL resolves per 5 minutes + +**Description:** This metric tracks the rate of ACL token resolutions over 5 minute intervals. It provides insights into the activity related to ACL tokens within the cluster. 
+ +```promql +rate(consul_acl_ResolveToken_count[5m]) +``` + +### ACL resolve token time + +**Description:** Measures the time taken to resolve ACL tokens into their associated policies. + +```promql +consul_acl_ResolveToken +``` + +### ACL updates per 5 minutes + +**Description:** Tracks the rate of ACL updates over 5 minute intervals. This metric helps monitor changes in ACL configurations over time. + +```promql +rate(consul_acl_apply_count[5m]) +``` + +### ACL apply time + +**Description:** Measures the time spent applying ACL changes. Spikes in apply time might suggest resource constraints or high operational load. + +```promql +consul_acl_apply +``` + +### Catalog operations per 5 minutes + +**Description:** Tracks the rate of register and deregister operations in the Consul catalog, providing insights into the churn of services within the cluster. + +```promql +rate(consul_catalog_register_count[5m]) +``` + +```promql +rate(consul_catalog_deregister_count[5m]) +``` + +### Catalog operation time + +**Description:** Measures the time taken to complete catalog register or deregister operations. Spikes in this metric may indicate performance issues within the catalog. + +```promql +consul_catalog_register +``` + +```promql +consul_catalog_deregister +``` + + diff --git a/website/content/docs/connect/observability/grafanadashboards/index.mdx b/website/content/docs/connect/observability/grafanadashboards/index.mdx new file mode 100644 index 0000000000..2a21ec6f2f --- /dev/null +++ b/website/content/docs/connect/observability/grafanadashboards/index.mdx @@ -0,0 +1,91 @@ +--- +layout: docs +page_title: Service Mesh Observability - Dashboards +description: >- + This documentation provides an overview of several dashboards designed for monitoring and managing services within a Consul-managed Envoy service mesh. Learn how to enable access logs and configure key performance and operational metrics to ensure the reliability and performance of services in the service mesh. +--- + +# Dashboards for service mesh observability + +This topic describes the configuration and usage of dashboards for monitoring and managing services within a Consul-managed Envoy service mesh. These dashboards provide critical insights into the health, performance, and resource utilization of services. The dashboards described here are essential tools for ensuring the stability, efficiency, and reliability of your service mesh environment. + +This page provides reference information about the Grafana dashboard configurations included in the [`grafana` directory in the `hashicorp/consul` GitHub repository](https://github.com/hashicorp/consul/tree/main/grafana). + +## Dashboards overview + +The repository includes the following dashboards: + + - **Consul service-to-service dashboard**: Provides a detailed view of service-to-service communications, monitoring key metrics like access logs, HTTP requests, error counts, response code distributions, and request success rates. The dashboard includes customizable filters for focusing on specific services and namespaces. + + - **Consul service dashboard**: Tracks key metrics for Envoy proxies at the cluster and service levels, ensuring the performance and reliability of individual services within the mesh. + + - **Consul dataplane dashboard**: Offers a comprehensive overview of service health and performance, including request success rates, resource utilization (CPU and memory), active connections, and cluster health. 
It helps operators maintain service reliability and optimize resource usage.
+
+  - **Consul k8s dashboard**: Focuses on monitoring the health and resource usage of the Consul control plane within a Kubernetes environment, ensuring the stability of the control plane.
+
+  - **Consul server dashboard**: Provides detailed monitoring of Consul servers, tracking key metrics like server health, CPU and memory usage, disk I/O, and network performance. This dashboard is critical for ensuring the stability and performance of Consul servers within the service mesh.
+
+## Enabling Prometheus
+
+Add the following configuration to your Consul Helm chart to enable the Prometheus tools.
+
+```yaml
+global:
+  metrics:
+    enabled: true
+    provider: "prometheus"
+    enableAgentMetrics: true
+    agentMetricsRetentionTime: "10m"
+
+prometheus:
+  enabled: true
+
+ui:
+  enabled: true
+  metrics:
+    enabled: true
+    provider: "prometheus"
+    baseURL: http://prometheus-server.consul
+```
+
+## Enable access logs
+
+Access log configuration is defined globally in the [`proxy-defaults`](/consul/docs/connect/config-entries/proxy-defaults#accesslogs) configuration entry.
+
+The following example is a minimal configuration for enabling access logs:
+
+```hcl
+Kind = "proxy-defaults"
+Name = "global"
+AccessLogs {
+  Enabled = true
+}
+```
+
+```yaml
+apiVersion: consul.hashicorp.com/v1alpha1
+kind: ProxyDefaults
+metadata:
+  name: global
+spec:
+  accessLogs:
+    enabled: true
+```
+
+```json
+{
+  "Kind": "proxy-defaults",
+  "Name": "global",
+  "AccessLogs": {
+    "Enabled": true
+  }
+}
+```
diff --git a/website/content/docs/connect/observability/grafanadashboards/service-to-servicedashboard.mdx b/website/content/docs/connect/observability/grafanadashboards/service-to-servicedashboard.mdx
new file mode 100644
index 0000000000..f6abad471e
--- /dev/null
+++ b/website/content/docs/connect/observability/grafanadashboards/service-to-servicedashboard.mdx
@@ -0,0 +1,183 @@
+---
+layout: docs
+page_title: Dashboard for monitoring Consul service-to-service mesh
+description: >-
+  This documentation provides an overview of the Service-to-service dashboard. Learn about the metrics it displays and the queries that produce the metrics.
+---
+
+# Service-to-service dashboard
+
+This page provides reference information about the [Grafana dashboard configuration included in the `hashicorp/consul` GitHub repository](https://github.com/hashicorp/consul/blob/main/grafana/consulservicetoservicedashboard.json). The service-to-service dashboard provides deep visibility into the traffic and interactions between services within the Consul service mesh. It focuses on critical metrics such as logs, error rates, traffic patterns, and success rates, all of which help operators maintain smooth and reliable service-to-service communication.
+
+![Preview of the service to service mesh dashboard](/public/img/grafana/service-to-service-1.png)
+
+## Grafana queries overview
+
+This dashboard provides the following information about service mesh operations.
+
+### Access logs and errors monitoring
+
+**Description:** This section provides visibility into logs and errors related to service-to-service communications. It tracks and displays the number of logs generated, errors encountered, and the percentage of logs matching specific patterns.
+
+### Total logs
+
+**Description:** This metric counts the total number of log lines produced by Consul dataplane containers.
It provides an overview of the volume of logs being generated for a specific namespace.
+
+```promql
+sum(count_over_time(({container="consul-dataplane",namespace=~"$namespace"})[$__interval]))
+```
+
+### Total logs containing "$searchable_pattern"
+
+**Description:** This metric tracks the number of logs containing the specified pattern. It is useful for filtering and monitoring specific log events across the service mesh.
+
+```promql
+sum(count_over_time({container="consul-dataplane", namespace=~"$namespace"} |~ `(?i)$searchable_pattern` [$__interval]))
+```
+
+### Percentage of logs containing "$searchable_pattern"
+
+**Description:** This metric calculates the percentage of logs containing the specified search pattern within the total log volume. It helps gauge the proportion of specific log events.
+
+```promql
+(sum(count_over_time({container="consul-dataplane", namespace=~"$namespace"} |~ `(?i)$searchable_pattern` [$__interval])) * 100) / sum(count_over_time({container="consul-dataplane", namespace=~"$namespace"} [$__interval]))
+```
+
+### Total response code distribution
+
+**Description:** This pie chart visualizes the distribution of HTTP response codes, helping identify any 4xx and 5xx error codes generated by the services.
+
+```promql
+sum by(response_code) (count_over_time({container="consul-dataplane", namespace="$namespace"} | json | response_code != "0" | __error__="" [$__range]))
+```
+
+### Rate of logs containing "$searchable_pattern" per service
+
+**Description:** This metric monitors the rate at which specific patterns appear in logs per service, helping to detect trends and anomalies in log data.
+
+```promql
+sum by(app) (rate({container="consul-dataplane", namespace=~"$namespace"} |~ `(?i)$searchable_pattern` [$__range]))
+```
+
+### TCP metrics - service level
+
+### TCP inbound and outbound bytes
+
+**Description:** This metric tracks the inbound and outbound TCP bytes transferred between services. It is essential for understanding the network traffic flow between source and destination services.
+
+```promql
+sum(rate(envoy_tcp_downstream_cx_rx_bytes_total{}[10m])) by (service, destination_service)
+```
+
+### TCP inbound and outbound bytes buffered
+
+**Description:** This metric monitors the amount of TCP bytes buffered for inbound and outbound traffic between services. It helps identify potential network performance bottlenecks.
+
+```promql
+sum(rate(envoy_tcp_downstream_cx_rx_bytes_buffered{}[10m])) by (service, destination_service)
+```
+
+### TCP downstream connections
+
+**Description:** This metric counts the number of active TCP downstream connections from the source service to the destination service, providing visibility into the volume of connections between services.
+
+```promql
+sum(envoy_tcp_downstream_cx_total) by (service, destination_service)
+```
+
+### Outbound traffic monitoring
+![Preview of the outbound traffic monitoring](/public/img/grafana/service-to-service-2.png)
+
+### Upstream traffic
+
+**Description:** This metric monitors the upstream traffic from the source service to the destination service. It shows how much traffic is being sent between services.
+
+```promql
+sum(irate(envoy_cluster_upstream_rq_total{local_cluster=~"$source_service",consul_destination_service=~"$destination_service"}[10m]))
+```
+
+### Upstream request response timeliness
+
+**Description:** This metric calculates the 95th percentile of upstream request response times between the source and destination services.
It helps ensure that service communications are handled promptly.
+
+```promql
+histogram_quantile(0.95, sum(rate(envoy_cluster_upstream_rq_time_bucket{local_cluster=~"$source_service",consul_destination_target!=""}[10m])) by (le, consul_destination_target))
+```
+
+### Upstream request success rate
+
+**Description:** This metric tracks the success rate of requests from the source service to the destination service, excluding 5xx errors. It helps assess the reliability of service communications.
+
+```promql
+sum(irate(envoy_cluster_upstream_rq_xx{envoy_response_code_class!="5",local_cluster=~"$source_service",consul_destination_service=~"$destination_service"}[10m]))
+```
+
+### Inbound traffic monitoring
+![Preview of the inbound traffic monitoring](/public/img/grafana/service-to-service-3.png)
+
+### Requests sent
+
+**Description:** This metric tracks the number of requests sent between the source service and destination service within the service mesh.
+
+```promql
+sum(irate(envoy_cluster_upstream_rq_total{consul_destination_datacenter="dc1",local_cluster=~"$source_service",consul_destination_service=~"$destination_service"}[10m])) by (consul_destination_service, local_cluster)
+```
+
+### Request success rate
+
+**Description:** This metric tracks the success rate of requests from the source service to the destination service, helping identify failures or bottlenecks in communication.
+
+```promql
+sum(irate(envoy_cluster_upstream_rq_xx{envoy_response_code_class!="5",local_cluster=~"$source_service",consul_destination_service=~"$destination_service"}[10m])) by (local_cluster, consul_destination_service) / sum(irate(envoy_cluster_upstream_rq_xx{consul_destination_service=~"$destination_service"}[10m])) by (local_cluster, consul_destination_service)
+```
+
+### Response success by status code
+
+**Description:** This metric tracks response success by status code for requests sent by the source service to the destination service.
+
+```promql
+sum(increase(envoy_http_downstream_rq_xx{local_cluster=~"$source_service",envoy_http_conn_manager_prefix="public_listener"}[10m])) by (local_cluster, envoy_response_code_class)
+```
+
+### Request duration
+
+**Description:** This metric tracks the request duration between the source and destination services, helping monitor performance and response times.
+
+```promql
+histogram_quantile(0.95, sum(rate(envoy_cluster_upstream_rq_time_bucket{consul_destination_datacenter="dc1", consul_destination_service=~"$destination_service",local_cluster=~"$source_service"}[10m])) by (le, cluster, local_cluster, consul_destination_service))
+```
+
+### Response success
+
+**Description:** This metric tracks the success of responses for the source service's requests across the service mesh.
+
+```promql
+sum(increase(envoy_http_downstream_rq_total{local_cluster=~"$source_service",envoy_http_conn_manager_prefix="public_listener"}[10m])) by (local_cluster)
+```
+
+### Request response rate
+
+**Description:** This metric tracks the rate at which responses are being generated by the source service, providing insight into service activity and performance.
+
+```promql
+sum(irate(envoy_http_downstream_rq_total{local_cluster=~"$source_service",envoy_http_conn_manager_prefix="public_listener"}[10m])) by (local_cluster)
+```
+
+## Customization options
+
+![Preview of the nginx service selection as a customization option on the service to service dashboard](/public/img/grafana/service-to-service-4.png)
+
+The service-to-service dashboard includes a variety of customization options to help you analyze specific aspects of service-to-service communications, tailor the dashboard for more targeted monitoring, and enhance visibility into the service mesh.
+
+- **Filter by source service:** You can filter the dashboard to focus on traffic originating from a specific source service, allowing you to analyze interactions from the source service to all destination services.
+
+- **Filter by destination service:** Similarly, you can filter the dashboard by destination service to track and analyze the traffic received by specific services. This helps pinpoint communication issues or performance bottlenecks related to specific services.
+
+- **Filter by namespace:** The dashboard can be customized to focus on service interactions within a particular namespace. This is especially useful for isolating issues in multi-tenant environments or clusters that operate with strict namespace isolation.
+
+- **Log pattern search:** You can apply custom search patterns to logs to filter out specific log events of interest, such as error messages or specific HTTP status codes. This enables you to narrow down on specific log entries and identify patterns that may indicate issues.
+
+- **Time range selection:** The dashboard supports dynamic time range selection, allowing you to focus on service interactions over specific time intervals. This helps in analyzing traffic trends, troubleshooting incidents, and understanding the timing of service communications.
+
+By using these customization options, you can tailor the dashboard to your specific needs and ensure you are always monitoring the most relevant data for maintaining a healthy and performant service mesh.
+
diff --git a/website/content/docs/connect/observability/grafanadashboards/servicedashboard.mdx b/website/content/docs/connect/observability/grafanadashboards/servicedashboard.mdx
new file mode 100644
index 0000000000..dfe684c343
--- /dev/null
+++ b/website/content/docs/connect/observability/grafanadashboards/servicedashboard.mdx
@@ -0,0 +1,157 @@
+---
+layout: docs
+page_title: Dashboard for monitoring Consul service mesh
+description: >-
+  This documentation provides an overview of the Service Dashboard. Learn about the metrics it displays and the queries that produce the metrics.
+---
+
+# Service dashboard
+
+This page provides reference information about the [Grafana dashboard configuration included in the `hashicorp/consul` GitHub repository](https://github.com/hashicorp/consul/blob/main/grafana/consulservicedashboard.json). The service dashboard offers an overview of the performance and health of individual services within the Consul service mesh. It provides insights into service availability, request success rates, latency, and connection metrics. This dashboard is essential for maintaining optimal service performance and quickly identifying any issues with service communications.
+
+![Preview of the service dashboard](/public/img/grafana/service-dashboard-2.png)
+
+## Grafana queries overview
+
+This dashboard provides the following information about service mesh operations.
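+
+The queries on this page use Grafana template variables such as `$service` and `$namespace` to scope results to the dashboard's current selections. As a sketch, if a hypothetical service named `frontend` is selected, Grafana expands the total request success rate query below into the following before sending it to Prometheus:
+
+```promql
+sum(irate(envoy_cluster_upstream_rq_xx{envoy_response_code_class!="5", envoy_response_code_class!="4", consul_destination_service=~"frontend"}[10m])) / sum(irate(envoy_cluster_upstream_rq_xx{consul_destination_service=~"frontend"}[10m]))
+```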
+
+### Total running services
+
+**Description:** This gauge tracks the total number of running services within the mesh that are not labeled as `traffic-generator`. It provides an overall view of active services, helping operators maintain visibility into service availability.
+
+```promql
+sum(envoy_server_live{app!="traffic-generator"})
+```
+
+### Total request success rate
+
+**Description:** This stat visualizes the success rate of upstream requests to the selected service. It filters out 4xx and 5xx response codes, providing a clearer picture of how well the service is performing in terms of handling requests successfully.
+
+```promql
+sum(irate(envoy_cluster_upstream_rq_xx{envoy_response_code_class!="5", envoy_response_code_class!="4", consul_destination_service=~"$service"}[10m])) / sum(irate(envoy_cluster_upstream_rq_xx{consul_destination_service=~"$service"}[10m]))
+```
+
+### Total failed request rate
+
+**Description:** This stat tracks the rate of failed requests (4xx and 5xx responses) for the selected service. It helps operators quickly identify if there are issues with client requests or server errors for a specific service.
+
+```promql
+sum(irate(envoy_cluster_upstream_rq_xx{envoy_response_code_class=~"4|5", consul_destination_service=~"$service"}[10m])) / sum(irate(envoy_cluster_upstream_rq_xx{consul_destination_service=~"$service"}[10m]))
+```
+
+### Average request response time in milliseconds
+
+**Description:** This gauge displays the average response time for requests to the selected service, providing an overview of the service's performance and responsiveness.
+
+```promql
+sum(rate(envoy_cluster_upstream_rq_time_sum{consul_destination_service=~"$service"}[10m])) / sum(rate(envoy_cluster_upstream_rq_total{consul_destination_service=~"$service"}[10m]))
+```
+
+### Total failed requests
+
+**Description:** This gauge tracks the total number of failed requests over a 10 minute window, categorized by service. It allows for easy identification of services that are experiencing high failure rates.
+
+```promql
+sum(increase(envoy_cluster_upstream_rq_xx{envoy_response_code_class=~"4|5", consul_destination_service=~"$service"}[10m])) by(local_cluster)
+```
+
+### Dataplane latency
+
+**Description:** This stat tracks the dataplane latency percentiles (p50, p75, p90, p99.9) for the selected service. It gives detailed insights into the distribution of latency within the service's request handling, helping identify performance bottlenecks.
+
+![Preview of the dataplane latency metrics](/public/img/grafana/service-dashboard-1.png)
+
+```promql
+histogram_quantile(0.50, sum by(le) (rate(envoy_cluster_upstream_rq_time_bucket{kubernetes_namespace=~"$namespace", local_cluster=~"$service"}[5m])))
+```
+
+```promql
+histogram_quantile(0.75, sum by(le) (rate(envoy_cluster_upstream_rq_time_bucket{kubernetes_namespace=~"$namespace", local_cluster=~"$service"}[5m])))
+```
+
+```promql
+histogram_quantile(0.90, sum by(le) (rate(envoy_cluster_upstream_rq_time_bucket{kubernetes_namespace=~"$namespace", local_cluster=~"$service"}[5m])))
+```
+
+```promql
+histogram_quantile(0.999, sum by(le) (rate(envoy_cluster_upstream_rq_time_bucket{kubernetes_namespace=~"$namespace", local_cluster=~"$service"}[5m])))
+```
+
+### Total TCP inbound and outbound bytes
+
+**Description:** This time series shows the total number of inbound and outbound TCP bytes for services within the mesh. It provides visibility into the data transfer patterns and volume between services.
+ +```promql +sum(rate(envoy_tcp_downstream_cx_rx_bytes_total{}[10m])) by (local_cluster) +``` + +### Total TCP inbound and outbound bytes buffered + +**Description:** This metric tracks the amount of TCP traffic buffered during inbound and outbound communications. It helps in identifying whether there is any potential latency caused by packet buffering or congestion. + +```promql +sum(rate(envoy_tcp_downstream_cx_rx_bytes_buffered{}[10m])) by (local_cluster) +``` + +### Total TCP downstream active connections + +**Description:** This metric counts the total number of active TCP downstream connections, providing an overview of the current connection load on the services within the mesh. + +```promql +sum(rate(envoy_tcp_downstream_cx_total{}[10m])) by(local_cluster) +``` + +### Total active HTTP upstream connections + +**Description:** This time series tracks the total number of active HTTP upstream connections for the selected service. It helps monitor connection patterns and assess load. + +```promql +sum(envoy_cluster_upstream_cx_active{app=~"$service"}) by (app) +``` + +### Total active HTTP downstream connections + +**Description:** This time series monitors the number of active HTTP downstream connections for the selected service, providing visibility into the current active user or client load on the service. + +```promql +sum(envoy_http_downstream_cx_active{app=~"$service"}) by (app) +``` + +### Upstream requests by status code + +**Description:** This metric tracks the number of upstream requests, grouped by HTTP status codes, giving insight into the health of the requests being made to upstream services from the selected service. + +```promql +sum by(namespace,app,envoy_response_code_class) (rate(envoy_cluster_upstream_rq_xx[5m])) +``` + +### Downstream requests by status code + +**Description:** This time series tracks downstream HTTP requests by status code, showing how well the selected service is responding to downstream requests from clients. + +```promql +sum(rate(envoy_http_downstream_rq_xx{envoy_http_conn_manager_prefix="public_listener"}[5m])) by (namespace, app, envoy_response_code_class) +``` + +### Connections rejected + +**Description:** This metric tracks the number of connections rejected due to overload or overflow conditions on listeners. Monitoring these values helps identify if the service is under too much load or has insufficient capacity to handle the incoming connections. + +```promql +rate(envoy_listener_downstream_cx_overload_reject{}[$__interval]) +``` + +## Customization options + +The service dashboard offers various customization options to help you analyze specific services and metrics. Use these options to tailor the dashboard to your needs and improve your ability to monitor and troubleshoot service health. + +- **Filter by service:** You can filter the dashboard by the service you want to monitor. This helps narrow down the metrics to the service of interest and provides a more targeted view of its performance. + +- **Filter by namespace:** The namespace filter allows operators to focus on a particular namespace in a multi-tenant or multi-namespace environment, isolating the service metrics within that specific context. + +- **Time range selection:** The dashboard supports flexible time range selection, allowing operators to analyze service behavior over different time periods. This is helpful for pinpointing issues that may occur at specific times or during high-traffic periods. 
+ +- **Percentile latency tracking:** The dashboard allows operators to track multiple latency percentiles (p50, p75, p90, p99.9) to get a more detailed view of how the service performs across different levels of traffic load. + + + diff --git a/website/content/docs/connect/proxies/envoy.mdx b/website/content/docs/connect/proxies/envoy.mdx index 8180787efb..927bac2fcd 100644 --- a/website/content/docs/connect/proxies/envoy.mdx +++ b/website/content/docs/connect/proxies/envoy.mdx @@ -43,10 +43,12 @@ refer to the previous release's version of this page. ### Envoy and Consul Client Agent Every major Consul release initially supports **four major Envoy releases**. -Standard major Consul releases do not expand that support in minor releases. However, [Consul Enterprise Long Term Support (LTS)](/consul/docs/enterprise/long-term-support) -releases do expand their Envoy version compatibility window in minor releases -to ensure compatibility with a maintained Envoy version. +releases expand their Envoy version compatibility window in minor releases to +ensure compatibility with a maintained Envoy version. Standard (non-LTS) Consul +Enterprise releases may also expand support to a new major version of Envoy in +order to receive important security fixes, if the previous major Envoy version +has reached end-of-life. Every major Consul release maintains and tests compatibility with specific Envoy patch releases to ensure users can benefit from bug and security fixes in Envoy. @@ -86,18 +88,18 @@ which packages both Envoy and the `consul-dataplane` binary in a single containe To enable seamless upgrades, each major version of Consul also supports the previous and next Consul dataplane versions. -Compared to standard Consul releases, Consul Enterprise -[Long Term Support (LTS)](/consul/docs/enterprise/long-term-support) -releases have the following differences with Consul dataplane compatibility: -- [Expanded compatibility window](#enterprise-long-term-support-releases): +Compared to community edition releases, Consul Enterprise releases have +the following differences with Consul dataplane compatibility: +- [LTS-Only: Expanded compatibility window](#enterprise-long-term-support-releases): Active Consul Enterprise LTS releases expand their Consul dataplane - version compatibility window until the LTS release reaches its - end of maintenance. + version compatibility window to include the version of Consul dataplane + aligned with the next Consul LTS release. - [Maintained Envoy version](#consul-dataplane-releases-that-span-envoy-major-versions): - Major versions of Consul dataplane aligned with a Consul Enterprise LTS version - may contain minor version updates that use a new major version of Envoy. - These minor version updates are necessary to ensure maintained versions - of Consul dataplane use a maintained version of Envoy. + Major versions of Consul dataplane aligned with a maintained Consul + Enterprise version may contain minor version updates that use a new + major version of Envoy. These minor version updates are necessary to + ensure that maintained versions of Consul dataplane use a maintained + version of Envoy. #### Standard releases @@ -106,6 +108,7 @@ apply to both Consul Enterprise and Consul community edition (CE). 
| Consul Version | Default `consul-dataplane` Version | Other compatible `consul-dataplane` Versions | | -------------- | -------------------------------------|----------------------------------------------| +| 1.20.x CE | 1.6.x (Envoy 1.31.x) | 1.5.x (Envoy 1.29.x) | | 1.19.x CE | 1.5.x (Envoy 1.29.x) | 1.4.x (Envoy 1.28.x) | | 1.18.x CE | 1.4.x (Envoy 1.28.x) | 1.3.x (Envoy 1.27.x) | | 1.17.x | 1.3.x (Envoy 1.27.x) | 1.4.x (Envoy 1.28.x), 1.2.x (Envoy 1.26.x) | @@ -120,22 +123,21 @@ until the LTS release reaches its end of maintenance. | Consul Version | Default `consul-dataplane` Version | Other compatible `consul-dataplane` Versions | | -------------- | -------------------------------------|----------------------------------------------| -| 1.19.x Ent | 1.5.x (Envoy 1.29.x) | 1.4.x (Envoy 1.28.x) | | 1.18.x Ent | 1.4.x (Envoy 1.28.x) | 1.3.x (Envoy 1.27.x) | | 1.15.x Ent | 1.1.x (Envoy 1.26.x) | 1.4.x (Envoy 1.28.x) - 1.0.x (Envoy 1.24.x) | #### Consul dataplane releases that span Envoy major versions -Major versions of Consul dataplane aligned with a Consul Enterprise LTS version +Major versions of Consul dataplane aligned with active versions of Consul may contain minor version updates that use a new major version of Envoy. These minor version updates are necessary to ensure maintained versions -of Consul dataplane use a maintained version of Envoy. +of Consul dataplane use a maintained version of Envoy including important +security fixes. -| `consul-dataplane` Version Range | Associated Consul Enterprise LTS version | Contained Envoy Binary Version | +| `consul-dataplane` Version Range | Associated Consul Enterprise version | Contained Envoy Binary Version | | -------------------------------- | ---------------------------------------- | ------------------------------ | -| 1.5.0 - 1.5.latest | 1.18.x Ent | Envoy 1.29.x | -| 1.4.0 - 1.4.latest | 1.18.x Ent | Envoy 1.28.x | -| 1.1.9 - 1.1.latest | 1.15.x Ent | Envoy 1.26.x | +| 1.1.11 - 1.1.latest | 1.15.x Ent | Envoy 1.27.x | +| 1.1.9 - 1.1.10 | 1.15.x Ent | Envoy 1.26.x | | 1.1.0 - 1.1.8 | 1.15.x Ent | Envoy 1.25.x | ## Getting Started diff --git a/website/content/docs/connect/security.mdx b/website/content/docs/connect/security.mdx index a65d4fabcd..cefcaa3be2 100644 --- a/website/content/docs/connect/security.mdx +++ b/website/content/docs/connect/security.mdx @@ -42,6 +42,8 @@ an explicit intention. ### Request Normalization Configured for L7 Intentions +~> **Compatibility warning**: This feature is available as of Consul CE 1.20.1 and Consul Enterprise 1.20.1, 1.19.2, 1.18.3, and 1.15.15. We recommend upgrading to the latest version of Consul to take advantage of the latest features and improvements. + Atypical traffic patterns may interfere with the enforcement of L7 intentions. For example, if a service makes request to a non-normalized URI path and Consul is not configured to force path normalization, it becomes possible to circumvent path match rules. While a diff --git a/website/content/docs/dynamic-app-config/sessions/index.mdx b/website/content/docs/dynamic-app-config/sessions/index.mdx index 3eb26f558f..5bbe65e8f1 100644 --- a/website/content/docs/dynamic-app-config/sessions/index.mdx +++ b/website/content/docs/dynamic-app-config/sessions/index.mdx @@ -10,7 +10,7 @@ description: >- Consul provides a session mechanism which can be used to build distributed locks. Sessions act as a binding layer between nodes, health checks, and key/value data. 
They are designed to provide granular locking and are heavily inspired by -[The Chubby Lock Service for Loosely-Coupled Distributed Systems](http://research.google.com/archive/chubby.html). +[The Chubby Lock Service for Loosely-Coupled Distributed Systems](https://research.google/pubs/the-chubby-lock-service-for-loosely-coupled-distributed-systems/). ## Session Design diff --git a/website/content/docs/k8s/compatibility.mdx b/website/content/docs/k8s/compatibility.mdx index e3b7ecd743..a786ed513a 100644 --- a/website/content/docs/k8s/compatibility.mdx +++ b/website/content/docs/k8s/compatibility.mdx @@ -29,6 +29,7 @@ apply to both Consul Enterprise and Consul community edition (CE). | Consul version | Compatible `consul-k8s` versions | Compatible Kubernetes versions | Compatible OpenShift versions | | -------------- | -------------------------------- | -------------------------------| ------------------------------| +| 1.20.x | 1.6.x | 1.28.x - 1.30.x | 4.13.x - 4.15.x | | 1.19.x | 1.5.x | 1.27.x - 1.29.x | 4.13.x - 4.15.x | | 1.18.x CE | 1.4.x | 1.26.x - 1.29.x | 4.13.x - 4.15.x | | 1.17.x | 1.3.x | 1.25.x - 1.28.x | 4.12.x - 4.15.x | diff --git a/website/content/docs/k8s/connect/cluster-peering/usage/create-sameness-groups.mdx b/website/content/docs/k8s/connect/cluster-peering/usage/create-sameness-groups.mdx index 0b3f1d43c2..2674805e16 100644 --- a/website/content/docs/k8s/connect/cluster-peering/usage/create-sameness-groups.mdx +++ b/website/content/docs/k8s/connect/cluster-peering/usage/create-sameness-groups.mdx @@ -43,8 +43,6 @@ Mesh gateways are required for cluster peering connections and recommended to se You must establish connections with cluster peers before you can create a sameness group that includes them. A cluster peering connection exists between two admin partitions in different datacenters, and each connection between two partitions must be established separately with each peer. Refer to [establish cluster peering connections](/consul/docs/k8s/connect/cluster-peering/usage/establish-peering) for step-by-step instructions. -You can establish and manage cluster peering relationships between all of your self-managed clusters using [HCP Consul Central](/hcp/docs/consul/concepts/consul-central). For more information, refer to [cluster peering global view](/hcp/docs/consul/monitor/consul-central/global-views#cluster-peering) in the HCP documentation. - To establish cluster peering connections and define a group as part of the same workflow, follow instructions up to [Export services between clusters](/consul/docs/k8s/connect/cluster-peering/usage/establish-peering#export-services-between-clusters). You can use the same exported services and service intention configuration entries to establish the cluster peering connection and create the sameness group. ## Create a sameness group diff --git a/website/content/docs/k8s/helm.mdx b/website/content/docs/k8s/helm.mdx index 42635dc2ca..9315109464 100644 --- a/website/content/docs/k8s/helm.mdx +++ b/website/content/docs/k8s/helm.mdx @@ -689,58 +689,6 @@ Use these links to navigate to a particular top-level stanza. - `consulAPITimeout` ((#v-global-consulapitimeout)) (`string: 5s`) - The time in seconds that the consul API client will wait for a response from the API before cancelling the request. - - `cloud` ((#v-global-cloud)) - Enables installing an HCP Consul Central self-managed cluster. - Requires Consul v1.14+. 
- - - `enabled` ((#v-global-cloud-enabled)) (`boolean: false`) - If true, the Helm chart will link a [self-managed cluster to HCP](/hcp/docs/consul/self-managed). - This can either be used to [configure a new cluster](/hcp/docs/consul/self-managed/new) - or [link an existing one](/hcp/docs/consul/self-managed/existing). - - Note: this setting should not be enabled for [HCP Consul Dedicated clusters](/hcp/docs/consul/dedicated). - It is strictly for linking self-managed clusters. - - - `resourceId` ((#v-global-cloud-resourceid)) - The resource id of the HCP Consul Central cluster to link to. Eg: - organization/27109cd4-a309-4bf3-9986-e1d071914b18/project/fcef6c24-259d-4510-bb8d-1d812e120e34/hashicorp.consul.global-network-manager.cluster/consul-cluster - This is required when global.cloud.enabled is true. - - - `secretName` ((#v-global-cloud-resourceid-secretname)) (`string: null`) - The name of the Kubernetes secret that holds the resource id. - - - `secretKey` ((#v-global-cloud-resourceid-secretkey)) (`string: null`) - The key within the Kubernetes secret that holds the resource id. - - - `clientId` ((#v-global-cloud-clientid)) - The client id portion of a [service principal](/hcp/docs/hcp/admin/iam/service-principals#service-principals) with authorization to link the cluster - in global.cloud.resourceId to HCP Consul Central. - This is required when global.cloud.enabled is true. - - - `secretName` ((#v-global-cloud-clientid-secretname)) (`string: null`) - The name of the Kubernetes secret that holds the client id. - - - `secretKey` ((#v-global-cloud-clientid-secretkey)) (`string: null`) - The key within the Kubernetes secret that holds the client id. - - - `clientSecret` ((#v-global-cloud-clientsecret)) - The client secret portion of a [service principal](/hcp/docs/hcp/admin/iam/service-principals#service-principals) with authorization to link the cluster - in global.cloud.resourceId to HCP Consul Central. - This is required when global.cloud.enabled is true. - - - `secretName` ((#v-global-cloud-clientsecret-secretname)) (`string: null`) - The name of the Kubernetes secret that holds the client secret. - - - `secretKey` ((#v-global-cloud-clientsecret-secretkey)) (`string: null`) - The key within the Kubernetes secret that holds the client secret. - - - `apiHost` ((#v-global-cloud-apihost)) - The hostname of HCP's API. This setting is used for internal testing and validation. - - - `secretName` ((#v-global-cloud-apihost-secretname)) (`string: null`) - The name of the Kubernetes secret that holds the api hostname. - - - `secretKey` ((#v-global-cloud-apihost-secretkey)) (`string: null`) - The key within the Kubernetes secret that holds the api hostname. - - - `authUrl` ((#v-global-cloud-authurl)) - The URL of HCP's auth API. This setting is used for internal testing and validation. - - - `secretName` ((#v-global-cloud-authurl-secretname)) (`string: null`) - The name of the Kubernetes secret that holds the authorization url. - - - `secretKey` ((#v-global-cloud-authurl-secretkey)) (`string: null`) - The key within the Kubernetes secret that holds the authorization url. - - - `scadaAddress` ((#v-global-cloud-scadaaddress)) - The address of HCP's scada service. This setting is used for internal testing and validation. - - - `secretName` ((#v-global-cloud-scadaaddress-secretname)) (`string: null`) - The name of the Kubernetes secret that holds the scada address. 
- - - `secretKey` ((#v-global-cloud-scadaaddress-secretkey)) (`string: null`) - The key within the Kubernetes secret that holds the scada address. - - `extraLabels` ((#v-global-extralabels)) (`map`) - Extra labels to attach to all pods, deployments, daemonsets, statefulsets, and jobs. This should be a YAML map. Example: @@ -2843,40 +2791,6 @@ Use these links to navigate to a particular top-level stanza. "sample/annotation2": "bar" ``` - - `cloud` ((#v-telemetrycollector-cloud)) - - - `resourceId` ((#v-telemetrycollector-cloud-resourceid)) - The resource id of the HCP Consul Central cluster to push metrics for. Eg: - `organization/27109cd4-a309-4bf3-9986-e1d071914b18/project/fcef6c24-259d-4510-bb8d-1d812e120e34/hashicorp.consul.global-network-manager.cluster/consul-cluster` - - This is used for HCP Consul Central-linked or HCP Consul Dedicated clusters where global.cloud.resourceId is unset. For example, when using externalServers - with HCP Consul Dedicated clusters or HCP Consul Central-linked clusters in a different admin partition. - - If global.cloud.resourceId is set, this should either be unset (defaulting to global.cloud.resourceId) or be the same as global.cloud.resourceId. - - - `secretName` ((#v-telemetrycollector-cloud-resourceid-secretname)) (`string: null`) - The name of the Kubernetes secret that holds the resource id. - - - `secretKey` ((#v-telemetrycollector-cloud-resourceid-secretkey)) (`string: null`) - The key within the Kubernetes secret that holds the resource id. - - - `clientId` ((#v-telemetrycollector-cloud-clientid)) - The client id portion of a [service principal](/hcp/docs/hcp/admin/iam/service-principals#service-principals) with authorization to push metrics to HCP - - This is set in two scenarios: - - the service principal in global.cloud is unset - - the HCP UI provides a service principal with more narrowly scoped permissions that the service principal used in global.cloud - - - `secretName` ((#v-telemetrycollector-cloud-clientid-secretname)) (`string: null`) - The name of the Kubernetes secret that holds the client id. - - - `secretKey` ((#v-telemetrycollector-cloud-clientid-secretkey)) (`string: null`) - The key within the Kubernetes secret that holds the client id. - - - `clientSecret` ((#v-telemetrycollector-cloud-clientsecret)) - The client secret portion of a [service principal](/hcp/docs/hcp/admin/iam/service-principals#service-principals) with authorization to push metrics to HCP. - - This is set in two scenarios: - - the service principal in global.cloud is unset - - the HCP UI provides a service principal with more narrowly scoped permissions that the service principal used in global.cloud - - - `secretName` ((#v-telemetrycollector-cloud-clientsecret-secretname)) (`string: null`) - The name of the Kubernetes secret that holds the client secret. - - - `secretKey` ((#v-telemetrycollector-cloud-clientsecret-secretkey)) (`string: null`) - The key within the Kubernetes secret that holds the client secret. - - `initContainer` ((#v-telemetrycollector-initcontainer)) - `resources` ((#v-telemetrycollector-initcontainer-resources)) (`map`) - The resource settings for consul-telemetry-collector initContainer. 
diff --git a/website/content/docs/upgrading/upgrade-specific.mdx b/website/content/docs/upgrading/upgrade-specific.mdx index c7fec8cb34..4ff5eb8c30 100644 --- a/website/content/docs/upgrading/upgrade-specific.mdx +++ b/website/content/docs/upgrading/upgrade-specific.mdx @@ -14,6 +14,12 @@ provided for their upgrades as a result of new features or changed behavior. This page is used to document those details separately from the standard upgrade flow. +## Consul 1.20.x + +### Mesh traffic request path normalization enabled by default + +As of Consul v1.20.1, inbound traffic to mesh proxies has Envoy request [path normalization](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto#envoy-v3-api-field-extensions-filters-network-http-connection-manager-v3-httpconnectionmanager-normalize-path) applied by default. This should not interfere with the majority of service traffic, but you can disable it if needed by setting `http.incoming.request_normalization.insecure_disable_path_normalization` to `true` in the [global `mesh` configuration entry](/consul/docs/connect/config-entries/mesh#request-normalization). Disabling normalization is generally safe if you do not use L7 intentions with path matching. + ## Consul v1.19.x ### Health endpoint status filtering is now processed on the server side when using client agents @@ -74,10 +80,6 @@ service-defaults are configured in each partition and namespace before upgrading #### ACL tokens with templated policies [ACL templated policies](/consul/docs/security/acl#templated-policies) were added to 1.17.0 to simplify obtaining the right permissions for ACL tokens. When performing a [rolling upgrade](/consul/tutorials/datacenter-operations/upgrade-federated-environment#server-rolling-upgrade) and a version of Consul prior to 1.17.x is presented with a token created Consul v1.17.x or newer that contains templated policies, the templated policies field is not recognized. As a result, the token might not have the expected permissions on the older version of Consul. -### Mesh traffic request path normalization enabled by default - -As of Consul v1.17.8, inbound traffic to mesh proxies will have Envoy request [path normalization](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto#envoy-v3-api-field-extensions-filters-network-http-connection-manager-v3-httpconnectionmanager-normalize-path) applied by default. This should not interfere with the majority of service traffic, but can be disabled if needed by setting `http.incoming.request_normalization.insecure_disable_path_normalization` to `true` in the [global `mesh` configuration entry](/consul/docs/connect/config-entries/mesh#request-normalization). This setting is generally safe to change if not using L7 intentions with path matching. 
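To make the opt-out described above concrete, here is a minimal sketch of the global `mesh` configuration entry with path normalization disabled. The HCL field casing (`HTTP`/`Incoming`/`RequestNormalization`/`InsecureDisablePathNormalization`) follows Consul's usual config-entry conventions for the dotted setting path named in the note; verify it against the linked request-normalization reference before applying.

```hcl
Kind = "mesh"

HTTP {
  Incoming {
    RequestNormalization {
      # Restores the pre-1.20.1 behavior. Only do this if no L7 intentions
      # depend on path matching, since unnormalized paths can bypass them.
      InsecureDisablePathNormalization = true
    }
  }
}
```

Apply it with `consul config write mesh.hcl`; the `mesh` entry is global, so it affects inbound traffic to all mesh proxies in the datacenter.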
- ## Consul 1.16.x ### Known issues diff --git a/website/data/api-docs-nav-data.json b/website/data/api-docs-nav-data.json index 9f86aa6e9b..d553573105 100644 --- a/website/data/api-docs-nav-data.json +++ b/website/data/api-docs-nav-data.json @@ -135,10 +135,6 @@ "title": "Exported Services", "path": "exported-services" }, - { - "title": "HCP Consul Central Link", - "path": "hcp-link" - }, { "title": "Health", "path": "health" diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index 204303ef9b..1276feaa02 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -418,7 +418,7 @@ "title": "Cache DNS lookups", "path": "services/discovery/dns-cache" }, - { + { "title": "Enable dynamic DNS lookups", "path": "services/discovery/dns-dynamic-lookups" } @@ -690,6 +690,35 @@ { "title": "UI Visualization", "path": "connect/observability/ui-visualization" + }, + { + "title": "Grafana Dashboards", + "routes": [ + { + "title": "Overview", + "path": "connect/observability/grafanadashboards" + }, + { + "title": "Service to Service Dashboard", + "path": "connect/observability/grafanadashboards/service-to-servicedashboard" + }, + { + "title": "Service Dashboard", + "path": "connect/observability/grafanadashboards/servicedashboard" + }, + { + "title": "Consul Dataplane Dashboard", + "path": "connect/observability/grafanadashboards/consuldataplanedashboard" + }, + { + "title": "Consul K8s Dashboard", + "path": "connect/observability/grafanadashboards/consulk8sdashboard" + }, + { + "title": "Consul Server Dashboard", + "path": "connect/observability/grafanadashboards/consulserverdashboard" + } + ] } ] }, diff --git a/website/public/img/grafana/consul-dataplane-dashboard.png b/website/public/img/grafana/consul-dataplane-dashboard.png new file mode 100644 index 0000000000..fbca984c11 Binary files /dev/null and b/website/public/img/grafana/consul-dataplane-dashboard.png differ diff --git a/website/public/img/grafana/service-dashboard-1.png b/website/public/img/grafana/service-dashboard-1.png new file mode 100644 index 0000000000..99f68d4a5c Binary files /dev/null and b/website/public/img/grafana/service-dashboard-1.png differ diff --git a/website/public/img/grafana/service-dashboard-2.png b/website/public/img/grafana/service-dashboard-2.png new file mode 100644 index 0000000000..fa68fe3d1b Binary files /dev/null and b/website/public/img/grafana/service-dashboard-2.png differ diff --git a/website/public/img/grafana/service-to-service-1.png b/website/public/img/grafana/service-to-service-1.png new file mode 100644 index 0000000000..b2c14d0aca Binary files /dev/null and b/website/public/img/grafana/service-to-service-1.png differ diff --git a/website/public/img/grafana/service-to-service-2.png b/website/public/img/grafana/service-to-service-2.png new file mode 100644 index 0000000000..3501407792 Binary files /dev/null and b/website/public/img/grafana/service-to-service-2.png differ diff --git a/website/public/img/grafana/service-to-service-3.png b/website/public/img/grafana/service-to-service-3.png new file mode 100644 index 0000000000..07380f357b Binary files /dev/null and b/website/public/img/grafana/service-to-service-3.png differ diff --git a/website/public/img/grafana/service-to-service-4.png b/website/public/img/grafana/service-to-service-4.png new file mode 100644 index 0000000000..17d5227b5d Binary files /dev/null and b/website/public/img/grafana/service-to-service-4.png differ diff --git a/website/redirects.js b/website/redirects.js index 
536a11abad..57590f8025 100644 --- a/website/redirects.js +++ b/website/redirects.js @@ -238,7 +238,7 @@ module.exports = [ permanent: true, }, { - source: 'consul/docs/architecture/v2/:slug*', + source: '/consul/docs/architecture/v2/:slug*', destination: '/consul/docs/architecture/catalog#v2-catalog', permanent: true, }, @@ -256,5 +256,10 @@ module.exports = [ source: '/consul/docs/:version(v1\.(?:11|12|13|14|15|16|17|18)\.x)/k8s/dns/enable', destination: '/consul/docs/:version/k8s/dns', permanent: true, - } + }, + { + source: '/consul/api-docs/hcp-link', + destination: '/hcp/docs/consul/concepts/consul-central', + permanent: true, + }, ]
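A closing note on the `redirects.js` hunk: these entries use Next.js redirect semantics, where `source` is matched against the incoming request path using `path-to-regexp`-style patterns. Request paths always begin with `/`, so the original `consul/docs/architecture/v2/:slug*` source could never match, which is what the added leading slash fixes. A small sketch of the difference, assuming the `path-to-regexp` package (return shapes vary slightly by version):

```js
const { match } = require('path-to-regexp')

// As originally written: no leading slash, so no request path ever matches.
const broken = match('consul/docs/architecture/v2/:slug*')

// As corrected in this change.
const fixed = match('/consul/docs/architecture/v2/:slug*')

console.log(broken('/consul/docs/architecture/v2/catalog')) // false
console.log(fixed('/consul/docs/architecture/v2/catalog'))  // { params: { slug: ['catalog'] }, ... }
```

The added `/consul/api-docs/hcp-link` entry pairs with the nav-data removal above, so existing links to the deleted API page land on the HCP Consul Central concept docs rather than a 404.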