mirror of https://github.com/hashicorp/consul
Merge branch 'main' into cm-bug-11457
commit
65753357cc
|
@ -0,0 +1,3 @@
|
|||
```release-note:improvement
|
||||
xds: configures Envoy to load balance over all instances of an external service configured with hostnames when "envoy_dns_discovery_type" is set to "STRICT_DNS"
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:feature
|
||||
docs: added the docs for the grafana dashboards
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
proxycfg: fix a bug where peered upstream watches are canceled even when another target needs them.
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:feature
|
||||
v2: remove HCP Link integration
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:security
|
||||
Resolved issue where HCL would allow duplicates of the same key in ACL policy configuration.
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
state: ensure that identical manual virtual IP updates result in not bumping the modify indexes
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:security
|
||||
api: Enforces strict content-type header validation to protect against XSS vulnerability.
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:security
|
||||
Removed ability to use bexpr to filter results without ACL read on endpoint
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:security
|
||||
Update `github.com/golang-jwt/jwt/v4` to v4.5.1 to address [GHSA-29wx-vh33-7x7r](https://github.com/golang-jwt/jwt/security/advisories/GHSA-29wx-vh33-7x7r).
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:security
|
||||
Update `golang.org/x/crypto` to v0.31.0 to address [GO-2024-3321](https://pkg.go.dev/vuln/GO-2024-3321).
|
||||
```
|
|
@ -1,3 +1,5 @@
|
|||
* @hashicorp/consul-selfmanage-maintainers
|
||||
|
||||
# Technical Writer Review
|
||||
|
||||
/website/content/docs/ @hashicorp/consul-docs
|
||||
|
@ -6,8 +8,8 @@
|
|||
|
||||
|
||||
# release configuration
|
||||
/.release/ @hashicorp/release-engineering @hashicorp/github-consul-core
|
||||
/.github/workflows/build.yml @hashicorp/release-engineering @hashicorp/github-consul-core
|
||||
/.release/ @hashicorp/team-selfmanaged-releng @hashicorp/consul-selfmanage-maintainers
|
||||
/.github/workflows/build.yml @hashicorp/team-selfmanaged-releng @hashicorp/consul-selfmanage-maintainers
|
||||
|
||||
|
||||
# Staff Engineer Review (protocol buffer definitions)
|
||||
|
|
|
@ -217,7 +217,7 @@ jobs:
|
|||
# matrix.consul-version (i.e. whenever the highest common Envoy version across active
|
||||
# Consul versions changes). The minor Envoy version does not necessarily need to be
|
||||
# kept current for the purpose of these tests, but the major (1.N) version should be.
|
||||
ENVOY_VERSION: 1.27.6
|
||||
ENVOY_VERSION: 1.28.7
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
|
||||
|
|
|
@ -14,7 +14,7 @@
|
|||
|
||||
container {
|
||||
dependencies = true
|
||||
alpine_secdb = true
|
||||
osv = true
|
||||
|
||||
secrets {
|
||||
matchers {
|
||||
|
@ -36,9 +36,14 @@ container {
|
|||
# periodically cleaned up to remove items that are no longer found by the scanner.
|
||||
triage {
|
||||
suppress {
|
||||
# N.b. `vulnerabilites` is the correct spelling for this tool.
|
||||
vulnerabilites = [
|
||||
vulnerabilities = [
|
||||
"CVE-2024-8096", # curl@8.9.1-r2,
|
||||
"CVE-2024-9143", # openssl@3.3.2-r0,
|
||||
"CVE-2024-3596", # openssl@3.3.2-r0,
|
||||
"CVE-2024-2236", # openssl@3.3.2-r0,
|
||||
"CVE-2024-26458", # openssl@3.3.2-r0,
|
||||
"CVE-2024-2511", # openssl@3.3.2-r0,
|
||||
#the above can be resolved when they're resolved in the alpine image
|
||||
]
|
||||
paths = [
|
||||
"internal/tools/proto-gen-rpc-glue/e2e/consul/*",
|
||||
|
@ -78,8 +83,8 @@ binary {
|
|||
# periodically cleaned up to remove items that are no longer found by the scanner.
|
||||
triage {
|
||||
suppress {
|
||||
# N.b. `vulnerabilites` is the correct spelling for this tool.
|
||||
vulnerabilites = [
|
||||
vulnerabilities = [
|
||||
"GO-2022-0635", // github.com/aws/aws-sdk-go@v1.55.5
|
||||
]
|
||||
paths = [
|
||||
"internal/tools/proto-gen-rpc-glue/e2e/consul/*",
|
||||
|
|
110
CHANGELOG.md
110
CHANGELOG.md
|
@ -1,3 +1,19 @@
|
|||
## 1.20.1 (October 29, 2024)
|
||||
BREAKING CHANGES:
|
||||
|
||||
* mesh: Enable Envoy `HttpConnectionManager.normalize_path` by default on inbound traffic to mesh proxies. This resolves [CVE-2024-10005](https://nvd.nist.gov/vuln/detail/CVE-2024-10005). [[GH-21816](https://github.com/hashicorp/consul/issues/21816)]
|
||||
|
||||
SECURITY:
|
||||
|
||||
* mesh: Add `contains` and `ignoreCase` to L7 Intentions HTTP header matching criteria to support configuration resilient to variable casing and multiple values. This resolves [CVE-2024-10006](https://nvd.nist.gov/vuln/detail/CVE-2024-10006). [[GH-21816](https://github.com/hashicorp/consul/issues/21816)]
|
||||
* mesh: Add `http.incoming.requestNormalization` to Mesh configuration entry to support inbound service traffic request normalization. This resolves [CVE-2024-10005](https://nvd.nist.gov/vuln/detail/CVE-2024-10005) and [CVE-2024-10006](https://nvd.nist.gov/vuln/detail/CVE-2024-10006). [[GH-21816](https://github.com/hashicorp/consul/issues/21816)]
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* api: remove dependency on proto-public, protobuf, and grpc [[GH-21780](https://github.com/hashicorp/consul/issues/21780)]
|
||||
* snapshot agent: **(Enterprise only)** Implement Service Principal Auth for snapshot agent on azure.
|
||||
* xds: configures Envoy to load balance over all instances of an external service configured with hostnames when "envoy_dns_discovery_type" is set to "STRICT_DNS" [[GH-21655](https://github.com/hashicorp/consul/issues/21655)]
|
||||
|
||||
## 1.20.0 (October 14, 2024)
|
||||
|
||||
SECURITY:
|
||||
|
@ -59,6 +75,38 @@ BUG FIXES:
|
|||
|
||||
* jwt-provider: change dns lookup family from the default of AUTO which would prefer ipv6 to ALL if LOGICAL_DNS is used or PREFER_IPV4 if STRICT_DNS is used to gracefully handle transitions to ipv6. [[GH-21703](https://github.com/hashicorp/consul/issues/21703)]
|
||||
|
||||
## 1.19.3 Enterprise (October 29, 2024)
|
||||
BREAKING CHANGES:
|
||||
|
||||
* mesh: **(Enterprise Only)** Enable Envoy `HttpConnectionManager.normalize_path` by default on inbound traffic to mesh proxies. This resolves [CVE-2024-10005](https://nvd.nist.gov/vuln/detail/CVE-2024-10005).
|
||||
|
||||
SECURITY:
|
||||
|
||||
* Explicitly set 'Content-Type' header to mitigate XSS vulnerability. [[GH-21704](https://github.com/hashicorp/consul/issues/21704)]
|
||||
* Implement HTML sanitization for user-generated content to prevent XSS attacks in the UI. [[GH-21711](https://github.com/hashicorp/consul/issues/21711)]
|
||||
* UI: Remove codemirror linting due to package dependency [[GH-21726](https://github.com/hashicorp/consul/issues/21726)]
|
||||
* Upgrade Go to use 1.22.7. This addresses CVE
|
||||
[CVE-2024-34155](https://nvd.nist.gov/vuln/detail/CVE-2024-34155) [[GH-21705](https://github.com/hashicorp/consul/issues/21705)]
|
||||
* Upgrade to support aws/aws-sdk-go `v1.55.5 or higher`. This resolves CVEs
|
||||
[CVE-2020-8911](https://nvd.nist.gov/vuln/detail/cve-2020-8911) and
|
||||
[CVE-2020-8912](https://nvd.nist.gov/vuln/detail/cve-2020-8912). [[GH-21684](https://github.com/hashicorp/consul/issues/21684)]
|
||||
* mesh: **(Enterprise Only)** Add `contains` and `ignoreCase` to L7 Intentions HTTP header matching criteria to support configuration resilient to variable casing and multiple values. This resolves [CVE-2024-10006](https://nvd.nist.gov/vuln/detail/CVE-2024-10006).
|
||||
* mesh: **(Enterprise Only)** Add `http.incoming.requestNormalization` to Mesh configuration entry to support inbound service traffic request normalization. This resolves [CVE-2024-10005](https://nvd.nist.gov/vuln/detail/CVE-2024-10005) and [CVE-2024-10006](https://nvd.nist.gov/vuln/detail/CVE-2024-10006).
|
||||
* ui: Pin a newer resolution of Braces [[GH-21710](https://github.com/hashicorp/consul/issues/21710)]
|
||||
* ui: Pin a newer resolution of Codemirror [[GH-21715](https://github.com/hashicorp/consul/issues/21715)]
|
||||
* ui: Pin a newer resolution of Markdown-it [[GH-21717](https://github.com/hashicorp/consul/issues/21717)]
|
||||
* ui: Pin a newer resolution of ansi-html [[GH-21735](https://github.com/hashicorp/consul/issues/21735)]
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* security: upgrade ubi base image to 9.4 [[GH-21750](https://github.com/hashicorp/consul/issues/21750)]
|
||||
* api: remove dependency on proto-public, protobuf, and grpc [[GH-21780](https://github.com/hashicorp/consul/issues/21780)]
|
||||
* xds: configures Envoy to load balance over all instances of an external service configured with hostnames when "envoy_dns_discovery_type" is set to "STRICT_DNS" [[GH-21655](https://github.com/hashicorp/consul/issues/21655)]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* jwt-provider: change dns lookup family from the default of AUTO which would prefer ipv6 to ALL if LOGICAL_DNS is used or PREFER_IPV4 if STRICT_DNS is used to gracefully handle transitions to ipv6. [[GH-21703](https://github.com/hashicorp/consul/issues/21703)]
|
||||
|
||||
## 1.19.2 (August 26, 2024)
|
||||
|
||||
SECURITY:
|
||||
|
@ -73,6 +121,39 @@ BUG FIXES:
|
|||
|
||||
* api-gateway: **(Enterprise only)** ensure clusters are properly created for JWT providers with a remote URI for the JWKS endpoint [[GH-21604](https://github.com/hashicorp/consul/issues/21604)]
|
||||
|
||||
## 1.18.5 Enterprise (October 29, 2024)
|
||||
|
||||
Enterprise LTS: Consul Enterprise 1.18 is a Long-Term Support (LTS) release.
|
||||
BREAKING CHANGES:
|
||||
|
||||
* mesh: **(Enterprise Only)** Enable Envoy `HttpConnectionManager.normalize_path` by default on inbound traffic to mesh proxies. This resolves [CVE-2024-10005](https://nvd.nist.gov/vuln/detail/CVE-2024-10005).
|
||||
|
||||
SECURITY:
|
||||
|
||||
* Explicitly set 'Content-Type' header to mitigate XSS vulnerability. [[GH-21704](https://github.com/hashicorp/consul/issues/21704)]
|
||||
* Implement HTML sanitization for user-generated content to prevent XSS attacks in the UI. [[GH-21711](https://github.com/hashicorp/consul/issues/21711)]
|
||||
* Upgrade Go to use 1.22.7. This addresses CVE
|
||||
[CVE-2024-34155](https://nvd.nist.gov/vuln/detail/CVE-2024-34155) [[GH-21705](https://github.com/hashicorp/consul/issues/21705)]
|
||||
* Upgrade to support aws/aws-sdk-go `v1.55.5 or higher`. This resolves CVEs
|
||||
[CVE-2020-8911](https://nvd.nist.gov/vuln/detail/cve-2020-8911) and
|
||||
[CVE-2020-8912](https://nvd.nist.gov/vuln/detail/cve-2020-8912). [[GH-21684](https://github.com/hashicorp/consul/issues/21684)]
|
||||
* mesh: **(Enterprise Only)** Add `contains` and `ignoreCase` to L7 Intentions HTTP header matching criteria to support configuration resilient to variable casing and multiple values. This resolves [CVE-2024-10006](https://nvd.nist.gov/vuln/detail/CVE-2024-10006).
|
||||
* mesh: **(Enterprise Only)** Add `http.incoming.requestNormalization` to Mesh configuration entry to support inbound service traffic request normalization. This resolves [CVE-2024-10005](https://nvd.nist.gov/vuln/detail/CVE-2024-10005) and [CVE-2024-10006](https://nvd.nist.gov/vuln/detail/CVE-2024-10006).
|
||||
* ui: Pin a newer resolution of Braces [[GH-21710](https://github.com/hashicorp/consul/issues/21710)]
|
||||
* ui: Pin a newer resolution of Codemirror [[GH-21715](https://github.com/hashicorp/consul/issues/21715)]
|
||||
* ui: Pin a newer resolution of Markdown-it [[GH-21717](https://github.com/hashicorp/consul/issues/21717)]
|
||||
* ui: Pin a newer resolution of ansi-html [[GH-21735](https://github.com/hashicorp/consul/issues/21735)]
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* security: upgrade ubi base image to 9.4 [[GH-21750](https://github.com/hashicorp/consul/issues/21750)]
|
||||
* api: remove dependency on proto-public, protobuf, and grpc [[GH-21780](https://github.com/hashicorp/consul/issues/21780)]
|
||||
* xds: configures Envoy to load balance over all instances of an external service configured with hostnames when "envoy_dns_discovery_type" is set to "STRICT_DNS" [[GH-21655](https://github.com/hashicorp/consul/issues/21655)]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* jwt-provider: change dns lookup family from the default of AUTO which would prefer ipv6 to ALL if LOGICAL_DNS is used or PREFER_IPV4 if STRICT_DNS is used to gracefully handle transitions to ipv6. [[GH-21703](https://github.com/hashicorp/consul/issues/21703)]
|
||||
|
||||
## 1.18.4 Enterprise (August 26, 2024)
|
||||
|
||||
Enterprise LTS: Consul Enterprise 1.18 is a Long-Term Support (LTS) release.
|
||||
|
@ -93,6 +174,35 @@ IMPROVEMENTS:
|
|||
|
||||
* Use Envoy's default for a route's validate_clusters option, which is false. This fixes a case where non-existent clusters could cause a route to no longer route to any of its backends, including existing ones. [[GH-21587](https://github.com/hashicorp/consul/issues/21587)]
|
||||
|
||||
## 1.15.15 Enterprise (October 29, 2024)
|
||||
|
||||
Enterprise LTS: Consul Enterprise 1.15 is a Long-Term Support (LTS) release.
|
||||
BREAKING CHANGES:
|
||||
|
||||
* mesh: **(Enterprise Only)** Enable Envoy `HttpConnectionManager.normalize_path` by default on inbound traffic to mesh proxies. This resolves [CVE-2024-10005](https://nvd.nist.gov/vuln/detail/CVE-2024-10005).
|
||||
|
||||
SECURITY:
|
||||
|
||||
* Explicitly set 'Content-Type' header to mitigate XSS vulnerability. [[GH-21704](https://github.com/hashicorp/consul/issues/21704)]
|
||||
* Implement HTML sanitization for user-generated content to prevent XSS attacks in the UI. [[GH-21711](https://github.com/hashicorp/consul/issues/21711)]
|
||||
* UI: Remove codemirror linting due to package dependency [[GH-21726](https://github.com/hashicorp/consul/issues/21726)]
|
||||
* Upgrade Go to use 1.22.7. This addresses CVE
|
||||
[CVE-2024-34155](https://nvd.nist.gov/vuln/detail/CVE-2024-34155) [[GH-21705](https://github.com/hashicorp/consul/issues/21705)]
|
||||
* Upgrade to support aws/aws-sdk-go `v1.55.5 or higher`. This resolves CVEs
|
||||
[CVE-2020-8911](https://nvd.nist.gov/vuln/detail/cve-2020-8911) and
|
||||
[CVE-2020-8912](https://nvd.nist.gov/vuln/detail/cve-2020-8912). [[GH-21684](https://github.com/hashicorp/consul/issues/21684)]
|
||||
* mesh: **(Enterprise Only)** Add `contains` and `ignoreCase` to L7 Intentions HTTP header matching criteria to support configuration resilient to variable casing and multiple values. This resolves [CVE-2024-10006](https://nvd.nist.gov/vuln/detail/CVE-2024-10006).
|
||||
* mesh: **(Enterprise Only)** Add `http.incoming.requestNormalization` to Mesh configuration entry to support inbound service traffic request normalization. This resolves [CVE-2024-10005](https://nvd.nist.gov/vuln/detail/CVE-2024-10005) and [CVE-2024-10006](https://nvd.nist.gov/vuln/detail/CVE-2024-10006).
|
||||
* ui: Pin a newer resolution of Braces [[GH-21710](https://github.com/hashicorp/consul/issues/21710)]
|
||||
* ui: Pin a newer resolution of Codemirror [[GH-21715](https://github.com/hashicorp/consul/issues/21715)]
|
||||
* ui: Pin a newer resolution of Markdown-it [[GH-21717](https://github.com/hashicorp/consul/issues/21717)]
|
||||
* ui: Pin a newer resolution of ansi-html [[GH-21735](https://github.com/hashicorp/consul/issues/21735)]
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* security: upgrade ubi base image to 9.4 [[GH-21750](https://github.com/hashicorp/consul/issues/21750)]
|
||||
* xds: configures Envoy to load balance over all instances of an external service configured with hostnames when "envoy_dns_discovery_type" is set to "STRICT_DNS" [[GH-21655](https://github.com/hashicorp/consul/issues/21655)]
|
||||
|
||||
## 1.15.14 Enterprise (August 26, 2024)
|
||||
|
||||
Enterprise LTS: Consul Enterprise 1.15 is a Long-Term Support (LTS) release.
|
||||
|
|
|
@ -22,6 +22,9 @@ type Config struct {
|
|||
// WildcardName is the string that represents a request to authorize a wildcard permission
|
||||
WildcardName string
|
||||
|
||||
// by default errors, but in certain instances we want to make sure to maintain backwards compatibility
|
||||
WarnOnDuplicateKey bool
|
||||
|
||||
// embedded enterprise configuration
|
||||
EnterpriseConfig
|
||||
}
|
||||
|
|
|
@ -310,8 +310,8 @@ func (pr *PolicyRules) Validate(conf *Config) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func parse(rules string, conf *Config, meta *EnterprisePolicyMeta) (*Policy, error) {
|
||||
p, err := decodeRules(rules, conf, meta)
|
||||
func parse(rules string, warnOnDuplicateKey bool, conf *Config, meta *EnterprisePolicyMeta) (*Policy, error) {
|
||||
p, err := decodeRules(rules, warnOnDuplicateKey, conf, meta)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -338,7 +338,11 @@ func NewPolicyFromSource(rules string, conf *Config, meta *EnterprisePolicyMeta)
|
|||
|
||||
var policy *Policy
|
||||
var err error
|
||||
policy, err = parse(rules, conf, meta)
|
||||
warnOnDuplicateKey := false
|
||||
if conf != nil {
|
||||
warnOnDuplicateKey = conf.WarnOnDuplicateKey
|
||||
}
|
||||
policy, err = parse(rules, warnOnDuplicateKey, conf, meta)
|
||||
return policy, err
|
||||
}
|
||||
|
||||
|
|
|
@ -7,8 +7,9 @@ package acl
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/hcl"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// EnterprisePolicyMeta stub
|
||||
|
@ -30,12 +31,28 @@ func (r *EnterprisePolicyRules) Validate(*Config) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func decodeRules(rules string, _ *Config, _ *EnterprisePolicyMeta) (*Policy, error) {
|
||||
func decodeRules(rules string, warnOnDuplicateKey bool, _ *Config, _ *EnterprisePolicyMeta) (*Policy, error) {
|
||||
p := &Policy{}
|
||||
|
||||
if err := hcl.Decode(p, rules); err != nil {
|
||||
err := hcl.DecodeErrorOnDuplicates(p, rules)
|
||||
|
||||
if errIsDuplicateKey(err) && warnOnDuplicateKey {
|
||||
// because the snapshot saves the unparsed rules, we have to assume some snapshots exist that shouldn't fail, but
|
||||
// have duplicates
|
||||
if err := hcl.Decode(p, rules); err != nil {
|
||||
hclog.Default().Warn("Warning- Duplicate key in ACL Policy ignored", "errorMessage", err.Error())
|
||||
return nil, fmt.Errorf("Failed to parse ACL rules: %v", err)
|
||||
}
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("Failed to parse ACL rules: %v", err)
|
||||
}
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func errIsDuplicateKey(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
return strings.Contains(err.Error(), "was already set. Each argument can only be defined once")
|
||||
}
|
||||
|
|
|
@ -342,6 +342,12 @@ func TestPolicySourceParse(t *testing.T) {
|
|||
RulesJSON: `{ "acl": "list" }`, // there is no list policy but this helps to exercise another check in isPolicyValid
|
||||
Err: "Invalid acl policy",
|
||||
},
|
||||
{
|
||||
Name: "Bad Policy - Duplicate ACL Key",
|
||||
Rules: `acl="read"
|
||||
acl="write"`,
|
||||
Err: "Failed to parse ACL rules: The argument \"acl\" at",
|
||||
},
|
||||
{
|
||||
Name: "Bad Policy - Agent",
|
||||
Rules: `agent "foo" { policy = "nope" }`,
|
||||
|
|
|
@ -380,16 +380,14 @@ func (s *HTTPHandlers) AgentServices(resp http.ResponseWriter, req *http.Request
|
|||
return nil, err
|
||||
}
|
||||
|
||||
raw, err := filter.Execute(agentSvcs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
agentSvcs = raw.(map[string]*api.AgentService)
|
||||
|
||||
// Note: we filter the results with ACLs *after* applying the user-supplied
|
||||
// bexpr filter, to ensure total (and the filter-by-acls header we set below)
|
||||
// do not include results that would be filtered out even if the user did have
|
||||
// permission.
|
||||
// Note: we filter the results with ACLs *before* applying the user-supplied
|
||||
// bexpr filter to ensure that the user can only run expressions on data that
|
||||
// they have access to. This is a security measure to prevent users from
|
||||
// running arbitrary expressions on data they don't have access to.
|
||||
// QueryMeta.ResultsFilteredByACLs being true already indicates to the user
|
||||
// that results they don't have access to have been removed. If they were
|
||||
// also allowed to run the bexpr filter on the data, they could potentially
|
||||
// infer the specific attributes of data they don't have access to.
|
||||
total := len(agentSvcs)
|
||||
if err := s.agent.filterServicesWithAuthorizer(authz, agentSvcs); err != nil {
|
||||
return nil, err
|
||||
|
@ -407,6 +405,12 @@ func (s *HTTPHandlers) AgentServices(resp http.ResponseWriter, req *http.Request
|
|||
setResultsFilteredByACLs(resp, total != len(agentSvcs))
|
||||
}
|
||||
|
||||
raw, err := filter.Execute(agentSvcs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
agentSvcs = raw.(map[string]*api.AgentService)
|
||||
|
||||
return agentSvcs, nil
|
||||
}
|
||||
|
||||
|
@ -540,16 +544,14 @@ func (s *HTTPHandlers) AgentChecks(resp http.ResponseWriter, req *http.Request)
|
|||
}
|
||||
}
|
||||
|
||||
raw, err := filter.Execute(agentChecks)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
agentChecks = raw.(map[types.CheckID]*structs.HealthCheck)
|
||||
|
||||
// Note: we filter the results with ACLs *after* applying the user-supplied
|
||||
// bexpr filter, to ensure total (and the filter-by-acls header we set below)
|
||||
// do not include results that would be filtered out even if the user did have
|
||||
// permission.
|
||||
// Note: we filter the results with ACLs *before* applying the user-supplied
|
||||
// bexpr filter to ensure that the user can only run expressions on data that
|
||||
// they have access to. This is a security measure to prevent users from
|
||||
// running arbitrary expressions on data they don't have access to.
|
||||
// QueryMeta.ResultsFilteredByACLs being true already indicates to the user
|
||||
// that results they don't have access to have been removed. If they were
|
||||
// also allowed to run the bexpr filter on the data, they could potentially
|
||||
// infer the specific attributes of data they don't have access to.
|
||||
total := len(agentChecks)
|
||||
if err := s.agent.filterChecksWithAuthorizer(authz, agentChecks); err != nil {
|
||||
return nil, err
|
||||
|
@ -567,6 +569,12 @@ func (s *HTTPHandlers) AgentChecks(resp http.ResponseWriter, req *http.Request)
|
|||
setResultsFilteredByACLs(resp, total != len(agentChecks))
|
||||
}
|
||||
|
||||
raw, err := filter.Execute(agentChecks)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
agentChecks = raw.(map[types.CheckID]*structs.HealthCheck)
|
||||
|
||||
return agentChecks, nil
|
||||
}
|
||||
|
||||
|
@ -623,21 +631,14 @@ func (s *HTTPHandlers) AgentMembers(resp http.ResponseWriter, req *http.Request)
|
|||
}
|
||||
}
|
||||
|
||||
// filter the members by parsed filter expression
|
||||
var filterExpression string
|
||||
s.parseFilter(req, &filterExpression)
|
||||
if filterExpression != "" {
|
||||
filter, err := bexpr.CreateFilter(filterExpression, nil, members)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
raw, err := filter.Execute(members)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
members = raw.([]serf.Member)
|
||||
}
|
||||
|
||||
// Note: we filter the results with ACLs *before* applying the user-supplied
|
||||
// bexpr filter to ensure that the user can only run expressions on data that
|
||||
// they have access to. This is a security measure to prevent users from
|
||||
// running arbitrary expressions on data they don't have access to.
|
||||
// QueryMeta.ResultsFilteredByACLs being true already indicates to the user
|
||||
// that results they don't have access to have been removed. If they were
|
||||
// also allowed to run the bexpr filter on the data, they could potentially
|
||||
// infer the specific attributes of data they don't have access to.
|
||||
total := len(members)
|
||||
if err := s.agent.filterMembers(token, &members); err != nil {
|
||||
return nil, err
|
||||
|
@ -655,6 +656,21 @@ func (s *HTTPHandlers) AgentMembers(resp http.ResponseWriter, req *http.Request)
|
|||
setResultsFilteredByACLs(resp, total != len(members))
|
||||
}
|
||||
|
||||
// filter the members by parsed filter expression
|
||||
var filterExpression string
|
||||
s.parseFilter(req, &filterExpression)
|
||||
if filterExpression != "" {
|
||||
filter, err := bexpr.CreateFilter(filterExpression, nil, members)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
raw, err := filter.Execute(members)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
members = raw.([]serf.Member)
|
||||
}
|
||||
|
||||
return members, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -433,6 +433,60 @@ func TestAgent_Services_ACLFilter(t *testing.T) {
|
|||
require.Len(t, val, 2)
|
||||
require.Empty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs"))
|
||||
})
|
||||
|
||||
// ensure ACL filtering occurs before bexpr filtering.
|
||||
const bexprMatchingUserTokenPermissions = "Service matches `web.*`"
|
||||
const bexprNotMatchingUserTokenPermissions = "Service matches `api.*`"
|
||||
|
||||
tokenWithWebRead := testCreateToken(t, a, `
|
||||
service "web" {
|
||||
policy = "read"
|
||||
}
|
||||
`)
|
||||
|
||||
t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
req, _ := http.NewRequest("GET", "/v1/agent/services?filter="+url.QueryEscape(bexprMatchingUserTokenPermissions), nil)
|
||||
req.Header.Add("X-Consul-Token", tokenWithWebRead)
|
||||
resp := httptest.NewRecorder()
|
||||
a.srv.h.ServeHTTP(resp, req)
|
||||
dec := json.NewDecoder(resp.Body)
|
||||
var val map[string]*api.AgentService
|
||||
err := dec.Decode(&val)
|
||||
if err != nil {
|
||||
t.Fatalf("Err: %v", err)
|
||||
}
|
||||
require.Len(t, val, 1)
|
||||
require.NotEmpty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs"))
|
||||
})
|
||||
|
||||
t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
req, _ := http.NewRequest("GET", "/v1/agent/services?filter="+url.QueryEscape(bexprNotMatchingUserTokenPermissions), nil)
|
||||
req.Header.Add("X-Consul-Token", tokenWithWebRead)
|
||||
resp := httptest.NewRecorder()
|
||||
a.srv.h.ServeHTTP(resp, req)
|
||||
dec := json.NewDecoder(resp.Body)
|
||||
var val map[string]*api.AgentService
|
||||
err := dec.Decode(&val)
|
||||
if err != nil {
|
||||
t.Fatalf("Err: %v", err)
|
||||
}
|
||||
require.Len(t, val, 0)
|
||||
require.NotEmpty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs"))
|
||||
})
|
||||
|
||||
t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) {
|
||||
req, _ := http.NewRequest("GET", "/v1/agent/services?filter="+url.QueryEscape(bexprNotMatchingUserTokenPermissions), nil)
|
||||
resp := httptest.NewRecorder()
|
||||
a.srv.h.ServeHTTP(resp, req)
|
||||
dec := json.NewDecoder(resp.Body)
|
||||
var val map[string]*api.AgentService
|
||||
err := dec.Decode(&val)
|
||||
if err != nil {
|
||||
t.Fatalf("Err: %v", err)
|
||||
}
|
||||
require.Len(t, val, 0)
|
||||
require.Empty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestAgent_Service(t *testing.T) {
|
||||
|
@ -1432,6 +1486,57 @@ func TestAgent_Checks_ACLFilter(t *testing.T) {
|
|||
require.Len(t, val, 2)
|
||||
require.Empty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs"))
|
||||
})
|
||||
|
||||
// ensure ACL filtering occurs before bexpr filtering.
|
||||
const bexprMatchingUserTokenPermissions = "ServiceName matches `web.*`"
|
||||
const bexprNotMatchingUserTokenPermissions = "ServiceName matches `api.*`"
|
||||
|
||||
tokenWithWebRead := testCreateToken(t, a, `
|
||||
service "web" {
|
||||
policy = "read"
|
||||
}
|
||||
`)
|
||||
|
||||
t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
req, _ := http.NewRequest("GET", "/v1/agent/checks?filter="+url.QueryEscape(bexprMatchingUserTokenPermissions), nil)
|
||||
req.Header.Add("X-Consul-Token", tokenWithWebRead)
|
||||
resp := httptest.NewRecorder()
|
||||
a.srv.h.ServeHTTP(resp, req)
|
||||
dec := json.NewDecoder(resp.Body)
|
||||
val := make(map[types.CheckID]*structs.HealthCheck)
|
||||
if err := dec.Decode(&val); err != nil {
|
||||
t.Fatalf("Err: %v", err)
|
||||
}
|
||||
require.Len(t, val, 1)
|
||||
require.NotEmpty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs"))
|
||||
})
|
||||
|
||||
t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
req, _ := http.NewRequest("GET", "/v1/agent/checks?filter="+url.QueryEscape(bexprNotMatchingUserTokenPermissions), nil)
|
||||
req.Header.Add("X-Consul-Token", tokenWithWebRead)
|
||||
resp := httptest.NewRecorder()
|
||||
a.srv.h.ServeHTTP(resp, req)
|
||||
dec := json.NewDecoder(resp.Body)
|
||||
val := make(map[types.CheckID]*structs.HealthCheck)
|
||||
if err := dec.Decode(&val); err != nil {
|
||||
t.Fatalf("Err: %v", err)
|
||||
}
|
||||
require.Len(t, val, 0)
|
||||
require.NotEmpty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs"))
|
||||
})
|
||||
|
||||
t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) {
|
||||
req, _ := http.NewRequest("GET", "/v1/agent/checks?filter="+url.QueryEscape(bexprNotMatchingUserTokenPermissions), nil)
|
||||
resp := httptest.NewRecorder()
|
||||
a.srv.h.ServeHTTP(resp, req)
|
||||
dec := json.NewDecoder(resp.Body)
|
||||
val := make(map[types.CheckID]*structs.HealthCheck)
|
||||
if err := dec.Decode(&val); err != nil {
|
||||
t.Fatalf("Err: %v", err)
|
||||
}
|
||||
require.Len(t, val, 0)
|
||||
require.Empty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestAgent_Self(t *testing.T) {
|
||||
|
@ -2110,6 +2215,57 @@ func TestAgent_Members_ACLFilter(t *testing.T) {
|
|||
require.Len(t, val, 2)
|
||||
require.Empty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs"))
|
||||
})
|
||||
|
||||
// ensure ACL filtering occurs before bexpr filtering.
|
||||
bexprMatchingUserTokenPermissions := fmt.Sprintf("Name matches `%s.*`", b.Config.NodeName)
|
||||
bexprNotMatchingUserTokenPermissions := fmt.Sprintf("Name matches `%s.*`", a.Config.NodeName)
|
||||
|
||||
tokenWithReadOnMemberB := testCreateToken(t, a, fmt.Sprintf(`
|
||||
node "%s" {
|
||||
policy = "read"
|
||||
}
|
||||
`, b.Config.NodeName))
|
||||
|
||||
t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
req, _ := http.NewRequest("GET", "/v1/agent/members?filter="+url.QueryEscape(bexprMatchingUserTokenPermissions), nil)
|
||||
req.Header.Add("X-Consul-Token", tokenWithReadOnMemberB)
|
||||
resp := httptest.NewRecorder()
|
||||
a.srv.h.ServeHTTP(resp, req)
|
||||
dec := json.NewDecoder(resp.Body)
|
||||
val := make([]serf.Member, 0)
|
||||
if err := dec.Decode(&val); err != nil {
|
||||
t.Fatalf("Err: %v", err)
|
||||
}
|
||||
require.Len(t, val, 1)
|
||||
require.NotEmpty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs"))
|
||||
})
|
||||
|
||||
t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
req, _ := http.NewRequest("GET", "/v1/agent/members?filter="+url.QueryEscape(bexprNotMatchingUserTokenPermissions), nil)
|
||||
req.Header.Add("X-Consul-Token", tokenWithReadOnMemberB)
|
||||
resp := httptest.NewRecorder()
|
||||
a.srv.h.ServeHTTP(resp, req)
|
||||
dec := json.NewDecoder(resp.Body)
|
||||
val := make([]serf.Member, 0)
|
||||
if err := dec.Decode(&val); err != nil {
|
||||
t.Fatalf("Err: %v", err)
|
||||
}
|
||||
require.Len(t, val, 0)
|
||||
require.NotEmpty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs"))
|
||||
})
|
||||
|
||||
t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) {
|
||||
req, _ := http.NewRequest("GET", "/v1/agent/members?filter="+url.QueryEscape(bexprNotMatchingUserTokenPermissions), nil)
|
||||
resp := httptest.NewRecorder()
|
||||
a.srv.h.ServeHTTP(resp, req)
|
||||
dec := json.NewDecoder(resp.Body)
|
||||
val := make([]serf.Member, 0)
|
||||
if err := dec.Decode(&val); err != nil {
|
||||
t.Fatalf("Err: %v", err)
|
||||
}
|
||||
require.Len(t, val, 0)
|
||||
require.Empty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestAgent_Join(t *testing.T) {
|
||||
|
|
|
@ -122,7 +122,7 @@ func TestCatalogDeregister(t *testing.T) {
|
|||
a := NewTestAgent(t, "")
|
||||
defer a.Shutdown()
|
||||
|
||||
// Register node
|
||||
// Deregister node
|
||||
args := &structs.DeregisterRequest{Node: "foo"}
|
||||
req, _ := http.NewRequest("PUT", "/v1/catalog/deregister", jsonReader(args))
|
||||
obj, err := a.srv.CatalogDeregister(nil, req)
|
||||
|
|
|
@ -2169,6 +2169,22 @@ func TestACLEndpoint_PolicySet(t *testing.T) {
|
|||
require.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("Key Dup", func(t *testing.T) {
|
||||
req := structs.ACLPolicySetRequest{
|
||||
Datacenter: "dc1",
|
||||
Policy: structs.ACLPolicy{
|
||||
Description: "foobar",
|
||||
Name: "baz2",
|
||||
Rules: "service \"\" { policy = \"read\" policy = \"write\" }",
|
||||
},
|
||||
WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken},
|
||||
}
|
||||
resp := structs.ACLPolicy{}
|
||||
|
||||
err := aclEp.PolicySet(&req, &resp)
|
||||
require.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("Update it", func(t *testing.T) {
|
||||
req := structs.ACLPolicySetRequest{
|
||||
Datacenter: "dc1",
|
||||
|
|
|
@ -533,19 +533,24 @@ func (c *Catalog) ListNodes(args *structs.DCSpecificRequest, reply *structs.Inde
|
|||
return nil
|
||||
}
|
||||
|
||||
// Note: we filter the results with ACLs *before* applying the user-supplied
|
||||
// bexpr filter to ensure that the user can only run expressions on data that
|
||||
// they have access to. This is a security measure to prevent users from
|
||||
// running arbitrary expressions on data they don't have access to.
|
||||
// QueryMeta.ResultsFilteredByACLs being true already indicates to the user
|
||||
// that results they don't have access to have been removed. If they were
|
||||
// also allowed to run the bexpr filter on the data, they could potentially
|
||||
// infer the specific attributes of data they don't have access to.
|
||||
if err := c.srv.filterACL(args.Token, reply); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
raw, err := filter.Execute(reply.Nodes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reply.Nodes = raw.(structs.Nodes)
|
||||
|
||||
// Note: we filter the results with ACLs *after* applying the user-supplied
|
||||
// bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include
|
||||
// results that would be filtered out even if the user did have permission.
|
||||
if err := c.srv.filterACL(args.Token, reply); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return c.srv.sortNodesByDistanceFrom(args.Source, reply.Nodes)
|
||||
})
|
||||
}
|
||||
|
@ -607,14 +612,25 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I
|
|||
return nil
|
||||
}
|
||||
|
||||
raw, err := filter.Execute(serviceNodes)
|
||||
// need to temporarily create an IndexedServiceNode so that the ACL filter can be applied
|
||||
// to the service nodes and then re-use those same node to run the filter expression.
|
||||
idxServiceNodeReply := &structs.IndexedServiceNodes{
|
||||
ServiceNodes: serviceNodes,
|
||||
QueryMeta: reply.QueryMeta,
|
||||
}
|
||||
|
||||
// enforce ACLs
|
||||
c.srv.filterACLWithAuthorizer(authz, idxServiceNodeReply)
|
||||
|
||||
// run the filter expression
|
||||
raw, err := filter.Execute(idxServiceNodeReply.ServiceNodes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// convert the result back to the original type
|
||||
reply.Services = servicesTagsByName(raw.(structs.ServiceNodes))
|
||||
|
||||
c.srv.filterACLWithAuthorizer(authz, reply)
|
||||
reply.QueryMeta = idxServiceNodeReply.QueryMeta
|
||||
|
||||
return nil
|
||||
})
|
||||
|
@ -813,6 +829,18 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru
|
|||
reply.ServiceNodes = filtered
|
||||
}
|
||||
|
||||
// Note: we filter the results with ACLs *before* applying the user-supplied
|
||||
// bexpr filter to ensure that the user can only run expressions on data that
|
||||
// they have access to. This is a security measure to prevent users from
|
||||
// running arbitrary expressions on data they don't have access to.
|
||||
// QueryMeta.ResultsFilteredByACLs being true already indicates to the user
|
||||
// that results they don't have access to have been removed. If they were
|
||||
// also allowed to run the bexpr filter on the data, they could potentially
|
||||
// infer the specific attributes of data they don't have access to.
|
||||
if err := c.srv.filterACL(args.Token, reply); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// This is safe to do even when the filter is nil - its just a no-op then
|
||||
raw, err := filter.Execute(reply.ServiceNodes)
|
||||
if err != nil {
|
||||
|
@ -820,13 +848,6 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru
|
|||
}
|
||||
reply.ServiceNodes = raw.(structs.ServiceNodes)
|
||||
|
||||
// Note: we filter the results with ACLs *after* applying the user-supplied
|
||||
// bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include
|
||||
// results that would be filtered out even if the user did have permission.
|
||||
if err := c.srv.filterACL(args.Token, reply); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return c.srv.sortNodesByDistanceFrom(args.Source, reply.ServiceNodes)
|
||||
})
|
||||
|
||||
|
@ -904,6 +925,18 @@ func (c *Catalog) NodeServices(args *structs.NodeSpecificRequest, reply *structs
|
|||
}
|
||||
reply.Index, reply.NodeServices = index, services
|
||||
|
||||
// Note: we filter the results with ACLs *before* applying the user-supplied
|
||||
// bexpr filter to ensure that the user can only run expressions on data that
|
||||
// they have access to. This is a security measure to prevent users from
|
||||
// running arbitrary expressions on data they don't have access to.
|
||||
// QueryMeta.ResultsFilteredByACLs being true already indicates to the user
|
||||
// that results they don't have access to have been removed. If they were
|
||||
// also allowed to run the bexpr filter on the data, they could potentially
|
||||
// infer the specific attributes of data they don't have access to.
|
||||
if err := c.srv.filterACL(args.Token, reply); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if reply.NodeServices != nil {
|
||||
raw, err := filter.Execute(reply.NodeServices.Services)
|
||||
if err != nil {
|
||||
|
@ -912,13 +945,6 @@ func (c *Catalog) NodeServices(args *structs.NodeSpecificRequest, reply *structs
|
|||
reply.NodeServices.Services = raw.(map[string]*structs.NodeService)
|
||||
}
|
||||
|
||||
// Note: we filter the results with ACLs *after* applying the user-supplied
|
||||
// bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include
|
||||
// results that would be filtered out even if the user did have permission.
|
||||
if err := c.srv.filterACL(args.Token, reply); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
@ -1009,21 +1035,26 @@ func (c *Catalog) NodeServiceList(args *structs.NodeSpecificRequest, reply *stru
|
|||
|
||||
if mergedServices != nil {
|
||||
reply.NodeServices = *mergedServices
|
||||
|
||||
raw, err := filter.Execute(reply.NodeServices.Services)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reply.NodeServices.Services = raw.([]*structs.NodeService)
|
||||
}
|
||||
|
||||
// Note: we filter the results with ACLs *after* applying the user-supplied
|
||||
// bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include
|
||||
// results that would be filtered out even if the user did have permission.
|
||||
// Note: we filter the results with ACLs *before* applying the user-supplied
|
||||
// bexpr filter to ensure that the user can only run expressions on data that
|
||||
// they have access to. This is a security measure to prevent users from
|
||||
// running arbitrary expressions on data they don't have access to.
|
||||
// QueryMeta.ResultsFilteredByACLs being true already indicates to the user
|
||||
// that results they don't have access to have been removed. If they were
|
||||
// also allowed to run the bexpr filter on the data, they could potentially
|
||||
// infer the specific attributes of data they don't have access to.
|
||||
if err := c.srv.filterACL(args.Token, reply); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
raw, err := filter.Execute(reply.NodeServices.Services)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reply.NodeServices.Services = raw.([]*structs.NodeService)
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
|
|
@ -984,6 +984,63 @@ func TestCatalog_RPC_Filter(t *testing.T) {
|
|||
require.Equal(t, "baz", out.Nodes[0].Node)
|
||||
})
|
||||
|
||||
t.Run("ListServices", func(t *testing.T) {
|
||||
args := structs.ServiceSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
ServiceName: "redis",
|
||||
QueryOptions: structs.QueryOptions{Filter: "ServiceMeta.version == 1"},
|
||||
}
|
||||
|
||||
out := new(structs.IndexedServices)
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, &out))
|
||||
require.Len(t, out.Services, 2)
|
||||
require.Len(t, out.Services["redis"], 1)
|
||||
require.Len(t, out.Services["web"], 2)
|
||||
|
||||
args.Filter = "ServiceMeta.version == 2"
|
||||
out = new(structs.IndexedServices)
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, &out))
|
||||
require.Len(t, out.Services, 4)
|
||||
require.Len(t, out.Services["redis"], 1)
|
||||
require.Len(t, out.Services["web"], 2)
|
||||
require.Len(t, out.Services["critical"], 1)
|
||||
require.Len(t, out.Services["warning"], 1)
|
||||
})
|
||||
|
||||
t.Run("NodeServices", func(t *testing.T) {
|
||||
args := structs.NodeSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: "baz",
|
||||
QueryOptions: structs.QueryOptions{Filter: "Service == web"},
|
||||
}
|
||||
|
||||
out := new(structs.IndexedNodeServices)
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &args, &out))
|
||||
require.Len(t, out.NodeServices.Services, 2)
|
||||
|
||||
args.Filter = "Service == web and Meta.version == 2"
|
||||
out = new(structs.IndexedNodeServices)
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &args, &out))
|
||||
require.Len(t, out.NodeServices.Services, 1)
|
||||
})
|
||||
|
||||
t.Run("NodeServiceList", func(t *testing.T) {
|
||||
args := structs.NodeSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: "baz",
|
||||
QueryOptions: structs.QueryOptions{Filter: "Service == web"},
|
||||
}
|
||||
|
||||
out := new(structs.IndexedNodeServiceList)
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.NodeServiceList", &args, &out))
|
||||
require.Len(t, out.NodeServices.Services, 2)
|
||||
|
||||
args.Filter = "Service == web and Meta.version == 2"
|
||||
out = new(structs.IndexedNodeServiceList)
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.NodeServiceList", &args, &out))
|
||||
require.Len(t, out.NodeServices.Services, 1)
|
||||
})
|
||||
|
||||
t.Run("ServiceNodes", func(t *testing.T) {
|
||||
args := structs.ServiceSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
|
@ -1006,22 +1063,6 @@ func TestCatalog_RPC_Filter(t *testing.T) {
|
|||
require.Equal(t, "foo", out.ServiceNodes[0].Node)
|
||||
})
|
||||
|
||||
t.Run("NodeServices", func(t *testing.T) {
|
||||
args := structs.NodeSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: "baz",
|
||||
QueryOptions: structs.QueryOptions{Filter: "Service == web"},
|
||||
}
|
||||
|
||||
out := new(structs.IndexedNodeServices)
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &args, &out))
|
||||
require.Len(t, out.NodeServices.Services, 2)
|
||||
|
||||
args.Filter = "Service == web and Meta.version == 2"
|
||||
out = new(structs.IndexedNodeServices)
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &args, &out))
|
||||
require.Len(t, out.NodeServices.Services, 1)
|
||||
})
|
||||
}
|
||||
|
||||
func TestCatalog_ListNodes_StaleRead(t *testing.T) {
|
||||
|
@ -1332,6 +1373,7 @@ func TestCatalog_ListNodes_ACLFilter(t *testing.T) {
|
|||
Datacenter: "dc1",
|
||||
}
|
||||
|
||||
readToken := token("read")
|
||||
t.Run("deny", func(t *testing.T) {
|
||||
args.Token = token("deny")
|
||||
|
||||
|
@ -1348,7 +1390,7 @@ func TestCatalog_ListNodes_ACLFilter(t *testing.T) {
|
|||
})
|
||||
|
||||
t.Run("allow", func(t *testing.T) {
|
||||
args.Token = token("read")
|
||||
args.Token = readToken
|
||||
|
||||
var reply structs.IndexedNodes
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &reply); err != nil {
|
||||
|
@ -1361,6 +1403,67 @@ func TestCatalog_ListNodes_ACLFilter(t *testing.T) {
|
|||
t.Fatal("ResultsFilteredByACLs should not true")
|
||||
}
|
||||
})
|
||||
|
||||
// Register additional node
|
||||
regArgs := &structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: "foo",
|
||||
Address: "127.0.0.1",
|
||||
WriteRequest: structs.WriteRequest{
|
||||
Token: "root",
|
||||
},
|
||||
}
|
||||
|
||||
var out struct{}
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", regArgs, &out))
|
||||
|
||||
bexprMatchingUserTokenPermissions := fmt.Sprintf("Node matches `%s.*`", s1.config.NodeName)
|
||||
const bexpNotMatchingUserTokenPermissions = "Node matches `node-deny.*`"
|
||||
|
||||
t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
var reply structs.IndexedNodes
|
||||
args = structs.DCSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: readToken,
|
||||
Filter: bexprMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply = structs.IndexedNodes{}
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &reply))
|
||||
require.Equal(t, 1, len(reply.Nodes))
|
||||
require.True(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
|
||||
t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
var reply structs.IndexedNodes
|
||||
args = structs.DCSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: readToken,
|
||||
Filter: bexpNotMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply = structs.IndexedNodes{}
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &reply))
|
||||
require.Empty(t, reply.Nodes)
|
||||
require.True(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
|
||||
t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) {
|
||||
var reply structs.IndexedNodes
|
||||
args = structs.DCSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: "", // no token
|
||||
Filter: bexpNotMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply = structs.IndexedNodes{}
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &reply))
|
||||
require.Empty(t, reply.Nodes)
|
||||
require.False(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
}
|
||||
|
||||
func Benchmark_Catalog_ListNodes(t *testing.B) {
|
||||
|
@ -2758,6 +2861,14 @@ service "foo" {
|
|||
node_prefix "" {
|
||||
policy = "read"
|
||||
}
|
||||
|
||||
node "node-deny" {
|
||||
policy = "deny"
|
||||
}
|
||||
|
||||
service "service-deny" {
|
||||
policy = "deny"
|
||||
}
|
||||
`
|
||||
token = createToken(t, codec, rules)
|
||||
|
||||
|
@ -2915,23 +3026,76 @@ func TestCatalog_ListServices_FilterACL(t *testing.T) {
|
|||
defer codec.Close()
|
||||
testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken("root"))
|
||||
|
||||
opt := structs.DCSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
QueryOptions: structs.QueryOptions{Token: token},
|
||||
}
|
||||
reply := structs.IndexedServices{}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &opt, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
if _, ok := reply.Services["foo"]; !ok {
|
||||
t.Fatalf("bad: %#v", reply.Services)
|
||||
}
|
||||
if _, ok := reply.Services["bar"]; ok {
|
||||
t.Fatalf("bad: %#v", reply.Services)
|
||||
}
|
||||
if !reply.QueryMeta.ResultsFilteredByACLs {
|
||||
t.Fatal("ResultsFilteredByACLs should be true")
|
||||
}
|
||||
t.Run("request with user token without filter param sets ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
req := structs.DCSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
QueryOptions: structs.QueryOptions{Token: token},
|
||||
}
|
||||
reply := structs.IndexedServices{}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &req, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
if _, ok := reply.Services["foo"]; !ok {
|
||||
t.Fatalf("bad: %#v", reply.Services)
|
||||
}
|
||||
if _, ok := reply.Services["bar"]; ok {
|
||||
t.Fatalf("bad: %#v", reply.Services)
|
||||
}
|
||||
if !reply.QueryMeta.ResultsFilteredByACLs {
|
||||
t.Fatal("ResultsFilteredByACLs should be true")
|
||||
}
|
||||
})
|
||||
|
||||
const bexprMatchingUserTokenPermissions = "ServiceName matches `f.*`"
|
||||
const bexpNotMatchingUserTokenPermissions = "ServiceName matches `b.*`"
|
||||
|
||||
t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
req := structs.DCSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: token,
|
||||
Filter: bexprMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply := structs.IndexedServices{}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &req, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
require.Equal(t, 1, len(reply.Services))
|
||||
require.True(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
|
||||
t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
req := structs.DCSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: token,
|
||||
Filter: bexpNotMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply := structs.IndexedServices{}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &req, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
require.Zero(t, len(reply.Services))
|
||||
require.True(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
|
||||
t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) {
|
||||
req := structs.DCSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: "", // no token
|
||||
Filter: bexprMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply := structs.IndexedServices{}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &req, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
require.Zero(t, len(reply.Services))
|
||||
require.False(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
}
|
||||
|
||||
func TestCatalog_ServiceNodes_FilterACL(t *testing.T) {
|
||||
|
@ -2982,11 +3146,80 @@ func TestCatalog_ServiceNodes_FilterACL(t *testing.T) {
|
|||
}
|
||||
require.True(t, reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
|
||||
|
||||
// We've already proven that we call the ACL filtering function so we
|
||||
// test node filtering down in acl.go for node cases. This also proves
|
||||
// that we respect the version 8 ACL flag, since the test server sets
|
||||
// that to false (the regression value of *not* changing this is better
|
||||
// for now until we change the sense of the version 8 ACL flag).
|
||||
bexprMatchingUserTokenPermissions := fmt.Sprintf("Node matches `%s.*`", srv.config.NodeName)
|
||||
const bexpNotMatchingUserTokenPermissions = "Node matches `node-deny.*`"
|
||||
|
||||
// Register a service of the same name on the denied node
|
||||
regArg := structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: "node-deny",
|
||||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
ID: "foo",
|
||||
Service: "foo",
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
CheckID: "service:foo",
|
||||
Name: "service:foo",
|
||||
ServiceID: "foo",
|
||||
Status: api.HealthPassing,
|
||||
},
|
||||
WriteRequest: structs.WriteRequest{Token: "root"},
|
||||
}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", ®Arg, nil); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
opt = structs.ServiceSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
ServiceName: "foo",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: token,
|
||||
Filter: bexprMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply = structs.IndexedServiceNodes{}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &opt, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
require.Equal(t, 1, len(reply.ServiceNodes))
|
||||
require.True(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
|
||||
t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
opt = structs.ServiceSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
ServiceName: "foo",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: token,
|
||||
Filter: bexpNotMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply = structs.IndexedServiceNodes{}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &opt, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
require.Zero(t, len(reply.ServiceNodes))
|
||||
require.True(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
|
||||
t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) {
|
||||
opt = structs.ServiceSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
ServiceName: "foo",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: "", // no token
|
||||
Filter: bexpNotMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply = structs.IndexedServiceNodes{}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &opt, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
require.Zero(t, len(reply.ServiceNodes))
|
||||
require.False(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
}
|
||||
|
||||
func TestCatalog_NodeServices_ACL(t *testing.T) {
|
||||
|
@ -3075,6 +3308,139 @@ func TestCatalog_NodeServices_FilterACL(t *testing.T) {
|
|||
svc, ok := reply.NodeServices.Services["foo"]
|
||||
require.True(t, ok)
|
||||
require.Equal(t, "foo", svc.ID)
|
||||
|
||||
const bexprMatchingUserTokenPermissions = "Service matches `f.*`"
|
||||
const bexpNotMatchingUserTokenPermissions = "Service matches `b.*`"
|
||||
|
||||
t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
req := structs.NodeSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: srv.config.NodeName,
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: token,
|
||||
Filter: bexprMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply = structs.IndexedNodeServices{}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &req, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
require.Equal(t, 1, len(reply.NodeServices.Services))
|
||||
require.True(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
|
||||
t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
req := structs.NodeSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: srv.config.NodeName,
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: token,
|
||||
Filter: bexpNotMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply = structs.IndexedNodeServices{}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &req, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
require.Zero(t, len(reply.NodeServices.Services))
|
||||
require.True(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
|
||||
t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) {
|
||||
req := structs.NodeSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: srv.config.NodeName,
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: "", // no token
|
||||
Filter: bexprMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply = structs.IndexedNodeServices{}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &req, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
require.Nil(t, reply.NodeServices)
|
||||
require.False(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
}
|
||||
|
||||
func TestCatalog_NodeServicesList_FilterACL(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
dir, token, srv, codec := testACLFilterServer(t)
|
||||
defer os.RemoveAll(dir)
|
||||
defer srv.Shutdown()
|
||||
defer codec.Close()
|
||||
testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken("root"))
|
||||
|
||||
opt := structs.NodeSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: srv.config.NodeName,
|
||||
QueryOptions: structs.QueryOptions{Token: token},
|
||||
}
|
||||
|
||||
var reply structs.IndexedNodeServiceList
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.NodeServiceList", &opt, &reply))
|
||||
|
||||
require.NotNil(t, reply.NodeServices)
|
||||
require.Len(t, reply.NodeServices.Services, 1)
|
||||
|
||||
const bexprMatchingUserTokenPermissions = "Service matches `f.*`"
|
||||
const bexpNotMatchingUserTokenPermissions = "Service matches `b.*`"
|
||||
|
||||
t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
req := structs.NodeSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: srv.config.NodeName,
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: token,
|
||||
Filter: bexprMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply = structs.IndexedNodeServiceList{}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.NodeServiceList", &req, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
require.Equal(t, 1, len(reply.NodeServices.Services))
|
||||
require.True(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
|
||||
t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
req := structs.NodeSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: srv.config.NodeName,
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: token,
|
||||
Filter: bexpNotMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply = structs.IndexedNodeServiceList{}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.NodeServiceList", &req, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
require.Zero(t, len(reply.NodeServices.Services))
|
||||
require.True(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
|
||||
t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) {
|
||||
req := structs.NodeSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: srv.config.NodeName,
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: "", // no token
|
||||
Filter: bexprMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply = structs.IndexedNodeServiceList{}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.NodeServiceList", &req, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
require.Empty(t, reply.NodeServices.Services)
|
||||
require.False(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
}
|
||||
|
||||
func TestCatalog_GatewayServices_TerminatingGateway(t *testing.T) {
|
||||
|
|
|
@ -9,7 +9,6 @@ import (
|
|||
"time"
|
||||
|
||||
metrics "github.com/armon/go-metrics"
|
||||
"github.com/armon/go-metrics/prometheus"
|
||||
hashstructure_v2 "github.com/mitchellh/hashstructure/v2"
|
||||
|
||||
"github.com/hashicorp/go-bexpr"
|
||||
|
@ -22,33 +21,6 @@ import (
|
|||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
var ConfigSummaries = []prometheus.SummaryDefinition{
|
||||
{
|
||||
Name: []string{"config_entry", "apply"},
|
||||
Help: "",
|
||||
},
|
||||
{
|
||||
Name: []string{"config_entry", "get"},
|
||||
Help: "",
|
||||
},
|
||||
{
|
||||
Name: []string{"config_entry", "list"},
|
||||
Help: "",
|
||||
},
|
||||
{
|
||||
Name: []string{"config_entry", "listAll"},
|
||||
Help: "",
|
||||
},
|
||||
{
|
||||
Name: []string{"config_entry", "delete"},
|
||||
Help: "",
|
||||
},
|
||||
{
|
||||
Name: []string{"config_entry", "resolve_service_config"},
|
||||
Help: "",
|
||||
},
|
||||
}
|
||||
|
||||
// The ConfigEntry endpoint is used to query centralized config information
|
||||
type ConfigEntry struct {
|
||||
srv *Server
|
||||
|
@ -280,7 +252,14 @@ func (c *ConfigEntry) List(args *structs.ConfigEntryQuery, reply *structs.Indexe
|
|||
return err
|
||||
}
|
||||
|
||||
// Filter the entries returned by ACL permissions.
|
||||
// Note: we filter the results with ACLs *before* applying the user-supplied
|
||||
// bexpr filter to ensure that the user can only run expressions on data that
|
||||
// they have access to. This is a security measure to prevent users from
|
||||
// running arbitrary expressions on data they don't have access to.
|
||||
// QueryMeta.ResultsFilteredByACLs being true already indicates to the user
|
||||
// that results they don't have access to have been removed. If they were
|
||||
// also allowed to run the bexpr filter on the data, they could potentially
|
||||
// infer the specific attributes of data they don't have access to.
|
||||
filteredEntries := make([]structs.ConfigEntry, 0, len(entries))
|
||||
for _, entry := range entries {
|
||||
if err := entry.CanRead(authz); err != nil {
|
||||
|
|
|
@ -783,7 +783,7 @@ service "foo" {
|
|||
}
|
||||
operator = "read"
|
||||
`
|
||||
id := createToken(t, codec, rules)
|
||||
token := createToken(t, codec, rules)
|
||||
|
||||
// Create some dummy service/proxy configs to be looked up.
|
||||
state := s1.fsm.State()
|
||||
|
@ -804,7 +804,7 @@ operator = "read"
|
|||
args := structs.ConfigEntryQuery{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Datacenter: s1.config.Datacenter,
|
||||
QueryOptions: structs.QueryOptions{Token: id},
|
||||
QueryOptions: structs.QueryOptions{Token: token},
|
||||
}
|
||||
var out structs.IndexedConfigEntries
|
||||
err := msgpackrpc.CallWithCodec(codec, "ConfigEntry.List", &args, &out)
|
||||
|
@ -828,6 +828,58 @@ operator = "read"
|
|||
require.Equal(t, structs.ProxyConfigGlobal, proxyConf.Name)
|
||||
require.Equal(t, structs.ProxyDefaults, proxyConf.Kind)
|
||||
require.False(t, out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false")
|
||||
|
||||
// ensure ACL filtering occurs before bexpr filtering.
|
||||
const bexprMatchingUserTokenPermissions = "Name matches `f.*`"
|
||||
const bexprNotMatchingUserTokenPermissions = "Name matches `db.*`"
|
||||
|
||||
t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
args = structs.ConfigEntryQuery{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Datacenter: s1.config.Datacenter,
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: token,
|
||||
Filter: bexprMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
var reply structs.IndexedConfigEntries
|
||||
err = msgpackrpc.CallWithCodec(codec, "ConfigEntry.List", &args, &reply)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(reply.Entries))
|
||||
require.True(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
|
||||
t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
args = structs.ConfigEntryQuery{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Datacenter: s1.config.Datacenter,
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: token,
|
||||
Filter: bexprNotMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
var reply structs.IndexedConfigEntries
|
||||
err = msgpackrpc.CallWithCodec(codec, "ConfigEntry.List", &args, &reply)
|
||||
require.NoError(t, err)
|
||||
require.Zero(t, len(reply.Entries))
|
||||
require.True(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
|
||||
t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) {
|
||||
args = structs.ConfigEntryQuery{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Datacenter: s1.config.Datacenter,
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: "", // no token
|
||||
Filter: bexprNotMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
var reply structs.IndexedConfigEntries
|
||||
err = msgpackrpc.CallWithCodec(codec, "ConfigEntry.List", &args, &reply)
|
||||
require.NoError(t, err)
|
||||
require.Zero(t, len(reply.Entries))
|
||||
require.False(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
}
|
||||
|
||||
func TestConfigEntry_ListAll_ACLDeny(t *testing.T) {
|
||||
|
|
|
@ -63,19 +63,24 @@ func (h *Health) ChecksInState(args *structs.ChecksInStateRequest,
|
|||
}
|
||||
reply.Index, reply.HealthChecks = index, checks
|
||||
|
||||
// Note: we filter the results with ACLs *before* applying the user-supplied
|
||||
// bexpr filter to ensure that the user can only run expressions on data that
|
||||
// they have access to. This is a security measure to prevent users from
|
||||
// running arbitrary expressions on data they don't have access to.
|
||||
// QueryMeta.ResultsFilteredByACLs being true already indicates to the user
|
||||
// that results they don't have access to have been removed. If they were
|
||||
// also allowed to run the bexpr filter on the data, they could potentially
|
||||
// infer the specific attributes of data they don't have access to.
|
||||
if err := h.srv.filterACL(args.Token, reply); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
raw, err := filter.Execute(reply.HealthChecks)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reply.HealthChecks = raw.(structs.HealthChecks)
|
||||
|
||||
// Note: we filter the results with ACLs *after* applying the user-supplied
|
||||
// bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include
|
||||
// results that would be filtered out even if the user did have permission.
|
||||
if err := h.srv.filterACL(args.Token, reply); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return h.srv.sortNodesByDistanceFrom(args.Source, reply.HealthChecks)
|
||||
})
|
||||
}
|
||||
|
@ -111,19 +116,24 @@ func (h *Health) NodeChecks(args *structs.NodeSpecificRequest,
|
|||
}
|
||||
reply.Index, reply.HealthChecks = index, checks
|
||||
|
||||
// Note: we filter the results with ACLs *before* applying the user-supplied
|
||||
// bexpr filter to ensure that the user can only run expressions on data that
|
||||
// they have access to. This is a security measure to prevent users from
|
||||
// running arbitrary expressions on data they don't have access to.
|
||||
// QueryMeta.ResultsFilteredByACLs being true already indicates to the user
|
||||
// that results they don't have access to have been removed. If they were
|
||||
// also allowed to run the bexpr filter on the data, they could potentially
|
||||
// infer the specific attributes of data they don't have access to.
|
||||
if err := h.srv.filterACL(args.Token, reply); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
raw, err := filter.Execute(reply.HealthChecks)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reply.HealthChecks = raw.(structs.HealthChecks)
|
||||
|
||||
// Note: we filter the results with ACLs *after* applying the user-supplied
|
||||
// bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include
|
||||
// results that would be filtered out even if the user did have permission.
|
||||
if err := h.srv.filterACL(args.Token, reply); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
@ -303,6 +313,18 @@ func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *struc
|
|||
thisReply.Nodes = nodeMetaFilter(arg.NodeMetaFilters, thisReply.Nodes)
|
||||
}
|
||||
|
||||
// Note: we filter the results with ACLs *before* applying the user-supplied
|
||||
// bexpr filter to ensure that the user can only run expressions on data that
|
||||
// they have access to. This is a security measure to prevent users from
|
||||
// running arbitrary expressions on data they don't have access to.
|
||||
// QueryMeta.ResultsFilteredByACLs being true already indicates to the user
|
||||
// that results they don't have access to have been removed. If they were
|
||||
// also allowed to run the bexpr filter on the data, they could potentially
|
||||
// infer the specific attributes of data they don't have access to.
|
||||
if err := h.srv.filterACL(arg.Token, &thisReply); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
raw, err := filter.Execute(thisReply.Nodes)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -310,13 +332,6 @@ func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *struc
|
|||
filteredNodes := raw.(structs.CheckServiceNodes)
|
||||
thisReply.Nodes = filteredNodes.Filter(structs.CheckServiceNodeFilterOptions{FilterType: arg.HealthFilterType})
|
||||
|
||||
// Note: we filter the results with ACLs *after* applying the user-supplied
|
||||
// bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include
|
||||
// results that would be filtered out even if the user did have permission.
|
||||
if err := h.srv.filterACL(arg.Token, &thisReply); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := h.srv.sortNodesByDistanceFrom(arg.Source, thisReply.Nodes); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -1527,11 +1527,62 @@ func TestHealth_NodeChecks_FilterACL(t *testing.T) {
|
|||
require.True(t, found, "bad: %#v", reply.HealthChecks)
|
||||
require.True(t, reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
|
||||
|
||||
// We've already proven that we call the ACL filtering function so we
|
||||
// test node filtering down in acl.go for node cases. This also proves
|
||||
// that we respect the version 8 ACL flag, since the test server sets
|
||||
// that to false (the regression value of *not* changing this is better
|
||||
// for now until we change the sense of the version 8 ACL flag).
|
||||
const bexprMatchingUserTokenPermissions = "ServiceName matches `f.*`"
|
||||
const bexprNotMatchingUserTokenPermissions = "ServiceName matches `b.*`"
|
||||
|
||||
t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
opt := structs.NodeSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: srv.config.NodeName,
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: token,
|
||||
Filter: bexprMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply := structs.IndexedHealthChecks{}
|
||||
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Health.NodeChecks", &opt, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
require.Equal(t, 1, len(reply.HealthChecks))
|
||||
require.True(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
|
||||
t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
opt := structs.NodeSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: srv.config.NodeName,
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: token,
|
||||
Filter: bexprNotMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply := structs.IndexedHealthChecks{}
|
||||
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Health.NodeChecks", &opt, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
require.Zero(t, len(reply.HealthChecks))
|
||||
require.True(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
|
||||
t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) {
|
||||
opt := structs.NodeSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: srv.config.NodeName,
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: "", // no token
|
||||
Filter: bexprNotMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply := structs.IndexedHealthChecks{}
|
||||
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Health.NodeChecks", &opt, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
require.Zero(t, len(reply.HealthChecks))
|
||||
require.False(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
}
|
||||
|
||||
func TestHealth_ServiceChecks_FilterACL(t *testing.T) {
|
||||
|
@ -1571,11 +1622,77 @@ func TestHealth_ServiceChecks_FilterACL(t *testing.T) {
|
|||
require.Empty(t, reply.HealthChecks)
|
||||
require.True(t, reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
|
||||
|
||||
// We've already proven that we call the ACL filtering function so we
|
||||
// test node filtering down in acl.go for node cases. This also proves
|
||||
// that we respect the version 8 ACL flag, since the test server sets
|
||||
// that to false (the regression value of *not* changing this is better
|
||||
// for now until we change the sense of the version 8 ACL flag).
|
||||
// Register a service of the same name on the denied node
|
||||
regArg := structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: "node-deny",
|
||||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
ID: "foo",
|
||||
Service: "foo",
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
CheckID: "service:foo",
|
||||
Name: "service:foo",
|
||||
ServiceID: "foo",
|
||||
Status: api.HealthPassing,
|
||||
},
|
||||
WriteRequest: structs.WriteRequest{Token: "root"},
|
||||
}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", ®Arg, nil); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
const bexprMatchingUserTokenPermissions = "ServiceName matches `f.*`"
|
||||
const bexprNotMatchingUserTokenPermissions = "Node matches `node-deny.*`"
|
||||
|
||||
t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
opt := structs.ServiceSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
ServiceName: "foo",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: token,
|
||||
Filter: bexprMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply := structs.IndexedHealthChecks{}
|
||||
err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &opt, &reply)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, 1, len(reply.HealthChecks))
|
||||
require.True(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
|
||||
t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
opt := structs.ServiceSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
ServiceName: "foo",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: token,
|
||||
Filter: bexprNotMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply := structs.IndexedHealthChecks{}
|
||||
err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &opt, &reply)
|
||||
require.NoError(t, err)
|
||||
require.Zero(t, len(reply.HealthChecks))
|
||||
require.True(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
|
||||
t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) {
|
||||
opt := structs.ServiceSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
ServiceName: "foo",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: "", // no token
|
||||
Filter: bexprMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply := structs.IndexedHealthChecks{}
|
||||
err := msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &opt, &reply)
|
||||
require.NoError(t, err)
|
||||
require.Zero(t, len(reply.HealthChecks))
|
||||
require.False(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
}
|
||||
|
||||
func TestHealth_ServiceNodes_FilterACL(t *testing.T) {
|
||||
|
@ -1607,11 +1724,77 @@ func TestHealth_ServiceNodes_FilterACL(t *testing.T) {
|
|||
require.Empty(t, reply.Nodes)
|
||||
require.True(t, reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
|
||||
|
||||
// We've already proven that we call the ACL filtering function so we
|
||||
// test node filtering down in acl.go for node cases. This also proves
|
||||
// that we respect the version 8 ACL flag, since the test server sets
|
||||
// that to false (the regression value of *not* changing this is better
|
||||
// for now until we change the sense of the version 8 ACL flag).
|
||||
// Register a service of the same name on the denied node
|
||||
regArg := structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: "node-deny",
|
||||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
ID: "foo",
|
||||
Service: "foo",
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
CheckID: "service:foo",
|
||||
Name: "service:foo",
|
||||
ServiceID: "foo",
|
||||
Status: api.HealthPassing,
|
||||
},
|
||||
WriteRequest: structs.WriteRequest{Token: "root"},
|
||||
}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", ®Arg, nil); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
const bexprMatchingUserTokenPermissions = "Service.Service matches `f.*`"
|
||||
const bexprNotMatchingUserTokenPermissions = "Node.Node matches `node-deny.*`"
|
||||
|
||||
t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
opt := structs.ServiceSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
ServiceName: "foo",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: token,
|
||||
Filter: bexprMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply := structs.IndexedCheckServiceNodes{}
|
||||
err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &opt, &reply)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, 1, len(reply.Nodes))
|
||||
require.True(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
|
||||
t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
opt := structs.ServiceSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
ServiceName: "foo",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: token,
|
||||
Filter: bexprNotMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply := structs.IndexedCheckServiceNodes{}
|
||||
err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &opt, &reply)
|
||||
require.NoError(t, err)
|
||||
require.Zero(t, len(reply.Nodes))
|
||||
require.True(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
|
||||
t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) {
|
||||
opt := structs.ServiceSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
ServiceName: "foo",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: "", // no token
|
||||
Filter: bexprMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply := structs.IndexedCheckServiceNodes{}
|
||||
err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &opt, &reply)
|
||||
require.NoError(t, err)
|
||||
require.Zero(t, len(reply.Nodes))
|
||||
require.False(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
}
|
||||
|
||||
func TestHealth_ChecksInState_FilterACL(t *testing.T) {
|
||||
|
@ -1647,11 +1830,59 @@ func TestHealth_ChecksInState_FilterACL(t *testing.T) {
|
|||
require.True(t, found, "missing service 'foo': %#v", reply.HealthChecks)
|
||||
require.True(t, reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
|
||||
|
||||
// We've already proven that we call the ACL filtering function so we
|
||||
// test node filtering down in acl.go for node cases. This also proves
|
||||
// that we respect the version 8 ACL flag, since the test server sets
|
||||
// that to false (the regression value of *not* changing this is better
|
||||
// for now until we change the sense of the version 8 ACL flag).
|
||||
const bexprMatchingUserTokenPermissions = "ServiceName matches `f.*`"
|
||||
const bexprNotMatchingUserTokenPermissions = "ServiceName matches `b.*`"
|
||||
|
||||
t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
req := structs.ChecksInStateRequest{
|
||||
Datacenter: "dc1",
|
||||
State: api.HealthPassing,
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: token,
|
||||
Filter: bexprMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply := structs.IndexedHealthChecks{}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &req, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
require.Equal(t, 1, len(reply.HealthChecks))
|
||||
require.True(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
|
||||
t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
req := structs.ChecksInStateRequest{
|
||||
Datacenter: "dc1",
|
||||
State: api.HealthPassing,
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: token,
|
||||
Filter: bexprNotMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply := structs.IndexedHealthChecks{}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &req, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
require.Zero(t, len(reply.HealthChecks))
|
||||
require.True(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
|
||||
t.Run("request with filter that would normally match but without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) {
|
||||
req := structs.ChecksInStateRequest{
|
||||
Datacenter: "dc1",
|
||||
State: api.HealthPassing,
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: "", // no token
|
||||
Filter: bexprNotMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
reply := structs.IndexedHealthChecks{}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &req, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
require.Zero(t, len(reply.HealthChecks))
|
||||
require.False(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
}
|
||||
|
||||
func TestHealth_RPC_Filter(t *testing.T) {
|
||||
|
|
|
@ -550,19 +550,25 @@ func (s *Intention) List(args *structs.IntentionListRequest, reply *structs.Inde
|
|||
} else {
|
||||
reply.DataOrigin = structs.IntentionDataOriginLegacy
|
||||
}
|
||||
|
||||
// Note: we filter the results with ACLs *before* applying the user-supplied
|
||||
// bexpr filter to ensure that the user can only run expressions on data that
|
||||
// they have access to. This is a security measure to prevent users from
|
||||
// running arbitrary expressions on data they don't have access to.
|
||||
// QueryMeta.ResultsFilteredByACLs being true already indicates to the user
|
||||
// that results they don't have access to have been removed. If they were
|
||||
// also allowed to run the bexpr filter on the data, they could potentially
|
||||
// infer the specific attributes of data they don't have access to.
|
||||
if err := s.srv.filterACL(args.Token, reply); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
raw, err := filter.Execute(reply.Intentions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reply.Intentions = raw.(structs.Intentions)
|
||||
|
||||
// Note: we filter the results with ACLs *after* applying the user-supplied
|
||||
// bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include
|
||||
// results that would be filtered out even if the user did have permission.
|
||||
if err := s.srv.filterACL(args.Token, reply); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
)
|
||||
|
|
|
@ -1639,6 +1639,11 @@ func TestIntentionList_acl(t *testing.T) {
|
|||
token, err := upsertTestTokenWithPolicyRules(codec, TestDefaultInitialManagementToken, "dc1", `service_prefix "foo" { policy = "write" }`)
|
||||
require.NoError(t, err)
|
||||
|
||||
const (
|
||||
bexprMatch = "DestinationName matches `f.*`"
|
||||
bexprNoMatch = "DestinationName matches `nomatch.*`"
|
||||
)
|
||||
|
||||
// Create a few records
|
||||
for _, name := range []string{"foobar", "bar", "baz"} {
|
||||
ixn := structs.IntentionRequest{
|
||||
|
@ -1691,12 +1696,29 @@ func TestIntentionList_acl(t *testing.T) {
|
|||
require.True(t, resp.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
|
||||
})
|
||||
|
||||
t.Run("filtered", func(t *testing.T) {
|
||||
// maskResultsFilteredByACLs() in rpc.go sets ResultsFilteredByACLs to false if the token is an empty string
|
||||
// after resp.QueryMeta.ResultsFilteredByACLs has been determined to be true from filterACLs().
|
||||
t.Run("filtered with no token should return no results and ResultsFilteredByACLs equal to false", func(t *testing.T) {
|
||||
req := &structs.IntentionListRequest{
|
||||
Datacenter: "dc1",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Filter: bexprMatch,
|
||||
},
|
||||
}
|
||||
|
||||
var resp structs.IndexedIntentions
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp))
|
||||
require.Len(t, resp.Intentions, 0)
|
||||
require.False(t, resp.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false")
|
||||
})
|
||||
|
||||
// has access to everything
|
||||
t.Run("filtered with initial management token should return 1 and ResultsFilteredByACLs equal to false", func(t *testing.T) {
|
||||
req := &structs.IntentionListRequest{
|
||||
Datacenter: "dc1",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: TestDefaultInitialManagementToken,
|
||||
Filter: "DestinationName == foobar",
|
||||
Filter: bexprMatch,
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -1705,6 +1727,54 @@ func TestIntentionList_acl(t *testing.T) {
|
|||
require.Len(t, resp.Intentions, 1)
|
||||
require.False(t, resp.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false")
|
||||
})
|
||||
|
||||
// ResultsFilteredByACLs should reflect user does not have access to read all intentions but has access to some.
|
||||
t.Run("filtered with user token whose permissions match filter should return 1 and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
req := &structs.IntentionListRequest{
|
||||
Datacenter: "dc1",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: token.SecretID,
|
||||
Filter: bexprMatch,
|
||||
},
|
||||
}
|
||||
|
||||
var resp structs.IndexedIntentions
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp))
|
||||
require.Len(t, resp.Intentions, 1)
|
||||
require.True(t, resp.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
|
||||
})
|
||||
|
||||
// ResultsFilteredByACLs need to act as though no filter was applied.
|
||||
t.Run("filtered with user token whose permissions do match filter should return 0 and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
req := &structs.IntentionListRequest{
|
||||
Datacenter: "dc1",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: token.SecretID,
|
||||
Filter: bexprNoMatch,
|
||||
},
|
||||
}
|
||||
|
||||
var resp structs.IndexedIntentions
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp))
|
||||
require.Len(t, resp.Intentions, 0)
|
||||
require.True(t, resp.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
|
||||
})
|
||||
|
||||
// ResultsFilteredByACLs should reflect user does not have access to read any intentions
|
||||
t.Run("filtered with anonymous token should return 0 and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
req := &structs.IntentionListRequest{
|
||||
Datacenter: "dc1",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: "anonymous",
|
||||
Filter: bexprMatch,
|
||||
},
|
||||
}
|
||||
|
||||
var resp structs.IndexedIntentions
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp))
|
||||
require.Len(t, resp.Intentions, 0)
|
||||
require.True(t, resp.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
|
||||
})
|
||||
}
|
||||
|
||||
// Test basic matching. We don't need to exhaustively test inputs since this
|
||||
|
|
|
@ -7,15 +7,18 @@ import (
|
|||
"fmt"
|
||||
"net"
|
||||
|
||||
hashstructure_v2 "github.com/mitchellh/hashstructure/v2"
|
||||
"golang.org/x/exp/maps"
|
||||
|
||||
"github.com/hashicorp/go-bexpr"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-memdb"
|
||||
"github.com/hashicorp/serf/serf"
|
||||
hashstructure_v2 "github.com/mitchellh/hashstructure/v2"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/consul/state"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/lib/stringslice"
|
||||
)
|
||||
|
||||
const MaximumManualVIPsPerService = 8
|
||||
|
@ -117,6 +120,18 @@ func (m *Internal) NodeDump(args *structs.DCSpecificRequest,
|
|||
}
|
||||
reply.Index = maxIndex
|
||||
|
||||
// Note: we filter the results with ACLs *before* applying the user-supplied
|
||||
// bexpr filter to ensure that the user can only run expressions on data that
|
||||
// they have access to. This is a security measure to prevent users from
|
||||
// running arbitrary expressions on data they don't have access to.
|
||||
// QueryMeta.ResultsFilteredByACLs being true already indicates to the user
|
||||
// that results they don't have access to have been removed. If they were
|
||||
// also allowed to run the bexpr filter on the data, they could potentially
|
||||
// infer the specific attributes of data they don't have access to.
|
||||
if err := m.srv.filterACL(args.Token, reply); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
raw, err := filter.Execute(reply.Dump)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not filter local node dump: %w", err)
|
||||
|
@ -129,13 +144,6 @@ func (m *Internal) NodeDump(args *structs.DCSpecificRequest,
|
|||
}
|
||||
reply.ImportedDump = importedRaw.(structs.NodeDump)
|
||||
|
||||
// Note: we filter the results with ACLs *after* applying the user-supplied
|
||||
// bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include
|
||||
// results that would be filtered out even if the user did have permission.
|
||||
if err := m.srv.filterACL(args.Token, reply); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
@ -235,13 +243,26 @@ func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs.
|
|||
}
|
||||
}
|
||||
reply.Index = maxIndex
|
||||
raw, err := filter.Execute(reply.Nodes)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not filter local service dump: %w", err)
|
||||
}
|
||||
reply.Nodes = raw.(structs.CheckServiceNodes)
|
||||
}
|
||||
|
||||
// Note: we filter the results with ACLs *before* applying the user-supplied
|
||||
// bexpr filter to ensure that the user can only run expressions on data that
|
||||
// they have access to. This is a security measure to prevent users from
|
||||
// running arbitrary expressions on data they don't have access to.
|
||||
// QueryMeta.ResultsFilteredByACLs being true already indicates to the user
|
||||
// that results they don't have access to have been removed. If they were
|
||||
// also allowed to run the bexpr filter on the data, they could potentially
|
||||
// infer the specific attributes of data they don't have access to.
|
||||
if err := m.srv.filterACL(args.Token, reply); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
raw, err := filter.Execute(reply.Nodes)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not filter local service dump: %w", err)
|
||||
}
|
||||
reply.Nodes = raw.(structs.CheckServiceNodes)
|
||||
|
||||
if !args.NodesOnly {
|
||||
importedRaw, err := filter.Execute(reply.ImportedNodes)
|
||||
if err != nil {
|
||||
|
@ -249,12 +270,6 @@ func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs.
|
|||
}
|
||||
reply.ImportedNodes = importedRaw.(structs.CheckServiceNodes)
|
||||
}
|
||||
// Note: we filter the results with ACLs *after* applying the user-supplied
|
||||
// bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include
|
||||
// results that would be filtered out even if the user did have permission.
|
||||
if err := m.srv.filterACL(args.Token, reply); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
@ -770,17 +785,38 @@ func (m *Internal) AssignManualServiceVIPs(args *structs.AssignServiceManualVIPs
|
|||
return fmt.Errorf("cannot associate more than %d manual virtual IPs with the same service", MaximumManualVIPsPerService)
|
||||
}
|
||||
|
||||
vipMap := make(map[string]struct{})
|
||||
for _, ip := range args.ManualVIPs {
|
||||
parsedIP := net.ParseIP(ip)
|
||||
if parsedIP == nil || parsedIP.To4() == nil {
|
||||
return fmt.Errorf("%q is not a valid IPv4 address", parsedIP.String())
|
||||
}
|
||||
vipMap[ip] = struct{}{}
|
||||
}
|
||||
// Silently ignore duplicates.
|
||||
args.ManualVIPs = maps.Keys(vipMap)
|
||||
|
||||
psn := structs.PeeredServiceName{
|
||||
ServiceName: structs.NewServiceName(args.Service, &args.EnterpriseMeta),
|
||||
}
|
||||
|
||||
// Check to see if we can skip the raft apply entirely.
|
||||
{
|
||||
existingIPs, err := m.srv.fsm.State().ServiceManualVIPs(psn)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error checking for existing manual ips for service: %w", err)
|
||||
}
|
||||
if existingIPs != nil && stringslice.EqualMapKeys(existingIPs.ManualIPs, vipMap) {
|
||||
*reply = structs.AssignServiceManualVIPsResponse{
|
||||
Found: true,
|
||||
UnassignedFrom: nil,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
req := state.ServiceVirtualIP{
|
||||
Service: structs.PeeredServiceName{
|
||||
ServiceName: structs.NewServiceName(args.Service, &args.EnterpriseMeta),
|
||||
},
|
||||
Service: psn,
|
||||
ManualIPs: args.ManualVIPs,
|
||||
}
|
||||
resp, err := m.srv.raftApplyMsgpack(structs.UpdateVirtualIPRequestType, req)
|
||||
|
|
|
@ -12,11 +12,11 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul-net-rpc/net/rpc"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc"
|
||||
"github.com/hashicorp/consul-net-rpc/net/rpc"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
|
@ -656,11 +656,73 @@ func TestInternal_NodeDump_FilterACL(t *testing.T) {
|
|||
t.Fatal("ResultsFilteredByACLs should be true")
|
||||
}
|
||||
|
||||
// We've already proven that we call the ACL filtering function so we
|
||||
// test node filtering down in acl.go for node cases. This also proves
|
||||
// that we respect the version 8 ACL flag, since the test server sets
|
||||
// that to false (the regression value of *not* changing this is better
|
||||
// for now until we change the sense of the version 8 ACL flag).
|
||||
// need to ensure that ACLs are filtered prior to bexprFiltering
|
||||
// Register additional node
|
||||
regArgs := &structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: "foo",
|
||||
Address: "127.0.0.1",
|
||||
WriteRequest: structs.WriteRequest{
|
||||
Token: "root",
|
||||
},
|
||||
}
|
||||
|
||||
var out struct{}
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", regArgs, &out))
|
||||
|
||||
bexprMatchingUserTokenPermissions := fmt.Sprintf("Node matches `%s.*`", srv.config.NodeName)
|
||||
const bexpNotMatchingUserTokenPermissions = "Node matches `node-deny.*`"
|
||||
|
||||
t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
req := structs.DCSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: token,
|
||||
Filter: bexprMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
|
||||
reply = structs.IndexedNodeDump{}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Internal.NodeDump", &req, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
require.Equal(t, 1, len(reply.Dump))
|
||||
require.True(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
|
||||
t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
req := structs.DCSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: token,
|
||||
Filter: bexpNotMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
|
||||
reply = structs.IndexedNodeDump{}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Internal.NodeDump", &req, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
require.Zero(t, len(reply.Dump))
|
||||
require.True(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
|
||||
t.Run("request with filter that would match only record without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) {
|
||||
req := structs.DCSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: "",
|
||||
Filter: bexprMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
|
||||
reply = structs.IndexedNodeDump{}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Internal.NodeDump", &req, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
require.Empty(t, reply.Dump)
|
||||
require.False(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
}
|
||||
|
||||
func TestInternal_EventFire_Token(t *testing.T) {
|
||||
|
@ -1064,6 +1126,113 @@ func TestInternal_ServiceDump_ACL(t *testing.T) {
|
|||
require.Empty(t, out.Gateways)
|
||||
require.True(t, out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
|
||||
})
|
||||
|
||||
// need to ensure that ACLs are filtered prior to bexprFiltering
|
||||
// Register additional node
|
||||
regArgs := &structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: "node-deny",
|
||||
ID: types.NodeID("e0155642-135d-4739-9853-b1ee6c9f945b"),
|
||||
Address: "192.18.1.2",
|
||||
Service: &structs.NodeService{
|
||||
Kind: structs.ServiceKindTypical,
|
||||
ID: "memcached",
|
||||
Service: "memcached",
|
||||
Port: 5678,
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "memcached check",
|
||||
Status: api.HealthPassing,
|
||||
ServiceID: "memcached",
|
||||
},
|
||||
WriteRequest: structs.WriteRequest{
|
||||
Token: "root",
|
||||
},
|
||||
}
|
||||
|
||||
var out struct{}
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", regArgs, &out))
|
||||
|
||||
const (
|
||||
bexprMatchingUserTokenPermissions = "Service.Service matches `redis.*`"
|
||||
bexpNotMatchingUserTokenPermissions = "Node.Node matches `node-deny.*`"
|
||||
)
|
||||
|
||||
t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
token := tokenWithRules(t, `
|
||||
node "node-deny" {
|
||||
policy = "deny"
|
||||
}
|
||||
node "node1" {
|
||||
policy = "read"
|
||||
}
|
||||
service "redis" {
|
||||
policy = "read"
|
||||
}
|
||||
`)
|
||||
var reply structs.IndexedNodesWithGateways
|
||||
req := structs.DCSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: token,
|
||||
Filter: bexprMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
|
||||
reply = structs.IndexedNodesWithGateways{}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Internal.ServiceDump", &req, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
require.Equal(t, 1, len(reply.Nodes))
|
||||
require.True(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
|
||||
t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
token := tokenWithRules(t, `
|
||||
node "node-deny" {
|
||||
policy = "deny"
|
||||
}
|
||||
node "node1" {
|
||||
policy = "read"
|
||||
}
|
||||
service "redis" {
|
||||
policy = "read"
|
||||
}
|
||||
`)
|
||||
var reply structs.IndexedNodesWithGateways
|
||||
req := structs.DCSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: token,
|
||||
Filter: bexpNotMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
|
||||
reply = structs.IndexedNodesWithGateways{}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Internal.ServiceDump", &req, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
require.Zero(t, len(reply.Nodes))
|
||||
require.True(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
|
||||
t.Run("request with filter that would match only record without any token returns zero results and ResultsFilteredByACLs equal to false", func(t *testing.T) {
|
||||
var reply structs.IndexedNodesWithGateways
|
||||
req := structs.DCSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: "", // no token
|
||||
Filter: bexpNotMatchingUserTokenPermissions,
|
||||
},
|
||||
}
|
||||
|
||||
reply = structs.IndexedNodesWithGateways{}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Internal.ServiceDump", &req, &reply); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
require.Empty(t, reply.Nodes)
|
||||
require.False(t, reply.ResultsFilteredByACLs)
|
||||
})
|
||||
}
|
||||
|
||||
func TestInternal_GatewayServiceDump_Terminating(t *testing.T) {
|
||||
|
@ -3716,21 +3885,41 @@ func TestInternal_AssignManualServiceVIPs(t *testing.T) {
|
|||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.AssignManualServiceVIPs", req, &resp))
|
||||
|
||||
type testcase struct {
|
||||
name string
|
||||
req structs.AssignServiceManualVIPsRequest
|
||||
expect structs.AssignServiceManualVIPsResponse
|
||||
expectErr string
|
||||
name string
|
||||
req structs.AssignServiceManualVIPsRequest
|
||||
expect structs.AssignServiceManualVIPsResponse
|
||||
expectAgain structs.AssignServiceManualVIPsResponse
|
||||
expectErr string
|
||||
expectIPs []string
|
||||
}
|
||||
run := func(t *testing.T, tc testcase) {
|
||||
var resp structs.AssignServiceManualVIPsResponse
|
||||
err := msgpackrpc.CallWithCodec(codec, "Internal.AssignManualServiceVIPs", tc.req, &resp)
|
||||
if tc.expectErr != "" {
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), tc.expectErr)
|
||||
return
|
||||
|
||||
run := func(t *testing.T, tc testcase, again bool) {
|
||||
if tc.expectErr != "" && again {
|
||||
return // we don't retest known errors
|
||||
}
|
||||
|
||||
var resp structs.AssignServiceManualVIPsResponse
|
||||
idx1 := s1.raft.CommitIndex()
|
||||
err := msgpackrpc.CallWithCodec(codec, "Internal.AssignManualServiceVIPs", tc.req, &resp)
|
||||
idx2 := s1.raft.CommitIndex()
|
||||
if tc.expectErr != "" {
|
||||
testutil.RequireErrorContains(t, err, tc.expectErr)
|
||||
} else {
|
||||
if again {
|
||||
require.Equal(t, tc.expectAgain, resp)
|
||||
require.Equal(t, idx1, idx2, "no raft operations occurred")
|
||||
} else {
|
||||
require.Equal(t, tc.expect, resp)
|
||||
}
|
||||
|
||||
psn := structs.PeeredServiceName{ServiceName: structs.NewServiceName(tc.req.Service, nil)}
|
||||
got, err := s1.fsm.State().ServiceManualVIPs(psn)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, got)
|
||||
require.Equal(t, tc.expectIPs, got.ManualIPs)
|
||||
}
|
||||
require.Equal(t, tc.expect, resp)
|
||||
}
|
||||
|
||||
tcs := []testcase{
|
||||
{
|
||||
name: "successful manual ip assignment",
|
||||
|
@ -3738,7 +3927,19 @@ func TestInternal_AssignManualServiceVIPs(t *testing.T) {
|
|||
Service: "web",
|
||||
ManualVIPs: []string{"1.1.1.1", "2.2.2.2"},
|
||||
},
|
||||
expect: structs.AssignServiceManualVIPsResponse{Found: true},
|
||||
expectIPs: []string{"1.1.1.1", "2.2.2.2"},
|
||||
expect: structs.AssignServiceManualVIPsResponse{Found: true},
|
||||
expectAgain: structs.AssignServiceManualVIPsResponse{Found: true},
|
||||
},
|
||||
{
|
||||
name: "successfully ignoring duplicates",
|
||||
req: structs.AssignServiceManualVIPsRequest{
|
||||
Service: "web",
|
||||
ManualVIPs: []string{"1.2.3.4", "5.6.7.8", "1.2.3.4", "5.6.7.8"},
|
||||
},
|
||||
expectIPs: []string{"1.2.3.4", "5.6.7.8"},
|
||||
expect: structs.AssignServiceManualVIPsResponse{Found: true},
|
||||
expectAgain: structs.AssignServiceManualVIPsResponse{Found: true},
|
||||
},
|
||||
{
|
||||
name: "reassign existing ip",
|
||||
|
@ -3746,6 +3947,7 @@ func TestInternal_AssignManualServiceVIPs(t *testing.T) {
|
|||
Service: "web",
|
||||
ManualVIPs: []string{"8.8.8.8"},
|
||||
},
|
||||
expectIPs: []string{"8.8.8.8"},
|
||||
expect: structs.AssignServiceManualVIPsResponse{
|
||||
Found: true,
|
||||
UnassignedFrom: []structs.PeeredServiceName{
|
||||
|
@ -3754,6 +3956,8 @@ func TestInternal_AssignManualServiceVIPs(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
// When we repeat this operation the second time it's a no-op.
|
||||
expectAgain: structs.AssignServiceManualVIPsResponse{Found: true},
|
||||
},
|
||||
{
|
||||
name: "invalid ip",
|
||||
|
@ -3761,13 +3965,19 @@ func TestInternal_AssignManualServiceVIPs(t *testing.T) {
|
|||
Service: "web",
|
||||
ManualVIPs: []string{"3.3.3.3", "invalid"},
|
||||
},
|
||||
expect: structs.AssignServiceManualVIPsResponse{},
|
||||
expectErr: "not a valid",
|
||||
},
|
||||
}
|
||||
for _, tc := range tcs {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
run(t, tc)
|
||||
t.Run("initial", func(t *testing.T) {
|
||||
run(t, tc, false)
|
||||
})
|
||||
if tc.expectErr == "" {
|
||||
t.Run("repeat", func(t *testing.T) {
|
||||
run(t, tc, true) // only repeat a write if it isn't an known error
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -53,7 +53,6 @@ import (
|
|||
"github.com/hashicorp/consul/agent/consul/xdscapacity"
|
||||
"github.com/hashicorp/consul/agent/grpc-external/services/peerstream"
|
||||
"github.com/hashicorp/consul/agent/hcp"
|
||||
"github.com/hashicorp/consul/agent/hcp/bootstrap"
|
||||
hcpclient "github.com/hashicorp/consul/agent/hcp/client"
|
||||
logdrop "github.com/hashicorp/consul/agent/log-drop"
|
||||
"github.com/hashicorp/consul/agent/metadata"
|
||||
|
@ -65,7 +64,6 @@ import (
|
|||
"github.com/hashicorp/consul/agent/token"
|
||||
"github.com/hashicorp/consul/internal/controller"
|
||||
"github.com/hashicorp/consul/internal/gossip/librtt"
|
||||
hcpctl "github.com/hashicorp/consul/internal/hcp"
|
||||
"github.com/hashicorp/consul/internal/multicluster"
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
"github.com/hashicorp/consul/internal/resource/demo"
|
||||
|
@ -838,25 +836,6 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server,
|
|||
// to enable RPC forwarding.
|
||||
s.grpcLeaderForwarder = flat.LeaderForwarder
|
||||
|
||||
if s.config.Cloud.IsConfigured() {
|
||||
// Start watching HCP Link resource. This needs to be created after
|
||||
// the GRPC services are set up in order for the resource service client to
|
||||
// function. This uses the insecure grpc channel so that it doesn't need to
|
||||
// present a valid ACL token.
|
||||
go hcp.RunHCPLinkWatcher(
|
||||
&lib.StopChannelContext{StopCh: shutdownCh},
|
||||
logger.Named("hcp-link-watcher"),
|
||||
pbresource.NewResourceServiceClient(s.insecureSafeGRPCChan),
|
||||
hcp.HCPManagerLifecycleFn(
|
||||
s.hcpManager,
|
||||
hcpclient.NewClient,
|
||||
bootstrap.LoadManagementToken,
|
||||
flat.HCP.Config,
|
||||
flat.HCP.DataDir,
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
s.controllerManager = controller.NewManager(
|
||||
// Usage of the insecure + unsafe grpc chan is required for the controller
|
||||
// manager. It must be unauthorized so that controllers do not need to
|
||||
|
@ -928,15 +907,7 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server,
|
|||
return s, nil
|
||||
}
|
||||
|
||||
func (s *Server) registerControllers(deps Deps) error {
|
||||
if s.config.Cloud.IsConfigured() {
|
||||
hcpctl.RegisterControllers(
|
||||
s.controllerManager, hcpctl.ControllerDependencies{
|
||||
CloudConfig: deps.HCP.Config,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func (s *Server) registerControllers(_ Deps) error {
|
||||
shim := NewExportedServicesShim(s)
|
||||
multicluster.RegisterCompatControllers(s.controllerManager, multicluster.DefaultCompatControllerDependencies(shim))
|
||||
|
||||
|
|
|
@ -8,6 +8,8 @@ import (
|
|||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/go-memdb"
|
||||
|
@ -18,6 +20,7 @@ import (
|
|||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/lib"
|
||||
"github.com/hashicorp/consul/lib/maps"
|
||||
"github.com/hashicorp/consul/lib/stringslice"
|
||||
"github.com/hashicorp/consul/types"
|
||||
)
|
||||
|
||||
|
@ -1106,6 +1109,9 @@ func (s *Store) AssignManualServiceVIPs(idx uint64, psn structs.PeeredServiceNam
|
|||
for _, ip := range ips {
|
||||
assignedIPs[ip] = struct{}{}
|
||||
}
|
||||
|
||||
txnNeedsCommit := false
|
||||
|
||||
modifiedEntries := make(map[structs.PeeredServiceName]struct{})
|
||||
for ip := range assignedIPs {
|
||||
entry, err := tx.First(tableServiceVirtualIPs, indexManualVIPs, psn.ServiceName.PartitionOrDefault(), ip)
|
||||
|
@ -1118,7 +1124,13 @@ func (s *Store) AssignManualServiceVIPs(idx uint64, psn structs.PeeredServiceNam
|
|||
}
|
||||
|
||||
newEntry := entry.(ServiceVirtualIP)
|
||||
if newEntry.Service.ServiceName.Matches(psn.ServiceName) {
|
||||
|
||||
var (
|
||||
thisServiceName = newEntry.Service.ServiceName
|
||||
thisPeer = newEntry.Service.Peer
|
||||
)
|
||||
|
||||
if thisServiceName.Matches(psn.ServiceName) && thisPeer == psn.Peer {
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -1130,6 +1142,7 @@ func (s *Store) AssignManualServiceVIPs(idx uint64, psn structs.PeeredServiceNam
|
|||
filteredIPs = append(filteredIPs, existingIP)
|
||||
}
|
||||
}
|
||||
sort.Strings(filteredIPs)
|
||||
|
||||
newEntry.ManualIPs = filteredIPs
|
||||
newEntry.ModifyIndex = idx
|
||||
|
@ -1137,6 +1150,12 @@ func (s *Store) AssignManualServiceVIPs(idx uint64, psn structs.PeeredServiceNam
|
|||
return false, nil, fmt.Errorf("failed inserting service virtual IP entry: %s", err)
|
||||
}
|
||||
modifiedEntries[newEntry.Service] = struct{}{}
|
||||
|
||||
if err := updateVirtualIPMaxIndexes(tx, idx, thisServiceName.PartitionOrDefault(), thisPeer); err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
txnNeedsCommit = true
|
||||
}
|
||||
|
||||
entry, err := tx.First(tableServiceVirtualIPs, indexID, psn)
|
||||
|
@ -1149,23 +1168,37 @@ func (s *Store) AssignManualServiceVIPs(idx uint64, psn structs.PeeredServiceNam
|
|||
}
|
||||
|
||||
newEntry := entry.(ServiceVirtualIP)
|
||||
newEntry.ManualIPs = ips
|
||||
newEntry.ModifyIndex = idx
|
||||
|
||||
if err := tx.Insert(tableServiceVirtualIPs, newEntry); err != nil {
|
||||
return false, nil, fmt.Errorf("failed inserting service virtual IP entry: %s", err)
|
||||
// Check to see if the slice already contains the same ips.
|
||||
if !stringslice.EqualMapKeys(newEntry.ManualIPs, assignedIPs) {
|
||||
newEntry.ManualIPs = slices.Clone(ips)
|
||||
newEntry.ModifyIndex = idx
|
||||
|
||||
sort.Strings(newEntry.ManualIPs)
|
||||
|
||||
if err := tx.Insert(tableServiceVirtualIPs, newEntry); err != nil {
|
||||
return false, nil, fmt.Errorf("failed inserting service virtual IP entry: %s", err)
|
||||
}
|
||||
if err := updateVirtualIPMaxIndexes(tx, idx, psn.ServiceName.PartitionOrDefault(), psn.Peer); err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
txnNeedsCommit = true
|
||||
}
|
||||
if err := updateVirtualIPMaxIndexes(tx, idx, psn.ServiceName.PartitionOrDefault(), psn.Peer); err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
if err = tx.Commit(); err != nil {
|
||||
return false, nil, err
|
||||
|
||||
if txnNeedsCommit {
|
||||
if err = tx.Commit(); err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return true, maps.SliceOfKeys(modifiedEntries), nil
|
||||
}
|
||||
|
||||
func updateVirtualIPMaxIndexes(txn WriteTxn, idx uint64, partition, peerName string) error {
|
||||
// update global max index (for snapshots)
|
||||
if err := indexUpdateMaxTxn(txn, idx, tableServiceVirtualIPs); err != nil {
|
||||
return fmt.Errorf("failed while updating index: %w", err)
|
||||
}
|
||||
// update per-partition max index
|
||||
if err := indexUpdateMaxTxn(txn, idx, partitionedIndexEntryName(tableServiceVirtualIPs, partition)); err != nil {
|
||||
return fmt.Errorf("failed while updating partitioned index: %w", err)
|
||||
|
@ -3086,6 +3119,7 @@ func servicesVirtualIPsTxn(tx ReadTxn, ws memdb.WatchSet) (uint64, []ServiceVirt
|
|||
vips = append(vips, vip)
|
||||
}
|
||||
|
||||
// Pull from the global one
|
||||
idx := maxIndexWatchTxn(tx, nil, tableServiceVirtualIPs)
|
||||
|
||||
return idx, vips, nil
|
||||
|
|
|
@ -13,15 +13,15 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
"github.com/hashicorp/go-memdb"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/go-memdb"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/lib/stringslice"
|
||||
|
@ -1963,81 +1963,289 @@ func TestStateStore_AssignManualVirtualIPs(t *testing.T) {
|
|||
s := testStateStore(t)
|
||||
setVirtualIPFlags(t, s)
|
||||
|
||||
// Attempt to assign manual virtual IPs to a service that doesn't exist - should be a no-op.
|
||||
psn := structs.PeeredServiceName{ServiceName: structs.ServiceName{Name: "foo", EnterpriseMeta: *acl.DefaultEnterpriseMeta()}}
|
||||
found, svcs, err := s.AssignManualServiceVIPs(0, psn, []string{"7.7.7.7", "8.8.8.8"})
|
||||
require.NoError(t, err)
|
||||
require.False(t, found)
|
||||
require.Empty(t, svcs)
|
||||
serviceVIP, err := s.ServiceManualVIPs(psn)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, serviceVIP)
|
||||
|
||||
// Create the service registration.
|
||||
entMeta := structs.DefaultEnterpriseMetaInDefaultPartition()
|
||||
ns1 := &structs.NodeService{
|
||||
ID: "foo",
|
||||
Service: "foo",
|
||||
Address: "1.1.1.1",
|
||||
Port: 1111,
|
||||
Connect: structs.ServiceConnect{Native: true},
|
||||
EnterpriseMeta: *entMeta,
|
||||
newPSN := func(name, peer string) structs.PeeredServiceName {
|
||||
return structs.PeeredServiceName{
|
||||
ServiceName: structs.ServiceName{
|
||||
Name: name,
|
||||
EnterpriseMeta: *acl.DefaultEnterpriseMeta(),
|
||||
},
|
||||
Peer: peer,
|
||||
}
|
||||
}
|
||||
|
||||
// Service successfully registers into the state store.
|
||||
testRegisterNode(t, s, 0, "node1")
|
||||
require.NoError(t, s.EnsureService(1, "node1", ns1))
|
||||
checkMaxIndexes := func(t *testing.T, expect, expectImported uint64) {
|
||||
t.Helper()
|
||||
tx := s.db.Txn(false)
|
||||
defer tx.Abort()
|
||||
|
||||
// Make sure there's a virtual IP for the foo service.
|
||||
vip, err := s.VirtualIPForService(psn)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "240.0.0.1", vip)
|
||||
idx := maxIndexWatchTxn(tx, nil, tableServiceVirtualIPs)
|
||||
require.Equal(t, expect, idx)
|
||||
|
||||
// No manual IP should be set yet.
|
||||
serviceVIP, err = s.ServiceManualVIPs(psn)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "0.0.0.1", serviceVIP.IP.String())
|
||||
require.Empty(t, serviceVIP.ManualIPs)
|
||||
entMeta := acl.DefaultEnterpriseMeta()
|
||||
|
||||
// Attempt to assign manual virtual IPs again.
|
||||
found, svcs, err = s.AssignManualServiceVIPs(2, psn, []string{"7.7.7.7", "8.8.8.8"})
|
||||
require.NoError(t, err)
|
||||
require.True(t, found)
|
||||
require.Empty(t, svcs)
|
||||
serviceVIP, err = s.ServiceManualVIPs(psn)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "0.0.0.1", serviceVIP.IP.String())
|
||||
require.Equal(t, serviceVIP.ManualIPs, []string{"7.7.7.7", "8.8.8.8"})
|
||||
importedIdx := maxIndexTxn(tx, partitionedIndexEntryName(tableServiceVirtualIPs+".imported", entMeta.PartitionOrDefault()))
|
||||
require.Equal(t, expectImported, importedIdx)
|
||||
}
|
||||
|
||||
// Register another service via config entry.
|
||||
s.EnsureConfigEntry(3, &structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "bar",
|
||||
assignManual := func(
|
||||
t *testing.T,
|
||||
idx uint64,
|
||||
psn structs.PeeredServiceName,
|
||||
ips []string,
|
||||
modified ...structs.PeeredServiceName,
|
||||
) {
|
||||
t.Helper()
|
||||
found, svcs, err := s.AssignManualServiceVIPs(idx, psn, ips)
|
||||
require.NoError(t, err)
|
||||
require.True(t, found)
|
||||
if len(modified) == 0 {
|
||||
require.Empty(t, svcs)
|
||||
} else {
|
||||
require.ElementsMatch(t, modified, svcs)
|
||||
}
|
||||
}
|
||||
|
||||
checkVIP := func(
|
||||
t *testing.T,
|
||||
psn structs.PeeredServiceName,
|
||||
expectVIP string,
|
||||
) {
|
||||
t.Helper()
|
||||
// Make sure there's a virtual IP for the foo service.
|
||||
vip, err := s.VirtualIPForService(psn)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectVIP, vip)
|
||||
}
|
||||
|
||||
checkManualVIP := func(
|
||||
t *testing.T,
|
||||
psn structs.PeeredServiceName,
|
||||
expectIP string,
|
||||
expectManual []string,
|
||||
expectIndex uint64,
|
||||
) {
|
||||
t.Helper()
|
||||
serviceVIP, err := s.ServiceManualVIPs(psn)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectIP, serviceVIP.IP.String())
|
||||
if len(expectManual) == 0 {
|
||||
require.Empty(t, serviceVIP.ManualIPs)
|
||||
} else {
|
||||
require.Equal(t, expectManual, serviceVIP.ManualIPs)
|
||||
}
|
||||
require.Equal(t, expectIndex, serviceVIP.ModifyIndex)
|
||||
}
|
||||
|
||||
psn := newPSN("foo", "")
|
||||
|
||||
lastIndex := uint64(0)
|
||||
nextIndex := func() uint64 {
|
||||
lastIndex++
|
||||
return lastIndex
|
||||
}
|
||||
|
||||
testutil.RunStep(t, "assign to nonexistent service is noop", func(t *testing.T) {
|
||||
useIdx := nextIndex()
|
||||
|
||||
// Attempt to assign manual virtual IPs to a service that doesn't exist - should be a no-op.
|
||||
found, svcs, err := s.AssignManualServiceVIPs(useIdx, psn, []string{"7.7.7.7", "8.8.8.8"})
|
||||
require.NoError(t, err)
|
||||
require.False(t, found)
|
||||
require.Empty(t, svcs)
|
||||
|
||||
serviceVIP, err := s.ServiceManualVIPs(psn)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, serviceVIP)
|
||||
|
||||
checkMaxIndexes(t, 0, 0)
|
||||
})
|
||||
|
||||
psn2 := structs.PeeredServiceName{ServiceName: structs.ServiceName{Name: "bar"}}
|
||||
vip, err = s.VirtualIPForService(psn2)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "240.0.0.2", vip)
|
||||
// Create the service registration.
|
||||
var regIndex1 uint64
|
||||
testutil.RunStep(t, "create service 1", func(t *testing.T) {
|
||||
useIdx := nextIndex()
|
||||
regIndex1 = useIdx
|
||||
|
||||
entMeta := acl.DefaultEnterpriseMeta()
|
||||
ns1 := &structs.NodeService{
|
||||
ID: "foo",
|
||||
Service: "foo",
|
||||
Address: "1.1.1.1",
|
||||
Port: 1111,
|
||||
Connect: structs.ServiceConnect{Native: true},
|
||||
EnterpriseMeta: *entMeta,
|
||||
}
|
||||
|
||||
// Service successfully registers into the state store.
|
||||
testRegisterNode(t, s, useIdx, "node1")
|
||||
require.NoError(t, s.EnsureService(useIdx, "node1", ns1))
|
||||
|
||||
// Make sure there's a virtual IP for the foo service.
|
||||
checkVIP(t, psn, "240.0.0.1")
|
||||
|
||||
// No manual IP should be set yet.
|
||||
checkManualVIP(t, psn, "0.0.0.1", []string{}, regIndex1)
|
||||
|
||||
checkMaxIndexes(t, regIndex1, 0)
|
||||
})
|
||||
|
||||
// Attempt to assign manual virtual IPs again.
|
||||
var assignIndex1 uint64
|
||||
testutil.RunStep(t, "assign to existent service does something", func(t *testing.T) {
|
||||
useIdx := nextIndex()
|
||||
assignIndex1 = useIdx
|
||||
|
||||
// inserting in the wrong order to test the string sort
|
||||
assignManual(t, useIdx, psn, []string{"7.7.7.7", "8.8.8.8", "6.6.6.6"})
|
||||
|
||||
checkManualVIP(t, psn, "0.0.0.1", []string{
|
||||
"6.6.6.6", "7.7.7.7", "8.8.8.8",
|
||||
}, assignIndex1)
|
||||
|
||||
checkMaxIndexes(t, assignIndex1, 0)
|
||||
})
|
||||
|
||||
psn2 := newPSN("bar", "")
|
||||
|
||||
var regIndex2 uint64
|
||||
testutil.RunStep(t, "create service 2", func(t *testing.T) {
|
||||
useIdx := nextIndex()
|
||||
regIndex2 = useIdx
|
||||
|
||||
// Register another service via config entry.
|
||||
s.EnsureConfigEntry(useIdx, &structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "bar",
|
||||
})
|
||||
|
||||
checkVIP(t, psn2, "240.0.0.2")
|
||||
|
||||
// No manual IP should be set yet.
|
||||
checkManualVIP(t, psn2, "0.0.0.2", []string{}, regIndex2)
|
||||
|
||||
checkMaxIndexes(t, regIndex2, 0)
|
||||
})
|
||||
|
||||
// Attempt to assign manual virtual IPs for bar, with one IP overlapping with foo.
|
||||
// This should cause the ip to be removed from foo's list of manual IPs.
|
||||
found, svcs, err = s.AssignManualServiceVIPs(4, psn2, []string{"7.7.7.7", "9.9.9.9"})
|
||||
require.NoError(t, err)
|
||||
require.True(t, found)
|
||||
require.ElementsMatch(t, svcs, []structs.PeeredServiceName{psn})
|
||||
var assignIndex2 uint64
|
||||
testutil.RunStep(t, "assign to existent service and ip is removed from another", func(t *testing.T) {
|
||||
useIdx := nextIndex()
|
||||
assignIndex2 = useIdx
|
||||
|
||||
serviceVIP, err = s.ServiceManualVIPs(psn)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "0.0.0.1", serviceVIP.IP.String())
|
||||
require.Equal(t, []string{"8.8.8.8"}, serviceVIP.ManualIPs)
|
||||
require.Equal(t, uint64(4), serviceVIP.ModifyIndex)
|
||||
assignManual(t, useIdx, psn2, []string{"7.7.7.7", "9.9.9.9"}, psn)
|
||||
|
||||
serviceVIP, err = s.ServiceManualVIPs(psn2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "0.0.0.2", serviceVIP.IP.String())
|
||||
require.Equal(t, []string{"7.7.7.7", "9.9.9.9"}, serviceVIP.ManualIPs)
|
||||
require.Equal(t, uint64(4), serviceVIP.ModifyIndex)
|
||||
checkManualVIP(t, psn, "0.0.0.1", []string{
|
||||
"6.6.6.6", "8.8.8.8", // 7.7.7.7 was stolen by psn2
|
||||
}, assignIndex2)
|
||||
checkManualVIP(t, psn2, "0.0.0.2", []string{
|
||||
"7.7.7.7", "9.9.9.9",
|
||||
}, assignIndex2)
|
||||
|
||||
checkMaxIndexes(t, assignIndex2, 0)
|
||||
})
|
||||
|
||||
psn3 := newPSN("gir", "peer1")
|
||||
|
||||
var regIndex3 uint64
|
||||
testutil.RunStep(t, "create peered service 1", func(t *testing.T) {
|
||||
useIdx := nextIndex()
|
||||
regIndex3 = useIdx
|
||||
|
||||
// Create the service registration.
|
||||
entMetaPeer := acl.DefaultEnterpriseMeta()
|
||||
nsPeer1 := &structs.NodeService{
|
||||
ID: "gir",
|
||||
Service: "gir",
|
||||
Address: "9.9.9.9",
|
||||
Port: 2222,
|
||||
PeerName: "peer1",
|
||||
Connect: structs.ServiceConnect{Native: true},
|
||||
EnterpriseMeta: *entMetaPeer,
|
||||
}
|
||||
|
||||
// Service successfully registers into the state store.
|
||||
testRegisterPeering(t, s, useIdx, "peer1")
|
||||
testRegisterNodeOpts(t, s, useIdx, "node9", func(n *structs.Node) error {
|
||||
n.PeerName = "peer1"
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, s.EnsureService(useIdx, "node9", nsPeer1))
|
||||
|
||||
checkVIP(t, psn3, "240.0.0.3")
|
||||
|
||||
// No manual IP should be set yet.
|
||||
checkManualVIP(t, psn3, "0.0.0.3", []string{}, regIndex3)
|
||||
|
||||
checkMaxIndexes(t, regIndex3, regIndex3)
|
||||
})
|
||||
|
||||
// Assign manual virtual IPs to peered service.
|
||||
var assignIndex3 uint64
|
||||
testutil.RunStep(t, "assign to peered service and steal from non-peered", func(t *testing.T) {
|
||||
useIdx := nextIndex()
|
||||
assignIndex3 = useIdx
|
||||
|
||||
// 5.5.5.5 is stolen from psn
|
||||
assignManual(t, useIdx, psn3, []string{"5.5.5.5", "6.6.6.6"}, psn)
|
||||
|
||||
checkManualVIP(t, psn, "0.0.0.1", []string{
|
||||
"8.8.8.8", // 5.5.5.5 was stolen by psn3
|
||||
}, assignIndex3)
|
||||
checkManualVIP(t, psn2, "0.0.0.2", []string{
|
||||
"7.7.7.7", "9.9.9.9",
|
||||
}, assignIndex2)
|
||||
checkManualVIP(t, psn3, "0.0.0.3", []string{
|
||||
"5.5.5.5", "6.6.6.6",
|
||||
}, assignIndex3)
|
||||
|
||||
checkMaxIndexes(t, assignIndex3, assignIndex3)
|
||||
})
|
||||
|
||||
var assignIndex4 uint64
|
||||
testutil.RunStep(t, "assign to non-peered service and steal from peered", func(t *testing.T) {
|
||||
useIdx := nextIndex()
|
||||
assignIndex4 = useIdx
|
||||
|
||||
// 6.6.6.6 is stolen from psn3
|
||||
assignManual(t, useIdx, psn2, []string{
|
||||
"7.7.7.7", "9.9.9.9", "6.6.6.6",
|
||||
}, psn3)
|
||||
|
||||
checkManualVIP(t, psn, "0.0.0.1", []string{
|
||||
"8.8.8.8", // 5.5.5.5 was stolen by psn3
|
||||
}, assignIndex3)
|
||||
checkManualVIP(t, psn2, "0.0.0.2", []string{
|
||||
"6.6.6.6", "7.7.7.7", "9.9.9.9",
|
||||
}, assignIndex4)
|
||||
checkManualVIP(t, psn3, "0.0.0.3", []string{
|
||||
"5.5.5.5",
|
||||
}, assignIndex4)
|
||||
|
||||
checkMaxIndexes(t, assignIndex4, assignIndex4)
|
||||
})
|
||||
|
||||
testutil.RunStep(t, "repeat the last write and no indexes should be bumped", func(t *testing.T) {
|
||||
useIdx := nextIndex()
|
||||
|
||||
assignManual(t, useIdx, psn2, []string{
|
||||
"7.7.7.7", "9.9.9.9", "6.6.6.6",
|
||||
}) // no modified this time
|
||||
|
||||
// no changes
|
||||
checkManualVIP(t, psn, "0.0.0.1", []string{
|
||||
"8.8.8.8",
|
||||
}, assignIndex3)
|
||||
checkManualVIP(t, psn2, "0.0.0.2", []string{
|
||||
"6.6.6.6", "7.7.7.7", "9.9.9.9",
|
||||
}, assignIndex4)
|
||||
checkManualVIP(t, psn3, "0.0.0.3", []string{
|
||||
"5.5.5.5",
|
||||
}, assignIndex4)
|
||||
|
||||
// no change
|
||||
checkMaxIndexes(t, assignIndex4, assignIndex4)
|
||||
})
|
||||
}
|
||||
|
||||
func TestStateStore_EnsureService_ReassignFreedVIPs(t *testing.T) {
|
||||
|
|
|
@ -7,8 +7,6 @@ flowchart TD
|
|||
demo/v1/recordlabel
|
||||
demo/v2/album
|
||||
demo/v2/artist
|
||||
hcp/v2/link
|
||||
hcp/v2/telemetrystate
|
||||
internal/v1/tombstone
|
||||
multicluster/v2/computedexportedservices --> multicluster/v2/exportedservices
|
||||
multicluster/v2/computedexportedservices --> multicluster/v2/namespaceexportedservices
|
||||
|
@ -16,4 +14,4 @@ flowchart TD
|
|||
multicluster/v2/exportedservices
|
||||
multicluster/v2/namespaceexportedservices
|
||||
multicluster/v2/partitionexportedservices
|
||||
```
|
||||
```
|
||||
|
|
|
@ -4,7 +4,6 @@
|
|||
package consul
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/consul/internal/hcp"
|
||||
"github.com/hashicorp/consul/internal/multicluster"
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
"github.com/hashicorp/consul/internal/resource/demo"
|
||||
|
@ -22,7 +21,6 @@ func NewTypeRegistry() resource.Registry {
|
|||
|
||||
demo.RegisterTypes(registry)
|
||||
multicluster.RegisterTypes(registry)
|
||||
hcp.RegisterTypes(registry)
|
||||
|
||||
return registry
|
||||
}
|
||||
|
|
|
@ -2367,7 +2367,7 @@ func TestDNS_trimUDPResponse_NoTrim(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy" `)
|
||||
cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" `)
|
||||
if trimmed := trimUDPResponse(req, resp, cfg.DNSUDPAnswerLimit); trimmed {
|
||||
t.Fatalf("Bad %#v", *resp)
|
||||
}
|
||||
|
@ -2400,7 +2400,7 @@ func TestDNS_trimUDPResponse_NoTrim(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDNS_trimUDPResponse_TrimLimit(t *testing.T) {
|
||||
cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy" `)
|
||||
cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" `)
|
||||
|
||||
req, resp, expected := &dns.Msg{}, &dns.Msg{}, &dns.Msg{}
|
||||
for i := 0; i < cfg.DNSUDPAnswerLimit+1; i++ {
|
||||
|
@ -2439,7 +2439,7 @@ func TestDNS_trimUDPResponse_TrimLimit(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDNS_trimUDPResponse_TrimLimitWithNS(t *testing.T) {
|
||||
cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy" `)
|
||||
cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" `)
|
||||
|
||||
req, resp, expected := &dns.Msg{}, &dns.Msg{}, &dns.Msg{}
|
||||
for i := 0; i < cfg.DNSUDPAnswerLimit+1; i++ {
|
||||
|
@ -2486,7 +2486,7 @@ func TestDNS_trimUDPResponse_TrimLimitWithNS(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDNS_trimTCPResponse_TrimLimitWithNS(t *testing.T) {
|
||||
cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy" `)
|
||||
cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" `)
|
||||
|
||||
req, resp, expected := &dns.Msg{}, &dns.Msg{}, &dns.Msg{}
|
||||
for i := 0; i < 5000; i++ {
|
||||
|
@ -2542,7 +2542,7 @@ func loadRuntimeConfig(t *testing.T, hcl string) *config.RuntimeConfig {
|
|||
}
|
||||
|
||||
func TestDNS_trimUDPResponse_TrimSize(t *testing.T) {
|
||||
cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy" `)
|
||||
cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" `)
|
||||
|
||||
req, resp := &dns.Msg{}, &dns.Msg{}
|
||||
for i := 0; i < 100; i++ {
|
||||
|
@ -2594,7 +2594,7 @@ func TestDNS_trimUDPResponse_TrimSize(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDNS_trimUDPResponse_TrimSizeEDNS(t *testing.T) {
|
||||
cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy" `)
|
||||
cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" `)
|
||||
|
||||
req, resp := &dns.Msg{}, &dns.Msg{}
|
||||
|
||||
|
@ -2672,7 +2672,7 @@ func TestDNS_trimUDPResponse_TrimSizeEDNS(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDNS_trimUDPResponse_TrimSizeMaxSize(t *testing.T) {
|
||||
cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy" `)
|
||||
cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" `)
|
||||
|
||||
resp := &dns.Msg{}
|
||||
|
||||
|
|
|
@ -1,68 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package hcp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
|
||||
hcpctl "github.com/hashicorp/consul/internal/hcp"
|
||||
"github.com/hashicorp/consul/lib/retry"
|
||||
pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
)
|
||||
|
||||
type LinkEventHandler = func(context.Context, hclog.Logger, *pbresource.WatchEvent)
|
||||
|
||||
func handleLinkEvents(ctx context.Context, logger hclog.Logger, watchClient pbresource.ResourceService_WatchListClient, linkEventHandler LinkEventHandler) {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
logger.Debug("context canceled, exiting")
|
||||
return
|
||||
default:
|
||||
watchEvent, err := watchClient.Recv()
|
||||
|
||||
if err != nil {
|
||||
logger.Error("error receiving link watch event", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
linkEventHandler(ctx, logger, watchEvent)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func RunHCPLinkWatcher(
|
||||
ctx context.Context, logger hclog.Logger, client pbresource.ResourceServiceClient, linkEventHandler LinkEventHandler,
|
||||
) {
|
||||
errorBackoff := &retry.Waiter{
|
||||
MinFailures: 10,
|
||||
MinWait: 0,
|
||||
MaxWait: 1 * time.Minute,
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
logger.Debug("context canceled, exiting")
|
||||
return
|
||||
default:
|
||||
watchClient, err := client.WatchList(
|
||||
ctx, &pbresource.WatchListRequest{
|
||||
Type: pbhcp.LinkType,
|
||||
NamePrefix: hcpctl.LinkName,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
logger.Error("failed to create watch on Link", "error", err)
|
||||
errorBackoff.Wait(ctx)
|
||||
continue
|
||||
}
|
||||
errorBackoff.Reset()
|
||||
handleLinkEvents(ctx, logger, watchClient, linkEventHandler)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,101 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package hcp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
|
||||
mockpbresource "github.com/hashicorp/consul/grpcmocks/proto-public/pbresource"
|
||||
hcpctl "github.com/hashicorp/consul/internal/hcp"
|
||||
pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
)
|
||||
|
||||
// This tests that when we get a watch event from the Recv call, we get that same event on the
|
||||
// output channel, then we
|
||||
func TestLinkWatcher_Ok(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
t.Cleanup(cancel)
|
||||
|
||||
testWatchEvent := &pbresource.WatchEvent{}
|
||||
mockWatchListClient := mockpbresource.NewResourceService_WatchListClient(t)
|
||||
mockWatchListClient.EXPECT().Recv().Return(testWatchEvent, nil)
|
||||
|
||||
eventCh := make(chan *pbresource.WatchEvent)
|
||||
mockLinkHandler := func(_ context.Context, _ hclog.Logger, event *pbresource.WatchEvent) {
|
||||
eventCh <- event
|
||||
}
|
||||
|
||||
client := mockpbresource.NewResourceServiceClient(t)
|
||||
client.EXPECT().WatchList(mock.Anything, &pbresource.WatchListRequest{
|
||||
Type: pbhcp.LinkType,
|
||||
NamePrefix: hcpctl.LinkName,
|
||||
}).Return(mockWatchListClient, nil)
|
||||
|
||||
go RunHCPLinkWatcher(ctx, hclog.Default(), client, mockLinkHandler)
|
||||
|
||||
// Assert that the link handler is called with the testWatchEvent
|
||||
receivedWatchEvent := <-eventCh
|
||||
require.Equal(t, testWatchEvent, receivedWatchEvent)
|
||||
}
|
||||
|
||||
func TestLinkWatcher_RecvError(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
// Our mock WatchListClient will simulate 5 errors, then will cancel the context.
|
||||
// We expect RunHCPLinkWatcher to attempt to create the WatchListClient 6 times (initial attempt plus 5 retries)
|
||||
// before exiting due to context cancellation.
|
||||
mockWatchListClient := mockpbresource.NewResourceService_WatchListClient(t)
|
||||
numFailures := 5
|
||||
failures := 0
|
||||
mockWatchListClient.EXPECT().Recv().RunAndReturn(func() (*pbresource.WatchEvent, error) {
|
||||
if failures < numFailures {
|
||||
failures++
|
||||
return nil, errors.New("unexpectedError")
|
||||
}
|
||||
defer cancel()
|
||||
return &pbresource.WatchEvent{}, nil
|
||||
})
|
||||
|
||||
client := mockpbresource.NewResourceServiceClient(t)
|
||||
client.EXPECT().WatchList(mock.Anything, &pbresource.WatchListRequest{
|
||||
Type: pbhcp.LinkType,
|
||||
NamePrefix: hcpctl.LinkName,
|
||||
}).Return(mockWatchListClient, nil).Times(numFailures + 1)
|
||||
|
||||
RunHCPLinkWatcher(ctx, hclog.Default(), client, func(_ context.Context, _ hclog.Logger, _ *pbresource.WatchEvent) {})
|
||||
}
|
||||
|
||||
func TestLinkWatcher_WatchListError(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
// Our mock WatchList will simulate 5 errors, then will cancel the context.
|
||||
// We expect RunHCPLinkWatcher to attempt to create the WatchListClient 6 times (initial attempt plus 5 retries)
|
||||
// before exiting due to context cancellation.
|
||||
numFailures := 5
|
||||
failures := 0
|
||||
|
||||
client := mockpbresource.NewResourceServiceClient(t)
|
||||
client.EXPECT().WatchList(mock.Anything, &pbresource.WatchListRequest{
|
||||
Type: pbhcp.LinkType,
|
||||
NamePrefix: hcpctl.LinkName,
|
||||
}).RunAndReturn(func(_ context.Context, _ *pbresource.WatchListRequest, _ ...grpc.CallOption) (pbresource.ResourceService_WatchListClient, error) {
|
||||
if failures < numFailures {
|
||||
failures++
|
||||
return nil, errors.New("unexpectedError")
|
||||
}
|
||||
defer cancel()
|
||||
return mockpbresource.NewResourceService_WatchListClient(t), nil
|
||||
}).Times(numFailures + 1)
|
||||
|
||||
RunHCPLinkWatcher(ctx, hclog.Default(), client, func(_ context.Context, _ hclog.Logger, _ *pbresource.WatchEvent) {})
|
||||
}
|
|
@ -1,107 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package hcp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
|
||||
"github.com/hashicorp/consul/agent/hcp/bootstrap/constants"
|
||||
hcpclient "github.com/hashicorp/consul/agent/hcp/client"
|
||||
"github.com/hashicorp/consul/agent/hcp/config"
|
||||
hcpctl "github.com/hashicorp/consul/internal/hcp"
|
||||
pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
)
|
||||
|
||||
// HCPManagerLifecycleFn returns a LinkEventHandler function which will appropriately
|
||||
// Start and Stop the HCP Manager based on the Link event received. If a link is upserted,
|
||||
// the HCP Manager is started, and if a link is deleted, the HCP manager is stopped.
|
||||
func HCPManagerLifecycleFn(
|
||||
m Manager,
|
||||
hcpClientFn func(cfg config.CloudConfig) (hcpclient.Client, error),
|
||||
loadMgmtTokenFn func(
|
||||
ctx context.Context, logger hclog.Logger, hcpClient hcpclient.Client, dataDir string,
|
||||
) (string, error),
|
||||
cloudConfig config.CloudConfig,
|
||||
dataDir string,
|
||||
) LinkEventHandler {
|
||||
return func(ctx context.Context, logger hclog.Logger, watchEvent *pbresource.WatchEvent) {
|
||||
// This indicates that a Link was deleted
|
||||
if watchEvent.GetDelete() != nil {
|
||||
logger.Debug("HCP Link deleted, stopping HCP manager")
|
||||
|
||||
if dataDir != "" {
|
||||
hcpConfigDir := filepath.Join(dataDir, constants.SubDir)
|
||||
logger.Debug("deleting hcp-config dir", "dir", hcpConfigDir)
|
||||
err := os.RemoveAll(hcpConfigDir)
|
||||
if err != nil {
|
||||
logger.Error("failed to delete hcp-config dir", "dir", hcpConfigDir, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
err := m.Stop()
|
||||
if err != nil {
|
||||
logger.Error("error stopping HCP manager", "error", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// This indicates that a Link was either created or updated
|
||||
if watchEvent.GetUpsert() != nil {
|
||||
logger.Debug("HCP Link upserted, starting manager if not already started")
|
||||
|
||||
res := watchEvent.GetUpsert().GetResource()
|
||||
var link pbhcp.Link
|
||||
if err := res.GetData().UnmarshalTo(&link); err != nil {
|
||||
logger.Error("error unmarshalling link data", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
if validated, reason := hcpctl.IsValidated(res); !validated {
|
||||
logger.Debug("HCP Link not validated, not starting manager", "reason", reason)
|
||||
return
|
||||
}
|
||||
|
||||
// Update the HCP manager configuration with the link values
|
||||
// Merge the link data with the existing cloud config so that we only overwrite the
|
||||
// fields that are provided by the link. This ensures that:
|
||||
// 1. The HCP configuration (i.e., how to connect to HCP) is preserved
|
||||
// 2. The Consul agent's node ID and node name are preserved
|
||||
newCfg := config.CloudConfig{
|
||||
ResourceID: link.ResourceId,
|
||||
ClientID: link.ClientId,
|
||||
ClientSecret: link.ClientSecret,
|
||||
}
|
||||
mergedCfg := config.Merge(cloudConfig, newCfg)
|
||||
hcpClient, err := hcpClientFn(mergedCfg)
|
||||
if err != nil {
|
||||
logger.Error("error creating HCP client", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Load the management token if access is set to read-write. Read-only clusters
|
||||
// will not have a management token provided by HCP.
|
||||
var token string
|
||||
if link.GetAccessLevel() == pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_WRITE {
|
||||
token, err = loadMgmtTokenFn(ctx, logger, hcpClient, dataDir)
|
||||
if err != nil {
|
||||
logger.Error("error loading management token", "error", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
mergedCfg.ManagementToken = token
|
||||
m.UpdateConfig(hcpClient, mergedCfg)
|
||||
|
||||
err = m.Start(ctx)
|
||||
if err != nil {
|
||||
logger.Error("error starting HCP manager", "error", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,236 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package hcp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/protobuf/types/known/anypb"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
|
||||
"github.com/hashicorp/consul/agent/hcp/bootstrap/constants"
|
||||
hcpclient "github.com/hashicorp/consul/agent/hcp/client"
|
||||
"github.com/hashicorp/consul/agent/hcp/config"
|
||||
hcpctl "github.com/hashicorp/consul/internal/hcp"
|
||||
pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
)
|
||||
|
||||
func TestHCPManagerLifecycleFn(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
t.Cleanup(cancel)
|
||||
|
||||
logger := hclog.New(&hclog.LoggerOptions{Output: io.Discard})
|
||||
|
||||
mockHCPClient := hcpclient.NewMockClient(t)
|
||||
mockHcpClientFn := func(_ config.CloudConfig) (hcpclient.Client, error) {
|
||||
return mockHCPClient, nil
|
||||
}
|
||||
|
||||
mockLoadMgmtTokenFn := func(ctx context.Context, logger hclog.Logger, hcpClient hcpclient.Client, dataDir string) (string, error) {
|
||||
return "test-mgmt-token", nil
|
||||
}
|
||||
|
||||
dataDir := testutil.TempDir(t, "test-link-controller")
|
||||
err := os.Mkdir(filepath.Join(dataDir, constants.SubDir), os.ModeDir)
|
||||
require.NoError(t, err)
|
||||
existingCfg := config.CloudConfig{
|
||||
AuthURL: "test.com",
|
||||
}
|
||||
|
||||
type testCase struct {
|
||||
mutateLink func(*pbhcp.Link)
|
||||
mutateUpsertEvent func(*pbresource.WatchEvent_Upsert)
|
||||
applyMocksAndAssertions func(*testing.T, *MockManager, *pbhcp.Link)
|
||||
hcpClientFn func(config.CloudConfig) (hcpclient.Client, error)
|
||||
loadMgmtTokenFn func(context.Context, hclog.Logger, hcpclient.Client, string) (string, error)
|
||||
}
|
||||
|
||||
testCases := map[string]testCase{
|
||||
// HCP manager should be started when link is created and stopped when link is deleted
|
||||
"Ok": {
|
||||
applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) {
|
||||
mgr.EXPECT().Start(mock.Anything).Return(nil).Once()
|
||||
|
||||
expectedCfg := config.CloudConfig{
|
||||
ResourceID: link.ResourceId,
|
||||
ClientID: link.ClientId,
|
||||
ClientSecret: link.ClientSecret,
|
||||
AuthURL: "test.com",
|
||||
ManagementToken: "test-mgmt-token",
|
||||
}
|
||||
mgr.EXPECT().UpdateConfig(mockHCPClient, expectedCfg).Once()
|
||||
|
||||
mgr.EXPECT().Stop().Return(nil).Once()
|
||||
},
|
||||
},
|
||||
// HCP manager should not be updated with management token
|
||||
"ReadOnly": {
|
||||
mutateLink: func(link *pbhcp.Link) {
|
||||
link.AccessLevel = pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_ONLY
|
||||
},
|
||||
applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) {
|
||||
mgr.EXPECT().Start(mock.Anything).Return(nil).Once()
|
||||
|
||||
expectedCfg := config.CloudConfig{
|
||||
ResourceID: link.ResourceId,
|
||||
ClientID: link.ClientId,
|
||||
ClientSecret: link.ClientSecret,
|
||||
AuthURL: "test.com",
|
||||
ManagementToken: "",
|
||||
}
|
||||
mgr.EXPECT().UpdateConfig(mockHCPClient, expectedCfg).Once()
|
||||
|
||||
mgr.EXPECT().Stop().Return(nil).Once()
|
||||
},
|
||||
},
|
||||
// HCP manager should not be started or updated if link is not validated
|
||||
"ValidationError": {
|
||||
mutateUpsertEvent: func(upsert *pbresource.WatchEvent_Upsert) {
|
||||
upsert.Resource.Status = map[string]*pbresource.Status{
|
||||
hcpctl.StatusKey: {
|
||||
Conditions: []*pbresource.Condition{hcpctl.ConditionValidatedFailed},
|
||||
},
|
||||
}
|
||||
},
|
||||
applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) {
|
||||
mgr.AssertNotCalled(t, "Start", mock.Anything)
|
||||
mgr.AssertNotCalled(t, "UpdateConfig", mock.Anything, mock.Anything)
|
||||
mgr.EXPECT().Stop().Return(nil).Once()
|
||||
},
|
||||
},
|
||||
"Error_InvalidLink": {
|
||||
mutateUpsertEvent: func(upsert *pbresource.WatchEvent_Upsert) {
|
||||
upsert.Resource = nil
|
||||
},
|
||||
applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) {
|
||||
mgr.AssertNotCalled(t, "Start", mock.Anything)
|
||||
mgr.AssertNotCalled(t, "UpdateConfig", mock.Anything, mock.Anything)
|
||||
mgr.EXPECT().Stop().Return(nil).Once()
|
||||
},
|
||||
},
|
||||
"Error_HCPManagerStop": {
|
||||
applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) {
|
||||
mgr.EXPECT().Start(mock.Anything).Return(nil).Once()
|
||||
mgr.EXPECT().UpdateConfig(mock.Anything, mock.Anything).Return().Once()
|
||||
mgr.EXPECT().Stop().Return(errors.New("could not stop HCP manager")).Once()
|
||||
},
|
||||
},
|
||||
"Error_CreatingHCPClient": {
|
||||
applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) {
|
||||
mgr.AssertNotCalled(t, "Start", mock.Anything)
|
||||
mgr.AssertNotCalled(t, "UpdateConfig", mock.Anything, mock.Anything)
|
||||
mgr.EXPECT().Stop().Return(nil).Once()
|
||||
},
|
||||
hcpClientFn: func(_ config.CloudConfig) (hcpclient.Client, error) {
|
||||
return nil, errors.New("could not create HCP client")
|
||||
},
|
||||
},
|
||||
// This should result in the HCP manager not being started
|
||||
"Error_LoadMgmtToken": {
|
||||
applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) {
|
||||
mgr.AssertNotCalled(t, "Start", mock.Anything)
|
||||
mgr.AssertNotCalled(t, "UpdateConfig", mock.Anything, mock.Anything)
|
||||
mgr.EXPECT().Stop().Return(nil).Once()
|
||||
},
|
||||
loadMgmtTokenFn: func(ctx context.Context, logger hclog.Logger, hcpClient hcpclient.Client, dataDir string) (string, error) {
|
||||
return "", errors.New("could not load management token")
|
||||
},
|
||||
},
|
||||
"Error_HCPManagerStart": {
|
||||
applyMocksAndAssertions: func(t *testing.T, mgr *MockManager, link *pbhcp.Link) {
|
||||
mgr.EXPECT().Start(mock.Anything).Return(errors.New("could not start HCP manager")).Once()
|
||||
mgr.EXPECT().UpdateConfig(mock.Anything, mock.Anything).Return().Once()
|
||||
mgr.EXPECT().Stop().Return(nil).Once()
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, test := range testCases {
|
||||
t.Run(name, func(t2 *testing.T) {
|
||||
mgr := NewMockManager(t2)
|
||||
|
||||
// Set up a link
|
||||
link := pbhcp.Link{
|
||||
ResourceId: "abc",
|
||||
ClientId: "def",
|
||||
ClientSecret: "ghi",
|
||||
AccessLevel: pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_WRITE,
|
||||
}
|
||||
|
||||
if test.mutateLink != nil {
|
||||
test.mutateLink(&link)
|
||||
}
|
||||
|
||||
linkResource, err := anypb.New(&link)
|
||||
require.NoError(t2, err)
|
||||
|
||||
if test.applyMocksAndAssertions != nil {
|
||||
test.applyMocksAndAssertions(t2, mgr, &link)
|
||||
}
|
||||
|
||||
testHcpClientFn := mockHcpClientFn
|
||||
if test.hcpClientFn != nil {
|
||||
testHcpClientFn = test.hcpClientFn
|
||||
}
|
||||
|
||||
testLoadMgmtToken := mockLoadMgmtTokenFn
|
||||
if test.loadMgmtTokenFn != nil {
|
||||
testLoadMgmtToken = test.loadMgmtTokenFn
|
||||
}
|
||||
|
||||
updateManagerLifecycle := HCPManagerLifecycleFn(
|
||||
mgr, testHcpClientFn,
|
||||
testLoadMgmtToken, existingCfg, dataDir,
|
||||
)
|
||||
|
||||
upsertEvent := &pbresource.WatchEvent_Upsert{
|
||||
Resource: &pbresource.Resource{
|
||||
Id: &pbresource.ID{
|
||||
Name: "global",
|
||||
Type: pbhcp.LinkType,
|
||||
},
|
||||
Status: map[string]*pbresource.Status{
|
||||
hcpctl.StatusKey: {
|
||||
Conditions: []*pbresource.Condition{hcpctl.ConditionValidatedSuccess},
|
||||
},
|
||||
},
|
||||
Data: linkResource,
|
||||
},
|
||||
}
|
||||
if test.mutateUpsertEvent != nil {
|
||||
test.mutateUpsertEvent(upsertEvent)
|
||||
}
|
||||
|
||||
// Handle upsert event
|
||||
updateManagerLifecycle(ctx, logger, &pbresource.WatchEvent{
|
||||
Event: &pbresource.WatchEvent_Upsert_{
|
||||
Upsert: upsertEvent,
|
||||
},
|
||||
})
|
||||
|
||||
// Handle delete event. This should stop HCP manager
|
||||
updateManagerLifecycle(ctx, logger, &pbresource.WatchEvent{
|
||||
Event: &pbresource.WatchEvent_Delete_{
|
||||
Delete: &pbresource.WatchEvent_Delete{},
|
||||
},
|
||||
})
|
||||
|
||||
// Ensure hcp-config directory is removed
|
||||
file := filepath.Join(dataDir, constants.SubDir)
|
||||
if _, err := os.Stat(file); err == nil || !os.IsNotExist(err) {
|
||||
require.Fail(t2, "should have removed hcp-config directory")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@ -12,75 +12,15 @@ import (
|
|||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/protobuf/types/known/anypb"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
|
||||
hcpclient "github.com/hashicorp/consul/agent/hcp/client"
|
||||
"github.com/hashicorp/consul/agent/hcp/config"
|
||||
"github.com/hashicorp/consul/agent/hcp/scada"
|
||||
hcpctl "github.com/hashicorp/consul/internal/hcp"
|
||||
pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
)
|
||||
|
||||
func TestManager_MonitorHCPLink(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
t.Cleanup(cancel)
|
||||
logger := hclog.New(&hclog.LoggerOptions{Output: io.Discard})
|
||||
|
||||
mgr := NewManager(
|
||||
ManagerConfig{
|
||||
Logger: hclog.New(&hclog.LoggerOptions{Output: io.Discard}),
|
||||
},
|
||||
)
|
||||
mockHCPClient := hcpclient.NewMockClient(t)
|
||||
mockHcpClientFn := func(_ config.CloudConfig) (hcpclient.Client, error) {
|
||||
return mockHCPClient, nil
|
||||
}
|
||||
loadMgmtTokenFn := func(ctx context.Context, logger hclog.Logger, hcpClient hcpclient.Client, dataDir string) (string, error) {
|
||||
return "test-mgmt-token", nil
|
||||
}
|
||||
|
||||
require.False(t, mgr.isRunning())
|
||||
updateManagerLifecycle := HCPManagerLifecycleFn(
|
||||
mgr, mockHcpClientFn,
|
||||
loadMgmtTokenFn, config.CloudConfig{}, "",
|
||||
)
|
||||
|
||||
// Set up a link
|
||||
link := pbhcp.Link{
|
||||
ResourceId: "abc",
|
||||
ClientId: "def",
|
||||
ClientSecret: "ghi",
|
||||
AccessLevel: pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_WRITE,
|
||||
}
|
||||
linkResource, err := anypb.New(&link)
|
||||
require.NoError(t, err)
|
||||
updateManagerLifecycle(ctx, logger, &pbresource.WatchEvent{
|
||||
Event: &pbresource.WatchEvent_Upsert_{
|
||||
Upsert: &pbresource.WatchEvent_Upsert{
|
||||
Resource: &pbresource.Resource{
|
||||
Id: &pbresource.ID{
|
||||
Name: "global",
|
||||
Type: pbhcp.LinkType,
|
||||
},
|
||||
Status: map[string]*pbresource.Status{
|
||||
hcpctl.StatusKey: {
|
||||
Conditions: []*pbresource.Condition{hcpctl.ConditionValidatedSuccess},
|
||||
},
|
||||
},
|
||||
Data: linkResource,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// Validate that the HCP manager is started
|
||||
require.True(t, mgr.isRunning())
|
||||
}
|
||||
|
||||
func TestManager_Start(t *testing.T) {
|
||||
client := hcpclient.NewMockClient(t)
|
||||
statusF := func(ctx context.Context) (hcpclient.ServerStatus, error) {
|
||||
|
|
|
@ -6,7 +6,6 @@ package agent
|
|||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
|
@ -20,6 +19,8 @@ import (
|
|||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
|
||||
"github.com/NYTimes/gziphandler"
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/armon/go-metrics/prometheus"
|
||||
|
@ -348,16 +349,24 @@ func withRemoteAddrHandler(next http.Handler) http.Handler {
|
|||
})
|
||||
}
|
||||
|
||||
// Injects content type explicitly if not already set into response to prevent XSS
|
||||
// ensureContentTypeHeader injects content-type explicitly if not already set into response to prevent XSS
|
||||
func ensureContentTypeHeader(next http.Handler, logger hclog.Logger) http.Handler {
|
||||
|
||||
return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
|
||||
next.ServeHTTP(resp, req)
|
||||
|
||||
val := resp.Header().Get(contentTypeHeader)
|
||||
if val == "" {
|
||||
resp.Header().Set(contentTypeHeader, plainContentType)
|
||||
logger.Debug("warning: content-type header not explicitly set.", "request-path", req.URL)
|
||||
contentType := api.GetContentType(req)
|
||||
|
||||
if req != nil {
|
||||
logger.Debug("warning: request content-type is not supported", "request-path", req.URL)
|
||||
req.Header.Set(contentTypeHeader, contentType)
|
||||
}
|
||||
|
||||
if resp != nil {
|
||||
respContentType := resp.Header().Get(contentTypeHeader)
|
||||
if respContentType == "" || respContentType != contentType {
|
||||
logger.Debug("warning: response content-type header not explicitly set.", "request-path", req.URL)
|
||||
resp.Header().Set(contentTypeHeader, contentType)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
|
@ -617,7 +617,6 @@ func TestHTTPAPI_DefaultACLPolicy(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestHTTPAPIResponseHeaders(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
|
@ -646,6 +645,87 @@ func TestHTTPAPIResponseHeaders(t *testing.T) {
|
|||
requireHasHeadersSet(t, a, "/", "text/plain; charset=utf-8")
|
||||
}
|
||||
|
||||
func TestHTTPAPIValidateContentTypeHeaders(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
type testcase struct {
|
||||
name string
|
||||
endpoint string
|
||||
method string
|
||||
requestBody io.Reader
|
||||
expectedContentType string
|
||||
}
|
||||
|
||||
cases := []testcase{
|
||||
{
|
||||
name: "snapshot endpoint expect non-default content type",
|
||||
method: http.MethodPut,
|
||||
endpoint: "/v1/snapshot",
|
||||
requestBody: bytes.NewBuffer([]byte("test")),
|
||||
expectedContentType: "application/octet-stream",
|
||||
},
|
||||
{
|
||||
name: "kv endpoint expect non-default content type",
|
||||
method: http.MethodPut,
|
||||
endpoint: "/v1/kv",
|
||||
requestBody: bytes.NewBuffer([]byte("test")),
|
||||
expectedContentType: "application/octet-stream",
|
||||
},
|
||||
{
|
||||
name: "event/fire endpoint expect default content type",
|
||||
method: http.MethodPut,
|
||||
endpoint: "/v1/event/fire",
|
||||
requestBody: bytes.NewBuffer([]byte("test")),
|
||||
expectedContentType: "application/octet-stream",
|
||||
},
|
||||
{
|
||||
name: "peering/token endpoint expect default content type",
|
||||
method: http.MethodPost,
|
||||
endpoint: "/v1/peering/token",
|
||||
requestBody: bytes.NewBuffer([]byte("test")),
|
||||
expectedContentType: "application/json",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
a := NewTestAgent(t, "")
|
||||
defer a.Shutdown()
|
||||
|
||||
requireContentTypeHeadersSet(t, a, tc.method, tc.endpoint, tc.requestBody, tc.expectedContentType)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func requireContentTypeHeadersSet(t *testing.T, a *TestAgent, method, path string, body io.Reader, contentType string) {
|
||||
t.Helper()
|
||||
|
||||
resp := httptest.NewRecorder()
|
||||
req, _ := http.NewRequest(method, path, body)
|
||||
a.enableDebug.Store(true)
|
||||
|
||||
a.srv.handler().ServeHTTP(resp, req)
|
||||
|
||||
reqHdrs := req.Header
|
||||
respHdrs := resp.Header()
|
||||
|
||||
// require request content-type
|
||||
require.NotEmpty(t, reqHdrs.Get("Content-Type"))
|
||||
require.Equal(t, contentType, reqHdrs.Get("Content-Type"),
|
||||
"Request Header Content-Type value incorrect")
|
||||
|
||||
// require response content-type
|
||||
require.NotEmpty(t, respHdrs.Get("Content-Type"))
|
||||
require.Equal(t, contentType, respHdrs.Get("Content-Type"),
|
||||
"Response Header Content-Type value incorrect")
|
||||
}
|
||||
|
||||
func requireHasHeadersSet(t *testing.T, a *TestAgent, path string, contentType string) {
|
||||
t.Helper()
|
||||
|
||||
|
@ -663,7 +743,7 @@ func requireHasHeadersSet(t *testing.T, a *TestAgent, path string, contentType s
|
|||
"X-XSS-Protection header value incorrect")
|
||||
|
||||
require.Equal(t, contentType, hdrs.Get("Content-Type"),
|
||||
"")
|
||||
"Response Content-Type header value incorrect")
|
||||
}
|
||||
|
||||
func TestUIResponseHeaders(t *testing.T) {
|
||||
|
@ -704,7 +784,7 @@ func TestErrorContentTypeHeaderSet(t *testing.T) {
|
|||
`)
|
||||
defer a.Shutdown()
|
||||
|
||||
requireHasHeadersSet(t, a, "/fake-path-doesn't-exist", "text/plain; charset=utf-8")
|
||||
requireHasHeadersSet(t, a, "/fake-path-doesn't-exist", "application/json")
|
||||
}
|
||||
|
||||
func TestAcceptEncodingGzip(t *testing.T) {
|
||||
|
|
|
@ -81,19 +81,21 @@ func (s *serverInternalServiceDump) Notify(ctx context.Context, req *structs.Ser
|
|||
return 0, nil, err
|
||||
}
|
||||
|
||||
totalNodeLength := len(nodes)
|
||||
aclfilter.New(authz, s.deps.Logger).Filter(&nodes)
|
||||
|
||||
raw, err := filter.Execute(nodes)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("could not filter local service dump: %w", err)
|
||||
}
|
||||
nodes = raw.(structs.CheckServiceNodes)
|
||||
|
||||
aclfilter.New(authz, s.deps.Logger).Filter(&nodes)
|
||||
|
||||
return idx, &structs.IndexedCheckServiceNodes{
|
||||
Nodes: nodes,
|
||||
QueryMeta: structs.QueryMeta{
|
||||
Index: idx,
|
||||
Backend: structs.QueryBackendBlocking,
|
||||
Index: idx,
|
||||
Backend: structs.QueryBackendBlocking,
|
||||
ResultsFilteredByACLs: totalNodeLength != len(nodes),
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
|
|
|
@ -55,6 +55,10 @@ func TestServerInternalServiceDump(t *testing.T) {
|
|||
Service: "web",
|
||||
Kind: structs.ServiceKindTypical,
|
||||
},
|
||||
{
|
||||
Service: "web-deny",
|
||||
Kind: structs.ServiceKindTypical,
|
||||
},
|
||||
{
|
||||
Service: "db",
|
||||
Kind: structs.ServiceKindTypical,
|
||||
|
@ -67,14 +71,14 @@ func TestServerInternalServiceDump(t *testing.T) {
|
|||
}))
|
||||
}
|
||||
|
||||
authz := newStaticResolver(
|
||||
policyAuthorizer(t, `
|
||||
policyAuth := policyAuthorizer(t, `
|
||||
service "mgw" { policy = "read" }
|
||||
service "web" { policy = "read" }
|
||||
service "web-deny" { policy = "deny" }
|
||||
service "db" { policy = "read" }
|
||||
node_prefix "node-" { policy = "read" }
|
||||
`),
|
||||
)
|
||||
`)
|
||||
authz := newStaticResolver(policyAuth)
|
||||
|
||||
dataSource := ServerInternalServiceDump(ServerDataSourceDeps{
|
||||
GetStore: func() Store { return store },
|
||||
|
@ -121,6 +125,42 @@ func TestServerInternalServiceDump(t *testing.T) {
|
|||
result := getEventResult[*structs.IndexedCheckServiceNodes](t, eventCh)
|
||||
require.Empty(t, result.Nodes)
|
||||
})
|
||||
|
||||
const (
|
||||
bexprMatchingUserTokenPermissions = "Service.Service matches `web.*`"
|
||||
bexpNotMatchingUserTokenPermissions = "Service.Service matches `mgw.*`"
|
||||
)
|
||||
|
||||
authz.SwapAuthorizer(policyAuthorizer(t, `
|
||||
service "mgw" { policy = "deny" }
|
||||
service "web" { policy = "read" }
|
||||
service "web-deny" { policy = "deny" }
|
||||
service "db" { policy = "read" }
|
||||
node_prefix "node-" { policy = "read" }
|
||||
`))
|
||||
|
||||
t.Run("request with filter that matches token permissions returns 1 result and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
eventCh := make(chan proxycfg.UpdateEvent)
|
||||
require.NoError(t, dataSource.Notify(ctx, &structs.ServiceDumpRequest{
|
||||
QueryOptions: structs.QueryOptions{Filter: bexprMatchingUserTokenPermissions},
|
||||
}, "", eventCh))
|
||||
|
||||
result := getEventResult[*structs.IndexedCheckServiceNodes](t, eventCh)
|
||||
require.Len(t, result.Nodes, 1)
|
||||
require.Equal(t, "web", result.Nodes[0].Service.Service)
|
||||
require.True(t, result.ResultsFilteredByACLs)
|
||||
})
|
||||
|
||||
t.Run("request with filter that does not match token permissions returns 0 results and ResultsFilteredByACLs equal to true", func(t *testing.T) {
|
||||
eventCh := make(chan proxycfg.UpdateEvent)
|
||||
require.NoError(t, dataSource.Notify(ctx, &structs.ServiceDumpRequest{
|
||||
QueryOptions: structs.QueryOptions{Filter: bexpNotMatchingUserTokenPermissions},
|
||||
}, "", eventCh))
|
||||
|
||||
result := getEventResult[*structs.IndexedCheckServiceNodes](t, eventCh)
|
||||
require.Len(t, result.Nodes, 0)
|
||||
require.True(t, result.ResultsFilteredByACLs)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
|
|
|
@ -476,16 +476,12 @@ func (h *handlerAPIGateway) handleRouteConfigUpdate(ctx context.Context, u Updat
|
|||
cancelUpstream()
|
||||
delete(snap.APIGateway.WatchedUpstreams[upstreamID], targetID)
|
||||
delete(snap.APIGateway.WatchedUpstreamEndpoints[upstreamID], targetID)
|
||||
|
||||
if targetUID := NewUpstreamIDFromTargetID(targetID); targetUID.Peer != "" {
|
||||
snap.APIGateway.PeerUpstreamEndpoints.CancelWatch(targetUID)
|
||||
snap.APIGateway.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer)
|
||||
}
|
||||
}
|
||||
|
||||
cancelDiscoChain()
|
||||
delete(snap.APIGateway.WatchedDiscoveryChains, upstreamID)
|
||||
}
|
||||
reconcilePeeringWatches(snap.APIGateway.DiscoveryChain, snap.APIGateway.UpstreamConfig, snap.APIGateway.PeeredUpstreams, snap.APIGateway.PeerUpstreamEndpoints, snap.APIGateway.UpstreamPeerTrustBundles)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -380,49 +380,7 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s
|
|||
//
|
||||
// Clean up data
|
||||
//
|
||||
|
||||
peeredChainTargets := make(map[UpstreamID]struct{})
|
||||
for _, discoChain := range snap.ConnectProxy.DiscoveryChain {
|
||||
for _, target := range discoChain.Targets {
|
||||
if target.Peer == "" {
|
||||
continue
|
||||
}
|
||||
uid := NewUpstreamIDFromTargetID(target.ID)
|
||||
peeredChainTargets[uid] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
validPeerNames := make(map[string]struct{})
|
||||
|
||||
// Iterate through all known endpoints and remove references to upstream IDs that weren't in the update
|
||||
snap.ConnectProxy.PeerUpstreamEndpoints.ForEachKey(func(uid UpstreamID) bool {
|
||||
// Peered upstream is explicitly defined in upstream config
|
||||
if _, ok := snap.ConnectProxy.UpstreamConfig[uid]; ok {
|
||||
validPeerNames[uid.Peer] = struct{}{}
|
||||
return true
|
||||
}
|
||||
// Peered upstream came from dynamic source of imported services
|
||||
if _, ok := seenUpstreams[uid]; ok {
|
||||
validPeerNames[uid.Peer] = struct{}{}
|
||||
return true
|
||||
}
|
||||
// Peered upstream came from a discovery chain target
|
||||
if _, ok := peeredChainTargets[uid]; ok {
|
||||
validPeerNames[uid.Peer] = struct{}{}
|
||||
return true
|
||||
}
|
||||
snap.ConnectProxy.PeerUpstreamEndpoints.CancelWatch(uid)
|
||||
return true
|
||||
})
|
||||
|
||||
// Iterate through all known trust bundles and remove references to any unseen peer names
|
||||
snap.ConnectProxy.UpstreamPeerTrustBundles.ForEachKey(func(peerName PeerName) bool {
|
||||
if _, ok := validPeerNames[peerName]; !ok {
|
||||
snap.ConnectProxy.UpstreamPeerTrustBundles.CancelWatch(peerName)
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
reconcilePeeringWatches(snap.ConnectProxy.DiscoveryChain, snap.ConnectProxy.UpstreamConfig, snap.ConnectProxy.PeeredUpstreams, snap.ConnectProxy.PeerUpstreamEndpoints, snap.ConnectProxy.UpstreamPeerTrustBundles)
|
||||
case u.CorrelationID == intentionUpstreamsID:
|
||||
resp, ok := u.Result.(*structs.IndexedServiceList)
|
||||
if !ok {
|
||||
|
@ -490,18 +448,13 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s
|
|||
continue
|
||||
}
|
||||
if _, ok := seenUpstreams[uid]; !ok {
|
||||
for targetID, cancelFn := range targets {
|
||||
for _, cancelFn := range targets {
|
||||
cancelFn()
|
||||
|
||||
targetUID := NewUpstreamIDFromTargetID(targetID)
|
||||
if targetUID.Peer != "" {
|
||||
snap.ConnectProxy.PeerUpstreamEndpoints.CancelWatch(targetUID)
|
||||
snap.ConnectProxy.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer)
|
||||
}
|
||||
}
|
||||
delete(snap.ConnectProxy.WatchedUpstreams, uid)
|
||||
}
|
||||
}
|
||||
reconcilePeeringWatches(snap.ConnectProxy.DiscoveryChain, snap.ConnectProxy.UpstreamConfig, snap.ConnectProxy.PeeredUpstreams, snap.ConnectProxy.PeerUpstreamEndpoints, snap.ConnectProxy.UpstreamPeerTrustBundles)
|
||||
for uid := range snap.ConnectProxy.WatchedUpstreamEndpoints {
|
||||
if upstream, ok := snap.ConnectProxy.UpstreamConfig[uid]; ok && !upstream.CentrallyConfigured {
|
||||
continue
|
||||
|
|
|
@ -171,18 +171,13 @@ func (s *handlerIngressGateway) handleUpdate(ctx context.Context, u UpdateEvent,
|
|||
delete(snap.IngressGateway.WatchedUpstreams[uid], targetID)
|
||||
delete(snap.IngressGateway.WatchedUpstreamEndpoints[uid], targetID)
|
||||
cancelUpstreamFn()
|
||||
|
||||
targetUID := NewUpstreamIDFromTargetID(targetID)
|
||||
if targetUID.Peer != "" {
|
||||
snap.IngressGateway.PeerUpstreamEndpoints.CancelWatch(targetUID)
|
||||
snap.IngressGateway.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer)
|
||||
}
|
||||
}
|
||||
|
||||
cancelFn()
|
||||
delete(snap.IngressGateway.WatchedDiscoveryChains, uid)
|
||||
}
|
||||
}
|
||||
reconcilePeeringWatches(snap.IngressGateway.DiscoveryChain, snap.IngressGateway.UpstreamConfig, snap.IngressGateway.PeeredUpstreams, snap.IngressGateway.PeerUpstreamEndpoints, snap.IngressGateway.UpstreamPeerTrustBundles)
|
||||
|
||||
if err := s.watchIngressLeafCert(ctx, snap); err != nil {
|
||||
return err
|
||||
|
|
|
@ -13,12 +13,15 @@ import (
|
|||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"golang.org/x/time/rate"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
|
||||
cachetype "github.com/hashicorp/consul/agent/cache-types"
|
||||
"github.com/hashicorp/consul/agent/proxycfg/internal/watch"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/logging"
|
||||
"github.com/hashicorp/consul/proto/private/pbpeering"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -551,3 +554,48 @@ func watchMeshGateway(ctx context.Context, opts gatewayWatchOpts) error {
|
|||
EnterpriseMeta: *structs.DefaultEnterpriseMetaInPartition(opts.key.Partition),
|
||||
}, correlationId, opts.notifyCh)
|
||||
}
|
||||
|
||||
func reconcilePeeringWatches(compiledDiscoveryChains map[UpstreamID]*structs.CompiledDiscoveryChain, upstreams map[UpstreamID]*structs.Upstream, peeredUpstreams map[UpstreamID]struct{}, peerUpstreamEndpoints watch.Map[UpstreamID, structs.CheckServiceNodes], upstreamPeerTrustBundles watch.Map[PeerName, *pbpeering.PeeringTrustBundle]) {
|
||||
|
||||
peeredChainTargets := make(map[UpstreamID]struct{})
|
||||
for _, discoChain := range compiledDiscoveryChains {
|
||||
for _, target := range discoChain.Targets {
|
||||
if target.Peer == "" {
|
||||
continue
|
||||
}
|
||||
uid := NewUpstreamIDFromTargetID(target.ID)
|
||||
peeredChainTargets[uid] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
validPeerNames := make(map[string]struct{})
|
||||
|
||||
// Iterate through all known endpoints and remove references to upstream IDs that weren't in the update
|
||||
peerUpstreamEndpoints.ForEachKey(func(uid UpstreamID) bool {
|
||||
// Peered upstream is explicitly defined in upstream config
|
||||
if _, ok := upstreams[uid]; ok {
|
||||
validPeerNames[uid.Peer] = struct{}{}
|
||||
return true
|
||||
}
|
||||
// Peered upstream came from dynamic source of imported services
|
||||
if _, ok := peeredUpstreams[uid]; ok {
|
||||
validPeerNames[uid.Peer] = struct{}{}
|
||||
return true
|
||||
}
|
||||
// Peered upstream came from a discovery chain target
|
||||
if _, ok := peeredChainTargets[uid]; ok {
|
||||
validPeerNames[uid.Peer] = struct{}{}
|
||||
return true
|
||||
}
|
||||
peerUpstreamEndpoints.CancelWatch(uid)
|
||||
return true
|
||||
})
|
||||
|
||||
// Iterate through all known trust bundles and remove references to any unseen peer names
|
||||
upstreamPeerTrustBundles.ForEachKey(func(peerName PeerName) bool {
|
||||
if _, ok := validPeerNames[peerName]; !ok {
|
||||
upstreamPeerTrustBundles.CancelWatch(peerName)
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
|
|
@ -102,6 +102,7 @@ func (s *handlerUpstreams) handleUpdateUpstreams(ctx context.Context, u UpdateEv
|
|||
if err := s.resetWatchesFromChain(ctx, uid, resp.Chain, upstreamsSnapshot); err != nil {
|
||||
return err
|
||||
}
|
||||
reconcilePeeringWatches(upstreamsSnapshot.DiscoveryChain, upstreamsSnapshot.UpstreamConfig, upstreamsSnapshot.PeeredUpstreams, upstreamsSnapshot.PeerUpstreamEndpoints, upstreamsSnapshot.UpstreamPeerTrustBundles)
|
||||
|
||||
case strings.HasPrefix(u.CorrelationID, upstreamPeerWatchIDPrefix):
|
||||
resp, ok := u.Result.(*structs.IndexedCheckServiceNodes)
|
||||
|
@ -301,12 +302,6 @@ func (s *handlerUpstreams) resetWatchesFromChain(
|
|||
delete(snap.WatchedUpstreams[uid], targetID)
|
||||
delete(snap.WatchedUpstreamEndpoints[uid], targetID)
|
||||
cancelFn()
|
||||
|
||||
targetUID := NewUpstreamIDFromTargetID(targetID)
|
||||
if targetUID.Peer != "" {
|
||||
snap.PeerUpstreamEndpoints.CancelWatch(targetUID)
|
||||
snap.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
|
@ -479,8 +474,8 @@ func (s *handlerUpstreams) watchUpstreamTarget(ctx context.Context, snap *Config
|
|||
var entMeta acl.EnterpriseMeta
|
||||
entMeta.Merge(opts.entMeta)
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
err := s.dataSources.Health.Notify(ctx, &structs.ServiceSpecificRequest{
|
||||
peerCtx, cancel := context.WithCancel(ctx)
|
||||
err := s.dataSources.Health.Notify(peerCtx, &structs.ServiceSpecificRequest{
|
||||
PeerName: opts.peer,
|
||||
Datacenter: opts.datacenter,
|
||||
QueryOptions: structs.QueryOptions{
|
||||
|
@ -506,25 +501,25 @@ func (s *handlerUpstreams) watchUpstreamTarget(ctx context.Context, snap *Config
|
|||
return nil
|
||||
}
|
||||
|
||||
if ok := snap.PeerUpstreamEndpoints.IsWatched(uid); !ok {
|
||||
if !snap.PeerUpstreamEndpoints.IsWatched(uid) {
|
||||
snap.PeerUpstreamEndpoints.InitWatch(uid, cancel)
|
||||
}
|
||||
|
||||
// Check whether a watch for this peer exists to avoid duplicates.
|
||||
if ok := snap.UpstreamPeerTrustBundles.IsWatched(uid.Peer); !ok {
|
||||
peerCtx, cancel := context.WithCancel(ctx)
|
||||
if err := s.dataSources.TrustBundle.Notify(peerCtx, &cachetype.TrustBundleReadRequest{
|
||||
|
||||
if !snap.UpstreamPeerTrustBundles.IsWatched(uid.Peer) {
|
||||
peerCtx2, cancel2 := context.WithCancel(ctx)
|
||||
if err := s.dataSources.TrustBundle.Notify(peerCtx2, &cachetype.TrustBundleReadRequest{
|
||||
Request: &pbpeering.TrustBundleReadRequest{
|
||||
Name: uid.Peer,
|
||||
Partition: uid.PartitionOrDefault(),
|
||||
},
|
||||
QueryOptions: structs.QueryOptions{Token: s.token},
|
||||
}, peerTrustBundleIDPrefix+uid.Peer, s.ch); err != nil {
|
||||
cancel()
|
||||
cancel2()
|
||||
return fmt.Errorf("error while watching trust bundle for peer %q: %w", uid.Peer, err)
|
||||
}
|
||||
|
||||
snap.UpstreamPeerTrustBundles.InitWatch(uid.Peer, cancel)
|
||||
snap.UpstreamPeerTrustBundles.InitWatch(uid.Peer, cancel2)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
|
@ -64,9 +64,7 @@ func setupPrimaryServer(t *testing.T) *agent.TestAgent {
|
|||
|
||||
config := `
|
||||
server = true
|
||||
datacenter = "primary"
|
||||
primary_datacenter = "primary"
|
||||
|
||||
datacenter = "primary"
|
||||
connect {
|
||||
enabled = true
|
||||
}
|
||||
|
|
|
@ -800,6 +800,11 @@ func (policies ACLPolicies) resolveWithCache(cache *ACLCaches, entConf *acl.Conf
|
|||
continue
|
||||
}
|
||||
|
||||
//pulling from the cache, we don't want to break any rules that are already in the cache
|
||||
if entConf == nil {
|
||||
entConf = &acl.Config{}
|
||||
}
|
||||
entConf.WarnOnDuplicateKey = true
|
||||
p, err := acl.NewPolicyFromSource(policy.Rules, entConf, policy.EnterprisePolicyMeta())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse %q: %v", policy.Name, err)
|
||||
|
|
|
@ -403,7 +403,7 @@ func TestStructs_ACLPolicies_resolveWithCache(t *testing.T) {
|
|||
ID: "5d5653a1-2c2b-4b36-b083-fc9f1398eb7b",
|
||||
Name: "policy1",
|
||||
Description: "policy1",
|
||||
Rules: `node_prefix "" { policy = "read" }`,
|
||||
Rules: `node_prefix "" { policy = "read", policy = "read", },`,
|
||||
RaftIndex: RaftIndex{
|
||||
CreateIndex: 1,
|
||||
ModifyIndex: 2,
|
||||
|
@ -413,7 +413,7 @@ func TestStructs_ACLPolicies_resolveWithCache(t *testing.T) {
|
|||
ID: "b35541f0-a88a-48da-bc66-43553c60b628",
|
||||
Name: "policy2",
|
||||
Description: "policy2",
|
||||
Rules: `agent_prefix "" { policy = "read" }`,
|
||||
Rules: `agent_prefix "" { policy = "read" } `,
|
||||
RaftIndex: RaftIndex{
|
||||
CreateIndex: 3,
|
||||
ModifyIndex: 4,
|
||||
|
@ -433,7 +433,8 @@ func TestStructs_ACLPolicies_resolveWithCache(t *testing.T) {
|
|||
ID: "8bf38965-95e5-4e86-9be7-f6070cc0708b",
|
||||
Name: "policy4",
|
||||
Description: "policy4",
|
||||
Rules: `service_prefix "" { policy = "read" }`,
|
||||
//test should still pass even with the duplicate key since its resolving the cache
|
||||
Rules: `service_prefix "" { policy = "read" policy = "read" }`,
|
||||
RaftIndex: RaftIndex{
|
||||
CreateIndex: 7,
|
||||
ModifyIndex: 8,
|
||||
|
|
|
@ -1824,13 +1824,15 @@ func configureClusterWithHostnames(
|
|||
cluster.DnsRefreshRate = durationpb.New(rate)
|
||||
cluster.DnsLookupFamily = envoy_cluster_v3.Cluster_V4_ONLY
|
||||
|
||||
envoyMaxEndpoints := 1
|
||||
discoveryType := envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_LOGICAL_DNS}
|
||||
if dnsDiscoveryType == "strict_dns" {
|
||||
discoveryType.Type = envoy_cluster_v3.Cluster_STRICT_DNS
|
||||
envoyMaxEndpoints = len(hostnameEndpoints)
|
||||
}
|
||||
cluster.ClusterDiscoveryType = &discoveryType
|
||||
|
||||
endpoints := make([]*envoy_endpoint_v3.LbEndpoint, 0, 1)
|
||||
endpoints := make([]*envoy_endpoint_v3.LbEndpoint, 0, envoyMaxEndpoints)
|
||||
uniqueHostnames := make(map[string]bool)
|
||||
|
||||
var (
|
||||
|
@ -1848,12 +1850,15 @@ func configureClusterWithHostnames(
|
|||
continue
|
||||
}
|
||||
|
||||
if len(endpoints) == 0 {
|
||||
if len(endpoints) < envoyMaxEndpoints {
|
||||
endpoints = append(endpoints, makeLbEndpoint(addr, port, health, weight))
|
||||
|
||||
hostname = addr
|
||||
idx = i
|
||||
break
|
||||
|
||||
if len(endpoints) == envoyMaxEndpoints {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1867,8 +1872,8 @@ func configureClusterWithHostnames(
|
|||
|
||||
endpoints = append(endpoints, fallback)
|
||||
}
|
||||
if len(uniqueHostnames) > 1 {
|
||||
logger.Warn(fmt.Sprintf("service contains instances with more than one unique hostname; only %q be resolved by Envoy", hostname),
|
||||
if len(uniqueHostnames) > 1 && envoyMaxEndpoints == 1 {
|
||||
logger.Warn(fmt.Sprintf("service contains instances with more than one unique hostname; only %q will be resolved by Envoy", hostname),
|
||||
"dc", dc, "service", service.String())
|
||||
}
|
||||
|
||||
|
|
15
api/api.go
15
api/api.go
|
@ -1087,8 +1087,23 @@ func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) {
|
|||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
|
||||
contentType := GetContentType(req)
|
||||
|
||||
if req != nil {
|
||||
req.Header.Set(contentTypeHeader, contentType)
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
resp, err := c.config.HttpClient.Do(req)
|
||||
|
||||
if resp != nil {
|
||||
respContentType := resp.Header.Get(contentTypeHeader)
|
||||
if respContentType == "" || respContentType != contentType {
|
||||
resp.Header.Set(contentTypeHeader, contentType)
|
||||
}
|
||||
}
|
||||
|
||||
diff := time.Since(start)
|
||||
return diff, resp, err
|
||||
}
|
||||
|
|
|
@ -935,11 +935,11 @@ func TestAPI_Headers(t *testing.T) {
|
|||
|
||||
_, _, err = kv.Get("test-headers", nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "", request.Header.Get("Content-Type"))
|
||||
require.Equal(t, "application/json", request.Header.Get("Content-Type"))
|
||||
|
||||
_, err = kv.Delete("test-headers", nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "", request.Header.Get("Content-Type"))
|
||||
require.Equal(t, "application/json", request.Header.Get("Content-Type"))
|
||||
|
||||
err = c.Snapshot().Restore(nil, strings.NewReader("foo"))
|
||||
require.Error(t, err)
|
||||
|
|
|
@ -0,0 +1,81 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
contentTypeHeader = "Content-Type"
|
||||
plainContentType = "text/plain; charset=utf-8"
|
||||
octetStream = "application/octet-stream"
|
||||
jsonContentType = "application/json" // Default content type
|
||||
)
|
||||
|
||||
// ContentTypeRule defines a rule for determining the content type of an HTTP request.
|
||||
// This rule is based on the combination of the HTTP path, method, and the desired content type.
|
||||
type ContentTypeRule struct {
|
||||
path string
|
||||
httpMethod string
|
||||
contentType string
|
||||
}
|
||||
|
||||
var ContentTypeRules = []ContentTypeRule{
|
||||
{
|
||||
path: "/v1/snapshot",
|
||||
httpMethod: http.MethodPut,
|
||||
contentType: octetStream,
|
||||
},
|
||||
{
|
||||
path: "/v1/kv",
|
||||
httpMethod: http.MethodPut,
|
||||
contentType: octetStream,
|
||||
},
|
||||
{
|
||||
path: "/v1/event/fire",
|
||||
httpMethod: http.MethodPut,
|
||||
contentType: octetStream,
|
||||
},
|
||||
}
|
||||
|
||||
// GetContentType returns the content type for a request
|
||||
// This function isused as routing logic or middleware to determine and enforce
|
||||
// the appropriate content type for HTTP requests.
|
||||
func GetContentType(req *http.Request) string {
|
||||
reqContentType := req.Header.Get(contentTypeHeader)
|
||||
|
||||
if isIndexPage(req) {
|
||||
return plainContentType
|
||||
}
|
||||
|
||||
// For GET, DELETE, or internal API paths, ensure a valid Content-Type is returned.
|
||||
if req.Method == http.MethodGet || req.Method == http.MethodDelete || strings.HasPrefix(req.URL.Path, "/v1/internal") {
|
||||
if reqContentType == "" {
|
||||
// Default to JSON Content-Type if no Content-Type is provided.
|
||||
return jsonContentType
|
||||
}
|
||||
// Return the provided Content-Type if it exists.
|
||||
return reqContentType
|
||||
}
|
||||
|
||||
for _, rule := range ContentTypeRules {
|
||||
if matchesRule(req, rule) {
|
||||
return rule.contentType
|
||||
}
|
||||
}
|
||||
return jsonContentType
|
||||
}
|
||||
|
||||
// matchesRule checks if a request matches a content type rule
|
||||
func matchesRule(req *http.Request, rule ContentTypeRule) bool {
|
||||
return strings.HasPrefix(req.URL.Path, rule.path) &&
|
||||
(rule.httpMethod == "" || req.Method == rule.httpMethod)
|
||||
}
|
||||
|
||||
// isIndexPage checks if the request is for the index page
|
||||
func isIndexPage(req *http.Request) bool {
|
||||
return req.URL.Path == "/" || req.URL.Path == "/ui"
|
||||
}
|
|
@ -5,6 +5,7 @@ go 1.19
|
|||
replace github.com/hashicorp/consul/sdk => ../sdk
|
||||
|
||||
retract (
|
||||
v1.29.5 // cut from incorrect branch
|
||||
v1.28.0 // tag was mutated
|
||||
v1.27.1 // tag was mutated
|
||||
v1.21.2 // tag was mutated
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
#
|
||||
# See https://www.consul.io/docs/connect/proxies/envoy#supported-versions for more information on Consul's Envoy
|
||||
# version support.
|
||||
1.31.2
|
||||
1.30.6
|
||||
1.29.9
|
||||
1.31.4
|
||||
1.30.8
|
||||
1.29.11
|
||||
1.28.7
|
||||
|
|
22
go.mod
22
go.mod
|
@ -35,7 +35,7 @@ require (
|
|||
github.com/go-jose/go-jose/v3 v3.0.3
|
||||
github.com/go-openapi/runtime v0.26.2
|
||||
github.com/go-openapi/strfmt v0.21.10
|
||||
github.com/google/go-cmp v0.5.9
|
||||
github.com/google/go-cmp v0.6.0
|
||||
github.com/google/gofuzz v1.2.0
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1
|
||||
github.com/google/tcpproxy v0.0.0-20180808230851-dfa16c61dad2
|
||||
|
@ -68,7 +68,7 @@ require (
|
|||
github.com/hashicorp/go-version v1.2.1
|
||||
github.com/hashicorp/golang-lru v0.5.4
|
||||
github.com/hashicorp/hcdiag v0.5.1
|
||||
github.com/hashicorp/hcl v1.0.0
|
||||
github.com/hashicorp/hcl v1.0.1-vault-7
|
||||
github.com/hashicorp/hcl/v2 v2.14.1
|
||||
github.com/hashicorp/hcp-scada-provider v0.2.4
|
||||
github.com/hashicorp/hcp-sdk-go v0.80.0
|
||||
|
@ -114,12 +114,12 @@ require (
|
|||
go.opentelemetry.io/otel/sdk/metric v0.39.0
|
||||
go.opentelemetry.io/proto/otlp v1.0.0
|
||||
go.uber.org/goleak v1.1.10
|
||||
golang.org/x/crypto v0.22.0
|
||||
golang.org/x/crypto v0.31.0
|
||||
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63
|
||||
golang.org/x/net v0.24.0
|
||||
golang.org/x/net v0.25.0
|
||||
golang.org/x/oauth2 v0.15.0
|
||||
golang.org/x/sync v0.4.0
|
||||
golang.org/x/sys v0.20.0
|
||||
golang.org/x/sync v0.10.0
|
||||
golang.org/x/sys v0.28.0
|
||||
golang.org/x/time v0.3.0
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98
|
||||
google.golang.org/grpc v1.58.3
|
||||
|
@ -186,7 +186,7 @@ require (
|
|||
github.com/go-openapi/validate v0.22.4 // indirect
|
||||
github.com/go-ozzo/ozzo-validation v3.6.0+incompatible // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.5.1 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
|
@ -263,10 +263,10 @@ require (
|
|||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.17.0 // indirect
|
||||
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
|
||||
golang.org/x/mod v0.13.0 // indirect
|
||||
golang.org/x/term v0.19.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/tools v0.14.0 // indirect
|
||||
golang.org/x/mod v0.17.0 // indirect
|
||||
golang.org/x/term v0.27.0 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
|
||||
google.golang.org/api v0.126.0 // indirect
|
||||
google.golang.org/appengine v1.6.8 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 // indirect
|
||||
|
|
41
go.sum
41
go.sum
|
@ -287,8 +287,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69
|
|||
github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A=
|
||||
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
|
||||
github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
|
||||
github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
|
||||
|
@ -348,8 +348,9 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
|||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
|
@ -488,8 +489,9 @@ github.com/hashicorp/golang-lru/v2 v2.0.0 h1:Lf+9eD8m5pncvHAOCQj49GSN6aQI8XGfI5O
|
|||
github.com/hashicorp/golang-lru/v2 v2.0.0/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/hashicorp/hcdiag v0.5.1 h1:KZcx9xzRfEOQ2OMbwPxVvHyXwLLRqYpSHxCEOtHfQ6w=
|
||||
github.com/hashicorp/hcdiag v0.5.1/go.mod h1:RMC2KkffN9uJ+5mFSaL67ZFVj4CDeetPF2d/53XpwXo=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I=
|
||||
github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM=
|
||||
github.com/hashicorp/hcl/v2 v2.14.1 h1:x0BpjfZ+CYdbiz+8yZTQ+gdLO7IXvOut7Da+XJayx34=
|
||||
github.com/hashicorp/hcl/v2 v2.14.1/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0=
|
||||
github.com/hashicorp/hcp-scada-provider v0.2.4 h1:XvctVEd4VqWVlqN1VA4vIhJANstZrc4gd2oCfrFLWZc=
|
||||
|
@ -919,8 +921,8 @@ golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0
|
|||
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
|
||||
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
|
||||
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
|
||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
|
@ -961,8 +963,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY=
|
||||
golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180611182652-db08ff08e862/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
@ -1013,8 +1015,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
|
|||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
|
||||
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
|
||||
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
@ -1045,8 +1047,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
|
||||
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
@ -1126,15 +1128,15 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
|
||||
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
|
||||
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
|
||||
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -1147,8 +1149,9 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
|||
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
@ -1214,8 +1217,8 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
|||
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
|
||||
golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
|
|
@ -1,34 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package hcp
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/consul/internal/controller"
|
||||
"github.com/hashicorp/consul/internal/hcp/internal/controllers"
|
||||
"github.com/hashicorp/consul/internal/hcp/internal/controllers/link"
|
||||
"github.com/hashicorp/consul/internal/hcp/internal/types"
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
)
|
||||
|
||||
// RegisterTypes adds all resource types within the "hcp" API group
|
||||
// to the given type registry
|
||||
func RegisterTypes(r resource.Registry) {
|
||||
types.Register(r)
|
||||
}
|
||||
|
||||
type ControllerDependencies = controllers.Dependencies
|
||||
|
||||
var IsValidated = link.IsValidated
|
||||
var LinkName = types.LinkName
|
||||
|
||||
// RegisterControllers registers controllers for the catalog types with
|
||||
// the given controller Manager.
|
||||
func RegisterControllers(mgr *controller.Manager, deps ControllerDependencies) {
|
||||
controllers.Register(mgr, deps)
|
||||
}
|
||||
|
||||
// Needed for testing
|
||||
var StatusKey = link.StatusKey
|
||||
var ConditionValidatedSuccess = link.ConditionValidatedSuccess
|
||||
var ConditionValidatedFailed = link.ConditionValidatedFailed
|
|
@ -1,234 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package link
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/types/known/anypb"
|
||||
|
||||
gnmmod "github.com/hashicorp/hcp-sdk-go/clients/cloud-global-network-manager-service/preview/2022-02-15/models"
|
||||
|
||||
hcpclient "github.com/hashicorp/consul/agent/hcp/client"
|
||||
"github.com/hashicorp/consul/agent/hcp/config"
|
||||
"github.com/hashicorp/consul/internal/controller"
|
||||
"github.com/hashicorp/consul/internal/hcp/internal/types"
|
||||
"github.com/hashicorp/consul/internal/storage"
|
||||
pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
)
|
||||
|
||||
// HCPClientFn is a function that can be used to create an HCP client from a Link object.
|
||||
// This function type should be passed to a LinkController in order to tell it how to make a client from
|
||||
// a Link. For normal use, DefaultHCPClientFn should be used, but tests can substitute in a function that creates a
|
||||
// mock client.
|
||||
type HCPClientFn func(config.CloudConfig) (hcpclient.Client, error)
|
||||
|
||||
var DefaultHCPClientFn HCPClientFn = func(cfg config.CloudConfig) (hcpclient.Client, error) {
|
||||
hcpClient, err := hcpclient.NewClient(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return hcpClient, nil
|
||||
}
|
||||
|
||||
func LinkController(
|
||||
hcpClientFn HCPClientFn,
|
||||
cfg config.CloudConfig,
|
||||
) *controller.Controller {
|
||||
return controller.NewController("link", pbhcp.LinkType).
|
||||
WithInitializer(
|
||||
&linkInitializer{
|
||||
cloudConfig: cfg,
|
||||
},
|
||||
).
|
||||
WithReconciler(
|
||||
&linkReconciler{
|
||||
hcpClientFn: hcpClientFn,
|
||||
cloudConfig: cfg,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
type linkReconciler struct {
|
||||
hcpClientFn HCPClientFn
|
||||
cloudConfig config.CloudConfig
|
||||
}
|
||||
|
||||
func hcpAccessLevelToConsul(level *gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevel) pbhcp.AccessLevel {
|
||||
if level == nil {
|
||||
return pbhcp.AccessLevel_ACCESS_LEVEL_UNSPECIFIED
|
||||
}
|
||||
|
||||
switch *level {
|
||||
case gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevelCONSULACCESSLEVELUNSPECIFIED:
|
||||
return pbhcp.AccessLevel_ACCESS_LEVEL_UNSPECIFIED
|
||||
case gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevelCONSULACCESSLEVELGLOBALREADWRITE:
|
||||
return pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_WRITE
|
||||
case gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevelCONSULACCESSLEVELGLOBALREADONLY:
|
||||
return pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_ONLY
|
||||
default:
|
||||
return pbhcp.AccessLevel_ACCESS_LEVEL_UNSPECIFIED
|
||||
}
|
||||
}
|
||||
|
||||
func (r *linkReconciler) Reconcile(ctx context.Context, rt controller.Runtime, req controller.Request) error {
|
||||
// The runtime is passed by value so replacing it here for the remainder of this
|
||||
// reconciliation request processing will not affect future invocations.
|
||||
rt.Logger = rt.Logger.With("resource-id", req.ID, "controller", StatusKey)
|
||||
|
||||
rt.Logger.Trace("reconciling link")
|
||||
|
||||
rsp, err := rt.Client.Read(ctx, &pbresource.ReadRequest{Id: req.ID})
|
||||
switch {
|
||||
case status.Code(err) == codes.NotFound:
|
||||
rt.Logger.Trace("link has been deleted")
|
||||
return nil
|
||||
case err != nil:
|
||||
rt.Logger.Error("the resource service has returned an unexpected error", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
res := rsp.Resource
|
||||
var link pbhcp.Link
|
||||
if err := res.Data.UnmarshalTo(&link); err != nil {
|
||||
rt.Logger.Error("error unmarshalling link data", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
newStatus := &pbresource.Status{
|
||||
ObservedGeneration: res.Generation,
|
||||
Conditions: []*pbresource.Condition{},
|
||||
}
|
||||
defer writeStatusIfNotEqual(ctx, rt, res, newStatus)
|
||||
newStatus.Conditions = append(newStatus.Conditions, ConditionValidatedSuccess)
|
||||
|
||||
// Merge the link data with the existing cloud config so that we only overwrite the
|
||||
// fields that are provided by the link. This ensures that:
|
||||
// 1. The HCP configuration (i.e., how to connect to HCP) is preserved
|
||||
// 2. The Consul agent's node ID and node name are preserved
|
||||
newCfg := CloudConfigFromLink(&link)
|
||||
cfg := config.Merge(r.cloudConfig, newCfg)
|
||||
hcpClient, err := r.hcpClientFn(cfg)
|
||||
if err != nil {
|
||||
rt.Logger.Error("error creating HCP client", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Sync cluster data from HCP
|
||||
cluster, err := hcpClient.GetCluster(ctx)
|
||||
if err != nil {
|
||||
rt.Logger.Error("error querying HCP for cluster", "error", err)
|
||||
condition := linkingFailedCondition(err)
|
||||
newStatus.Conditions = append(newStatus.Conditions, condition)
|
||||
return err
|
||||
}
|
||||
accessLevel := hcpAccessLevelToConsul(cluster.AccessLevel)
|
||||
|
||||
if link.HcpClusterUrl != cluster.HCPPortalURL ||
|
||||
link.AccessLevel != accessLevel {
|
||||
|
||||
link.HcpClusterUrl = cluster.HCPPortalURL
|
||||
link.AccessLevel = accessLevel
|
||||
|
||||
updatedData, err := anypb.New(&link)
|
||||
if err != nil {
|
||||
rt.Logger.Error("error marshalling link data", "error", err)
|
||||
return err
|
||||
}
|
||||
_, err = rt.Client.Write(
|
||||
ctx, &pbresource.WriteRequest{Resource: &pbresource.Resource{
|
||||
Id: &pbresource.ID{
|
||||
Name: types.LinkName,
|
||||
Type: pbhcp.LinkType,
|
||||
},
|
||||
Metadata: res.Metadata,
|
||||
Data: updatedData,
|
||||
}},
|
||||
)
|
||||
if err != nil {
|
||||
rt.Logger.Error("error updating link", "error", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
newStatus.Conditions = append(newStatus.Conditions, ConditionLinked(link.ResourceId))
|
||||
|
||||
return writeStatusIfNotEqual(ctx, rt, res, newStatus)
|
||||
}
|
||||
|
||||
type linkInitializer struct {
|
||||
cloudConfig config.CloudConfig
|
||||
}
|
||||
|
||||
func (i *linkInitializer) Initialize(ctx context.Context, rt controller.Runtime) error {
|
||||
if !i.cloudConfig.IsConfigured() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Construct a link resource to reflect the configuration
|
||||
data, err := anypb.New(
|
||||
&pbhcp.Link{
|
||||
ResourceId: i.cloudConfig.ResourceID,
|
||||
ClientId: i.cloudConfig.ClientID,
|
||||
ClientSecret: i.cloudConfig.ClientSecret,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create the link resource for a configuration-based link
|
||||
_, err = rt.Client.Write(
|
||||
ctx,
|
||||
&pbresource.WriteRequest{
|
||||
Resource: &pbresource.Resource{
|
||||
Id: &pbresource.ID{
|
||||
Name: types.LinkName,
|
||||
Type: pbhcp.LinkType,
|
||||
},
|
||||
Metadata: map[string]string{
|
||||
types.MetadataSourceKey: types.MetadataSourceConfig,
|
||||
},
|
||||
Data: data,
|
||||
},
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), storage.ErrWrongUid.Error()) ||
|
||||
strings.Contains(err.Error(), "leader unknown") {
|
||||
// If the error is likely ignorable and could eventually resolve itself,
|
||||
// log it as TRACE rather than ERROR.
|
||||
rt.Logger.Trace("error initializing controller", "error", err)
|
||||
} else {
|
||||
rt.Logger.Error("error initializing controller", "error", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func CloudConfigFromLink(link *pbhcp.Link) config.CloudConfig {
|
||||
var cfg config.CloudConfig
|
||||
if link == nil {
|
||||
return cfg
|
||||
}
|
||||
cfg = config.CloudConfig{
|
||||
ResourceID: link.GetResourceId(),
|
||||
ClientID: link.GetClientId(),
|
||||
ClientSecret: link.GetClientSecret(),
|
||||
}
|
||||
if link.GetHcpConfig() != nil {
|
||||
cfg.AuthURL = link.GetHcpConfig().GetAuthUrl()
|
||||
cfg.ScadaAddress = link.GetHcpConfig().GetScadaAddress()
|
||||
cfg.Hostname = link.GetHcpConfig().GetApiAddress()
|
||||
cfg.TLSConfig = &tls.Config{InsecureSkipVerify: link.GetHcpConfig().GetTlsInsecureSkipVerify()}
|
||||
}
|
||||
return cfg
|
||||
}
|
|
@ -1,248 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package link
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
gnmmod "github.com/hashicorp/hcp-sdk-go/clients/cloud-global-network-manager-service/preview/2022-02-15/models"
|
||||
|
||||
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
|
||||
hcpclient "github.com/hashicorp/consul/agent/hcp/client"
|
||||
"github.com/hashicorp/consul/agent/hcp/config"
|
||||
"github.com/hashicorp/consul/internal/controller"
|
||||
"github.com/hashicorp/consul/internal/hcp/internal/types"
|
||||
rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
|
||||
pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
)
|
||||
|
||||
type controllerSuite struct {
|
||||
suite.Suite
|
||||
|
||||
ctx context.Context
|
||||
client *rtest.Client
|
||||
rt controller.Runtime
|
||||
|
||||
tenancies []*pbresource.Tenancy
|
||||
}
|
||||
|
||||
func mockHcpClientFn(t *testing.T) (*hcpclient.MockClient, HCPClientFn) {
|
||||
mockClient := hcpclient.NewMockClient(t)
|
||||
|
||||
mockClientFunc := func(config config.CloudConfig) (hcpclient.Client, error) {
|
||||
return mockClient, nil
|
||||
}
|
||||
|
||||
return mockClient, mockClientFunc
|
||||
}
|
||||
|
||||
func (suite *controllerSuite) SetupTest() {
|
||||
suite.ctx = testutil.TestContext(suite.T())
|
||||
suite.tenancies = rtest.TestTenancies()
|
||||
client := svctest.NewResourceServiceBuilder().
|
||||
WithRegisterFns(types.Register).
|
||||
WithTenancies(suite.tenancies...).
|
||||
Run(suite.T())
|
||||
|
||||
suite.rt = controller.Runtime{
|
||||
Client: client,
|
||||
Logger: testutil.Logger(suite.T()),
|
||||
}
|
||||
suite.client = rtest.NewClient(client)
|
||||
}
|
||||
|
||||
func TestLinkController(t *testing.T) {
|
||||
suite.Run(t, new(controllerSuite))
|
||||
}
|
||||
|
||||
func (suite *controllerSuite) deleteResourceFunc(id *pbresource.ID) func() {
|
||||
return func() {
|
||||
suite.client.MustDelete(suite.T(), id)
|
||||
suite.client.WaitForDeletion(suite.T(), id)
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *controllerSuite) TestController_Ok() {
|
||||
// Run the controller manager
|
||||
mgr := controller.NewManager(suite.client, suite.rt.Logger)
|
||||
mockClient, mockClientFn := mockHcpClientFn(suite.T())
|
||||
readWrite := gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevelCONSULACCESSLEVELGLOBALREADWRITE
|
||||
mockClient.EXPECT().GetCluster(mock.Anything).Return(&hcpclient.Cluster{
|
||||
HCPPortalURL: "http://test.com",
|
||||
AccessLevel: &readWrite,
|
||||
}, nil)
|
||||
|
||||
mgr.Register(LinkController(
|
||||
mockClientFn,
|
||||
config.CloudConfig{},
|
||||
))
|
||||
mgr.SetRaftLeader(true)
|
||||
go mgr.Run(suite.ctx)
|
||||
|
||||
linkData := &pbhcp.Link{
|
||||
ClientId: "abc",
|
||||
ClientSecret: "abc",
|
||||
ResourceId: types.GenerateTestResourceID(suite.T()),
|
||||
}
|
||||
|
||||
link := rtest.Resource(pbhcp.LinkType, "global").
|
||||
WithData(suite.T(), linkData).
|
||||
Write(suite.T(), suite.client)
|
||||
|
||||
suite.T().Cleanup(suite.deleteResourceFunc(link.Id))
|
||||
|
||||
suite.client.WaitForStatusCondition(suite.T(), link.Id, StatusKey, ConditionLinked(linkData.ResourceId))
|
||||
var updatedLink pbhcp.Link
|
||||
updatedLinkResource := suite.client.WaitForNewVersion(suite.T(), link.Id, link.Version)
|
||||
require.NoError(suite.T(), updatedLinkResource.Data.UnmarshalTo(&updatedLink))
|
||||
require.Equal(suite.T(), "http://test.com", updatedLink.HcpClusterUrl)
|
||||
require.Equal(suite.T(), pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_WRITE, updatedLink.AccessLevel)
|
||||
}
|
||||
|
||||
func (suite *controllerSuite) TestController_Initialize() {
|
||||
// Run the controller manager with a configured link
|
||||
mgr := controller.NewManager(suite.client, suite.rt.Logger)
|
||||
|
||||
mockClient, mockClientFn := mockHcpClientFn(suite.T())
|
||||
readOnly := gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevelCONSULACCESSLEVELGLOBALREADONLY
|
||||
mockClient.EXPECT().GetCluster(mock.Anything).Return(&hcpclient.Cluster{
|
||||
HCPPortalURL: "http://test.com",
|
||||
AccessLevel: &readOnly,
|
||||
}, nil)
|
||||
|
||||
cloudCfg := config.CloudConfig{
|
||||
ClientID: "client-id-abc",
|
||||
ClientSecret: "client-secret-abc",
|
||||
ResourceID: types.GenerateTestResourceID(suite.T()),
|
||||
}
|
||||
|
||||
mgr.Register(LinkController(
|
||||
mockClientFn,
|
||||
cloudCfg,
|
||||
))
|
||||
mgr.SetRaftLeader(true)
|
||||
go mgr.Run(suite.ctx)
|
||||
|
||||
// Wait for link to be created by initializer
|
||||
id := &pbresource.ID{
|
||||
Type: pbhcp.LinkType,
|
||||
Name: types.LinkName,
|
||||
}
|
||||
suite.T().Cleanup(suite.deleteResourceFunc(id))
|
||||
r := suite.client.WaitForResourceExists(suite.T(), id)
|
||||
|
||||
// Check that created link has expected values
|
||||
var link pbhcp.Link
|
||||
err := r.Data.UnmarshalTo(&link)
|
||||
require.NoError(suite.T(), err)
|
||||
|
||||
require.Equal(suite.T(), cloudCfg.ResourceID, link.ResourceId)
|
||||
require.Equal(suite.T(), cloudCfg.ClientID, link.ClientId)
|
||||
require.Equal(suite.T(), cloudCfg.ClientSecret, link.ClientSecret)
|
||||
require.Equal(suite.T(), types.MetadataSourceConfig, r.Metadata[types.MetadataSourceKey])
|
||||
|
||||
// Wait for link to be connected successfully
|
||||
suite.client.WaitForStatusCondition(suite.T(), id, StatusKey, ConditionLinked(link.ResourceId))
|
||||
}
|
||||
|
||||
func (suite *controllerSuite) TestController_GetClusterError() {
|
||||
type testCase struct {
|
||||
expectErr error
|
||||
expectCondition *pbresource.Condition
|
||||
}
|
||||
tt := map[string]testCase{
|
||||
"unexpected": {
|
||||
expectErr: fmt.Errorf("error"),
|
||||
expectCondition: ConditionFailed,
|
||||
},
|
||||
"unauthorized": {
|
||||
expectErr: hcpclient.ErrUnauthorized,
|
||||
expectCondition: ConditionUnauthorized,
|
||||
},
|
||||
"forbidden": {
|
||||
expectErr: hcpclient.ErrForbidden,
|
||||
expectCondition: ConditionForbidden,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range tt {
|
||||
suite.T().Run(name, func(t *testing.T) {
|
||||
// Run the controller manager
|
||||
mgr := controller.NewManager(suite.client, suite.rt.Logger)
|
||||
mockClient, mockClientFunc := mockHcpClientFn(t)
|
||||
mockClient.EXPECT().GetCluster(mock.Anything).Return(nil, tc.expectErr)
|
||||
|
||||
mgr.Register(LinkController(
|
||||
mockClientFunc,
|
||||
config.CloudConfig{},
|
||||
))
|
||||
|
||||
mgr.SetRaftLeader(true)
|
||||
ctx, cancel := context.WithCancel(suite.ctx)
|
||||
t.Cleanup(cancel)
|
||||
go mgr.Run(ctx)
|
||||
|
||||
linkData := &pbhcp.Link{
|
||||
ClientId: "abc",
|
||||
ClientSecret: "abc",
|
||||
ResourceId: types.GenerateTestResourceID(t),
|
||||
}
|
||||
link := rtest.Resource(pbhcp.LinkType, "global").
|
||||
WithData(t, linkData).
|
||||
Write(t, suite.client)
|
||||
|
||||
t.Cleanup(suite.deleteResourceFunc(link.Id))
|
||||
|
||||
suite.client.WaitForStatusCondition(t, link.Id, StatusKey, tc.expectCondition)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_hcpAccessModeToConsul(t *testing.T) {
|
||||
type testCase struct {
|
||||
hcpAccessLevel *gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevel
|
||||
consulAccessLevel pbhcp.AccessLevel
|
||||
}
|
||||
tt := map[string]testCase{
|
||||
"unspecified": {
|
||||
hcpAccessLevel: func() *gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevel {
|
||||
t := gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevelCONSULACCESSLEVELUNSPECIFIED
|
||||
return &t
|
||||
}(),
|
||||
consulAccessLevel: pbhcp.AccessLevel_ACCESS_LEVEL_UNSPECIFIED,
|
||||
},
|
||||
"invalid": {
|
||||
hcpAccessLevel: nil,
|
||||
consulAccessLevel: pbhcp.AccessLevel_ACCESS_LEVEL_UNSPECIFIED,
|
||||
},
|
||||
"read_only": {
|
||||
hcpAccessLevel: func() *gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevel {
|
||||
t := gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevelCONSULACCESSLEVELGLOBALREADONLY
|
||||
return &t
|
||||
}(),
|
||||
consulAccessLevel: pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_ONLY,
|
||||
},
|
||||
"read_write": {
|
||||
hcpAccessLevel: func() *gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevel {
|
||||
t := gnmmod.HashicorpCloudGlobalNetworkManager20220215ClusterConsulAccessLevelCONSULACCESSLEVELGLOBALREADWRITE
|
||||
return &t
|
||||
}(),
|
||||
consulAccessLevel: pbhcp.AccessLevel_ACCESS_LEVEL_GLOBAL_READ_WRITE,
|
||||
},
|
||||
}
|
||||
for name, tc := range tt {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
accessLevel := hcpAccessLevelToConsul(tc.hcpAccessLevel)
|
||||
require.Equal(t, tc.consulAccessLevel, accessLevel)
|
||||
})
|
||||
}
|
||||
}
|
|
@ -1,142 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package link
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/consul/agent/hcp/client"
|
||||
"github.com/hashicorp/consul/internal/controller"
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
)
|
||||
|
||||
const (
|
||||
StatusKey = "consul.io/hcp/link"
|
||||
|
||||
// Statuses
|
||||
StatusLinked = "linked"
|
||||
StatusValidated = "validated"
|
||||
|
||||
LinkedSuccessReason = "SUCCESS"
|
||||
LinkedFailedReason = "FAILED"
|
||||
LinkedDisabledReasonV2ResourcesUnsupportedReason = "DISABLED_V2_RESOURCES_UNSUPPORTED"
|
||||
LinkedUnauthorizedReason = "UNAUTHORIZED"
|
||||
LinkedForbiddenReason = "FORBIDDEN"
|
||||
ValidatedSuccessReason = "SUCCESS"
|
||||
ValidatedFailedV2ResourcesReason = "V2_RESOURCES_UNSUPPORTED"
|
||||
|
||||
LinkedMessageFormat = "Successfully linked to cluster '%s'"
|
||||
FailedMessage = "Failed to link to HCP due to unexpected error"
|
||||
DisabledResourceAPIsEnabledMessage = "Link is disabled because resource-apis are enabled"
|
||||
UnauthorizedMessage = "Access denied, check client_id and client_secret"
|
||||
ForbiddenMessage = "Access denied, check the resource_id"
|
||||
ValidatedSuccessMessage = "Successfully validated link"
|
||||
ValidatedFailedV2ResourcesMessage = "Link is disabled because resource-apis are enabled"
|
||||
)
|
||||
|
||||
var (
|
||||
ConditionDisabled = &pbresource.Condition{
|
||||
Type: StatusLinked,
|
||||
State: pbresource.Condition_STATE_FALSE,
|
||||
Reason: LinkedDisabledReasonV2ResourcesUnsupportedReason,
|
||||
Message: DisabledResourceAPIsEnabledMessage,
|
||||
}
|
||||
ConditionFailed = &pbresource.Condition{
|
||||
Type: StatusLinked,
|
||||
State: pbresource.Condition_STATE_FALSE,
|
||||
Reason: LinkedFailedReason,
|
||||
Message: FailedMessage,
|
||||
}
|
||||
ConditionUnauthorized = &pbresource.Condition{
|
||||
Type: StatusLinked,
|
||||
State: pbresource.Condition_STATE_FALSE,
|
||||
Reason: LinkedUnauthorizedReason,
|
||||
Message: UnauthorizedMessage,
|
||||
}
|
||||
ConditionForbidden = &pbresource.Condition{
|
||||
Type: StatusLinked,
|
||||
State: pbresource.Condition_STATE_FALSE,
|
||||
Reason: LinkedForbiddenReason,
|
||||
Message: ForbiddenMessage,
|
||||
}
|
||||
ConditionValidatedSuccess = &pbresource.Condition{
|
||||
Type: StatusValidated,
|
||||
State: pbresource.Condition_STATE_TRUE,
|
||||
Reason: ValidatedSuccessReason,
|
||||
Message: ValidatedSuccessMessage,
|
||||
}
|
||||
ConditionValidatedFailed = &pbresource.Condition{
|
||||
Type: StatusValidated,
|
||||
State: pbresource.Condition_STATE_FALSE,
|
||||
Reason: ValidatedFailedV2ResourcesReason,
|
||||
Message: ValidatedFailedV2ResourcesMessage,
|
||||
}
|
||||
)
|
||||
|
||||
func ConditionLinked(resourceId string) *pbresource.Condition {
|
||||
return &pbresource.Condition{
|
||||
Type: StatusLinked,
|
||||
State: pbresource.Condition_STATE_TRUE,
|
||||
Reason: LinkedSuccessReason,
|
||||
Message: fmt.Sprintf(LinkedMessageFormat, resourceId),
|
||||
}
|
||||
}
|
||||
|
||||
func writeStatusIfNotEqual(ctx context.Context, rt controller.Runtime, res *pbresource.Resource, status *pbresource.Status) error {
|
||||
if resource.EqualStatus(res.Status[StatusKey], status, false) {
|
||||
return nil
|
||||
}
|
||||
_, err := rt.Client.WriteStatus(
|
||||
ctx, &pbresource.WriteStatusRequest{
|
||||
Id: res.Id,
|
||||
Key: StatusKey,
|
||||
Status: status,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
rt.Logger.Error("error writing link status", "error", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func linkingFailedCondition(err error) *pbresource.Condition {
|
||||
switch {
|
||||
case errors.Is(err, client.ErrUnauthorized):
|
||||
return ConditionUnauthorized
|
||||
case errors.Is(err, client.ErrForbidden):
|
||||
return ConditionForbidden
|
||||
default:
|
||||
return ConditionFailed
|
||||
}
|
||||
}
|
||||
|
||||
func IsLinked(res *pbresource.Resource) (linked bool, reason string) {
|
||||
return isConditionTrue(res, StatusLinked)
|
||||
}
|
||||
|
||||
func IsValidated(res *pbresource.Resource) (linked bool, reason string) {
|
||||
return isConditionTrue(res, StatusValidated)
|
||||
}
|
||||
|
||||
func isConditionTrue(res *pbresource.Resource, statusType string) (bool, string) {
|
||||
if !resource.EqualType(res.GetId().GetType(), pbhcp.LinkType) {
|
||||
return false, "resource is not hcp.Link type"
|
||||
}
|
||||
|
||||
linkStatus, ok := res.GetStatus()[StatusKey]
|
||||
if !ok {
|
||||
return false, "link status not set"
|
||||
}
|
||||
|
||||
for _, cond := range linkStatus.GetConditions() {
|
||||
if cond.Type == statusType && cond.GetState() == pbresource.Condition_STATE_TRUE {
|
||||
return true, ""
|
||||
}
|
||||
}
|
||||
return false, fmt.Sprintf("link status does not include positive %s condition", statusType)
|
||||
}
|
|
@ -1,26 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/consul/agent/hcp/config"
|
||||
"github.com/hashicorp/consul/internal/controller"
|
||||
"github.com/hashicorp/consul/internal/hcp/internal/controllers/link"
|
||||
"github.com/hashicorp/consul/internal/hcp/internal/controllers/telemetrystate"
|
||||
)
|
||||
|
||||
type Dependencies struct {
|
||||
CloudConfig config.CloudConfig
|
||||
}
|
||||
|
||||
func Register(mgr *controller.Manager, deps Dependencies) {
|
||||
mgr.Register(
|
||||
link.LinkController(
|
||||
link.DefaultHCPClientFn,
|
||||
deps.CloudConfig,
|
||||
),
|
||||
)
|
||||
|
||||
mgr.Register(telemetrystate.TelemetryStateController(link.DefaultHCPClientFn))
|
||||
}
|
|
@ -1,203 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package telemetrystate
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/types/known/anypb"
|
||||
|
||||
"github.com/hashicorp/consul/internal/controller"
|
||||
"github.com/hashicorp/consul/internal/controller/dependency"
|
||||
"github.com/hashicorp/consul/internal/hcp/internal/controllers/link"
|
||||
"github.com/hashicorp/consul/internal/hcp/internal/types"
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
)
|
||||
|
||||
var (
|
||||
globalID = &pbresource.ID{
|
||||
Name: "global",
|
||||
Type: pbhcp.TelemetryStateType,
|
||||
Tenancy: &pbresource.Tenancy{},
|
||||
}
|
||||
)
|
||||
|
||||
const MetaKeyDebugSkipDeletion = StatusKey + "/debug/skip-deletion"
|
||||
|
||||
func TelemetryStateController(hcpClientFn link.HCPClientFn) *controller.Controller {
|
||||
return controller.NewController(StatusKey, pbhcp.TelemetryStateType).
|
||||
WithWatch(pbhcp.LinkType, dependency.ReplaceType(pbhcp.TelemetryStateType)).
|
||||
WithReconciler(&telemetryStateReconciler{
|
||||
hcpClientFn: hcpClientFn,
|
||||
})
|
||||
}
|
||||
|
||||
type telemetryStateReconciler struct {
|
||||
hcpClientFn link.HCPClientFn
|
||||
}
|
||||
|
||||
func (r *telemetryStateReconciler) Reconcile(ctx context.Context, rt controller.Runtime, req controller.Request) error {
|
||||
// The runtime is passed by value so replacing it here for the remainder of this
|
||||
// reconciliation request processing will not affect future invocations.
|
||||
rt.Logger = rt.Logger.With("resource-id", req.ID, "controller", StatusKey)
|
||||
|
||||
rt.Logger.Trace("reconciling telemetry-state")
|
||||
|
||||
// First get the link resource in order to build a hcp client. If the link resource
|
||||
// doesn't exist then the telemetry-state should not exist either.
|
||||
res, err := getLinkResource(ctx, rt)
|
||||
if err != nil {
|
||||
rt.Logger.Error("failed to lookup Link resource", "error", err)
|
||||
return err
|
||||
}
|
||||
if res == nil {
|
||||
return ensureTelemetryStateDeleted(ctx, rt)
|
||||
}
|
||||
|
||||
// Check that the link resource indicates the cluster is linked
|
||||
// If the cluster is not linked, the telemetry-state resource should not exist
|
||||
if linked, reason := link.IsLinked(res.GetResource()); !linked {
|
||||
rt.Logger.Trace("cluster is not linked", "reason", reason)
|
||||
return ensureTelemetryStateDeleted(ctx, rt)
|
||||
}
|
||||
|
||||
hcpClient, err := r.hcpClientFn(link.CloudConfigFromLink(res.GetData()))
|
||||
if err != nil {
|
||||
rt.Logger.Error("error creating HCP Client", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Get the telemetry configuration and observability scoped credentials from hcp
|
||||
tCfg, err := hcpClient.FetchTelemetryConfig(ctx)
|
||||
if err != nil {
|
||||
rt.Logger.Error("error requesting telemetry config", "error", err)
|
||||
return err
|
||||
}
|
||||
clientID, clientSecret, err := hcpClient.GetObservabilitySecret(ctx)
|
||||
if err != nil {
|
||||
rt.Logger.Error("error requesting telemetry credentials", "error", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO allow hcp client config override from hcp TelemetryConfig
|
||||
hcpCfg := res.GetData().GetHcpConfig()
|
||||
|
||||
// TODO implement proxy options from hcp
|
||||
proxyCfg := &pbhcp.ProxyConfig{}
|
||||
|
||||
state := &pbhcp.TelemetryState{
|
||||
ResourceId: res.GetData().ResourceId,
|
||||
ClientId: clientID,
|
||||
ClientSecret: clientSecret,
|
||||
HcpConfig: hcpCfg,
|
||||
Proxy: proxyCfg,
|
||||
Metrics: &pbhcp.MetricsConfig{
|
||||
Labels: tCfg.MetricsConfig.Labels,
|
||||
Disabled: tCfg.MetricsConfig.Disabled,
|
||||
},
|
||||
}
|
||||
|
||||
if tCfg.MetricsConfig.Endpoint != nil {
|
||||
state.Metrics.Endpoint = tCfg.MetricsConfig.Endpoint.String()
|
||||
}
|
||||
if tCfg.MetricsConfig.Filters != nil {
|
||||
state.Metrics.IncludeList = []string{tCfg.MetricsConfig.Filters.String()}
|
||||
}
|
||||
|
||||
if err := writeTelemetryStateIfUpdated(ctx, rt, state); err != nil {
|
||||
rt.Logger.Error("error updating telemetry-state", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func ensureTelemetryStateDeleted(ctx context.Context, rt controller.Runtime) error {
|
||||
resp, err := rt.Client.Read(ctx, &pbresource.ReadRequest{Id: &pbresource.ID{Name: "global", Type: pbhcp.TelemetryStateType}})
|
||||
switch {
|
||||
case status.Code(err) == codes.NotFound:
|
||||
return nil
|
||||
case err != nil:
|
||||
rt.Logger.Error("the resource service has returned an unexpected error", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
rt.Logger.Trace("deleting telemetry-state")
|
||||
if _, ok := resp.GetResource().Metadata[MetaKeyDebugSkipDeletion]; ok {
|
||||
rt.Logger.Debug("skip-deletion metadata key found, skipping deletion of telemetry-state resource")
|
||||
return nil
|
||||
}
|
||||
|
||||
if _, err := rt.Client.Delete(ctx, &pbresource.DeleteRequest{Id: resp.GetResource().GetId()}); err != nil {
|
||||
rt.Logger.Error("error deleting telemetry-state resource", "error", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeTelemetryStateIfUpdated(ctx context.Context, rt controller.Runtime, state *pbhcp.TelemetryState) error {
|
||||
currentState, err := getTelemetryStateResource(ctx, rt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if currentState != nil && proto.Equal(currentState.GetData(), state) {
|
||||
return nil
|
||||
}
|
||||
|
||||
stateData, err := anypb.New(state)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = rt.Client.Write(ctx, &pbresource.WriteRequest{Resource: &pbresource.Resource{
|
||||
Id: &pbresource.ID{
|
||||
Name: "global",
|
||||
Type: pbhcp.TelemetryStateType,
|
||||
},
|
||||
Data: stateData,
|
||||
}})
|
||||
return err
|
||||
}
|
||||
|
||||
func getGlobalResource(ctx context.Context, rt controller.Runtime, t *pbresource.Type) (*pbresource.Resource, error) {
|
||||
resp, err := rt.Client.Read(ctx, &pbresource.ReadRequest{Id: &pbresource.ID{Name: "global", Type: t}})
|
||||
switch {
|
||||
case status.Code(err) == codes.NotFound:
|
||||
return nil, nil
|
||||
case err != nil:
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return resp.GetResource(), nil
|
||||
}
|
||||
|
||||
// getLinkResource returns the cluster scoped pbhcp.Link resource. If the resource is not found a nil
|
||||
// pointer and no error will be returned.
|
||||
func getLinkResource(ctx context.Context, rt controller.Runtime) (*types.DecodedLink, error) {
|
||||
res, err := getGlobalResource(ctx, rt, pbhcp.LinkType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if res == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return resource.Decode[*pbhcp.Link](res)
|
||||
}
|
||||
|
||||
func getTelemetryStateResource(ctx context.Context, rt controller.Runtime) (*types.DecodedTelemetryState, error) {
|
||||
res, err := getGlobalResource(ctx, rt, pbhcp.TelemetryStateType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if res == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return resource.Decode[*pbhcp.TelemetryState](res)
|
||||
}
|
|
@ -1,174 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package telemetrystate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
|
||||
hcpclient "github.com/hashicorp/consul/agent/hcp/client"
|
||||
"github.com/hashicorp/consul/agent/hcp/config"
|
||||
"github.com/hashicorp/consul/internal/controller"
|
||||
"github.com/hashicorp/consul/internal/hcp/internal/controllers/link"
|
||||
"github.com/hashicorp/consul/internal/hcp/internal/types"
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
|
||||
pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
)
|
||||
|
||||
type controllerSuite struct {
|
||||
suite.Suite
|
||||
|
||||
ctx context.Context
|
||||
client *rtest.Client
|
||||
rt controller.Runtime
|
||||
|
||||
ctl *controller.TestController
|
||||
tenancies []*pbresource.Tenancy
|
||||
|
||||
hcpMock *hcpclient.MockClient
|
||||
}
|
||||
|
||||
func mockHcpClientFn(t *testing.T) (*hcpclient.MockClient, link.HCPClientFn) {
|
||||
mockClient := hcpclient.NewMockClient(t)
|
||||
|
||||
mockClientFunc := func(link config.CloudConfig) (hcpclient.Client, error) {
|
||||
return mockClient, nil
|
||||
}
|
||||
|
||||
return mockClient, mockClientFunc
|
||||
}
|
||||
|
||||
func (suite *controllerSuite) SetupTest() {
|
||||
suite.ctx = testutil.TestContext(suite.T())
|
||||
suite.tenancies = rtest.TestTenancies()
|
||||
client := svctest.NewResourceServiceBuilder().
|
||||
WithRegisterFns(types.Register).
|
||||
WithTenancies(suite.tenancies...).
|
||||
Run(suite.T())
|
||||
|
||||
hcpMock, hcpClientFn := mockHcpClientFn(suite.T())
|
||||
suite.hcpMock = hcpMock
|
||||
suite.ctl = controller.NewTestController(TelemetryStateController(hcpClientFn), client).
|
||||
WithLogger(testutil.Logger(suite.T()))
|
||||
|
||||
suite.rt = suite.ctl.Runtime()
|
||||
suite.client = rtest.NewClient(client)
|
||||
}
|
||||
|
||||
func TestTelemetryStateController(t *testing.T) {
|
||||
suite.Run(t, new(controllerSuite))
|
||||
}
|
||||
|
||||
func (suite *controllerSuite) deleteResourceFunc(id *pbresource.ID) func() {
|
||||
return func() {
|
||||
suite.client.MustDelete(suite.T(), id)
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *controllerSuite) TestController_Ok() {
|
||||
// Run the controller manager
|
||||
mgr := controller.NewManager(suite.client, suite.rt.Logger)
|
||||
mockClient, mockClientFn := mockHcpClientFn(suite.T())
|
||||
mockClient.EXPECT().FetchTelemetryConfig(mock.Anything).Return(&hcpclient.TelemetryConfig{
|
||||
MetricsConfig: &hcpclient.MetricsConfig{
|
||||
Endpoint: &url.URL{
|
||||
Scheme: "http",
|
||||
Host: "localhost",
|
||||
Path: "/test",
|
||||
},
|
||||
Labels: map[string]string{"foo": "bar"},
|
||||
Filters: regexp.MustCompile(".*"),
|
||||
},
|
||||
RefreshConfig: &hcpclient.RefreshConfig{},
|
||||
}, nil)
|
||||
mockClient.EXPECT().GetObservabilitySecret(mock.Anything).Return("xxx", "yyy", nil)
|
||||
mgr.Register(TelemetryStateController(mockClientFn))
|
||||
mgr.SetRaftLeader(true)
|
||||
go mgr.Run(suite.ctx)
|
||||
|
||||
link := suite.writeLinkResource()
|
||||
|
||||
tsRes := suite.client.WaitForResourceExists(suite.T(), &pbresource.ID{Name: "global", Type: pbhcp.TelemetryStateType})
|
||||
decodedState, err := resource.Decode[*pbhcp.TelemetryState](tsRes)
|
||||
require.NoError(suite.T(), err)
|
||||
require.Equal(suite.T(), link.GetData().GetResourceId(), decodedState.GetData().ResourceId)
|
||||
require.Equal(suite.T(), "xxx", decodedState.GetData().ClientId)
|
||||
require.Equal(suite.T(), "http://localhost/test", decodedState.GetData().Metrics.Endpoint)
|
||||
|
||||
suite.client.MustDelete(suite.T(), link.Id)
|
||||
suite.client.WaitForDeletion(suite.T(), tsRes.Id)
|
||||
}
|
||||
|
||||
func (suite *controllerSuite) TestReconcile_AvoidReconciliationWriteLoop() {
|
||||
suite.hcpMock.EXPECT().FetchTelemetryConfig(mock.Anything).Return(&hcpclient.TelemetryConfig{
|
||||
MetricsConfig: &hcpclient.MetricsConfig{
|
||||
Endpoint: &url.URL{
|
||||
Scheme: "http",
|
||||
Host: "localhost",
|
||||
Path: "/test",
|
||||
},
|
||||
Labels: map[string]string{"foo": "bar"},
|
||||
Filters: regexp.MustCompile(".*"),
|
||||
},
|
||||
RefreshConfig: &hcpclient.RefreshConfig{},
|
||||
}, nil)
|
||||
link := suite.writeLinkResource()
|
||||
suite.hcpMock.EXPECT().GetObservabilitySecret(mock.Anything).Return("xxx", "yyy", nil)
|
||||
suite.NoError(suite.ctl.Reconcile(context.Background(), controller.Request{ID: link.Id}))
|
||||
tsRes := suite.client.WaitForResourceExists(suite.T(), &pbresource.ID{Name: "global", Type: pbhcp.TelemetryStateType})
|
||||
suite.NoError(suite.ctl.Reconcile(context.Background(), controller.Request{ID: tsRes.Id}))
|
||||
suite.client.RequireVersionUnchanged(suite.T(), tsRes.Id, tsRes.Version)
|
||||
}
|
||||
|
||||
func (suite *controllerSuite) TestController_LinkingDisabled() {
|
||||
// Run the controller manager
|
||||
mgr := controller.NewManager(suite.client, suite.rt.Logger)
|
||||
_, mockClientFn := mockHcpClientFn(suite.T())
|
||||
mgr.Register(TelemetryStateController(mockClientFn))
|
||||
mgr.SetRaftLeader(true)
|
||||
go mgr.Run(suite.ctx)
|
||||
|
||||
linkData := &pbhcp.Link{
|
||||
ClientId: "abc",
|
||||
ClientSecret: "abc",
|
||||
ResourceId: types.GenerateTestResourceID(suite.T()),
|
||||
}
|
||||
|
||||
rtest.Resource(pbhcp.LinkType, "global").
|
||||
WithData(suite.T(), linkData).
|
||||
WithStatus(link.StatusKey, &pbresource.Status{Conditions: []*pbresource.Condition{link.ConditionDisabled}}).
|
||||
Write(suite.T(), suite.client)
|
||||
|
||||
suite.client.WaitForDeletion(suite.T(), &pbresource.ID{Name: "global", Type: pbhcp.TelemetryStateType})
|
||||
}
|
||||
|
||||
func (suite *controllerSuite) writeLinkResource() *types.DecodedLink {
|
||||
suite.T().Helper()
|
||||
|
||||
linkData := &pbhcp.Link{
|
||||
ClientId: "abc",
|
||||
ClientSecret: "abc",
|
||||
ResourceId: types.GenerateTestResourceID(suite.T()),
|
||||
}
|
||||
|
||||
res := rtest.Resource(pbhcp.LinkType, "global").
|
||||
WithData(suite.T(), linkData).
|
||||
WithStatus(link.StatusKey, &pbresource.Status{Conditions: []*pbresource.Condition{link.ConditionLinked(linkData.ResourceId)}}).
|
||||
Write(suite.T(), suite.client)
|
||||
|
||||
suite.T().Cleanup(suite.deleteResourceFunc(res.Id))
|
||||
link, err := resource.Decode[*pbhcp.Link](res)
|
||||
require.NoError(suite.T(), err)
|
||||
return link
|
||||
}
|
|
@ -1,8 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package telemetrystate
|
||||
|
||||
const (
|
||||
StatusKey = "consul.io/hcp/telemetry-state"
|
||||
)
|
|
@ -1,117 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
hcpresource "github.com/hashicorp/hcp-sdk-go/resource"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
)
|
||||
|
||||
type DecodedLink = resource.DecodedResource[*pbhcp.Link]
|
||||
|
||||
const (
|
||||
LinkName = "global"
|
||||
MetadataSourceKey = "source"
|
||||
MetadataSourceConfig = "config"
|
||||
)
|
||||
|
||||
var (
|
||||
errLinkConfigurationName = errors.New("only a single Link resource is allowed and it must be named global")
|
||||
errInvalidHCPResourceID = errors.New("could not parse, invalid format")
|
||||
)
|
||||
|
||||
func RegisterLink(r resource.Registry) {
|
||||
r.Register(resource.Registration{
|
||||
Type: pbhcp.LinkType,
|
||||
Proto: &pbhcp.Link{},
|
||||
Scope: resource.ScopeCluster,
|
||||
Validate: ValidateLink,
|
||||
ACLs: &resource.ACLHooks{
|
||||
Read: aclReadHookLink,
|
||||
Write: aclWriteHookLink,
|
||||
List: aclListHookLink,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func aclReadHookLink(authorizer acl.Authorizer, authzContext *acl.AuthorizerContext, _ *pbresource.ID, _ *pbresource.Resource) error {
|
||||
err := authorizer.ToAllowAuthorizer().OperatorReadAllowed(authzContext)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func aclWriteHookLink(authorizer acl.Authorizer, authzContext *acl.AuthorizerContext, _ *pbresource.Resource) error {
|
||||
err := authorizer.ToAllowAuthorizer().OperatorWriteAllowed(authzContext)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = authorizer.ToAllowAuthorizer().ACLWriteAllowed(authzContext)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func aclListHookLink(authorizer acl.Authorizer, authzContext *acl.AuthorizerContext) error {
|
||||
err := authorizer.ToAllowAuthorizer().OperatorReadAllowed(authzContext)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var ValidateLink = resource.DecodeAndValidate(validateLink)
|
||||
|
||||
func validateLink(res *DecodedLink) error {
|
||||
var err error
|
||||
|
||||
if res.Id.Name != LinkName {
|
||||
err = multierror.Append(err, resource.ErrInvalidField{
|
||||
Name: "name",
|
||||
Wrapped: errLinkConfigurationName,
|
||||
})
|
||||
}
|
||||
|
||||
if res.Data.ClientId == "" {
|
||||
err = multierror.Append(err, resource.ErrInvalidField{
|
||||
Name: "client_id",
|
||||
Wrapped: resource.ErrMissing,
|
||||
})
|
||||
}
|
||||
|
||||
if res.Data.ClientSecret == "" {
|
||||
err = multierror.Append(err, resource.ErrInvalidField{
|
||||
Name: "client_secret",
|
||||
Wrapped: resource.ErrMissing,
|
||||
})
|
||||
}
|
||||
|
||||
if res.Data.ResourceId == "" {
|
||||
err = multierror.Append(err, resource.ErrInvalidField{
|
||||
Name: "resource_id",
|
||||
Wrapped: resource.ErrMissing,
|
||||
})
|
||||
} else {
|
||||
_, parseErr := hcpresource.FromString(res.Data.ResourceId)
|
||||
if parseErr != nil {
|
||||
err = multierror.Append(err, resource.ErrInvalidField{
|
||||
Name: "resource_id",
|
||||
Wrapped: errInvalidHCPResourceID,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
|
@ -1,205 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/types/known/anypb"
|
||||
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
|
||||
pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
)
|
||||
|
||||
func createCloudLinkResource(t *testing.T, data protoreflect.ProtoMessage) *pbresource.Resource {
|
||||
res := &pbresource.Resource{
|
||||
Id: &pbresource.ID{
|
||||
Type: pbhcp.LinkType,
|
||||
Name: "global",
|
||||
},
|
||||
}
|
||||
|
||||
var err error
|
||||
res.Data, err = anypb.New(data)
|
||||
require.NoError(t, err)
|
||||
return res
|
||||
}
|
||||
|
||||
func TestValidateLink_Ok(t *testing.T) {
|
||||
data := &pbhcp.Link{
|
||||
ClientId: "abc",
|
||||
ClientSecret: "abc",
|
||||
ResourceId: GenerateTestResourceID(t),
|
||||
}
|
||||
|
||||
res := createCloudLinkResource(t, data)
|
||||
|
||||
err := ValidateLink(res)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestValidateLink_ParseError(t *testing.T) {
|
||||
// Any type other than the Link type would work
|
||||
// to cause the error we are expecting
|
||||
data := &pbresource.Type{Group: "a", GroupVersion: "b", Kind: "c"}
|
||||
|
||||
res := createCloudLinkResource(t, data)
|
||||
|
||||
err := ValidateLink(res)
|
||||
require.Error(t, err)
|
||||
require.ErrorAs(t, err, &resource.ErrDataParse{})
|
||||
}
|
||||
|
||||
func TestValidateLink_InvalidName(t *testing.T) {
|
||||
data := &pbhcp.Link{
|
||||
ClientId: "abc",
|
||||
ClientSecret: "abc",
|
||||
ResourceId: GenerateTestResourceID(t),
|
||||
}
|
||||
|
||||
res := createCloudLinkResource(t, data)
|
||||
res.Id.Name = "default"
|
||||
|
||||
err := ValidateLink(res)
|
||||
|
||||
expected := resource.ErrInvalidField{
|
||||
Name: "name",
|
||||
Wrapped: errLinkConfigurationName,
|
||||
}
|
||||
|
||||
var actual resource.ErrInvalidField
|
||||
require.ErrorAs(t, err, &actual)
|
||||
require.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestValidateLink_MissingClientId(t *testing.T) {
|
||||
data := &pbhcp.Link{
|
||||
ClientId: "",
|
||||
ClientSecret: "abc",
|
||||
ResourceId: GenerateTestResourceID(t),
|
||||
}
|
||||
|
||||
res := createCloudLinkResource(t, data)
|
||||
|
||||
err := ValidateLink(res)
|
||||
|
||||
expected := resource.ErrInvalidField{
|
||||
Name: "client_id",
|
||||
Wrapped: resource.ErrMissing,
|
||||
}
|
||||
|
||||
var actual resource.ErrInvalidField
|
||||
require.ErrorAs(t, err, &actual)
|
||||
require.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestValidateLink_MissingClientSecret(t *testing.T) {
|
||||
data := &pbhcp.Link{
|
||||
ClientId: "abc",
|
||||
ClientSecret: "",
|
||||
ResourceId: GenerateTestResourceID(t),
|
||||
}
|
||||
|
||||
res := createCloudLinkResource(t, data)
|
||||
|
||||
err := ValidateLink(res)
|
||||
|
||||
expected := resource.ErrInvalidField{
|
||||
Name: "client_secret",
|
||||
Wrapped: resource.ErrMissing,
|
||||
}
|
||||
|
||||
var actual resource.ErrInvalidField
|
||||
require.ErrorAs(t, err, &actual)
|
||||
require.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestValidateLink_MissingResourceId(t *testing.T) {
|
||||
data := &pbhcp.Link{
|
||||
ClientId: "abc",
|
||||
ClientSecret: "abc",
|
||||
ResourceId: "",
|
||||
}
|
||||
|
||||
res := createCloudLinkResource(t, data)
|
||||
|
||||
err := ValidateLink(res)
|
||||
|
||||
expected := resource.ErrInvalidField{
|
||||
Name: "resource_id",
|
||||
Wrapped: resource.ErrMissing,
|
||||
}
|
||||
|
||||
var actual resource.ErrInvalidField
|
||||
require.ErrorAs(t, err, &actual)
|
||||
require.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestValidateLink_InvalidResourceId(t *testing.T) {
|
||||
data := &pbhcp.Link{
|
||||
ClientId: "abc",
|
||||
ClientSecret: "abc",
|
||||
ResourceId: "abc",
|
||||
}
|
||||
|
||||
res := createCloudLinkResource(t, data)
|
||||
|
||||
err := ValidateLink(res)
|
||||
|
||||
expected := resource.ErrInvalidField{
|
||||
Name: "resource_id",
|
||||
Wrapped: errInvalidHCPResourceID,
|
||||
}
|
||||
|
||||
var actual resource.ErrInvalidField
|
||||
require.ErrorAs(t, err, &actual)
|
||||
require.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
// Currently, we have no specific ACLs configured so the default `operator` permissions are required
|
||||
func TestLinkACLs(t *testing.T) {
|
||||
registry := resource.NewRegistry()
|
||||
RegisterLink(registry)
|
||||
|
||||
data := &pbhcp.Link{
|
||||
ClientId: "abc",
|
||||
ClientSecret: "abc",
|
||||
ResourceId: GenerateTestResourceID(t),
|
||||
}
|
||||
link := createCloudLinkResource(t, data)
|
||||
|
||||
cases := map[string]rtest.ACLTestCase{
|
||||
"no rules": {
|
||||
Rules: ``,
|
||||
Res: link,
|
||||
ReadOK: rtest.DENY,
|
||||
WriteOK: rtest.DENY,
|
||||
ListOK: rtest.DENY,
|
||||
},
|
||||
"link test read and list": {
|
||||
Rules: `{"operator": "read"}`,
|
||||
Res: link,
|
||||
ReadOK: rtest.ALLOW,
|
||||
WriteOK: rtest.DENY,
|
||||
ListOK: rtest.ALLOW,
|
||||
},
|
||||
"link test write": {
|
||||
Rules: `{"operator": "write", "acl": "write"}`,
|
||||
Res: link,
|
||||
ReadOK: rtest.ALLOW,
|
||||
WriteOK: rtest.ALLOW,
|
||||
ListOK: rtest.ALLOW,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range cases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
rtest.RunACLTestCase(t, tc, registry)
|
||||
})
|
||||
}
|
||||
}
|
|
@ -1,85 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
pbhcp "github.com/hashicorp/consul/proto-public/pbhcp/v2"
|
||||
)
|
||||
|
||||
type DecodedTelemetryState = resource.DecodedResource[*pbhcp.TelemetryState]
|
||||
|
||||
var (
|
||||
telemetryStateConfigurationNameError = errors.New("only a single Telemetry resource is allowed and it must be named global")
|
||||
)
|
||||
|
||||
func RegisterTelemetryState(r resource.Registry) {
|
||||
r.Register(resource.Registration{
|
||||
Type: pbhcp.TelemetryStateType,
|
||||
Proto: &pbhcp.TelemetryState{},
|
||||
Scope: resource.ScopeCluster,
|
||||
Validate: ValidateTelemetryState,
|
||||
})
|
||||
}
|
||||
|
||||
var ValidateTelemetryState = resource.DecodeAndValidate(validateTelemetryState)
|
||||
|
||||
func validateTelemetryState(res *DecodedTelemetryState) error {
|
||||
var err error
|
||||
|
||||
if res.GetId().GetName() != "global" {
|
||||
err = multierror.Append(err, resource.ErrInvalidField{
|
||||
Name: "name",
|
||||
Wrapped: telemetryStateConfigurationNameError,
|
||||
})
|
||||
}
|
||||
|
||||
if res.GetData().GetClientId() == "" {
|
||||
err = multierror.Append(err, resource.ErrInvalidField{
|
||||
Name: "client_id",
|
||||
Wrapped: resource.ErrMissing,
|
||||
})
|
||||
}
|
||||
|
||||
if res.GetData().GetClientSecret() == "" {
|
||||
err = multierror.Append(err, resource.ErrInvalidField{
|
||||
Name: "client_secret",
|
||||
Wrapped: resource.ErrMissing,
|
||||
})
|
||||
}
|
||||
|
||||
if res.GetData().GetResourceId() == "" {
|
||||
err = multierror.Append(err, resource.ErrInvalidField{
|
||||
Name: "resource_id",
|
||||
Wrapped: resource.ErrMissing,
|
||||
})
|
||||
}
|
||||
|
||||
if res.GetData().GetMetrics().GetEndpoint() == "" {
|
||||
err = multierror.Append(err, resource.ErrInvalidField{
|
||||
Name: "metrics.endpoint",
|
||||
Wrapped: resource.ErrMissing,
|
||||
})
|
||||
}
|
||||
|
||||
if res.GetData().GetMetrics().GetIncludeList() == nil {
|
||||
err = multierror.Append(err, resource.ErrInvalidField{
|
||||
Name: "metrics.include_list",
|
||||
Wrapped: resource.ErrMissing,
|
||||
})
|
||||
}
|
||||
|
||||
if res.GetData().GetMetrics().GetLabels() == nil {
|
||||
err = multierror.Append(err, resource.ErrInvalidField{
|
||||
Name: "metrics.labels",
|
||||
Wrapped: resource.ErrMissing,
|
||||
})
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
|
@ -1,23 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func GenerateTestResourceID(t *testing.T) string {
|
||||
orgID, err := uuid.GenerateUUID()
|
||||
require.NoError(t, err)
|
||||
|
||||
projectID, err := uuid.GenerateUUID()
|
||||
require.NoError(t, err)
|
||||
|
||||
template := "organization/%s/project/%s/hashicorp.consul.global-network-manager.cluster/test-cluster"
|
||||
return fmt.Sprintf(template, orgID, projectID)
|
||||
}
|
|
@ -1,11 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package types
|
||||
|
||||
import "github.com/hashicorp/consul/internal/resource"
|
||||
|
||||
func Register(r resource.Registry) {
|
||||
RegisterLink(r)
|
||||
RegisterTelemetryState(r)
|
||||
}
|
|
@ -1,109 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package resource
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/go-bexpr"
|
||||
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
)
|
||||
|
||||
type MetadataFilterableResources interface {
|
||||
GetMetadata() map[string]string
|
||||
}
|
||||
|
||||
// FilterResourcesByMetadata will use the provided go-bexpr based filter to
|
||||
// retain matching items from the provided slice.
|
||||
//
|
||||
// The only variables usable in the expressions are the metadata keys prefixed
|
||||
// by "metadata."
|
||||
//
|
||||
// If no filter is provided, then this does nothing and returns the input.
|
||||
func FilterResourcesByMetadata[T MetadataFilterableResources](resources []T, filter string) ([]T, error) {
|
||||
if filter == "" || len(resources) == 0 {
|
||||
return resources, nil
|
||||
}
|
||||
|
||||
eval, err := createMetadataFilterEvaluator(filter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
filtered := make([]T, 0, len(resources))
|
||||
for _, res := range resources {
|
||||
vars := &metadataFilterFieldDetails{
|
||||
Meta: res.GetMetadata(),
|
||||
}
|
||||
match, err := eval.Evaluate(vars)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if match {
|
||||
filtered = append(filtered, res)
|
||||
}
|
||||
}
|
||||
if len(filtered) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
return filtered, nil
|
||||
}
|
||||
|
||||
// FilterMatchesResourceMetadata will use the provided go-bexpr based filter to
|
||||
// determine if the provided resource matches.
|
||||
//
|
||||
// The only variables usable in the expressions are the metadata keys prefixed
|
||||
// by "metadata."
|
||||
//
|
||||
// If no filter is provided, then this returns true.
|
||||
func FilterMatchesResourceMetadata(res *pbresource.Resource, filter string) (bool, error) {
|
||||
if res == nil {
|
||||
return false, nil
|
||||
} else if filter == "" {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
eval, err := createMetadataFilterEvaluator(filter)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
vars := &metadataFilterFieldDetails{
|
||||
Meta: res.Metadata,
|
||||
}
|
||||
match, err := eval.Evaluate(vars)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return match, nil
|
||||
}
|
||||
|
||||
// ValidateMetadataFilter will validate that the provided filter is going to be
|
||||
// a valid input to the FilterResourcesByMetadata function.
|
||||
//
|
||||
// This is best called from a Validate hook.
|
||||
func ValidateMetadataFilter(filter string) error {
|
||||
if filter == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
_, err := createMetadataFilterEvaluator(filter)
|
||||
return err
|
||||
}
|
||||
|
||||
func createMetadataFilterEvaluator(filter string) (*bexpr.Evaluator, error) {
|
||||
sampleVars := &metadataFilterFieldDetails{
|
||||
Meta: make(map[string]string),
|
||||
}
|
||||
eval, err := bexpr.CreateEvaluatorForType(filter, nil, sampleVars)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("filter %q is invalid: %w", filter, err)
|
||||
}
|
||||
return eval, nil
|
||||
}
|
||||
|
||||
// metadataFilterFieldDetails is the variable root handed to go-bexpr
// evaluators: the resource's metadata map is addressable in filter
// expressions as "metadata.<key>".
type metadataFilterFieldDetails struct {
	Meta map[string]string `bexpr:"metadata"`
}
|
|
@ -1,195 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package resource
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
"github.com/hashicorp/consul/proto/private/prototest"
|
||||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
)
|
||||
|
||||
// TestFilterResourcesByMetadata covers FilterResourcesByMetadata: empty
// filters pass the input through, invalid expressions error, and matching is
// checked for both equality and regex ("matches") expressions.
func TestFilterResourcesByMetadata(t *testing.T) {
	type testcase struct {
		in        []*pbresource.Resource
		filter    string
		expect    []*pbresource.Resource
		expectErr string
	}

	// create returns a resource named "name" whose metadata is built from the
	// alternating key/value pairs in kvs.
	create := func(name string, kvs ...string) *pbresource.Resource {
		require.True(t, len(kvs)%2 == 0)

		meta := make(map[string]string)
		for i := 0; i < len(kvs); i += 2 {
			meta[kvs[i]] = kvs[i+1]
		}

		return &pbresource.Resource{
			Id: &pbresource.ID{
				Name: name,
			},
			Metadata: meta,
		}
	}

	run := func(t *testing.T, tc testcase) {
		got, err := FilterResourcesByMetadata(tc.in, tc.filter)
		if tc.expectErr != "" {
			require.Error(t, err)
			testutil.RequireErrorContains(t, err, tc.expectErr)
		} else {
			require.NoError(t, err)
			prototest.AssertDeepEqual(t, tc.expect, got)
		}
	}

	cases := map[string]testcase{
		"nil input": {},
		"no filter": {
			in: []*pbresource.Resource{
				create("one"),
				create("two"),
				create("three"),
				create("four"),
			},
			filter: "",
			expect: []*pbresource.Resource{
				create("one"),
				create("two"),
				create("three"),
				create("four"),
			},
		},
		"bad filter": {
			in: []*pbresource.Resource{
				create("one"),
				create("two"),
				create("three"),
				create("four"),
			},
			filter:    "garbage.value == zzz",
			expectErr: `Selector "garbage" is not valid`,
		},
		// expect is deliberately left nil here: no resource matches.
		"filter everything out": {
			in: []*pbresource.Resource{
				create("one"),
				create("two"),
				create("three"),
				create("four"),
			},
			filter: "metadata.foo == bar",
		},
		"filter simply": {
			in: []*pbresource.Resource{
				create("one", "foo", "bar"),
				create("two", "foo", "baz"),
				create("three", "zim", "gir"),
				create("four", "zim", "gaz", "foo", "bar"),
			},
			filter: "metadata.foo == bar",
			expect: []*pbresource.Resource{
				create("one", "foo", "bar"),
				create("four", "zim", "gaz", "foo", "bar"),
			},
		},
		"filter prefix": {
			in: []*pbresource.Resource{
				create("one", "foo", "bar"),
				create("two", "foo", "baz"),
				create("three", "zim", "gir"),
				create("four", "zim", "gaz", "foo", "bar"),
				create("four", "zim", "zzz"),
			},
			filter: "(zim in metadata) and (metadata.zim matches `^g.`)",
			expect: []*pbresource.Resource{
				create("three", "zim", "gir"),
				create("four", "zim", "gaz", "foo", "bar"),
			},
		},
	}

	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			run(t, tc)
		})
	}
}
|
||||
|
||||
// TestFilterMatchesResourceMetadata covers FilterMatchesResourceMetadata:
// nil resources never match, an empty filter always matches, invalid
// expressions error, and both equality and regex matching are exercised.
func TestFilterMatchesResourceMetadata(t *testing.T) {
	type testcase struct {
		res       *pbresource.Resource
		filter    string
		expect    bool
		expectErr string
	}

	// create returns a resource named "name" whose metadata is built from the
	// alternating key/value pairs in kvs.
	create := func(name string, kvs ...string) *pbresource.Resource {
		require.True(t, len(kvs)%2 == 0)

		meta := make(map[string]string)
		for i := 0; i < len(kvs); i += 2 {
			meta[kvs[i]] = kvs[i+1]
		}

		return &pbresource.Resource{
			Id: &pbresource.ID{
				Name: name,
			},
			Metadata: meta,
		}
	}

	run := func(t *testing.T, tc testcase) {
		got, err := FilterMatchesResourceMetadata(tc.res, tc.filter)
		if tc.expectErr != "" {
			require.Error(t, err)
			testutil.RequireErrorContains(t, err, tc.expectErr)
		} else {
			require.NoError(t, err)
			require.Equal(t, tc.expect, got)
		}
	}

	cases := map[string]testcase{
		// nil resource + empty filter: expect defaults to false.
		"nil input": {},
		"no filter": {
			res:    create("one"),
			filter: "",
			expect: true,
		},
		"bad filter": {
			res:       create("one"),
			filter:    "garbage.value == zzz",
			expectErr: `Selector "garbage" is not valid`,
		},
		"no match": {
			res:    create("one"),
			filter: "metadata.foo == bar",
		},
		"match simply": {
			res:    create("one", "foo", "bar"),
			filter: "metadata.foo == bar",
			expect: true,
		},
		"match via prefix": {
			res:    create("four", "zim", "gaz", "foo", "bar"),
			filter: "(zim in metadata) and (metadata.zim matches `^g.`)",
			expect: true,
		},
		"no match via prefix": {
			res:    create("four", "zim", "zzz", "foo", "bar"),
			filter: "(zim in metadata) and (metadata.zim matches `^g.`)",
		},
	}

	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			run(t, tc)
		})
	}
}
|
|
@ -14,10 +14,10 @@ import (
|
|||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
"github.com/hashicorp/consul/internal/storage"
|
||||
"github.com/hashicorp/consul/internal/storage/inmem"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
|
||||
pbstorage "github.com/hashicorp/consul/proto/private/pbstorage"
|
||||
)
|
||||
|
||||
|
@ -53,7 +53,7 @@ func NewBackend(h Handle, l hclog.Logger) (*Backend, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b := &Backend{handle: h, store: s}
|
||||
b := &Backend{handle: h, store: s, logger: l}
|
||||
b.forwardingServer = newForwardingServer(b)
|
||||
b.forwardingClient = newForwardingClient(h, l)
|
||||
return b, nil
|
||||
|
@ -80,6 +80,7 @@ type Handle interface {
|
|||
type Backend struct {
|
||||
handle Handle
|
||||
store *inmem.Store
|
||||
logger hclog.Logger
|
||||
|
||||
forwardingServer *forwardingServer
|
||||
forwardingClient *forwardingClient
|
||||
|
@ -225,6 +226,24 @@ func (b *Backend) ListByOwner(_ context.Context, id *pbresource.ID) ([]*pbresour
|
|||
return b.store.ListByOwner(id)
|
||||
}
|
||||
|
||||
// isRetiredType ensures that types that have been formally retired (deprecated
|
||||
// and deleted) do not sneak back in during a snapshot restore.
|
||||
func isRetiredType(typ *pbresource.Type) bool {
|
||||
switch typ.GetGroupVersion() {
|
||||
case "v2":
|
||||
switch typ.GetGroup() {
|
||||
case "hcp":
|
||||
return true
|
||||
}
|
||||
case "v2beta1":
|
||||
switch typ.GetGroup() {
|
||||
case "auth", "catalog", "mesh", "multicluster", "tenancy":
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Apply is called by the FSM with the bytes of a Raft log entry, with Consul's
|
||||
// envelope (i.e. type prefix and msgpack wrapper) stripped off.
|
||||
func (b *Backend) Apply(buf []byte, idx uint64) any {
|
||||
|
@ -239,8 +258,18 @@ func (b *Backend) Apply(buf []byte, idx uint64) any {
|
|||
oldVsn := res.Version
|
||||
res.Version = strconv.Itoa(int(idx))
|
||||
|
||||
if err := b.store.WriteCAS(res, oldVsn); err != nil {
|
||||
return err
|
||||
if isRetiredType(res.GetId().GetType()) {
|
||||
// When a type is retired, the caller should think that the write
|
||||
// was applied, but we should simply skip loading it. This means
|
||||
// that retired types will not linger in the database indefinitely.
|
||||
b.logger.Warn("ignoring operation for retired type",
|
||||
"operation", "apply",
|
||||
"type", resource.ToGVK(res.GetId().GetType()),
|
||||
)
|
||||
} else {
|
||||
if err := b.store.WriteCAS(res, oldVsn); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return &pbstorage.LogResponse{
|
||||
|
@ -250,8 +279,19 @@ func (b *Backend) Apply(buf []byte, idx uint64) any {
|
|||
}
|
||||
case pbstorage.LogType_LOG_TYPE_DELETE:
|
||||
req := req.GetDelete()
|
||||
if err := b.store.DeleteCAS(req.Id, req.Version); err != nil {
|
||||
return err
|
||||
|
||||
if isRetiredType(req.GetId().GetType()) {
|
||||
// When a type is retired, the caller should think that the write
|
||||
// was applied, but we should simply skip loading it. This means
|
||||
// that retired types will not linger in the database indefinitely.
|
||||
b.logger.Warn("ignoring operation for retired type",
|
||||
"operation", "delete",
|
||||
"type", resource.ToGVK(req.GetId().GetType()),
|
||||
)
|
||||
} else {
|
||||
if err := b.store.DeleteCAS(req.Id, req.Version); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return &pbstorage.LogResponse{
|
||||
Response: &pbstorage.LogResponse_Delete{},
|
||||
|
|
|
@ -0,0 +1,392 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package raft
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
)
|
||||
|
||||
func TestIsRetiredType(t *testing.T) {
|
||||
var retired []*pbresource.Type
|
||||
{
|
||||
const (
|
||||
GroupName = "hcp"
|
||||
Version = "v2"
|
||||
|
||||
LinkKind = "Link"
|
||||
TelemetryStateKind = "TelemetryState"
|
||||
)
|
||||
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: LinkKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: TelemetryStateKind,
|
||||
})
|
||||
}
|
||||
{
|
||||
const (
|
||||
GroupName = "tenancy"
|
||||
Version = "v2beta1"
|
||||
|
||||
NamespaceKind = "Namespace"
|
||||
PartitionKind = "Partition"
|
||||
)
|
||||
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: NamespaceKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: PartitionKind,
|
||||
})
|
||||
}
|
||||
{
|
||||
const (
|
||||
GroupName = "multicluster"
|
||||
Version = "v2beta1"
|
||||
|
||||
SamenessGroupKind = "SamenessGroup"
|
||||
)
|
||||
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: SamenessGroupKind,
|
||||
})
|
||||
}
|
||||
{
|
||||
const (
|
||||
GroupName = "mesh"
|
||||
Version = "v2beta1"
|
||||
|
||||
APIGatewayKind = "APIGateway"
|
||||
ComputedExplicitDestinationsKind = "ComputedExplicitDestinations"
|
||||
ComputedGatewayRoutesKind = "ComputedGatewayRoutes"
|
||||
ComputedImplicitDestinationsKind = "ComputedImplicitDestinations"
|
||||
ComputedProxyConfigurationKind = "ComputedProxyConfiguration"
|
||||
ComputedRoutesKind = "ComputedRoutes"
|
||||
DestinationPolicyKind = "DestinationPolicy"
|
||||
DestinationsKind = "Destinations"
|
||||
DestinationsConfigurationKind = "DestinationsConfiguration"
|
||||
GRPCRouteKind = "GRPCRoute"
|
||||
HTTPRouteKind = "HTTPRoute"
|
||||
MeshConfigurationKind = "MeshConfiguration"
|
||||
MeshGatewayKind = "MeshGateway"
|
||||
ProxyConfigurationKind = "ProxyConfiguration"
|
||||
ProxyStateTemplateKind = "ProxyStateTemplate"
|
||||
TCPRouteKind = "TCPRoute"
|
||||
)
|
||||
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: APIGatewayKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: ComputedExplicitDestinationsKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: ComputedGatewayRoutesKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: ComputedImplicitDestinationsKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: ComputedProxyConfigurationKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: ComputedRoutesKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: DestinationPolicyKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: DestinationsKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: DestinationsConfigurationKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: GRPCRouteKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: HTTPRouteKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: MeshConfigurationKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: MeshGatewayKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: ProxyConfigurationKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: ProxyStateTemplateKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: TCPRouteKind,
|
||||
})
|
||||
}
|
||||
{
|
||||
const (
|
||||
GroupName = "auth"
|
||||
Version = "v2beta1"
|
||||
|
||||
ComputedTrafficPermissionsKind = "ComputedTrafficPermissions"
|
||||
NamespaceTrafficPermissionsKind = "NamespaceTrafficPermissions"
|
||||
PartitionTrafficPermissionsKind = "PartitionTrafficPermissions"
|
||||
TrafficPermissionsKind = "TrafficPermissions"
|
||||
WorkloadIdentityKind = "WorkloadIdentity"
|
||||
)
|
||||
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: ComputedTrafficPermissionsKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: NamespaceTrafficPermissionsKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: PartitionTrafficPermissionsKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: TrafficPermissionsKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: WorkloadIdentityKind,
|
||||
})
|
||||
}
|
||||
{
|
||||
const (
|
||||
GroupName = "catalog"
|
||||
Version = "v2beta1"
|
||||
|
||||
ComputedFailoverPolicyKind = "ComputedFailoverPolicy"
|
||||
FailoverPolicyKind = "FailoverPolicy"
|
||||
HealthChecksKind = "HealthChecks"
|
||||
HealthStatusKind = "HealthStatus"
|
||||
NodeKind = "Node"
|
||||
NodeHealthStatusKind = "NodeHealthStatus"
|
||||
ServiceKind = "Service"
|
||||
ServiceEndpointsKind = "ServiceEndpoints"
|
||||
VirtualIPsKind = "VirtualIPs"
|
||||
WorkloadKind = "Workload"
|
||||
)
|
||||
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: ComputedFailoverPolicyKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: FailoverPolicyKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: HealthChecksKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: HealthStatusKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: NodeKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: NodeHealthStatusKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: ServiceKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: ServiceEndpointsKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: VirtualIPsKind,
|
||||
})
|
||||
retired = append(retired, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: WorkloadKind,
|
||||
})
|
||||
}
|
||||
/*
|
||||
*/
|
||||
|
||||
var retained []*pbresource.Type
|
||||
{
|
||||
const (
|
||||
GroupName = "demo"
|
||||
Version = "v2"
|
||||
|
||||
AlbumKind = "Album"
|
||||
ArtistKind = "Artist"
|
||||
FestivalKind = "Festival"
|
||||
)
|
||||
|
||||
retained = append(retained, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: AlbumKind,
|
||||
})
|
||||
retained = append(retained, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: ArtistKind,
|
||||
})
|
||||
retained = append(retained, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: FestivalKind,
|
||||
})
|
||||
}
|
||||
{
|
||||
const (
|
||||
GroupName = "demo"
|
||||
Version = "v1"
|
||||
|
||||
AlbumKind = "Album"
|
||||
ArtistKind = "Artist"
|
||||
ConceptKind = "Concept"
|
||||
ExecutiveKind = "Executive"
|
||||
RecordLabelKind = "RecordLabel"
|
||||
)
|
||||
|
||||
retained = append(retained, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: AlbumKind,
|
||||
})
|
||||
retained = append(retained, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: ArtistKind,
|
||||
})
|
||||
retained = append(retained, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: ConceptKind,
|
||||
})
|
||||
retained = append(retained, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: ExecutiveKind,
|
||||
})
|
||||
retained = append(retained, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: RecordLabelKind,
|
||||
})
|
||||
}
|
||||
{
|
||||
const (
|
||||
GroupName = "multicluster"
|
||||
Version = "v2"
|
||||
|
||||
ComputedExportedServicesKind = "ComputedExportedServices"
|
||||
ExportedServicesKind = "ExportedServices"
|
||||
NamespaceExportedServicesKind = "NamespaceExportedServices"
|
||||
PartitionExportedServicesKind = "PartitionExportedServices"
|
||||
)
|
||||
|
||||
retained = append(retained, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: ComputedExportedServicesKind,
|
||||
})
|
||||
retained = append(retained, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: ExportedServicesKind,
|
||||
})
|
||||
retained = append(retained, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: NamespaceExportedServicesKind,
|
||||
})
|
||||
retained = append(retained, &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: PartitionExportedServicesKind,
|
||||
})
|
||||
}
|
||||
|
||||
for _, typ := range retired {
|
||||
t.Run("gone - "+resource.ToGVK(typ), func(t *testing.T) {
|
||||
require.True(t, isRetiredType(typ))
|
||||
})
|
||||
}
|
||||
for _, typ := range retained {
|
||||
t.Run("allowed - "+resource.ToGVK(typ), func(t *testing.T) {
|
||||
require.False(t, isRetiredType(typ))
|
||||
})
|
||||
}
|
||||
}
|
|
@ -80,3 +80,17 @@ func CloneStringSlice(s []string) []string {
|
|||
copy(out, s)
|
||||
return out
|
||||
}
|
||||
|
||||
// EqualMapKeys returns true if the slice equals the keys of
// the map ignoring any ordering.
//
// Duplicate entries in the slice are rejected: a slice containing a repeated
// key can never equal the key set of a map, even when the lengths match.
func EqualMapKeys[V any](a []string, b map[string]V) bool {
	if len(a) != len(b) {
		return false
	}
	// Track keys already seen so a duplicate in the slice (which would
	// otherwise slip past the length check, e.g. ["a","a"] vs {"a":_, "b":_})
	// is detected.
	seen := make(map[string]struct{}, len(a))
	for _, key := range a {
		if _, ok := b[key]; !ok {
			return false
		}
		if _, dup := seen[key]; dup {
			return false
		}
		seen[key] = struct{}{}
	}
	return true
}
|
||||
|
|
|
@ -63,3 +63,28 @@ func TestMergeSorted(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEqualMapKeys(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
a []string
|
||||
b map[string]int
|
||||
same bool
|
||||
}{
|
||||
// same
|
||||
{nil, nil, true},
|
||||
{[]string{}, nil, true},
|
||||
{nil, map[string]int{}, true},
|
||||
{[]string{}, map[string]int{}, true},
|
||||
{[]string{"a"}, map[string]int{"a": 1}, true},
|
||||
{[]string{"b", "a"}, map[string]int{"a": 1, "b": 1}, true},
|
||||
// different
|
||||
{[]string{"a"}, map[string]int{}, false},
|
||||
{[]string{}, map[string]int{"a": 1}, false},
|
||||
{[]string{"b", "a"}, map[string]int{"c": 1, "a": 1, "b": 1}, false},
|
||||
{[]string{"b", "a"}, map[string]int{"c": 1, "a": 1, "b": 1}, false},
|
||||
{[]string{"b", "a", "c"}, map[string]int{"a": 1, "b": 1}, false},
|
||||
} {
|
||||
got := EqualMapKeys(tc.a, tc.b)
|
||||
require.Equal(t, tc.same, got)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,18 +0,0 @@
|
|||
// Code generated by protoc-gen-go-binary. DO NOT EDIT.
|
||||
// source: pbhcp/v2/hcp_config.proto
|
||||
|
||||
package hcpv2
|
||||
|
||||
import (
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// MarshalBinary implements encoding.BinaryMarshaler
|
||||
func (msg *HCPConfig) MarshalBinary() ([]byte, error) {
|
||||
return proto.Marshal(msg)
|
||||
}
|
||||
|
||||
// UnmarshalBinary implements encoding.BinaryUnmarshaler
|
||||
func (msg *HCPConfig) UnmarshalBinary(b []byte) error {
|
||||
return proto.Unmarshal(b, msg)
|
||||
}
|
|
@ -1,199 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.33.0
|
||||
// protoc (unknown)
|
||||
// source: pbhcp/v2/hcp_config.proto
|
||||
|
||||
package hcpv2
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// HCPConfig is used to configure the HCP SDK for communicating with
|
||||
// the HashiCorp Cloud Platform. All configuration is optional with default
|
||||
// values provided by the SDK.
|
||||
type HCPConfig struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// AuthUrl is the URL which will be used to authenticate.
|
||||
AuthUrl string `protobuf:"bytes,1,opt,name=auth_url,json=authUrl,proto3" json:"auth_url,omitempty"`
|
||||
// ApiAddress is the address (<hostname>[:port]) of the HCP api.
|
||||
ApiAddress string `protobuf:"bytes,2,opt,name=api_address,json=apiAddress,proto3" json:"api_address,omitempty"`
|
||||
// ScadaAddress is the address (<hostname>[:port]) of the HCP SCADA endpoint.
|
||||
ScadaAddress string `protobuf:"bytes,3,opt,name=scada_address,json=scadaAddress,proto3" json:"scada_address,omitempty"`
|
||||
// TlsInsecureSkipVerify if true will ignore server name verification when making HTTPS requests
|
||||
TlsInsecureSkipVerify bool `protobuf:"varint,4,opt,name=tls_insecure_skip_verify,json=tlsInsecureSkipVerify,proto3" json:"tls_insecure_skip_verify,omitempty"`
|
||||
}
|
||||
|
||||
func (x *HCPConfig) Reset() {
|
||||
*x = HCPConfig{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_pbhcp_v2_hcp_config_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *HCPConfig) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*HCPConfig) ProtoMessage() {}
|
||||
|
||||
func (x *HCPConfig) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_pbhcp_v2_hcp_config_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use HCPConfig.ProtoReflect.Descriptor instead.
|
||||
func (*HCPConfig) Descriptor() ([]byte, []int) {
|
||||
return file_pbhcp_v2_hcp_config_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *HCPConfig) GetAuthUrl() string {
|
||||
if x != nil {
|
||||
return x.AuthUrl
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *HCPConfig) GetApiAddress() string {
|
||||
if x != nil {
|
||||
return x.ApiAddress
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *HCPConfig) GetScadaAddress() string {
|
||||
if x != nil {
|
||||
return x.ScadaAddress
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *HCPConfig) GetTlsInsecureSkipVerify() bool {
|
||||
if x != nil {
|
||||
return x.TlsInsecureSkipVerify
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var File_pbhcp_v2_hcp_config_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_pbhcp_v2_hcp_config_proto_rawDesc = []byte{
|
||||
0x0a, 0x19, 0x70, 0x62, 0x68, 0x63, 0x70, 0x2f, 0x76, 0x32, 0x2f, 0x68, 0x63, 0x70, 0x5f, 0x63,
|
||||
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x68, 0x61, 0x73,
|
||||
0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63,
|
||||
0x70, 0x2e, 0x76, 0x32, 0x22, 0xa5, 0x01, 0x0a, 0x09, 0x48, 0x43, 0x50, 0x43, 0x6f, 0x6e, 0x66,
|
||||
0x69, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x75, 0x74, 0x68, 0x55, 0x72, 0x6c, 0x12, 0x1f, 0x0a,
|
||||
0x0b, 0x61, 0x70, 0x69, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01,
|
||||
0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x23,
|
||||
0x0a, 0x0d, 0x73, 0x63, 0x61, 0x64, 0x61, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18,
|
||||
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x63, 0x61, 0x64, 0x61, 0x41, 0x64, 0x64, 0x72,
|
||||
0x65, 0x73, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x74, 0x6c, 0x73, 0x5f, 0x69, 0x6e, 0x73, 0x65, 0x63,
|
||||
0x75, 0x72, 0x65, 0x5f, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x18,
|
||||
0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x74, 0x6c, 0x73, 0x49, 0x6e, 0x73, 0x65, 0x63, 0x75,
|
||||
0x72, 0x65, 0x53, 0x6b, 0x69, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x42, 0xe5, 0x01, 0x0a,
|
||||
0x1b, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63,
|
||||
0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70, 0x2e, 0x76, 0x32, 0x42, 0x0e, 0x48, 0x63,
|
||||
0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37,
|
||||
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69,
|
||||
0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x68, 0x63, 0x70, 0x2f, 0x76,
|
||||
0x32, 0x3b, 0x68, 0x63, 0x70, 0x76, 0x32, 0xa2, 0x02, 0x03, 0x48, 0x43, 0x48, 0xaa, 0x02, 0x17,
|
||||
0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c,
|
||||
0x2e, 0x48, 0x63, 0x70, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x17, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63,
|
||||
0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x48, 0x63, 0x70, 0x5c, 0x56,
|
||||
0x32, 0xe2, 0x02, 0x23, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f,
|
||||
0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x48, 0x63, 0x70, 0x5c, 0x56, 0x32, 0x5c, 0x47, 0x50, 0x42, 0x4d,
|
||||
0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1a, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63,
|
||||
0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x48, 0x63, 0x70,
|
||||
0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_pbhcp_v2_hcp_config_proto_rawDescOnce sync.Once
|
||||
file_pbhcp_v2_hcp_config_proto_rawDescData = file_pbhcp_v2_hcp_config_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_pbhcp_v2_hcp_config_proto_rawDescGZIP() []byte {
|
||||
file_pbhcp_v2_hcp_config_proto_rawDescOnce.Do(func() {
|
||||
file_pbhcp_v2_hcp_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_pbhcp_v2_hcp_config_proto_rawDescData)
|
||||
})
|
||||
return file_pbhcp_v2_hcp_config_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_pbhcp_v2_hcp_config_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
|
||||
var file_pbhcp_v2_hcp_config_proto_goTypes = []interface{}{
|
||||
(*HCPConfig)(nil), // 0: hashicorp.consul.hcp.v2.HCPConfig
|
||||
}
|
||||
var file_pbhcp_v2_hcp_config_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_pbhcp_v2_hcp_config_proto_init() }
|
||||
func file_pbhcp_v2_hcp_config_proto_init() {
|
||||
if File_pbhcp_v2_hcp_config_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_pbhcp_v2_hcp_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*HCPConfig); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_pbhcp_v2_hcp_config_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 1,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_pbhcp_v2_hcp_config_proto_goTypes,
|
||||
DependencyIndexes: file_pbhcp_v2_hcp_config_proto_depIdxs,
|
||||
MessageInfos: file_pbhcp_v2_hcp_config_proto_msgTypes,
|
||||
}.Build()
|
||||
File_pbhcp_v2_hcp_config_proto = out.File
|
||||
file_pbhcp_v2_hcp_config_proto_rawDesc = nil
|
||||
file_pbhcp_v2_hcp_config_proto_goTypes = nil
|
||||
file_pbhcp_v2_hcp_config_proto_depIdxs = nil
|
||||
}
|
|
@ -1,23 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package hashicorp.consul.hcp.v2;
|
||||
|
||||
// HCPConfig is used to configure the HCP SDK for communicating with
|
||||
// the HashiCorp Cloud Platform. All configuration is optional with default
|
||||
// values provided by the SDK.
|
||||
message HCPConfig {
|
||||
// AuthUrl is the URL which will be used to authenticate.
|
||||
string auth_url = 1;
|
||||
|
||||
// ApiAddress is the address (<hostname>[:port]) of the HCP api.
|
||||
string api_address = 2;
|
||||
|
||||
// ScadaAddress is the address (<hostname>[:port]) of the HCP SCADA endpoint.
|
||||
string scada_address = 3;
|
||||
|
||||
// TlsInsecureSkipVerify if true will ignore server name verification when making HTTPS requests
|
||||
bool tls_insecure_skip_verify = 4;
|
||||
}
|
|
@ -1,27 +0,0 @@
|
|||
// Code generated by protoc-gen-deepcopy. DO NOT EDIT.
|
||||
package hcpv2
|
||||
|
||||
import (
|
||||
proto "google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// DeepCopyInto supports using HCPConfig within kubernetes types, where deepcopy-gen is used.
|
||||
func (in *HCPConfig) DeepCopyInto(out *HCPConfig) {
|
||||
proto.Reset(out)
|
||||
proto.Merge(out, proto.Clone(in))
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HCPConfig. Required by controller-gen.
|
||||
func (in *HCPConfig) DeepCopy() *HCPConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(HCPConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new HCPConfig. Required by controller-gen.
|
||||
func (in *HCPConfig) DeepCopyInterface() interface{} {
|
||||
return in.DeepCopy()
|
||||
}
|
|
@ -1,22 +0,0 @@
|
|||
// Code generated by protoc-json-shim. DO NOT EDIT.
|
||||
package hcpv2
|
||||
|
||||
import (
|
||||
protojson "google.golang.org/protobuf/encoding/protojson"
|
||||
)
|
||||
|
||||
// MarshalJSON is a custom marshaler for HCPConfig
|
||||
func (this *HCPConfig) MarshalJSON() ([]byte, error) {
|
||||
str, err := HcpConfigMarshaler.Marshal(this)
|
||||
return []byte(str), err
|
||||
}
|
||||
|
||||
// UnmarshalJSON is a custom unmarshaler for HCPConfig
|
||||
func (this *HCPConfig) UnmarshalJSON(b []byte) error {
|
||||
return HcpConfigUnmarshaler.Unmarshal(b, this)
|
||||
}
|
||||
|
||||
var (
|
||||
HcpConfigMarshaler = &protojson.MarshalOptions{}
|
||||
HcpConfigUnmarshaler = &protojson.UnmarshalOptions{DiscardUnknown: false}
|
||||
)
|
|
@ -1,18 +0,0 @@
|
|||
// Code generated by protoc-gen-go-binary. DO NOT EDIT.
|
||||
// source: pbhcp/v2/link.proto
|
||||
|
||||
package hcpv2
|
||||
|
||||
import (
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// MarshalBinary implements encoding.BinaryMarshaler
|
||||
func (msg *Link) MarshalBinary() ([]byte, error) {
|
||||
return proto.Marshal(msg)
|
||||
}
|
||||
|
||||
// UnmarshalBinary implements encoding.BinaryUnmarshaler
|
||||
func (msg *Link) UnmarshalBinary(b []byte) error {
|
||||
return proto.Unmarshal(b, msg)
|
||||
}
|
|
@ -1,283 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.33.0
|
||||
// protoc (unknown)
|
||||
// source: pbhcp/v2/link.proto
|
||||
|
||||
package hcpv2
|
||||
|
||||
import (
|
||||
_ "github.com/hashicorp/consul/proto-public/pbresource"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type AccessLevel int32
|
||||
|
||||
const (
|
||||
AccessLevel_ACCESS_LEVEL_UNSPECIFIED AccessLevel = 0
|
||||
AccessLevel_ACCESS_LEVEL_GLOBAL_READ_WRITE AccessLevel = 1
|
||||
AccessLevel_ACCESS_LEVEL_GLOBAL_READ_ONLY AccessLevel = 2
|
||||
)
|
||||
|
||||
// Enum value maps for AccessLevel.
|
||||
var (
|
||||
AccessLevel_name = map[int32]string{
|
||||
0: "ACCESS_LEVEL_UNSPECIFIED",
|
||||
1: "ACCESS_LEVEL_GLOBAL_READ_WRITE",
|
||||
2: "ACCESS_LEVEL_GLOBAL_READ_ONLY",
|
||||
}
|
||||
AccessLevel_value = map[string]int32{
|
||||
"ACCESS_LEVEL_UNSPECIFIED": 0,
|
||||
"ACCESS_LEVEL_GLOBAL_READ_WRITE": 1,
|
||||
"ACCESS_LEVEL_GLOBAL_READ_ONLY": 2,
|
||||
}
|
||||
)
|
||||
|
||||
func (x AccessLevel) Enum() *AccessLevel {
|
||||
p := new(AccessLevel)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
|
||||
func (x AccessLevel) String() string {
|
||||
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
|
||||
}
|
||||
|
||||
func (AccessLevel) Descriptor() protoreflect.EnumDescriptor {
|
||||
return file_pbhcp_v2_link_proto_enumTypes[0].Descriptor()
|
||||
}
|
||||
|
||||
func (AccessLevel) Type() protoreflect.EnumType {
|
||||
return &file_pbhcp_v2_link_proto_enumTypes[0]
|
||||
}
|
||||
|
||||
func (x AccessLevel) Number() protoreflect.EnumNumber {
|
||||
return protoreflect.EnumNumber(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use AccessLevel.Descriptor instead.
|
||||
func (AccessLevel) EnumDescriptor() ([]byte, []int) {
|
||||
return file_pbhcp_v2_link_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
type Link struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
ResourceId string `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"`
|
||||
ClientId string `protobuf:"bytes,2,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"`
|
||||
ClientSecret string `protobuf:"bytes,3,opt,name=client_secret,json=clientSecret,proto3" json:"client_secret,omitempty"`
|
||||
HcpClusterUrl string `protobuf:"bytes,4,opt,name=hcp_cluster_url,json=hcpClusterUrl,proto3" json:"hcp_cluster_url,omitempty"`
|
||||
AccessLevel AccessLevel `protobuf:"varint,5,opt,name=access_level,json=accessLevel,proto3,enum=hashicorp.consul.hcp.v2.AccessLevel" json:"access_level,omitempty"`
|
||||
HcpConfig *HCPConfig `protobuf:"bytes,6,opt,name=hcp_config,json=hcpConfig,proto3" json:"hcp_config,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Link) Reset() {
|
||||
*x = Link{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_pbhcp_v2_link_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Link) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Link) ProtoMessage() {}
|
||||
|
||||
func (x *Link) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_pbhcp_v2_link_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Link.ProtoReflect.Descriptor instead.
|
||||
func (*Link) Descriptor() ([]byte, []int) {
|
||||
return file_pbhcp_v2_link_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *Link) GetResourceId() string {
|
||||
if x != nil {
|
||||
return x.ResourceId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Link) GetClientId() string {
|
||||
if x != nil {
|
||||
return x.ClientId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Link) GetClientSecret() string {
|
||||
if x != nil {
|
||||
return x.ClientSecret
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Link) GetHcpClusterUrl() string {
|
||||
if x != nil {
|
||||
return x.HcpClusterUrl
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Link) GetAccessLevel() AccessLevel {
|
||||
if x != nil {
|
||||
return x.AccessLevel
|
||||
}
|
||||
return AccessLevel_ACCESS_LEVEL_UNSPECIFIED
|
||||
}
|
||||
|
||||
func (x *Link) GetHcpConfig() *HCPConfig {
|
||||
if x != nil {
|
||||
return x.HcpConfig
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_pbhcp_v2_link_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_pbhcp_v2_link_proto_rawDesc = []byte{
|
||||
0x0a, 0x13, 0x70, 0x62, 0x68, 0x63, 0x70, 0x2f, 0x76, 0x32, 0x2f, 0x6c, 0x69, 0x6e, 0x6b, 0x2e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70,
|
||||
0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70, 0x2e, 0x76, 0x32, 0x1a, 0x19,
|
||||
0x70, 0x62, 0x68, 0x63, 0x70, 0x2f, 0x76, 0x32, 0x2f, 0x68, 0x63, 0x70, 0x5f, 0x63, 0x6f, 0x6e,
|
||||
0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x70, 0x62, 0x72, 0x65, 0x73,
|
||||
0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa5, 0x02, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b,
|
||||
0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49,
|
||||
0x64, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02,
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x23,
|
||||
0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18,
|
||||
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63,
|
||||
0x72, 0x65, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x68, 0x63, 0x70, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74,
|
||||
0x65, 0x72, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x68, 0x63,
|
||||
0x70, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x55, 0x72, 0x6c, 0x12, 0x47, 0x0a, 0x0c, 0x61,
|
||||
0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28,
|
||||
0x0e, 0x32, 0x24, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f,
|
||||
0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x63, 0x63, 0x65,
|
||||
0x73, 0x73, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c,
|
||||
0x65, 0x76, 0x65, 0x6c, 0x12, 0x41, 0x0a, 0x0a, 0x68, 0x63, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x66,
|
||||
0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69,
|
||||
0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70, 0x2e,
|
||||
0x76, 0x32, 0x2e, 0x48, 0x43, 0x50, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x68, 0x63,
|
||||
0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x06, 0xa2, 0x93, 0x04, 0x02, 0x08, 0x01, 0x2a,
|
||||
0x72, 0x0a, 0x0b, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x1c,
|
||||
0x0a, 0x18, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55,
|
||||
0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x22, 0x0a, 0x1e,
|
||||
0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x47, 0x4c, 0x4f,
|
||||
0x42, 0x41, 0x4c, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x01,
|
||||
0x12, 0x21, 0x0a, 0x1d, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c,
|
||||
0x5f, 0x47, 0x4c, 0x4f, 0x42, 0x41, 0x4c, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x4f, 0x4e, 0x4c,
|
||||
0x59, 0x10, 0x02, 0x42, 0xe0, 0x01, 0x0a, 0x1b, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68,
|
||||
0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70,
|
||||
0x2e, 0x76, 0x32, 0x42, 0x09, 0x4c, 0x69, 0x6e, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
|
||||
0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73,
|
||||
0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x68, 0x63, 0x70,
|
||||
0x2f, 0x76, 0x32, 0x3b, 0x68, 0x63, 0x70, 0x76, 0x32, 0xa2, 0x02, 0x03, 0x48, 0x43, 0x48, 0xaa,
|
||||
0x02, 0x17, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73,
|
||||
0x75, 0x6c, 0x2e, 0x48, 0x63, 0x70, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x17, 0x48, 0x61, 0x73, 0x68,
|
||||
0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x48, 0x63, 0x70,
|
||||
0x5c, 0x56, 0x32, 0xe2, 0x02, 0x23, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c,
|
||||
0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x48, 0x63, 0x70, 0x5c, 0x56, 0x32, 0x5c, 0x47, 0x50,
|
||||
0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1a, 0x48, 0x61, 0x73, 0x68,
|
||||
0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x48,
|
||||
0x63, 0x70, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_pbhcp_v2_link_proto_rawDescOnce sync.Once
|
||||
file_pbhcp_v2_link_proto_rawDescData = file_pbhcp_v2_link_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_pbhcp_v2_link_proto_rawDescGZIP() []byte {
|
||||
file_pbhcp_v2_link_proto_rawDescOnce.Do(func() {
|
||||
file_pbhcp_v2_link_proto_rawDescData = protoimpl.X.CompressGZIP(file_pbhcp_v2_link_proto_rawDescData)
|
||||
})
|
||||
return file_pbhcp_v2_link_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_pbhcp_v2_link_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
|
||||
var file_pbhcp_v2_link_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
|
||||
var file_pbhcp_v2_link_proto_goTypes = []interface{}{
|
||||
(AccessLevel)(0), // 0: hashicorp.consul.hcp.v2.AccessLevel
|
||||
(*Link)(nil), // 1: hashicorp.consul.hcp.v2.Link
|
||||
(*HCPConfig)(nil), // 2: hashicorp.consul.hcp.v2.HCPConfig
|
||||
}
|
||||
var file_pbhcp_v2_link_proto_depIdxs = []int32{
|
||||
0, // 0: hashicorp.consul.hcp.v2.Link.access_level:type_name -> hashicorp.consul.hcp.v2.AccessLevel
|
||||
2, // 1: hashicorp.consul.hcp.v2.Link.hcp_config:type_name -> hashicorp.consul.hcp.v2.HCPConfig
|
||||
2, // [2:2] is the sub-list for method output_type
|
||||
2, // [2:2] is the sub-list for method input_type
|
||||
2, // [2:2] is the sub-list for extension type_name
|
||||
2, // [2:2] is the sub-list for extension extendee
|
||||
0, // [0:2] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_pbhcp_v2_link_proto_init() }
|
||||
func file_pbhcp_v2_link_proto_init() {
|
||||
if File_pbhcp_v2_link_proto != nil {
|
||||
return
|
||||
}
|
||||
file_pbhcp_v2_hcp_config_proto_init()
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_pbhcp_v2_link_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Link); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_pbhcp_v2_link_proto_rawDesc,
|
||||
NumEnums: 1,
|
||||
NumMessages: 1,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_pbhcp_v2_link_proto_goTypes,
|
||||
DependencyIndexes: file_pbhcp_v2_link_proto_depIdxs,
|
||||
EnumInfos: file_pbhcp_v2_link_proto_enumTypes,
|
||||
MessageInfos: file_pbhcp_v2_link_proto_msgTypes,
|
||||
}.Build()
|
||||
File_pbhcp_v2_link_proto = out.File
|
||||
file_pbhcp_v2_link_proto_rawDesc = nil
|
||||
file_pbhcp_v2_link_proto_goTypes = nil
|
||||
file_pbhcp_v2_link_proto_depIdxs = nil
|
||||
}
|
|
@ -1,26 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package hashicorp.consul.hcp.v2;
|
||||
|
||||
import "pbhcp/v2/hcp_config.proto";
|
||||
import "pbresource/annotations.proto";
|
||||
|
||||
enum AccessLevel {
|
||||
ACCESS_LEVEL_UNSPECIFIED = 0;
|
||||
ACCESS_LEVEL_GLOBAL_READ_WRITE = 1;
|
||||
ACCESS_LEVEL_GLOBAL_READ_ONLY = 2;
|
||||
}
|
||||
|
||||
message Link {
|
||||
option (hashicorp.consul.resource.spec) = {scope: SCOPE_CLUSTER};
|
||||
|
||||
string resource_id = 1;
|
||||
string client_id = 2;
|
||||
string client_secret = 3;
|
||||
string hcp_cluster_url = 4;
|
||||
AccessLevel access_level = 5;
|
||||
HCPConfig hcp_config = 6;
|
||||
}
|
|
@ -1,27 +0,0 @@
|
|||
// Code generated by protoc-gen-deepcopy. DO NOT EDIT.
|
||||
package hcpv2
|
||||
|
||||
import (
|
||||
proto "google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// DeepCopyInto supports using Link within kubernetes types, where deepcopy-gen is used.
|
||||
func (in *Link) DeepCopyInto(out *Link) {
|
||||
proto.Reset(out)
|
||||
proto.Merge(out, proto.Clone(in))
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Link. Required by controller-gen.
|
||||
func (in *Link) DeepCopy() *Link {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Link)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new Link. Required by controller-gen.
|
||||
func (in *Link) DeepCopyInterface() interface{} {
|
||||
return in.DeepCopy()
|
||||
}
|
|
@ -1,22 +0,0 @@
|
|||
// Code generated by protoc-json-shim. DO NOT EDIT.
|
||||
package hcpv2
|
||||
|
||||
import (
|
||||
protojson "google.golang.org/protobuf/encoding/protojson"
|
||||
)
|
||||
|
||||
// MarshalJSON is a custom marshaler for Link
|
||||
func (this *Link) MarshalJSON() ([]byte, error) {
|
||||
str, err := LinkMarshaler.Marshal(this)
|
||||
return []byte(str), err
|
||||
}
|
||||
|
||||
// UnmarshalJSON is a custom unmarshaler for Link
|
||||
func (this *Link) UnmarshalJSON(b []byte) error {
|
||||
return LinkUnmarshaler.Unmarshal(b, this)
|
||||
}
|
||||
|
||||
var (
|
||||
LinkMarshaler = &protojson.MarshalOptions{}
|
||||
LinkUnmarshaler = &protojson.UnmarshalOptions{DiscardUnknown: false}
|
||||
)
|
|
@ -1,29 +0,0 @@
|
|||
// Code generated by protoc-gen-resource-types. DO NOT EDIT.
|
||||
|
||||
package hcpv2
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
)
|
||||
|
||||
const (
|
||||
GroupName = "hcp"
|
||||
Version = "v2"
|
||||
|
||||
LinkKind = "Link"
|
||||
TelemetryStateKind = "TelemetryState"
|
||||
)
|
||||
|
||||
var (
|
||||
LinkType = &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: LinkKind,
|
||||
}
|
||||
|
||||
TelemetryStateType = &pbresource.Type{
|
||||
Group: GroupName,
|
||||
GroupVersion: Version,
|
||||
Kind: TelemetryStateKind,
|
||||
}
|
||||
)
|
|
@ -1,38 +0,0 @@
|
|||
// Code generated by protoc-gen-go-binary. DO NOT EDIT.
|
||||
// source: pbhcp/v2/telemetry_state.proto
|
||||
|
||||
package hcpv2
|
||||
|
||||
import (
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// MarshalBinary implements encoding.BinaryMarshaler
|
||||
func (msg *TelemetryState) MarshalBinary() ([]byte, error) {
|
||||
return proto.Marshal(msg)
|
||||
}
|
||||
|
||||
// UnmarshalBinary implements encoding.BinaryUnmarshaler
|
||||
func (msg *TelemetryState) UnmarshalBinary(b []byte) error {
|
||||
return proto.Unmarshal(b, msg)
|
||||
}
|
||||
|
||||
// MarshalBinary implements encoding.BinaryMarshaler
|
||||
func (msg *MetricsConfig) MarshalBinary() ([]byte, error) {
|
||||
return proto.Marshal(msg)
|
||||
}
|
||||
|
||||
// UnmarshalBinary implements encoding.BinaryUnmarshaler
|
||||
func (msg *MetricsConfig) UnmarshalBinary(b []byte) error {
|
||||
return proto.Unmarshal(b, msg)
|
||||
}
|
||||
|
||||
// MarshalBinary implements encoding.BinaryMarshaler
|
||||
func (msg *ProxyConfig) MarshalBinary() ([]byte, error) {
|
||||
return proto.Marshal(msg)
|
||||
}
|
||||
|
||||
// UnmarshalBinary implements encoding.BinaryUnmarshaler
|
||||
func (msg *ProxyConfig) UnmarshalBinary(b []byte) error {
|
||||
return proto.Unmarshal(b, msg)
|
||||
}
|
|
@ -1,426 +0,0 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.33.0
|
||||
// protoc (unknown)
|
||||
// source: pbhcp/v2/telemetry_state.proto
|
||||
|
||||
package hcpv2
|
||||
|
||||
import (
|
||||
_ "github.com/hashicorp/consul/proto-public/pbresource"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// TelemetryState describes configuration required to forward telemetry to the HashiCorp Cloud Platform.
|
||||
// This resource is managed internally and is only written if the cluster is linked to HCP. Any
|
||||
// manual changes to the resource will be reconciled and overwritten with the internally computed
|
||||
// state.
|
||||
type TelemetryState struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// ResourceId is the identifier for the cluster linked with HCP.
|
||||
ResourceId string `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"`
|
||||
// ClientId is the oauth client identifier for cluster.
|
||||
// This client has capabilities limited to writing telemetry data for this cluster.
|
||||
ClientId string `protobuf:"bytes,2,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"`
|
||||
// ClientSecret is the oauth secret used to authenticate requests to send telemetry data to HCP.
|
||||
ClientSecret string `protobuf:"bytes,3,opt,name=client_secret,json=clientSecret,proto3" json:"client_secret,omitempty"`
|
||||
HcpConfig *HCPConfig `protobuf:"bytes,4,opt,name=hcp_config,json=hcpConfig,proto3" json:"hcp_config,omitempty"`
|
||||
Proxy *ProxyConfig `protobuf:"bytes,5,opt,name=proxy,proto3" json:"proxy,omitempty"`
|
||||
Metrics *MetricsConfig `protobuf:"bytes,6,opt,name=metrics,proto3" json:"metrics,omitempty"`
|
||||
}
|
||||
|
||||
func (x *TelemetryState) Reset() {
|
||||
*x = TelemetryState{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_pbhcp_v2_telemetry_state_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *TelemetryState) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*TelemetryState) ProtoMessage() {}
|
||||
|
||||
func (x *TelemetryState) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_pbhcp_v2_telemetry_state_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use TelemetryState.ProtoReflect.Descriptor instead.
|
||||
func (*TelemetryState) Descriptor() ([]byte, []int) {
|
||||
return file_pbhcp_v2_telemetry_state_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *TelemetryState) GetResourceId() string {
|
||||
if x != nil {
|
||||
return x.ResourceId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *TelemetryState) GetClientId() string {
|
||||
if x != nil {
|
||||
return x.ClientId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *TelemetryState) GetClientSecret() string {
|
||||
if x != nil {
|
||||
return x.ClientSecret
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *TelemetryState) GetHcpConfig() *HCPConfig {
|
||||
if x != nil {
|
||||
return x.HcpConfig
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *TelemetryState) GetProxy() *ProxyConfig {
|
||||
if x != nil {
|
||||
return x.Proxy
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *TelemetryState) GetMetrics() *MetricsConfig {
|
||||
if x != nil {
|
||||
return x.Metrics
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MetricsConfig configures metric specific collection details
|
||||
type MetricsConfig struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Endpoint is the HTTPS address and path to forward metrics to
|
||||
Endpoint string `protobuf:"bytes,1,opt,name=endpoint,proto3" json:"endpoint,omitempty"`
|
||||
// IncludeList contains patterns to match against metric names. Only matched metrics are forwarded.
|
||||
IncludeList []string `protobuf:"bytes,2,rep,name=include_list,json=includeList,proto3" json:"include_list,omitempty"`
|
||||
// Labels contains key value pairs that are associated with all metrics collected and fowarded.
|
||||
Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
// Disabled toggles metric forwarding. If true, metric forwarding will stop until disabled is set to false.
|
||||
Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"`
|
||||
}
|
||||
|
||||
func (x *MetricsConfig) Reset() {
|
||||
*x = MetricsConfig{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_pbhcp_v2_telemetry_state_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *MetricsConfig) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*MetricsConfig) ProtoMessage() {}
|
||||
|
||||
func (x *MetricsConfig) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_pbhcp_v2_telemetry_state_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use MetricsConfig.ProtoReflect.Descriptor instead.
|
||||
func (*MetricsConfig) Descriptor() ([]byte, []int) {
|
||||
return file_pbhcp_v2_telemetry_state_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *MetricsConfig) GetEndpoint() string {
|
||||
if x != nil {
|
||||
return x.Endpoint
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *MetricsConfig) GetIncludeList() []string {
|
||||
if x != nil {
|
||||
return x.IncludeList
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *MetricsConfig) GetLabels() map[string]string {
|
||||
if x != nil {
|
||||
return x.Labels
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *MetricsConfig) GetDisabled() bool {
|
||||
if x != nil {
|
||||
return x.Disabled
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ProxyConfig describes configuration for forwarding requests through an http proxy
|
||||
type ProxyConfig struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// HttpProxy configures the http proxy to use for HTTP (non-TLS) requests.
|
||||
HttpProxy string `protobuf:"bytes,1,opt,name=http_proxy,json=httpProxy,proto3" json:"http_proxy,omitempty"`
|
||||
// HttpsProxy configures the http proxy to use for HTTPS (TLS) requests.
|
||||
HttpsProxy string `protobuf:"bytes,2,opt,name=https_proxy,json=httpsProxy,proto3" json:"https_proxy,omitempty"`
|
||||
// NoProxy can be configured to include domains which should NOT be forwarded through the configured http proxy
|
||||
NoProxy []string `protobuf:"bytes,3,rep,name=no_proxy,json=noProxy,proto3" json:"no_proxy,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ProxyConfig) Reset() {
|
||||
*x = ProxyConfig{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_pbhcp_v2_telemetry_state_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ProxyConfig) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ProxyConfig) ProtoMessage() {}
|
||||
|
||||
func (x *ProxyConfig) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_pbhcp_v2_telemetry_state_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ProxyConfig.ProtoReflect.Descriptor instead.
|
||||
func (*ProxyConfig) Descriptor() ([]byte, []int) {
|
||||
return file_pbhcp_v2_telemetry_state_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *ProxyConfig) GetHttpProxy() string {
|
||||
if x != nil {
|
||||
return x.HttpProxy
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ProxyConfig) GetHttpsProxy() string {
|
||||
if x != nil {
|
||||
return x.HttpsProxy
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ProxyConfig) GetNoProxy() []string {
|
||||
if x != nil {
|
||||
return x.NoProxy
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_pbhcp_v2_telemetry_state_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_pbhcp_v2_telemetry_state_proto_rawDesc = []byte{
|
||||
0x0a, 0x1e, 0x70, 0x62, 0x68, 0x63, 0x70, 0x2f, 0x76, 0x32, 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x6d,
|
||||
0x65, 0x74, 0x72, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x12, 0x17, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73,
|
||||
0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70, 0x2e, 0x76, 0x32, 0x1a, 0x19, 0x70, 0x62, 0x68, 0x63, 0x70,
|
||||
0x2f, 0x76, 0x32, 0x2f, 0x68, 0x63, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x70, 0x62, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
|
||||
0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x22, 0xbc, 0x02, 0x0a, 0x0e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79,
|
||||
0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
|
||||
0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x6f,
|
||||
0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
|
||||
0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e,
|
||||
0x74, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65,
|
||||
0x63, 0x72, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65,
|
||||
0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x41, 0x0a, 0x0a, 0x68, 0x63, 0x70, 0x5f,
|
||||
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x68,
|
||||
0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e,
|
||||
0x68, 0x63, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x43, 0x50, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
|
||||
0x52, 0x09, 0x68, 0x63, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3a, 0x0a, 0x05, 0x70,
|
||||
0x72, 0x6f, 0x78, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x68, 0x61, 0x73,
|
||||
0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63,
|
||||
0x70, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
|
||||
0x52, 0x05, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x12, 0x40, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69,
|
||||
0x63, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69,
|
||||
0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70, 0x2e,
|
||||
0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
|
||||
0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x3a, 0x06, 0xa2, 0x93, 0x04, 0x02, 0x08,
|
||||
0x01, 0x22, 0xf1, 0x01, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x43, 0x6f, 0x6e,
|
||||
0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12,
|
||||
0x21, 0x0a, 0x0c, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18,
|
||||
0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4c, 0x69,
|
||||
0x73, 0x74, 0x12, 0x4a, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03,
|
||||
0x28, 0x0b, 0x32, 0x32, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63,
|
||||
0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74,
|
||||
0x72, 0x69, 0x63, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c,
|
||||
0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x1a,
|
||||
0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08,
|
||||
0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61,
|
||||
0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
|
||||
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x68, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x6f,
|
||||
0x6e, 0x66, 0x69, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x70, 0x72, 0x6f,
|
||||
0x78, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x68, 0x74, 0x74, 0x70, 0x50, 0x72,
|
||||
0x6f, 0x78, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x74, 0x74, 0x70, 0x73, 0x5f, 0x70, 0x72, 0x6f,
|
||||
0x78, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x73, 0x50,
|
||||
0x72, 0x6f, 0x78, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x78, 0x79,
|
||||
0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x6f, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x42,
|
||||
0xea, 0x01, 0x0a, 0x1b, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72,
|
||||
0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x68, 0x63, 0x70, 0x2e, 0x76, 0x32, 0x42,
|
||||
0x13, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x50,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
|
||||
0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e,
|
||||
0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63,
|
||||
0x2f, 0x70, 0x62, 0x68, 0x63, 0x70, 0x2f, 0x76, 0x32, 0x3b, 0x68, 0x63, 0x70, 0x76, 0x32, 0xa2,
|
||||
0x02, 0x03, 0x48, 0x43, 0x48, 0xaa, 0x02, 0x17, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72,
|
||||
0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x48, 0x63, 0x70, 0x2e, 0x56, 0x32, 0xca,
|
||||
0x02, 0x17, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73,
|
||||
0x75, 0x6c, 0x5c, 0x48, 0x63, 0x70, 0x5c, 0x56, 0x32, 0xe2, 0x02, 0x23, 0x48, 0x61, 0x73, 0x68,
|
||||
0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x48, 0x63, 0x70,
|
||||
0x5c, 0x56, 0x32, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea,
|
||||
0x02, 0x1a, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e,
|
||||
0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x48, 0x63, 0x70, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_pbhcp_v2_telemetry_state_proto_rawDescOnce sync.Once
|
||||
file_pbhcp_v2_telemetry_state_proto_rawDescData = file_pbhcp_v2_telemetry_state_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_pbhcp_v2_telemetry_state_proto_rawDescGZIP() []byte {
|
||||
file_pbhcp_v2_telemetry_state_proto_rawDescOnce.Do(func() {
|
||||
file_pbhcp_v2_telemetry_state_proto_rawDescData = protoimpl.X.CompressGZIP(file_pbhcp_v2_telemetry_state_proto_rawDescData)
|
||||
})
|
||||
return file_pbhcp_v2_telemetry_state_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_pbhcp_v2_telemetry_state_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
|
||||
var file_pbhcp_v2_telemetry_state_proto_goTypes = []interface{}{
|
||||
(*TelemetryState)(nil), // 0: hashicorp.consul.hcp.v2.TelemetryState
|
||||
(*MetricsConfig)(nil), // 1: hashicorp.consul.hcp.v2.MetricsConfig
|
||||
(*ProxyConfig)(nil), // 2: hashicorp.consul.hcp.v2.ProxyConfig
|
||||
nil, // 3: hashicorp.consul.hcp.v2.MetricsConfig.LabelsEntry
|
||||
(*HCPConfig)(nil), // 4: hashicorp.consul.hcp.v2.HCPConfig
|
||||
}
|
||||
var file_pbhcp_v2_telemetry_state_proto_depIdxs = []int32{
|
||||
4, // 0: hashicorp.consul.hcp.v2.TelemetryState.hcp_config:type_name -> hashicorp.consul.hcp.v2.HCPConfig
|
||||
2, // 1: hashicorp.consul.hcp.v2.TelemetryState.proxy:type_name -> hashicorp.consul.hcp.v2.ProxyConfig
|
||||
1, // 2: hashicorp.consul.hcp.v2.TelemetryState.metrics:type_name -> hashicorp.consul.hcp.v2.MetricsConfig
|
||||
3, // 3: hashicorp.consul.hcp.v2.MetricsConfig.labels:type_name -> hashicorp.consul.hcp.v2.MetricsConfig.LabelsEntry
|
||||
4, // [4:4] is the sub-list for method output_type
|
||||
4, // [4:4] is the sub-list for method input_type
|
||||
4, // [4:4] is the sub-list for extension type_name
|
||||
4, // [4:4] is the sub-list for extension extendee
|
||||
0, // [0:4] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_pbhcp_v2_telemetry_state_proto_init() }
|
||||
func file_pbhcp_v2_telemetry_state_proto_init() {
|
||||
if File_pbhcp_v2_telemetry_state_proto != nil {
|
||||
return
|
||||
}
|
||||
file_pbhcp_v2_hcp_config_proto_init()
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_pbhcp_v2_telemetry_state_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*TelemetryState); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_pbhcp_v2_telemetry_state_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*MetricsConfig); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_pbhcp_v2_telemetry_state_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ProxyConfig); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_pbhcp_v2_telemetry_state_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 4,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_pbhcp_v2_telemetry_state_proto_goTypes,
|
||||
DependencyIndexes: file_pbhcp_v2_telemetry_state_proto_depIdxs,
|
||||
MessageInfos: file_pbhcp_v2_telemetry_state_proto_msgTypes,
|
||||
}.Build()
|
||||
File_pbhcp_v2_telemetry_state_proto = out.File
|
||||
file_pbhcp_v2_telemetry_state_proto_rawDesc = nil
|
||||
file_pbhcp_v2_telemetry_state_proto_goTypes = nil
|
||||
file_pbhcp_v2_telemetry_state_proto_depIdxs = nil
|
||||
}
|
|
@ -1,55 +0,0 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package hashicorp.consul.hcp.v2;
|
||||
|
||||
import "pbhcp/v2/hcp_config.proto";
|
||||
import "pbresource/annotations.proto";
|
||||
|
||||
// TelemetryState describes configuration required to forward telemetry to the HashiCorp Cloud Platform.
|
||||
// This resource is managed internally and is only written if the cluster is linked to HCP. Any
|
||||
// manual changes to the resource will be reconciled and overwritten with the internally computed
|
||||
// state.
|
||||
message TelemetryState {
|
||||
option (hashicorp.consul.resource.spec) = {scope: SCOPE_CLUSTER};
|
||||
|
||||
// ResourceId is the identifier for the cluster linked with HCP.
|
||||
string resource_id = 1;
|
||||
|
||||
// ClientId is the oauth client identifier for cluster.
|
||||
// This client has capabilities limited to writing telemetry data for this cluster.
|
||||
string client_id = 2;
|
||||
|
||||
// ClientSecret is the oauth secret used to authenticate requests to send telemetry data to HCP.
|
||||
string client_secret = 3;
|
||||
|
||||
HCPConfig hcp_config = 4;
|
||||
ProxyConfig proxy = 5;
|
||||
MetricsConfig metrics = 6;
|
||||
}
|
||||
|
||||
// MetricsConfig configures metric specific collection details
|
||||
message MetricsConfig {
|
||||
// Endpoint is the HTTPS address and path to forward metrics to
|
||||
string endpoint = 1;
|
||||
|
||||
// IncludeList contains patterns to match against metric names. Only matched metrics are forwarded.
|
||||
repeated string include_list = 2;
|
||||
|
||||
// Labels contains key value pairs that are associated with all metrics collected and fowarded.
|
||||
map<string, string> labels = 3;
|
||||
|
||||
// Disabled toggles metric forwarding. If true, metric forwarding will stop until disabled is set to false.
|
||||
bool disabled = 4;
|
||||
}
|
||||
|
||||
// ProxyConfig describes configuration for forwarding requests through an http proxy
|
||||
message ProxyConfig {
|
||||
// HttpProxy configures the http proxy to use for HTTP (non-TLS) requests.
|
||||
string http_proxy = 1;
|
||||
|
||||
// HttpsProxy configures the http proxy to use for HTTPS (TLS) requests.
|
||||
string https_proxy = 2;
|
||||
|
||||
// NoProxy can be configured to include domains which should NOT be forwarded through the configured http proxy
|
||||
repeated string no_proxy = 3;
|
||||
}
|
|
@ -1,69 +0,0 @@
|
|||
// Code generated by protoc-gen-deepcopy. DO NOT EDIT.
|
||||
package hcpv2
|
||||
|
||||
import (
|
||||
proto "google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// DeepCopyInto supports using TelemetryState within kubernetes types, where deepcopy-gen is used.
|
||||
func (in *TelemetryState) DeepCopyInto(out *TelemetryState) {
|
||||
proto.Reset(out)
|
||||
proto.Merge(out, proto.Clone(in))
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TelemetryState. Required by controller-gen.
|
||||
func (in *TelemetryState) DeepCopy() *TelemetryState {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(TelemetryState)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new TelemetryState. Required by controller-gen.
|
||||
func (in *TelemetryState) DeepCopyInterface() interface{} {
|
||||
return in.DeepCopy()
|
||||
}
|
||||
|
||||
// DeepCopyInto supports using MetricsConfig within kubernetes types, where deepcopy-gen is used.
|
||||
func (in *MetricsConfig) DeepCopyInto(out *MetricsConfig) {
|
||||
proto.Reset(out)
|
||||
proto.Merge(out, proto.Clone(in))
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsConfig. Required by controller-gen.
|
||||
func (in *MetricsConfig) DeepCopy() *MetricsConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(MetricsConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new MetricsConfig. Required by controller-gen.
|
||||
func (in *MetricsConfig) DeepCopyInterface() interface{} {
|
||||
return in.DeepCopy()
|
||||
}
|
||||
|
||||
// DeepCopyInto supports using ProxyConfig within kubernetes types, where deepcopy-gen is used.
|
||||
func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) {
|
||||
proto.Reset(out)
|
||||
proto.Merge(out, proto.Clone(in))
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig. Required by controller-gen.
|
||||
func (in *ProxyConfig) DeepCopy() *ProxyConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ProxyConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig. Required by controller-gen.
|
||||
func (in *ProxyConfig) DeepCopyInterface() interface{} {
|
||||
return in.DeepCopy()
|
||||
}
|
|
@ -1,44 +0,0 @@
|
|||
// Code generated by protoc-json-shim. DO NOT EDIT.
|
||||
package hcpv2
|
||||
|
||||
import (
|
||||
protojson "google.golang.org/protobuf/encoding/protojson"
|
||||
)
|
||||
|
||||
// MarshalJSON is a custom marshaler for TelemetryState
|
||||
func (this *TelemetryState) MarshalJSON() ([]byte, error) {
|
||||
str, err := TelemetryStateMarshaler.Marshal(this)
|
||||
return []byte(str), err
|
||||
}
|
||||
|
||||
// UnmarshalJSON is a custom unmarshaler for TelemetryState
|
||||
func (this *TelemetryState) UnmarshalJSON(b []byte) error {
|
||||
return TelemetryStateUnmarshaler.Unmarshal(b, this)
|
||||
}
|
||||
|
||||
// MarshalJSON is a custom marshaler for MetricsConfig
|
||||
func (this *MetricsConfig) MarshalJSON() ([]byte, error) {
|
||||
str, err := TelemetryStateMarshaler.Marshal(this)
|
||||
return []byte(str), err
|
||||
}
|
||||
|
||||
// UnmarshalJSON is a custom unmarshaler for MetricsConfig
|
||||
func (this *MetricsConfig) UnmarshalJSON(b []byte) error {
|
||||
return TelemetryStateUnmarshaler.Unmarshal(b, this)
|
||||
}
|
||||
|
||||
// MarshalJSON is a custom marshaler for ProxyConfig
|
||||
func (this *ProxyConfig) MarshalJSON() ([]byte, error) {
|
||||
str, err := TelemetryStateMarshaler.Marshal(this)
|
||||
return []byte(str), err
|
||||
}
|
||||
|
||||
// UnmarshalJSON is a custom unmarshaler for ProxyConfig
|
||||
func (this *ProxyConfig) UnmarshalJSON(b []byte) error {
|
||||
return TelemetryStateUnmarshaler.Unmarshal(b, this)
|
||||
}
|
||||
|
||||
var (
|
||||
TelemetryStateMarshaler = &protojson.MarshalOptions{}
|
||||
TelemetryStateUnmarshaler = &protojson.UnmarshalOptions{DiscardUnknown: false}
|
||||
)
|
3
scan.hcl
3
scan.hcl
|
@ -28,8 +28,7 @@ repository {
|
|||
# periodically cleaned up to remove items that are no longer found by the scanner.
|
||||
triage {
|
||||
suppress {
|
||||
# N.b. `vulnerabilites` is the correct spelling for this tool.
|
||||
vulnerabilites = [
|
||||
vulnerabilities = [
|
||||
]
|
||||
paths = [
|
||||
"internal/tools/proto-gen-rpc-glue/e2e/consul/*",
|
||||
|
|
|
@ -5,7 +5,7 @@ go 1.22
|
|||
toolchain go1.22.5
|
||||
|
||||
require (
|
||||
github.com/google/go-cmp v0.5.9
|
||||
github.com/google/go-cmp v0.6.0
|
||||
github.com/hashicorp/consul/api v1.29.4
|
||||
github.com/hashicorp/consul/proto-public v0.6.2
|
||||
github.com/hashicorp/consul/sdk v0.16.1
|
||||
|
@ -16,7 +16,7 @@ require (
|
|||
github.com/mitchellh/copystructure v1.2.0
|
||||
github.com/rboyer/blankspace v0.2.1
|
||||
github.com/stretchr/testify v1.8.4
|
||||
golang.org/x/net v0.24.0
|
||||
golang.org/x/net v0.25.0
|
||||
google.golang.org/grpc v1.58.3
|
||||
)
|
||||
|
||||
|
@ -64,7 +64,7 @@ require (
|
|||
github.com/hashicorp/go-uuid v1.0.3 // indirect
|
||||
github.com/hashicorp/go-version v1.2.1 // indirect
|
||||
github.com/hashicorp/golang-lru v0.5.4 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/hashicorp/hcl v1.0.1-vault-7 // indirect
|
||||
github.com/hashicorp/hcl/v2 v2.16.2 // indirect
|
||||
github.com/hashicorp/memberlist v0.5.0 // indirect
|
||||
github.com/hashicorp/serf v0.10.1 // indirect
|
||||
|
@ -99,12 +99,13 @@ require (
|
|||
github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569 // indirect
|
||||
github.com/testcontainers/testcontainers-go v0.22.0 // indirect
|
||||
github.com/zclconf/go-cty v1.12.1 // indirect
|
||||
golang.org/x/crypto v0.22.0 // indirect
|
||||
golang.org/x/crypto v0.31.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 // indirect
|
||||
golang.org/x/mod v0.13.0 // indirect
|
||||
golang.org/x/sys v0.20.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/tools v0.14.0 // indirect
|
||||
golang.org/x/mod v0.17.0 // indirect
|
||||
golang.org/x/sync v0.10.0 // indirect
|
||||
golang.org/x/sys v0.28.0 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5 // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue