Merge branch 'main' into southworks/qa-consul

pull/17694/head
Ashesh Vidyut committed via GitHub
commit f6424ab6a7

@ -0,0 +1,3 @@
```release-note:bug
Fix a bug that wrongly trims domains when they overlap with the DC name.
```

@ -0,0 +1,3 @@
```release-note:bug
http: fixed API endpoint `PUT /acl/token/:AccessorID` (update token), no longer requires `AccessorID` in the request body. Web UI can now update tokens.
```
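For illustration (not part of the diff): a minimal sketch of the relaxed endpoint using Go's standard library. The agent address, token env var, and accessor ID are placeholders.

```go
// Sketch: update an existing token without repeating AccessorID in the body.
package main

import (
	"fmt"
	"net/http"
	"os"
	"strings"
)

func main() {
	accessorID := "11111111-2222-3333-4444-555555555555" // placeholder
	// Note: no "AccessorID" field in the body; the server now fills it in
	// from the URL when updating an existing token.
	body := strings.NewReader(`{"Description": "updated via URL-only accessor ID"}`)
	req, err := http.NewRequest(http.MethodPut,
		"http://127.0.0.1:8500/v1/acl/token/"+accessorID, body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Consul-Token", os.Getenv("CONSUL_HTTP_TOKEN"))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// Previously this returned 400 "Token Accessor ID in URL and payload do not match".
	fmt.Println(resp.Status)
}
```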

@ -0,0 +1,3 @@
```release-note:feature
cli: the `consul watch` command now supports a `-filter` expression to filter results for the `checks`, `services`, `nodes`, and `service` watch types.
```
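For illustration (not part of the diff): the same filter driven through the Go `watch` package. The filter expression, handler, and agent address are illustrative.

```go
// Sketch: a "services" watch with a filter, mirroring
// `consul watch -type=services -filter='a in ServiceTags'`.
package main

import (
	"log"

	"github.com/hashicorp/consul/api/watch"
)

func main() {
	plan, err := watch.Parse(map[string]interface{}{
		"type":   "services",
		"filter": "a in ServiceTags", // illustrative filter expression
	})
	if err != nil {
		log.Fatal(err)
	}
	plan.Handler = func(idx uint64, raw interface{}) {
		log.Printf("index %d: %v", idx, raw) // only services whose tags match
	}
	if err := plan.Run("127.0.0.1:8500"); err != nil { // local agent address
		log.Fatal(err)
	}
}
```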

@ -0,0 +1,3 @@
```release-note:improvement
ca: Vault CA provider config no longer requires root_pki_path for secondary datacenters
```

@ -0,0 +1,3 @@
```release-note:improvement
connect: Add capture group labels from Envoy cluster FQDNs to Envoy exported metric labels
```

@ -13,7 +13,7 @@ permissions:
contents: read
env:
GOPRIVATE: github.com/hashicorp
GOPRIVATE: github.com/hashicorp # Required for enterprise deps
jobs:
setup:

@ -15,6 +15,7 @@ permissions:
env:
GOTAGS: ${{ endsWith(github.repository, '-enterprise') && 'consulent' || '' }}
GOPRIVATE: github.com/hashicorp # Required for enterprise deps
jobs:
setup:

@ -14,6 +14,7 @@ on:
env:
PKG_NAME: consul
METADATA: oss
GOPRIVATE: github.com/hashicorp # Required for enterprise deps
jobs:
set-product-version:

@ -21,6 +21,7 @@ permissions:
env:
TEST_RESULTS: /tmp/test-results
GOPRIVATE: github.com/hashicorp # Required for enterprise deps
jobs:
setup:
@ -50,7 +51,7 @@ jobs:
check-generated-protobuf:
needs:
- setup
runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }}
runs-on: ${{ fromJSON(needs.setup.outputs.compute-medium) }}
steps:
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
# NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos.

@ -8,9 +8,10 @@ on:
workflow_dispatch: {}
env:
EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition
BRANCH: "release/1.13.x"
BRANCH_NAME: "release-1.13.x" # Used for naming artifacts
GOPRIVATE: github.com/hashicorp # Required for enterprise deps
jobs:
frontend-test-workspace-node:

@ -8,9 +8,10 @@ on:
workflow_dispatch: {}
env:
EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition
BRANCH: "release/1.14.x"
BRANCH_NAME: "release-1.14.x" # Used for naming artifacts
GOPRIVATE: github.com/hashicorp # Required for enterprise deps
jobs:
frontend-test-workspace-node:

@ -8,9 +8,10 @@ on:
workflow_dispatch: {}
env:
EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition
BRANCH: "release/1.15.x"
BRANCH_NAME: "release-1.15.x" # Used for naming artifacts
GOPRIVATE: github.com/hashicorp # Required for enterprise deps
jobs:
frontend-test-workspace-node:

@ -8,9 +8,10 @@ on:
workflow_dispatch: {}
env:
EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition
BRANCH: "release/1.16.x"
BRANCH_NAME: "release-1.16.x" # Used for naming artifacts
GOPRIVATE: github.com/hashicorp # Required for enterprise deps
jobs:
frontend-test-workspace-node:

@ -8,9 +8,10 @@ on:
workflow_dispatch: {}
env:
EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition
BRANCH: "main"
BRANCH_NAME: "main" # Used for naming artifacts
GOPRIVATE: github.com/hashicorp # Required for enterprise deps
jobs:
frontend-test-workspace-node:

@ -8,7 +8,7 @@ on:
- closed
branches:
- main
- 'release/*.*.x'
- release/**
jobs:
trigger-oss-merge:

@ -20,6 +20,7 @@ on:
env:
GOTAGS: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}"
GOARCH: ${{inputs.go-arch}}
GOPRIVATE: github.com/hashicorp # Required for enterprise deps
jobs:
lint:

@ -51,6 +51,7 @@ env:
TOTAL_RUNNERS: ${{inputs.runner-count}}
CONSUL_LICENSE: ${{secrets.consul-license}}
GOTAGS: ${{ inputs.go-tags}}
GOPRIVATE: github.com/hashicorp # Required for enterprise deps
DATADOG_API_KEY: ${{secrets.datadog-api-key}}
jobs:

@ -46,6 +46,7 @@ env:
GOARCH: ${{inputs.go-arch}}
CONSUL_LICENSE: ${{secrets.consul-license}}
GOTAGS: ${{ inputs.go-tags}}
GOPRIVATE: github.com/hashicorp # Required for enterprise deps
DATADOG_API_KEY: ${{secrets.datadog-api-key}}
jobs:

@ -23,6 +23,7 @@ env:
CONSUL_BINARY_UPLOAD_NAME: consul-bin
# strip the hashicorp/ off the front of github.repository for consul
CONSUL_LATEST_IMAGE_NAME: ${{ endsWith(github.repository, '-enterprise') && github.repository || 'consul' }}
GOPRIVATE: github.com/hashicorp # Required for enterprise deps
jobs:
setup:
@ -365,6 +366,10 @@ jobs:
ENVOY_VERSION: "1.25.4"
steps:
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
# NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos.
- name: Setup Git
if: ${{ endsWith(github.repository, '-enterprise') }}
run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com"
- uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1
with:
go-version-file: 'go.mod'
@ -476,6 +481,10 @@ jobs:
ENVOY_VERSION: "1.24.6"
steps:
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
# NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos.
- name: Setup Git
if: ${{ endsWith(github.repository, '-enterprise') }}
run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com"
- uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1
with:
go-version-file: 'go.mod'

@ -0,0 +1,78 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0
name: Verify Release - Linux
on:
workflow_dispatch:
inputs:
packageName:
description: 'Name of consul release package (consul vs consul-enterprise)'
required: true
default: 'consul'
type: choice
options:
- consul
- consul-enterprise
version:
description: 'The x.y.z version (also specify applicable suffixes like +ent and -dev)'
required: true
type: string
jobs:
verify-ubuntu-amd64:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
- name: docker build with version
run: |
docker build \
--build-arg PACKAGE=${{ inputs.packageName }} \
--build-arg VERSION=${{ inputs.version }} \
--build-arg TARGETARCH=amd64 \
-f ./build-support/docker/Verify-Release-Ubuntu.dockerfile .
verify-debian-amd64:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
- name: docker build with version
run: |
docker build \
--build-arg PACKAGE=${{ inputs.packageName }} \
--build-arg VERSION=${{ inputs.version }} \
--build-arg TARGETARCH=amd64 \
-f ./build-support/docker/Verify-Release-Debian.dockerfile .
verify-fedora:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
- name: docker build with version
run: |
docker build \
--build-arg PACKAGE=${{ inputs.packageName }} \
--build-arg VERSION=${{ inputs.version }} \
-f ./build-support/docker/Verify-Release-Fedora.dockerfile .
verify-centos:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
- name: docker build with version
run: |
docker build \
--build-arg PACKAGE=${{ inputs.packageName }} \
--build-arg VERSION=${{ inputs.version }} \
-f ./build-support/docker/Verify-Release-CentOS.dockerfile .
verify-amazon:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
- name: docker build with version
run: |
docker build \
--build-arg PACKAGE=${{ inputs.packageName }} \
--build-arg VERSION=${{ inputs.version }} \
-f ./build-support/docker/Verify-Release-Amazon.dockerfile .

@ -1,3 +1,97 @@
## 1.15.4 (June 26, 2023)
FEATURES:
* cli: `consul operator raft list-peers` command shows the number of commits each follower is trailing the leader by to aid in troubleshooting. [[GH-17582](https://github.com/hashicorp/consul/issues/17582)]
* server: **(Enterprise Only)** allow automatic license utilization reporting. [[GH-5102](https://github.com/hashicorp/consul/issues/5102)]
IMPROVEMENTS:
* connect: update supported envoy versions to 1.22.11, 1.23.9, 1.24.7, 1.25.6 [[GH-17545](https://github.com/hashicorp/consul/issues/17545)]
* debug: change the default settings of the `consul debug` command: the default duration is now 5ms and the default log level is 'TRACE' [[GH-17596](https://github.com/hashicorp/consul/issues/17596)]
* fix metric names in /docs/agent/telemetry [[GH-17577](https://github.com/hashicorp/consul/issues/17577)]
* gateway: Change status condition reason for invalid certificate on a listener from "Accepted" to "ResolvedRefs". [[GH-17115](https://github.com/hashicorp/consul/issues/17115)]
* systemd: set service type to notify. [[GH-16845](https://github.com/hashicorp/consul/issues/16845)]
BUG FIXES:
* cache: fix a few minor goroutine leaks in leaf certs and the agent cache [[GH-17636](https://github.com/hashicorp/consul/issues/17636)]
* docs: fix list of telemetry metrics [[GH-17593](https://github.com/hashicorp/consul/issues/17593)]
* gateways: **(Enterprise only)** Fixed a bug in API gateways where gateway configuration objects in non-default partitions did not reconcile properly. [[GH-17581](https://github.com/hashicorp/consul/issues/17581)]
* gateways: Fixed a bug in API gateways where binding a route that only targets a service imported from a peer results
in the programmed gateway having no routes. [[GH-17609](https://github.com/hashicorp/consul/issues/17609)]
* gateways: Fixed a bug where API gateways were not being taken into account in determining xDS rate limits. [[GH-17631](https://github.com/hashicorp/consul/issues/17631)]
* http: fixed API endpoint `PUT /acl/token/:AccessorID` (update token), no longer requires `AccessorID` in the request body. Web UI can now update tokens. [[GH-17739](https://github.com/hashicorp/consul/issues/17739)]
* namespaces: **(Enterprise only)** fixes a bug where agent health checks stop syncing for all services on a node if the namespace of any service has been removed from the server.
* namespaces: **(Enterprise only)** fixes a bug where namespaces are stuck in a deferred deletion state indefinitely under some conditions.
Also fixes the Consul query metadata present in the HTTP headers of the namespace read and list endpoints.
* peering: Fix a bug that caused server agents to continue cleaning up peering resources even after loss of leadership. [[GH-17483](https://github.com/hashicorp/consul/issues/17483)]
* xds: Fixed a bug where modifying ACLs on a token being actively used for an xDS connection caused all xDS updates to fail. [[GH-17566](https://github.com/hashicorp/consul/issues/17566)]
## 1.14.8 (June 26, 2023)
SECURITY:
* Update to UBI base image to 9.2. [[GH-17513](https://github.com/hashicorp/consul/issues/17513)]
FEATURES:
* cli: `consul operator raft list-peers` command shows the number of commits each follower is trailing the leader by to aid in troubleshooting. [[GH-17582](https://github.com/hashicorp/consul/issues/17582)]
* server: **(Enterprise Only)** allow automatic license utilization reporting. [[GH-5102](https://github.com/hashicorp/consul/issues/5102)]
IMPROVEMENTS:
* connect: update supported envoy versions to 1.21.6, 1.22.11, 1.23.9, 1.24.7 [[GH-17547](https://github.com/hashicorp/consul/issues/17547)]
* debug: change the default settings of the `consul debug` command: the default duration is now 5ms and the default log level is 'TRACE' [[GH-17596](https://github.com/hashicorp/consul/issues/17596)]
* fix metric names in /docs/agent/telemetry [[GH-17577](https://github.com/hashicorp/consul/issues/17577)]
* peering: gRPC queries for TrustBundleList, TrustBundleRead, PeeringList, and PeeringRead now support blocking semantics,
reducing network and CPU demand.
The HTTP APIs for Peering List and Read have been updated to support blocking. [[GH-17426](https://github.com/hashicorp/consul/issues/17426)]
* raft: Remove expensive reflection from raft/mesh hot path [[GH-16552](https://github.com/hashicorp/consul/issues/16552)]
* systemd: set service type to notify. [[GH-16845](https://github.com/hashicorp/consul/issues/16845)]
BUG FIXES:
* cache: fix a few minor goroutine leaks in leaf certs and the agent cache [[GH-17636](https://github.com/hashicorp/consul/issues/17636)]
* connect: reverts #17317 fix that caused a downstream error for Ingress/Mesh/Terminating GWs when their respective config entry does not already exist. [[GH-17541](https://github.com/hashicorp/consul/issues/17541)]
* namespaces: **(Enterprise only)** fixes a bug where agent health checks stop syncing for all services on a node if the namespace of any service has been removed from the server.
* namespaces: **(Enterprise only)** fixes a bug where namespaces are stuck in a deferred deletion state indefinitely under some conditions.
Also fixes the Consul query metadata present in the HTTP headers of the namespace read and list endpoints.
* namespaces: adjusts the return type from HTTP list API to return the `api` module representation of a namespace.
This fixes an error with the `consul namespace list` command when a namespace has a deferred deletion timestamp.
* peering: Fix a bug that caused server agents to continue cleaning up peering resources even after loss of leadership. [[GH-17483](https://github.com/hashicorp/consul/issues/17483)]
* peering: Fix issue where modifying the list of exported services did not correctly replicate changes for services that exist in a non-default namespace. [[GH-17456](https://github.com/hashicorp/consul/issues/17456)]
## 1.13.9 (June 26, 2023)
BREAKING CHANGES:
* connect: Disable peering by default in connect proxies for Consul 1.13. This change was made to prevent inefficient polling
queries from having a negative impact on server performance. Peering in Consul 1.13 is an experimental feature and is not
recommended for use in production environments. If you still wish to use the experimental peering feature, ensure
[`peering.enabled = true`](https://developer.hashicorp.com/consul/docs/v1.13.x/agent/config/config-files#peering_enabled)
is set on all clients and servers. [[GH-17731](https://github.com/hashicorp/consul/issues/17731)]
SECURITY:
* Update to UBI base image to 9.2. [[GH-17513](https://github.com/hashicorp/consul/issues/17513)]
FEATURES:
* server: **(Enterprise Only)** allow automatic license utilization reporting. [[GH-5102](https://github.com/hashicorp/consul/issues/5102)]
IMPROVEMENTS:
* debug: change the default settings of the `consul debug` command: the default duration is now 5ms and the default log level is 'TRACE' [[GH-17596](https://github.com/hashicorp/consul/issues/17596)]
* systemd: set service type to notify. [[GH-16845](https://github.com/hashicorp/consul/issues/16845)]
BUG FIXES:
* cache: fix a few minor goroutine leaks in leaf certs and the agent cache [[GH-17636](https://github.com/hashicorp/consul/issues/17636)]
* namespaces: **(Enterprise only)** fixes a bug where namespaces are stuck in a deferred deletion state indefinitely under some conditions.
Also fixes the Consul query metadata present in the HTTP headers of the namespace read and list endpoints.
* namespaces: adjusts the return type from HTTP list API to return the `api` module representation of a namespace.
This fixes an error with the `consul namespace list` command when a namespace has a deferred deletion timestamp.
* peering: Fix a bug that caused server agents to continue cleaning up peering resources even after loss of leadership. [[GH-17483](https://github.com/hashicorp/consul/issues/17483)]
## 1.16.0-rc1 (June 12, 2023)
BREAKING CHANGES:

@ -171,7 +171,7 @@ dev-build:
dev-docker-dbg: dev-docker
@echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)"
@docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null
@docker pull hashicorp/consul:$(CONSUL_IMAGE_VERSION) >/dev/null
@echo "Building Consul Development container - $(CONSUL_DEV_IMAGE)"
@# 'consul-dbg:local' tag is needed to run the integration tests
@# 'consul-dev:latest' is needed by older workflows
@ -183,7 +183,7 @@ dev-docker-dbg: dev-docker
dev-docker: linux dev-build
@echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)"
@docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null
@docker pull hashicorp/consul:$(CONSUL_IMAGE_VERSION) >/dev/null
@echo "Building Consul Development container - $(CONSUL_DEV_IMAGE)"
@# 'consul:local' tag is needed to run the integration tests
@# 'consul-dev:latest' is needed by older workflows
@ -194,6 +194,7 @@ dev-docker: linux dev-build
--label version=$(CONSUL_VERSION) \
--load \
-f $(CURDIR)/build-support/docker/Consul-Dev-Multiarch.dockerfile $(CURDIR)/pkg/bin/
docker tag 'consul:local' '$(CONSUL_COMPAT_TEST_IMAGE):local'
check-remote-dev-image-env:
ifndef REMOTE_DEV_IMAGE
@ -204,7 +205,7 @@ remote-docker: check-remote-dev-image-env
$(MAKE) GOARCH=amd64 linux
$(MAKE) GOARCH=arm64 linux
@echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)"
@docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null
@docker pull hashicorp/consul:$(CONSUL_IMAGE_VERSION) >/dev/null
@echo "Building and Pushing Consul Development container - $(REMOTE_DEV_IMAGE)"
@if ! docker buildx inspect consul-builder; then \
docker buildx create --name consul-builder --driver docker-container --bootstrap; \
@ -221,7 +222,7 @@ remote-docker: check-remote-dev-image-env
# should only run in CI and not locally.
ci.dev-docker:
@echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)"
@docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null
@docker pull hashicorp/consul:$(CONSUL_IMAGE_VERSION) >/dev/null
@echo "Building Consul Development container - $(CI_DEV_DOCKER_IMAGE_NAME)"
@docker build $(NOCACHE) $(QUIET) -t '$(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_DOCKER_IMAGE_NAME):$(GIT_COMMIT)' \
--build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \

@ -441,8 +441,16 @@ func (s *HTTPHandlers) aclTokenSetInternal(req *http.Request, tokenAccessorID st
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: fmt.Sprintf("Token decoding failed: %v", err)}
}
if !create && args.ACLToken.AccessorID != tokenAccessorID {
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "Token Accessor ID in URL and payload do not match"}
if !create {
// NOTE: AccessorID in the request body is optional when not creating a new token.
// If not present in the body and only in the URL then it will be filled in by Consul.
if args.ACLToken.AccessorID == "" {
args.ACLToken.AccessorID = tokenAccessorID
}
if args.ACLToken.AccessorID != tokenAccessorID {
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "Token Accessor ID in URL and payload do not match"}
}
}
var out structs.ACLToken

@ -907,6 +907,48 @@ func TestACL_HTTP(t *testing.T) {
tokenMap[token.AccessorID] = token
})
t.Run("Update without AccessorID in request body", func(t *testing.T) {
originalToken := tokenMap[idMap["token-cloned"]]
// Secret will be filled in
tokenInput := &structs.ACLToken{
Description: "Even Better description for this cloned token",
Policies: []structs.ACLTokenPolicyLink{
{
ID: idMap["policy-read-all-nodes"],
Name: policyMap[idMap["policy-read-all-nodes"]].Name,
},
},
NodeIdentities: []*structs.ACLNodeIdentity{
{
NodeName: "foo",
Datacenter: "bar",
},
},
}
req, _ := http.NewRequest("PUT", "/v1/acl/token/"+originalToken.AccessorID, jsonBody(tokenInput))
req.Header.Add("X-Consul-Token", "root")
resp := httptest.NewRecorder()
obj, err := a.srv.ACLTokenCRUD(resp, req)
require.NoError(t, err)
token, ok := obj.(*structs.ACLToken)
require.True(t, ok)
require.Equal(t, originalToken.AccessorID, token.AccessorID)
require.Equal(t, originalToken.SecretID, token.SecretID)
require.Equal(t, tokenInput.Description, token.Description)
require.Equal(t, tokenInput.Policies, token.Policies)
require.Equal(t, tokenInput.NodeIdentities, token.NodeIdentities)
require.True(t, token.CreateIndex > 0)
require.True(t, token.CreateIndex < token.ModifyIndex)
require.NotNil(t, token.Hash)
require.NotEqual(t, token.Hash, []byte{})
require.NotEqual(t, token.Hash, originalToken.Hash)
tokenMap[token.AccessorID] = token
})
t.Run("CRUD Missing Token Accessor ID", func(t *testing.T) {
req, _ := http.NewRequest("GET", "/v1/acl/token/", nil)
req.Header.Add("X-Consul-Token", "root")

@ -1473,7 +1473,7 @@ func (b *builder) validate(rt RuntimeConfig) error {
return err
}
case structs.VaultCAProvider:
if _, err := ca.ParseVaultCAConfig(rt.ConnectCAConfig); err != nil {
if _, err := ca.ParseVaultCAConfig(rt.ConnectCAConfig, rt.PrimaryDatacenter == rt.Datacenter); err != nil {
return err
}
case structs.AWSCAProvider:

@ -113,7 +113,7 @@ func TestStructs_CAConfiguration_MsgpackEncodeDecode(t *testing.T) {
TLSSkipVerify: true,
},
parseFunc: func(t *testing.T, raw map[string]interface{}) interface{} {
config, err := ParseVaultCAConfig(raw)
config, err := ParseVaultCAConfig(raw, true)
require.NoError(t, err)
return config
},

@ -101,7 +101,7 @@ func vaultTLSConfig(config *structs.VaultCAProviderConfig) *vaultapi.TLSConfig {
// Configure sets up the provider using the given configuration.
// Configure supports being called multiple times to re-configure the provider.
func (v *VaultProvider) Configure(cfg ProviderConfig) error {
config, err := ParseVaultCAConfig(cfg.RawConfig)
config, err := ParseVaultCAConfig(cfg.RawConfig, v.isPrimary)
if err != nil {
return err
}
@ -192,11 +192,11 @@ func (v *VaultProvider) Configure(cfg ProviderConfig) error {
}
func (v *VaultProvider) ValidateConfigUpdate(prevRaw, nextRaw map[string]interface{}) error {
prev, err := ParseVaultCAConfig(prevRaw)
prev, err := ParseVaultCAConfig(prevRaw, v.isPrimary)
if err != nil {
return fmt.Errorf("failed to parse existing CA config: %w", err)
}
next, err := ParseVaultCAConfig(nextRaw)
next, err := ParseVaultCAConfig(nextRaw, v.isPrimary)
if err != nil {
return fmt.Errorf("failed to parse new CA config: %w", err)
}
@ -800,7 +800,7 @@ func (v *VaultProvider) Cleanup(providerTypeChange bool, otherConfig map[string]
v.Stop()
if !providerTypeChange {
newConfig, err := ParseVaultCAConfig(otherConfig)
newConfig, err := ParseVaultCAConfig(otherConfig, v.isPrimary)
if err != nil {
return err
}
@ -900,7 +900,7 @@ func (v *VaultProvider) autotidyIssuers(path string) (bool, string) {
return tidySet, errStr
}
func ParseVaultCAConfig(raw map[string]interface{}) (*structs.VaultCAProviderConfig, error) {
func ParseVaultCAConfig(raw map[string]interface{}, isPrimary bool) (*structs.VaultCAProviderConfig, error) {
config := structs.VaultCAProviderConfig{
CommonCAProviderConfig: defaultCommonConfig(),
}
@ -931,10 +931,10 @@ func ParseVaultCAConfig(raw map[string]interface{}) (*structs.VaultCAProviderCon
return nil, fmt.Errorf("only one of Vault token or Vault auth method can be provided, but not both")
}
if config.RootPKIPath == "" {
if isPrimary && config.RootPKIPath == "" {
return nil, fmt.Errorf("must provide a valid path to a root PKI backend")
}
if !strings.HasSuffix(config.RootPKIPath, "/") {
if config.RootPKIPath != "" && !strings.HasSuffix(config.RootPKIPath, "/") {
config.RootPKIPath += "/"
}
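For illustration (not part of the diff): a minimal sketch of the new secondary-datacenter behavior, calling the exported helper directly. The token and path values are placeholders.

```go
// Sketch: RootPKIPath may now be omitted when isPrimary is false.
package main

import (
	"fmt"

	"github.com/hashicorp/consul/agent/connect/ca"
)

func main() {
	raw := map[string]interface{}{
		"Token":               "vault-token",          // placeholder
		"IntermediatePKIPath": "connect-intermediate", // a trailing "/" is appended
	}
	// Second argument is the new isPrimary flag; false (a secondary
	// datacenter) no longer requires RootPKIPath.
	cfg, err := ca.ParseVaultCAConfig(raw, false)
	fmt.Println(cfg, err)
	// With isPrimary=true the same config would fail:
	// "must provide a valid path to a root PKI backend"
}
```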

@ -60,6 +60,7 @@ func TestVaultCAProvider_ParseVaultCAConfig(t *testing.T) {
cases := map[string]struct {
rawConfig map[string]interface{}
expConfig *structs.VaultCAProviderConfig
isPrimary bool
expError string
}{
"no token and no auth method provided": {
@ -70,15 +71,26 @@ func TestVaultCAProvider_ParseVaultCAConfig(t *testing.T) {
rawConfig: map[string]interface{}{"Token": "test", "AuthMethod": map[string]interface{}{"Type": "test"}},
expError: "only one of Vault token or Vault auth method can be provided, but not both",
},
"no root PKI path": {
rawConfig: map[string]interface{}{"Token": "test"},
"primary no root PKI path": {
rawConfig: map[string]interface{}{"Token": "test", "IntermediatePKIPath": "test"},
isPrimary: true,
expError: "must provide a valid path to a root PKI backend",
},
"secondary no root PKI path": {
rawConfig: map[string]interface{}{"Token": "test", "IntermediatePKIPath": "test"},
isPrimary: false,
expConfig: &structs.VaultCAProviderConfig{
CommonCAProviderConfig: defaultCommonConfig(),
Token: "test",
IntermediatePKIPath: "test/",
},
},
"no root intermediate path": {
rawConfig: map[string]interface{}{"Token": "test", "RootPKIPath": "test"},
expError: "must provide a valid path for the intermediate PKI backend",
},
"adds a slash to RootPKIPath and IntermediatePKIPath": {
isPrimary: true,
rawConfig: map[string]interface{}{"Token": "test", "RootPKIPath": "test", "IntermediatePKIPath": "test"},
expConfig: &structs.VaultCAProviderConfig{
CommonCAProviderConfig: defaultCommonConfig(),
@ -91,7 +103,7 @@ func TestVaultCAProvider_ParseVaultCAConfig(t *testing.T) {
for name, c := range cases {
t.Run(name, func(t *testing.T) {
config, err := ParseVaultCAConfig(c.rawConfig)
config, err := ParseVaultCAConfig(c.rawConfig, c.isPrimary)
if c.expError != "" {
require.EqualError(t, err, c.expError)
} else {

@ -1767,5 +1767,11 @@ func TestHealth_RPC_Filter(t *testing.T) {
out = new(structs.IndexedHealthChecks)
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &args, out))
require.Len(t, out.HealthChecks, 1)
args.State = api.HealthAny
args.Filter = "connect in ServiceTags and v2 in ServiceTags"
out = new(structs.IndexedHealthChecks)
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &args, out))
require.Len(t, out.HealthChecks, 1)
})
}

@ -1055,7 +1055,7 @@ func (d *DNSServer) trimDomain(query string) string {
longer, shorter = shorter, longer
}
if strings.HasSuffix(query, longer) {
if strings.HasSuffix(query, "."+strings.TrimLeft(longer, ".")) {
return strings.TrimSuffix(query, longer)
}
return strings.TrimSuffix(query, shorter)
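For illustration (not part of the diff): with datacenter `dc-test` and alt domain `test.consul.`, the old suffix check matches inside the `dc-test` label, while the new check anchors the match to a label boundary. A standalone sketch:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	query := "test-node.node.dc-test.consul."
	longer := "test.consul." // alt domain; overlaps the "dc-test" DC name

	// Old check: true, so the query was wrongly trimmed to "test-node.node.dc-".
	fmt.Println(strings.HasSuffix(query, longer))

	// New check: false, because ".test.consul." is not a suffix of the query,
	// so the plain consul domain is trimmed instead.
	fmt.Println(strings.HasSuffix(query, "."+strings.TrimLeft(longer, ".")))
}
```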

@ -7071,6 +7071,45 @@ func TestDNS_AltDomains_Overlap(t *testing.T) {
}
}
func TestDNS_AltDomain_DCName_Overlap(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
// this tests the DC name overlap with the consul domain/alt-domain
// we should get a response when the DC suffix is a prefix of the consul alt-domain
t.Parallel()
a := NewTestAgent(t, `
datacenter = "dc-test"
node_name = "test-node"
alt_domain = "test.consul."
`)
defer a.Shutdown()
testrpc.WaitForLeader(t, a.RPC, "dc-test")
questions := []string{
"test-node.node.dc-test.consul.",
"test-node.node.dc-test.test.consul.",
}
for _, question := range questions {
m := new(dns.Msg)
m.SetQuestion(question, dns.TypeA)
c := new(dns.Client)
in, _, err := c.Exchange(m, a.DNSAddr())
if err != nil {
t.Fatalf("err: %v", err)
}
require.Len(t, in.Answer, 1)
aRec, ok := in.Answer[0].(*dns.A)
require.True(t, ok)
require.Equal(t, aRec.A.To4().String(), "127.0.0.1")
}
}
func TestDNS_PreparedQuery_AllowStale(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")

@ -59,7 +59,7 @@ func TestConstructor(t *testing.T) {
},
},
},
errMsg: `invalid host for Target.URI "foo.bar.com:9191": expected 'localhost' or '127.0.0.1'`,
errMsg: `invalid host for Target.URI "foo.bar.com:9191": expected "localhost", "127.0.0.1", or "::1"`,
},
"non-loopback address": {
args: map[string]any{
@ -72,7 +72,34 @@ func TestConstructor(t *testing.T) {
},
},
},
errMsg: `invalid host for Target.URI "10.0.0.1:9191": expected 'localhost' or '127.0.0.1'`,
errMsg: `invalid host for Target.URI "10.0.0.1:9191": expected "localhost", "127.0.0.1", or "::1"`,
},
"invalid target port": {
args: map[string]any{
"ProxyType": "connect-proxy",
"Config": map[string]any{
"GrpcService": map[string]any{
"Target": map[string]any{
"URI": "localhost:zero",
},
},
},
},
errMsg: `invalid format for Target.URI "localhost:zero": expected host:port`,
},
"invalid target timeout": {
args: map[string]any{
"ProxyType": "connect-proxy",
"Config": map[string]any{
"GrpcService": map[string]any{
"Target": map[string]any{
"URI": "localhost:9191",
"Timeout": "one",
},
},
},
},
errMsg: `failed to parse Target.Timeout "one" as a duration`,
},
"no uri or service target": {
args: map[string]any{

@ -34,6 +34,9 @@ const (
defaultMetadataNS = "consul"
defaultStatPrefix = "response"
defaultStatusOnError = 403
localhost = "localhost"
localhostIPv4 = "127.0.0.1"
localhostIPv6 = "::1"
)
type extAuthzConfig struct {
@ -185,6 +188,12 @@ func (c *extAuthzConfig) toEnvoyCluster(_ *cmn.RuntimeConfig) (*envoy_cluster_v3
return nil, err
}
clusterType := &envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_STATIC}
if host == localhost {
// If the host is "localhost" use a STRICT_DNS cluster type to perform DNS lookup.
clusterType = &envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_STRICT_DNS}
}
var typedExtProtoOpts map[string]*anypb.Any
if c.isGRPC() {
// By default HTTP/1.1 is used for the transport protocol. gRPC requires that we explicitly configure HTTP/2
@ -205,7 +214,7 @@ func (c *extAuthzConfig) toEnvoyCluster(_ *cmn.RuntimeConfig) (*envoy_cluster_v3
return &envoy_cluster_v3.Cluster{
Name: LocalExtAuthzClusterName,
ClusterDiscoveryType: &envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_STATIC},
ClusterDiscoveryType: clusterType,
ConnectTimeout: target.timeoutDurationPB(),
LoadAssignment: &envoy_endpoint_v3.ClusterLoadAssignment{
ClusterName: LocalExtAuthzClusterName,
@ -645,18 +654,13 @@ func (t *Target) validate() error {
}
if t.isURI() {
// Strip the protocol if one was provided
if _, addr, hasProto := strings.Cut(t.URI, "://"); hasProto {
t.URI = addr
}
addr := strings.Split(t.URI, ":")
if len(addr) == 2 {
t.host = addr[0]
if t.host != "localhost" && t.host != "127.0.0.1" {
resultErr = multierror.Append(resultErr, fmt.Errorf("invalid host for Target.URI %q: expected 'localhost' or '127.0.0.1'", t.URI))
}
if t.port, err = strconv.Atoi(addr[1]); err != nil {
resultErr = multierror.Append(resultErr, fmt.Errorf("invalid port for Target.URI %q", addr[1]))
t.host, t.port, err = parseAddr(t.URI)
if err == nil {
switch t.host {
case localhost, localhostIPv4, localhostIPv6:
default:
resultErr = multierror.Append(resultErr,
fmt.Errorf("invalid host for Target.URI %q: expected %q, %q, or %q", t.URI, localhost, localhostIPv4, localhostIPv6))
}
} else {
resultErr = multierror.Append(resultErr, fmt.Errorf("invalid format for Target.URI %q: expected host:port", t.URI))
@ -672,3 +676,22 @@ func (t *Target) validate() error {
}
return resultErr
}
func parseAddr(s string) (host string, port int, err error) {
// Strip the protocol if one was provided
if _, addr, hasProto := strings.Cut(s, "://"); hasProto {
s = addr
}
idx := strings.LastIndex(s, ":")
switch idx {
case -1, len(s) - 1:
err = fmt.Errorf("invalid input format %q: expected host:port", s)
case 0:
host = localhost
port, err = strconv.Atoi(s[idx+1:])
default:
host = s[:idx]
port, err = strconv.Atoi(s[idx+1:])
}
return
}
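A hedged walkthrough of the helper's cases (not part of the diff; the package name and demo function are assumed for illustration, since `parseAddr` is unexported):

```go
package extauthz

import "fmt"

// Hypothetical snippet for a test file; it walks the cases the switch above handles.
func demoParseAddr() {
	for _, s := range []string{
		"localhost:9191",        // host "localhost", port 9191
		"grpc://127.0.0.1:9191", // protocol stripped first, then parsed as host:port
		":9191",                 // idx == 0: host defaults to "localhost"
		"localhost",             // no colon: "expected host:port" error
		"localhost:",            // trailing colon: same error
	} {
		host, port, err := parseAddr(s)
		fmt.Printf("%-22s -> host=%q port=%d err=%v\n", s, host, port, err)
	}
}
```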

@ -74,7 +74,7 @@ func TestListByOwner_TypeNotRegistered(t *testing.T) {
})
require.Error(t, err)
require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String())
require.Contains(t, err.Error(), "resource type demo.v2.artist not registered")
require.Contains(t, err.Error(), "resource type demo.v2.Artist not registered")
}
func TestListByOwner_Empty(t *testing.T) {
@ -126,7 +126,7 @@ func TestListByOwner_Many(t *testing.T) {
}
func TestListByOwner_ACL_PerTypeDenied(t *testing.T) {
authz := AuthorizerFrom(t, `key_prefix "resource/demo.v2.album/" { policy = "deny" }`)
authz := AuthorizerFrom(t, `key_prefix "resource/demo.v2.Album/" { policy = "deny" }`)
_, rsp, err := roundTripListByOwner(t, authz)
// verify resource filtered out, hence no results
@ -135,7 +135,7 @@ func TestListByOwner_ACL_PerTypeDenied(t *testing.T) {
}
func TestListByOwner_ACL_PerTypeAllowed(t *testing.T) {
authz := AuthorizerFrom(t, `key_prefix "resource/demo.v2.album/" { policy = "read" }`)
authz := AuthorizerFrom(t, `key_prefix "resource/demo.v2.Album/" { policy = "read" }`)
album, rsp, err := roundTripListByOwner(t, authz)
// verify resource not filtered out

@ -58,7 +58,7 @@ func TestList_TypeNotFound(t *testing.T) {
})
require.Error(t, err)
require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String())
require.Contains(t, err.Error(), "resource type demo.v2.artist not registered")
require.Contains(t, err.Error(), "resource type demo.v2.Artist not registered")
}
func TestList_Empty(t *testing.T) {
@ -178,7 +178,7 @@ func TestList_ACL_ListAllowed_ReadDenied(t *testing.T) {
// allow list, deny read
authz := AuthorizerFrom(t, demo.ArtistV2ListPolicy,
`key_prefix "resource/demo.v2.artist/" { policy = "deny" }`)
`key_prefix "resource/demo.v2.Artist/" { policy = "deny" }`)
_, rsp, err := roundTripList(t, authz)
// verify resource filtered out by key:read denied hence no results

@ -71,7 +71,7 @@ func TestRead_TypeNotFound(t *testing.T) {
_, err = client.Read(context.Background(), &pbresource.ReadRequest{Id: artist.Id})
require.Error(t, err)
require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String())
require.Contains(t, err.Error(), "resource type demo.v2.artist not registered")
require.Contains(t, err.Error(), "resource type demo.v2.Artist not registered")
}
func TestRead_ResourceNotFound(t *testing.T) {

@ -66,7 +66,7 @@ func TestWatchList_TypeNotFound(t *testing.T) {
err = mustGetError(t, rspCh)
require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String())
require.Contains(t, err.Error(), "resource type demo.v2.artist not registered")
require.Contains(t, err.Error(), "resource type demo.v2.Artist not registered")
}
func TestWatchList_GroupVersionMatches(t *testing.T) {
@ -172,7 +172,7 @@ func TestWatchList_ACL_ListAllowed_ReadDenied(t *testing.T) {
// allow list, deny read
authz := AuthorizerFrom(t, `
key_prefix "resource/" { policy = "list" }
key_prefix "resource/demo.v2.artist/" { policy = "deny" }
key_prefix "resource/demo.v2.Artist/" { policy = "deny" }
`)
rspCh, _ := roundTripACL(t, authz)
@ -187,7 +187,7 @@ func TestWatchList_ACL_ListAllowed_ReadAllowed(t *testing.T) {
// allow list, allow read
authz := AuthorizerFrom(t, `
key_prefix "resource/" { policy = "list" }
key_prefix "resource/demo.v2.artist/" { policy = "read" }
key_prefix "resource/demo.v2.Artist/" { policy = "read" }
`)
rspCh, artist := roundTripACL(t, authz)

@ -180,7 +180,7 @@ func TestWriteStatus_TypeNotFound(t *testing.T) {
_, err = client.WriteStatus(testContext(t), validWriteStatusRequest(t, res))
require.Error(t, err)
require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String())
require.Contains(t, err.Error(), "resource type demo.v2.artist not registered")
require.Contains(t, err.Error(), "resource type demo.v2.Artist not registered")
}
func TestWriteStatus_ResourceNotFound(t *testing.T) {

@ -151,7 +151,7 @@ func TestWrite_TypeNotFound(t *testing.T) {
_, err = client.Write(testContext(t), &pbresource.WriteRequest{Resource: res})
require.Error(t, err)
require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String())
require.Contains(t, err.Error(), "resource type demo.v2.artist not registered")
require.Contains(t, err.Error(), "resource type demo.v2.Artist not registered")
}
func TestWrite_ACLs(t *testing.T) {

@ -13,6 +13,10 @@ import (
// DeepCopy generates a deep copy of *ConfigSnapshot
func (o *ConfigSnapshot) DeepCopy() *ConfigSnapshot {
var cp ConfigSnapshot = *o
if o.ServiceLocality != nil {
cp.ServiceLocality = new(structs.Locality)
*cp.ServiceLocality = *o.ServiceLocality
}
if o.ServiceMeta != nil {
cp.ServiceMeta = make(map[string]string, len(o.ServiceMeta))
for k2, v2 := range o.ServiceMeta {

@ -901,6 +901,7 @@ func IngressListenerKeyFromListener(l structs.IngressListener) IngressListenerKe
type ConfigSnapshot struct {
Kind structs.ServiceKind
Service string
ServiceLocality *structs.Locality
ProxyID ProxyID
Address string
Port int

@ -124,6 +124,7 @@ type serviceInstance struct {
taggedAddresses map[string]structs.ServiceAddress
proxyCfg structs.ConnectProxyConfig
token string
locality *structs.Locality
}
func copyProxyConfig(ns *structs.NodeService) (structs.ConnectProxyConfig, error) {
@ -244,6 +245,7 @@ func newServiceInstanceFromNodeService(id ProxyID, ns *structs.NodeService, toke
return serviceInstance{
kind: ns.Kind,
service: ns.Service,
locality: ns.Locality,
proxyID: id,
address: ns.Address,
port: ns.Port,
@ -303,6 +305,7 @@ func newConfigSnapshotFromServiceInstance(s serviceInstance, config stateConfig)
return ConfigSnapshot{
Kind: s.kind,
Service: s.service,
ServiceLocality: s.locality,
ProxyID: s.proxyID,
Address: s.address,
Port: s.port,

@ -316,6 +316,15 @@ func (e *JWTProviderConfigEntry) GetRaftIndex() *RaftIndex { retur
func (e *JWTProviderConfigEntry) CanRead(authz acl.Authorizer) error {
var authzContext acl.AuthorizerContext
e.FillAuthzContext(&authzContext)
// allow service-identity tokens the ability to read jwt-providers
// this is a workaround to allow sidecar proxies to read the jwt-providers
// see issue: https://github.com/hashicorp/consul/issues/17886 for more details
err := authz.ToAllowAuthorizer().ServiceWriteAnyAllowed(&authzContext)
if err == nil {
return nil
}
return authz.ToAllowAuthorizer().MeshReadAllowed(&authzContext)
}

@ -338,6 +338,24 @@ func TestJWTProviderConfigEntry_ACLs(t *testing.T) {
canRead: false,
canWrite: false,
},
{
name: "jwt-provider: any service write",
authorizer: newTestAuthz(t, `service "" { policy = "write" }`),
canRead: true,
canWrite: false,
},
{
name: "jwt-provider: specific service write",
authorizer: newTestAuthz(t, `service "web" { policy = "write" }`),
canRead: true,
canWrite: false,
},
{
name: "jwt-provider: any service prefix write",
authorizer: newTestAuthz(t, `service_prefix "" { policy = "write" }`),
canRead: true,
canWrite: false,
},
{
name: "jwt-provider: mesh read",
authorizer: newTestAuthz(t, `mesh = "read"`),

@ -2095,6 +2095,18 @@ func (csn *CheckServiceNode) CanRead(authz acl.Authorizer) acl.EnforcementDecisi
return acl.Allow
}
func (csn *CheckServiceNode) Locality() *Locality {
if csn.Service != nil && csn.Service.Locality != nil {
return csn.Service.Locality
}
if csn.Node != nil && csn.Node.Locality != nil {
return csn.Node.Locality
}
return nil
}
type CheckServiceNodes []CheckServiceNode
func (csns CheckServiceNodes) DeepCopy() CheckServiceNodes {

@ -676,6 +676,7 @@ end`,
ns.Proxy.EnvoyExtensions = makeExtAuthzEnvoyExtension(
"http",
"dest=local",
"target-uri=localhost:9191",
"insert=AfterLastMatch:envoy.filters.http.header_to_metadata",
)
}, nil)

@ -50,6 +50,13 @@ func makeExtAuthzEnvoyExtension(svc string, opts ...string) []structs.EnvoyExten
"FilterName": filterName,
}
}
case "target-uri":
target = map[string]any{"URI": v}
configMap = map[string]any{
serviceKey: map[string]any{
"Target": target,
},
}
case "config-type":
if v == "full" {
target["Timeout"] = "2s"

@ -135,7 +135,9 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.
endpoints, ok := cfgSnap.ConnectProxy.PreparedQueryEndpoints[uid]
if ok {
la := makeLoadAssignment(
cfgSnap,
clusterName,
nil,
[]loadAssignmentEndpointGroup{
{Endpoints: endpoints},
},
@ -158,7 +160,9 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.
endpoints, ok := cfgSnap.ConnectProxy.DestinationGateways.Get(uid)
if ok {
la := makeLoadAssignment(
cfgSnap,
name,
nil,
[]loadAssignmentEndpointGroup{
{Endpoints: endpoints},
},
@ -224,7 +228,9 @@ func (s *ResourceGenerator) endpointsFromSnapshotMeshGateway(cfgSnap *proxycfg.C
clusterName := connect.GatewaySNI(key.Datacenter, key.Partition, cfgSnap.Roots.TrustDomain)
la := makeLoadAssignment(
cfgSnap,
clusterName,
nil,
[]loadAssignmentEndpointGroup{
{Endpoints: endpoints},
},
@ -239,7 +245,9 @@ func (s *ResourceGenerator) endpointsFromSnapshotMeshGateway(cfgSnap *proxycfg.C
clusterName := cfgSnap.ServerSNIFn(key.Datacenter, "")
la := makeLoadAssignment(
cfgSnap,
clusterName,
nil,
[]loadAssignmentEndpointGroup{
{Endpoints: endpoints},
},
@ -409,7 +417,9 @@ func (s *ResourceGenerator) endpointsFromServicesAndResolvers(
for subsetName, groups := range clusterEndpoints {
clusterName := connect.ServiceSNI(svc.Name, subsetName, svc.NamespaceOrDefault(), svc.PartitionOrDefault(), cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain)
la := makeLoadAssignment(
cfgSnap,
clusterName,
nil,
groups,
cfgSnap.Locality,
)
@ -444,7 +454,9 @@ func (s *ResourceGenerator) makeEndpointsForOutgoingPeeredServices(
groups := []loadAssignmentEndpointGroup{{Endpoints: serviceGroup.Nodes, OnlyPassing: false}}
la := makeLoadAssignment(
cfgSnap,
clusterName,
nil,
groups,
// Use an empty key here so that it never matches. This will force the mesh gateway to always
// reference the remote mesh gateway's wan addr.
@ -606,7 +618,9 @@ func (s *ResourceGenerator) makeUpstreamLoadAssignmentForPeerService(
return la, nil
}
la = makeLoadAssignment(
cfgSnap,
clusterName,
nil,
[]loadAssignmentEndpointGroup{
{Endpoints: localGw},
},
@ -626,7 +640,9 @@ func (s *ResourceGenerator) makeUpstreamLoadAssignmentForPeerService(
return nil, nil
}
la = makeLoadAssignment(
cfgSnap,
clusterName,
nil,
[]loadAssignmentEndpointGroup{
{Endpoints: endpoints},
},
@ -756,7 +772,9 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
}
la := makeLoadAssignment(
cfgSnap,
clusterName,
ti.PrioritizeByLocality,
[]loadAssignmentEndpointGroup{endpointGroup},
gatewayKey,
)
@ -842,7 +860,7 @@ type loadAssignmentEndpointGroup struct {
OverrideHealth envoy_core_v3.HealthStatus
}
func makeLoadAssignment(clusterName string, endpointGroups []loadAssignmentEndpointGroup, localKey proxycfg.GatewayKey) *envoy_endpoint_v3.ClusterLoadAssignment {
func makeLoadAssignment(cfgSnap *proxycfg.ConfigSnapshot, clusterName string, policy *structs.DiscoveryPrioritizeByLocality, endpointGroups []loadAssignmentEndpointGroup, localKey proxycfg.GatewayKey) *envoy_endpoint_v3.ClusterLoadAssignment {
cla := &envoy_endpoint_v3.ClusterLoadAssignment{
ClusterName: clusterName,
Endpoints: make([]*envoy_endpoint_v3.LocalityLbEndpoints, 0, len(endpointGroups)),
@ -856,35 +874,46 @@ func makeLoadAssignment(clusterName string, endpointGroups []loadAssignmentEndpo
}
}
for priority, endpointGroup := range endpointGroups {
endpoints := endpointGroup.Endpoints
es := make([]*envoy_endpoint_v3.LbEndpoint, 0, len(endpoints))
var priority uint32
for _, ep := range endpoints {
// TODO (mesh-gateway) - should we respect the translate_wan_addrs configuration here or just always use the wan for cross-dc?
_, addr, port := ep.BestAddress(!localKey.Matches(ep.Node.Datacenter, ep.Node.PartitionOrDefault()))
healthStatus, weight := calculateEndpointHealthAndWeight(ep, endpointGroup.OnlyPassing)
for _, endpointGroup := range endpointGroups {
endpointsByLocality, err := groupedEndpoints(cfgSnap.ServiceLocality, policy, endpointGroup.Endpoints)
if endpointGroup.OverrideHealth != envoy_core_v3.HealthStatus_UNKNOWN {
healthStatus = endpointGroup.OverrideHealth
}
if err != nil {
continue
}
endpoint := &envoy_endpoint_v3.Endpoint{
Address: makeAddress(addr, port),
for _, endpoints := range endpointsByLocality {
es := make([]*envoy_endpoint_v3.LbEndpoint, 0, len(endpointGroup.Endpoints))
for _, ep := range endpoints {
// TODO (mesh-gateway) - should we respect the translate_wan_addrs configuration here or just always use the wan for cross-dc?
_, addr, port := ep.BestAddress(!localKey.Matches(ep.Node.Datacenter, ep.Node.PartitionOrDefault()))
healthStatus, weight := calculateEndpointHealthAndWeight(ep, endpointGroup.OnlyPassing)
if endpointGroup.OverrideHealth != envoy_core_v3.HealthStatus_UNKNOWN {
healthStatus = endpointGroup.OverrideHealth
}
endpoint := &envoy_endpoint_v3.Endpoint{
Address: makeAddress(addr, port),
}
es = append(es, &envoy_endpoint_v3.LbEndpoint{
HostIdentifier: &envoy_endpoint_v3.LbEndpoint_Endpoint{
Endpoint: endpoint,
},
HealthStatus: healthStatus,
LoadBalancingWeight: makeUint32Value(weight),
})
}
es = append(es, &envoy_endpoint_v3.LbEndpoint{
HostIdentifier: &envoy_endpoint_v3.LbEndpoint_Endpoint{
Endpoint: endpoint,
},
HealthStatus: healthStatus,
LoadBalancingWeight: makeUint32Value(weight),
cla.Endpoints = append(cla.Endpoints, &envoy_endpoint_v3.LocalityLbEndpoints{
Priority: priority,
LbEndpoints: es,
})
}
cla.Endpoints = append(cla.Endpoints, &envoy_endpoint_v3.LocalityLbEndpoints{
Priority: uint32(priority),
LbEndpoints: es,
})
priority++
}
}
return cla

@ -101,6 +101,7 @@ func Test_makeLoadAssignment(t *testing.T) {
tests := []struct {
name string
clusterName string
locality *structs.Locality
endpoints []loadAssignmentEndpointGroup
want *envoy_endpoint_v3.ClusterLoadAssignment
}{
@ -211,11 +212,24 @@ func Test_makeLoadAssignment(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := makeLoadAssignment(
&proxycfg.ConfigSnapshot{ServiceLocality: tt.locality},
tt.clusterName,
nil,
tt.endpoints,
proxycfg.GatewayKey{Datacenter: "dc1"},
)
require.Equal(t, tt.want, got)
if tt.locality == nil {
got := makeLoadAssignment(
&proxycfg.ConfigSnapshot{ServiceLocality: &structs.Locality{Region: "us-west-1", Zone: "us-west-1a"}},
tt.clusterName,
nil,
tt.endpoints,
proxycfg.GatewayKey{Datacenter: "dc1"},
)
require.Equal(t, tt.want, got)
}
})
}
}

@ -27,6 +27,8 @@ type targetInfo struct {
// Region is the region from the failover target's Locality. nil means the
// target is in the local Consul cluster.
Region *string
PrioritizeByLocality *structs.DiscoveryPrioritizeByLocality
}
type discoChainTargetGroup struct {
@ -87,7 +89,7 @@ func (s *ResourceGenerator) mapDiscoChainTargets(cfgSnap *proxycfg.ConfigSnapsho
var sni, rootPEMs string
var spiffeIDs []string
targetUID := proxycfg.NewUpstreamIDFromTargetID(tid)
ti := targetInfo{TargetID: tid}
ti := targetInfo{TargetID: tid, PrioritizeByLocality: target.PrioritizeByLocality}
configureTLS := true
if forMeshGateway {

@ -1109,6 +1109,15 @@ func TestListenersFromSnapshot(t *testing.T) {
nil)
},
},
{
name: "connect-proxy-without-tproxy-and-permissive-mtls",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshot(t, func(ns *structs.NodeService) {
ns.Proxy.MutualTLSMode = structs.MutualTLSModePermissive
},
nil)
},
},
}
tests = append(tests, makeListenerDiscoChainTests(false)...)

@ -0,0 +1,21 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package xds
import (
"fmt"
"github.com/hashicorp/consul/agent/structs"
)
func groupedEndpoints(locality *structs.Locality, policy *structs.DiscoveryPrioritizeByLocality, csns structs.CheckServiceNodes) ([]structs.CheckServiceNodes, error) {
switch {
case policy == nil || policy.Mode == "" || policy.Mode == "none":
return []structs.CheckServiceNodes{csns}, nil
case policy.Mode == "failover":
return prioritizeByLocalityFailover(locality, csns), nil
default:
return nil, fmt.Errorf("unexpected priortize-by-locality mode %q", policy.Mode)
}
}

@ -0,0 +1,15 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
//go:build !consulent
// +build !consulent
package xds
import (
"github.com/hashicorp/consul/agent/structs"
)
func prioritizeByLocalityFailover(locality *structs.Locality, csns structs.CheckServiceNodes) []structs.CheckServiceNodes {
return nil
}
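For illustration (not part of the diff): a hypothetical in-package sketch of the three cases `groupedEndpoints` handles; the demo function and locality values are made up.

```go
package xds

import "github.com/hashicorp/consul/agent/structs"

// Hypothetical walkthrough; in the generator the locality comes from
// cfgSnap.ServiceLocality and the policy from the discovery-chain target.
func demoGroupedEndpoints(csns structs.CheckServiceNodes) {
	local := &structs.Locality{Region: "us-west-1", Zone: "us-west-1a"}

	// nil policy (or Mode "" / "none"): one group containing everything.
	one, _ := groupedEndpoints(local, nil, csns)
	_ = one // []structs.CheckServiceNodes{csns}

	// Mode "failover": delegates to prioritizeByLocalityFailover, which the
	// OSS stub above reduces to nil.
	failover := &structs.DiscoveryPrioritizeByLocality{Mode: "failover"}
	byLocality, _ := groupedEndpoints(local, failover, csns)
	_ = byLocality

	// Any other mode is rejected with an error.
	_, err := groupedEndpoints(local, &structs.DiscoveryPrioritizeByLocality{Mode: "bogus"}, csns)
	_ = err // `unexpected prioritize-by-locality mode "bogus"`
}
```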

@ -142,7 +142,7 @@
{
"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
"name": "local_ext_authz",
"type": "STATIC",
"type": "STRICT_DNS",
"loadAssignment": {
"clusterName": "local_ext_authz",
"endpoints": [
@ -152,7 +152,7 @@
"endpoint": {
"address": {
"socketAddress": {
"address": "127.0.0.1",
"address": "localhost",
"portValue": 9191
}
}

@ -0,0 +1,115 @@
{
"versionInfo": "00000001",
"resources": [
{
"@type": "type.googleapis.com/envoy.config.listener.v3.Listener",
"name": "db:127.0.0.1:9191",
"address": {
"socketAddress": {
"address": "127.0.0.1",
"portValue": 9191
}
},
"filterChains": [
{
"filters": [
{
"name": "envoy.filters.network.tcp_proxy",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy",
"statPrefix": "upstream.db.default.default.dc1",
"cluster": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
}
}
]
}
],
"trafficDirection": "OUTBOUND"
},
{
"@type": "type.googleapis.com/envoy.config.listener.v3.Listener",
"name": "prepared_query:geo-cache:127.10.10.10:8181",
"address": {
"socketAddress": {
"address": "127.10.10.10",
"portValue": 8181
}
},
"filterChains": [
{
"filters": [
{
"name": "envoy.filters.network.tcp_proxy",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy",
"statPrefix": "upstream.prepared_query_geo-cache",
"cluster": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul"
}
}
]
}
],
"trafficDirection": "OUTBOUND"
},
{
"@type": "type.googleapis.com/envoy.config.listener.v3.Listener",
"name": "public_listener:0.0.0.0:9999",
"address": {
"socketAddress": {
"address": "0.0.0.0",
"portValue": 9999
}
},
"filterChains": [
{
"filters": [
{
"name": "envoy.filters.network.rbac",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC",
"rules": {},
"statPrefix": "connect_authz"
}
},
{
"name": "envoy.filters.network.tcp_proxy",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy",
"statPrefix": "public_listener",
"cluster": "local_app"
}
}
],
"transportSocket": {
"name": "tls",
"typedConfig": {
"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext",
"commonTlsContext": {
"tlsParams": {},
"tlsCertificates": [
{
"certificateChain": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
},
"privateKey": {
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
}
}
],
"validationContext": {
"trustedCa": {
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
}
}
},
"requireClientCertificate": true
}
}
}
],
"trafficDirection": "INBOUND"
}
],
"typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener",
"nonce": "00000001"
}

@ -92,13 +92,20 @@ func keyPrefixWatch(params map[string]interface{}) (WatcherFunc, error) {
// servicesWatch is used to watch the list of available services
func servicesWatch(params map[string]interface{}) (WatcherFunc, error) {
stale := false
filter := ""
if err := assignValueBool(params, "stale", &stale); err != nil {
return nil, err
}
if err := assignValue(params, "filter", &filter); err != nil {
return nil, err
}
fn := func(p *Plan) (BlockingParamVal, interface{}, error) {
catalog := p.client.Catalog()
opts := makeQueryOptionsWithContext(p, stale)
if filter != "" {
opts.Filter = filter
}
defer p.cancelFunc()
services, meta, err := catalog.Services(&opts)
if err != nil {
@ -112,13 +119,20 @@ func servicesWatch(params map[string]interface{}) (WatcherFunc, error) {
// nodesWatch is used to watch the list of available nodes
func nodesWatch(params map[string]interface{}) (WatcherFunc, error) {
stale := false
filter := ""
if err := assignValueBool(params, "stale", &stale); err != nil {
return nil, err
}
if err := assignValue(params, "filter", &filter); err != nil {
return nil, err
}
fn := func(p *Plan) (BlockingParamVal, interface{}, error) {
catalog := p.client.Catalog()
opts := makeQueryOptionsWithContext(p, stale)
if filter != "" {
opts.Filter = filter
}
defer p.cancelFunc()
nodes, meta, err := catalog.Nodes(&opts)
if err != nil {
@ -132,9 +146,13 @@ func nodesWatch(params map[string]interface{}) (WatcherFunc, error) {
// serviceWatch is used to watch a specific service for changes
func serviceWatch(params map[string]interface{}) (WatcherFunc, error) {
stale := false
filter := ""
if err := assignValueBool(params, "stale", &stale); err != nil {
return nil, err
}
if err := assignValue(params, "filter", &filter); err != nil {
return nil, err
}
var (
service string
@ -158,6 +176,9 @@ func serviceWatch(params map[string]interface{}) (WatcherFunc, error) {
fn := func(p *Plan) (BlockingParamVal, interface{}, error) {
health := p.client.Health()
opts := makeQueryOptionsWithContext(p, stale)
if filter != "" {
opts.Filter = filter
}
defer p.cancelFunc()
nodes, meta, err := health.ServiceMultipleTags(service, tags, passingOnly, &opts)
if err != nil {
@ -175,13 +196,16 @@ func checksWatch(params map[string]interface{}) (WatcherFunc, error) {
return nil, err
}
var service, state string
var service, state, filter string
if err := assignValue(params, "service", &service); err != nil {
return nil, err
}
if err := assignValue(params, "state", &state); err != nil {
return nil, err
}
if err := assignValue(params, "filter", &filter); err != nil {
return nil, err
}
if service != "" && state != "" {
return nil, fmt.Errorf("Cannot specify service and state")
}
@ -196,6 +220,9 @@ func checksWatch(params map[string]interface{}) (WatcherFunc, error) {
var checks []*consulapi.HealthCheck
var meta *consulapi.QueryMeta
var err error
if filter != "" {
opts.Filter = filter
}
if state != "" {
checks, meta, err = health.State(state, &opts)
} else {

@ -378,6 +378,82 @@ func TestServicesWatch(t *testing.T) {
}
func TestServicesWatch_Filter(t *testing.T) {
t.Parallel()
c, s := makeClient(t)
defer s.Stop()
s.WaitForSerfCheck(t)
var (
wakeups []map[string][]string
notifyCh = make(chan struct{})
)
plan := mustParse(t, `{"type":"services", "filter":"b in ServiceTags and a in ServiceTags"}`)
plan.Handler = func(idx uint64, raw interface{}) {
if raw == nil {
return // ignore
}
v, ok := raw.(map[string][]string)
if !ok {
return // ignore
}
wakeups = append(wakeups, v)
notifyCh <- struct{}{}
}
// Register some services
{
agent := c.Agent()
// we don't want to find this
reg := &api.AgentServiceRegistration{
ID: "foo",
Name: "foo",
Tags: []string{"b"},
}
if err := agent.ServiceRegister(reg); err != nil {
t.Fatalf("err: %v", err)
}
// we want to find this
reg = &api.AgentServiceRegistration{
ID: "bar",
Name: "bar",
Tags: []string{"a", "b"},
}
if err := agent.ServiceRegister(reg); err != nil {
t.Fatalf("err: %v", err)
}
}
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
if err := plan.Run(s.HTTPAddr); err != nil {
t.Errorf("err: %v", err)
}
}()
defer plan.Stop()
// Wait for the first wakeup.
<-notifyCh
plan.Stop()
wg.Wait()
require.Len(t, wakeups, 1)
{
v := wakeups[0]
require.Len(t, v, 1)
_, ok := v["bar"]
require.True(t, ok)
}
}
func TestNodesWatch(t *testing.T) {
t.Parallel()
c, s := makeClient(t)
@ -453,6 +529,82 @@ func TestNodesWatch(t *testing.T) {
}
}
func TestNodesWatch_Filter(t *testing.T) {
t.Parallel()
c, s := makeClient(t)
defer s.Stop()
s.WaitForSerfCheck(t) // wait for AE to sync
var (
wakeups [][]*api.Node
notifyCh = make(chan struct{})
)
plan := mustParse(t, `{"type":"nodes", "filter":"Node == foo"}`)
plan.Handler = func(idx uint64, raw interface{}) {
if raw == nil {
return // ignore
}
v, ok := raw.([]*api.Node)
if !ok {
return // ignore
}
wakeups = append(wakeups, v)
notifyCh <- struct{}{}
}
// Register 2 nodes
{
catalog := c.Catalog()
// we want to find this node
reg := &api.CatalogRegistration{
Node: "foo",
Address: "1.1.1.1",
Datacenter: "dc1",
}
if _, err := catalog.Register(reg, nil); err != nil {
t.Fatalf("err: %v", err)
}
// we don't want to find this node
reg = &api.CatalogRegistration{
Node: "bar",
Address: "2.2.2.2",
Datacenter: "dc1",
}
if _, err := catalog.Register(reg, nil); err != nil {
t.Fatalf("err: %v", err)
}
}
var wg sync.WaitGroup
wg.Add(1)
// Start the watch nodes plan
go func() {
defer wg.Done()
if err := plan.Run(s.HTTPAddr); err != nil {
t.Errorf("err: %v", err)
}
}()
defer plan.Stop()
// Wait for first wakeup.
<-notifyCh
plan.Stop()
wg.Wait()
require.Len(t, wakeups, 1)
{
v := wakeups[0]
require.Len(t, v, 1)
require.Equal(t, "foo", v[0].Node)
}
}
func TestServiceWatch(t *testing.T) {
t.Parallel()
c, s := makeClient(t)
@ -616,6 +768,94 @@ func TestServiceMultipleTagsWatch(t *testing.T) {
}
}
func TestServiceWatch_Filter(t *testing.T) {
t.Parallel()
c, s := makeClient(t)
defer s.Stop()
s.WaitForSerfCheck(t)
var (
wakeups [][]*api.ServiceEntry
notifyCh = make(chan struct{})
)
plan := mustParse(t, `{"type":"service", "service":"foo", "filter":"bar in Service.Tags and buzz in Service.Tags"}`)
plan.Handler = func(idx uint64, raw interface{}) {
if raw == nil {
return // ignore
}
v, ok := raw.([]*api.ServiceEntry)
if !ok {
return // ignore
}
wakeups = append(wakeups, v)
notifyCh <- struct{}{}
}
// register some services
{
agent := c.Agent()
// we do not want to find this one.
reg := &api.AgentServiceRegistration{
ID: "foobarbiff",
Name: "foo",
Tags: []string{"bar", "biff"},
}
if err := agent.ServiceRegister(reg); err != nil {
t.Fatalf("err: %v", err)
}
// we do not want to find this one.
reg = &api.AgentServiceRegistration{
ID: "foobuzzbiff",
Name: "foo",
Tags: []string{"buzz", "biff"},
}
if err := agent.ServiceRegister(reg); err != nil {
t.Fatalf("err: %v", err)
}
// we want to find this one
reg = &api.AgentServiceRegistration{
ID: "foobarbuzzbiff",
Name: "foo",
Tags: []string{"bar", "buzz", "biff"},
}
if err := agent.ServiceRegister(reg); err != nil {
t.Fatalf("err: %v", err)
}
}
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
if err := plan.Run(s.HTTPAddr); err != nil {
t.Errorf("err: %v", err)
}
}()
defer plan.Stop()
// Wait for first wakeup.
<-notifyCh
plan.Stop()
wg.Wait()
require.Len(t, wakeups, 1)
{
v := wakeups[0]
require.Len(t, v, 1)
require.Equal(t, "foobarbuzzbiff", v[0].Service.ID)
require.ElementsMatch(t, []string{"bar", "buzz", "biff"}, v[0].Service.Tags)
}
}
func TestChecksWatch_State(t *testing.T) {
t.Parallel()
c, s := makeClient(t)
@ -772,6 +1012,294 @@ func TestChecksWatch_Service(t *testing.T) {
}
}
func TestChecksWatch_Service_Filter(t *testing.T) {
t.Parallel()
c, s := makeClient(t)
defer s.Stop()
s.WaitForSerfCheck(t)
var (
wakeups [][]*api.HealthCheck
notifyCh = make(chan struct{})
)
plan := mustParse(t, `{"type":"checks", "filter":"b in ServiceTags and a in ServiceTags"}`)
plan.Handler = func(idx uint64, raw interface{}) {
if raw == nil {
return // ignore
}
v, ok := raw.([]*api.HealthCheck)
if !ok {
return // ignore
}
wakeups = append(wakeups, v)
notifyCh <- struct{}{}
}
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
if err := plan.Run(s.HTTPAddr); err != nil {
t.Errorf("err: %v", err)
}
}()
defer plan.Stop()
// Wait for first wakeup.
<-notifyCh
{
catalog := c.Catalog()
reg := &api.CatalogRegistration{
Node: "foobar",
Address: "1.1.1.1",
Datacenter: "dc1",
Service: &api.AgentService{
ID: "foobar",
Service: "foobar",
Tags: []string{"a", "b"},
},
Check: &api.AgentCheck{
Node: "foobar",
CheckID: "foobar",
Name: "foobar",
Status: api.HealthPassing,
ServiceID: "foobar",
},
}
if _, err := catalog.Register(reg, nil); err != nil {
t.Fatalf("err: %v", err)
}
}
// Wait for second wakeup.
<-notifyCh
plan.Stop()
wg.Wait()
require.Len(t, wakeups, 2)
{
v := wakeups[0]
require.Len(t, v, 0)
}
{
v := wakeups[1]
require.Len(t, v, 1)
require.Equal(t, "foobar", v[0].CheckID)
}
}
func TestChecksWatch_Filter(t *testing.T) {
t.Parallel()
c, s := makeClient(t)
defer s.Stop()
s.WaitForSerfCheck(t)
var (
wakeups [][]*api.HealthCheck
notifyCh = make(chan struct{})
)
plan := mustParse(t, `{"type":"checks", "filter":"b in ServiceTags and a in ServiceTags"}`)
plan.Handler = func(idx uint64, raw interface{}) {
if raw == nil {
return // ignore
}
v, ok := raw.([]*api.HealthCheck)
if !ok {
return // ignore
}
wakeups = append(wakeups, v)
notifyCh <- struct{}{}
}
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
if err := plan.Run(s.HTTPAddr); err != nil {
t.Errorf("err: %v", err)
}
}()
defer plan.Stop()
// Wait for first wakeup.
<-notifyCh
{
catalog := c.Catalog()
// we don't want to find this one
reg := &api.CatalogRegistration{
Node: "foo",
Address: "1.1.1.1",
Datacenter: "dc1",
Service: &api.AgentService{
ID: "foo",
Service: "foo",
Tags: []string{"a"},
},
Check: &api.AgentCheck{
Node: "foo",
CheckID: "foo",
Name: "foo",
Status: api.HealthPassing,
ServiceID: "foo",
},
}
if _, err := catalog.Register(reg, nil); err != nil {
t.Fatalf("err: %v", err)
}
// we want to find this one
reg = &api.CatalogRegistration{
Node: "bar",
Address: "2.2.2.2",
Datacenter: "dc1",
Service: &api.AgentService{
ID: "bar",
Service: "bar",
Tags: []string{"a", "b"},
},
Check: &api.AgentCheck{
Node: "bar",
CheckID: "bar",
Name: "bar",
Status: api.HealthPassing,
ServiceID: "bar",
},
}
if _, err := catalog.Register(reg, nil); err != nil {
t.Fatalf("err: %v", err)
}
}
// Wait for second wakeup.
<-notifyCh
plan.Stop()
wg.Wait()
require.Len(t, wakeups, 2)
{
v := wakeups[0]
require.Len(t, v, 0)
}
{
v := wakeups[1]
require.Len(t, v, 1)
require.Equal(t, "bar", v[0].CheckID)
}
}
func TestChecksWatch_Filter_by_ServiceNameStatus(t *testing.T) {
t.Parallel()
c, s := makeClient(t)
defer s.Stop()
s.WaitForSerfCheck(t)
var (
wakeups [][]*api.HealthCheck
notifyCh = make(chan struct{})
)
plan := mustParse(t, `{"type":"checks", "filter":"ServiceName == bar and Status == critical"}`)
plan.Handler = func(idx uint64, raw interface{}) {
if raw == nil {
return // ignore
}
v, ok := raw.([]*api.HealthCheck)
if !ok {
return // ignore
}
wakeups = append(wakeups, v)
notifyCh <- struct{}{}
}
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
if err := plan.Run(s.HTTPAddr); err != nil {
t.Errorf("err: %v", err)
}
}()
defer plan.Stop()
// Wait for first wakeup.
<-notifyCh
{
catalog := c.Catalog()
// we don't want to find this one
reg := &api.CatalogRegistration{
Node: "foo",
Address: "1.1.1.1",
Datacenter: "dc1",
Service: &api.AgentService{
ID: "foo",
Service: "foo",
Tags: []string{"a"},
},
Check: &api.AgentCheck{
Node: "foo",
CheckID: "foo",
Name: "foo",
Status: api.HealthPassing,
ServiceID: "foo",
},
}
if _, err := catalog.Register(reg, nil); err != nil {
t.Fatalf("err: %v", err)
}
// we want to find this one
reg = &api.CatalogRegistration{
Node: "bar",
Address: "2.2.2.2",
Datacenter: "dc1",
Service: &api.AgentService{
ID: "bar",
Service: "bar",
Tags: []string{"a", "b"},
},
Check: &api.AgentCheck{
Node: "bar",
CheckID: "bar",
Name: "bar",
Status: api.HealthCritical,
ServiceID: "bar",
},
}
if _, err := catalog.Register(reg, nil); err != nil {
t.Fatalf("err: %v", err)
}
}
// Wait for second wakeup.
<-notifyCh
plan.Stop()
wg.Wait()
require.Len(t, wakeups, 2)
{
v := wakeups[0]
require.Len(t, v, 0)
}
{
v := wakeups[1]
require.Len(t, v, 1)
require.Equal(t, "bar", v[0].CheckID)
}
}
func TestEventWatch(t *testing.T) {
t.Parallel()
c, s := makeClient(t)

@ -2,7 +2,7 @@
# SPDX-License-Identifier: MPL-2.0
ARG CONSUL_IMAGE_VERSION=latest
FROM consul:${CONSUL_IMAGE_VERSION}
FROM hashicorp/consul:${CONSUL_IMAGE_VERSION}
RUN apk update && apk add iptables
ARG TARGETARCH
COPY linux_${TARGETARCH}/consul /bin/consul

@ -2,6 +2,6 @@
# SPDX-License-Identifier: MPL-2.0
ARG CONSUL_IMAGE_VERSION=latest
FROM consul:${CONSUL_IMAGE_VERSION}
FROM hashicorp/consul:${CONSUL_IMAGE_VERSION}
RUN apk update && apk add iptables
COPY consul /bin/consul

@ -0,0 +1,10 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0
FROM amazonlinux:latest
RUN yum install -y yum-utils shadow-utils
RUN yum-config-manager --add-repo https://rpm.releases.hashicorp.com/AmazonLinux/hashicorp.repo
ARG PACKAGE=consul
ARG VERSION
ARG SUFFIX=1
RUN yum install -y ${PACKAGE}-${VERSION}-${SUFFIX}

@ -0,0 +1,10 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0
FROM centos:7
RUN yum install -y yum-utils
RUN yum-config-manager --add-repo https://rpm.releases.hashicorp.com/RHEL/hashicorp.repo
ARG PACKAGE=consul
ARG VERSION
ARG SUFFIX=1
RUN yum install -y ${PACKAGE}-${VERSION}-${SUFFIX}

@ -0,0 +1,12 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0
FROM debian:bullseye
RUN apt update && apt install -y software-properties-common curl gnupg
RUN curl -fsSL https://apt.releases.hashicorp.com/gpg | apt-key add -
ARG TARGETARCH=amd64
RUN apt-add-repository "deb [arch=${TARGETARCH}] https://apt.releases.hashicorp.com $(lsb_release -cs) main"
ARG PACKAGE=consul
ARG VERSION
ARG SUFFIX=1
RUN apt-get update && apt-get install -y ${PACKAGE}=${VERSION}-${SUFFIX}

@ -0,0 +1,10 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0
FROM fedora:latest
RUN dnf install -y dnf-plugins-core
RUN dnf config-manager --add-repo https://rpm.releases.hashicorp.com/fedora/hashicorp.repo
ARG PACKAGE=consul
ARG VERSION
ARG SUFFIX=1
RUN dnf install -y ${PACKAGE}-${VERSION}-${SUFFIX}

@ -0,0 +1,12 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0
FROM i386/ubuntu:latest
RUN apt update && apt install -y software-properties-common curl
RUN curl -fsSL https://apt.releases.hashicorp.com/gpg | apt-key add -
ARG TARGETARCH=amd64
RUN apt-add-repository "deb [arch=${TARGETARCH}] https://apt.releases.hashicorp.com $(lsb_release -cs) main"
ARG PACKAGE=consul
ARG VERSION
ARG SUFFIX=1
RUN apt-get update && apt-get install -y ${PACKAGE}=${VERSION}-${SUFFIX}

@ -0,0 +1,12 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0
FROM ubuntu:latest
RUN apt update && apt install -y software-properties-common curl
RUN curl -fsSL https://apt.releases.hashicorp.com/gpg | apt-key add -
ARG TARGETARCH=amd64
RUN apt-add-repository "deb [arch=${TARGETARCH}] https://apt.releases.hashicorp.com $(lsb_release -cs) main"
ARG PACKAGE=consul
ARG VERSION
ARG SUFFIX=1
RUN apt-get update && apt-get install -y ${PACKAGE}=${VERSION}-${SUFFIX}

@ -72,6 +72,10 @@ function main {
status "Generated gRPC rate limit mapping file"
generate_protoset_file
status "Generated protoset file"
return 0
}
@ -152,5 +156,11 @@ function generate_rate_limit_mappings {
}
}
function generate_protoset_file {
local pkg_dir="${SOURCE_DIR}/pkg"
mkdir -p "$pkg_dir"
print_run buf build -o "${pkg_dir}/consul.protoset"
}
main "$@"
exit $?

@ -847,7 +847,8 @@ func appendTelemetryCollectorConfig(args *BootstrapTplArgs, telemetryCollectorBi
"envoy_grpc": {
"cluster_name": "consul_telemetry_collector_loopback"
}
}
},
"emit_tags_as_labels": true
}
}`

@ -539,7 +539,8 @@ const (
"envoy_grpc": {
"cluster_name": "consul_telemetry_collector_loopback"
}
}
},
"emit_tags_as_labels": true
}
}`
@ -638,7 +639,8 @@ func TestBootstrapConfig_ConfigureArgs(t *testing.T) {
"envoy_grpc": {
"cluster_name": "consul_telemetry_collector_loopback"
}
}
},
"emit_tags_as_labels": true
}
}`,
StaticClustersJSON: `{

@ -89,7 +89,8 @@
"envoy_grpc": {
"cluster_name": "consul_telemetry_collector_loopback"
}
}
},
"emit_tags_as_labels": true
}
}
],

@ -45,6 +45,7 @@ type cmd struct {
state string
name string
shell bool
filter string
}
func (c *cmd) init() {
@ -71,6 +72,7 @@ func (c *cmd) init() {
"Specifies the states to watch. Optional for 'checks' type.")
c.flags.StringVar(&c.name, "name", "",
"Specifies an event name to watch. Only for 'event' type.")
c.flags.StringVar(&c.filter, "filter", "", "Filter to use with the request")
c.http = &flags.HTTPFlags{}
flags.Merge(c.flags, c.http.ClientFlags())
@ -128,6 +130,9 @@ func (c *cmd) Run(args []string) int {
if c.service != "" {
params["service"] = c.service
}
if c.filter != "" {
params["filter"] = c.filter
}
if len(c.tag) > 0 {
params["tag"] = c.tag
}

@ -40,6 +40,7 @@ Also see the [FAQ](./faq.md).
1. [Integration Tests](../test/integration/connect/envoy/README.md)
1. [Upgrade Tests](../test/integration/consul-container/test/upgrade/README.md)
1. [Remote Debugging Integration Tests](../test/integration/consul-container/test/debugging.md)
## Important Directories

@ -21,7 +21,7 @@ require (
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e
github.com/armon/go-metrics v0.4.1
github.com/armon/go-radix v1.0.0
github.com/aws/aws-sdk-go v1.42.34
github.com/aws/aws-sdk-go v1.44.289
github.com/coredns/coredns v1.6.6
github.com/coreos/go-oidc v2.1.0+incompatible
github.com/docker/go-connections v0.4.0

@ -161,8 +161,8 @@ github.com/aws/aws-sdk-go v1.23.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN
github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/aws/aws-sdk-go v1.42.34 h1:fqGAiKmCSRY1rEa4G9VqgkKKbNmLKYq5dKmLtQkvYi8=
github.com/aws/aws-sdk-go v1.42.34/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc=
github.com/aws/aws-sdk-go v1.44.289 h1:5CVEjiHFvdiVlKPBzv0rjG4zH/21W/onT18R5AH/qx0=
github.com/aws/aws-sdk-go v1.44.289/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
github.com/benbjohnson/immutable v0.4.0 h1:CTqXbEerYso8YzVPxmWxh2gnoRQbbB9X1quUC8+vGZA=
github.com/benbjohnson/immutable v0.4.0/go.mod h1:iAr8OjJGLnLmVUr9MZ/rz4PWUy6Ouc2JLYuMArmvAJM=
@ -1100,6 +1100,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
@ -1169,6 +1170,7 @@ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
@ -1210,6 +1212,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -1268,9 +1271,10 @@ golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -1305,6 +1309,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -1399,13 +1404,16 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1419,6 +1427,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -1496,6 +1505,7 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo=
golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

@ -190,7 +190,7 @@ func TestController_String(t *testing.T) {
WithPlacement(controller.PlacementEachServer)
require.Equal(t,
`<Controller managed_type="demo.v2.artist", watched_types=["demo.v2.album"], backoff=<base="5s", max="1h0m0s">, placement="each-server">`,
`<Controller managed_type="demo.v2.Artist", watched_types=["demo.v2.Album"], backoff=<base="5s", max="1h0m0s">, placement="each-server">`,
ctrl.String(),
)
}
@ -201,7 +201,7 @@ func TestController_NoReconciler(t *testing.T) {
ctrl := controller.ForType(demo.TypeV2Artist)
require.PanicsWithValue(t,
`cannot register controller without a reconciler <Controller managed_type="demo.v2.artist", watched_types=[], backoff=<base="5ms", max="16m40s">, placement="singleton">`,
`cannot register controller without a reconciler <Controller managed_type="demo.v2.Artist", watched_types=[], backoff=<base="5ms", max="16m40s">, placement="singleton">`,
func() { mgr.Register(ctrl) })
}

@ -0,0 +1,17 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
//go:build !consulent
// +build !consulent
package resource
import (
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/proto-public/pbresource"
)
// AuthorizerContext builds an ACL AuthorizerContext for the given tenancy.
func AuthorizerContext(t *pbresource.Tenancy) *acl.AuthorizerContext {
return &acl.AuthorizerContext{Peer: t.PeerName}
}

@ -33,36 +33,36 @@ var (
TypeV1Artist = &pbresource.Type{
Group: "demo",
GroupVersion: "v1",
Kind: "artist",
Kind: "Artist",
}
// TypeV1Album represents a collection of an artist's songs.
TypeV1Album = &pbresource.Type{
Group: "demo",
GroupVersion: "v1",
Kind: "album",
Kind: "Album",
}
// TypeV2Artist represents a musician or group of musicians.
TypeV2Artist = &pbresource.Type{
Group: "demo",
GroupVersion: "v2",
Kind: "artist",
Kind: "Artist",
}
// TypeV2Album represents a collection of an artist's songs.
TypeV2Album = &pbresource.Type{
Group: "demo",
GroupVersion: "v2",
Kind: "album",
Kind: "Album",
}
)
const (
ArtistV1ReadPolicy = `key_prefix "resource/demo.v1.artist/" { policy = "read" }`
ArtistV1WritePolicy = `key_prefix "resource/demo.v1.artist/" { policy = "write" }`
ArtistV2ReadPolicy = `key_prefix "resource/demo.v2.artist/" { policy = "read" }`
ArtistV2WritePolicy = `key_prefix "resource/demo.v2.artist/" { policy = "write" }`
ArtistV1ReadPolicy = `key_prefix "resource/demo.v1.Artist/" { policy = "read" }`
ArtistV1WritePolicy = `key_prefix "resource/demo.v1.Artist/" { policy = "write" }`
ArtistV2ReadPolicy = `key_prefix "resource/demo.v2.Artist/" { policy = "read" }`
ArtistV2WritePolicy = `key_prefix "resource/demo.v2.Artist/" { policy = "write" }`
ArtistV2ListPolicy = `key_prefix "resource/" { policy = "list" }`
)

@ -5,6 +5,7 @@ package resource
import (
"fmt"
"regexp"
"sync"
"google.golang.org/protobuf/proto"
@ -13,6 +14,12 @@ import (
"github.com/hashicorp/consul/proto-public/pbresource"
)
var (
groupRegexp = regexp.MustCompile(`^[a-z][a-z\d_]+$`)
groupVersionRegexp = regexp.MustCompile(`^v([a-z\d]+)?\d$`)
kindRegexp = regexp.MustCompile(`^[A-Z][A-Za-z\d]+$`)
)
type Registry interface {
// Register the given resource type and its hooks.
Register(reg Registration)
@ -82,14 +89,23 @@ func NewRegistry() Registry {
}
func (r *TypeRegistry) Register(registration Registration) {
r.lock.Lock()
defer r.lock.Unlock()
typ := registration.Type
if typ.Group == "" || typ.GroupVersion == "" || typ.Kind == "" {
panic("type field(s) cannot be empty")
}
switch {
case !groupRegexp.MatchString(typ.Group):
panic(fmt.Sprintf("Type.Group must be in snake_case. Got: %q", typ.Group))
case !groupVersionRegexp.MatchString(typ.GroupVersion):
panic(fmt.Sprintf("Type.GroupVersion must be lowercase, start with `v`, and end with a number (e.g. `v2` or `v1alpha1`). Got: %q", typ.Group))
case !kindRegexp.MatchString(typ.Kind):
panic(fmt.Sprintf("Type.Kind must be in PascalCase. Got: %q", typ.Kind))
}
r.lock.Lock()
defer r.lock.Unlock()
key := ToGVK(registration.Type)
if _, ok := r.registrations[key]; ok {
panic(fmt.Sprintf("resource type %s already registered", key))

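Taken together, these checks admit registrations shaped like the following hypothetical sketch: a snake_case group, a lowercase `v`-prefixed version ending in a digit, and a PascalCase kind. The type names here are illustrative.

```go
// Hypothetical registration that satisfies the naming rules above.
registry := resource.NewRegistry()
registry.Register(resource.Registration{
	Type: &pbresource.Type{
		Group:        "billing",  // snake_case
		GroupVersion: "v1alpha1", // lowercase, leading `v`, trailing digit
		Kind:         "Invoice",  // PascalCase
	},
})
```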
@ -28,36 +28,9 @@ func TestRegister(t *testing.T) {
require.True(t, proto.Equal(demo.TypeV2Artist, actual.Type))
// register existing should panic
require.PanicsWithValue(t, "resource type demo.v2.artist already registered", func() {
require.PanicsWithValue(t, "resource type demo.v2.Artist already registered", func() {
r.Register(reg)
})
// type missing required fields should panic
testcases := map[string]*pbresource.Type{
"empty group": {
Group: "",
GroupVersion: "v2",
Kind: "artist",
},
"empty group version": {
Group: "",
GroupVersion: "v2",
Kind: "artist",
},
"empty kind": {
Group: "demo",
GroupVersion: "v2",
Kind: "",
},
}
for desc, typ := range testcases {
t.Run(desc, func(t *testing.T) {
require.PanicsWithValue(t, "type field(s) cannot be empty", func() {
r.Register(resource.Registration{Type: typ})
})
})
}
}
func TestRegister_Defaults(t *testing.T) {
@ -102,7 +75,7 @@ func TestResolve(t *testing.T) {
serviceType := &pbresource.Type{
Group: "mesh",
GroupVersion: "v1",
Kind: "service",
Kind: "Service",
}
// not found
@ -115,3 +88,89 @@ func TestResolve(t *testing.T) {
assert.True(t, ok)
assert.Equal(t, registration.Type, serviceType)
}
func TestRegister_TypeValidation(t *testing.T) {
registry := resource.NewRegistry()
testCases := map[string]struct {
fn func(*pbresource.Type)
valid bool
}{
"Valid": {valid: true},
"Group empty": {
fn: func(t *pbresource.Type) { t.Group = "" },
valid: false,
},
"Group PascalCase": {
fn: func(t *pbresource.Type) { t.Group = "Foo" },
valid: false,
},
"Group kebab-case": {
fn: func(t *pbresource.Type) { t.Group = "foo-bar" },
valid: false,
},
"Group snake_case": {
fn: func(t *pbresource.Type) { t.Group = "foo_bar" },
valid: true,
},
"GroupVersion empty": {
fn: func(t *pbresource.Type) { t.GroupVersion = "" },
valid: false,
},
"GroupVersion snake_case": {
fn: func(t *pbresource.Type) { t.GroupVersion = "v_1" },
valid: false,
},
"GroupVersion kebab-case": {
fn: func(t *pbresource.Type) { t.GroupVersion = "v-1" },
valid: false,
},
"GroupVersion no leading v": {
fn: func(t *pbresource.Type) { t.GroupVersion = "1" },
valid: false,
},
"GroupVersion no trailing number": {
fn: func(t *pbresource.Type) { t.GroupVersion = "OnePointOh" },
valid: false,
},
"Kind PascalCase with numbers": {
fn: func(t *pbresource.Type) { t.Kind = "Number1" },
valid: true,
},
"Kind camelCase": {
fn: func(t *pbresource.Type) { t.Kind = "barBaz" },
valid: false,
},
"Kind snake_case": {
fn: func(t *pbresource.Type) { t.Kind = "bar_baz" },
valid: false,
},
"Kind empty": {
fn: func(t *pbresource.Type) { t.Kind = "" },
valid: false,
},
}
for desc, tc := range testCases {
t.Run(desc, func(t *testing.T) {
reg := func() {
typ := &pbresource.Type{
Group: "foo",
GroupVersion: "v1",
Kind: "Bar",
}
if tc.fn != nil {
tc.fn(typ)
}
registry.Register(resource.Registration{
Type: typ,
})
}
if tc.valid {
require.NotPanics(t, reg)
} else {
require.Panics(t, reg)
}
})
}
}

@ -6,6 +6,6 @@ var (
TypeV1Tombstone = &pbresource.Type{
Group: "internal",
GroupVersion: "v1",
Kind: "tombstone",
Kind: "Tombstone",
}
)

@ -53,12 +53,30 @@ EnvoyExtensions = [
Path = "/upstream_connection_options/tcp_keepalive/keepalive_probes"
Value = 1234
},
{
ResourceFilter = {
ResourceType = "cluster"
TrafficDirection = "outbound"
}
Op = "add"
Path = "/outlier_detection/max_ejection_time/seconds"
Value = 120
},
{
ResourceFilter = {
ResourceType = "cluster"
TrafficDirection = "outbound"
}
Op = "add"
Path = "/outlier_detection/max_ejection_time_jitter/seconds"
Value = 1
},
{
ResourceFilter = {
ResourceType = "cluster"
TrafficDirection = "outbound"
Services = [{
Name = "s2"
Name = "s3"
}]
}
Op = "remove"

@ -19,13 +19,14 @@ load helpers
[ "$status" == 0 ]
[ "$(echo "$output" | jq -r '.upstream_connection_options.tcp_keepalive.keepalive_probes')" == "1234" ]
[ "$(echo "$output" | jq -r '.outlier_detection')" == "null" ]
[ "$(echo "$output" | jq -r '.outlier_detection.max_ejection_time')" == "120s" ]
[ "$(echo "$output" | jq -r '.outlier_detection.max_ejection_time_jitter')" == "1s" ]
run get_envoy_cluster_config localhost:19000 s3
[ "$status" == 0 ]
[ "$(echo "$output" | jq -r '.upstream_connection_options.tcp_keepalive.keepalive_probes')" == "1234" ]
[ "$(echo "$output" | jq -r '.outlier_detection')" == "{}" ]
[ "$(echo "$output" | jq -r '.outlier_detection')" == "null" ]
}
@test "s2 proxy is configured with the expected envoy patches" {

@ -40,6 +40,20 @@ func CatalogServiceExists(t *testing.T, c *api.Client, svc string, opts *api.Que
})
}
// CatalogServiceHasInstanceCount verifies the service name exists in the Consul catalog and has the specified
// number of instances.
func CatalogServiceHasInstanceCount(t *testing.T, c *api.Client, svc string, count int, opts *api.QueryOptions) {
retry.Run(t, func(r *retry.R) {
services, _, err := c.Catalog().Service(svc, "", opts)
if err != nil {
r.Fatal("error reading service data")
}
if len(services) != count {
r.Fatalf("did not find %d catalog entries for %s", count, svc)
}
})
}
// CatalogNodeExists verifies the node name exists in the Consul catalog
func CatalogNodeExists(t *testing.T, c *api.Client, nodeName string) {
retry.Run(t, func(r *retry.R) {

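A sketch of how a test might call the new instance-count helper; the client setup and service name are illustrative, and the `libassert` package alias is an assumption about how the helpers are imported.

```go
// Hypothetical usage: retry until exactly two instances of
// "static-server" are visible in the catalog.
libassert.CatalogServiceHasInstanceCount(t, client, "static-server", 2, nil)
```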
@ -46,6 +46,7 @@ type ServiceOpts struct {
Checks Checks
Connect SidecarService
Namespace string
Locality *api.Locality
}
// createAndRegisterStaticServerAndSidecar register the services and launch static-server containers
@ -119,6 +120,7 @@ func CreateAndRegisterStaticServerAndSidecar(node libcluster.Agent, serviceOpts
Namespace: serviceOpts.Namespace,
Meta: serviceOpts.Meta,
Check: &agentCheck,
Locality: serviceOpts.Locality,
}
return createAndRegisterStaticServerAndSidecar(node, serviceOpts.HTTPPort, serviceOpts.GRPCPort, req, containerArgs...)
}

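A sketch of a `ServiceOpts` literal carrying the new `Locality` field, which is now threaded through to the catalog registration above. The service name, ports, and region/zone values are illustrative.

```go
// Hypothetical ServiceOpts with an explicit locality.
opts := &libservice.ServiceOpts{
	Name:     "static-server",
	ID:       "static-server",
	HTTPPort: 8080,
	GRPCPort: 8079,
	Locality: &api.Locality{Region: "us-west-1", Zone: "us-west-1a"},
}
```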
@ -0,0 +1,78 @@
# Remote Debugging Integration Tests
- [Introduction](#introduction)
- [How it works](#how-it-works)
- [Getting Started](#getting-started)
- [Prerequisites](#prerequisites)
- [Debugging integration tests](#debugging-integration-tests)
- [Building images](#building-images)
- [Remote debugging using GoLand](#remote-debugging-using-goland)
## Introduction
Remote debugging of integration tests allows you to attach your debugger to the Consul container and debug Go code running on that container.
### How it works
The `dev-docker-dbg` Make target builds a Consul Docker container that has the following:
- the [delve (dlv) debugger](https://github.com/go-delve/delve) installed.
- a port exposed on the container that allows a debugger from your development environment to connect and attach to the Consul process and debug it remotely.
- log output of the host and port information, so that you know where to connect.
The integration tests expose a `--debug` flag that switches the test from the `consul:local` image built by `make dev-docker` to the `consul-dbg:local` image built by `make dev-docker-dbg`.
Run the test in debug mode with a breakpoint set just after the cluster is created so that you can retrieve the port information. From there, you can set up a remote debugging session that connects to this port.
## Getting Started
### Prerequisites
To run/debug integration tests locally, the following tools are required on your machine:
- Install [Go](https://go.dev/) (the version should match that of our CI config's Go image).
- Install [`make`](https://www.gnu.org/software/make/manual/make.html).
- Install [`Docker`](https://docs.docker.com/get-docker/), which is required to run the tests locally.
### Debugging integration tests
#### Building images
- Build a Consul image with `dlv` installed and a port exposed that the debugger can attach to.
```
make dev-docker-dbg
```
- From the Consul root directory, build a consul-envoy container image, which is required for testing but not for debugging.
```
docker build -t consul-envoy:target-version --build-arg CONSUL_IMAGE=consul:local --build-arg ENVOY_VERSION=1.24.6 -f ./test/integration/consul-container/assets/Dockerfile-consul-envoy ./test/integration/consul-container/assets
```
#### Remote debugging using GoLand
(For additional information, see [GoLand's documentation on remote debugging](https://www.jetbrains.com/help/go/attach-to-running-go-processes-with-debugger.html#attach-to-a-process-on-a-remote-machine).)
##### Set up the Debug Configuration for your test
- Create the configuration for debugging the test. (You may have to debug the test once so GoLand creates the configuration for you.)
- Go to `Run > Edit Configurations` and select the appropriate configuration.
- Add `--debug` to `Program arguments` and click OK.
<img src="./util/test_debug_configuration.png" alt="isolated" width="550"/>
##### Obtain the debug port of your container
(This is required every time a test is debugged.)
- Put a breakpoint in the test that you are running right after the cluster has been created. This should be on the line after the call to `topology.NewCluster()`.
- Debug the test and wait for the debug session to stop on the breakpoint in the test.
- In the Debug window, search for `debug info` on the Console tab and note the host and port.
<img src="./util/test_debug_info.png" alt="isolated" width="550"/>
- Go to `Run > Edit Configurations` and add a `Go Remote` configuration with the host and port that your test has exposed. Click OK.
<img src="./util/test_debug_remote_configuration.png" alt="isolated" width="550"/>
- Debug the configuration that you just created. Verify that it shows as connected in the `Debugger` of this configuration in the `Debug` window.
<img src="./util/test_debug_remote_connected.png" alt="isolated" width="550"/>
##### Debug the consul backend
- Set an appropriate breakpoint in the backend code of the endpoint that your test will call and that you wish to debug.
- Go to the test debugging tab for the integration test in the `Debug` window and `Resume Program`.
<img src="./util/test_debug_resume_program.png" alt="isolated" width="350"/>
- The remote debugging session should stop on the breakpoint, and you can freely debug the code path.
<img src="./util/test_debug_breakpoint_hit.png" alt="isolated" width="550"/>
#### Remote debugging using VSCode
(For additional information, see [VSCode's documentation on remote debugging](https://github.com/golang/vscode-go/blob/master/docs/debugging.md#remote-debugging).)
[comment]: <> (TODO: Openly looking for someone to add VSCode specific instructions.)

@ -32,8 +32,6 @@ const (
// - logs for exceeding
func TestServerRequestRateLimit(t *testing.T) {
t.Parallel()
type action struct {
function func(client *api.Client) error
rateLimitOperation string
@ -52,6 +50,7 @@ func TestServerRequestRateLimit(t *testing.T) {
mode string
}
// getKV and putKV are net/RPC calls
getKV := action{
function: func(client *api.Client) error {
_, _, err := client.KV().Get("foo", &api.QueryOptions{})
@ -99,13 +98,13 @@ func TestServerRequestRateLimit(t *testing.T) {
action: putKV,
expectedErrorMsg: "",
expectExceededLog: true,
expectMetric: false,
expectMetric: true,
},
{
action: getKV,
expectedErrorMsg: "",
expectExceededLog: true,
expectMetric: false,
expectMetric: true,
},
},
},
@ -127,10 +126,13 @@ func TestServerRequestRateLimit(t *testing.T) {
expectMetric: true,
},
},
}}
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.description, func(t *testing.T) {
t.Parallel()
clusterConfig := &libtopology.ClusterConfig{
NumServers: 1,
NumClients: 0,
@ -144,12 +146,9 @@ func TestServerRequestRateLimit(t *testing.T) {
ApplyDefaultProxySettings: false,
}
cluster, _, _ := libtopology.NewCluster(t, clusterConfig)
cluster, client := setupClusterAndClient(t, clusterConfig, true)
defer terminate(t, cluster)
client, err := cluster.GetClient(nil, true)
require.NoError(t, err)
// perform actions and validate returned errors to client
for _, op := range tc.operations {
err := op.action.function(client)
@ -165,22 +164,14 @@ func TestServerRequestRateLimit(t *testing.T) {
// doing this in a separate loop so we can perform actions, allow metrics
// and logs to collect and then assert on each.
for _, op := range tc.operations {
timer := &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond}
timer := &retry.Timer{Timeout: 15 * time.Second, Wait: 500 * time.Millisecond}
retry.RunWith(timer, t, func(r *retry.R) {
// validate metrics
metricsInfo, err := client.Agent().Metrics()
// TODO(NET-1978): currently returns NaN error
// require.NoError(t, err)
if metricsInfo != nil && err == nil {
if op.expectMetric {
checkForMetric(r, metricsInfo, op.action.rateLimitOperation, op.action.rateLimitType, tc.mode)
}
}
checkForMetric(t, cluster, op.action.rateLimitOperation, op.action.rateLimitType, tc.mode, op.expectMetric)
// validate logs
// putting this last as there are cases where logs
// were not present in consumer when assertion was made.
checkLogsForMessage(r, clusterConfig.LogConsumer.Msgs,
checkLogsForMessage(t, clusterConfig.LogConsumer.Msgs,
fmt.Sprintf("[DEBUG] agent.server.rpc-rate-limit: RPC exceeded allowed rate limit: rpc=%s", op.action.rateLimitOperation),
op.action.rateLimitOperation, "exceeded", op.expectExceededLog)
@ -190,43 +181,65 @@ func TestServerRequestRateLimit(t *testing.T) {
}
}
func checkForMetric(t *retry.R, metricsInfo *api.MetricsInfo, operationName string, expectedLimitType string, expectedMode string) {
const counterName = "consul.rpc.rate_limit.exceeded"
func setupClusterAndClient(t *testing.T, config *libtopology.ClusterConfig, isServer bool) (*libcluster.Cluster, *api.Client) {
cluster, _, _ := libtopology.NewCluster(t, config)
var counter api.SampledValue
for _, c := range metricsInfo.Counters {
if c.Name == counterName {
counter = c
break
}
}
require.NotEmptyf(t, counter.Name, "counter not found: %s", counterName)
client, err := cluster.GetClient(nil, isServer)
require.NoError(t, err)
return cluster, client
}
func checkForMetric(t *testing.T, cluster *libcluster.Cluster, operationName string, expectedLimitType string, expectedMode string, expectMetric bool) {
// validate metrics
server, err := cluster.GetClient(nil, true)
require.NoError(t, err)
metricsInfo, err := server.Agent().Metrics()
// TODO(NET-1978): currently returns NaN error
// require.NoError(t, err)
if metricsInfo != nil && err == nil {
if expectMetric {
const counterName = "consul.rpc.rate_limit.exceeded"
var counter api.SampledValue
for _, c := range metricsInfo.Counters {
if c.Name == counterName {
counter = c
break
}
}
require.NotEmptyf(t, counter.Name, "counter not found: %s", counterName)
operation, ok := counter.Labels["op"]
require.True(t, ok)
operation, ok := counter.Labels["op"]
require.True(t, ok)
limitType, ok := counter.Labels["limit_type"]
require.True(t, ok)
limitType, ok := counter.Labels["limit_type"]
require.True(t, ok)
mode, ok := counter.Labels["mode"]
require.True(t, ok)
mode, ok := counter.Labels["mode"]
require.True(t, ok)
if operation == operationName {
require.GreaterOrEqual(t, counter.Count, 1)
require.Equal(t, expectedLimitType, limitType)
require.Equal(t, expectedMode, mode)
if operation == operationName {
require.GreaterOrEqual(t, counter.Count, 1)
require.Equal(t, expectedLimitType, limitType)
require.Equal(t, expectedMode, mode)
}
}
}
}
func checkLogsForMessage(t *retry.R, logs []string, msg string, operationName string, logType string, logShouldExist bool) {
found := false
for _, log := range logs {
if strings.Contains(log, msg) {
found = true
break
func checkLogsForMessage(t *testing.T, logs []string, msg string, operationName string, logType string, logShouldExist bool) {
if logShouldExist {
found := false
for _, log := range logs {
if strings.Contains(log, msg) {
found = true
break
}
}
expectedLog := fmt.Sprintf("%s log check failed for: %s. Log expected: %t", logType, operationName, logShouldExist)
require.Equal(t, logShouldExist, found, expectedLog)
}
require.Equal(t, logShouldExist, found, fmt.Sprintf("%s log check failed for: %s. Log expected: %t", logType, operationName, logShouldExist))
}
func terminate(t *testing.T, cluster *libcluster.Cluster) {

Binary files not shown: six PNG screenshots added under `util/` for the remote debugging README (640 KiB, 287 KiB, 608 KiB, 279 KiB, 109 KiB, and 51 KiB).

@ -80,7 +80,7 @@ information when `debug` is running. By default, it captures all information.
| `members` | A list of all the WAN and LAN members in the cluster. |
| `metrics` | Metrics from the in-memory metrics endpoint in the target, captured at the interval. |
| `logs` | `TRACE` level logs for the target agent, captured for the duration. |
| `pprof` | Golang heap, CPU, goroutine, and trace profiling. CPU and traces are captured for `duration` in a single file while heap and goroutine are separate snapshots for each `interval`. This information is not retrieved unless [`enable_debug`](/consul/docs/agent/config/config-files#enable_debug) is set to `true` on the target agent or ACLs are enable and an ACL token with `operator:read` is provided. |
| `pprof` | Golang heap, CPU, goroutine, and trace profiling. CPU and traces are captured for `duration` in a single file while heap and goroutine are separate snapshots for each `interval`. This information is not retrieved unless [`enable_debug`](/consul/docs/agent/config/config-files#enable_debug) is set to `true` on the target agent or ACLs are enabled and an ACL token with `operator:read` is provided. |
## Examples

@ -53,6 +53,11 @@ or optionally provided. There is more documentation on watch
- `-type` - Watch type. Required, one of "`key`, `keyprefix`, `services`,
`nodes`, `service`, `checks`, or `event`.
- `-filter=<filter>` - Expression to use for filtering the results. Optional for
the `checks`, `nodes`, `services`, and `service` types.
See the [`/catalog/nodes` API documentation](/consul/api-docs/catalog#filtering) for a
description of what is filterable.
#### API Options
@include 'http_api_options_client.mdx'

@ -472,8 +472,7 @@ Refer to the [formatting specification](https://golang.org/pkg/time/#ParseDurati
that match a registering service instance. If it finds any, the agent will merge the centralized defaults with the service instance configuration. This allows for things like service protocol or proxy configuration to be defined centrally and inherited by any affected service registrations.
This defaults to `false` in versions of Consul prior to 1.9.0, and defaults to `true` in Consul 1.9.0 and later.
- `enable_debug` When set, enables some additional debugging features. Currently, this is only used to
access runtime profiling HTTP endpoints, which are available with an `operator:read` ACL regardless of the value of `enable_debug`.
- `enable_debug` (boolean, default is `false`): When set to `true`, enables Consul to report additional debugging information, including runtime profiling (`pprof`) data. This setting is only required for clusters without ACLs [enabled](#acl_enabled). If you change this setting, you must restart the agent for the change to take effect.
- `enable_script_checks` Equivalent to the [`-enable-script-checks` command-line flag](/consul/docs/agent/config/cli-flags#_enable_script_checks).

@ -7,7 +7,7 @@ description: >-
# Upgrade Consul API gateway for Kubernetes
Since Consul v1.15, the Consul API gateway is a native feature within the Consul binary and is installed during the normal Consul installation process. Since Consul v1.16, the CRDs necessary for using the Consul API gateway for Kubernetes are also included. You can install Consul v1.16 using the Consul Helm chart v1.2 and later. Refer to [Install API gateway for Kubernetes](/consul/docs/api-gateway/install) for additional information.
Since Consul v1.15, the Consul API gateway is a native feature within the Consul binary and is installed during the normal Consul installation process. Since Consul on Kubernetes v1.2 (Consul v1.16), the CRDs necessary for using the Consul API gateway for Kubernetes are also included. You can install Consul v1.16 using the Consul Helm chart v1.2 and later. Refer to [Install API gateway for Kubernetes](/consul/docs/api-gateway/install) for additional information.
## Introduction
@ -47,62 +47,114 @@ To begin using the native API gateway, complete one of the following upgrade pat
## Upgrade to native Consul API gateway
You must begin the upgrade procedure with Consul API gateway v1.1 installed. If you are currently using a version of Consul API gateway older than v1.1, complete the necessary stages of the upgrade path to v1.1 before you begin upgrading to the native API gateway. Refer to the [Introduction](#introduction) for an overview of the upgrade paths.
You must begin the upgrade procedure with the API gateway from Consul on Kubernetes v1.1 installed. If you are currently using a version of Consul on Kubernetes older than v1.1, complete the necessary stages of the upgrade path to v1.1 before you begin upgrading to the native API gateway. Refer to the [Introduction](#introduction) for an overview of the upgrade paths.
### Consul-managed CRDs
If you are able to tolerate downtime for your applications, you should delete previously installed CRDs and allow Consul to install and manage them for future updates. The amount of downtime depends on how quickly you are able to install the new version of Consul. If you are unable to tolerate any downtime, refer to [Self-managed CRDs](#self-managed-crds) for instructions on how to upgrade without downtime.
1. Run the `kubectl delete` command and reference the kustomize directory to delete the existing CRDs. The following example deletes the CRDs that were installed with API gateway v0.5.1:
1. Run the `kubectl delete` command and reference the `kustomize` directory to delete the existing CRDs. The following example deletes the CRDs that were installed with API gateway `v0.5.1`:
```shell-session
$ kubectl delete --kustomize="github.com/hashicorp/consul-api-gateway/config/crd?ref=v0.5.1"
```
1. Issue the following command to apply the configuration and complete the installation:
1. Issue the following command to use the API gateway packaged in Consul. Because Consul does not detect an external CRD, it installs and manages the API gateway CRDs itself.
```shell-session
$ kubectl apply -f apigw-installation.yaml
$ consul-k8s install -config-file values.yaml
```
1. Create `ServiceIntentions` allowing `Gateways` to communicate with any backend services that they route to. Refer to [Service intentions configuration entry reference](/consul/docs/connect/config-entries/service-intentions) for additional information.
1. Change any existing `Gateways` to reference the new `GatewayClass` `consul`. Refer to [gatewayClass](/consul/docs/api-gateway/configuration/gateway#gatewayclassname) for additional information.
1. After updating all of your `gateway` configurations to use the new controller, you can complete the upgrade again and completely remove the `apiGateway` block to remove the old controller.
1. After updating all of your `gateway` configurations to use the new controller, you can remove the `apiGateway` block from the Helm chart and upgrade your Consul cluster. This completely removes the old gateway controller.
<CodeBlockConfig filename="values.yaml">
```diff
global:
image: hashicorp/consul:1.15
imageK8S: hashicorp/consul-k8s-control-plane:1.1
- apiGateway:
- enabled: true
- image: hashicorp/consul-api-gateway:0.5.4
- managedGatewayClass:
- enabled: true
```
</CodeBlockConfig>
```shell-session
$ consul-k8s install -config-file values.yaml
```
### Self-managed CRDs
<Note>
This upgrade method uses `connectInject.apiGateway.manageExternalCRDs`, which was introduced in Consul on Kubernetes v1.2. As a result, you must be on at least Consul on Kubernetes v1.2 for this upgrade method.
</Note>
If you are unable to tolerate any downtime, you can complete the following steps to upgrade to the native Consul API gateway. If you choose this upgrade option, you must continue to manually install the CRDs necessary for operating the API gateway.
1. Create a values file that installs the version of Consul API gateway that ships with Consul and disables externally-managed CRDs:
1. Create a Helm values file that installs the version of Consul API gateway that ships with Consul and disables externally-managed CRDs:
<CodeBlock heading="apigw-installation.yaml">
<CodeBlockConfig filename="values.yaml" highlight="2-3,5-6">
```yaml
global:
image: hashicorp/consul:1.16
imageK8S: hashicorp/consul-k8s-control-plane
connectInject:
```yaml
global:
image: hashicorp/consul:1.16
imageK8S: hashicorp/consul-k8s-control-plane:1.2
connectInject:
apiGateway:
manageExternalCRDs: false
apiGateway:
manageExternalCRDs: false
apiGateway:
enabled: true
image: hashicorp/consul-api-gateway:0.5.4
managedGatewayClass:
enabled: true
```
image: hashicorp/consul-api-gateway:0.5.4
managedGatewayClass:
enabled: true
```
</CodeBlock>
</CodeBlockConfig>
You must set `connectInject.apiGateway.manageExternalCRDs` to `false`. If you have external CRDs from a legacy installation and do not set this, the upgrade fails because Helm tries to install CRDs that already exist.
1. Issue the following command to apply the configuration and complete the installation:
1. Issue the following command to install the new version of the API gateway and disable externally-managed CRDs:
```shell-session
$ kubectl apply -f apigw-installation.yaml
$ consul-k8s install -config-file values.yaml
```
1. Create `ServiceIntentions` allowing `Gateways` to communicate with any backend services that they route to. Refer to [Service intentions configuration entry reference](/consul/docs/connect/config-entries/service-intentions) for additional information.
1. Change any existing `Gateways` to reference the new `GatewayClass` `consul`. Refer to [gatewayClass](/consul/docs/api-gateway/configuration/gateway#gatewayclassname) for additional information.
1. After updating all of your `gateway` configurations to use the new controller, you can remove the `apiGateway` block from the Helm chart and rerun it. This completely removes the old gateway controller.
1. After updating all of your `gateway` configurations to use the new controller, you can remove the `apiGateway` block from the Helm chart and upgrade your Consul cluster. This completely removes the old gateway controller.
<CodeBlockConfig filename="values.yaml">
```diff
global:
image: hashicorp/consul:1.16
imageK8S: hashicorp/consul-k8s-control-plane:1.2
connectInject:
apiGateway:
manageExternalCRDs: false
- apiGateway:
- enabled: true
- image: hashicorp/consul-api-gateway:0.5.4
- managedGatewayClass:
- enabled: true
```
</CodeBlockConfig>
```shell-session
$ consul-k8s install -config-file values.yaml
```
## Upgrade to v0.4.0
@ -153,7 +205,7 @@ Complete the following steps after performing standard upgrade procedure.
You should receive a response similar to the following:
```log
```log hideClipboard
"hashicorp/consul-api-gateway:0.4.0"
```
@ -166,7 +218,7 @@ Complete the following steps after performing standard upgrade procedure.
```
If you have any active `ReferencePolicy` resources, you will receive output similar to the response below.
```log
```log hideClipboard
Warning: ReferencePolicy has been renamed to ReferenceGrant. ReferencePolicy will be removed in v0.6.0 in favor of the identical ReferenceGrant resource.
NAMESPACE NAME
default example-reference-policy
@ -291,7 +343,7 @@ Ensure that the following requirements are met prior to upgrading:
You should receive a response similar to the following:
```log
```log hideClipboard
"hashicorp/consul-api-gateway:0.2.1"
```
@ -445,7 +497,7 @@ Ensure that the following requirements are met prior to upgrading:
You should receive the following response:
```log
```log hideClipboard
"hashicorp/consul-api-gateway:0.1.0"
```

@ -58,3 +58,18 @@ The installation process typically fails after this error message is generated.
**Resolution:**
Install the required CRDs. Refer to the [Consul API Gateway installation instructions](/consul/docs/api-gateway/install#installation) for instructions.
## Operation cannot be fulfilled, the object has been modified
```
{"error": "Operation cannot be fulfilled on gatewayclassconfigs.consul.hashicorp.com \"consul-api-gateway\": the object has been modified; please apply your changes to the latest version and try again"}
```
**Conditions:**
This error occurs when the gateway controller attempts to update an object that has been modified previously. It is a normal part of running the controller and will resolve itself by automatically retrying.
**Impact:**
Excessive error logs are produced, but there is no impact to the functionality of the controller.
**Resolution:**
No action needs to be taken to resolve this issue.

Some of the benefits of a service mesh include:
- automatic failover
- traffic management
- encryption
- observability and traceability,
- authentication and authorization,
- observability and traceability
- authentication and authorization
- network automation
A common use case for leveraging a service mesh is to achieve a [_zero trust_ model](https://www.consul.io/use-cases/zero-trust-networking).

@ -21,7 +21,7 @@ support for using
[Vault as a CA](/consul/docs/connect/ca/vault). With Vault, the root certificate
and private key material remain with the Vault cluster.
### CA and Certificate relationship
## CA and Certificate relationship
This diagram shows the relationship between the CA certificates in a Consul primary datacenter and a
secondary Consul datacenter.
@ -34,9 +34,22 @@ services.
- the Leaf Cert Client Agent is created by auto-encrypt and auto-config. It is used by
client agents for HTTP API TLS, and for mTLS for RPC requests to servers.
Any secondary datacenters receive an intermediate certificate, signed by the Primary Root
CA, which is used as the CA certificate to sign leaf certificates in the secondary
datacenter.
Any secondary datacenters use their CA provider to generate an intermediate certificate
signing request (CSR) to be signed by the primary root CA. They receive an intermediate
CA certificate, which is used to sign leaf certificates in the secondary datacenter.
You can use different providers across primary and secondary datacenters.
For example, an operator may use a Vault CA provider for extra security in the primary
datacenter but choose to use the built-in CA provider in the secondary datacenter, which
may not have a reachable Vault cluster. The following table compares the built-in and Vault providers.
## CA Provider Comparison
| | Consul built-in | Vault |
|------------|------------------------------------|-----------------------------------------------------------------------------------|
| Security | CA private keys are stored on disk | CA private keys are stored in Vault and are never exposed to Consul server agents |
| Resiliency | No dependency on external systems. If Consul is available, it can sign certificates | Dependent on Vault availability |
| Latency | Consul signs certificates locally | A network call to Vault is required to sign certificates |
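To illustrate mixing providers, the following minimal sketch shows one possible agent configuration for each side: a Vault provider in the primary datacenter and the built-in provider in a secondary. The Vault address, token, and PKI paths are placeholders, not required values.

```hcl
# Primary datacenter agent configuration: Vault CA provider.
connect {
  enabled     = true
  ca_provider = "vault"
  ca_config {
    address               = "https://vault.example.com:8200" # placeholder Vault address
    token                 = "<vault-token>"                  # placeholder token
    root_pki_path         = "connect-root"                   # placeholder path
    intermediate_pki_path = "connect-dc1-intermediate"       # placeholder path
  }
}
```

```hcl
# Secondary datacenter agent configuration: built-in CA provider.
connect {
  enabled     = true
  ca_provider = "consul"
}
```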
## CA Bootstrapping

@ -7,7 +7,7 @@ description: >-
# Vault as a Service Mesh Certificate Authority
You can configure Consul to use [Vault](https://www.vaultproject.io/) as the certificate authority (CA) so that Vault can manage and sign certificates distributed to services in the mesh.
You can configure Consul to use [Vault](/vault) as the certificate authority (CA) so that Vault can manage and sign certificates distributed to services in the mesh.
The Vault CA provider uses the [Vault PKI secrets engine](/vault/docs/secrets/pki) to generate and sign certificates.
This page describes how to configure the Vault CA provider.
@ -15,11 +15,19 @@ This page describes how configure the Vault CA provider.
## Requirements
- Vault 0.10.3 or higher
~> **Compatibility note:** If you use Vault 1.11.0+ as Consul's service mesh CA, versions of Consul released before Dec 13, 2022 will develop an issue with Consul control plane or service mesh communication ([GH-15525](https://github.com/hashicorp/consul/pull/15525)). Use or upgrade to a [Consul version that includes the fix](https://support.hashicorp.com/hc/en-us/articles/11308460105491#01GMC24E6PPGXMRX8DMT4HZYTW) to avoid this problem.
## Recommendations
- Refer to [Service Mesh Certificate Authority Overview](/consul/docs/connect/ca) for important background information about how Consul manages certificates with configurable CA providers.
- Vault 0.10.3 to 1.10.x.
- For best performance and resiliency, every datacenter should have a Vault cluster local to its Consul cluster.
~> **Compatibility note:** If you use Vault 1.11.0+ as Consul's service mesh CA, versions of Consul released before Dec 13, 2022 will develop an issue with Consul control plane or service mesh communication ([GH-15525](https://github.com/hashicorp/consul/pull/15525)). Use or upgrade to a [Consul version that includes the fix](https://support.hashicorp.com/hc/en-us/articles/11308460105491#01GMC24E6PPGXMRX8DMT4HZYTW) to avoid this problem.
- If your Consul datacenters are WAN-federated and the secondary datacenter uses Vault Enterprise
[performance secondaries](/vault/docs/enterprise/replication#performance-replication), we recommend
configuring [`local`](/vault/docs/enterprise/replication#local) mounts for their [`intermediate_pki_path`](/consul/docs/connect/ca/vault#intermediatepkipath), as shown in the sketch below.
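A minimal sketch of pre-creating such a local mount on the performance secondary, assuming a placeholder mount path that matches the `intermediate_pki_path` value in your Consul configuration:

```shell-session
$ vault secrets enable -local -path=connect-dc2-intermediate pki
```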
## Enable Vault as the CA
@ -104,7 +112,8 @@ The key after the slash refers to the corresponding option name in the agent con
Only the authentication-related fields (for example, JWT's `path` and `role`) are supported. The optional management fields (for example, `remove_jwt_after_reading`) are not supported.
- `RootPKIPath` / `root_pki_path` (`string: <required>`) - The path to
a PKI secrets engine for the root certificate.
a PKI secrets engine for the root certificate. Required for primary
datacenters. Secondary datacenters do not use this path.
If the path does not
exist, Consul will mount a new PKI secrets engine at the specified path with the
@ -114,9 +123,6 @@ The key after the slash refers to the corresponding option name in the agent con
the root certificate TTL was set to 8760 hours, or 1 year, and was not configurable.
The root certificate will expire at the end of the specified period.
When WAN Federation is enabled, each secondary datacenter must use the same Vault cluster and share the same `root_pki_path`
with the primary datacenter.
To use an intermediate certificate as the primary CA in Consul, initialize the
`RootPKIPath` in Vault with a PEM bundle. The first certificate in the bundle
must be the intermediate certificate that Consul will use as the primary CA.
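A minimal sketch of initializing the mount at `RootPKIPath` with an existing PEM bundle, using the Vault PKI engine's `config/ca` endpoint. The mount path `connect-root` and the file `bundle.pem` are placeholders; the first certificate in the bundle must be the intermediate that Consul will use as the primary CA.

```shell-session
$ vault write connect-root/config/ca pem_bundle=@bundle.pem
```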
@ -133,8 +139,10 @@ The key after the slash refers to the corresponding option name in the agent con
path does not exist, Consul will attempt to mount and configure this
automatically.
When WAN Federation is enabled, every secondary
datacenter must specify a unique `intermediate_pki_path`.
When WAN federation is enabled, every secondary datacenter that shares a common Vault cluster
must specify a unique `intermediate_pki_path`. If a Vault cluster is not used by more than one Consul datacenter,
you do not need a unique value for `intermediate_pki_path`. However, we still recommend a unique
`intermediate_pki_path` for each datacenter to improve operational and diagnostic clarity, as shown in the sketch following this list.
- `IntermediatePKINamespace` / `intermediate_pki_namespace` (`string: <optional>`) - The absolute namespace
that the `IntermediatePKIPath` is in. Setting this parameter overrides the `Namespace` option for the `IntermediatePKIPath`. Introduced in 1.12.3.
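The sketch below illustrates the recommendation above with two secondary datacenters that share one Vault cluster, each using a datacenter-specific path. The path names are placeholders, and the other `ca_config` fields are omitted for brevity.

```hcl
# dc2 agent configuration (other ca_config fields omitted)
connect {
  ca_provider = "vault"
  ca_config {
    intermediate_pki_path = "connect-intermediate-dc2" # unique per datacenter
  }
}

# dc3 agent configuration
connect {
  ca_provider = "vault"
  ca_config {
    intermediate_pki_path = "connect-intermediate-dc3"
  }
}
```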

@ -952,6 +952,22 @@ Defines behavior for caching the validation result of previously encountered JWT
</Tab>
</Tabs>
## Metrics
Envoy proxies expose metrics that track JWT authentication activity. Use the following Envoy metrics:
```yaml
http.public_listener.jwt_authn.allowed
http.public_listener.jwt_authn.cors_preflight_bypassed
http.public_listener.jwt_authn.denied
http.public_listener.jwt_authn.jwks_fetch_failed
http.public_listener.jwt_authn.jwks_fetch_success
http.public_listener.jwt_authn.jwt_cache_hit
http.public_listener.jwt_authn.jwt_cache_miss
```
~> **Note:** Envoy does not currently document these metrics. Refer to the [Envoy documentation](https://www.envoyproxy.io/docs/envoy/latest/) for general information about exposed metrics.
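To inspect these counters on a running proxy, you can query the Envoy admin interface. This is a minimal sketch that assumes Envoy's default admin address in Consul sidecar deployments (`localhost:19000`):

```shell-session
$ curl -s http://localhost:19000/stats | grep jwt_authn
```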
## Examples
The following examples demonstrate common JWT provider configuration patterns for specific use cases.
