diff --git a/.changelog/17160.txt b/.changelog/17160.txt new file mode 100644 index 0000000000..666a6e8f25 --- /dev/null +++ b/.changelog/17160.txt @@ -0,0 +1,3 @@ +```release-note:bug +Fix a bug that wrongly trimmed the domain suffix from DNS queries when the datacenter name overlapped with the configured domain or alt-domain. +``` diff --git a/.changelog/17739.txt b/.changelog/17739.txt new file mode 100644 index 0000000000..14bbceeaa0 --- /dev/null +++ b/.changelog/17739.txt @@ -0,0 +1,3 @@ +```release-note:bug +http: fixed API endpoint `PUT /acl/token/:AccessorID` (update token) so that it no longer requires `AccessorID` in the request body. The web UI can now update tokens. +``` diff --git a/.changelog/17780.txt b/.changelog/17780.txt new file mode 100644 index 0000000000..b90925a8b9 --- /dev/null +++ b/.changelog/17780.txt @@ -0,0 +1,3 @@ +```release-note:feature +cli: the `consul watch` command now accepts a `-filter` expression to filter responses from the checks, services, nodes, and service watch types. +``` diff --git a/.changelog/17831.txt b/.changelog/17831.txt new file mode 100644 index 0000000000..2833bda1d5 --- /dev/null +++ b/.changelog/17831.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ca: Vault CA provider config no longer requires `root_pki_path` for secondary datacenters +``` diff --git a/.changelog/17888.txt b/.changelog/17888.txt new file mode 100644 index 0000000000..f50fcae09b --- /dev/null +++ b/.changelog/17888.txt @@ -0,0 +1,3 @@ +```release-note:improvement +connect: Add capture group labels from Envoy cluster FQDNs to Envoy exported metric labels +``` \ No newline at end of file diff --git a/.github/workflows/build-artifacts.yml b/.github/workflows/build-artifacts.yml index 9d7a2583f9..57e2eba8e4 100644 --- a/.github/workflows/build-artifacts.yml +++ b/.github/workflows/build-artifacts.yml @@ -13,7 +13,7 @@ permissions: contents: read env: - GOPRIVATE: github.com/hashicorp + GOPRIVATE: github.com/hashicorp # Required for enterprise deps jobs: setup: diff --git a/.github/workflows/build-distros.yml b/.github/workflows/build-distros.yml index 166706493f..8b88345d2e 100644 --- a/.github/workflows/build-distros.yml +++ b/.github/workflows/build-distros.yml @@ -15,6 +15,7 @@ permissions: env: GOTAGS: ${{ endsWith(github.repository, '-enterprise') && 'consulent' || '' }} + GOPRIVATE: github.com/hashicorp # Required for enterprise deps jobs: setup: diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index cfeab4a04c..9186f12bfe 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -14,6 +14,7 @@ on: env: PKG_NAME: consul METADATA: oss + GOPRIVATE: github.com/hashicorp # Required for enterprise deps jobs: set-product-version: diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 0d3b6b0992..831271f6f8 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -21,6 +21,7 @@ permissions: env: TEST_RESULTS: /tmp/test-results + GOPRIVATE: github.com/hashicorp # Required for enterprise deps jobs: setup: @@ -50,7 +51,7 @@ jobs: check-generated-protobuf: needs: - setup - runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }} + runs-on: ${{ fromJSON(needs.setup.outputs.compute-medium) }} steps: - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos.
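Review note on `.changelog/17739.txt`: a minimal sketch for exercising the relaxed token-update endpoint, assuming a local dev agent on `127.0.0.1:8500`, a `root` management token, and a placeholder accessor ID (all hypothetical values). The request body deliberately omits `AccessorID`; before this change the endpoint rejected it with 400 "Token Accessor ID in URL and payload do not match".

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Placeholder accessor ID of an existing token; replace with a real one.
	const accessorID = "00000000-0000-0000-0000-000000000002"
	url := "http://127.0.0.1:8500/v1/acl/token/" + accessorID

	// Note: no AccessorID field in the payload. With this fix the agent
	// copies it from the URL path before validating the update.
	body, err := json.Marshal(map[string]any{
		"Description": "updated without AccessorID in the body",
	})
	if err != nil {
		panic(err)
	}

	req, err := http.NewRequest(http.MethodPut, url, bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Consul-Token", "root") // dev-agent management token

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect "200 OK" on agents that include this fix
}
```

If the body does carry an `AccessorID`, it still has to match the one in the URL; only the empty case is now filled in server-side.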
diff --git a/.github/workflows/nightly-test-1.13.x.yaml b/.github/workflows/nightly-test-1.13.x.yaml index 767072b6d2..de852c9114 100644 --- a/.github/workflows/nightly-test-1.13.x.yaml +++ b/.github/workflows/nightly-test-1.13.x.yaml @@ -8,9 +8,10 @@ on: workflow_dispatch: {} env: - EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition + EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition BRANCH: "release/1.13.x" - BRANCH_NAME: "release-1.13.x" # Used for naming artifacts + BRANCH_NAME: "release-1.13.x" # Used for naming artifacts + GOPRIVATE: github.com/hashicorp # Required for enterprise deps jobs: frontend-test-workspace-node: diff --git a/.github/workflows/nightly-test-1.14.x.yaml b/.github/workflows/nightly-test-1.14.x.yaml index fb34eb0d1b..1f319b4bd3 100644 --- a/.github/workflows/nightly-test-1.14.x.yaml +++ b/.github/workflows/nightly-test-1.14.x.yaml @@ -8,9 +8,10 @@ on: workflow_dispatch: {} env: - EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition + EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition BRANCH: "release/1.14.x" - BRANCH_NAME: "release-1.14.x" # Used for naming artifacts + BRANCH_NAME: "release-1.14.x" # Used for naming artifacts + GOPRIVATE: github.com/hashicorp # Required for enterprise deps jobs: frontend-test-workspace-node: diff --git a/.github/workflows/nightly-test-1.15.x.yaml b/.github/workflows/nightly-test-1.15.x.yaml index 99b8a2b188..d41cf84a62 100644 --- a/.github/workflows/nightly-test-1.15.x.yaml +++ b/.github/workflows/nightly-test-1.15.x.yaml @@ -8,9 +8,10 @@ on: workflow_dispatch: {} env: - EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition + EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition BRANCH: "release/1.15.x" - BRANCH_NAME: "release-1.15.x" # Used for naming artifacts + BRANCH_NAME: "release-1.15.x" # Used for naming artifacts + GOPRIVATE: github.com/hashicorp # Required for enterprise deps jobs: frontend-test-workspace-node: diff --git a/.github/workflows/nightly-test-1.16.x.yaml b/.github/workflows/nightly-test-1.16.x.yaml index db63ef83fe..98a1f364b6 100644 --- a/.github/workflows/nightly-test-1.16.x.yaml +++ b/.github/workflows/nightly-test-1.16.x.yaml @@ -8,9 +8,10 @@ on: workflow_dispatch: {} env: - EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition + EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition BRANCH: "release/1.16.x" - BRANCH_NAME: "release-1.16.x" # Used for naming artifacts + BRANCH_NAME: "release-1.16.x" # Used for naming artifacts + GOPRIVATE: github.com/hashicorp # Required for enterprise deps jobs: frontend-test-workspace-node: diff --git a/.github/workflows/nightly-test-main.yaml b/.github/workflows/nightly-test-main.yaml index f7958b53ce..13048656b6 100644 --- a/.github/workflows/nightly-test-main.yaml +++ b/.github/workflows/nightly-test-main.yaml @@ -8,9 +8,10 @@ on: workflow_dispatch: {} env: - EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition + EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition BRANCH: "main" - BRANCH_NAME: "main" # Used for naming artifacts + BRANCH_NAME: "main" # Used for naming artifacts + GOPRIVATE: github.com/hashicorp # Required for enterprise deps jobs: frontend-test-workspace-node: diff --git a/.github/workflows/oss-merge-trigger.yml b/.github/workflows/oss-merge-trigger.yml index 
4a4fdaa208..9146f7bc22 100644 --- a/.github/workflows/oss-merge-trigger.yml +++ b/.github/workflows/oss-merge-trigger.yml @@ -8,7 +8,7 @@ on: - closed branches: - main - - 'release/*.*.x' + - release/** jobs: trigger-oss-merge: @@ -26,4 +26,4 @@ jobs: curl -H "Authorization: token $GH_PAT" \ -H 'Accept: application/json' \ -d "{\"event_type\": \"oss-merge\", \"client_payload\": {\"git-ref\": \"${GIT_REF}\", \"git-sha\": \"${GIT_SHA}\", \"git-actor\": \"${GIT_ACTOR}\" }}" \ - "https://api.github.com/repos/hashicorp/consul-enterprise/dispatches" \ No newline at end of file + "https://api.github.com/repos/hashicorp/consul-enterprise/dispatches" diff --git a/.github/workflows/reusable-lint.yml b/.github/workflows/reusable-lint.yml index fd1e8b9a15..9a9a26f026 100644 --- a/.github/workflows/reusable-lint.yml +++ b/.github/workflows/reusable-lint.yml @@ -20,6 +20,7 @@ on: env: GOTAGS: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" GOARCH: ${{inputs.go-arch}} + GOPRIVATE: github.com/hashicorp # Required for enterprise deps jobs: lint: diff --git a/.github/workflows/reusable-unit-split.yml b/.github/workflows/reusable-unit-split.yml index 962467d833..e2da192096 100644 --- a/.github/workflows/reusable-unit-split.yml +++ b/.github/workflows/reusable-unit-split.yml @@ -51,6 +51,7 @@ env: TOTAL_RUNNERS: ${{inputs.runner-count}} CONSUL_LICENSE: ${{secrets.consul-license}} GOTAGS: ${{ inputs.go-tags}} + GOPRIVATE: github.com/hashicorp # Required for enterprise deps DATADOG_API_KEY: ${{secrets.datadog-api-key}} jobs: diff --git a/.github/workflows/reusable-unit.yml b/.github/workflows/reusable-unit.yml index 6001cc8bcd..3f7ffa2774 100644 --- a/.github/workflows/reusable-unit.yml +++ b/.github/workflows/reusable-unit.yml @@ -46,6 +46,7 @@ env: GOARCH: ${{inputs.go-arch}} CONSUL_LICENSE: ${{secrets.consul-license}} GOTAGS: ${{ inputs.go-tags}} + GOPRIVATE: github.com/hashicorp # Required for enterprise deps DATADOG_API_KEY: ${{secrets.datadog-api-key}} jobs: diff --git a/.github/workflows/test-integrations.yml b/.github/workflows/test-integrations.yml index 836ed56ee1..641533012d 100644 --- a/.github/workflows/test-integrations.yml +++ b/.github/workflows/test-integrations.yml @@ -23,6 +23,7 @@ env: CONSUL_BINARY_UPLOAD_NAME: consul-bin # strip the hashicorp/ off the front of github.repository for consul CONSUL_LATEST_IMAGE_NAME: ${{ endsWith(github.repository, '-enterprise') && github.repository || 'consul' }} + GOPRIVATE: github.com/hashicorp # Required for enterprise deps jobs: setup: @@ -365,6 +366,10 @@ jobs: ENVOY_VERSION: "1.25.4" steps: - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. + - name: Setup Git + if: ${{ endsWith(github.repository, '-enterprise') }} + run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 with: go-version-file: 'go.mod' @@ -476,6 +481,10 @@ jobs: ENVOY_VERSION: "1.24.6" steps: - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. 
+ - name: Setup Git + if: ${{ endsWith(github.repository, '-enterprise') }} + run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 with: go-version-file: 'go.mod' diff --git a/.github/workflows/verify-release-linux.yaml b/.github/workflows/verify-release-linux.yaml new file mode 100644 index 0000000000..a86da7f05f --- /dev/null +++ b/.github/workflows/verify-release-linux.yaml @@ -0,0 +1,78 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +name: Verify Release - Linux + +on: + workflow_dispatch: + inputs: + packageName: + description: 'Name of consul release package (consul vs consul-enterprise)' + required: true + default: 'consul' + type: choice + options: + - consul + - consul-enterprise + version: + description: 'The x.y.z version (also need to specify applicable suffixes like +ent and -dev)' + required: true + type: string + +jobs: + verify-ubuntu-amd64: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - name: docker build with version + run: | + docker build \ + --build-arg PACKAGE=${{ inputs.packageName }} \ + --build-arg VERSION=${{ inputs.version }} \ + --build-arg TARGETARCH=amd64 \ + -f ./build-support/docker/Verify-Release-Ubuntu.dockerfile . + + verify-debian-amd64: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - name: docker build with version + run: | + docker build \ + --build-arg PACKAGE=${{ inputs.packageName }} \ + --build-arg VERSION=${{ inputs.version }} \ + --build-arg TARGETARCH=amd64 \ + -f ./build-support/docker/Verify-Release-Debian.dockerfile . + + verify-fedora: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - name: docker build with version + run: | + docker build \ + --build-arg PACKAGE=${{ inputs.packageName }} \ + --build-arg VERSION=${{ inputs.version }} \ + -f ./build-support/docker/Verify-Release-Fedora.dockerfile . + + verify-centos: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - name: docker build with version + run: | + docker build \ + --build-arg PACKAGE=${{ inputs.packageName }} \ + --build-arg VERSION=${{ inputs.version }} \ + -f ./build-support/docker/Verify-Release-CentOS.dockerfile . + + verify-amazon: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 + - name: docker build with version + run: | + docker build \ + --build-arg PACKAGE=${{ inputs.packageName }} \ + --build-arg VERSION=${{ inputs.version }} \ + -f ./build-support/docker/Verify-Release-Amazon.dockerfile . diff --git a/CHANGELOG.md b/CHANGELOG.md index ef4edc7004..dc70d8b08a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,97 @@ +## 1.15.4 (June 26, 2023) +FEATURES: + +* cli: `consul operator raft list-peers` command shows the number of commits each follower is trailing the leader by to aid in troubleshooting. [[GH-17582](https://github.com/hashicorp/consul/issues/17582)] +* server: **(Enterprise Only)** allow automatic license utilization reporting.
[[GH-5102](https://github.com/hashicorp/consul/issues/5102)] + +IMPROVEMENTS: + +* connect: update supported envoy versions to 1.22.11, 1.23.9, 1.24.7, 1.25.6 [[GH-17545](https://github.com/hashicorp/consul/issues/17545)] +* debug: changed the default settings of the `consul debug` command: the default duration is now 5m and the default log level is 'TRACE' [[GH-17596](https://github.com/hashicorp/consul/issues/17596)] +* fix metric names in /docs/agent/telemetry [[GH-17577](https://github.com/hashicorp/consul/issues/17577)] +* gateway: Change status condition reason for invalid certificate on a listener from "Accepted" to "ResolvedRefs". [[GH-17115](https://github.com/hashicorp/consul/issues/17115)] +* systemd: set service type to notify. [[GH-16845](https://github.com/hashicorp/consul/issues/16845)] + +BUG FIXES: + +* cache: fix a few minor goroutine leaks in leaf certs and the agent cache [[GH-17636](https://github.com/hashicorp/consul/issues/17636)] +* docs: fix list of telemetry metrics [[GH-17593](https://github.com/hashicorp/consul/issues/17593)] +* gateways: **(Enterprise only)** Fixed a bug in API gateways where gateway configuration objects in non-default partitions did not reconcile properly. [[GH-17581](https://github.com/hashicorp/consul/issues/17581)] +* gateways: Fixed a bug in API gateways where binding a route that only targets a service imported from a peer results + in the programmed gateway having no routes. [[GH-17609](https://github.com/hashicorp/consul/issues/17609)] +* gateways: Fixed a bug where API gateways were not being taken into account in determining xDS rate limits. [[GH-17631](https://github.com/hashicorp/consul/issues/17631)] +* http: fixed API endpoint `PUT /acl/token/:AccessorID` (update token) so that it no longer requires `AccessorID` in the request body. The web UI can now update tokens. [[GH-17739](https://github.com/hashicorp/consul/issues/17739)] +* namespaces: **(Enterprise only)** fixes a bug where agent health checks stop syncing for all services on a node if the namespace of any service has been removed from the server. +* namespaces: **(Enterprise only)** fixes a bug where namespaces are stuck in a deferred deletion state indefinitely under some conditions. + Also fixes the Consul query metadata present in the HTTP headers of the namespace read and list endpoints. +* peering: Fix a bug that caused server agents to continue cleaning up peering resources even after loss of leadership. [[GH-17483](https://github.com/hashicorp/consul/issues/17483)] +* xds: Fixed a bug where modifying ACLs on a token being actively used for an xDS connection caused all xDS updates to fail. [[GH-17566](https://github.com/hashicorp/consul/issues/17566)] + +## 1.14.8 (June 26, 2023) + +SECURITY: + +* Update the UBI base image to 9.2. [[GH-17513](https://github.com/hashicorp/consul/issues/17513)] + +FEATURES: + +* cli: `consul operator raft list-peers` command shows the number of commits each follower is trailing the leader by to aid in troubleshooting. [[GH-17582](https://github.com/hashicorp/consul/issues/17582)] +* server: **(Enterprise Only)** allow automatic license utilization reporting. [[GH-5102](https://github.com/hashicorp/consul/issues/5102)] + +IMPROVEMENTS: + +* connect: update supported envoy versions to 1.21.6, 1.22.11, 1.23.9, 1.24.7 [[GH-17547](https://github.com/hashicorp/consul/issues/17547)] +* debug: changed the default settings of the `consul debug` command: the default duration is now 5m and the default log level is 'TRACE' [[GH-17596](https://github.com/hashicorp/consul/issues/17596)] +* fix metric names in /docs/agent/telemetry [[GH-17577](https://github.com/hashicorp/consul/issues/17577)] +* peering: gRPC queries for TrustBundleList, TrustBundleRead, PeeringList, and PeeringRead now support blocking semantics, + reducing network and CPU demand. + The HTTP APIs for Peering List and Read have been updated to support blocking. [[GH-17426](https://github.com/hashicorp/consul/issues/17426)] +* raft: Remove expensive reflection from raft/mesh hot path [[GH-16552](https://github.com/hashicorp/consul/issues/16552)] +* systemd: set service type to notify. [[GH-16845](https://github.com/hashicorp/consul/issues/16845)] + +BUG FIXES: + +* cache: fix a few minor goroutine leaks in leaf certs and the agent cache [[GH-17636](https://github.com/hashicorp/consul/issues/17636)] +* connect: reverts the #17317 fix that caused a downstream error for Ingress/Mesh/Terminating GWs when their respective config entry does not already exist. [[GH-17541](https://github.com/hashicorp/consul/issues/17541)] +* namespaces: **(Enterprise only)** fixes a bug where agent health checks stop syncing for all services on a node if the namespace of any service has been removed from the server. +* namespaces: **(Enterprise only)** fixes a bug where namespaces are stuck in a deferred deletion state indefinitely under some conditions. + Also fixes the Consul query metadata present in the HTTP headers of the namespace read and list endpoints. +* namespaces: adjusts the return type from HTTP list API to return the `api` module representation of a namespace. + This fixes an error with the `consul namespace list` command when a namespace has a deferred deletion timestamp. +* peering: Fix a bug that caused server agents to continue cleaning up peering resources even after loss of leadership. [[GH-17483](https://github.com/hashicorp/consul/issues/17483)] +* peering: Fix issue where modifying the list of exported services did not correctly replicate changes for services that exist in a non-default namespace. [[GH-17456](https://github.com/hashicorp/consul/issues/17456)] + +## 1.13.9 (June 26, 2023) BREAKING CHANGES: + +* connect: Disable peering by default in connect proxies for Consul 1.13. This change was made to prevent inefficient polling + queries from having a negative impact on server performance. Peering in Consul 1.13 is an experimental feature and is not + recommended for use in production environments. If you still wish to use the experimental peering feature, ensure + [`peering.enabled = true`](https://developer.hashicorp.com/consul/docs/v1.13.x/agent/config/config-files#peering_enabled) + is set on all clients and servers. [[GH-17731](https://github.com/hashicorp/consul/issues/17731)] + +SECURITY: + +* Update the UBI base image to 9.2. [[GH-17513](https://github.com/hashicorp/consul/issues/17513)] + +FEATURES: + +* server: **(Enterprise Only)** allow automatic license utilization reporting. [[GH-5102](https://github.com/hashicorp/consul/issues/5102)] + +IMPROVEMENTS: + +* debug: changed the default settings of the `consul debug` command: the default duration is now 5m and the default log level is 'TRACE' [[GH-17596](https://github.com/hashicorp/consul/issues/17596)] +* systemd: set service type to notify.
[[GH-16845](https://github.com/hashicorp/consul/issues/16845)] + +BUG FIXES: + +* cache: fix a few minor goroutine leaks in leaf certs and the agent cache [[GH-17636](https://github.com/hashicorp/consul/issues/17636)] +* namespaces: **(Enterprise only)** fixes a bug where namespaces are stuck in a deferred deletion state indefinitely under some conditions. + Also fixes the Consul query metadata present in the HTTP headers of the namespace read and list endpoints. +* namespaces: adjusts the return type from HTTP list API to return the `api` module representation of a namespace. + This fixes an error with the `consul namespace list` command when a namespace has a deferred deletion timestamp. +* peering: Fix a bug that caused server agents to continue cleaning up peering resources even after loss of leadership. [[GH-17483](https://github.com/hashicorp/consul/issues/17483)] + ## 1.16.0-rc1 (June 12, 2023) BREAKING CHANGES: diff --git a/GNUmakefile b/GNUmakefile index fe554b3c54..79080311c4 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -171,7 +171,7 @@ dev-build: dev-docker-dbg: dev-docker @echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)" - @docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null + @docker pull hashicorp/consul:$(CONSUL_IMAGE_VERSION) >/dev/null @echo "Building Consul Development container - $(CONSUL_DEV_IMAGE)" @# 'consul-dbg:local' tag is needed to run the integration tests @# 'consul-dev:latest' is needed by older workflows @@ -183,7 +183,7 @@ dev-docker-dbg: dev-docker dev-docker: linux dev-build @echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)" - @docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null + @docker pull hashicorp/consul:$(CONSUL_IMAGE_VERSION) >/dev/null @echo "Building Consul Development container - $(CONSUL_DEV_IMAGE)" @# 'consul:local' tag is needed to run the integration tests @# 'consul-dev:latest' is needed by older workflows @@ -194,6 +194,7 @@ dev-docker: linux dev-build --label version=$(CONSUL_VERSION) \ --load \ -f $(CURDIR)/build-support/docker/Consul-Dev-Multiarch.dockerfile $(CURDIR)/pkg/bin/ + docker tag 'consul:local' '$(CONSUL_COMPAT_TEST_IMAGE):local' check-remote-dev-image-env: ifndef REMOTE_DEV_IMAGE @@ -204,7 +205,7 @@ remote-docker: check-remote-dev-image-env $(MAKE) GOARCH=amd64 linux $(MAKE) GOARCH=arm64 linux @echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)" - @docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null + @docker pull hashicorp/consul:$(CONSUL_IMAGE_VERSION) >/dev/null @echo "Building and Pushing Consul Development container - $(REMOTE_DEV_IMAGE)" @if ! docker buildx inspect consul-builder; then \ docker buildx create --name consul-builder --driver docker-container --bootstrap; \ @@ -221,7 +222,7 @@ remote-docker: check-remote-dev-image-env # should only run in CI and not locally. 
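# (Consul release images are published under the hashicorp/ Docker Hub namespace rather than as the unqualified consul image, hence the hashicorp/consul references in the pull commands above and below.)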
ci.dev-docker: @echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)" - @docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null + @docker pull hashicorp/consul:$(CONSUL_IMAGE_VERSION) >/dev/null @echo "Building Consul Development container - $(CI_DEV_DOCKER_IMAGE_NAME)" @docker build $(NOCACHE) $(QUIET) -t '$(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_DOCKER_IMAGE_NAME):$(GIT_COMMIT)' \ --build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \ diff --git a/agent/acl_endpoint.go b/agent/acl_endpoint.go index da838b1a64..d6f230a8f6 100644 --- a/agent/acl_endpoint.go +++ b/agent/acl_endpoint.go @@ -441,8 +441,16 @@ func (s *HTTPHandlers) aclTokenSetInternal(req *http.Request, tokenAccessorID st return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: fmt.Sprintf("Token decoding failed: %v", err)} } - if !create && args.ACLToken.AccessorID != tokenAccessorID { - return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "Token Accessor ID in URL and payload do not match"} + if !create { + // NOTE: AccessorID in the request body is optional when not creating a new token. + // If not present in the body and only in the URL then it will be filled in by Consul. + if args.ACLToken.AccessorID == "" { + args.ACLToken.AccessorID = tokenAccessorID + } + + if args.ACLToken.AccessorID != tokenAccessorID { + return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "Token Accessor ID in URL and payload do not match"} + } } var out structs.ACLToken diff --git a/agent/acl_endpoint_test.go b/agent/acl_endpoint_test.go index 20c982492a..0c948880e0 100644 --- a/agent/acl_endpoint_test.go +++ b/agent/acl_endpoint_test.go @@ -907,6 +907,48 @@ func TestACL_HTTP(t *testing.T) { tokenMap[token.AccessorID] = token }) + t.Run("Update without AccessorID in request body", func(t *testing.T) { + originalToken := tokenMap[idMap["token-cloned"]] + + // Secret will be filled in + tokenInput := &structs.ACLToken{ + Description: "Even Better description for this cloned token", + Policies: []structs.ACLTokenPolicyLink{ + { + ID: idMap["policy-read-all-nodes"], + Name: policyMap[idMap["policy-read-all-nodes"]].Name, + }, + }, + NodeIdentities: []*structs.ACLNodeIdentity{ + { + NodeName: "foo", + Datacenter: "bar", + }, + }, + } + + req, _ := http.NewRequest("PUT", "/v1/acl/token/"+originalToken.AccessorID, jsonBody(tokenInput)) + req.Header.Add("X-Consul-Token", "root") + resp := httptest.NewRecorder() + obj, err := a.srv.ACLTokenCRUD(resp, req) + require.NoError(t, err) + token, ok := obj.(*structs.ACLToken) + require.True(t, ok) + + require.Equal(t, originalToken.AccessorID, token.AccessorID) + require.Equal(t, originalToken.SecretID, token.SecretID) + require.Equal(t, tokenInput.Description, token.Description) + require.Equal(t, tokenInput.Policies, token.Policies) + require.Equal(t, tokenInput.NodeIdentities, token.NodeIdentities) + require.True(t, token.CreateIndex > 0) + require.True(t, token.CreateIndex < token.ModifyIndex) + require.NotNil(t, token.Hash) + require.NotEqual(t, token.Hash, []byte{}) + require.NotEqual(t, token.Hash, originalToken.Hash) + + tokenMap[token.AccessorID] = token + }) + t.Run("CRUD Missing Token Accessor ID", func(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/acl/token/", nil) req.Header.Add("X-Consul-Token", "root") diff --git a/agent/config/builder.go b/agent/config/builder.go index 8e0bb37ef9..6acd1b0039 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -1473,7 +1473,7 @@ func (b *builder) validate(rt RuntimeConfig) error { return err } case 
structs.VaultCAProvider: - if _, err := ca.ParseVaultCAConfig(rt.ConnectCAConfig); err != nil { + if _, err := ca.ParseVaultCAConfig(rt.ConnectCAConfig, rt.PrimaryDatacenter == rt.Datacenter); err != nil { return err } case structs.AWSCAProvider: diff --git a/agent/connect/ca/provider_test.go b/agent/connect/ca/provider_test.go index b7ed9e29b4..1ff4af3977 100644 --- a/agent/connect/ca/provider_test.go +++ b/agent/connect/ca/provider_test.go @@ -113,7 +113,7 @@ func TestStructs_CAConfiguration_MsgpackEncodeDecode(t *testing.T) { TLSSkipVerify: true, }, parseFunc: func(t *testing.T, raw map[string]interface{}) interface{} { - config, err := ParseVaultCAConfig(raw) + config, err := ParseVaultCAConfig(raw, true) require.NoError(t, err) return config }, diff --git a/agent/connect/ca/provider_vault.go b/agent/connect/ca/provider_vault.go index 00a598d92d..89350d87df 100644 --- a/agent/connect/ca/provider_vault.go +++ b/agent/connect/ca/provider_vault.go @@ -101,7 +101,7 @@ func vaultTLSConfig(config *structs.VaultCAProviderConfig) *vaultapi.TLSConfig { // Configure sets up the provider using the given configuration. // Configure supports being called multiple times to re-configure the provider. func (v *VaultProvider) Configure(cfg ProviderConfig) error { - config, err := ParseVaultCAConfig(cfg.RawConfig) + config, err := ParseVaultCAConfig(cfg.RawConfig, v.isPrimary) if err != nil { return err } @@ -192,11 +192,11 @@ func (v *VaultProvider) Configure(cfg ProviderConfig) error { } func (v *VaultProvider) ValidateConfigUpdate(prevRaw, nextRaw map[string]interface{}) error { - prev, err := ParseVaultCAConfig(prevRaw) + prev, err := ParseVaultCAConfig(prevRaw, v.isPrimary) if err != nil { return fmt.Errorf("failed to parse existing CA config: %w", err) } - next, err := ParseVaultCAConfig(nextRaw) + next, err := ParseVaultCAConfig(nextRaw, v.isPrimary) if err != nil { return fmt.Errorf("failed to parse new CA config: %w", err) } @@ -800,7 +800,7 @@ func (v *VaultProvider) Cleanup(providerTypeChange bool, otherConfig map[string] v.Stop() if !providerTypeChange { - newConfig, err := ParseVaultCAConfig(otherConfig) + newConfig, err := ParseVaultCAConfig(otherConfig, v.isPrimary) if err != nil { return err } @@ -900,7 +900,7 @@ func (v *VaultProvider) autotidyIssuers(path string) (bool, string) { return tidySet, errStr } -func ParseVaultCAConfig(raw map[string]interface{}) (*structs.VaultCAProviderConfig, error) { +func ParseVaultCAConfig(raw map[string]interface{}, isPrimary bool) (*structs.VaultCAProviderConfig, error) { config := structs.VaultCAProviderConfig{ CommonCAProviderConfig: defaultCommonConfig(), } @@ -931,10 +931,10 @@ func ParseVaultCAConfig(raw map[string]interface{}) (*structs.VaultCAProviderCon return nil, fmt.Errorf("only one of Vault token or Vault auth method can be provided, but not both") } - if config.RootPKIPath == "" { + if isPrimary && config.RootPKIPath == "" { return nil, fmt.Errorf("must provide a valid path to a root PKI backend") } - if !strings.HasSuffix(config.RootPKIPath, "/") { + if config.RootPKIPath != "" && !strings.HasSuffix(config.RootPKIPath, "/") { config.RootPKIPath += "/" } diff --git a/agent/connect/ca/provider_vault_test.go b/agent/connect/ca/provider_vault_test.go index b0e341fe91..87dc1a04fe 100644 --- a/agent/connect/ca/provider_vault_test.go +++ b/agent/connect/ca/provider_vault_test.go @@ -60,6 +60,7 @@ func TestVaultCAProvider_ParseVaultCAConfig(t *testing.T) { cases := map[string]struct { rawConfig map[string]interface{} expConfig 
*structs.VaultCAProviderConfig + isPrimary bool expError string }{ "no token and no auth method provided": { @@ -70,15 +71,26 @@ func TestVaultCAProvider_ParseVaultCAConfig(t *testing.T) { rawConfig: map[string]interface{}{"Token": "test", "AuthMethod": map[string]interface{}{"Type": "test"}}, expError: "only one of Vault token or Vault auth method can be provided, but not both", }, - "no root PKI path": { - rawConfig: map[string]interface{}{"Token": "test"}, + "primary no root PKI path": { + rawConfig: map[string]interface{}{"Token": "test", "IntermediatePKIPath": "test"}, + isPrimary: true, expError: "must provide a valid path to a root PKI backend", }, + "secondary no root PKI path": { + rawConfig: map[string]interface{}{"Token": "test", "IntermediatePKIPath": "test"}, + isPrimary: false, + expConfig: &structs.VaultCAProviderConfig{ + CommonCAProviderConfig: defaultCommonConfig(), + Token: "test", + IntermediatePKIPath: "test/", + }, + }, "no root intermediate path": { rawConfig: map[string]interface{}{"Token": "test", "RootPKIPath": "test"}, expError: "must provide a valid path for the intermediate PKI backend", }, "adds a slash to RootPKIPath and IntermediatePKIPath": { + isPrimary: true, rawConfig: map[string]interface{}{"Token": "test", "RootPKIPath": "test", "IntermediatePKIPath": "test"}, expConfig: &structs.VaultCAProviderConfig{ CommonCAProviderConfig: defaultCommonConfig(), @@ -91,7 +103,7 @@ func TestVaultCAProvider_ParseVaultCAConfig(t *testing.T) { for name, c := range cases { t.Run(name, func(t *testing.T) { - config, err := ParseVaultCAConfig(c.rawConfig) + config, err := ParseVaultCAConfig(c.rawConfig, c.isPrimary) if c.expError != "" { require.EqualError(t, err, c.expError) } else { diff --git a/agent/consul/health_endpoint_test.go b/agent/consul/health_endpoint_test.go index cd37b5ec4c..21a83ea90d 100644 --- a/agent/consul/health_endpoint_test.go +++ b/agent/consul/health_endpoint_test.go @@ -1767,5 +1767,11 @@ func TestHealth_RPC_Filter(t *testing.T) { out = new(structs.IndexedHealthChecks) require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &args, out)) require.Len(t, out.HealthChecks, 1) + + args.State = api.HealthAny + args.Filter = "connect in ServiceTags and v2 in ServiceTags" + out = new(structs.IndexedHealthChecks) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &args, out)) + require.Len(t, out.HealthChecks, 1) }) } diff --git a/agent/dns.go b/agent/dns.go index cb1e3c310d..5804dc97dd 100644 --- a/agent/dns.go +++ b/agent/dns.go @@ -1055,7 +1055,7 @@ func (d *DNSServer) trimDomain(query string) string { longer, shorter = shorter, longer } - if strings.HasSuffix(query, longer) { + if strings.HasSuffix(query, "."+strings.TrimLeft(longer, ".")) { return strings.TrimSuffix(query, longer) } return strings.TrimSuffix(query, shorter) diff --git a/agent/dns_test.go b/agent/dns_test.go index 46a7e758c7..ef5364964d 100644 --- a/agent/dns_test.go +++ b/agent/dns_test.go @@ -7071,6 +7071,45 @@ func TestDNS_AltDomains_Overlap(t *testing.T) { } } +func TestDNS_AltDomain_DCName_Overlap(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + // this tests the DC name overlap with the consul domain/alt-domain + // we should get response when DC suffix is a prefix of consul alt-domain + t.Parallel() + a := NewTestAgent(t, ` + datacenter = "dc-test" + node_name = "test-node" + alt_domain = "test.consul." 
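+ # NOTE: "node.dc-test.consul." also ends in the alt-domain "test.consul.", which is the overlap under test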
+ `) + defer a.Shutdown() + testrpc.WaitForLeader(t, a.RPC, "dc-test") + + questions := []string{ + "test-node.node.dc-test.consul.", + "test-node.node.dc-test.test.consul.", + } + + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeA) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + require.Len(t, in.Answer, 1) + + aRec, ok := in.Answer[0].(*dns.A) + require.True(t, ok) + require.Equal(t, aRec.A.To4().String(), "127.0.0.1") + } +} + func TestDNS_PreparedQuery_AllowStale(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") diff --git a/agent/envoyextensions/builtin/ext-authz/ext_authz_test.go b/agent/envoyextensions/builtin/ext-authz/ext_authz_test.go index e0b4245edd..88e87d7e9a 100644 --- a/agent/envoyextensions/builtin/ext-authz/ext_authz_test.go +++ b/agent/envoyextensions/builtin/ext-authz/ext_authz_test.go @@ -59,7 +59,7 @@ func TestConstructor(t *testing.T) { }, }, }, - errMsg: `invalid host for Target.URI "foo.bar.com:9191": expected 'localhost' or '127.0.0.1'`, + errMsg: `invalid host for Target.URI "foo.bar.com:9191": expected "localhost", "127.0.0.1", or "::1"`, }, "non-loopback address": { args: map[string]any{ @@ -72,7 +72,34 @@ func TestConstructor(t *testing.T) { }, }, }, - errMsg: `invalid host for Target.URI "10.0.0.1:9191": expected 'localhost' or '127.0.0.1'`, + errMsg: `invalid host for Target.URI "10.0.0.1:9191": expected "localhost", "127.0.0.1", or "::1"`, + }, + "invalid target port": { + args: map[string]any{ + "ProxyType": "connect-proxy", + "Config": map[string]any{ + "GrpcService": map[string]any{ + "Target": map[string]any{ + "URI": "localhost:zero", + }, + }, + }, + }, + errMsg: `invalid format for Target.URI "localhost:zero": expected host:port`, + }, + "invalid target timeout": { + args: map[string]any{ + "ProxyType": "connect-proxy", + "Config": map[string]any{ + "GrpcService": map[string]any{ + "Target": map[string]any{ + "URI": "localhost:9191", + "Timeout": "one", + }, + }, + }, + }, + errMsg: `failed to parse Target.Timeout "one" as a duration`, }, "no uri or service target": { args: map[string]any{ diff --git a/agent/envoyextensions/builtin/ext-authz/structs.go b/agent/envoyextensions/builtin/ext-authz/structs.go index b64011a991..a14cedd63a 100644 --- a/agent/envoyextensions/builtin/ext-authz/structs.go +++ b/agent/envoyextensions/builtin/ext-authz/structs.go @@ -34,6 +34,9 @@ const ( defaultMetadataNS = "consul" defaultStatPrefix = "response" defaultStatusOnError = 403 + localhost = "localhost" + localhostIPv4 = "127.0.0.1" + localhostIPv6 = "::1" ) type extAuthzConfig struct { @@ -185,6 +188,12 @@ func (c *extAuthzConfig) toEnvoyCluster(_ *cmn.RuntimeConfig) (*envoy_cluster_v3 return nil, err } + clusterType := &envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_STATIC} + if host == localhost { + // If the host is "localhost" use a STRICT_DNS cluster type to perform DNS lookup. + clusterType = &envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_STRICT_DNS} + } + var typedExtProtoOpts map[string]*anypb.Any if c.isGRPC() { // By default HTTP/1.1 is used for the transport protocol. 
gRPC requires that we explicitly configure HTTP/2 @@ -205,7 +214,7 @@ func (c *extAuthzConfig) toEnvoyCluster(_ *cmn.RuntimeConfig) (*envoy_cluster_v3 return &envoy_cluster_v3.Cluster{ Name: LocalExtAuthzClusterName, - ClusterDiscoveryType: &envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_STATIC}, + ClusterDiscoveryType: clusterType, ConnectTimeout: target.timeoutDurationPB(), LoadAssignment: &envoy_endpoint_v3.ClusterLoadAssignment{ ClusterName: LocalExtAuthzClusterName, @@ -645,18 +654,13 @@ func (t *Target) validate() error { } if t.isURI() { - // Strip the protocol if one was provided - if _, addr, hasProto := strings.Cut(t.URI, "://"); hasProto { - t.URI = addr - } - addr := strings.Split(t.URI, ":") - if len(addr) == 2 { - t.host = addr[0] - if t.host != "localhost" && t.host != "127.0.0.1" { - resultErr = multierror.Append(resultErr, fmt.Errorf("invalid host for Target.URI %q: expected 'localhost' or '127.0.0.1'", t.URI)) - } - if t.port, err = strconv.Atoi(addr[1]); err != nil { - resultErr = multierror.Append(resultErr, fmt.Errorf("invalid port for Target.URI %q", addr[1])) + t.host, t.port, err = parseAddr(t.URI) + if err == nil { + switch t.host { + case localhost, localhostIPv4, localhostIPv6: + default: + resultErr = multierror.Append(resultErr, + fmt.Errorf("invalid host for Target.URI %q: expected %q, %q, or %q", t.URI, localhost, localhostIPv4, localhostIPv6)) } } else { resultErr = multierror.Append(resultErr, fmt.Errorf("invalid format for Target.URI %q: expected host:port", t.URI)) @@ -672,3 +676,22 @@ func (t *Target) validate() error { } return resultErr } + +func parseAddr(s string) (host string, port int, err error) { + // Strip the protocol if one was provided + if _, addr, hasProto := strings.Cut(s, "://"); hasProto { + s = addr + } + idx := strings.LastIndex(s, ":") + switch idx { + case -1, len(s) - 1: + err = fmt.Errorf("invalid input format %q: expected host:port", s) + case 0: + host = localhost + port, err = strconv.Atoi(s[idx+1:]) + default: + host = s[:idx] + port, err = strconv.Atoi(s[idx+1:]) + } + return +} diff --git a/agent/grpc-external/services/resource/list_by_owner_test.go b/agent/grpc-external/services/resource/list_by_owner_test.go index 19fe799caf..218971a050 100644 --- a/agent/grpc-external/services/resource/list_by_owner_test.go +++ b/agent/grpc-external/services/resource/list_by_owner_test.go @@ -74,7 +74,7 @@ func TestListByOwner_TypeNotRegistered(t *testing.T) { }) require.Error(t, err) require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.Contains(t, err.Error(), "resource type demo.v2.artist not registered") + require.Contains(t, err.Error(), "resource type demo.v2.Artist not registered") } func TestListByOwner_Empty(t *testing.T) { @@ -126,7 +126,7 @@ func TestListByOwner_Many(t *testing.T) { } func TestListByOwner_ACL_PerTypeDenied(t *testing.T) { - authz := AuthorizerFrom(t, `key_prefix "resource/demo.v2.album/" { policy = "deny" }`) + authz := AuthorizerFrom(t, `key_prefix "resource/demo.v2.Album/" { policy = "deny" }`) _, rsp, err := roundTripListByOwner(t, authz) // verify resource filtered out, hence no results @@ -135,7 +135,7 @@ func TestListByOwner_ACL_PerTypeDenied(t *testing.T) { } func TestListByOwner_ACL_PerTypeAllowed(t *testing.T) { - authz := AuthorizerFrom(t, `key_prefix "resource/demo.v2.album/" { policy = "read" }`) + authz := AuthorizerFrom(t, `key_prefix "resource/demo.v2.Album/" { policy = "read" }`) album, rsp, err := roundTripListByOwner(t, authz) // verify resource not 
filtered out diff --git a/agent/grpc-external/services/resource/list_test.go b/agent/grpc-external/services/resource/list_test.go index 7d102b090c..4d6b50951b 100644 --- a/agent/grpc-external/services/resource/list_test.go +++ b/agent/grpc-external/services/resource/list_test.go @@ -58,7 +58,7 @@ func TestList_TypeNotFound(t *testing.T) { }) require.Error(t, err) require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.Contains(t, err.Error(), "resource type demo.v2.artist not registered") + require.Contains(t, err.Error(), "resource type demo.v2.Artist not registered") } func TestList_Empty(t *testing.T) { @@ -178,7 +178,7 @@ func TestList_ACL_ListAllowed_ReadDenied(t *testing.T) { // allow list, deny read authz := AuthorizerFrom(t, demo.ArtistV2ListPolicy, - `key_prefix "resource/demo.v2.artist/" { policy = "deny" }`) + `key_prefix "resource/demo.v2.Artist/" { policy = "deny" }`) _, rsp, err := roundTripList(t, authz) // verify resource filtered out by key:read denied hence no results diff --git a/agent/grpc-external/services/resource/read_test.go b/agent/grpc-external/services/resource/read_test.go index 237895eacc..cca911ec15 100644 --- a/agent/grpc-external/services/resource/read_test.go +++ b/agent/grpc-external/services/resource/read_test.go @@ -71,7 +71,7 @@ func TestRead_TypeNotFound(t *testing.T) { _, err = client.Read(context.Background(), &pbresource.ReadRequest{Id: artist.Id}) require.Error(t, err) require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.Contains(t, err.Error(), "resource type demo.v2.artist not registered") + require.Contains(t, err.Error(), "resource type demo.v2.Artist not registered") } func TestRead_ResourceNotFound(t *testing.T) { diff --git a/agent/grpc-external/services/resource/watch_test.go b/agent/grpc-external/services/resource/watch_test.go index 687fe0d067..95695f295e 100644 --- a/agent/grpc-external/services/resource/watch_test.go +++ b/agent/grpc-external/services/resource/watch_test.go @@ -66,7 +66,7 @@ func TestWatchList_TypeNotFound(t *testing.T) { err = mustGetError(t, rspCh) require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.Contains(t, err.Error(), "resource type demo.v2.artist not registered") + require.Contains(t, err.Error(), "resource type demo.v2.Artist not registered") } func TestWatchList_GroupVersionMatches(t *testing.T) { @@ -172,7 +172,7 @@ func TestWatchList_ACL_ListAllowed_ReadDenied(t *testing.T) { // allow list, deny read authz := AuthorizerFrom(t, ` key_prefix "resource/" { policy = "list" } - key_prefix "resource/demo.v2.artist/" { policy = "deny" } + key_prefix "resource/demo.v2.Artist/" { policy = "deny" } `) rspCh, _ := roundTripACL(t, authz) @@ -187,7 +187,7 @@ func TestWatchList_ACL_ListAllowed_ReadAllowed(t *testing.T) { // allow list, allow read authz := AuthorizerFrom(t, ` key_prefix "resource/" { policy = "list" } - key_prefix "resource/demo.v2.artist/" { policy = "read" } + key_prefix "resource/demo.v2.Artist/" { policy = "read" } `) rspCh, artist := roundTripACL(t, authz) diff --git a/agent/grpc-external/services/resource/write_status_test.go b/agent/grpc-external/services/resource/write_status_test.go index f65c7918ff..aa26330176 100644 --- a/agent/grpc-external/services/resource/write_status_test.go +++ b/agent/grpc-external/services/resource/write_status_test.go @@ -180,7 +180,7 @@ func TestWriteStatus_TypeNotFound(t *testing.T) { _, err = client.WriteStatus(testContext(t), validWriteStatusRequest(t, res)) 
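// The not-registered error now reports the type's CamelCase kind, so the expected string below is demo.v2.Artist rather than demo.v2.artist.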
require.Error(t, err) require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.Contains(t, err.Error(), "resource type demo.v2.artist not registered") + require.Contains(t, err.Error(), "resource type demo.v2.Artist not registered") } func TestWriteStatus_ResourceNotFound(t *testing.T) { diff --git a/agent/grpc-external/services/resource/write_test.go b/agent/grpc-external/services/resource/write_test.go index 3da4ec478a..4ec25ee26c 100644 --- a/agent/grpc-external/services/resource/write_test.go +++ b/agent/grpc-external/services/resource/write_test.go @@ -151,7 +151,7 @@ func TestWrite_TypeNotFound(t *testing.T) { _, err = client.Write(testContext(t), &pbresource.WriteRequest{Resource: res}) require.Error(t, err) require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.Contains(t, err.Error(), "resource type demo.v2.artist not registered") + require.Contains(t, err.Error(), "resource type demo.v2.Artist not registered") } func TestWrite_ACLs(t *testing.T) { diff --git a/agent/proxycfg/proxycfg.deepcopy.go b/agent/proxycfg/proxycfg.deepcopy.go index 7c577e18aa..350f76bf08 100644 --- a/agent/proxycfg/proxycfg.deepcopy.go +++ b/agent/proxycfg/proxycfg.deepcopy.go @@ -13,6 +13,10 @@ import ( // DeepCopy generates a deep copy of *ConfigSnapshot func (o *ConfigSnapshot) DeepCopy() *ConfigSnapshot { var cp ConfigSnapshot = *o + if o.ServiceLocality != nil { + cp.ServiceLocality = new(structs.Locality) + *cp.ServiceLocality = *o.ServiceLocality + } if o.ServiceMeta != nil { cp.ServiceMeta = make(map[string]string, len(o.ServiceMeta)) for k2, v2 := range o.ServiceMeta { diff --git a/agent/proxycfg/snapshot.go b/agent/proxycfg/snapshot.go index 1880dcd669..e8f95d9651 100644 --- a/agent/proxycfg/snapshot.go +++ b/agent/proxycfg/snapshot.go @@ -901,6 +901,7 @@ func IngressListenerKeyFromListener(l structs.IngressListener) IngressListenerKe type ConfigSnapshot struct { Kind structs.ServiceKind Service string + ServiceLocality *structs.Locality ProxyID ProxyID Address string Port int diff --git a/agent/proxycfg/state.go b/agent/proxycfg/state.go index 911e4f316e..028a3fd59d 100644 --- a/agent/proxycfg/state.go +++ b/agent/proxycfg/state.go @@ -124,6 +124,7 @@ type serviceInstance struct { taggedAddresses map[string]structs.ServiceAddress proxyCfg structs.ConnectProxyConfig token string + locality *structs.Locality } func copyProxyConfig(ns *structs.NodeService) (structs.ConnectProxyConfig, error) { @@ -244,6 +245,7 @@ func newServiceInstanceFromNodeService(id ProxyID, ns *structs.NodeService, toke return serviceInstance{ kind: ns.Kind, service: ns.Service, + locality: ns.Locality, proxyID: id, address: ns.Address, port: ns.Port, @@ -303,6 +305,7 @@ func newConfigSnapshotFromServiceInstance(s serviceInstance, config stateConfig) return ConfigSnapshot{ Kind: s.kind, Service: s.service, + ServiceLocality: s.locality, ProxyID: s.proxyID, Address: s.address, Port: s.port, diff --git a/agent/structs/config_entry_jwt_provider.go b/agent/structs/config_entry_jwt_provider.go index a1e9120ea0..fc0c73950b 100644 --- a/agent/structs/config_entry_jwt_provider.go +++ b/agent/structs/config_entry_jwt_provider.go @@ -316,6 +316,15 @@ func (e *JWTProviderConfigEntry) GetRaftIndex() *RaftIndex { retur func (e *JWTProviderConfigEntry) CanRead(authz acl.Authorizer) error { var authzContext acl.AuthorizerContext e.FillAuthzContext(&authzContext) + + // allow service-identity tokens the ability to read jwt-providers + // this is a workaround to allow sidecar proxies 
to read the jwt-providers + // see issue: https://github.com/hashicorp/consul/issues/17886 for more details + err := authz.ToAllowAuthorizer().ServiceWriteAnyAllowed(&authzContext) + if err == nil { + return err + } + return authz.ToAllowAuthorizer().MeshReadAllowed(&authzContext) } diff --git a/agent/structs/config_entry_jwt_provider_test.go b/agent/structs/config_entry_jwt_provider_test.go index 814a152573..c02becc2a1 100644 --- a/agent/structs/config_entry_jwt_provider_test.go +++ b/agent/structs/config_entry_jwt_provider_test.go @@ -338,6 +338,24 @@ func TestJWTProviderConfigEntry_ACLs(t *testing.T) { canRead: false, canWrite: false, }, + { + name: "jwt-provider: any service write", + authorizer: newTestAuthz(t, `service "" { policy = "write" }`), + canRead: true, + canWrite: false, + }, + { + name: "jwt-provider: specific service write", + authorizer: newTestAuthz(t, `service "web" { policy = "write" }`), + canRead: true, + canWrite: false, + }, + { + name: "jwt-provider: any service prefix write", + authorizer: newTestAuthz(t, `service_prefix "" { policy = "write" }`), + canRead: true, + canWrite: false, + }, { name: "jwt-provider: mesh read", authorizer: newTestAuthz(t, `mesh = "read"`), diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 1499c35eb8..c1f6d07606 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -2095,6 +2095,18 @@ func (csn *CheckServiceNode) CanRead(authz acl.Authorizer) acl.EnforcementDecisi return acl.Allow } +func (csn *CheckServiceNode) Locality() *Locality { + if csn.Service != nil && csn.Service.Locality != nil { + return csn.Service.Locality + } + + if csn.Node != nil && csn.Node.Locality != nil { + return csn.Node.Locality + } + + return nil +} + type CheckServiceNodes []CheckServiceNode func (csns CheckServiceNodes) DeepCopy() CheckServiceNodes { diff --git a/agent/xds/delta_envoy_extender_oss_test.go b/agent/xds/delta_envoy_extender_oss_test.go index 3d92b6d25d..2f18809b19 100644 --- a/agent/xds/delta_envoy_extender_oss_test.go +++ b/agent/xds/delta_envoy_extender_oss_test.go @@ -676,6 +676,7 @@ end`, ns.Proxy.EnvoyExtensions = makeExtAuthzEnvoyExtension( "http", "dest=local", + "target-uri=localhost:9191", "insert=AfterLastMatch:envoy.filters.http.header_to_metadata", ) }, nil) diff --git a/agent/xds/delta_envoy_extender_test.go b/agent/xds/delta_envoy_extender_test.go index 0a76d62219..6cd57fa53a 100644 --- a/agent/xds/delta_envoy_extender_test.go +++ b/agent/xds/delta_envoy_extender_test.go @@ -50,6 +50,13 @@ func makeExtAuthzEnvoyExtension(svc string, opts ...string) []structs.EnvoyExten "FilterName": filterName, } } + case "target-uri": + target = map[string]any{"URI": v} + configMap = map[string]any{ + serviceKey: map[string]any{ + "Target": target, + }, + } case "config-type": if v == "full" { target["Timeout"] = "2s" diff --git a/agent/xds/endpoints.go b/agent/xds/endpoints.go index ad03971336..aef2dc31c9 100644 --- a/agent/xds/endpoints.go +++ b/agent/xds/endpoints.go @@ -135,7 +135,9 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg. endpoints, ok := cfgSnap.ConnectProxy.PreparedQueryEndpoints[uid] if ok { la := makeLoadAssignment( + cfgSnap, clusterName, + nil, []loadAssignmentEndpointGroup{ {Endpoints: endpoints}, }, @@ -158,7 +160,9 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg. 
endpoints, ok := cfgSnap.ConnectProxy.DestinationGateways.Get(uid) if ok { la := makeLoadAssignment( + cfgSnap, name, + nil, []loadAssignmentEndpointGroup{ {Endpoints: endpoints}, }, @@ -224,7 +228,9 @@ func (s *ResourceGenerator) endpointsFromSnapshotMeshGateway(cfgSnap *proxycfg.C clusterName := connect.GatewaySNI(key.Datacenter, key.Partition, cfgSnap.Roots.TrustDomain) la := makeLoadAssignment( + cfgSnap, clusterName, + nil, []loadAssignmentEndpointGroup{ {Endpoints: endpoints}, }, @@ -239,7 +245,9 @@ func (s *ResourceGenerator) endpointsFromSnapshotMeshGateway(cfgSnap *proxycfg.C clusterName := cfgSnap.ServerSNIFn(key.Datacenter, "") la := makeLoadAssignment( + cfgSnap, clusterName, + nil, []loadAssignmentEndpointGroup{ {Endpoints: endpoints}, }, @@ -409,7 +417,9 @@ func (s *ResourceGenerator) endpointsFromServicesAndResolvers( for subsetName, groups := range clusterEndpoints { clusterName := connect.ServiceSNI(svc.Name, subsetName, svc.NamespaceOrDefault(), svc.PartitionOrDefault(), cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain) la := makeLoadAssignment( + cfgSnap, clusterName, + nil, groups, cfgSnap.Locality, ) @@ -444,7 +454,9 @@ func (s *ResourceGenerator) makeEndpointsForOutgoingPeeredServices( groups := []loadAssignmentEndpointGroup{{Endpoints: serviceGroup.Nodes, OnlyPassing: false}} la := makeLoadAssignment( + cfgSnap, clusterName, + nil, groups, // Use an empty key here so that it never matches. This will force the mesh gateway to always // reference the remote mesh gateway's wan addr. @@ -606,7 +618,9 @@ func (s *ResourceGenerator) makeUpstreamLoadAssignmentForPeerService( return la, nil } la = makeLoadAssignment( + cfgSnap, clusterName, + nil, []loadAssignmentEndpointGroup{ {Endpoints: localGw}, }, @@ -626,7 +640,9 @@ func (s *ResourceGenerator) makeUpstreamLoadAssignmentForPeerService( return nil, nil } la = makeLoadAssignment( + cfgSnap, clusterName, + nil, []loadAssignmentEndpointGroup{ {Endpoints: endpoints}, }, @@ -756,7 +772,9 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain( } la := makeLoadAssignment( + cfgSnap, clusterName, + ti.PrioritizeByLocality, []loadAssignmentEndpointGroup{endpointGroup}, gatewayKey, ) @@ -842,7 +860,7 @@ type loadAssignmentEndpointGroup struct { OverrideHealth envoy_core_v3.HealthStatus } -func makeLoadAssignment(clusterName string, endpointGroups []loadAssignmentEndpointGroup, localKey proxycfg.GatewayKey) *envoy_endpoint_v3.ClusterLoadAssignment { +func makeLoadAssignment(cfgSnap *proxycfg.ConfigSnapshot, clusterName string, policy *structs.DiscoveryPrioritizeByLocality, endpointGroups []loadAssignmentEndpointGroup, localKey proxycfg.GatewayKey) *envoy_endpoint_v3.ClusterLoadAssignment { cla := &envoy_endpoint_v3.ClusterLoadAssignment{ ClusterName: clusterName, Endpoints: make([]*envoy_endpoint_v3.LocalityLbEndpoints, 0, len(endpointGroups)), @@ -856,35 +874,46 @@ func makeLoadAssignment(clusterName string, endpointGroups []loadAssignmentEndpo } } - for priority, endpointGroup := range endpointGroups { - endpoints := endpointGroup.Endpoints - es := make([]*envoy_endpoint_v3.LbEndpoint, 0, len(endpoints)) + var priority uint32 - for _, ep := range endpoints { - // TODO (mesh-gateway) - should we respect the translate_wan_addrs configuration here or just always use the wan for cross-dc? 
- _, addr, port := ep.BestAddress(!localKey.Matches(ep.Node.Datacenter, ep.Node.PartitionOrDefault())) - healthStatus, weight := calculateEndpointHealthAndWeight(ep, endpointGroup.OnlyPassing) + for _, endpointGroup := range endpointGroups { + endpointsByLocality, err := groupedEndpoints(cfgSnap.ServiceLocality, policy, endpointGroup.Endpoints) - if endpointGroup.OverrideHealth != envoy_core_v3.HealthStatus_UNKNOWN { - healthStatus = endpointGroup.OverrideHealth - } + if err != nil { + continue + } - endpoint := &envoy_endpoint_v3.Endpoint{ - Address: makeAddress(addr, port), + for _, endpoints := range endpointsByLocality { + es := make([]*envoy_endpoint_v3.LbEndpoint, 0, len(endpointGroup.Endpoints)) + + for _, ep := range endpoints { + // TODO (mesh-gateway) - should we respect the translate_wan_addrs configuration here or just always use the wan for cross-dc? + _, addr, port := ep.BestAddress(!localKey.Matches(ep.Node.Datacenter, ep.Node.PartitionOrDefault())) + healthStatus, weight := calculateEndpointHealthAndWeight(ep, endpointGroup.OnlyPassing) + + if endpointGroup.OverrideHealth != envoy_core_v3.HealthStatus_UNKNOWN { + healthStatus = endpointGroup.OverrideHealth + } + + endpoint := &envoy_endpoint_v3.Endpoint{ + Address: makeAddress(addr, port), + } + es = append(es, &envoy_endpoint_v3.LbEndpoint{ + HostIdentifier: &envoy_endpoint_v3.LbEndpoint_Endpoint{ + Endpoint: endpoint, + }, + HealthStatus: healthStatus, + LoadBalancingWeight: makeUint32Value(weight), + }) } - es = append(es, &envoy_endpoint_v3.LbEndpoint{ - HostIdentifier: &envoy_endpoint_v3.LbEndpoint_Endpoint{ - Endpoint: endpoint, - }, - HealthStatus: healthStatus, - LoadBalancingWeight: makeUint32Value(weight), + + cla.Endpoints = append(cla.Endpoints, &envoy_endpoint_v3.LocalityLbEndpoints{ + Priority: priority, + LbEndpoints: es, }) - } - cla.Endpoints = append(cla.Endpoints, &envoy_endpoint_v3.LocalityLbEndpoints{ - Priority: uint32(priority), - LbEndpoints: es, - }) + priority++ + } } return cla diff --git a/agent/xds/endpoints_test.go b/agent/xds/endpoints_test.go index ebdd06aa41..eee35103aa 100644 --- a/agent/xds/endpoints_test.go +++ b/agent/xds/endpoints_test.go @@ -101,6 +101,7 @@ func Test_makeLoadAssignment(t *testing.T) { tests := []struct { name string clusterName string + locality *structs.Locality endpoints []loadAssignmentEndpointGroup want *envoy_endpoint_v3.ClusterLoadAssignment }{ @@ -211,11 +212,24 @@ func Test_makeLoadAssignment(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := makeLoadAssignment( + &proxycfg.ConfigSnapshot{ServiceLocality: tt.locality}, tt.clusterName, + nil, tt.endpoints, proxycfg.GatewayKey{Datacenter: "dc1"}, ) require.Equal(t, tt.want, got) + + if tt.locality == nil { + got := makeLoadAssignment( + &proxycfg.ConfigSnapshot{ServiceLocality: &structs.Locality{Region: "us-west-1", Zone: "us-west-1a"}}, + tt.clusterName, + nil, + tt.endpoints, + proxycfg.GatewayKey{Datacenter: "dc1"}, + ) + require.Equal(t, tt.want, got) + } }) } } diff --git a/agent/xds/failover_policy.go b/agent/xds/failover_policy.go index 5edcae914d..77839a37cf 100644 --- a/agent/xds/failover_policy.go +++ b/agent/xds/failover_policy.go @@ -27,6 +27,8 @@ type targetInfo struct { // Region is the region from the failover target's Locality. nil means the // target is in the local Consul cluster. 
Region *string + + PrioritizeByLocality *structs.DiscoveryPrioritizeByLocality } type discoChainTargetGroup struct { @@ -87,7 +89,7 @@ func (s *ResourceGenerator) mapDiscoChainTargets(cfgSnap *proxycfg.ConfigSnapsho var sni, rootPEMs string var spiffeIDs []string targetUID := proxycfg.NewUpstreamIDFromTargetID(tid) - ti := targetInfo{TargetID: tid} + ti := targetInfo{TargetID: tid, PrioritizeByLocality: target.PrioritizeByLocality} configureTLS := true if forMeshGateway { diff --git a/agent/xds/listeners_test.go b/agent/xds/listeners_test.go index 21e3149bf1..10b358bdad 100644 --- a/agent/xds/listeners_test.go +++ b/agent/xds/listeners_test.go @@ -1109,6 +1109,15 @@ func TestListenersFromSnapshot(t *testing.T) { nil) }, }, + { + name: "connect-proxy-without-tproxy-and-permissive-mtls", + create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshot(t, func(ns *structs.NodeService) { + ns.Proxy.MutualTLSMode = structs.MutualTLSModePermissive + }, + nil) + }, + }, } tests = append(tests, makeListenerDiscoChainTests(false)...) diff --git a/agent/xds/locality_policy.go b/agent/xds/locality_policy.go new file mode 100644 index 0000000000..d2dd977f1a --- /dev/null +++ b/agent/xds/locality_policy.go @@ -0,0 +1,21 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package xds + +import ( + "fmt" + + "github.com/hashicorp/consul/agent/structs" +) + +func groupedEndpoints(locality *structs.Locality, policy *structs.DiscoveryPrioritizeByLocality, csns structs.CheckServiceNodes) ([]structs.CheckServiceNodes, error) { + switch { + case policy == nil || policy.Mode == "" || policy.Mode == "none": + return []structs.CheckServiceNodes{csns}, nil + case policy.Mode == "failover": + return prioritizeByLocalityFailover(locality, csns), nil + default: + return nil, fmt.Errorf("unexpected prioritize-by-locality mode %q", policy.Mode) + } +} diff --git a/agent/xds/locality_policy_oss.go b/agent/xds/locality_policy_oss.go new file mode 100644 index 0000000000..16147aeb0c --- /dev/null +++ b/agent/xds/locality_policy_oss.go @@ -0,0 +1,15 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +//go:build !consulent +// +build !consulent + +package xds + +import ( + "github.com/hashicorp/consul/agent/structs" +) + +func prioritizeByLocalityFailover(locality *structs.Locality, csns structs.CheckServiceNodes) []structs.CheckServiceNodes { + return nil +} diff --git a/agent/xds/testdata/builtin_extension/clusters/ext-authz-http-local-http-service.latest.golden b/agent/xds/testdata/builtin_extension/clusters/ext-authz-http-local-http-service.latest.golden index 3b0f2da69c..992da1ae68 100644 --- a/agent/xds/testdata/builtin_extension/clusters/ext-authz-http-local-http-service.latest.golden +++ b/agent/xds/testdata/builtin_extension/clusters/ext-authz-http-local-http-service.latest.golden @@ -142,7 +142,7 @@ { "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", "name": "local_ext_authz", - "type": "STATIC", + "type": "STRICT_DNS", "loadAssignment": { "clusterName": "local_ext_authz", "endpoints": [ @@ -152,7 +152,7 @@ "endpoint": { "address": { "socketAddress": { - "address": "127.0.0.1", + "address": "localhost", "portValue": 9191 } } diff --git a/agent/xds/testdata/listeners/connect-proxy-without-tproxy-and-permissive-mtls.latest.golden b/agent/xds/testdata/listeners/connect-proxy-without-tproxy-and-permissive-mtls.latest.golden new file mode 100644 index 0000000000..b15ccf4a14 --- /dev/null +++ b/agent/xds/testdata/listeners/connect-proxy-without-tproxy-and-permissive-mtls.latest.golden @@ -0,0 +1,115 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "db:127.0.0.1:9191", + "address": { + "socketAddress": { + "address": "127.0.0.1", + "portValue": 9191 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.db.default.default.dc1", + "cluster": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + } + ], + "trafficDirection": "OUTBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "prepared_query:geo-cache:127.10.10.10:8181", + "address": { + "socketAddress": { + "address": "127.10.10.10", + "portValue": 8181 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.prepared_query_geo-cache", + "cluster": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + } + ], + "trafficDirection": "OUTBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "public_listener:0.0.0.0:9999", + "address": { + "socketAddress": { + "address": "0.0.0.0", + "portValue": 9999 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.rbac.v3.RBAC", + "rules": {}, + "statPrefix": "connect_authz" + } + }, + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "public_listener", + "cluster": "local_app" + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": 
"type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": {}, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + } + ], + "trafficDirection": "INBOUND" + } + ], + "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", + "nonce": "00000001" +} diff --git a/api/watch/funcs.go b/api/watch/funcs.go index 3c057aa664..0d0f6e100c 100644 --- a/api/watch/funcs.go +++ b/api/watch/funcs.go @@ -92,13 +92,20 @@ func keyPrefixWatch(params map[string]interface{}) (WatcherFunc, error) { // servicesWatch is used to watch the list of available services func servicesWatch(params map[string]interface{}) (WatcherFunc, error) { stale := false + filter := "" if err := assignValueBool(params, "stale", &stale); err != nil { return nil, err } + if err := assignValue(params, "filter", &filter); err != nil { + return nil, err + } fn := func(p *Plan) (BlockingParamVal, interface{}, error) { catalog := p.client.Catalog() opts := makeQueryOptionsWithContext(p, stale) + if filter != "" { + opts.Filter = filter + } defer p.cancelFunc() services, meta, err := catalog.Services(&opts) if err != nil { @@ -112,13 +119,20 @@ func servicesWatch(params map[string]interface{}) (WatcherFunc, error) { // 
nodesWatch is used to watch the list of available nodes func nodesWatch(params map[string]interface{}) (WatcherFunc, error) { stale := false + filter := "" if err := assignValueBool(params, "stale", &stale); err != nil { return nil, err } + if err := assignValue(params, "filter", &filter); err != nil { + return nil, err + } fn := func(p *Plan) (BlockingParamVal, interface{}, error) { catalog := p.client.Catalog() opts := makeQueryOptionsWithContext(p, stale) + if filter != "" { + opts.Filter = filter + } defer p.cancelFunc() nodes, meta, err := catalog.Nodes(&opts) if err != nil { @@ -132,9 +146,13 @@ func nodesWatch(params map[string]interface{}) (WatcherFunc, error) { // serviceWatch is used to watch a specific service for changes func serviceWatch(params map[string]interface{}) (WatcherFunc, error) { stale := false + filter := "" if err := assignValueBool(params, "stale", &stale); err != nil { return nil, err } + if err := assignValue(params, "filter", &filter); err != nil { + return nil, err + } var ( service string @@ -158,6 +176,9 @@ func serviceWatch(params map[string]interface{}) (WatcherFunc, error) { fn := func(p *Plan) (BlockingParamVal, interface{}, error) { health := p.client.Health() opts := makeQueryOptionsWithContext(p, stale) + if filter != "" { + opts.Filter = filter + } defer p.cancelFunc() nodes, meta, err := health.ServiceMultipleTags(service, tags, passingOnly, &opts) if err != nil { @@ -175,13 +196,16 @@ func checksWatch(params map[string]interface{}) (WatcherFunc, error) { return nil, err } - var service, state string + var service, state, filter string if err := assignValue(params, "service", &service); err != nil { return nil, err } if err := assignValue(params, "state", &state); err != nil { return nil, err } + if err := assignValue(params, "filter", &filter); err != nil { + return nil, err + } if service != "" && state != "" { return nil, fmt.Errorf("Cannot specify service and state") } @@ -196,6 +220,9 @@ func checksWatch(params map[string]interface{}) (WatcherFunc, error) { var checks []*consulapi.HealthCheck var meta *consulapi.QueryMeta var err error + if filter != "" { + opts.Filter = filter + } if state != "" { checks, meta, err = health.State(state, &opts) } else { diff --git a/api/watch/funcs_test.go b/api/watch/funcs_test.go index d972def6ac..4bd79a59c1 100644 --- a/api/watch/funcs_test.go +++ b/api/watch/funcs_test.go @@ -378,6 +378,82 @@ func TestServicesWatch(t *testing.T) { } +func TestServicesWatch_Filter(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + s.WaitForSerfCheck(t) + + var ( + wakeups []map[string][]string + notifyCh = make(chan struct{}) + ) + + plan := mustParse(t, `{"type":"services", "filter":"b in ServiceTags and a in ServiceTags"}`) + plan.Handler = func(idx uint64, raw interface{}) { + if raw == nil { + return // ignore + } + v, ok := raw.(map[string][]string) + if !ok { + return // ignore + } + wakeups = append(wakeups, v) + notifyCh <- struct{}{} + } + + // Register some services + { + agent := c.Agent() + + // we don't want to find this + reg := &api.AgentServiceRegistration{ + ID: "foo", + Name: "foo", + Tags: []string{"b"}, + } + if err := agent.ServiceRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + // // we want to find this + reg = &api.AgentServiceRegistration{ + ID: "bar", + Name: "bar", + Tags: []string{"a", "b"}, + } + if err := agent.ServiceRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + } + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + if 
err := plan.Run(s.HTTPAddr); err != nil { + t.Errorf("err: %v", err) + } + }() + defer plan.Stop() + + // Wait for second wakeup. + <-notifyCh + + plan.Stop() + wg.Wait() + + require.Len(t, wakeups, 1) + + { + v := wakeups[0] + require.Len(t, v, 1) + _, ok := v["bar"] + require.True(t, ok) + } +} + func TestNodesWatch(t *testing.T) { t.Parallel() c, s := makeClient(t) @@ -453,6 +529,82 @@ func TestNodesWatch(t *testing.T) { } } +func TestNodesWatch_Filter(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + s.WaitForSerfCheck(t) // wait for AE to sync + + var ( + wakeups [][]*api.Node + notifyCh = make(chan struct{}) + ) + + plan := mustParse(t, `{"type":"nodes", "filter":"Node == foo"}`) + plan.Handler = func(idx uint64, raw interface{}) { + if raw == nil { + return // ignore + } + v, ok := raw.([]*api.Node) + if !ok { + return // ignore + } + wakeups = append(wakeups, v) + notifyCh <- struct{}{} + } + + // Register 2 nodes + { + catalog := c.Catalog() + + // we want to find this node + reg := &api.CatalogRegistration{ + Node: "foo", + Address: "1.1.1.1", + Datacenter: "dc1", + } + if _, err := catalog.Register(reg, nil); err != nil { + t.Fatalf("err: %v", err) + } + + // we don't want to find this node + reg = &api.CatalogRegistration{ + Node: "bar", + Address: "2.2.2.2", + Datacenter: "dc1", + } + if _, err := catalog.Register(reg, nil); err != nil { + t.Fatalf("err: %v", err) + } + } + + var wg sync.WaitGroup + wg.Add(1) + // Start the watch nodes plan + go func() { + defer wg.Done() + if err := plan.Run(s.HTTPAddr); err != nil { + t.Errorf("err: %v", err) + } + }() + defer plan.Stop() + + // Wait for first wakeup. + <-notifyCh + + plan.Stop() + wg.Wait() + + require.Len(t, wakeups, 1) + + { + v := wakeups[0] + require.Len(t, v, 1) + require.Equal(t, "foo", v[0].Node) + } +} + func TestServiceWatch(t *testing.T) { t.Parallel() c, s := makeClient(t) @@ -616,6 +768,94 @@ func TestServiceMultipleTagsWatch(t *testing.T) { } } +func TestServiceWatch_Filter(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + s.WaitForSerfCheck(t) + + var ( + wakeups [][]*api.ServiceEntry + notifyCh = make(chan struct{}) + ) + + plan := mustParse(t, `{"type":"service", "service":"foo", "filter":"bar in Service.Tags and buzz in Service.Tags"}`) + plan.Handler = func(idx uint64, raw interface{}) { + if raw == nil { + return // ignore + } + v, ok := raw.([]*api.ServiceEntry) + if !ok { + return // ignore + } + + wakeups = append(wakeups, v) + notifyCh <- struct{}{} + } + + // register some services + { + agent := c.Agent() + + // we do not want to find this one. + reg := &api.AgentServiceRegistration{ + ID: "foobarbiff", + Name: "foo", + Tags: []string{"bar", "biff"}, + } + if err := agent.ServiceRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + // we do not want to find this one. + reg = &api.AgentServiceRegistration{ + ID: "foobuzzbiff", + Name: "foo", + Tags: []string{"buzz", "biff"}, + } + if err := agent.ServiceRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + // we want to find this one + reg = &api.AgentServiceRegistration{ + ID: "foobarbuzzbiff", + Name: "foo", + Tags: []string{"bar", "buzz", "biff"}, + } + if err := agent.ServiceRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + } + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + if err := plan.Run(s.HTTPAddr); err != nil { + t.Errorf("err: %v", err) + } + }() + defer plan.Stop() + + // Wait for second wakeup. 
+ <-notifyCh + + plan.Stop() + wg.Wait() + + require.Len(t, wakeups, 1) + + { + v := wakeups[0] + require.Len(t, v, 1) + + require.Equal(t, "foobarbuzzbiff", v[0].Service.ID) + require.ElementsMatch(t, []string{"bar", "buzz", "biff"}, v[0].Service.Tags) + } +} + func TestChecksWatch_State(t *testing.T) { t.Parallel() c, s := makeClient(t) @@ -772,6 +1012,294 @@ func TestChecksWatch_Service(t *testing.T) { } } +func TestChecksWatch_Service_Filter(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + s.WaitForSerfCheck(t) + + var ( + wakeups [][]*api.HealthCheck + notifyCh = make(chan struct{}) + ) + + plan := mustParse(t, `{"type":"checks", "filter":"b in ServiceTags and a in ServiceTags"}`) + plan.Handler = func(idx uint64, raw interface{}) { + if raw == nil { + return // ignore + } + v, ok := raw.([]*api.HealthCheck) + if !ok { + return // ignore + } + wakeups = append(wakeups, v) + notifyCh <- struct{}{} + } + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + if err := plan.Run(s.HTTPAddr); err != nil { + t.Errorf("err: %v", err) + } + }() + defer plan.Stop() + + // Wait for first wakeup. + <-notifyCh + { + catalog := c.Catalog() + reg := &api.CatalogRegistration{ + Node: "foobar", + Address: "1.1.1.1", + Datacenter: "dc1", + Service: &api.AgentService{ + ID: "foobar", + Service: "foobar", + Tags: []string{"a", "b"}, + }, + Check: &api.AgentCheck{ + Node: "foobar", + CheckID: "foobar", + Name: "foobar", + Status: api.HealthPassing, + ServiceID: "foobar", + }, + } + if _, err := catalog.Register(reg, nil); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Wait for second wakeup. + <-notifyCh + + plan.Stop() + wg.Wait() + + require.Len(t, wakeups, 2) + + { + v := wakeups[0] + require.Len(t, v, 0) + } + { + v := wakeups[1] + require.Len(t, v, 1) + require.Equal(t, "foobar", v[0].CheckID) + } +} + +func TestChecksWatch_Filter(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + s.WaitForSerfCheck(t) + + var ( + wakeups [][]*api.HealthCheck + notifyCh = make(chan struct{}) + ) + + plan := mustParse(t, `{"type":"checks", "filter":"b in ServiceTags and a in ServiceTags"}`) + plan.Handler = func(idx uint64, raw interface{}) { + if raw == nil { + return // ignore + } + v, ok := raw.([]*api.HealthCheck) + if !ok { + return // ignore + } + wakeups = append(wakeups, v) + notifyCh <- struct{}{} + } + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + if err := plan.Run(s.HTTPAddr); err != nil { + t.Errorf("err: %v", err) + } + }() + defer plan.Stop() + + // Wait for first wakeup. 
+ <-notifyCh + { + catalog := c.Catalog() + + // we don't want to find this one + reg := &api.CatalogRegistration{ + Node: "foo", + Address: "1.1.1.1", + Datacenter: "dc1", + Service: &api.AgentService{ + ID: "foo", + Service: "foo", + Tags: []string{"a"}, + }, + Check: &api.AgentCheck{ + Node: "foo", + CheckID: "foo", + Name: "foo", + Status: api.HealthPassing, + ServiceID: "foo", + }, + } + if _, err := catalog.Register(reg, nil); err != nil { + t.Fatalf("err: %v", err) + } + + // we want to find this one + reg = &api.CatalogRegistration{ + Node: "bar", + Address: "2.2.2.2", + Datacenter: "dc1", + Service: &api.AgentService{ + ID: "bar", + Service: "bar", + Tags: []string{"a", "b"}, + }, + Check: &api.AgentCheck{ + Node: "bar", + CheckID: "bar", + Name: "bar", + Status: api.HealthPassing, + ServiceID: "bar", + }, + } + if _, err := catalog.Register(reg, nil); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Wait for second wakeup. + <-notifyCh + + plan.Stop() + wg.Wait() + + require.Len(t, wakeups, 2) + + { + v := wakeups[0] + require.Len(t, v, 0) + } + { + v := wakeups[1] + require.Len(t, v, 1) + require.Equal(t, "bar", v[0].CheckID) + } +} + +func TestChecksWatch_Filter_by_ServiceNameStatus(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + s.WaitForSerfCheck(t) + + var ( + wakeups [][]*api.HealthCheck + notifyCh = make(chan struct{}) + ) + + plan := mustParse(t, `{"type":"checks", "filter":"ServiceName == bar and Status == critical"}`) + plan.Handler = func(idx uint64, raw interface{}) { + if raw == nil { + return // ignore + } + v, ok := raw.([]*api.HealthCheck) + if !ok { + return // ignore + } + wakeups = append(wakeups, v) + notifyCh <- struct{}{} + } + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + if err := plan.Run(s.HTTPAddr); err != nil { + t.Errorf("err: %v", err) + } + }() + defer plan.Stop() + + // Wait for first wakeup. + <-notifyCh + { + catalog := c.Catalog() + + // we don't want to find this one + reg := &api.CatalogRegistration{ + Node: "foo", + Address: "1.1.1.1", + Datacenter: "dc1", + Service: &api.AgentService{ + ID: "foo", + Service: "foo", + Tags: []string{"a"}, + }, + Check: &api.AgentCheck{ + Node: "foo", + CheckID: "foo", + Name: "foo", + Status: api.HealthPassing, + ServiceID: "foo", + }, + } + if _, err := catalog.Register(reg, nil); err != nil { + t.Fatalf("err: %v", err) + } + + // we want to find this one + reg = &api.CatalogRegistration{ + Node: "bar", + Address: "2.2.2.2", + Datacenter: "dc1", + Service: &api.AgentService{ + ID: "bar", + Service: "bar", + Tags: []string{"a", "b"}, + }, + Check: &api.AgentCheck{ + Node: "bar", + CheckID: "bar", + Name: "bar", + Status: api.HealthCritical, + ServiceID: "bar", + }, + } + if _, err := catalog.Register(reg, nil); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Wait for second wakeup. 
+ <-notifyCh + + plan.Stop() + wg.Wait() + + require.Len(t, wakeups, 2) + + { + v := wakeups[0] + require.Len(t, v, 0) + } + { + v := wakeups[1] + require.Len(t, v, 1) + require.Equal(t, "bar", v[0].CheckID) + } +} + func TestEventWatch(t *testing.T) { t.Parallel() c, s := makeClient(t) diff --git a/build-support/docker/Consul-Dev-Multiarch.dockerfile b/build-support/docker/Consul-Dev-Multiarch.dockerfile index 53c08879d9..265a1804cf 100644 --- a/build-support/docker/Consul-Dev-Multiarch.dockerfile +++ b/build-support/docker/Consul-Dev-Multiarch.dockerfile @@ -2,7 +2,7 @@ # SPDX-License-Identifier: MPL-2.0 ARG CONSUL_IMAGE_VERSION=latest -FROM consul:${CONSUL_IMAGE_VERSION} +FROM hashicorp/consul:${CONSUL_IMAGE_VERSION} RUN apk update && apk add iptables ARG TARGETARCH COPY linux_${TARGETARCH}/consul /bin/consul diff --git a/build-support/docker/Consul-Dev.dockerfile b/build-support/docker/Consul-Dev.dockerfile index 57b3b37ea0..122bc3192a 100644 --- a/build-support/docker/Consul-Dev.dockerfile +++ b/build-support/docker/Consul-Dev.dockerfile @@ -2,6 +2,6 @@ # SPDX-License-Identifier: MPL-2.0 ARG CONSUL_IMAGE_VERSION=latest -FROM consul:${CONSUL_IMAGE_VERSION} +FROM hashicorp/consul:${CONSUL_IMAGE_VERSION} RUN apk update && apk add iptables COPY consul /bin/consul diff --git a/build-support/docker/Verify-Release-Amazon.dockerfile b/build-support/docker/Verify-Release-Amazon.dockerfile new file mode 100644 index 0000000000..591b234c3b --- /dev/null +++ b/build-support/docker/Verify-Release-Amazon.dockerfile @@ -0,0 +1,10 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +FROM amazonlinux:latest +RUN yum install -y yum-utils shadow-utils +RUN yum-config-manager --add-repo https://rpm.releases.hashicorp.com/AmazonLinux/hashicorp.repo +ARG PACKAGE=consul \ +ARG VERSION \ +ARG SUFFIX=1 +RUN yum install -y ${PACKAGE}-${VERSION}-${SUFFIX} diff --git a/build-support/docker/Verify-Release-CentOS.dockerfile b/build-support/docker/Verify-Release-CentOS.dockerfile new file mode 100644 index 0000000000..a2be67ac77 --- /dev/null +++ b/build-support/docker/Verify-Release-CentOS.dockerfile @@ -0,0 +1,10 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +FROM centos:7 +RUN yum install -y yum-utils +RUN yum-config-manager --add-repo https://rpm.releases.hashicorp.com/RHEL/hashicorp.repo +ARG PACKAGE=consul \ +ARG VERSION \ +ARG SUFFIX=1 +RUN yum install -y ${PACKAGE}-${VERSION}-${SUFFIX} \ No newline at end of file diff --git a/build-support/docker/Verify-Release-Debian.dockerfile b/build-support/docker/Verify-Release-Debian.dockerfile new file mode 100644 index 0000000000..533890bca4 --- /dev/null +++ b/build-support/docker/Verify-Release-Debian.dockerfile @@ -0,0 +1,12 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +FROM debian:bullseye +RUN apt update && apt install -y software-properties-common curl gnupg +RUN curl -fsSL https://apt.releases.hashicorp.com/gpg | apt-key add - +ARG TARGETARCH=amd64 +RUN apt-add-repository "deb [arch=${TARGETARCH}] https://apt.releases.hashicorp.com $(lsb_release -cs) main" +ARG PACKAGE=consul \ +ARG VERSION \ +ARG SUFFIX=1 +RUN apt-get update && apt-get install -y ${PACKAGE}=${VERSION}-${SUFFIX} \ No newline at end of file diff --git a/build-support/docker/Verify-Release-Fedora.dockerfile b/build-support/docker/Verify-Release-Fedora.dockerfile new file mode 100644 index 0000000000..601751a911 --- /dev/null +++ b/build-support/docker/Verify-Release-Fedora.dockerfile @@ -0,0 +1,10 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +FROM fedora:latest +RUN dnf install -y dnf-plugins-core +RUN dnf config-manager --add-repo https://rpm.releases.hashicorp.com/fedora/hashicorp.repo +ARG PACKAGE=consul \ +ARG VERSION \ +ARG SUFFIX=1 +RUN dnf install -y ${PACKAGE}-${VERSION}-${SUFFIX} diff --git a/build-support/docker/Verify-Release-Ubunt-i386.dockerfile b/build-support/docker/Verify-Release-Ubunt-i386.dockerfile new file mode 100644 index 0000000000..82913b4f72 --- /dev/null +++ b/build-support/docker/Verify-Release-Ubunt-i386.dockerfile @@ -0,0 +1,12 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +FROM i386/ubuntu:latest +RUN apt update && apt install -y software-properties-common curl +RUN curl -fsSL https://apt.releases.hashicorp.com/gpg | apt-key add - +ARG TARGETARCH=amd64 +RUN apt-add-repository "deb [arch=${TARGETARCH}] https://apt.releases.hashicorp.com $(lsb_release -cs) main" +ARG PACKAGE=consul \ +ARG VERSION \ +ARG SUFFIX=1 +RUN apt-get update && apt-get install -y ${PACKAGE}=${VERSION}-${SUFFIX} diff --git a/build-support/docker/Verify-Release-Ubuntu.dockerfile b/build-support/docker/Verify-Release-Ubuntu.dockerfile new file mode 100644 index 0000000000..ddeffc40c5 --- /dev/null +++ b/build-support/docker/Verify-Release-Ubuntu.dockerfile @@ -0,0 +1,12 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +FROM ubuntu:latest +RUN apt update && apt install -y software-properties-common curl +RUN curl -fsSL https://apt.releases.hashicorp.com/gpg | apt-key add - +ARG TARGETARCH=amd64 +RUN apt-add-repository "deb [arch=${TARGETARCH}] https://apt.releases.hashicorp.com $(lsb_release -cs) main" +ARG PACKAGE=consul \ +ARG VERSION \ +ARG SUFFIX=1 +RUN apt-get update && apt-get install -y ${PACKAGE}=${VERSION}-${SUFFIX} diff --git a/build-support/scripts/protobuf.sh b/build-support/scripts/protobuf.sh index 420d66d6a1..f7b8ce5594 100755 --- a/build-support/scripts/protobuf.sh +++ b/build-support/scripts/protobuf.sh @@ -72,6 +72,10 @@ function main { status "Generated gRPC rate limit mapping file" + generate_protoset_file + + status "Generated protoset file" + return 0 } @@ -152,5 +156,11 @@ function generate_rate_limit_mappings { } } +function generate_protoset_file { + local pkg_dir="${SOURCE_DIR}/pkg" + mkdir -p "$pkg_dir" + print_run buf build -o "${pkg_dir}/consul.protoset" +} + main "$@" exit $? 
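For context on the `generate_protoset_file` step added above: the `consul.protoset` emitted by `buf build` is a serialized `FileDescriptorSet`, so it can feed tools such as `grpcurl -protoset pkg/consul.protoset list`. A minimal sketch of inspecting it from Go; the file path matches the script, everything else is illustrative:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// pkg/consul.protoset is produced by `buf build -o pkg/consul.protoset`.
	raw, err := os.ReadFile("pkg/consul.protoset")
	if err != nil {
		log.Fatal(err)
	}
	var fds descriptorpb.FileDescriptorSet
	if err := proto.Unmarshal(raw, &fds); err != nil {
		log.Fatal(err)
	}
	// List every gRPC service described in the protoset.
	for _, file := range fds.GetFile() {
		for _, svc := range file.GetService() {
			fmt.Printf("%s.%s\n", file.GetPackage(), svc.GetName())
		}
	}
}
```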
diff --git a/command/connect/envoy/bootstrap_config.go b/command/connect/envoy/bootstrap_config.go index a50eaf36fe..2a0e21c4d2 100644 --- a/command/connect/envoy/bootstrap_config.go +++ b/command/connect/envoy/bootstrap_config.go @@ -847,7 +847,8 @@ func appendTelemetryCollectorConfig(args *BootstrapTplArgs, telemetryCollectorBi "envoy_grpc": { "cluster_name": "consul_telemetry_collector_loopback" } - } + }, + "emit_tags_as_labels": true } }` diff --git a/command/connect/envoy/bootstrap_config_test.go b/command/connect/envoy/bootstrap_config_test.go index 8de9ae007d..293aee6600 100644 --- a/command/connect/envoy/bootstrap_config_test.go +++ b/command/connect/envoy/bootstrap_config_test.go @@ -539,7 +539,8 @@ const ( "envoy_grpc": { "cluster_name": "consul_telemetry_collector_loopback" } - } + }, + "emit_tags_as_labels": true } }` @@ -638,7 +639,8 @@ func TestBootstrapConfig_ConfigureArgs(t *testing.T) { "envoy_grpc": { "cluster_name": "consul_telemetry_collector_loopback" } - } + }, + "emit_tags_as_labels": true } }`, StaticClustersJSON: `{ diff --git a/command/connect/envoy/testdata/telemetry-collector.golden b/command/connect/envoy/testdata/telemetry-collector.golden index 7c584864a0..3977ce65bb 100644 --- a/command/connect/envoy/testdata/telemetry-collector.golden +++ b/command/connect/envoy/testdata/telemetry-collector.golden @@ -89,7 +89,8 @@ "envoy_grpc": { "cluster_name": "consul_telemetry_collector_loopback" } - } + }, + "emit_tags_as_labels": true } } ], diff --git a/command/watch/watch.go b/command/watch/watch.go index 791f93a57c..b187fa369a 100644 --- a/command/watch/watch.go +++ b/command/watch/watch.go @@ -45,6 +45,7 @@ type cmd struct { state string name string shell bool + filter string } func (c *cmd) init() { @@ -71,6 +72,7 @@ func (c *cmd) init() { "Specifies the states to watch. Optional for 'checks' type.") c.flags.StringVar(&c.name, "name", "", "Specifies an event name to watch. Only for 'event' type.") + c.flags.StringVar(&c.filter, "filter", "", "Filter to use with the request") c.http = &flags.HTTPFlags{} flags.Merge(c.flags, c.http.ClientFlags()) @@ -128,6 +130,9 @@ func (c *cmd) Run(args []string) int { if c.service != "" { params["service"] = c.service } + if c.filter != "" { + params["filter"] = c.filter + } if len(c.tag) > 0 { params["tag"] = c.tag } diff --git a/docs/README.md b/docs/README.md index d3483710b3..8bebb848c9 100644 --- a/docs/README.md +++ b/docs/README.md @@ -40,6 +40,7 @@ Also see the [FAQ](./faq.md). 1. [Integration Tests](../test/integration/connect/envoy/README.md) 1. [Upgrade Tests](../test/integration/consul-container/test/upgrade/README.md) +1. 
[Remote Debugging Integration Tests](../test/integration/consul-container/test/debugging.md) ## Important Directories diff --git a/go.mod b/go.mod index 0fb7592886..b85c3028a8 100644 --- a/go.mod +++ b/go.mod @@ -21,7 +21,7 @@ require ( github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e github.com/armon/go-metrics v0.4.1 github.com/armon/go-radix v1.0.0 - github.com/aws/aws-sdk-go v1.42.34 + github.com/aws/aws-sdk-go v1.44.289 github.com/coredns/coredns v1.6.6 github.com/coreos/go-oidc v2.1.0+incompatible github.com/docker/go-connections v0.4.0 diff --git a/go.sum b/go.sum index 50778d1408..218b22b821 100644 --- a/go.sum +++ b/go.sum @@ -161,8 +161,8 @@ github.com/aws/aws-sdk-go v1.23.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.42.34 h1:fqGAiKmCSRY1rEa4G9VqgkKKbNmLKYq5dKmLtQkvYi8= -github.com/aws/aws-sdk-go v1.42.34/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc= +github.com/aws/aws-sdk-go v1.44.289 h1:5CVEjiHFvdiVlKPBzv0rjG4zH/21W/onT18R5AH/qx0= +github.com/aws/aws-sdk-go v1.44.289/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/benbjohnson/immutable v0.4.0 h1:CTqXbEerYso8YzVPxmWxh2gnoRQbbB9X1quUC8+vGZA= github.com/benbjohnson/immutable v0.4.0/go.mod h1:iAr8OjJGLnLmVUr9MZ/rz4PWUy6Ouc2JLYuMArmvAJM= @@ -1100,6 +1100,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -1169,6 +1170,7 @@ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= @@ -1210,6 +1212,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1268,9 +1271,10 @@ golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1 golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1305,6 +1309,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1399,13 +1404,16 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1419,6 +1427,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1496,6 +1505,7 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/internal/controller/api_test.go b/internal/controller/api_test.go index 2006664b20..e80a2d7d71 100644 --- a/internal/controller/api_test.go +++ b/internal/controller/api_test.go @@ -190,7 +190,7 @@ func TestController_String(t *testing.T) { WithPlacement(controller.PlacementEachServer) require.Equal(t, - `, placement="each-server">`, + `, placement="each-server">`, ctrl.String(), ) } @@ -201,7 +201,7 @@ func TestController_NoReconciler(t *testing.T) { ctrl := controller.ForType(demo.TypeV2Artist) require.PanicsWithValue(t, - `cannot register controller without a reconciler , placement="singleton">`, + `cannot register controller without a reconciler , placement="singleton">`, func() { mgr.Register(ctrl) }) } diff --git a/internal/resource/authz_oss.go b/internal/resource/authz_oss.go new file mode 100644 index 0000000000..014318f228 --- /dev/null +++ b/internal/resource/authz_oss.go @@ -0,0 +1,17 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !consulent +// +build !consulent + +package resource + +import ( + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/proto-public/pbresource" +) + +// AuthorizerContext builds an ACL AuthorizerContext for the given tenancy. 
+func AuthorizerContext(t *pbresource.Tenancy) *acl.AuthorizerContext { + return &acl.AuthorizerContext{Peer: t.PeerName} +} diff --git a/internal/resource/demo/demo.go b/internal/resource/demo/demo.go index 842b75739b..20ad89c962 100644 --- a/internal/resource/demo/demo.go +++ b/internal/resource/demo/demo.go @@ -33,36 +33,36 @@ var ( TypeV1Artist = &pbresource.Type{ Group: "demo", GroupVersion: "v1", - Kind: "artist", + Kind: "Artist", } // TypeV1Album represents a collection of an artist's songs. TypeV1Album = &pbresource.Type{ Group: "demo", GroupVersion: "v1", - Kind: "album", + Kind: "Album", } // TypeV2Artist represents a musician or group of musicians. TypeV2Artist = &pbresource.Type{ Group: "demo", GroupVersion: "v2", - Kind: "artist", + Kind: "Artist", } // TypeV2Album represents a collection of an artist's songs. TypeV2Album = &pbresource.Type{ Group: "demo", GroupVersion: "v2", - Kind: "album", + Kind: "Album", } ) const ( - ArtistV1ReadPolicy = `key_prefix "resource/demo.v1.artist/" { policy = "read" }` - ArtistV1WritePolicy = `key_prefix "resource/demo.v1.artist/" { policy = "write" }` - ArtistV2ReadPolicy = `key_prefix "resource/demo.v2.artist/" { policy = "read" }` - ArtistV2WritePolicy = `key_prefix "resource/demo.v2.artist/" { policy = "write" }` + ArtistV1ReadPolicy = `key_prefix "resource/demo.v1.Artist/" { policy = "read" }` + ArtistV1WritePolicy = `key_prefix "resource/demo.v1.Artist/" { policy = "write" }` + ArtistV2ReadPolicy = `key_prefix "resource/demo.v2.Artist/" { policy = "read" }` + ArtistV2WritePolicy = `key_prefix "resource/demo.v2.Artist/" { policy = "write" }` ArtistV2ListPolicy = `key_prefix "resource/" { policy = "list" }` ) diff --git a/internal/resource/registry.go b/internal/resource/registry.go index 232e3998d4..0004acfff4 100644 --- a/internal/resource/registry.go +++ b/internal/resource/registry.go @@ -5,6 +5,7 @@ package resource import ( "fmt" + "regexp" "sync" "google.golang.org/protobuf/proto" @@ -13,6 +14,12 @@ import ( "github.com/hashicorp/consul/proto-public/pbresource" ) +var ( + groupRegexp = regexp.MustCompile(`^[a-z][a-z\d_]+$`) + groupVersionRegexp = regexp.MustCompile(`^v([a-z\d]+)?\d$`) + kindRegexp = regexp.MustCompile(`^[A-Z][A-Za-z\d]+$`) +) + type Registry interface { // Register the given resource type and its hooks. Register(reg Registration) @@ -82,14 +89,23 @@ func NewRegistry() Registry { } func (r *TypeRegistry) Register(registration Registration) { - r.lock.Lock() - defer r.lock.Unlock() - typ := registration.Type if typ.Group == "" || typ.GroupVersion == "" || typ.Kind == "" { panic("type field(s) cannot be empty") } + switch { + case !groupRegexp.MatchString(typ.Group): + panic(fmt.Sprintf("Type.Group must be in snake_case. Got: %q", typ.Group)) + case !groupVersionRegexp.MatchString(typ.GroupVersion): + panic(fmt.Sprintf("Type.GroupVersion must be lowercase, start with `v`, and end with a number (e.g. `v2` or `v1alpha1`). Got: %q", typ.GroupVersion)) + case !kindRegexp.MatchString(typ.Kind): + panic(fmt.Sprintf("Type.Kind must be in PascalCase.
Got: %q", typ.Kind)) + } + + r.lock.Lock() + defer r.lock.Unlock() + key := ToGVK(registration.Type) if _, ok := r.registrations[key]; ok { panic(fmt.Sprintf("resource type %s already registered", key)) diff --git a/internal/resource/registry_test.go b/internal/resource/registry_test.go index 7979d618c4..c9d1777159 100644 --- a/internal/resource/registry_test.go +++ b/internal/resource/registry_test.go @@ -28,36 +28,9 @@ func TestRegister(t *testing.T) { require.True(t, proto.Equal(demo.TypeV2Artist, actual.Type)) // register existing should panic - require.PanicsWithValue(t, "resource type demo.v2.artist already registered", func() { + require.PanicsWithValue(t, "resource type demo.v2.Artist already registered", func() { r.Register(reg) }) - - // type missing required fields should panic - testcases := map[string]*pbresource.Type{ - "empty group": { - Group: "", - GroupVersion: "v2", - Kind: "artist", - }, - "empty group version": { - Group: "", - GroupVersion: "v2", - Kind: "artist", - }, - "empty kind": { - Group: "demo", - GroupVersion: "v2", - Kind: "", - }, - } - - for desc, typ := range testcases { - t.Run(desc, func(t *testing.T) { - require.PanicsWithValue(t, "type field(s) cannot be empty", func() { - r.Register(resource.Registration{Type: typ}) - }) - }) - } } func TestRegister_Defaults(t *testing.T) { @@ -102,7 +75,7 @@ func TestResolve(t *testing.T) { serviceType := &pbresource.Type{ Group: "mesh", GroupVersion: "v1", - Kind: "service", + Kind: "Service", } // not found @@ -115,3 +88,89 @@ func TestResolve(t *testing.T) { assert.True(t, ok) assert.Equal(t, registration.Type, serviceType) } + +func TestRegister_TypeValidation(t *testing.T) { + registry := resource.NewRegistry() + + testCases := map[string]struct { + fn func(*pbresource.Type) + valid bool + }{ + "Valid": {valid: true}, + "Group empty": { + fn: func(t *pbresource.Type) { t.Group = "" }, + valid: false, + }, + "Group PascalCase": { + fn: func(t *pbresource.Type) { t.Group = "Foo" }, + valid: false, + }, + "Group kebab-case": { + fn: func(t *pbresource.Type) { t.Group = "foo-bar" }, + valid: false, + }, + "Group snake_case": { + fn: func(t *pbresource.Type) { t.Group = "foo_bar" }, + valid: true, + }, + "GroupVersion empty": { + fn: func(t *pbresource.Type) { t.GroupVersion = "" }, + valid: false, + }, + "GroupVersion snake_case": { + fn: func(t *pbresource.Type) { t.GroupVersion = "v_1" }, + valid: false, + }, + "GroupVersion kebab-case": { + fn: func(t *pbresource.Type) { t.GroupVersion = "v-1" }, + valid: false, + }, + "GroupVersion no leading v": { + fn: func(t *pbresource.Type) { t.GroupVersion = "1" }, + valid: false, + }, + "GroupVersion no trailing number": { + fn: func(t *pbresource.Type) { t.GroupVersion = "OnePointOh" }, + valid: false, + }, + "Kind PascalCase with numbers": { + fn: func(t *pbresource.Type) { t.Kind = "Number1" }, + valid: true, + }, + "Kind camelCase": { + fn: func(t *pbresource.Type) { t.Kind = "barBaz" }, + valid: false, + }, + "Kind snake_case": { + fn: func(t *pbresource.Type) { t.Kind = "bar_baz" }, + valid: false, + }, + "Kind empty": { + fn: func(t *pbresource.Type) { t.Kind = "" }, + valid: false, + }, + } + for desc, tc := range testCases { + t.Run(desc, func(t *testing.T) { + reg := func() { + typ := &pbresource.Type{ + Group: "foo", + GroupVersion: "v1", + Kind: "Bar", + } + if tc.fn != nil { + tc.fn(typ) + } + registry.Register(resource.Registration{ + Type: typ, + }) + } + + if tc.valid { + require.NotPanics(t, reg) + } else { + require.Panics(t, reg) + } + }) + } +} diff 
--git a/internal/resource/tombstone.go b/internal/resource/tombstone.go index 289aec2d51..6d0285c602 100644 --- a/internal/resource/tombstone.go +++ b/internal/resource/tombstone.go @@ -6,6 +6,6 @@ var ( TypeV1Tombstone = &pbresource.Type{ Group: "internal", GroupVersion: "v1", - Kind: "tombstone", + Kind: "Tombstone", } ) diff --git a/test/integration/connect/envoy/case-property-override/setup.sh b/test/integration/connect/envoy/case-property-override/setup.sh index 1bf2021c0c..744055f949 100644 --- a/test/integration/connect/envoy/case-property-override/setup.sh +++ b/test/integration/connect/envoy/case-property-override/setup.sh @@ -53,12 +53,30 @@ EnvoyExtensions = [ Path = "/upstream_connection_options/tcp_keepalive/keepalive_probes" Value = 1234 }, + { + ResourceFilter = { + ResourceType = "cluster" + TrafficDirection = "outbound" + } + Op = "add" + Path = "/outlier_detection/max_ejection_time/seconds" + Value = 120 + }, + { + ResourceFilter = { + ResourceType = "cluster" + TrafficDirection = "outbound" + } + Op = "add" + Path = "/outlier_detection/max_ejection_time_jitter/seconds" + Value = 1 + }, { ResourceFilter = { ResourceType = "cluster" TrafficDirection = "outbound" Services = [{ - Name = "s2" + Name = "s3" }] } Op = "remove" diff --git a/test/integration/connect/envoy/case-property-override/verify.bats b/test/integration/connect/envoy/case-property-override/verify.bats index 4453409eed..446ef061da 100644 --- a/test/integration/connect/envoy/case-property-override/verify.bats +++ b/test/integration/connect/envoy/case-property-override/verify.bats @@ -19,13 +19,14 @@ load helpers [ "$status" == 0 ] [ "$(echo "$output" | jq -r '.upstream_connection_options.tcp_keepalive.keepalive_probes')" == "1234" ] - [ "$(echo "$output" | jq -r '.outlier_detection')" == "null" ] + [ "$(echo "$output" | jq -r '.outlier_detection.max_ejection_time')" == "120s" ] + [ "$(echo "$output" | jq -r '.outlier_detection.max_ejection_time_jitter')" == "1s" ] run get_envoy_cluster_config localhost:19000 s3 [ "$status" == 0 ] [ "$(echo "$output" | jq -r '.upstream_connection_options.tcp_keepalive.keepalive_probes')" == "1234" ] - [ "$(echo "$output" | jq -r '.outlier_detection')" == "{}" ] + [ "$(echo "$output" | jq -r '.outlier_detection')" == "null" ] } @test "s2 proxy is configured with the expected envoy patches" { diff --git a/test/integration/consul-container/libs/assert/service.go b/test/integration/consul-container/libs/assert/service.go index 57e853e1e7..f8ff1d0ba9 100644 --- a/test/integration/consul-container/libs/assert/service.go +++ b/test/integration/consul-container/libs/assert/service.go @@ -40,6 +40,20 @@ func CatalogServiceExists(t *testing.T, c *api.Client, svc string, opts *api.Que }) } +// CatalogServiceHasInstanceCount verifies the service name exists in the Consul catalog and has the specified +// number of instances. 
+func CatalogServiceHasInstanceCount(t *testing.T, c *api.Client, svc string, count int, opts *api.QueryOptions) { + retry.Run(t, func(r *retry.R) { + services, _, err := c.Catalog().Service(svc, "", opts) + if err != nil { + r.Fatal("error reading service data") + } + if len(services) != count { + r.Fatalf("did not find %d catalog entries for %s", count, svc) + } + }) +} + // CatalogServiceExists verifies the node name exists in the Consul catalog func CatalogNodeExists(t *testing.T, c *api.Client, nodeName string) { retry.Run(t, func(r *retry.R) { diff --git a/test/integration/consul-container/libs/service/helpers.go b/test/integration/consul-container/libs/service/helpers.go index ac254b846a..70624bf001 100644 --- a/test/integration/consul-container/libs/service/helpers.go +++ b/test/integration/consul-container/libs/service/helpers.go @@ -46,6 +46,7 @@ type ServiceOpts struct { Checks Checks Connect SidecarService Namespace string + Locality *api.Locality } // createAndRegisterStaticServerAndSidecar register the services and launch static-server containers @@ -119,6 +120,7 @@ func CreateAndRegisterStaticServerAndSidecar(node libcluster.Agent, serviceOpts Namespace: serviceOpts.Namespace, Meta: serviceOpts.Meta, Check: &agentCheck, + Locality: serviceOpts.Locality, } return createAndRegisterStaticServerAndSidecar(node, serviceOpts.HTTPPort, serviceOpts.GRPCPort, req, containerArgs...) } diff --git a/test/integration/consul-container/test/debugging.md b/test/integration/consul-container/test/debugging.md new file mode 100644 index 0000000000..2957b520ac --- /dev/null +++ b/test/integration/consul-container/test/debugging.md @@ -0,0 +1,78 @@ +# Remote Debugging Integration Tests + +- [Introduction](#introduction) + - [How it works](#how-it-works) +- [Getting Started](#getting-started) + - [Prerequisites](#prerequisites) + - [Debugging integration tests](#debugging-integration-tests) + - [Building images](#building-images) + - [Remote debugging using GoLand](#remote-debugging-using-goland) + + +## Introduction + +Remote debugging of integration tests lets you attach your debugger to the Consul container and debug the Go code running in that container. + +### How it works +The `dev-docker-dbg` Make target builds a Consul Docker container that has the following: +- the [delve (dlv) debugger](https://github.com/go-delve/delve) installed. +- an exposed port that allows a debugger from your development environment to connect and attach to the consul process and debug it remotely. +- log output of the host and port information, so that you have what you need to connect to that port. + +The integration tests have been modified to expose a `--debug` flag that switches the test from the `consul:local` image (built with `make dev-docker`) to the `consul-dbg:local` image (built with `make dev-docker-dbg`). + +Run the test in debug mode with a breakpoint set just after the cluster is created, and you can retrieve the port information. From there, you can set up a remote debugging session that connects to this port. + +## Getting Started +### Prerequisites +To run/debug integration tests locally, the following tools are required on your machine: +- Install [Go](https://go.dev/) (the version should match that of our CI config's Go image). +- Install [`make`](https://www.gnu.org/software/make/manual/make.html). +- Install [`Docker`](https://docs.docker.com/get-docker/), required to run tests locally.
+ +### Debugging integration tests +#### Building images +- Build a consul image with dlv installed and a port exposed that the debugger can attach to. + ``` + make dev-docker-dbg + ``` +- Build a consul-envoy container image from the consul root directory that is required for testing but not for debugging. + ``` + docker build -t consul-envoy:target-version --build-arg CONSUL_IMAGE=consul:local --build-arg ENVOY_VERSION=1.24.6 -f ./test/integration/consul-container/assets/Dockerfile-consul-envoy ./test/integration/consul-container/assets + ``` + +#### Remote debugging using GoLand +(For additional information, see [GoLand's documentation on remote debugging](https://www.jetbrains.com/help/go/attach-to-running-go-processes-with-debugger.html#attach-to-a-process-on-a-remote-machine).) +##### Set up the Debug Configuration for your test +- Create the configuration for debugging the test. (You may have to debug the test once so GoLand creates the configuration for you.) +- Go to `Run > Edit Configurations` and select the appropriate configuration. +- Add `--debug` to `Program arguments` and click OK. + + isolated +##### Obtain the debug port of your container +(This is required every time a test is debugged.) + +- Put a breakpoint in the test that you are running right after the cluster has been created. This should be on the line after the call to `topology.NewCluster()`. +- Debug the test and wait for the debug session to stop on the breakpoint in the test. +- In the Debug window, search for `debug info` on the Console tab and note the host and port. + + isolated +- Go to `Run > Edit Configurations` and add a `Go Remote` configuration with the host and port that your test has exposed. Click OK. + + isolated +- Debug the configuration that you just created. Verify that it shows as connected in the `Debugger` of this configuration in the `Debug` window. + + isolated +##### Debug the consul backend +- Set an appropriate breakpoint in the backend code of the endpoint that your test will call and that you wish to debug. +- Go to the test debugging tab for the integration test in the `Debug` window and `Resume Program`. + + isolated +- The remote debugging session should stop on the breakpoint, and you can freely debug the code path. + + isolated + +#### Remote debugging using VSCode +(For additional information, see [VSCode's documentation on remote debugging](https://github.com/golang/vscode-go/blob/master/docs/debugging.md#remote-debugging).) + +[comment]: <> (TODO: Openly looking for someone to add VSCode specific instructions.) 
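To make the `--debug` flow above concrete, here is a sketch of how a test-harness flag can select the debug image. The flag and image names follow the doc; the package and helper names are hypothetical, not the actual consul-container library API:

```go
// Package utils is hypothetical; it mirrors how a shared test flag might live.
package utils

import "flag"

// Debug is set by passing --debug to the test binary, per the doc above.
var Debug = flag.Bool("debug", false, "use the consul-dbg:local image and expose a dlv port")

// TargetImage returns the Consul image the cluster topology should run.
func TargetImage() string {
	if *Debug {
		// Built via `make dev-docker-dbg`; runs consul under dlv with a
		// remote-debug port exposed (see the steps above).
		return "consul-dbg:local"
	}
	// Built via `make dev-docker`.
	return "consul:local"
}
```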
diff --git a/test/integration/consul-container/test/ratelimit/ratelimit_test.go b/test/integration/consul-container/test/ratelimit/ratelimit_test.go index 18258c2ab8..e3aa20e5ba 100644 --- a/test/integration/consul-container/test/ratelimit/ratelimit_test.go +++ b/test/integration/consul-container/test/ratelimit/ratelimit_test.go @@ -32,8 +32,6 @@ const ( // - logs for exceeding func TestServerRequestRateLimit(t *testing.T) { - t.Parallel() - type action struct { function func(client *api.Client) error rateLimitOperation string @@ -52,6 +50,7 @@ func TestServerRequestRateLimit(t *testing.T) { mode string } + // getKV and putKV are net/RPC calls getKV := action{ function: func(client *api.Client) error { _, _, err := client.KV().Get("foo", &api.QueryOptions{}) @@ -99,13 +98,13 @@ func TestServerRequestRateLimit(t *testing.T) { action: putKV, expectedErrorMsg: "", expectExceededLog: true, - expectMetric: false, + expectMetric: true, }, { action: getKV, expectedErrorMsg: "", expectExceededLog: true, - expectMetric: false, + expectMetric: true, }, }, }, @@ -127,10 +126,13 @@ func TestServerRequestRateLimit(t *testing.T) { expectMetric: true, }, }, - }} + }, + } for _, tc := range testCases { + tc := tc t.Run(tc.description, func(t *testing.T) { + t.Parallel() clusterConfig := &libtopology.ClusterConfig{ NumServers: 1, NumClients: 0, @@ -144,12 +146,9 @@ func TestServerRequestRateLimit(t *testing.T) { ApplyDefaultProxySettings: false, } - cluster, _, _ := libtopology.NewCluster(t, clusterConfig) + cluster, client := setupClusterAndClient(t, clusterConfig, true) defer terminate(t, cluster) - client, err := cluster.GetClient(nil, true) - require.NoError(t, err) - // perform actions and validate returned errors to client for _, op := range tc.operations { err := op.action.function(client) @@ -165,22 +164,14 @@ func TestServerRequestRateLimit(t *testing.T) { // doing this in a separate loop so we can perform actions, allow metrics // and logs to collect and then assert on each. for _, op := range tc.operations { - timer := &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond} + timer := &retry.Timer{Timeout: 15 * time.Second, Wait: 500 * time.Millisecond} retry.RunWith(timer, t, func(r *retry.R) { - // validate metrics - metricsInfo, err := client.Agent().Metrics() - // TODO(NET-1978): currently returns NaN error - // require.NoError(t, err) - if metricsInfo != nil && err == nil { - if op.expectMetric { - checkForMetric(r, metricsInfo, op.action.rateLimitOperation, op.action.rateLimitType, tc.mode) - } - } + checkForMetric(t, cluster, op.action.rateLimitOperation, op.action.rateLimitType, tc.mode, op.expectMetric) // validate logs // putting this last as there are cases where logs // were not present in consumer when assertion was made. 
- checkLogsForMessage(r, clusterConfig.LogConsumer.Msgs, + checkLogsForMessage(t, clusterConfig.LogConsumer.Msgs, fmt.Sprintf("[DEBUG] agent.server.rpc-rate-limit: RPC exceeded allowed rate limit: rpc=%s", op.action.rateLimitOperation), op.action.rateLimitOperation, "exceeded", op.expectExceededLog) @@ -190,43 +181,65 @@ func TestServerRequestRateLimit(t *testing.T) { } } -func checkForMetric(t *retry.R, metricsInfo *api.MetricsInfo, operationName string, expectedLimitType string, expectedMode string) { - const counterName = "consul.rpc.rate_limit.exceeded" +func setupClusterAndClient(t *testing.T, config *libtopology.ClusterConfig, isServer bool) (*libcluster.Cluster, *api.Client) { + cluster, _, _ := libtopology.NewCluster(t, config) - var counter api.SampledValue - for _, c := range metricsInfo.Counters { - if c.Name == counterName { - counter = c - break - } - } - require.NotEmptyf(t, counter.Name, "counter not found: %s", counterName) + client, err := cluster.GetClient(nil, isServer) + require.NoError(t, err) + + return cluster, client +} + +func checkForMetric(t *testing.T, cluster *libcluster.Cluster, operationName string, expectedLimitType string, expectedMode string, expectMetric bool) { + // validate metrics + server, err := cluster.GetClient(nil, true) + require.NoError(t, err) + metricsInfo, err := server.Agent().Metrics() + // TODO(NET-1978): currently returns NaN error + // require.NoError(t, err) + if metricsInfo != nil && err == nil { + if expectMetric { + const counterName = "consul.rpc.rate_limit.exceeded" + + var counter api.SampledValue + for _, c := range metricsInfo.Counters { + if c.Name == counterName { + counter = c + break + } + } + require.NotEmptyf(t, counter.Name, "counter not found: %s", counterName) - operation, ok := counter.Labels["op"] - require.True(t, ok) + operation, ok := counter.Labels["op"] + require.True(t, ok) - limitType, ok := counter.Labels["limit_type"] - require.True(t, ok) + limitType, ok := counter.Labels["limit_type"] + require.True(t, ok) - mode, ok := counter.Labels["mode"] - require.True(t, ok) + mode, ok := counter.Labels["mode"] + require.True(t, ok) - if operation == operationName { - require.GreaterOrEqual(t, counter.Count, 1) - require.Equal(t, expectedLimitType, limitType) - require.Equal(t, expectedMode, mode) + if operation == operationName { + require.GreaterOrEqual(t, counter.Count, 1) + require.Equal(t, expectedLimitType, limitType) + require.Equal(t, expectedMode, mode) + } + } } } -func checkLogsForMessage(t *retry.R, logs []string, msg string, operationName string, logType string, logShouldExist bool) { - found := false - for _, log := range logs { - if strings.Contains(log, msg) { - found = true - break +func checkLogsForMessage(t *testing.T, logs []string, msg string, operationName string, logType string, logShouldExist bool) { + if logShouldExist { + found := false + for _, log := range logs { + if strings.Contains(log, msg) { + found = true + break + } } + expectedLog := fmt.Sprintf("%s log check failed for: %s. Log expected: %t", logType, operationName, logShouldExist) + require.Equal(t, logShouldExist, found, expectedLog) } - require.Equal(t, logShouldExist, found, fmt.Sprintf("%s log check failed for: %s. 
Log expected: %t", logType, operationName, logShouldExist)) } func terminate(t *testing.T, cluster *libcluster.Cluster) { diff --git a/test/integration/consul-container/test/util/test_debug_breakpoint_hit.png b/test/integration/consul-container/test/util/test_debug_breakpoint_hit.png new file mode 100644 index 0000000000..2eae03da3b Binary files /dev/null and b/test/integration/consul-container/test/util/test_debug_breakpoint_hit.png differ diff --git a/test/integration/consul-container/test/util/test_debug_configuration.png b/test/integration/consul-container/test/util/test_debug_configuration.png new file mode 100644 index 0000000000..8fa19ba939 Binary files /dev/null and b/test/integration/consul-container/test/util/test_debug_configuration.png differ diff --git a/test/integration/consul-container/test/util/test_debug_info.png b/test/integration/consul-container/test/util/test_debug_info.png new file mode 100644 index 0000000000..a177999c0d Binary files /dev/null and b/test/integration/consul-container/test/util/test_debug_info.png differ diff --git a/test/integration/consul-container/test/util/test_debug_remote_configuration.png b/test/integration/consul-container/test/util/test_debug_remote_configuration.png new file mode 100644 index 0000000000..01b14eada6 Binary files /dev/null and b/test/integration/consul-container/test/util/test_debug_remote_configuration.png differ diff --git a/test/integration/consul-container/test/util/test_debug_remote_connected.png b/test/integration/consul-container/test/util/test_debug_remote_connected.png new file mode 100644 index 0000000000..52fc905ef2 Binary files /dev/null and b/test/integration/consul-container/test/util/test_debug_remote_connected.png differ diff --git a/test/integration/consul-container/test/util/test_debug_resume_program.png b/test/integration/consul-container/test/util/test_debug_resume_program.png new file mode 100644 index 0000000000..99c2899019 Binary files /dev/null and b/test/integration/consul-container/test/util/test_debug_resume_program.png differ diff --git a/website/content/commands/debug.mdx b/website/content/commands/debug.mdx index bebbe955a2..1514158ff9 100644 --- a/website/content/commands/debug.mdx +++ b/website/content/commands/debug.mdx @@ -80,7 +80,7 @@ information when `debug` is running. By default, it captures all information. | `members` | A list of all the WAN and LAN members in the cluster. | | `metrics` | Metrics from the in-memory metrics endpoint in the target, captured at the interval. | | `logs` | `TRACE` level logs for the target agent, captured for the duration. | -| `pprof` | Golang heap, CPU, goroutine, and trace profiling. CPU and traces are captured for `duration` in a single file while heap and goroutine are separate snapshots for each `interval`. This information is not retrieved unless [`enable_debug`](/consul/docs/agent/config/config-files#enable_debug) is set to `true` on the target agent or ACLs are enable and an ACL token with `operator:read` is provided. | +| `pprof` | Golang heap, CPU, goroutine, and trace profiling. CPU and traces are captured for `duration` in a single file while heap and goroutine are separate snapshots for each `interval`. This information is not retrieved unless [`enable_debug`](/consul/docs/agent/config/config-files#enable_debug) is set to `true` on the target agent or ACLs are enabled and an ACL token with `operator:read` is provided. 
|

## Examples

diff --git a/website/content/commands/watch.mdx b/website/content/commands/watch.mdx
index da32cdefdc..806864dae9 100644
--- a/website/content/commands/watch.mdx
+++ b/website/content/commands/watch.mdx
@@ -53,6 +53,11 @@ or optionally provided. There is more documentation on watch
 - `-type` - Watch type. Required, one of "`key`, `keyprefix`, `services`,
   `nodes`, `service`, `checks`, or `event`.
 
+- `-filter=` - Expression to use for filtering the results. Optional for
+  `checks`, `nodes`, `services`, and `service` types.
+  See the [`/catalog/nodes` API documentation](/consul/api-docs/catalog#filtering) for a
+  description of what is filterable.
+
 #### API Options
 
 @include 'http_api_options_client.mdx'
diff --git a/website/content/docs/agent/config/config-files.mdx b/website/content/docs/agent/config/config-files.mdx
index 4183a5a7d2..1b382341e4 100644
--- a/website/content/docs/agent/config/config-files.mdx
+++ b/website/content/docs/agent/config/config-files.mdx
@@ -472,8 +472,7 @@ Refer to the [formatting specification](https://golang.org/pkg/time/#ParseDurati
   that match a registering service instance. If it finds any, the agent will merge the centralized defaults with the service instance configuration. This allows for
   things like service protocol or proxy configuration to be defined centrally and inherited by any affected service registrations.
   This defaults to `false` in versions of Consul prior to 1.9.0, and defaults to `true` in Consul 1.9.0 and later.
-- `enable_debug` When set, enables some additional debugging features. Currently, this is only used to
-  access runtime profiling HTTP endpoints, which are available with an `operator:read` ACL regardless of the value of `enable_debug`.
+- `enable_debug` (boolean, default is `false`): When set to `true`, enables Consul to report additional debugging information, including runtime profiling (`pprof`) data. This setting is only required for clusters that do not have ACLs [enabled](#acl_enabled). If you change this setting, you must restart the agent for the change to take effect.
 - `enable_script_checks` Equivalent to the [`-enable-script-checks` command-line flag](/consul/docs/agent/config/cli-flags#_enable_script_checks).
diff --git a/website/content/docs/api-gateway/upgrades.mdx b/website/content/docs/api-gateway/upgrades.mdx
index 31bc1ec823..577920fd7d 100644
--- a/website/content/docs/api-gateway/upgrades.mdx
+++ b/website/content/docs/api-gateway/upgrades.mdx
@@ -7,7 +7,7 @@ description: >-
 # Upgrade Consul API gateway for Kubernetes
 
-Since Consul v1.15, the Consul API gateway is a native feature within the Consul binary and is installed during the normal Consul installation process. Since Consul v1.16, the CRDs necessary for using the Consul API gateway for Kubernetes are also included. You can install Consul v1.16 using the Consul Helm chart v1.2 and later. Refer to [Install API gateway for Kubernetes](/consul/docs/api-gateway/install) for additional information.
+Since Consul v1.15, the Consul API gateway is a native feature within the Consul binary and is installed during the normal Consul installation process. Since Consul on Kubernetes v1.2 (Consul v1.16), the CRDs necessary for using the Consul API gateway for Kubernetes are also included. You can install Consul v1.16 using the Consul Helm chart v1.2 and later. Refer to [Install API gateway for Kubernetes](/consul/docs/api-gateway/install) for additional information.
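As an illustrative sketch only (the chart version, release name, and namespace are assumptions, not part of this change), installing Consul v1.16 with the v1.2 Helm chart might look like the following:

```shell-session
$ helm repo add hashicorp https://helm.releases.hashicorp.com
$ helm install consul hashicorp/consul --version 1.2.0 --namespace consul --create-namespace --values values.yaml
```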
## Introduction

@@ -47,62 +47,114 @@ To begin using the native API gateway, complete one of the following upgrade pat
 
 ## Upgrade to native Consul API gateway
 
-You must begin the upgrade procedure with Consul API gateway v1.1 installed. If you are currently using a version of Consul API gateway older than v1.1, complete the necessary stages of the upgrade path to v1.1 before you begin upgrading to the native API gateway. Refer to the [Introduction](#introduction) for an overview of the upgrade paths.
+You must begin the upgrade procedure with the API gateway on Consul on Kubernetes v1.1 installed. If you are currently using a version of Consul on Kubernetes older than v1.1, complete the necessary stages of the upgrade path to v1.1 before you begin upgrading to the native API gateway. Refer to the [Introduction](#introduction) for an overview of the upgrade paths.
 
 ### Consul-managed CRDs
 
 If you are able to tolerate downtime for your applications, you should delete previously installed CRDs and allow Consul to install and manage them for future updates. The amount of downtime depends on how quickly you are able to install the new version of Consul. If you are unable to tolerate any downtime, refer to [Self-managed CRDs](#self-managed-crds) for instructions on how to upgrade without downtime.
 
-1. Run the `kubectl delete` command and reference the kustomize directory to delete the existing CRDs. The following example deletes the CRDs that were installed with API gateway v0.5.1:
+1. Run the `kubectl delete` command and reference the `kustomize` directory to delete the existing CRDs. The following example deletes the CRDs that were installed with API gateway `v0.5.1`:
 
   ```shell-session
  $ kubectl delete --kustomize="github.com/hashicorp/consul-api-gateway/config/crd?ref=v0.5.1"
  ```
 
-1. Issue the following command to apply the configuration and complete the installation:
+1. Issue the following command to use the API gateway packaged in Consul. Because Consul does not detect an external CRD, it installs the API gateway packaged with Consul.
 
  ```shell-session
- $ kubectl apply -f apigw-installation.yaml
+ $ consul-k8s install -config-file values.yaml
  ```
 
+1. Create `ServiceIntentions` allowing `Gateways` to communicate with any backend services that they route to. Refer to [Service intentions configuration entry reference](/consul/docs/connect/config-entries/service-intentions) for additional information.
+
 1. Change any existing `Gateways` to reference the new `GatewayClass` `consul`. Refer to [gatewayClass](/consul/docs/api-gateway/configuration/gateway#gatewayclassname) for additional information.
 
-1. After updating all of your `gateway` configurations to use the new controller, you can complete the upgrade again and completely remove the `apiGateway` block to remove the old controller.
+1. After updating all of your `gateway` configurations to use the new controller, you can remove the `apiGateway` block from the Helm chart and upgrade your Consul cluster. This completely removes the old gateway controller.
+
+
+
+  ```diff
+  global:
+    image: hashicorp/consul:1.15
+    imageK8S: hashicorp/consul-k8s-control-plane:1.1
+  - apiGateway:
+  -   enabled: true
+  -   image: hashicorp/consul-api-gateway:0.5.4
+  -   managedGatewayClass:
+  -     enabled: true
+  ```
+
+
+
+  ```shell-session
+  $ consul-k8s install -config-file values.yaml
+  ```
 
 ### Self-managed CRDs
 
+
+
+This upgrade method uses `connectInject.apiGateway.manageExternalCRDs`, which was introduced in Consul on Kubernetes v1.2.
As a result, you must be on at least Consul on Kubernetes v1.2 for this upgrade method.
+
+
+
 If you are unable to tolerate any downtime, you can complete the following steps to upgrade to the native Consul API gateway. If you choose this upgrade option, you must continue to manually install the CRDs necessary for operating the API gateway.
 
-1. Create a values file that installs the version of Consul API gateway that ships with Consul and disables externally-managed CRDs:
+1. Create a Helm values file that installs the version of Consul API gateway that ships with Consul and disables externally-managed CRDs:
 
-
+
 
-  ```yaml
-  global:
-    image: hashicorp/consul:1.16
-    imageK8S: hashicorp/consul-k8s-control-plane
-  connectInject:
+  ```yaml
+  global:
+    image: hashicorp/consul:1.16
+    imageK8S: hashicorp/consul-k8s-control-plane:1.2
+  connectInject:
+    apiGateway:
+      manageExternalCRDs: false
   apiGateway:
-    manageExternalCRDs: false
-  apiGateway:
-    enabled: true
-    image: hashicorp/consul-api-gateway:0.5.4
-    managedGatewayClass:
     enabled: true
-  ```
+    image: hashicorp/consul-api-gateway:0.5.4
+    managedGatewayClass:
+      enabled: true
+  ```
 
-
+
+
+
 You must set `connectInject.apiGateway.manageExternalCRDs` to `false`. If you have external CRDs from a legacy installation and you do not set this, the upgrade returns an error because Helm tries to install CRDs that already exist.
 
-1. Issue the following command to apply the configuration and complete the installation:
+1. Issue the following command to install the new version of API gateway and disable externally-managed CRDs:
 
  ```shell-session
- $ kubectl apply -f apigw-installation.yaml
+ $ consul-k8s install -config-file values.yaml
  ```
 
+1. Create `ServiceIntentions` allowing `Gateways` to communicate with any backend services that they route to. Refer to [Service intentions configuration entry reference](/consul/docs/connect/config-entries/service-intentions) for additional information.
+
 1. Change any existing `Gateways` to reference the new `GatewayClass` `consul`. Refer to [gatewayClass](/consul/docs/api-gateway/configuration/gateway#gatewayclassname) for additional information.
 
-1. After updating all of your `gateway` configurations to use the new controller, you can remove the `apiGateway` block from the Helm chart and rerun it. This completely removes the old gateway controller.
+1. After updating all of your `gateway` configurations to use the new controller, you can remove the `apiGateway` block from the Helm chart and upgrade your Consul cluster. This completely removes the old gateway controller.
+
+
+  ```diff
+  global:
+    image: hashicorp/consul:1.16
+    imageK8S: hashicorp/consul-k8s-control-plane:1.2
+  connectInject:
+    apiGateway:
+      manageExternalCRDs: false
+  - apiGateway:
+  -   enabled: true
+  -   image: hashicorp/consul-api-gateway:0.5.4
+  -   managedGatewayClass:
+  -     enabled: true
+  ```
+
+
+
+  ```shell-session
+  $ consul-k8s install -config-file values.yaml
+  ```
 
 ## Upgrade to v0.4.0
 
@@ -153,7 +205,7 @@ Complete the following steps after performing standard upgrade procedure.
 
   You should receive a response similar to the following:
 
-  ```log
+  ```log hideClipboard
  "hashicorp/consul-api-gateway:0.4.0"
  ```
 
@@ -166,7 +218,7 @@ Complete the following steps after performing standard upgrade procedure.
  ```
 
  If you have any active `ReferencePolicy` resources, you will receive output similar to the response below.
 
-  ```log
+  ```log hideClipboard
  Warning: ReferencePolicy has been renamed to ReferenceGrant.
ReferencePolicy will be removed in v0.6.0 in favor of the identical ReferenceGrant resource. NAMESPACE NAME default example-reference-policy @@ -291,7 +343,7 @@ Ensure that the following requirements are met prior to upgrading: You should receive a response similar to the following: - ```log + ```log hideClipboard "hashicorp/consul-api-gateway:0.2.1" ``` @@ -445,7 +497,7 @@ Ensure that the following requirements are met prior to upgrading: You should receive the following response: - ```log + ```log hideClipboard "hashicorp/consul-api-gateway:0.1.0" ``` diff --git a/website/content/docs/api-gateway/usage/errors.mdx b/website/content/docs/api-gateway/usage/errors.mdx index c873c55812..ba2c40f6f2 100644 --- a/website/content/docs/api-gateway/usage/errors.mdx +++ b/website/content/docs/api-gateway/usage/errors.mdx @@ -58,3 +58,18 @@ The installation process typically fails after this error message is generated. **Resolution:** Install the required CRDs. Refer to the [Consul API Gateway installation instructions](/consul/docs/api-gateway/install#installation) for instructions. + +## Operation cannot be fulfilled, the object has been modified + +``` +{"error": "Operation cannot be fulfilled on gatewayclassconfigs.consul.hashicorp.com \"consul-api-gateway\": the object has been modified; please apply your changes to the latest version and try again"} + +``` +**Conditions:** +This error occurs when the gateway controller attempts to update an object that has been modified previously. It is a normal part of running the controller and will resolve itself by automatically retrying. + +**Impact:** +Excessive error logs are produced, but there is no impact to the functionality of the controller. + +**Resolution:** +No action needs to be taken to resolve this issue. diff --git a/website/content/docs/concepts/service-mesh.mdx b/website/content/docs/concepts/service-mesh.mdx index 334a6639f1..2e793f2441 100644 --- a/website/content/docs/concepts/service-mesh.mdx +++ b/website/content/docs/concepts/service-mesh.mdx @@ -21,8 +21,8 @@ Some of the benefits of a service mesh include; - automatic failover - traffic management - encryption -- observability and traceability, -- authentication and authorization, +- observability and traceability +- authentication and authorization - network automation A common use case for leveraging a service mesh is to achieve a [_zero trust_ model](https://www.consul.io/use-cases/zero-trust-networking). diff --git a/website/content/docs/connect/ca/index.mdx b/website/content/docs/connect/ca/index.mdx index 13cc56c72d..c49e07516f 100644 --- a/website/content/docs/connect/ca/index.mdx +++ b/website/content/docs/connect/ca/index.mdx @@ -21,7 +21,7 @@ support for using [Vault as a CA](/consul/docs/connect/ca/vault). With Vault, the root certificate and private key material remain with the Vault cluster. -### CA and Certificate relationship +## CA and Certificate relationship This diagram shows the relationship between the CA certificates in a Consul primary datacenter and a secondary Consul datacenter. @@ -34,9 +34,22 @@ services. - the Leaf Cert Client Agent is created by auto-encrypt and auto-config. It is used by client agents for HTTP API TLS, and for mTLS for RPC requests to servers. -Any secondary datacenters receive an intermediate certificate, signed by the Primary Root -CA, which is used as the CA certificate to sign leaf certificates in the secondary -datacenter. 
+Any secondary datacenters use their CA provider to generate an intermediate certificate
+signing request (CSR) to be signed by the primary root CA. They receive an intermediate
+CA certificate, which is used to sign leaf certificates in the secondary datacenter.
+
+You can use different providers across primary and secondary datacenters.
+For example, an operator may use a Vault CA provider for extra security in the primary
+datacenter but choose to use the built-in CA provider in the secondary datacenter, which
+may not have a reachable Vault cluster. The following table compares the built-in and Vault providers.
+
+## CA Provider Comparison
+
+|            | Consul built-in                    | Vault                                                                              |
+|------------|------------------------------------|-----------------------------------------------------------------------------------|
+| Security   | CA private keys are stored on disk | CA private keys are stored in Vault and are never exposed to Consul server agents |
+| Resiliency | No dependency on external systems. If Consul is available, it can sign certificates | Dependent on Vault availability |
+| Latency    | Consul signs certificates locally  | A network call to Vault is required to sign certificates |
 
 ## CA Bootstrapping
 
diff --git a/website/content/docs/connect/ca/vault.mdx b/website/content/docs/connect/ca/vault.mdx
index ce35744e92..828a6937ca 100644
--- a/website/content/docs/connect/ca/vault.mdx
+++ b/website/content/docs/connect/ca/vault.mdx
@@ -7,19 +7,27 @@ description: >-
 # Vault as a Service Mesh Certificate Authority
 
-You can configure Consul to use [Vault](https://www.vaultproject.io/) as the certificate authority (CA) so that Vault can manage and sign certificates distributed to services in the mesh.
-The Vault CA provider uses the [Vault PKI secrets engine](/vault/docs/secrets/pki) to generate and sign certificates.
+You can configure Consul to use [Vault](/vault) as the certificate authority (CA) so that Vault can manage and sign certificates distributed to services in the mesh.
+The Vault CA provider uses the [Vault PKI secrets engine](/vault/docs/secrets/pki) to generate and sign certificates. This page describes how to configure the Vault CA provider.
 
 > **Tutorial:** Complete the [Vault as Consul Service Mesh Certification Authority](/consul/tutorials/vault-secure/vault-pki-consul-connect-ca) tutorial for hands-on guidance on how to configure Vault as the Consul service mesh certification authority.
 
 ## Requirements
 
+- Vault 0.10.3 or higher
+
+~> **Compatibility note:** If you use Vault 1.11.0+ as Consul's service mesh CA, versions of Consul released before Dec 13, 2022 will develop an issue with Consul control plane or service mesh communication ([GH-15525](https://github.com/hashicorp/consul/pull/15525)). Use or upgrade to a [Consul version that includes the fix](https://support.hashicorp.com/hc/en-us/articles/11308460105491#01GMC24E6PPGXMRX8DMT4HZYTW) to avoid this problem.
+
+## Recommendations
+
 - Refer to [Service Mesh Certificate Authority Overview](/consul/docs/connect/ca) for important background information about how Consul manages certificates with configurable CA providers.
 
-- Vault 0.10.3 to 1.10.x.
+- For best performance and resiliency, every datacenter should have a Vault cluster local to its Consul cluster.
 
-~> **Compatibility note:** If you use Vault 1.11.0+ as Consul's service mesh CA, versions of Consul released before Dec 13, 2022 will develop an issue with Consul control plane or service mesh communication ([GH-15525](https://github.com/hashicorp/consul/pull/15525)).
Use or upgrade to a [Consul version that includes the fix](https://support.hashicorp.com/hc/en-us/articles/11308460105491#01GMC24E6PPGXMRX8DMT4HZYTW) to avoid this problem. +- If your Consul datacenters are WAN-federated and the secondary datacenter uses Vault Enterprise + [performance secondaries](/vault/docs/enterprise/replication#performance-replication), we recommend + configuring [`local`](/vault/docs/enterprise/replication#local) mounts for their [`intermediate_pki_path`](/consul/docs/connect/ca/vault#intermediatepkipath). ## Enable Vault as the CA @@ -28,7 +36,7 @@ and including the required provider configuration options. You can provide the CA configuration in the server agents' configuration file or in the body of a `PUT` request to the [`/connect/ca/configuration`](/consul/api-docs/connect/ca#update-ca-configuration) API endpoint. -Refer to the [Configuration Reference](#configuration-reference) for details about configuration options and for example use cases. +Refer to the [Configuration Reference](#configuration-reference) for details about configuration options and for example use cases. The following example shows the required configurations for a default implementation: @@ -75,7 +83,7 @@ connect { You can specify the following configuration options. Note that a configuration option's name may differ between API calls and the agent configuration file. The first key refers to the option name for use in API calls. -The key after the slash refers to the corresponding option name in the agent configuration file. +The key after the slash refers to the corresponding option name in the agent configuration file. - `Address` / `address` (`string: `) - The address of the Vault server. @@ -104,7 +112,8 @@ The key after the slash refers to the corresponding option name in the agent con Only the authentication related fields (for example, JWT's `path` and `role`) are supported. The optional management fields (for example: `remove_jwt_after_reading`) are not supported. - `RootPKIPath` / `root_pki_path` (`string: `) - The path to - a PKI secrets engine for the root certificate. + a PKI secrets engine for the root certificate. Required for primary + datacenters. Secondary datacenters do not use this path. If the path does not exist, Consul will mount a new PKI secrets engine at the specified path with the @@ -114,9 +123,6 @@ The key after the slash refers to the corresponding option name in the agent con the root certificate TTL was set to 8760 hour, or 1 year, and was not configurable. The root certificate will expire at the end of the specified period. - When WAN Federation is enabled, each secondary datacenter must use the same Vault cluster and share the same `root_pki_path` - with the primary datacenter. - To use an intermediate certificate as the primary CA in Consul, initialize the `RootPKIPath` in Vault with a PEM bundle. The first certificate in the bundle must be the intermediate certificate that Consul will use as the primary CA. @@ -133,8 +139,10 @@ The key after the slash refers to the corresponding option name in the agent con path does not exist, Consul will attempt to mount and configure this automatically. - When WAN Federation is enabled, every secondary - datacenter must specify a unique `intermediate_pki_path`. + When WAN federation is enabled, every secondary datacenter that shares a common Vault cluster + must specify a unique `intermediate_pki_path`. 
If a Vault cluster is not used by more than one Consul datacenter, + then you do not need to specify a unique value for the `intermediate_pki_path`. We still recommend using a + unique `intermediate_pki_path` for each datacenter, however, to improve operational and diagnostic clarity. - `IntermediatePKINamespace` / `intermediate_pki_namespace` (`string: `) - The absolute namespace that the `IntermediatePKIPath` is in. Setting this parameter overrides the `Namespace` option for the `IntermediatePKIPath`. Introduced in 1.12.3. @@ -242,7 +250,7 @@ Then, attach the following Vault ACL policy to the CA provider's path "//" { capabilities = [ "read" ] } - + path "//root/sign-intermediate" { capabilities = [ "update" ] } @@ -268,7 +276,7 @@ Then, attach the following Vault ACL policy to the CA provider's capabilities = [ "read" ] } ``` - + #### Define a policy for Consul-managed PKI paths ((#consul-managed-pki-paths)) @@ -329,7 +337,7 @@ Then, attach the following Vault ACL policy to the CA provider's capabilities = [ "read" ] } ``` - + #### Additional Vault ACL policies for sensitive operations @@ -340,7 +348,7 @@ following CA provider configuration changes: - Changing the `RootPKIPath` Those configuration modifications trigger a root CA change that requires an -extremely privileged root cross-sign operation. +extremely privileged root cross-sign operation. For that operation to succeed, the CA provider's [Vault token](#token) or [auth method](#authmethod) must contain the following rule: diff --git a/website/content/docs/connect/config-entries/jwt-provider.mdx b/website/content/docs/connect/config-entries/jwt-provider.mdx index b31427af4f..8867a3e4f9 100644 --- a/website/content/docs/connect/config-entries/jwt-provider.mdx +++ b/website/content/docs/connect/config-entries/jwt-provider.mdx @@ -952,6 +952,22 @@ Defines behavior for caching the validation result of previously encountered JWT +## Metrics + +Envoy proxies expose metrics that can track JWT authentication details. Use the following Envoy metrics: + +```yaml +http.public_listener.jwt_authn.allowed +http.public_listener.jwt_authn.cors_preflight_bypassed +http.public_listener.jwt_authn.denied +http.public_listener.jwt_authn.jwks_fetch_failed +http.public_listener.jwt_authn.jwks_fetch_success +http.public_listener.jwt_authn.jwt_cache_hit +http.public_listener.jwt_authn.jwt_cache_miss +``` + +~> **Note:** Currently, Envoy does not reference these metrics in their documentation. Refer to [Envoy documentation](https://www.envoyproxy.io/docs/envoy/latest/) for more information about exposed metrics. + ## Examples The following examples demonstrate common JWT provider configuration patterns for specific use cases. 
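As a quick, hedged way to confirm that the `jwt_authn` metrics listed above are being emitted, you can query the sidecar proxy's Envoy admin interface directly. This sketch assumes the default admin bind address of `localhost:19000`:

```shell-session
$ curl -s localhost:19000/stats | grep jwt_authn
```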
@@ -1023,4 +1039,4 @@ spec: ``` - \ No newline at end of file + diff --git a/website/content/docs/connect/config-entries/sameness-group.mdx b/website/content/docs/connect/config-entries/sameness-group.mdx index 8d19b989ff..76752c2c54 100644 --- a/website/content/docs/connect/config-entries/sameness-group.mdx +++ b/website/content/docs/connect/config-entries/sameness-group.mdx @@ -101,8 +101,8 @@ metadata: spec: defaultForFailover: false members: # required - partition: - peer: + - partition: + - peer: ``` @@ -365,11 +365,11 @@ metadata: spec: defaultForFailover: true members: - partition: store-east - partition: inventory-east - peer: dc2-store-west - peer: dc2-inventory-west + - partition: store-east + - partition: inventory-east + - peer: dc2-store-west + - peer: dc2-inventory-west ``` - \ No newline at end of file + diff --git a/website/content/docs/connect/config-entries/service-defaults.mdx b/website/content/docs/connect/config-entries/service-defaults.mdx index 3b057377b9..9787d39b37 100644 --- a/website/content/docs/connect/config-entries/service-defaults.mdx +++ b/website/content/docs/connect/config-entries/service-defaults.mdx @@ -1474,6 +1474,7 @@ represents a location outside the Consul cluster. Services can dial destinations metadata: name: test-destination spec: + protocol: tcp destination: addresses: - "test.com" diff --git a/website/content/docs/connect/config-entries/service-resolver.mdx b/website/content/docs/connect/config-entries/service-resolver.mdx index dcea598054..b4218d6d06 100644 --- a/website/content/docs/connect/config-entries/service-resolver.mdx +++ b/website/content/docs/connect/config-entries/service-resolver.mdx @@ -35,12 +35,14 @@ The following list outlines field hierarchy, language-specific data types, and r - [`ServiceSubset`](#redirect-servicesubset): string - [`Namespace`](#redirect-namespace): string - [`Partition`](#redirect-partition): string | `default` + - [`SamenessGroup`](#redirect-samenessgroup): string - [`Datacenter`](#redirect-datacenter): list - [`Peer`](#redirect-peer): string - [`Failover`](#failover): map - [`Service`](#failover-service): string - [`ServiceSubset`](#failover-servicesubset): string - [`Namespace`](#failover-namespace): string + - [`SamenessGroup`](#failover-samenessgroup): string - [`Datacenters`](#failover-datacenters): list - [`Targets`](#failover-targets): list - [`Service`](#failover-targets-service): string @@ -87,12 +89,14 @@ The following list outlines field hierarchy, language-specific data types, and r - [`serviceSubset`](#spec-redirect-servicesubset): string - [`namespace`](#spec-redirect-namespace): string - [`partition`](#spec-redirect-partition): string + - [`samenessGroup`](#spec-redirect-samenessgroup): string - [`datacenter`](#spec-redirect-datacenter): string - [`peer`](#spec-redirect-peer): string - [`failover`](#spec-failover): map - [`service`](#spec-failover-service): string - [`serviceSubset`](#spec-failover-servicesubset): string - [`namespace`](#spec-failover-namespace): string + - [`samenessGroup`](#spec-failover-samenessgroup): string - [`datacenters`](#spec-failover-datacenters): string - [`targets`](#spec-failover-targets): list - [`service`](#spec-failover-targets-service): string @@ -157,11 +161,12 @@ Redirect = { ServiceSubset = "" Namespace = "" Partition = "" + SamenessGroup = "" Datacenter = "" Peer = "" } -Failover = { ## requires at least one of the following: Service, ServiceSubset, Namespace, Targets, Datacenters +Failover = { ## requires at least one of the following: Service, 
ServiceSubset, Namespace, Targets, Datacenters, SamenessGroup = { Targets = [ { Service = "" }, @@ -239,11 +244,12 @@ LoadBalancer = { "ServiceSubset":"", "Namespace":"", "Partition":"", + "SamenessGroup":"", "Datacenter":"", "Peer":"" }, - "Failover":{ // requires at least one of the following": Service, ServiceSubset, Namespace, Targets, Datacenters + "Failover":{ // requires at least one of the following": Service, ServiceSubset, Namespace, Targets, Datacenters, SamenessGroup "":{ "Targets":[ {"Service":""}, @@ -314,8 +320,9 @@ spec: servicesubset: namespace: partition: + samenessGroup: peer: - failover: # requires at least one of the following: service, serviceSubset, namespace, targets, datacenters + failover: # requires at least one of the following: service, serviceSubset, namespace, targets, datacenters, samenessGroup : targets: - service: @@ -465,6 +472,7 @@ Specifies redirect instructions for local service traffic so that services deplo - [`ServiceSubset`](#redirect-servicesubset) - [`Namespace`](#redirect-namespace) - [`Partition`](#redirect-partition) + - [`SamenessGroup`](#redirect-samenessgroup) - [`Datacenter`](#redirect-datacenter) - [`Peer`](#redirect-peer) @@ -504,6 +512,14 @@ Specifies the admin partition at the redirect’s destination that resolves loca - Default: None - Data type: String +### `Redirect{}.SamenessGroup` + +Specifies the sameness group at the redirect’s destination that resolves local upstream requests. + +#### Values + +- Default: None +- Data type: String ### `Redirect{}.Datacenter` @@ -529,7 +545,7 @@ Specifies controls for rerouting traffic to an alternate pool of service instanc This parameter is a map, and its key is the name of the local service subset that resolves to another location when it fails. You can specify a `"*"` wildcard to apply failovers to any subset. -`Service`, `ServiceSubset`, `Namespace`, `Targets`, and `Datacenters` cannot all be empty at the same time. +`Service`, `ServiceSubset`, `Namespace`, `Targets`, `SamenessGroup`, and `Datacenters` cannot all be empty at the same time. #### Values @@ -538,6 +554,7 @@ This parameter is a map, and its key is the name of the local service subset tha - [`Service`](#failover-service) - [`ServiceSubset`](#failover-servicesubset) - [`Namespace`](#failover-namespace) + - [`SamenessGroup`](#failover-samenessgroup) - [`Datacenters`](#failover-datacenters) - [`Targets`](#failover-targets) @@ -568,6 +585,15 @@ Specifies the namespace at the failover location where the failover services are - Default: None - Data type: String +### `Failover{}.SamenessGroup` + +Specifies the sameness group at the failover location where the failover services are deployed. + +#### Values + +- Default: None +- Data type: String + ### `Failover{}.Datacenters` Specifies an ordered list of datacenters at the failover location to attempt connections to during a failover scenario. When Consul cannot establish a connection with the first datacenter in the list, it proceeds sequentially until establishing a connection with another datacenter. 
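To make the new `SamenessGroup` failover field concrete, the following is a minimal sketch of a service resolver that fails over to a sameness group; the service and group names are illustrative assumptions:

```hcl
Kind = "service-resolver"
Name = "api"

Failover = {
  # Apply this failover policy to all subsets of the "api" service.
  "*" = {
    # Route to healthy instances registered with this sameness group.
    SamenessGroup = "product-group"
  }
}
```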
@@ -907,6 +933,7 @@ Specifies redirect instructions for local service traffic so that services deplo - [`serviceSubset`](#spec-redirect-servicesubset) - [`namespace`](#spec-redirect-namespace) - [`partition`](#spec-redirect-partition) + - [`samenessGroup`](#spec-redirect-samenessgroup) - [`datacenter`](#spec-redirect-datacenter) - [`peer`](#spec-redirect-peer) @@ -946,6 +973,15 @@ Specifies the admin partition at the redirect’s destination that resolves loca - Default: None - Data type: String +### `spec.redirect.samenessGroup` + +Specifies the sameness group at the redirect’s destination that resolves local upstream requests. + +#### Values + +- Default: None +- Data type: String + ### `spec.redirect.datacenter` @@ -971,7 +1007,7 @@ Specifies controls for rerouting traffic to an alternate pool of service instanc This parameter is a map, and its key is the name of the local service subset that resolves to another location when it fails. You can specify a `"*"` wildcard to apply failovers to any subset. -`service`, `serviceSubset`, `namespace`, `targets`, and `datacenters` cannot all be empty at the same time. +`service`, `serviceSubset`, `namespace`, `targets`, `samenessGroup`, and `datacenters` cannot all be empty at the same time. #### Values @@ -980,6 +1016,7 @@ This parameter is a map, and its key is the name of the local service subset tha - [`service`](#spec-failover-service) - [`serviceSubset`](#spec-failover-servicesubset) - [`namespace`](#spec-failover-namespace) + - [`samenessGroup`](#spec-failover-samenessgroup) - [`datacenters`](#spec-failover-datacenters) - [`targets`](#spec-failover-targets) @@ -1010,6 +1047,15 @@ Specifies the namespace at the failover location where the failover services are - Default: None - Data type: String +### `spec.failover.samenessGroup` + +Specifies the sameness group at the failover location where the failover services are deployed. + +#### Values + +- Default: None +- Data type: String + ### `spec.failover.datacenters` Specifies an ordered list of datacenters at the failover location to attempt connections to during a failover scenario. When Consul cannot establish a connection with the first datacenter in the list, it proceeds sequentially until establishing a connection with another datacenter. diff --git a/website/content/docs/connect/gateways/api-gateway/configuration/http-route.mdx b/website/content/docs/connect/gateways/api-gateway/configuration/http-route.mdx index 997e2bbf69..02d2725ad6 100644 --- a/website/content/docs/connect/gateways/api-gateway/configuration/http-route.mdx +++ b/website/content/docs/connect/gateways/api-gateway/configuration/http-route.mdx @@ -533,6 +533,11 @@ Specifies the HTTP method to match. Specifies type of match for the path: `"exact"`, `"prefix"`, or `"regex"`. +If set to `prefix`, Consul uses simple string matching to identify incoming request prefixes. For example, if the route is configured to match incoming requests to services prefixed with `/dev`, then the gateway would match requests to `/dev-` and `/deviate` and route to the upstream. + +This deviates from the +[Kubernetes Gateway API specification](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io%2fv1beta1.PathMatchType), which matches on full path elements. In the previous example, _only_ requests to `/dev` or `/dev/` would match. 
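For illustration, a minimal `http-route` rule using a `prefix` path match might look like the following sketch. The route and service names are assumptions, and the `Parents` binding to a gateway listener is omitted for brevity:

```hcl
Kind = "http-route"
Name = "dev-route"

Rules = [
  {
    Matches = [
      {
        Path = {
          # Simple string matching: this also matches /dev- and /deviate.
          Match = "prefix"
          Value = "/dev"
        }
      }
    ]
    Services = [
      {
        Name = "dev-svc"
      }
    ]
  }
]
```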
+ #### Values - Default: none diff --git a/website/content/docs/connect/proxies/envoy-extensions/configuration/ext-authz.mdx b/website/content/docs/connect/proxies/envoy-extensions/configuration/ext-authz.mdx index 6b5d8cc272..2d3c48789a 100644 --- a/website/content/docs/connect/proxies/envoy-extensions/configuration/ext-authz.mdx +++ b/website/content/docs/connect/proxies/envoy-extensions/configuration/ext-authz.mdx @@ -348,7 +348,7 @@ The following table describes how to configure parameters for the `Service` fiel ### `Arguments.Config.GrpcService.Target.Uri` -Specifies the URI of the external authorization service. Configure this field when you must provide an explicit URI to the external authorization service, such as cases in which the authorization service is running on the same host or pod. If set, the value of this field must be either `localhost:` or `127.0.0.1:` +Specifies the URI of the external authorization service. Configure this field when you must provide an explicit URI to the external authorization service, such as cases in which the authorization service is running on the same host or pod. If set, the value of this field must be one of `localhost:`, `127.0.0.1:`, or `::1:`. Configure either the `Uri` field or the [`Service`](#arguments-config-grpcservice-target-service) field, but not both. @@ -434,7 +434,7 @@ The following table describes how to configure parameters for the `Service` fiel ### `Arguments{}.Config{}.HttpService{}.Target{}.Uri` -Specifies the URI of the external authorization service. Configure this field when you must provide an explicit URI to the external authorization service, such as cases in which the authorization service is running on the same host or pod. +Specifies the URI of the external authorization service. Configure this field when you must provide an explicit URI to the external authorization service, such as cases in which the authorization service is running on the same host or pod. If set, the value of this field must be one of `localhost:`, `127.0.0.1:`, or `::1:`. Configure either the `Uri` field or the [`Service`](#arguments-config-httpservice-target-service) field, but not both. diff --git a/website/content/docs/connect/proxies/envoy-extensions/configuration/property-override.mdx b/website/content/docs/connect/proxies/envoy-extensions/configuration/property-override.mdx index f88cd476a0..8ccb49a391 100644 --- a/website/content/docs/connect/proxies/envoy-extensions/configuration/property-override.mdx +++ b/website/content/docs/connect/proxies/envoy-extensions/configuration/property-override.mdx @@ -46,13 +46,14 @@ Patches = [ TrafficDirection = "" Services = [ { - Name = "", + Name = "" Namespace = "" Partition = "" } ] - Op = "", - Path = "", + } + Op = "" + Path = "" Value = "" } ] @@ -117,7 +118,7 @@ The following table describes how to configure a `ResourceFilter`: Specifies the JSON Patch operation to perform when the `ResourceFilter` matches a local Envoy proxy configuration. You can specify one of the following values for each patch: -- `add`: Replaces a property or message specified by [`Path`](#patches-path) with the given value. The JSON patch format does not merge objects. To emulate merges, you must configure discrete `add` operations for each changed field. Consul returns an error if the target field does not exist in the corresponding schema. +- `add`: Replaces a property or message specified by [`Path`](#patches-path) with the given value. The JSON Patch `add` operation does not merge objects. 
To emulate merges, you must configure discrete `add` operations for each changed field. Consul returns an error if the target field does not exist in the corresponding schema. - `remove`: Unsets the value of the field specified by [`Path`](#patches-path). If the field is not set, no changes are made. Consul returns an error if the target field does not exist in the corresponding schema. #### Values @@ -134,7 +135,7 @@ Specifies where the extension performs the associated operation on the specified The `Path` field does not support addressing array elements or protobuf map field entries. Refer to [Constructing paths](/consul/docs/connect/proxies/envoy-extensions/usage/property-override#constructing-paths) for information about how to construct paths. -When setting fields, the extension sets any unset intermediate fields to their default values. A a single operation on a nested field can set multiple intermediate fields. Because Consul sets the intermediate fields to their default values, you may need to configure subsequent patches to satisfy Envoy or Consul validation. +When setting fields, the extension sets any unset intermediate fields to their default values. A single operation on a nested field can set multiple intermediate fields. Because Consul sets the intermediate fields to their default values, you may need to configure subsequent patches to satisfy Envoy or Consul validation. #### Values @@ -144,9 +145,10 @@ When setting fields, the extension sets any unset intermediate fields to their d ### `Patches[].Value{}` -Defines a value to set at the specified [path](#patches-path) if the [operation](#patches-op) is set to `add`. You can specify either a scalar or enum value or define a map that contains string keys and values corresponding to scalar or enum child fields. Refer to the [example configurations](#examples) for additional guidance and to the [Envoy API documentation](https://www.envoyproxy.io/docs/envoy/latest/api-v3/api) for additional information about Envoy proxy interfaces. +Defines a value to set at the specified [path](#patches-path) if the [operation](#patches-op) is set to `add`. You can specify either a scalar or enum value, an array of scalar or enum values (for repeated fields), or define a map that contains string keys and values corresponding to scalar or enum child fields. Single and repeated scalar and enum values are supported. Refer to the [example configurations](#examples) for additional guidance and to the [Envoy API documentation](https://www.envoyproxy.io/docs/envoy/latest/api-v3/api) for additional information about Envoy proxy interfaces. If Envoy specifies a wrapper as the target field type, the extension automatically coerces simple values to the wrapped type when patching. For example, the value `32768` is allowed when targeting a cluster's `per_connection_buffer_limit_bytes`, which is a `UInt32Value` field. Refer to the [protobuf documentation](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/wrappers.proto) for additional information about wrappers. + #### Values - Default: None @@ -160,9 +162,9 @@ If Envoy specifies a wrapper as the target field type, the extension automatical The following examples demonstrate patterns that you may be able to model your configurations on. -### Enable `enforcing_consecutive_5xx` outlier detection +### Enable `respect_dns_ttl` in a cluster -In the following example, the `add` operation patches an outlier detection property into outbound cluster traffic. 
The `Path` specifies the `enforcing_consecutive_5xx` interface and sets a value of `1234`: +In the following example, the `add` operation patches the outbound cluster corresponding to the `other-svc` upstream service to enable `respect_dns_ttl`. The `Path` specifies the [Cluster `/respect_dns_ttl`](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/cluster/v3/cluster.proto#envoy-v3-api-field-config-cluster-v3-cluster-respect-dns-ttl) top-level field and `Value` specifies a value of `true`: ```hcl Kind = "service-defaults" @@ -183,8 +185,8 @@ EnvoyExtensions = [ }, }, "Op" = "add", - "Path" = "/outlier_detection/enforcing_consecutive_5xx", - "Value" = 1234, + "Path" = "/respect_dns_ttl", + "Value" = true, } ] } @@ -192,9 +194,9 @@ EnvoyExtensions = [ ] ``` -### Update multiple values in the default map +### Update multiple values in a message field -In the following example, two `ResourceFilter` blocks target outbound traffic to the `db` service and add `/outlier_detection/enforcing_consecutive_5xx` and `/outlier_detection/failure_percentage_request_volume` properties: +In the following example, both `ResourceFilter` blocks target the cluster corresponding to the `other-svc` upstream service and modify [Cluster `/outlier_detection`](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/cluster/v3/outlier_detection.proto) properties: ```hcl Kind = "service-defaults" @@ -208,27 +210,27 @@ EnvoyExtensions = [ Patches = [ { ResourceFilter = { - ResourceType = "cluster", - TrafficDirection = "outbound", + ResourceType = "cluster" + TrafficDirection = "outbound" Services = [{ Name = "other-svc" - }], - }, - Op = "add", - Path = "/outlier_detection/enforcing_consecutive_5xx", - Value = 1234, + }] + } + Op = "add" + Path = "/outlier_detection/max_ejection_time/seconds" + Value = 120 }, { ResourceFilter = { - ResourceType = "cluster", - TrafficDirection = "outbound", + ResourceType = "cluster" + TrafficDirection = "outbound" Services = [{ Name = "other-svc" - }], - }, - Op = "add", - Path = "/outlier_detection/failure_percentage_request_volume", - Value = 2345, + }] + } + Op = "add" + Path = "/outlier_detection/max_ejection_time_jitter/seconds" + Value = 1 } ] } @@ -236,9 +238,13 @@ EnvoyExtensions = [ ] ``` -### Set multiple values that replace the map +The use of `/seconds` in these examples corresponds to the same field in the [google.protobuf.Duration](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/duration.proto) proto definition, since the extension does not support JSON serialized string forms of common protobuf types (e.g. `120s`). + +-> **Note:** Using separate patches per field preserves any existing configuration of other fields in `outlier_detection` that may be directly set by Consul, such as [`enforcing_consecutive_5xx`](https://developer.hashicorp.com/consul/docs/connect/proxies/envoy#enforcing_consecutive_5xx). 
+ +### Replace a message field -In the following example, a `ResourceFilter` targets outbound traffic to the `db` service and replaces the map of properties located at `/outlier_detection` with `enforcing_consecutive_5xx` and `failure_percentage_request_volume` and properties: +In the following example, a `ResourceFilter` targets the cluster corresponding to the `other-svc` upstream service and _replaces_ the entire map of properties located at `/outlier_detection`, including explicitly set `enforcing_success_rate` and `success_rate_minimum_hosts` properties: ```hcl Kind = "service-defaults" @@ -246,27 +252,29 @@ Name = "my-svc" Protocol = "http" EnvoyExtensions = [ { - Name = "builtin/property-override", + Name = "builtin/property-override" Arguments = { - ProxyType = "connect-proxy", + ProxyType = "connect-proxy" Patches = [ { ResourceFilter = { - ResourceType = "cluster", - TrafficDirection = "outbound", + ResourceType = "cluster" + TrafficDirection = "outbound" Services = [{ Name = "other-svc" - }], - }, - Op = "add", - Path = "/outlier_detection", + }] + } + Op = "add" + Path = "/outlier_detection" Value = { - "enforcing_consecutive_5xx" = 1234, - "failure_percentage_request_volume" = 2345, - }, + "enforcing_success_rate" = 80 + "success_rate_minimum_hosts" = 2 + } } ] } } ] ``` + +Unlike the previous example, other `/outlier_detection` values set by Consul will _not_ be retained unless they match Envoy's defaults, because the entire value of `/outlier_detection` will be replaced. diff --git a/website/content/docs/connect/proxies/envoy-extensions/usage/ext-authz.mdx b/website/content/docs/connect/proxies/envoy-extensions/usage/ext-authz.mdx index f3cc543284..3062879d47 100644 --- a/website/content/docs/connect/proxies/envoy-extensions/usage/ext-authz.mdx +++ b/website/content/docs/connect/proxies/envoy-extensions/usage/ext-authz.mdx @@ -115,7 +115,7 @@ The following Envoy configurations are not supported: | `failure_mode_allow` | Set the `EnvoyExtension.Required` field to `true` in the [service defaults configuration entry](/consul/docs/connect/config-entries/service-defaults#envoyextensions) or [proxy defaults configuration entry](/consul/docs/connect/config-entries/proxy-defaults#envoyextensions). | | `filter_enabled` | Set the `EnvoyExtension.Required` field to `true` in the [service defaults configuration entry](/consul/docs/connect/config-entries/service-defaults#envoyextensions) or [proxy defaults configuration entry](/consul/docs/connect/config-entries/proxy-defaults#envoyextensions). | | `filter_enabled_metadata` | Set the `EnvoyExtension.Required` field to `true` in the [service defaults configuration entry](/consul/docs/connect/config-entries/service-defaults#envoyextensions) or [proxy defaults configuration entry](/consul/docs/connect/config-entries/proxy-defaults#envoyextensions). | -| `transport_api_version` | Consul only supports v3 of the transport API. As a result, there is no workaround for implement the behavior of this field. | +| `transport_api_version` | Consul only supports v3 of the transport API. As a result, there is no workaround for implementing the behavior of this field. 
| ## Apply the configuration entry diff --git a/website/content/docs/connect/proxies/envoy-extensions/usage/property-override.mdx b/website/content/docs/connect/proxies/envoy-extensions/usage/property-override.mdx index 8055402657..84cf621930 100644 --- a/website/content/docs/connect/proxies/envoy-extensions/usage/property-override.mdx +++ b/website/content/docs/connect/proxies/envoy-extensions/usage/property-override.mdx @@ -8,6 +8,13 @@ description: Learn how to use the property-override extension for Envoy proxies This topic describes how to use the `property-override` extension to set and remove individual properties for the Envoy resources Consul generates. The extension uses the [protoreflect](https://pkg.go.dev/google.golang.org/protobuf/reflect/protoreflect), which enables Consul to dynamically manipulate messages. +The extension currently supports setting scalar and enum fields, removing individual fields addressable by `Path`, and initializing unset intermediate message fields indicated in `Path`. + +It currently does _not_ support the following use cases: +- Adding, updating, or removing repeated field members +- Adding or updating [protobuf `map`](https://protobuf.dev/programming-guides/proto3/#maps) fields +- Adding or updating [protobuf `Any`](https://protobuf.dev/programming-guides/proto3/#any) fields + ## Workflow - Complete the following steps to use the `property-override` extension: @@ -23,9 +30,9 @@ Add Envoy extension configurations to a proxy defaults or service defaults confi - When you configure Envoy extensions on proxy defaults, they apply to every service. - When you configure Envoy extensions on service defaults, they apply to a specific service. -Consul applies Envoy extensions configured in proxy defaults before it applies extensions in service defaults. As a result, the Envoy extension configuration in service defaults may override configurations in proxy defaults. +Consul applies Envoy extensions configured in proxy defaults before it applies extensions in service defaults. As a result, the Envoy extension configuration in service defaults may override configurations in proxy defaults. -In the following service defaults configuration entry example, Consul adds a new `/upstream_connection_options/tcp_keepalive/keepalive_probes-5` field to each of the proxy's cluster configuration for the outbound `db`service upstream. 
The configuration applies to all `connect-proxy` proxies with services configured to communicate over HTTP:
+In the following service defaults configuration entry example, Consul sets the `/respect_dns_ttl` field on the `api` service proxy's cluster configuration for the `other-svc` upstream service:
 
 
 
@@ -33,7 +40,7 @@ In the following service defaults configuration entry example, Consul adds a new
 
 ```hcl
 Kind = "service-defaults"
-Name = "global"
+Name = "api"
 Protocol = "http"
 EnvoyExtensions = [
   {
@@ -50,8 +57,8 @@ EnvoyExtensions = [
           }]
         }
         Op = "add"
-        Path = "/upstream_connection_options/tcp_keepalive/keepalive_probes"
-        Value = 5
+        Path = "/respect_dns_ttl"
+        Value = true
       }
     ]
   }
@@ -66,9 +73,9 @@ EnvoyExtensions = [
 
 ```json
 "kind": "service-defaults",
-"name": "global",
+"name": "api",
 "protocol": "http",
-"envoy_extensions": [{
+"envoyExtensions": [{
   "name": "builtin/property-override",
   "arguments": {
     "proxyType": "connect-proxy",
@@ -76,11 +83,11 @@ EnvoyExtensions = [
       "resourceFilter": {
         "resourceType": "cluster",
         "trafficDirection": "outbound",
-        "services": [{ "name": "other-svc" }],
-        "op": "add",
-        "path": "/upstream_connection_options/tcp_keepalive/keepalive_probes",
-        "value": 5
-      }
+        "services": [{ "name": "other-svc" }]
+      },
+      "op": "add",
+      "path": "/respect_dns_ttl",
+      "value": true
     }]
   }
 }]
@@ -88,13 +95,13 @@ EnvoyExtensions = [
 
-
+
 
 ```yaml
 apiVersion: consul.hashicorp.com/v1alpha1
 kind: ServiceDefaults
 metadata:
-  name: global
+  name: api
 spec:
   protocol: http
   envoyExtensions:
@@ -108,8 +115,8 @@ spec:
           services:
             - name: "other-svc"
         op: "add"
-        path: "/upstream_connection_options/tcp_keepalive/keepalive_probes",
-        value: 5
+        path: "/respect_dns_ttl"
+        value: true
 ```
 
@@ -136,6 +143,7 @@ EnvoyExtensions = [
   {
     Name = "builtin/property-override"
     Arguments = {
+      Debug = true
       ProxyType = "connect-proxy"
       Patches = [
         {
@@ -146,7 +154,7 @@ EnvoyExtensions = [
           Op = "add"
           Path = ""
           Value = 5
-        }
+        }
       ]
     }
   }
@@ -157,19 +165,23 @@ After applying the configuration entry, Consul prints a message that includes the
 
 ```shell-session
 $ consul config write api.hcl
-non-empty, non-root Path is required. available cluster fields:
-/outlier_detection
-/outlier_detection/enforcing_consecutive_5xx
-/outlier_detection/failure_percentage_request_volume
-/round_robin_lb_config
-/round_robin_lb_config/slow_start_config
+non-empty, non-root Path is required;
+available envoy.config.cluster.v3.Cluster fields:
+transport_socket_matches
+name
+alt_stat_name
+type
+cluster_type
+eds_cluster_config
+connect_timeout
+...
 ```
 
 You can use the output to help you construct the appropriate value for the `Path` field. For example:
 
 ```shell-session
-$ consul config write api.hcl | grep round_robin
-/round_robin_lb_config
+$ consul config write api.hcl 2>&1 | grep round_robin
+round_robin_lb_config
 ```
 
diff --git a/website/content/docs/ecs/terraform/secure-configuration.mdx b/website/content/docs/ecs/terraform/secure-configuration.mdx
index 07031f5490..4db6c13abb 100644
--- a/website/content/docs/ecs/terraform/secure-configuration.mdx
+++ b/website/content/docs/ecs/terraform/secure-configuration.mdx
@@ -121,7 +121,7 @@ Follow the instructions described in [Create a task definition](/consul/docs/ecs
 
 The secret stores the gossip encryption key that the Consul clients use.
- + ```hcl resource "aws_secretsmanager_secret" "gossip_key" { @@ -134,7 +134,7 @@ resource "aws_secretsmanager_secret_version" "gossip_key" { } ``` - + ### Enable secure deployment diff --git a/website/content/docs/enterprise/fips.mdx b/website/content/docs/enterprise/fips.mdx index 6ad145886b..5f5cf28196 100644 --- a/website/content/docs/enterprise/fips.mdx +++ b/website/content/docs/enterprise/fips.mdx @@ -87,13 +87,13 @@ Consul's FIPS 140-2 products on Windows use the CNGCrypto integration in Microso To ensure your build of Consul Enterprise includes FIPS support, confirm that a line with `FIPS: Enabled` appears when you run a `version` command. For example, the following message appears for Linux users: -```shell-session hideClipboard +```log hideClipboard FIPS: FIPS 140-2 Enabled, crypto module boringcrypto ``` The following message appears for Windows users: -```shell-session hideClipboard +```log hideClipboard FIPS: FIPS 140-2 Enabled, crypto module cngcrypto ``` @@ -121,7 +121,7 @@ Similarly, on a FIPS Windows binary, run `go tool nm` on the binary to get a sym On both Linux and Windows non-FIPS builds, the search output yields no results. -### Compliance Validation +### Compliance validation A lab authorized by the U.S. Government to certify FIPS 140-2 compliance is in the process of verifying that Consul Enterprise and its related packages are compliant with the requirements of FIPS 140-2 Level 1. diff --git a/website/content/docs/k8s/deployment-configurations/vault/data-integration/webhook-certs.mdx b/website/content/docs/k8s/deployment-configurations/vault/data-integration/webhook-certs.mdx index aec4fb1d69..b615d31fab 100644 --- a/website/content/docs/k8s/deployment-configurations/vault/data-integration/webhook-certs.mdx +++ b/website/content/docs/k8s/deployment-configurations/vault/data-integration/webhook-certs.mdx @@ -14,16 +14,16 @@ In a Consul Helm chart configuration that does not use Vault, `webhook-cert-mana When Vault is configured as the controller and connect inject Webhook Certificate Provider on Kubernetes: - `webhook-cert-manager` is no longer deployed to the cluster. - - controller and connect inject each get their webhook certificates from its own Vault PKI mount via the injected Vault Agent. - - controller and connect inject each need to be configured with its own Vault Role that has necessary permissions to receive certificates from its respective PKI mount. - - controller and connect inject each locally update its own `mutatingwebhookconfiguration` so that Kubernetes can relay events. + - Controller and connect inject each get their webhook certificates from their own Vault PKI mounts via the injected Vault Agent. + - Controller and connect inject each need to be configured with their own Vault role that has the necessary permissions to receive certificates from its respective PKI mount. + - Controller and connect inject each locally update their own `mutatingwebhookconfiguration` so that Kubernetes can relay events. - Vault manages certificate rotation and rotates certificates to each webhook. To use Vault as the controller and connect inject Webhook Certificate Provider, you must modify the steps outlined in the [Data Integration](/consul/docs/k8s/deployment-configurations/vault/data-integration) section. The following steps are repeated for each datacenter: 1. Create a Vault policy that authorizes the desired level of access to the secret. - 1.
(Added) Create Vault PKI roles for controller and connect inject each that establish the domains that each is allowed to issue certificates for. + 1. (Added) Create Vault PKI roles for controller and connect inject that establish the domains each is allowed to issue certificates for. 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. 1. Configure the Vault Kubernetes auth roles in the Consul on Kubernetes Helm chart. @@ -74,44 +74,45 @@ Issue the following commands to enable and configure the PKI Secrets Engine to s 1. Create a policy that allows `["create", "update"]` access to the [certificate issuing URL](/vault/api-docs/secret/pki) so Consul controller and connect inject can fetch a new certificate/key pair and provide it to the Kubernetes `mutatingwebhookconfiguration`. - The path to the secret referenced in the `path` resource is the same value that you will configure in the `global.secretsBackend.vault.controller.tlsCert.secretName` and `global.secretsBackend.vault.connectInject.tlsCert.secretName` Helm configuration (refer to [Update Consul on Kubernetes Helm chart](#update-consul-on-kubernetes-helm-chart)). + The path to the secret referenced in the `path` resource is the same value that you will configure in the `global.secretsBackend.vault.controller.tlsCert.secretName` and `global.secretsBackend.vault.connectInject.tlsCert.secretName` Helm configuration (refer to [Update Consul on Kubernetes Helm chart](#update-consul-on-kubernetes-helm-chart)). - ```shell-session - $ vault policy write controller-tls-policy - < \ diff --git a/website/content/docs/k8s/installation/install.mdx b/website/content/docs/k8s/installation/install.mdx index 1d80696ba0..78f90f4807 100644 --- a/website/content/docs/k8s/installation/install.mdx +++ b/website/content/docs/k8s/installation/install.mdx @@ -308,7 +308,7 @@ metadata: spec: containers: - name: example - image: 'consul:latest' + image: 'hashicorp/consul:latest' env: - name: HOST_IP valueFrom: @@ -345,7 +345,7 @@ spec: spec: containers: - name: example - image: 'consul:latest' + image: 'hashicorp/consul:latest' env: - name: HOST_IP valueFrom: diff --git a/website/content/docs/k8s/upgrade/index.mdx b/website/content/docs/k8s/upgrade/index.mdx index 529815df97..e41141c142 100644 --- a/website/content/docs/k8s/upgrade/index.mdx +++ b/website/content/docs/k8s/upgrade/index.mdx @@ -219,7 +219,7 @@ In earlier versions, Consul on Kubernetes used client agents in its deployments. If you upgrade Consul from a version that uses client agents to a version that uses dataplanes, complete the following steps to upgrade your deployment safely and without downtime. -1. Before you upgrade, edit your Helm chart to enable Consul client agents by setting `client.enabled` and `client.updateStrategy`: +1. Before you upgrade, edit your Helm chart configuration to enable Consul client agents by setting `client.enabled` and `client.updateStrategy`: ```yaml filename="values.yaml" client: @@ -228,7 +228,19 @@ If you upgrade Consul from a version that uses client agents to a version the us type: OnDelete ``` -1. Follow our [recommended procedures to upgrade servers](#upgrade-consul-servers) on Kubernetes deployments to upgrade Helm values for the new version of Consul. +1. Update the `connect-injector` deployment so that it does not log out on restart, +which ensures that the ACL tokens used by existing services remain valid during the migration to `consul-dataplane`.
+Note that you must remove the token manually after completing the migration. + + The following command triggers the deployment rollout. Wait for the rollout to complete before proceeding to the next step. + + ```bash + # Switch to the namespace where Consul is installed (<consul-namespace> is a placeholder). + kubectl config set-context --current --namespace=<consul-namespace> + # Look up the name of the connect-injector deployment. + INJECTOR_DEPLOYMENT=$(kubectl get deploy -l "component=connect-injector" -o=jsonpath='{.items[0].metadata.name}') + # Remove the lifecycle hook that logs out the ACL token when the pod stops. + kubectl patch deploy $INJECTOR_DEPLOYMENT --type='json' -p='[{"op": "remove", "path": "/spec/template/spec/containers/0/lifecycle"}]' + ``` + +1. Follow our [recommended procedures to upgrade servers](#upgrade-consul-servers) on Kubernetes deployments to upgrade Helm values for the new version of Consul. The latest version of consul-k8s components may be in a CrashLoopBackoff state while servers are upgraded from versions <1.14.x until all Consul servers are on versions >=1.14.x. Components in CrashLoopBackoff will not negatively affect the cluster because older versioned components will still be operating. Once all servers have been fully upgraded, the latest consul-k8s components will automatically recover from CrashLoopBackoff and older component versions will be spun down. 1. Run `kubectl rollout restart` to restart your service mesh applications. Restarting service mesh applications causes Kubernetes to re-inject them with the webhook for dataplanes. diff --git a/website/content/docs/release-notes/consul-k8s/v1_2_x.mdx b/website/content/docs/release-notes/consul-k8s/v1_2_x.mdx new file mode 100644 index 0000000000..bd8d65b480 --- /dev/null +++ b/website/content/docs/release-notes/consul-k8s/v1_2_x.mdx @@ -0,0 +1,84 @@ +--- +layout: docs +page_title: 1.2.x +description: >- + Consul on Kubernetes release notes for version 1.2.x +--- + +# Consul on Kubernetes 1.2.0 + +We are pleased to announce the following Consul updates. + +## Release highlights + +- **Sameness groups (Enterprise):** Sameness groups are a user-defined set of partitions that Consul uses to identify services in different administrative partitions with the same name as being the same services. You can use sameness groups to create a blanket failover policy for deployments with cluster peering connections. Refer to the [Sameness groups overview](/consul/docs/connect/cluster-peering/usage/create-sameness-groups) for more information. + + Sameness groups is currently a beta feature in Consul Enterprise v1.16.0. + +- **Permissive mTLS:** You can enable the `permissive` mTLS mode to enable sidecar proxies to accept both mTLS and non-mTLS traffic. Using this mode enables you to onboard services without downtime and without reconfiguring or redeploying your application. Refer to [Onboard services while in transparent proxy mode](/consul/docs/k8s/connect/onboarding-tproxy-mode) for more information on how to use permissive mTLS to onboard services to Consul. + +- **Transparent proxy enhancements for failover and virtual services:** We have made several internal improvements, such as ensuring that virtual IPs are always available, to reduce the friction associated with operating Consul in transparent proxy mode. Onboarding services, configuring failover redirects, and other operations require less administrative effort and ensure a smoother experience.
Refer to the following documentation for additional information: + + - [Onboard services while in transparent proxy mode](/consul/docs/k8s/connect/onboarding-tproxy-mode) + - [Route traffic to virtual services](/consul/docs/k8s/l7-traffic/route-to-virtual-services) + - [Configure failover services](/consul/docs/k8s/l7-traffic/failover-tproxy) + +- **Granular server-side rate limits (Enterprise):** You can now set limits per source IP address. The following steps describe the general process for setting global read and write rate limits; a minimal configuration sketch appears after these highlights: + + 1. Set arbitrary limits to begin understanding the upper boundary of RPC and gRPC loads in your network. Refer to [Initialize rate limit settings](/consul/docs/agent/limits/usage/init-rate-limits) for additional information. + 1. Monitor the metrics and logs and readjust the initial configurations as necessary. Refer to [Monitor rate limit data](/consul/docs/agent/limits/usage/monitor-rate-limits) for additional information. + 1. Define your final operational limits based on your observations. If you are defining global rate limits, refer to [Set global traffic rate limits](/consul/docs/agent/limits/usage/set-global-traffic-rate-limits) for additional information. For information about setting limits based on source IP, refer to [Limit traffic rates for a source IP](/consul/docs/agent/limits/usage/limit-request-rates-from-ips). + +- **Consul Envoy Extensions:** The Consul Envoy extension system enables you to modify Consul-generated Envoy resources. Refer to [Envoy extension overview](/consul/docs/connect/proxies/envoy-extensions) for more information on how to use these extensions for Consul service mesh. + + - **Property Override:** The property override Envoy extension lets you set, remove, or modify individual properties for the Envoy resources Consul generates. Refer to [Configure Envoy proxy properties](/consul/docs/connect/proxies/envoy-extensions/usage/property-override) for more information on how to use this extension. + + - **Wasm:** The Wasm Envoy extension lets you configure Wasm programs to be used as filters in the service's sidecar proxy. Refer to [Run WebAssembly plug-ins in Envoy proxy](/consul/docs/connect/proxies/envoy-extensions/usage/wasm) for more information on how to use this extension. + + - **External Authorization:** The external authorization Envoy extension lets you delegate data plane authorization requests to external systems. Refer to [Delegate authorization to an external service](/consul/docs/connect/proxies/envoy-extensions/usage/ext-authz) for more information on how to use this extension. + +- **Simplified API Gateway installation for Consul on Kubernetes:** API Gateway is now built into Consul. This enables a simplified installation and configuration process for Consul on Kubernetes. Refer to the [API Gateway installation](/consul/docs/api-gateway/install) documentation for more information on the simplified native installation method. + +- **FIPS compliance (Enterprise):** HashiCorp now offers FIPS 140-2 compliant builds of Consul Enterprise that meet the security needs of federal agencies protecting sensitive, unclassified information with approved cryptographic measures. These builds use certified cryptographic modules and restrict configuration settings to comply with FIPS 140-2 Level 1 requirements, enabling compliant Consul deployments. Refer to the [Consul Enterprise FIPS](/consul/docs/enterprise/fips) documentation for more information on FIPS compliance.
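As a companion to the rate-limiting steps above, the following is a minimal agent configuration sketch, assuming the `request_limits` block used for global rate limiting; the numeric rates are illustrative placeholders rather than recommended values:

```hcl
limits {
  request_limits {
    # "permissive" records and reports limit violations without rejecting
    # traffic; switch to "enforcing" after validating the limits under real load.
    mode       = "permissive"
    # Illustrative starting points, in requests per second.
    read_rate  = 500
    write_rate = 200
  }
}
```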
+ +- **JWT Authorization with service intentions:** Consul can now authorize connections based on claims present in a JSON Web Token (JWT). You can configure Consul to use one or more JWT providers, which lets you control access to services and specific HTTP paths based on the validity of JWT claims embedded in the service traffic. This ensures a uniform and low latency mechanism to validate and authorize communication based on JWT claims across all services in a diverse service-oriented architecture. Refer to [Use JWT authorization with service intentions](/consul/docs/connect/intentions/jwt-authorization) for more information. + +- **Automated license utilization reporting (Enterprise):** Consul Enterprise now provides automated license utilization reporting, which sends minimal product-license metering data to HashiCorp. You can use these reports to understand how much more you can deploy under your current contract, which can help you protect against overutilization and budget for predicted consumption. Refer to the [Automated license utilization reporting documentation](/consul/docs/enterprise/license/utilization-reporting) for more information. + +## What's deprecated + +- **Ingress gateway:** Starting with this release, ingress gateway is deprecated and will not be enhanced beyond its current capabilities. Ingress gateway is fully supported in this version but may be removed in a future release of Consul. + + Consul's API gateway is the recommended alternative to ingress gateway. For ingress gateway features not currently supported by API gateway, equivalent functionality will be added to API gateway over the next several releases of Consul. + +- **Legacy API Gateway:** The Consul API Gateway that was previously packaged (`consul-api-gateway`) and released separately from Consul K8s is now deprecated. This is referred to as the “legacy” API Gateway. + + The legacy API Gateway (v0.5.4) is supported with this version of Consul on Kubernetes in order to simplify the process of migrating from legacy to native API gateways. + +## What's changed + +- The native API Gateway creates `api-gateway` configuration objects in Consul. This is a change from the legacy API Gateway, which creates `ingress-gateway` objects in Consul. + +- The native API Gateway in Consul on Kubernetes v1.2 does not create service intentions automatically. + +## Supported software + + Consul 1.15.x and 1.14.x are not supported. Refer to Supported Consul and Kubernetes versions for more details on choosing the correct consul-k8s version. + +- Consul 1.16.x. +- Consul Dataplane v1.2.x. Refer to Envoy and Consul Dataplane for details about Consul Dataplane versions and the available packaged Envoy version. +- Kubernetes 1.24.x - 1.27.x +- kubectl 1.24.x - 1.27.x +- Helm 3.6+ + +## Upgrading + +For more detailed information, please refer to the [upgrade details page](/consul/docs/upgrading/upgrade-specific) and the changelogs. + +## Changelogs + +The changelogs for this major release version and any maintenance versions are listed below. + + These links take you to the changelogs on the GitHub website.
+ +- [1.2.0-rc1](https://github.com/hashicorp/consul-k8s/releases/tag/v1.2.0-rc1) \ No newline at end of file diff --git a/website/content/docs/release-notes/consul/v1_16_x.mdx b/website/content/docs/release-notes/consul/v1_16_x.mdx index 0472de83f2..33241b6b84 100644 --- a/website/content/docs/release-notes/consul/v1_16_x.mdx +++ b/website/content/docs/release-notes/consul/v1_16_x.mdx @@ -9,11 +9,11 @@ description: >- We are pleased to announce the following Consul updates. -## Release Highlights +## Release highlights - **Sameness groups (Enterprise):** Sameness groups are a user-defined set of partitions that Consul uses to identify services in different administrative partitions with the same name as being the same services. You can use sameness groups to create a blanket failover policy for deployments with cluster peering connections. Refer to the [Sameness groups overview](/consul/docs/connect/cluster-peering/usage/create-sameness-groups) for more information. - Sameness groups is currently a _beta_ feature in Consul Enterprise v1.16.0. + Sameness groups is currently a beta feature in Consul Enterprise v1.16.0. - **Permissive mTLS:** You can enable the `permissive` mTLS mode to enable sidecar proxies to accept both mTLS and non-mTLS traffic. Using this mode enables you to onboard services without downtime and without reconfiguring or redeploying your application. Refer to the [Onboard services while in transparent proxy mode](/consul/docs/k8s/connect/onboarding-tproxy-mode) for more information on how to use permissive mTLS to onboard services to Consul. @@ -43,7 +43,13 @@ We are pleased to announce the following Consul updates. - **JWT Authorization with service intentions:** Consul can now authorize connections based on claims present in JSON Web Token (JWT). You can configure Consul to use one or more JWT providers, which lets you control access to services and specific HTTP paths based on the validity of JWT claims embedded in the service traffic. This ensures a uniform and low latency mechanism to validate and authorize communication based on JWT claims across all services in a diverse service-oriented architecture. Refer to the [Use JWT authorization with service intentions](/consul/docs/connect/intentions/jwt-authorization) for more information. -- **Automated license utilization reporting (Enterprise):** Consul Enteprise now provides automated license utilization reporting, which sends minimal product-license metering data to HashiCorp. You can use these reports to understand how much more you can deploy under your current contract, which can help you protect against overutilization and budget for predicted consumption. Refer to the [Automated license utilization reporting documentation](/consul/docs/enterprise/license/utilization-reporting) for more information. +- **Automated license utilization reporting (Enterprise):** Consul Enterprise now provides automated license utilization reporting, which sends minimal product-license metering data to HashiCorp. You can use these reports to understand how much more you can deploy under your current contract, which can help you protect against overutilization and budget for predicted consumption. Refer to the [Automated license utilization reporting documentation](/consul/docs/enterprise/license/utilization-reporting) for more information. + +## What's deprecated + +- **Ingress gateway:** Starting with this release, ingress gateway is deprecated and will not be enhanced beyond its current capabilities. 
Ingress gateway is fully supported in this version but may be removed in a future release of Consul. + + Consul's API gateway is the recommended alternative to ingress gateway. For ingress gateway features not currently supported by API gateway, equivalent functionality will be added to API gateway over the next several releases of Consul. ## Upgrading @@ -55,4 +61,4 @@ The changelogs for this major release version and any maintenance versions are l These links take you to the changelogs on the GitHub website. -- [1.16.0-rc1](https://github.com/hashicorp/consul/releases/tag/v1.16.0-rc1) \ No newline at end of file +- [1.16.0](https://github.com/hashicorp/consul/releases/tag/v1.16.0) \ No newline at end of file diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index ed0d2ac2de..6d92dab502 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -182,6 +182,10 @@ { "title": "Consul K8s", "routes": [ + { + "title": "v1.2.x", + "path": "release-notes/consul-k8s/v1_2_x" + }, { "title": "v1.1.x", "path": "release-notes/consul-k8s/v1_1_x" diff --git a/website/redirects.js b/website/redirects.js index b9be8b5f2c..517c73bbfa 100644 --- a/website/redirects.js +++ b/website/redirects.js @@ -38,19 +38,19 @@ module.exports = [ permanent: true, }, { - source: '/consul/docs/v1.16.x/connect/transparent-proxy', - destination: '/consul/docs/v1.16.x/k8s/connect/transparent-proxy', + source: '/consul/docs/connect/transparent-proxy', + destination: '/consul/docs/k8s/connect/transparent-proxy', permanent: true, }, { - source: '/consul/docs/1.16.x/agent/limits/init-rate-limits', - destination: '/consul/docs/1.16.x/agent/limits/usage/init-rate-limits', + source: '/consul/docs/agent/limits/init-rate-limits', + destination: '/consul/docs/agent/limits/usage/init-rate-limits', permanent: true, }, { - source: '/consul/docs/1.16.x/agent/limits/set-global-traffic-rate-limits', + source: '/consul/docs/agent/limits/set-global-traffic-rate-limits', destination: - '/consul/docs/1.16.x/agent/limits/usage/set-global-traffic-rate-limits', + '/consul/docs/agent/limits/usage/set-global-traffic-rate-limits', permanent: true, }, {