mirror of https://github.com/hashicorp/consul

commit bf769daae4
Merge branch 'main' of github.com:hashicorp/consul into derekm/split-grpc-ports
@@ -0,0 +1,3 @@
+```release-note:improvement
+connect: expose new tracing configuration on envoy
+```

@@ -0,0 +1,3 @@
+```release-note:improvement
+envoy: adds additional Envoy outlier ejection parameters to passive health check configurations.
+```
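For a sense of what the new knob looks like in code, the test changes further down in this commit exercise it through `structs.PassiveHealthCheck`. A minimal self-contained sketch of that shape (the struct here is a simplified stand-in, not the real `structs` package):

```go
package main

import (
	"fmt"
	"time"
)

// PassiveHealthCheck is a simplified stand-in for the shape exercised in
// the test hunks below. EnforcingConsecutive5xx is the new outlier
// ejection parameter: the % chance Envoy actually ejects a host when the
// consecutive-5xx detector trips.
type PassiveHealthCheck struct {
	Interval                time.Duration
	MaxFailures             uint32
	EnforcingConsecutive5xx *uint32
}

func uintPointer(v uint32) *uint32 { return &v }

func main() {
	phc := PassiveHealthCheck{
		Interval:                10 * time.Second,
		MaxFailures:             2,
		EnforcingConsecutive5xx: uintPointer(60), // enforce ejection for 60% of detections
	}
	fmt.Printf("%+v\n", phc)
}
```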
@@ -0,0 +1,3 @@
+```release-note:bugfix
+envoy: validate name before deleting proxy default configurations.
+```
@@ -0,0 +1,4 @@
+```release-note:feature
+ui: Use withCredentials for all HTTP API requests
+```
+
@@ -0,0 +1,3 @@
+```release-note:bugfix
+peering: Fix issue preventing deletion and recreation of peerings in TERMINATED state.
+```
@@ -0,0 +1,3 @@
+```release-note:improvement
+xds: Set `max_ejection_percent` on Envoy's outlier detection to 100% for peered services.
+```
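To make the Envoy side of this concrete, here is a hedged sketch of the cluster field being set, assuming the go-control-plane v3 types (the surrounding cluster wiring is omitted; this is not Consul's actual xDS code):

```go
package main

import (
	"fmt"

	envoy_cluster_v3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// For peered services, allowing up to 100% of hosts to be ejected
	// avoids pinning traffic to known-bad endpoints when every host in
	// the remote cluster is failing. Envoy's default cap is 10%.
	od := &envoy_cluster_v3.OutlierDetection{
		MaxEjectionPercent: wrapperspb.UInt32(100),
	}
	fmt.Println(od.GetMaxEjectionPercent().GetValue())
}
```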
@@ -0,0 +1,5 @@
+```release-note:bug
+api: Fix a breaking change caused by renaming `QueryDatacenterOptions` to
+`QueryFailoverOptions`. This adds `QueryDatacenterOptions` back as an alias to
+`QueryFailoverOptions` and marks it as deprecated.
+```
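In Go, restoring the old name without duplicating the type is done with a type alias. A minimal sketch of the pattern the note describes (the field set is illustrative, not the full struct):

```go
package api

// QueryFailoverOptions is the current name for the prepared-query
// failover configuration.
type QueryFailoverOptions struct {
	NearestN    int
	Datacenters []string
}

// QueryDatacenterOptions is restored for backward compatibility: an
// alias, so existing callers keep compiling against the same type.
//
// Deprecated: use QueryFailoverOptions instead.
type QueryDatacenterOptions = QueryFailoverOptions
```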
@@ -0,0 +1,3 @@
+```release-note:feature
+peering: Add support to failover to services running on cluster peers.
+```
@@ -0,0 +1,3 @@
+```release-note:feature
+cli: Adds new subcommands for `peering` workflows. Refer to the [CLI docs](https://www.consul.io/commands/peering) for more information.
+```
@@ -0,0 +1,3 @@
+```release-note:bug
+connect: Fixed an issue where intermediate certificates could build up in the root CA because they were never being pruned after expiring.
+```
@@ -0,0 +1,3 @@
+```release-note:bug
+checks: If set, use proxy address for automatically added sidecar check instead of service address.
+```
@@ -0,0 +1,3 @@
+```release-note:feature
+http: Add new `get-or-empty` operation to the txn api. Refer to the [API docs](https://www.consul.io/api-docs/txn#kv-operations) for more information.
+```
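A sketch of driving the new verb through the Go API client (the client setup and key name are assumptions; the `api.KVGetOrEmpty` constant comes from the api-package changes in this commit):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Unlike KVGet, KVGetOrEmpty does not fail the transaction when the
	// key is missing; it returns an entry with a nil Value instead.
	ops := api.TxnOps{
		{KV: &api.KVTxnOp{Verb: api.KVGetOrEmpty, Key: "foo/maybe-missing"}},
	}
	ok, resp, _, err := client.Txn().Txn(ops, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("committed:", ok)
	for _, r := range resp.Results {
		fmt.Printf("%s => %q\n", r.KV.Key, r.KV.Value)
	}
}
```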
@@ -0,0 +1,3 @@
+```release-note:improvement
+snapshot agent: **(Enterprise only)** Add support for path-based addressing when using s3 backend.
+```
@@ -816,7 +816,7 @@ jobs:
       # Get go binary from workspace
       - attach_workspace:
           at: .
-      # Build the consul-dev image from the already built binary
+      # Build the consul:local image from the already built binary
       - run:
          command: |
            sudo rm -rf /usr/local/go

@@ -887,8 +887,8 @@ jobs:
       - attach_workspace:
           at: .
       - run: *install-gotestsum
-      # Build the consul-dev image from the already built binary
-      - run: docker build -t consul-dev -f ./build-support/docker/Consul-Dev.dockerfile .
+      # Build the consul:local image from the already built binary
+      - run: docker build -t consul:local -f ./build-support/docker/Consul-Dev.dockerfile .
       - run:
           name: Envoy Integration Tests
           command: |

@@ -902,6 +902,7 @@ jobs:
             GOTESTSUM_JUNITFILE: /tmp/test-results/results.xml
             GOTESTSUM_FORMAT: standard-verbose
             COMPOSE_INTERACTIVE_NO_CLI: 1
+            LAMBDA_TESTS_ENABLED: "true"
             # tput complains if this isn't set to something.
             TERM: ansi
       - store_artifacts:
@@ -16,7 +16,7 @@ jobs:
   backport:
     if: github.event.pull_request.merged
     runs-on: ubuntu-latest
-    container: hashicorpdev/backport-assistant:0.2.3
+    container: hashicorpdev/backport-assistant:0.2.5
     steps:
       - name: Run Backport Assistant for stable-website
         run: |

@@ -24,6 +24,7 @@ jobs:
         env:
           BACKPORT_LABEL_REGEXP: "type/docs-(?P<target>cherrypick)"
           BACKPORT_TARGET_TEMPLATE: "stable-website"
+          BACKPORT_MERGE_COMMIT: true
           GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
       - name: Backport changes to latest release branch
         run: |
@@ -8,6 +8,8 @@ linters:
   - ineffassign
   - unparam
   - forbidigo
+  - gomodguard
+  - depguard

 issues:
   # Disable the default exclude list so that all excludes are explicitly

@@ -75,6 +77,30 @@ linters-settings:
     # Exclude godoc examples from forbidigo checks.
     # Default: true
     exclude_godoc_examples: false
+  gomodguard:
+    blocked:
+      # List of blocked modules.
+      modules:
+        # Blocked module.
+        - github.com/hashicorp/net-rpc-msgpackrpc:
+            recommendations:
+              - github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc
+        - github.com/hashicorp/go-msgpack:
+            recommendations:
+              - github.com/hashicorp/consul-net-rpc/go-msgpack
+
+  depguard:
+    list-type: denylist
+    include-go-root: true
+    # A list of packages for the list type specified.
+    # Default: []
+    packages:
+      - net/rpc
+    # A list of packages for the list type specified.
+    # Specify an error message to output when a denied package is used.
+    # Default: []
+    packages-with-error-message:
+      - net/rpc: 'only use forked copy in github.com/hashicorp/consul-net-rpc/net/rpc'

 run:
   timeout: 10m
GNUmakefile (48 lines changed)
@@ -16,6 +16,7 @@ PROTOC_GO_INJECT_TAG_VERSION='v1.3.0'

 GOTAGS ?=
 GOPATH=$(shell go env GOPATH)
+GOARCH?=$(shell go env GOARCH)
 MAIN_GOPATH=$(shell go env GOPATH | cut -d: -f1)

 export PATH := $(PWD)/bin:$(GOPATH)/bin:$(PATH)

@@ -129,7 +130,7 @@ export GOLDFLAGS

 # Allow skipping docker build during integration tests in CI since we already
 # have a built binary
-ENVOY_INTEG_DEPS?=dev-docker
+ENVOY_INTEG_DEPS?=docker-envoy-integ
 ifdef SKIP_DOCKER_BUILD
 ENVOY_INTEG_DEPS=noop
 endif

@@ -152,7 +153,28 @@ dev-docker: linux
 	@docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null
 	@echo "Building Consul Development container - $(CONSUL_DEV_IMAGE)"
 	# 'consul:local' tag is needed to run the integration tests
-	@DOCKER_DEFAULT_PLATFORM=linux/amd64 docker build $(NOCACHE) $(QUIET) -t '$(CONSUL_DEV_IMAGE)' -t 'consul:local' --build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) $(CURDIR)/pkg/bin/linux_amd64 -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile
+	@docker buildx use default && docker buildx build -t 'consul:local' \
+		--platform linux/$(GOARCH) \
+		--build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \
+		--load \
+		-f $(CURDIR)/build-support/docker/Consul-Dev-Multiarch.dockerfile $(CURDIR)/pkg/bin/
+
+check-remote-dev-image-env:
+ifndef REMOTE_DEV_IMAGE
+	$(error REMOTE_DEV_IMAGE is undefined: set this image to <your_docker_repo>/<your_docker_image>:<image_tag>, e.g. hashicorp/consul-k8s-dev:latest)
+endif
+
+remote-docker: check-remote-dev-image-env
+	$(MAKE) GOARCH=amd64 linux
+	$(MAKE) GOARCH=arm64 linux
+	@echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)"
+	@docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null
+	@echo "Building and Pushing Consul Development container - $(REMOTE_DEV_IMAGE)"
+	@docker buildx use default && docker buildx build -t '$(REMOTE_DEV_IMAGE)' \
+		--platform linux/amd64,linux/arm64 \
+		--build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \
+		--push \
+		-f $(CURDIR)/build-support/docker/Consul-Dev-Multiarch.dockerfile $(CURDIR)/pkg/bin/

 # In CircleCI, the linux binary will be attached from a previous step at bin/. This make target
 # should only run in CI and not locally.

@@ -174,10 +196,10 @@ ifeq ($(CIRCLE_BRANCH), main)
 	@docker push $(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_DOCKER_IMAGE_NAME):latest
 endif

-# linux builds a linux binary independent of the source platform
+# linux builds a linux binary compatible with the source platform
 linux:
-	@mkdir -p ./pkg/bin/linux_amd64
-	CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ./pkg/bin/linux_amd64 -ldflags "$(GOLDFLAGS)" -tags "$(GOTAGS)"
+	@mkdir -p ./pkg/bin/linux_$(GOARCH)
+	CGO_ENABLED=0 GOOS=linux GOARCH=$(GOARCH) go build -o ./pkg/bin/linux_$(GOARCH) -ldflags "$(GOLDFLAGS)" -tags "$(GOTAGS)"

 # dist builds binaries for all platforms and packages them for distribution
 dist:

@@ -324,8 +346,22 @@ consul-docker: go-build-image
 ui-docker: ui-build-image
 	@$(SHELL) $(CURDIR)/build-support/scripts/build-docker.sh ui

+# Build image used to run integration tests locally.
+docker-envoy-integ:
+	$(MAKE) GOARCH=amd64 linux
+	docker build \
+		--platform linux/amd64 $(NOCACHE) $(QUIET) \
+		-t 'consul:local' \
+		--build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \
+		$(CURDIR)/pkg/bin/linux_amd64 \
+		-f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile
+
+# Run integration tests.
+# Use GO_TEST_FLAGS to run specific tests:
+#    make test-envoy-integ GO_TEST_FLAGS="-run TestEnvoy/case-basic"
+# NOTE: Always uses amd64 images, even when running on M1 macs, to match CI/CD environment.
 test-envoy-integ: $(ENVOY_INTEG_DEPS)
-	@go test -v -timeout=30m -tags integration ./test/integration/connect/envoy
+	@go test -v -timeout=30m -tags integration $(GO_TEST_FLAGS) ./test/integration/connect/envoy

 .PHONY: test-compat-integ
 test-compat-integ: dev-docker
@@ -3786,7 +3786,7 @@ func testAgent_RegisterService_TranslateKeys(t *testing.T, extraHCL string) {
 			fmt.Println("TCP Check:= ", v)
 		}
 		if hasNoCorrectTCPCheck {
-			t.Fatalf("Did not find the expected TCP Healtcheck '%s' in %#v ", tt.expectedTCPCheckStart, a.checkTCPs)
+			t.Fatalf("Did not find the expected TCP Healthcheck '%s' in %#v ", tt.expectedTCPCheckStart, a.checkTCPs)
 		}
 		require.Equal(t, sidecarSvc, gotSidecar)
 	})
@@ -1399,8 +1399,9 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
 				Protocol:    "http",
 				MeshGateway: structs.MeshGatewayConfig{Mode: structs.MeshGatewayModeRemote},
 				PassiveHealthCheck: &structs.PassiveHealthCheck{
 					Interval:    10,
 					MaxFailures: 2,
+					EnforcingConsecutive5xx: uintPointer(60),
 				},
 			},
 			Overrides: []*structs.UpstreamConfig{

@@ -1432,8 +1433,9 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
 					Upstream: wildcard,
 					Config: map[string]interface{}{
 						"passive_health_check": map[string]interface{}{
 							"Interval":    int64(10),
 							"MaxFailures": int64(2),
+							"EnforcingConsecutive5xx": int64(60),
 						},
 						"mesh_gateway": map[string]interface{}{
 							"Mode": "remote",

@@ -1445,8 +1447,9 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
 					Upstream: mysql,
 					Config: map[string]interface{}{
 						"passive_health_check": map[string]interface{}{
 							"Interval":    int64(10),
 							"MaxFailures": int64(2),
+							"EnforcingConsecutive5xx": int64(60),
 						},
 						"mesh_gateway": map[string]interface{}{
 							"Mode": "local",

@@ -2507,3 +2510,7 @@ func Test_gateWriteToSecondary_AllowedKinds(t *testing.T) {
 		})
 	}
 }
+
+func uintPointer(v uint32) *uint32 {
+	return &v
+}
@@ -153,64 +153,87 @@ func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs.
 		&args.QueryOptions,
 		&reply.QueryMeta,
 		func(ws memdb.WatchSet, state *state.Store) error {
-			// we don't support calling this endpoint for a specific peer
-			if args.PeerName != "" {
-				return fmt.Errorf("this endpoint does not support specifying a peer: %q", args.PeerName)
-			}
-
 			// this maxIndex will be the max of the ServiceDump calls and the PeeringList call
 			var maxIndex uint64

-			// get a local dump for services
-			index, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, structs.DefaultPeerKeyword)
-			if err != nil {
-				return fmt.Errorf("could not get a service dump for local nodes: %w", err)
-			}
-
-			if index > maxIndex {
-				maxIndex = index
-			}
-			reply.Nodes = nodes
-
-			// get a list of all peerings
-			index, listedPeerings, err := state.PeeringList(ws, args.EnterpriseMeta)
-			if err != nil {
-				return fmt.Errorf("could not list peers for service dump %w", err)
-			}
-
-			if index > maxIndex {
-				maxIndex = index
-			}
-
-			for _, p := range listedPeerings {
-				index, importedNodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, p.Name)
-				if err != nil {
-					return fmt.Errorf("could not get a service dump for peer %q: %w", p.Name, err)
-				}
-
-				if index > maxIndex {
-					maxIndex = index
-				}
-				reply.ImportedNodes = append(reply.ImportedNodes, importedNodes...)
-			}
-
-			// Get, store, and filter gateway services
-			idx, gatewayServices, err := state.DumpGatewayServices(ws)
-			if err != nil {
-				return err
-			}
-			reply.Gateways = gatewayServices
-
-			if idx > maxIndex {
-				maxIndex = idx
-			}
-			reply.Index = maxIndex
-
-			raw, err := filter.Execute(reply.Nodes)
-			if err != nil {
-				return fmt.Errorf("could not filter local service dump: %w", err)
-			}
-			reply.Nodes = raw.(structs.CheckServiceNodes)
+			// If PeerName is not empty, we return only the imported services from that peer
+			if args.PeerName != "" {
+				// get a local dump for services
+				index, nodes, err := state.ServiceDump(ws,
+					args.ServiceKind,
+					args.UseServiceKind,
+					// Note we fetch imported services with wildcard namespace because imported services' namespaces
+					// are in a different locality; regardless of our local namespace, we return all imported services
+					// of the local partition.
+					args.EnterpriseMeta.WithWildcardNamespace(),
+					args.PeerName)
+				if err != nil {
+					return fmt.Errorf("could not get a service dump for peer %q: %w", args.PeerName, err)
+				}
+
+				if index > maxIndex {
+					maxIndex = index
+				}
+				reply.Index = maxIndex
+				reply.ImportedNodes = nodes
+
+			} else {
+				// otherwise return both local and all imported services
+
+				// get a local dump for services
+				index, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, structs.DefaultPeerKeyword)
+				if err != nil {
+					return fmt.Errorf("could not get a service dump for local nodes: %w", err)
+				}
+
+				if index > maxIndex {
+					maxIndex = index
+				}
+				reply.Nodes = nodes
+
+				// get a list of all peerings
+				index, listedPeerings, err := state.PeeringList(ws, args.EnterpriseMeta)
+				if err != nil {
+					return fmt.Errorf("could not list peers for service dump %w", err)
+				}
+
+				if index > maxIndex {
+					maxIndex = index
+				}
+
+				for _, p := range listedPeerings {
+					// Note we fetch imported services with wildcard namespace because imported services' namespaces
+					// are in a different locality; regardless of our local namespace, we return all imported services
+					// of the local partition.
+					index, importedNodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, args.EnterpriseMeta.WithWildcardNamespace(), p.Name)
+					if err != nil {
+						return fmt.Errorf("could not get a service dump for peer %q: %w", p.Name, err)
+					}
+
+					if index > maxIndex {
+						maxIndex = index
+					}
+					reply.ImportedNodes = append(reply.ImportedNodes, importedNodes...)
+				}
+
+				// Get, store, and filter gateway services
+				idx, gatewayServices, err := state.DumpGatewayServices(ws)
+				if err != nil {
+					return err
+				}
+				reply.Gateways = gatewayServices
+
+				if idx > maxIndex {
+					maxIndex = idx
+				}
+				reply.Index = maxIndex
+
+				raw, err := filter.Execute(reply.Nodes)
+				if err != nil {
+					return fmt.Errorf("could not filter local service dump: %w", err)
+				}
+				reply.Nodes = raw.(structs.CheckServiceNodes)
+			}

 			importedRaw, err := filter.Execute(reply.ImportedNodes)
 			if err != nil {
@@ -49,7 +49,7 @@ func kvsPreApply(logger hclog.Logger, srv *Server, authz resolver.Result, op api
 			return false, err
 		}

-	case api.KVGet, api.KVGetTree:
+	case api.KVGet, api.KVGetTree, api.KVGetOrEmpty:
 		// Filtering for GETs is done on the output side.

 	case api.KVCheckSession, api.KVCheckIndex:
@@ -1098,11 +1098,36 @@ func setLeafSigningCert(caRoot *structs.CARoot, pem string) error {
 		return fmt.Errorf("error parsing leaf signing cert: %w", err)
 	}

+	if err := pruneExpiredIntermediates(caRoot); err != nil {
+		return err
+	}
+
 	caRoot.IntermediateCerts = append(caRoot.IntermediateCerts, pem)
 	caRoot.SigningKeyID = connect.EncodeSigningKeyID(cert.SubjectKeyId)
 	return nil
 }

+// pruneExpiredIntermediates removes expired intermediate certificates
+// from the given CARoot.
+func pruneExpiredIntermediates(caRoot *structs.CARoot) error {
+	var newIntermediates []string
+	now := time.Now()
+	for _, intermediatePEM := range caRoot.IntermediateCerts {
+		cert, err := connect.ParseCert(intermediatePEM)
+		if err != nil {
+			return fmt.Errorf("error parsing leaf signing cert: %w", err)
+		}
+
+		// Only keep the intermediate cert if it's still valid.
+		if cert.NotAfter.After(now) {
+			newIntermediates = append(newIntermediates, intermediatePEM)
+		}
+	}
+
+	caRoot.IntermediateCerts = newIntermediates
+	return nil
+}
+
 // runRenewIntermediate periodically attempts to renew the intermediate cert.
 func (c *CAManager) runRenewIntermediate(ctx context.Context) error {
 	isPrimary := c.serverConf.Datacenter == c.serverConf.PrimaryDatacenter
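To make the pruning criterion concrete, here is a runnable, self-contained sketch of the same keep-if-still-valid filter applied to freshly generated throwaway certificates (everything here is illustrative; the real code above walks PEM strings on the CARoot via `connect.ParseCert`):

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"fmt"
	"math/big"
	"time"
)

// makeCert returns a self-signed certificate with the given expiry.
func makeCert(notAfter time.Time) *x509.Certificate {
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		NotBefore:    time.Now().Add(-time.Hour),
		NotAfter:     notAfter,
	}
	der, _ := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	cert, _ := x509.ParseCertificate(der)
	return cert
}

func main() {
	now := time.Now()
	certs := []*x509.Certificate{
		makeCert(now.Add(-time.Minute)), // already expired: pruned
		makeCert(now.Add(time.Hour)),    // still valid: kept
	}

	// Same filter as pruneExpiredIntermediates: keep only certs whose
	// NotAfter is still in the future.
	var kept []*x509.Certificate
	for _, c := range certs {
		if c.NotAfter.After(now) {
			kept = append(kept, c)
		}
	}
	fmt.Println("kept", len(kept), "of", len(certs)) // kept 1 of 2
}
```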
@@ -435,7 +435,6 @@ func TestCAManager_SignCertificate_WithExpiredCert(t *testing.T) {
 		errorMsg    string
 	}{
 		{"intermediate valid", time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), false, ""},
-		{"intermediate expired", time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(-2, 0, 0), time.Now().AddDate(0, 0, -1), true, "intermediate expired: certificate expired, expiration date"},
 		{"root expired", time.Now().AddDate(-2, 0, 0), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), true, "root expired: certificate expired, expiration date"},
 		// a cert that is not yet valid is ok, assume it will be valid soon enough
 		{"intermediate in the future", time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(0, 0, 1), time.Now().AddDate(0, 0, 2), false, ""},
@@ -401,6 +401,18 @@ func TestCAManager_RenewIntermediate_Vault_Primary(t *testing.T) {
 	err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", &req, &cert)
 	require.NoError(t, err)
 	verifyLeafCert(t, activeRoot, cert.CertPEM)
+
+	// Wait for the primary's old intermediate to be pruned after expiring.
+	oldIntermediate := activeRoot.IntermediateCerts[0]
+	retry.Run(t, func(r *retry.R) {
+		store := s1.caManager.delegate.State()
+		_, storedRoot, err := store.CARootActive(nil)
+		r.Check(err)
+
+		if storedRoot.IntermediateCerts[0] == oldIntermediate {
+			r.Fatal("old intermediate should be gone")
+		}
+	})
 }

 func patchIntermediateCertRenewInterval(t *testing.T) {

@@ -516,6 +528,18 @@ func TestCAManager_RenewIntermediate_Secondary(t *testing.T) {
 	err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", &req, &cert)
 	require.NoError(t, err)
 	verifyLeafCert(t, activeRoot, cert.CertPEM)
+
+	// Wait for dc2's old intermediate to be pruned after expiring.
+	oldIntermediate := activeRoot.IntermediateCerts[0]
+	retry.Run(t, func(r *retry.R) {
+		store := s2.caManager.delegate.State()
+		_, storedRoot, err := store.CARootActive(nil)
+		r.Check(err)
+
+		if storedRoot.IntermediateCerts[0] == oldIntermediate {
+			r.Fatal("old intermediate should be gone")
+		}
+	})
 }

 func TestConnectCA_ConfigurationSet_RootRotation_Secondary(t *testing.T) {
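Both additions lean on the polling helper from Consul's SDK. A minimal standalone illustration of the pattern, assuming the `testutil/retry` package (the time-based condition is a stand-in for "the store eventually converges"):

```go
package main

import (
	"testing"
	"time"

	"github.com/hashicorp/consul/sdk/testutil/retry"
)

// TestEventually shows the retry.Run pattern used above: the body is
// re-executed until it stops calling r.Fatal (or r.Check with an error),
// or the retryer gives up and fails the test with the last error.
func TestEventually(t *testing.T) {
	deadline := time.Now().Add(100 * time.Millisecond)
	retry.Run(t, func(r *retry.R) {
		if time.Now().Before(deadline) {
			r.Fatal("condition not met yet") // triggers a retry, not an immediate failure
		}
	})
}
```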
@@ -112,7 +112,7 @@ func (s *Server) emitPeeringMetricsOnce(logger hclog.Logger, metricsImpl *metric
 		if status.NeverConnected {
 			metricsImpl.SetGaugeWithLabels(leaderHealthyPeeringKey, float32(math.NaN()), labels)
 		} else {
-			healthy := status.IsHealthy()
+			healthy := s.peerStreamServer.Tracker.IsHealthy(status)
 			healthyInt := 0
 			if healthy {
 				healthyInt = 1

@@ -305,7 +305,7 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me

 	logger.Trace("establishing stream to peer")

-	streamStatus, err := s.peerStreamTracker.Register(peer.ID)
+	streamStatus, err := s.peerStreamServer.Tracker.Register(peer.ID)
 	if err != nil {
 		return fmt.Errorf("failed to register stream: %v", err)
 	}
@@ -40,6 +40,7 @@ func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) {
 		testLeader_PeeringSync_Lifecycle_ClientDeletion(t, true)
 	})
 }
+
 func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS bool) {
 	if testing.Short() {
 		t.Skip("too slow for testing.Short")

@@ -137,9 +138,11 @@ func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS boo

 	// Delete the peering to trigger the termination sequence.
 	deleted := &pbpeering.Peering{
 		ID:   p.Peering.ID,
 		Name: "my-peer-acceptor",
-		DeletedAt: structs.TimeToProto(time.Now()),
+		State:               pbpeering.PeeringState_DELETING,
+		PeerServerAddresses: p.Peering.PeerServerAddresses,
+		DeletedAt:           structs.TimeToProto(time.Now()),
 	}
 	require.NoError(t, dialer.fsm.State().PeeringWrite(2000, &pbpeering.PeeringWriteRequest{Peering: deleted}))
 	dialer.logger.Trace("deleted peering for my-peer-acceptor")

@@ -262,6 +265,7 @@ func testLeader_PeeringSync_Lifecycle_AcceptorDeletion(t *testing.T, enableTLS b
 	deleted := &pbpeering.Peering{
 		ID:   p.Peering.PeerID,
 		Name: "my-peer-dialer",
+		State:     pbpeering.PeeringState_DELETING,
 		DeletedAt: structs.TimeToProto(time.Now()),
 	}

@@ -431,6 +435,7 @@ func TestLeader_Peering_DeferredDeletion(t *testing.T) {
 		Peering: &pbpeering.Peering{
 			ID:   peerID,
 			Name: peerName,
+			State:     pbpeering.PeeringState_DELETING,
 			DeletedAt: structs.TimeToProto(time.Now()),
 		},
 	}))

@@ -1165,6 +1170,7 @@ func TestLeader_Peering_NoDeletionWhenPeeringDisabled(t *testing.T) {
 		Peering: &pbpeering.Peering{
 			ID:   peerID,
 			Name: peerName,
+			State:     pbpeering.PeeringState_DELETING,
 			DeletedAt: structs.TimeToProto(time.Now()),
 		},
 	}))

@@ -1216,7 +1222,7 @@ func TestLeader_Peering_NoEstablishmentWhenPeeringDisabled(t *testing.T) {
 	}))

 	require.Never(t, func() bool {
-		_, found := s1.peerStreamTracker.StreamStatus(peerID)
+		_, found := s1.peerStreamServer.StreamStatus(peerID)
 		return found
 	}, 7*time.Second, 1*time.Second, "peering should not have been established")
 }
@@ -370,9 +370,9 @@ type Server struct {

 	// peerStreamServer is a server used to handle peering streams from external clusters.
 	peerStreamServer *peerstream.Server
+
 	// peeringServer handles peering RPC requests internal to this cluster, like generating peering tokens.
 	peeringServer *peering.Server
-	peerStreamTracker *peerstream.Tracker

 	// embedded struct to hold all the enterprise specific data
 	EnterpriseServer

@@ -728,11 +728,9 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server) (*Ser
 	})
 	serverDiscoveryServer.Register(externalGRPCServer)

-	s.peerStreamTracker = peerstream.NewTracker()
 	s.peeringBackend = NewPeeringBackend(s)
 	s.peerStreamServer = peerstream.NewServer(peerstream.Config{
 		Backend: s.peeringBackend,
-		Tracker: s.peerStreamTracker,
 		GetStore: func() peerstream.StateStore { return s.FSM().State() },
 		Logger:   logger.Named("grpc-api.peerstream"),
 		ACLResolver: s.ACLResolver,

@@ -746,7 +744,6 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server) (*Ser
 			return s.ForwardGRPC(s.grpcConnPool, info, fn)
 		},
 	})
-	s.peerStreamTracker.SetHeartbeatTimeout(s.peerStreamServer.Config.IncomingHeartbeatTimeout)
 	s.peerStreamServer.Register(externalGRPCServer)

 	// Initialize internal gRPC server.

@@ -795,7 +792,7 @@ func newGRPCHandlerFromConfig(deps Deps, config *Config, s *Server) connHandler

 	p := peering.NewServer(peering.Config{
 		Backend: s.peeringBackend,
-		Tracker: s.peerStreamTracker,
+		Tracker: s.peerStreamServer.Tracker,
 		Logger:  deps.Logger.Named("grpc-api.peering"),
 		ForwardRPC: func(info structs.RPCInfo, fn func(*grpc.ClientConn) error) (bool, error) {
 			// Only forward the request if the dc in the request matches the server's datacenter.
@@ -535,6 +535,12 @@ func (s *Store) PeeringWrite(idx uint64, req *pbpeering.PeeringWriteRequest) err
 	if req.Peering.Name == "" {
 		return errors.New("Missing Peering Name")
 	}
+	if req.Peering.State == pbpeering.PeeringState_DELETING && (req.Peering.DeletedAt == nil || structs.IsZeroProtoTime(req.Peering.DeletedAt)) {
+		return errors.New("Missing deletion time for peering in deleting state")
+	}
+	if req.Peering.DeletedAt != nil && !structs.IsZeroProtoTime(req.Peering.DeletedAt) && req.Peering.State != pbpeering.PeeringState_DELETING {
+		return fmt.Errorf("Unexpected state for peering with deletion time: %s", pbpeering.PeeringStateToAPI(req.Peering.State))
+	}

 	// Ensure the name is unique (cannot conflict with another peering with a different ID).
 	_, existing, err := peeringReadTxn(tx, nil, Query{

@@ -546,11 +552,32 @@ func (s *Store) PeeringWrite(idx uint64, req *pbpeering.PeeringWriteRequest) err
 	}

 	if existing != nil {
+		if req.Peering.ShouldDial() != existing.ShouldDial() {
+			return fmt.Errorf("Cannot switch peering dialing mode from %t to %t", existing.ShouldDial(), req.Peering.ShouldDial())
+		}
+
 		if req.Peering.ID != existing.ID {
 			return fmt.Errorf("A peering already exists with the name %q and a different ID %q", req.Peering.Name, existing.ID)
 		}
+
+		// Nothing to do if our peer wants to terminate the peering but the peering is already marked for deletion.
+		if existing.State == pbpeering.PeeringState_DELETING && req.Peering.State == pbpeering.PeeringState_TERMINATED {
+			return nil
+		}
+
+		// No-op deletion
+		if existing.State == pbpeering.PeeringState_DELETING && req.Peering.State == pbpeering.PeeringState_DELETING {
+			return nil
+		}
+
+		// No-op termination
+		if existing.State == pbpeering.PeeringState_TERMINATED && req.Peering.State == pbpeering.PeeringState_TERMINATED {
+			return nil
+		}
+
 		// Prevent modifications to Peering marked for deletion.
-		if !existing.IsActive() {
+		// This blocks generating new peering tokens or re-establishing the peering until the peering is done deleting.
+		if existing.State == pbpeering.PeeringState_DELETING {
 			return fmt.Errorf("cannot write to peering that is marked for deletion")
 		}

@@ -582,8 +609,8 @@ func (s *Store) PeeringWrite(idx uint64, req *pbpeering.PeeringWriteRequest) err
 		req.Peering.ModifyIndex = idx
 	}

-	// Ensure associated secrets are cleaned up when a peering is marked for deletion.
-	if req.Peering.State == pbpeering.PeeringState_DELETING {
+	// Ensure associated secrets are cleaned up when a peering is marked for deletion or terminated.
+	if !req.Peering.IsActive() {
 		if err := peeringSecretsDeleteTxn(tx, req.Peering.ID, req.Peering.ShouldDial()); err != nil {
 			return fmt.Errorf("failed to delete peering secrets: %w", err)
 		}
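The new guards amount to a small state machine over peering writes. A distilled sketch of the rule set, transcribed from the hunk above (the states and the `validateTransition` helper are hypothetical simplifications, not Consul API):

```go
package main

import (
	"errors"
	"fmt"
)

type State int

const (
	Establishing State = iota
	Failing
	Terminated
	Deleting
)

// validateTransition distills the guard clauses added to PeeringWrite:
// writes against a terminated or deleting peering are either no-ops or
// rejected outright.
func validateTransition(existing, requested State) (noop bool, err error) {
	switch {
	case existing == Deleting && requested == Terminated:
		return true, nil // peer asked to terminate, but we're already deleting
	case existing == Deleting && requested == Deleting:
		return true, nil // no-op deletion
	case existing == Terminated && requested == Terminated:
		return true, nil // no-op termination
	case existing == Deleting:
		return false, errors.New("cannot write to peering that is marked for deletion")
	}
	return false, nil
}

func main() {
	noop, err := validateTransition(Deleting, Terminated)
	fmt.Println(noop, err) // true <nil>
}
```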
@@ -950,6 +950,7 @@ func TestStore_Peering_Watch(t *testing.T) {
 			Peering: &pbpeering.Peering{
 				ID:   testFooPeerID,
 				Name: "foo",
+				State:     pbpeering.PeeringState_DELETING,
 				DeletedAt: structs.TimeToProto(time.Now()),
 			},
 		})

@@ -976,6 +977,7 @@ func TestStore_Peering_Watch(t *testing.T) {
 		err := s.PeeringWrite(lastIdx, &pbpeering.PeeringWriteRequest{Peering: &pbpeering.Peering{
 			ID:   testBarPeerID,
 			Name: "bar",
+			State:     pbpeering.PeeringState_DELETING,
 			DeletedAt: structs.TimeToProto(time.Now()),
 		},
 		})

@@ -1077,6 +1079,7 @@ func TestStore_PeeringList_Watch(t *testing.T) {
 			Peering: &pbpeering.Peering{
 				ID:   testFooPeerID,
 				Name: "foo",
+				State:     pbpeering.PeeringState_DELETING,
 				DeletedAt: structs.TimeToProto(time.Now()),
 				Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
 			},

@@ -1112,16 +1115,22 @@ func TestStore_PeeringWrite(t *testing.T) {
 	// Each case depends on the previous.
 	s := NewStateStore(nil)

+	testTime := time.Now()
+
+	type expectations struct {
+		peering *pbpeering.Peering
+		secrets *pbpeering.PeeringSecrets
+		err     string
+	}
 	type testcase struct {
 		name          string
 		input         *pbpeering.PeeringWriteRequest
-		expectSecrets *pbpeering.PeeringSecrets
-		expectErr     string
+		expect        expectations
 	}
 	run := func(t *testing.T, tc testcase) {
 		err := s.PeeringWrite(10, tc.input)
-		if tc.expectErr != "" {
-			testutil.RequireErrorContains(t, err, tc.expectErr)
+		if tc.expect.err != "" {
+			testutil.RequireErrorContains(t, err, tc.expect.err)
 			return
 		}
 		require.NoError(t, err)

@@ -1133,52 +1142,176 @@ func TestStore_PeeringWrite(t *testing.T) {
 		_, p, err := s.PeeringRead(nil, q)
 		require.NoError(t, err)
 		require.NotNil(t, p)
-		require.Equal(t, tc.input.Peering.State, p.State)
-		require.Equal(t, tc.input.Peering.Name, p.Name)
+		require.Equal(t, tc.expect.peering.State, p.State)
+		require.Equal(t, tc.expect.peering.Name, p.Name)
+		require.Equal(t, tc.expect.peering.Meta, p.Meta)
+		if tc.expect.peering.DeletedAt != nil {
+			require.Equal(t, tc.expect.peering.DeletedAt, p.DeletedAt)
+		}

 		secrets, err := s.PeeringSecretsRead(nil, tc.input.Peering.ID)
 		require.NoError(t, err)
-		prototest.AssertDeepEqual(t, tc.expectSecrets, secrets)
+		prototest.AssertDeepEqual(t, tc.expect.secrets, secrets)
 	}
 	tcs := []testcase{
 		{
 			name: "create baz",
 			input: &pbpeering.PeeringWriteRequest{
 				Peering: &pbpeering.Peering{
-					ID:        testBazPeerID,
-					Name:      "baz",
-					Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
+					ID:                  testBazPeerID,
+					Name:                "baz",
+					State:               pbpeering.PeeringState_ESTABLISHING,
+					PeerServerAddresses: []string{"localhost:8502"},
+					Partition:           structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
 				},
 				SecretsRequest: &pbpeering.SecretsWriteRequest{
 					PeerID: testBazPeerID,
-					Request: &pbpeering.SecretsWriteRequest_GenerateToken{
-						GenerateToken: &pbpeering.SecretsWriteRequest_GenerateTokenRequest{
-							EstablishmentSecret: testBazSecretID,
+					Request: &pbpeering.SecretsWriteRequest_Establish{
+						Establish: &pbpeering.SecretsWriteRequest_EstablishRequest{
+							ActiveStreamSecret: testBazSecretID,
 						},
 					},
 				},
 			},
-			expectSecrets: &pbpeering.PeeringSecrets{
-				PeerID: testBazPeerID,
-				Establishment: &pbpeering.PeeringSecrets_Establishment{
-					SecretID: testBazSecretID,
+			expect: expectations{
+				peering: &pbpeering.Peering{
+					ID:    testBazPeerID,
+					Name:  "baz",
+					State: pbpeering.PeeringState_ESTABLISHING,
+				},
+				secrets: &pbpeering.PeeringSecrets{
+					PeerID: testBazPeerID,
+					Stream: &pbpeering.PeeringSecrets_Stream{
+						ActiveSecretID: testBazSecretID,
+					},
 				},
 			},
 		},
+		{
+			name: "cannot change ID for baz",
+			input: &pbpeering.PeeringWriteRequest{
+				Peering: &pbpeering.Peering{
+					ID:                  "123",
+					Name:                "baz",
+					State:               pbpeering.PeeringState_FAILING,
+					PeerServerAddresses: []string{"localhost:8502"},
+					Partition:           structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
+				},
+			},
+			expect: expectations{
+				err: `A peering already exists with the name "baz" and a different ID`,
+			},
+		},
+		{
+			name: "cannot change dialer status for baz",
+			input: &pbpeering.PeeringWriteRequest{
+				Peering: &pbpeering.Peering{
+					ID:    "123",
+					Name:  "baz",
+					State: pbpeering.PeeringState_FAILING,
+					// Excluding the peer server addresses leads to baz not being considered a dialer.
+					// PeerServerAddresses: []string{"localhost:8502"},
+					Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
+				},
+			},
+			expect: expectations{
+				err: "Cannot switch peering dialing mode from true to false",
+			},
+		},
 		{
 			name: "update baz",
 			input: &pbpeering.PeeringWriteRequest{
 				Peering: &pbpeering.Peering{
-					ID:        testBazPeerID,
-					Name:      "baz",
-					State:     pbpeering.PeeringState_FAILING,
-					Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
+					ID:                  testBazPeerID,
+					Name:                "baz",
+					State:               pbpeering.PeeringState_FAILING,
+					PeerServerAddresses: []string{"localhost:8502"},
+					Partition:           structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
 				},
 			},
-			expectSecrets: &pbpeering.PeeringSecrets{
-				PeerID: testBazPeerID,
-				Establishment: &pbpeering.PeeringSecrets_Establishment{
-					SecretID: testBazSecretID,
+			expect: expectations{
+				peering: &pbpeering.Peering{
+					ID:    testBazPeerID,
+					Name:  "baz",
+					State: pbpeering.PeeringState_FAILING,
+				},
+				secrets: &pbpeering.PeeringSecrets{
+					PeerID: testBazPeerID,
+					Stream: &pbpeering.PeeringSecrets_Stream{
+						ActiveSecretID: testBazSecretID,
+					},
 				},
 			},
 		},
+		{
+			name: "if no state was included in request it is inherited from existing",
+			input: &pbpeering.PeeringWriteRequest{
+				Peering: &pbpeering.Peering{
+					ID:   testBazPeerID,
+					Name: "baz",
+					// Send undefined state.
+					// State: pbpeering.PeeringState_FAILING,
+					PeerServerAddresses: []string{"localhost:8502"},
+					Partition:           structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
+				},
+			},
+			expect: expectations{
+				peering: &pbpeering.Peering{
+					ID:   testBazPeerID,
+					Name: "baz",
+					// Previous failing state is picked up.
+					State: pbpeering.PeeringState_FAILING,
+				},
+				secrets: &pbpeering.PeeringSecrets{
+					PeerID: testBazPeerID,
+					Stream: &pbpeering.PeeringSecrets_Stream{
+						ActiveSecretID: testBazSecretID,
+					},
+				},
+			},
+		},
+		{
+			name: "mark baz as terminated",
+			input: &pbpeering.PeeringWriteRequest{
+				Peering: &pbpeering.Peering{
+					ID:                  testBazPeerID,
+					Name:                "baz",
+					State:               pbpeering.PeeringState_TERMINATED,
+					PeerServerAddresses: []string{"localhost:8502"},
+					Partition:           structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
+				},
+			},
+			expect: expectations{
+				peering: &pbpeering.Peering{
+					ID:    testBazPeerID,
+					Name:  "baz",
+					State: pbpeering.PeeringState_TERMINATED,
+				},
+				// Secrets for baz should have been deleted
+				secrets: nil,
+			},
+		},
+		{
+			name: "cannot modify peering during no-op termination",
+			input: &pbpeering.PeeringWriteRequest{
+				Peering: &pbpeering.Peering{
+					ID:                  testBazPeerID,
+					Name:                "baz",
+					State:               pbpeering.PeeringState_TERMINATED,
+					Partition:           structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
+					PeerServerAddresses: []string{"localhost:8502"},
+
+					// Attempt to add metadata
+					Meta: map[string]string{"foo": "bar"},
+				},
+			},
+			expect: expectations{
+				peering: &pbpeering.Peering{
+					ID:    testBazPeerID,
+					Name:  "baz",
+					State: pbpeering.PeeringState_TERMINATED,
+					// Meta should be unchanged.
+					Meta: nil,
+				},
+			},
+		},

@@ -1186,42 +1319,104 @@ func TestStore_PeeringWrite(t *testing.T) {
 		{
 			name: "mark baz for deletion",
 			input: &pbpeering.PeeringWriteRequest{
 				Peering: &pbpeering.Peering{
+					ID:                  testBazPeerID,
+					Name:                "baz",
+					State:               pbpeering.PeeringState_DELETING,
+					PeerServerAddresses: []string{"localhost:8502"},
+					DeletedAt:           structs.TimeToProto(testTime),
+					Partition:           structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
+				},
+			},
+			expect: expectations{
+				peering: &pbpeering.Peering{
 					ID:        testBazPeerID,
 					Name:      "baz",
 					State:     pbpeering.PeeringState_DELETING,
-					DeletedAt: structs.TimeToProto(time.Now()),
-					Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
+					DeletedAt: structs.TimeToProto(testTime),
 				},
+				secrets: nil,
 			},
-			// Secrets for baz should have been deleted
-			expectSecrets: nil,
 		},
+		{
+			name: "deleting a deleted peering is a no-op",
+			input: &pbpeering.PeeringWriteRequest{
+				Peering: &pbpeering.Peering{
+					ID:                  testBazPeerID,
+					Name:                "baz",
+					State:               pbpeering.PeeringState_DELETING,
+					PeerServerAddresses: []string{"localhost:8502"},
+					DeletedAt:           structs.TimeToProto(time.Now()),
+					Partition:           structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
+				},
+			},
+			expect: expectations{
+				peering: &pbpeering.Peering{
+					ID:   testBazPeerID,
+					Name: "baz",
+					// Still marked as deleting at the original testTime
+					State:     pbpeering.PeeringState_DELETING,
+					DeletedAt: structs.TimeToProto(testTime),
+				},
+				// Secrets for baz should have been deleted
+				secrets: nil,
+			},
+		},
+		{
+			name: "terminating a peering marked for deletion is a no-op",
+			input: &pbpeering.PeeringWriteRequest{
+				Peering: &pbpeering.Peering{
+					ID:                  testBazPeerID,
+					Name:                "baz",
+					State:               pbpeering.PeeringState_TERMINATED,
+					PeerServerAddresses: []string{"localhost:8502"},
+					Partition:           structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
+				},
+			},
+			expect: expectations{
+				peering: &pbpeering.Peering{
+					ID:   testBazPeerID,
+					Name: "baz",
+					// Still marked as deleting
+					State: pbpeering.PeeringState_DELETING,
+				},
+				// Secrets for baz should have been deleted
+				secrets: nil,
+			},
+		},
 		{
 			name: "cannot update peering marked for deletion",
 			input: &pbpeering.PeeringWriteRequest{
 				Peering: &pbpeering.Peering{
-					ID:   testBazPeerID,
-					Name: "baz",
+					ID:                  testBazPeerID,
+					Name:                "baz",
+					PeerServerAddresses: []string{"localhost:8502"},
+					Partition:           structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
+
 					// Attempt to add metadata
 					Meta: map[string]string{
 						"source": "kubernetes",
 					},
-					Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
 				},
 			},
-			expectErr: "cannot write to peering that is marked for deletion",
+			expect: expectations{
+				err: "cannot write to peering that is marked for deletion",
+			},
 		},
 		{
 			name: "cannot create peering marked for deletion",
 			input: &pbpeering.PeeringWriteRequest{
 				Peering: &pbpeering.Peering{
 					ID:   testFooPeerID,
 					Name: "foo",
-					DeletedAt: structs.TimeToProto(time.Now()),
-					Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
+					PeerServerAddresses: []string{"localhost:8502"},
+					State:               pbpeering.PeeringState_DELETING,
+					DeletedAt:           structs.TimeToProto(time.Now()),
+					Partition:           structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
 				},
 			},
-			expectErr: "cannot create a new peering marked for deletion",
+			expect: expectations{
+				err: "cannot create a new peering marked for deletion",
+			},
 		},
 	}
 	for _, tc := range tcs {

@@ -1246,6 +1441,7 @@ func TestStore_PeeringDelete(t *testing.T) {
 		Peering: &pbpeering.Peering{
 			ID:   testFooPeerID,
 			Name: "foo",
+			State:     pbpeering.PeeringState_DELETING,
 			DeletedAt: structs.TimeToProto(time.Now()),
 		},
 	}))

@@ -1759,6 +1955,7 @@ func TestStateStore_PeeringsForService(t *testing.T) {
 			copied := pbpeering.Peering{
 				ID:   tp.peering.ID,
 				Name: tp.peering.Name,
+				State:     pbpeering.PeeringState_DELETING,
 				DeletedAt: structs.TimeToProto(time.Now()),
 			}
 			require.NoError(t, s.PeeringWrite(lastIdx, &pbpeering.PeeringWriteRequest{Peering: &copied}))

@@ -2201,6 +2398,7 @@ func TestStore_TrustBundleListByService(t *testing.T) {
 		Peering: &pbpeering.Peering{
 			ID:   peerID1,
 			Name: "peer1",
+			State:     pbpeering.PeeringState_DELETING,
 			DeletedAt: structs.TimeToProto(time.Now()),
 		},
 	}))
@@ -60,6 +60,13 @@ func (s *Store) txnKVS(tx WriteTxn, idx uint64, op *structs.TxnKVOp) (structs.Tx
 			err = fmt.Errorf("key %q doesn't exist", op.DirEnt.Key)
 		}

+	case api.KVGetOrEmpty:
+		_, entry, err = kvsGetTxn(tx, nil, op.DirEnt.Key, op.DirEnt.EnterpriseMeta)
+		if entry == nil && err == nil {
+			entry = &op.DirEnt
+			entry.Value = nil
+		}
+
 	case api.KVGetTree:
 		var entries structs.DirEntries
 		_, entries, err = s.kvsListTxn(tx, nil, op.DirEnt.Key, op.DirEnt.EnterpriseMeta)

@@ -95,7 +102,7 @@ func (s *Store) txnKVS(tx WriteTxn, idx uint64, op *structs.TxnKVOp) (structs.Tx
 	// value (we have to clone so we don't modify the entry being used by
 	// the state store).
 	if entry != nil {
-		if op.Verb == api.KVGet {
+		if op.Verb == api.KVGet || op.Verb == api.KVGetOrEmpty {
 			result := structs.TxnResult{KV: entry}
 			return structs.TxnResults{&result}, nil
 		}
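The hunks above add a `get-or-empty` verb to the transaction state store: unlike `KVGet`, a missing key yields a result with a nil `Value` instead of failing the whole transaction. A minimal sketch of exercising the verb through the Go API client; the agent address and key name are illustrative, and this assumes a local agent is running:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}

	ops := api.TxnOps{
		&api.TxnOp{KV: &api.KVTxnOp{
			Verb: api.KVGetOrEmpty, // added by this change; KVGet would abort the txn here
			Key:  "foo/not-exists",
		}},
	}

	ok, resp, _, err := client.Txn().Txn(ops, nil)
	if err != nil {
		panic(err)
	}
	// With get-or-empty, a missing key still produces a result whose Value is nil.
	fmt.Println(ok, resp.Results[0].KV.Value == nil)
}
```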
@@ -577,6 +577,22 @@ func TestStateStore_Txn_KVS(t *testing.T) {
 				},
 			},
 		},
+		&structs.TxnOp{
+			KV: &structs.TxnKVOp{
+				Verb: api.KVGetOrEmpty,
+				DirEnt: structs.DirEntry{
+					Key: "foo/update",
+				},
+			},
+		},
+		&structs.TxnOp{
+			KV: &structs.TxnKVOp{
+				Verb: api.KVGetOrEmpty,
+				DirEnt: structs.DirEntry{
+					Key: "foo/not-exists",
+				},
+			},
+		},
 		&structs.TxnOp{
 			KV: &structs.TxnKVOp{
 				Verb: api.KVCheckIndex,
@@ -702,6 +718,22 @@ func TestStateStore_Txn_KVS(t *testing.T) {
 				},
 			},
 		},
+		&structs.TxnResult{
+			KV: &structs.DirEntry{
+				Key:   "foo/update",
+				Value: []byte("stale"),
+				RaftIndex: structs.RaftIndex{
+					CreateIndex: 5,
+					ModifyIndex: 5,
+				},
+			},
+		},
+		&structs.TxnResult{
+			KV: &structs.DirEntry{
+				Key:   "foo/not-exists",
+				Value: nil,
+			},
+		},
 		&structs.TxnResult{
 			KV: &structs.DirEntry{
@@ -41,8 +41,8 @@ var Gauges = []prometheus.GaugeDefinition{
 		Help: "Measures the current number of server agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6.",
 	},
 	{
-		Name: []string{"consul", "kv", "entries"},
-		Help: "Measures the current number of server agents registered with Consul. It is only emitted by Consul servers. Added in v1.10.3.",
+		Name: []string{"consul", "state", "kv_entries"},
+		Help: "Measures the current number of entries in the Consul KV store. It is only emitted by Consul servers. Added in v1.10.3.",
 	},
 	{
 		Name: []string{"consul", "state", "connect_instances"},
@@ -26,11 +26,12 @@ const (

 type Server struct {
 	Config
+
+	Tracker *Tracker
 }

 type Config struct {
 	Backend Backend
-	Tracker *Tracker
 	GetStore func() StateStore
 	Logger hclog.Logger
 	ForwardRPC func(structs.RPCInfo, func(*grpc.ClientConn) error) (bool, error)

@@ -42,8 +43,8 @@ type Config struct {
 	// outgoingHeartbeatInterval is how often we send a heartbeat.
 	outgoingHeartbeatInterval time.Duration

-	// IncomingHeartbeatTimeout is how long we'll wait between receiving heartbeats before we close the connection.
-	IncomingHeartbeatTimeout time.Duration
+	// incomingHeartbeatTimeout is how long we'll wait between receiving heartbeats before we close the connection.
+	incomingHeartbeatTimeout time.Duration
 }

 //go:generate mockery --name ACLResolver --inpackage

@@ -53,7 +54,6 @@ type ACLResolver interface {

 func NewServer(cfg Config) *Server {
 	requireNotNil(cfg.Backend, "Backend")
-	requireNotNil(cfg.Tracker, "Tracker")
 	requireNotNil(cfg.GetStore, "GetStore")
 	requireNotNil(cfg.Logger, "Logger")
 	// requireNotNil(cfg.ACLResolver, "ACLResolver") // TODO(peering): reenable check when ACLs are required

@@ -63,11 +63,12 @@ func NewServer(cfg Config) *Server {
 	if cfg.outgoingHeartbeatInterval == 0 {
 		cfg.outgoingHeartbeatInterval = defaultOutgoingHeartbeatInterval
 	}
-	if cfg.IncomingHeartbeatTimeout == 0 {
-		cfg.IncomingHeartbeatTimeout = defaultIncomingHeartbeatTimeout
+	if cfg.incomingHeartbeatTimeout == 0 {
+		cfg.incomingHeartbeatTimeout = defaultIncomingHeartbeatTimeout
 	}
 	return &Server{
 		Config: cfg,
+		Tracker: NewTracker(cfg.incomingHeartbeatTimeout),
 	}
 }
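With the `Config.Tracker` field gone, the peerstream server now owns its tracker, sized by the same unexported heartbeat timeout. A hypothetical in-package sketch of the new construction path; `backend`, `store`, and `logger` stand in for whatever the caller already has:

```go
// a sketch inside the peerstream package, assuming the types shown above
func newPeerStreamServer(backend Backend, store StateStore, logger hclog.Logger) *Server {
	return NewServer(Config{
		Backend:  backend,
		GetStore: func() StateStore { return store },
		Logger:   logger,
		// incomingHeartbeatTimeout left at zero selects the default, which
		// also sizes the Tracker that NewServer now builds internally.
	})
}
```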
@@ -406,7 +406,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {

 	// incomingHeartbeatCtx will complete if incoming heartbeats time out.
 	incomingHeartbeatCtx, incomingHeartbeatCtxCancel :=
-		context.WithTimeout(context.Background(), s.IncomingHeartbeatTimeout)
+		context.WithTimeout(context.Background(), s.incomingHeartbeatTimeout)
 	// NOTE: It's important that we wrap the call to cancel in a wrapper func because during the loop we're
 	// re-assigning the value of incomingHeartbeatCtxCancel and we want the defer to run on the last assigned
 	// value, not the current value.

@@ -575,6 +575,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
 				status.TrackRecvResourceSuccess()
 			}

+			// We are replying ACK or NACK depending on whether we successfully processed the response.
 			if err := streamSend(reply); err != nil {
 				return fmt.Errorf("failed to send to stream: %v", err)
 			}

@@ -605,7 +606,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
 			// They just can't trace the execution properly for some reason (possibly golang/go#29587).
 			//nolint:govet
 			incomingHeartbeatCtx, incomingHeartbeatCtxCancel =
-				context.WithTimeout(context.Background(), s.incomingHeartbeatTimeout)
+				context.WithTimeout(context.Background(), s.incomingHeartbeatTimeout)
 		}

 	case update := <-subCh:

@@ -642,7 +643,6 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
 			if err := streamSend(replResp); err != nil {
 				return fmt.Errorf("failed to push data for %q: %w", update.CorrelationID, err)
 			}
-			status.TrackSendSuccess()
 		}
 	}
 }
@@ -499,9 +499,8 @@ func TestStreamResources_Server_Terminate(t *testing.T) {
 		base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
 	}

-	srv, store := newTestServer(t, func(c *Config) {
-		c.Tracker.SetClock(it.Now)
-	})
+	srv, store := newTestServer(t, nil)
+	srv.Tracker.setClock(it.Now)

 	p := writePeeringToBeDialed(t, store, 1, "my-peer")
 	require.Empty(t, p.PeerID, "should be empty if being dialed")

@@ -552,9 +551,8 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
 		base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
 	}

-	srv, store := newTestServer(t, func(c *Config) {
-		c.Tracker.SetClock(it.Now)
-	})
+	srv, store := newTestServer(t, nil)
+	srv.Tracker.setClock(it.Now)

 	// Set the initial roots and CA configuration.
 	_, rootA := writeInitialRootsAndCA(t, store)
@@ -572,7 +570,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
 		})
 	})

-	var lastSendAck, lastSendSuccess time.Time
+	var lastSendAck time.Time

 	testutil.RunStep(t, "ack tracked as success", func(t *testing.T) {
 		ack := &pbpeerstream.ReplicationMessage{

@@ -587,16 +585,13 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
 			},
 		}

-		lastSendAck = time.Date(2000, time.January, 1, 0, 0, 2, 0, time.UTC)
-		lastSendSuccess = time.Date(2000, time.January, 1, 0, 0, 3, 0, time.UTC)
+		lastSendAck = it.FutureNow(1)
 		err := client.Send(ack)
 		require.NoError(t, err)

 		expect := Status{
 			Connected: true,
 			LastAck:   lastSendAck,
-			heartbeatTimeout: defaultIncomingHeartbeatTimeout,
-			LastSendSuccess:  lastSendSuccess,
 		}

 		retry.Run(t, func(r *retry.R) {

@@ -624,20 +619,17 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
 			},
 		}

-		lastSendAck = time.Date(2000, time.January, 1, 0, 0, 4, 0, time.UTC)
-		lastNack = time.Date(2000, time.January, 1, 0, 0, 5, 0, time.UTC)
+		lastNack = it.FutureNow(1)
 		err := client.Send(nack)
 		require.NoError(t, err)

 		lastNackMsg = "client peer was unable to apply resource: bad bad not good"

 		expect := Status{
 			Connected:       true,
 			LastAck:         lastSendAck,
 			LastNack:        lastNack,
 			LastNackMessage: lastNackMsg,
-			heartbeatTimeout: defaultIncomingHeartbeatTimeout,
-			LastSendSuccess:  lastSendSuccess,
 		}

 		retry.Run(t, func(r *retry.R) {

@@ -707,8 +699,6 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
 			ImportedServices: map[string]struct{}{
 				api.String(): {},
 			},
-			heartbeatTimeout: defaultIncomingHeartbeatTimeout,
-			LastSendSuccess:  lastSendSuccess,
 		}

 		retry.Run(t, func(r *retry.R) {

@@ -770,8 +760,6 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
 			ImportedServices: map[string]struct{}{
 				api.String(): {},
 			},
-			heartbeatTimeout: defaultIncomingHeartbeatTimeout,
-			LastSendSuccess:  lastSendSuccess,
 		}

 		retry.Run(t, func(r *retry.R) {

@@ -805,8 +793,6 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
 			ImportedServices: map[string]struct{}{
 				api.String(): {},
 			},
-			heartbeatTimeout: defaultIncomingHeartbeatTimeout,
-			LastSendSuccess:  lastSendSuccess,
 		}

 		retry.Run(t, func(r *retry.R) {

@@ -839,8 +825,6 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
 			ImportedServices: map[string]struct{}{
 				api.String(): {},
 			},
-			heartbeatTimeout: defaultIncomingHeartbeatTimeout,
-			LastSendSuccess:  lastSendSuccess,
 		}

 		retry.Run(t, func(r *retry.R) {
@@ -1142,9 +1126,9 @@ func TestStreamResources_Server_DisconnectsOnHeartbeatTimeout(t *testing.T) {
 	}

 	srv, store := newTestServer(t, func(c *Config) {
-		c.Tracker.SetClock(it.Now)
-		c.IncomingHeartbeatTimeout = 5 * time.Millisecond
+		c.incomingHeartbeatTimeout = 5 * time.Millisecond
 	})
+	srv.Tracker.setClock(it.Now)

 	p := writePeeringToBeDialed(t, store, 1, "my-peer")
 	require.Empty(t, p.PeerID, "should be empty if being dialed")

@@ -1190,9 +1174,9 @@ func TestStreamResources_Server_SendsHeartbeats(t *testing.T) {
 	outgoingHeartbeatInterval := 5 * time.Millisecond

 	srv, store := newTestServer(t, func(c *Config) {
-		c.Tracker.SetClock(it.Now)
 		c.outgoingHeartbeatInterval = outgoingHeartbeatInterval
 	})
+	srv.Tracker.setClock(it.Now)

 	p := writePeeringToBeDialed(t, store, 1, "my-peer")
 	require.Empty(t, p.PeerID, "should be empty if being dialed")

@@ -1249,9 +1233,9 @@ func TestStreamResources_Server_KeepsConnectionOpenWithHeartbeat(t *testing.T) {
 	incomingHeartbeatTimeout := 10 * time.Millisecond

 	srv, store := newTestServer(t, func(c *Config) {
-		c.Tracker.SetClock(it.Now)
-		c.IncomingHeartbeatTimeout = incomingHeartbeatTimeout
+		c.incomingHeartbeatTimeout = incomingHeartbeatTimeout
 	})
+	srv.Tracker.setClock(it.Now)

 	p := writePeeringToBeDialed(t, store, 1, "my-peer")
 	require.Empty(t, p.PeerID, "should be empty if being dialed")
@@ -2760,7 +2744,6 @@ func newTestServer(t *testing.T, configFn func(c *Config)) (*testServer, *state.
 			store: store,
 			pub:   publisher,
 		},
-		Tracker: NewTracker(),
 		GetStore: func() StateStore { return store },
 		Logger: testutil.Logger(t),
 		Datacenter: "dc1",
@@ -14,20 +14,27 @@ type Tracker struct {
 	mu      sync.RWMutex
 	streams map[string]*MutableStatus

+	// heartbeatTimeout is the max duration a connection is allowed to be
+	// disconnected before the stream health is reported as non-healthy
+	heartbeatTimeout time.Duration
+
 	// timeNow is a shim for testing.
 	timeNow func() time.Time
-
-	heartbeatTimeout time.Duration
 }

-func NewTracker() *Tracker {
+func NewTracker(heartbeatTimeout time.Duration) *Tracker {
+	if heartbeatTimeout == 0 {
+		heartbeatTimeout = defaultIncomingHeartbeatTimeout
+	}
 	return &Tracker{
 		streams: make(map[string]*MutableStatus),
 		timeNow: time.Now,
+		heartbeatTimeout: heartbeatTimeout,
 	}
 }

-func (t *Tracker) SetClock(clock func() time.Time) {
+// setClock is used for debugging purposes only.
+func (t *Tracker) setClock(clock func() time.Time) {
 	if clock == nil {
 		t.timeNow = time.Now
 	} else {
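`NewTracker` now takes the heartbeat timeout directly and falls back to the package default on a zero value, which is why the `SetHeartbeatTimeout` mutator disappears in the next hunk. A quick sketch, assuming the tracker internals shown above:

```go
// a minimal sketch inside the peerstream package
func newTrackers() (*Tracker, *Tracker) {
	// Zero falls back to defaultIncomingHeartbeatTimeout.
	tr := NewTracker(0)
	// An explicit, short timeout is handy in tests that want failures fast.
	trFast := NewTracker(5 * time.Millisecond)
	return tr, trFast
}
```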
@@ -35,12 +42,6 @@ func (t *Tracker) SetClock(clock func() time.Time) {
 	}
 }

-func (t *Tracker) SetHeartbeatTimeout(heartbeatTimeout time.Duration) {
-	t.mu.Lock()
-	defer t.mu.Unlock()
-	t.heartbeatTimeout = heartbeatTimeout
-}
-
 // Register a stream for a given peer but do not mark it as connected.
 func (t *Tracker) Register(id string) (*MutableStatus, error) {
 	t.mu.Lock()

@@ -52,7 +53,7 @@ func (t *Tracker) Register(id string) (*MutableStatus, error) {
 func (t *Tracker) registerLocked(id string, initAsConnected bool) (*MutableStatus, bool, error) {
 	status, ok := t.streams[id]
 	if !ok {
-		status = newMutableStatus(t.timeNow, t.heartbeatTimeout, initAsConnected)
+		status = newMutableStatus(t.timeNow, initAsConnected)
 		t.streams[id] = status
 		return status, true, nil
 	}
@@ -136,6 +137,39 @@ func (t *Tracker) DeleteStatus(id string) {
 	delete(t.streams, id)
 }

+// IsHealthy calculates the health of a peering status.
+// We define a peering as unhealthy if its status has been in the following
+// states for longer than the configured incomingHeartbeatTimeout:
+//  - If it is disconnected
+//  - If the last received Nack is newer than last received Ack
+//  - If the last received error is newer than last received success
+//
+// If none of these conditions apply, we call the peering healthy.
+func (t *Tracker) IsHealthy(s Status) bool {
+	// If stream is in a disconnected state for longer than the configured
+	// heartbeat timeout, report as unhealthy.
+	if !s.DisconnectTime.IsZero() &&
+		t.timeNow().Sub(s.DisconnectTime) > t.heartbeatTimeout {
+		return false
+	}
+
+	// If last Nack is after last Ack, it means the peer is unable to
+	// handle our replication message.
+	if s.LastNack.After(s.LastAck) &&
+		t.timeNow().Sub(s.LastAck) > t.heartbeatTimeout {
+		return false
+	}
+
+	// If last recv error is newer than last recv success, we were unable
+	// to handle the peer's replication message.
+	if s.LastRecvError.After(s.LastRecvResourceSuccess) &&
+		t.timeNow().Sub(s.LastRecvError) > t.heartbeatTimeout {
+		return false
+	}
+
+	return true
+}
+
 type MutableStatus struct {
 	mu sync.RWMutex
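Health now lives on the `Tracker` rather than on `Status`, so the timeout comes from a single place and a fake clock can drive it in tests. A test-style sketch of the new call pattern; the peer ID and error text are made up, and `TrackRecvError` is assumed to exist alongside the tracker methods shown in these hunks:

```go
// a sketch of the new Tracker-centric health check
func sketchIsHealthy(t *testing.T) {
	tr := NewTracker(1 * time.Millisecond)

	st, err := tr.Connected("my-peer-id")
	require.NoError(t, err)

	// A receive error with no newer receive success flips health once the
	// heartbeat window has elapsed.
	st.TrackRecvError("placeholder error") // assumed API, mirroring TrackSendError
	time.Sleep(5 * time.Millisecond)

	require.False(t, tr.IsHealthy(st.GetStatus()))
}
```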
@@ -152,8 +186,6 @@ type MutableStatus struct {
 // Status contains information about the replication stream to a peer cluster.
 // TODO(peering): There's a lot of fields here...
 type Status struct {
-	heartbeatTimeout time.Duration
-
 	// Connected is true when there is an open stream for the peer.
 	Connected bool

@@ -182,9 +214,6 @@ type Status struct {
 	// LastSendErrorMessage tracks the last error message when sending into the stream.
 	LastSendErrorMessage string

-	// LastSendSuccess tracks the time of the last success response sent into the stream.
-	LastSendSuccess time.Time
-
 	// LastRecvHeartbeat tracks when we last received a heartbeat from our peer.
 	LastRecvHeartbeat time.Time
@@ -214,40 +243,11 @@ func (s *Status) GetExportedServicesCount() uint64 {
 	return uint64(len(s.ExportedServices))
 }

-// IsHealthy is a convenience func that returns true/ false for a peering status.
-// We define a peering as unhealthy if its status satisfies one of the following:
-//  - If heartbeat hasn't been received within the IncomingHeartbeatTimeout
-//  - If the last sent error is newer than last sent success
-//  - If the last received error is newer than last received success
-// If none of these conditions apply, we call the peering healthy.
-func (s *Status) IsHealthy() bool {
-	if time.Now().Sub(s.LastRecvHeartbeat) > s.heartbeatTimeout {
-		// 1. If heartbeat hasn't been received for a while - report unhealthy
-		return false
-	}
-
-	if s.LastSendError.After(s.LastSendSuccess) {
-		// 2. If last sent error is newer than last sent success - report unhealthy
-		return false
-	}
-
-	if s.LastRecvError.After(s.LastRecvResourceSuccess) {
-		// 3. If last recv error is newer than last recv success - report unhealthy
-		return false
-	}
-
-	return true
-}
-
-func newMutableStatus(now func() time.Time, heartbeatTimeout time.Duration, connected bool) *MutableStatus {
-	if heartbeatTimeout.Microseconds() == 0 {
-		heartbeatTimeout = defaultIncomingHeartbeatTimeout
-	}
+func newMutableStatus(now func() time.Time, connected bool) *MutableStatus {
 	return &MutableStatus{
 		Status: Status{
 			Connected: connected,
-			heartbeatTimeout: heartbeatTimeout,
 			NeverConnected: !connected,
 		},
 		timeNow: now,
 		doneCh: make(chan struct{}),

@@ -271,12 +271,6 @@ func (s *MutableStatus) TrackSendError(error string) {
 	s.mu.Unlock()
 }

-func (s *MutableStatus) TrackSendSuccess() {
-	s.mu.Lock()
-	s.LastSendSuccess = s.timeNow().UTC()
-	s.mu.Unlock()
-}
-
 // TrackRecvResourceSuccess tracks receiving a replicated resource.
 func (s *MutableStatus) TrackRecvResourceSuccess() {
 	s.mu.Lock()
@@ -5,6 +5,7 @@ import (
 	"testing"
 	"time"

+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

 	"github.com/hashicorp/consul/sdk/testutil"
@@ -14,95 +15,107 @@ const (
 	aPeerID = "63b60245-c475-426b-b314-4588d210859d"
 )

-func TestStatus_IsHealthy(t *testing.T) {
+func TestTracker_IsHealthy(t *testing.T) {
 	type testcase struct {
 		name         string
-		dontConnect  bool
+		tracker      *Tracker
 		modifierFunc func(status *MutableStatus)
 		expectedVal  bool
-		heartbeatTimeout time.Duration
 	}

 	tcs := []testcase{
 		{
-			name:        "never connected, unhealthy",
-			expectedVal: false,
-			dontConnect: true,
-		},
-		{
-			name:        "no heartbeat, unhealthy",
-			expectedVal: false,
-		},
-		{
-			name:        "heartbeat is not received, unhealthy",
-			expectedVal: false,
+			name:        "disconnect time within timeout",
+			tracker:     NewTracker(defaultIncomingHeartbeatTimeout),
+			expectedVal: true,
 			modifierFunc: func(status *MutableStatus) {
-				// set heartbeat
-				status.LastRecvHeartbeat = time.Now().Add(-1 * time.Second)
-			},
-			heartbeatTimeout: 1 * time.Second,
-		},
-		{
-			name:        "send error before send success",
-			expectedVal: false,
-			modifierFunc: func(status *MutableStatus) {
-				// set heartbeat
-				status.LastRecvHeartbeat = time.Now()
-
-				status.LastSendSuccess = time.Now()
-				status.LastSendError = time.Now()
+				status.DisconnectTime = time.Now()
 			},
 		},
 		{
-			name:        "received error before received success",
+			name:        "disconnect time past timeout",
+			tracker:     NewTracker(1 * time.Millisecond),
 			expectedVal: false,
 			modifierFunc: func(status *MutableStatus) {
-				// set heartbeat
-				status.LastRecvHeartbeat = time.Now()
-
-				status.LastRecvResourceSuccess = time.Now()
-				status.LastRecvError = time.Now()
+				status.DisconnectTime = time.Now().Add(-1 * time.Minute)
+			},
+		},
+		{
+			name:        "receive error before receive success within timeout",
+			tracker:     NewTracker(defaultIncomingHeartbeatTimeout),
+			expectedVal: true,
+			modifierFunc: func(status *MutableStatus) {
+				now := time.Now()
+				status.LastRecvResourceSuccess = now
+				status.LastRecvError = now.Add(1 * time.Second)
+			},
+		},
+		{
+			name:        "receive error before receive success within timeout",
+			tracker:     NewTracker(defaultIncomingHeartbeatTimeout),
+			expectedVal: true,
+			modifierFunc: func(status *MutableStatus) {
+				now := time.Now()
+				status.LastRecvResourceSuccess = now
+				status.LastRecvError = now.Add(1 * time.Second)
+			},
+		},
+		{
+			name:        "receive error before receive success past timeout",
+			tracker:     NewTracker(1 * time.Millisecond),
+			expectedVal: false,
+			modifierFunc: func(status *MutableStatus) {
+				now := time.Now().Add(-2 * time.Second)
+				status.LastRecvResourceSuccess = now
+				status.LastRecvError = now.Add(1 * time.Second)
+			},
+		},
+		{
+			name:        "nack before ack within timeout",
+			tracker:     NewTracker(defaultIncomingHeartbeatTimeout),
+			expectedVal: true,
+			modifierFunc: func(status *MutableStatus) {
+				now := time.Now()
+				status.LastAck = now
+				status.LastNack = now.Add(1 * time.Second)
+			},
+		},
+		{
+			name:        "nack before ack past timeout",
+			tracker:     NewTracker(1 * time.Millisecond),
+			expectedVal: false,
+			modifierFunc: func(status *MutableStatus) {
+				now := time.Now().Add(-2 * time.Second)
+				status.LastAck = now
+				status.LastNack = now.Add(1 * time.Second)
 			},
 		},
 		{
 			name:        "healthy",
+			tracker:     NewTracker(defaultIncomingHeartbeatTimeout),
 			expectedVal: true,
-			modifierFunc: func(status *MutableStatus) {
-				// set heartbeat
-				status.LastRecvHeartbeat = time.Now()
-			},
 		},
 	}

 	for _, tc := range tcs {
 		t.Run(tc.name, func(t *testing.T) {
-			tracker := NewTracker()
-			if tc.heartbeatTimeout.Microseconds() != 0 {
-				tracker.SetHeartbeatTimeout(tc.heartbeatTimeout)
-			}
+			tracker := tc.tracker

-			if !tc.dontConnect {
-				st, err := tracker.Connected(aPeerID)
-				require.NoError(t, err)
-				require.True(t, st.Connected)
+			st, err := tracker.Connected(aPeerID)
+			require.NoError(t, err)
+			require.True(t, st.Connected)

-				if tc.modifierFunc != nil {
-					tc.modifierFunc(st)
-				}
-
-				require.Equal(t, tc.expectedVal, st.IsHealthy())
+			if tc.modifierFunc != nil {
+				tc.modifierFunc(st)
+			}

-			} else {
-				st, found := tracker.StreamStatus(aPeerID)
-				require.False(t, found)
-				require.Equal(t, tc.expectedVal, st.IsHealthy())
-			}
+			assert.Equal(t, tc.expectedVal, tracker.IsHealthy(st.GetStatus()))
 		})
 	}
 }

 func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
-	tracker := NewTracker()
+	tracker := NewTracker(defaultIncomingHeartbeatTimeout)
 	peerID := "63b60245-c475-426b-b314-4588d210859d"

 	it := incrementalTime{
@@ -120,8 +133,7 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
 	require.NoError(t, err)

 	expect := Status{
 		Connected: true,
-		heartbeatTimeout: defaultIncomingHeartbeatTimeout,
 	}

 	status, ok := tracker.StreamStatus(peerID)

@@ -147,9 +159,8 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {

 		lastSuccess = it.base.Add(time.Duration(sequence) * time.Second).UTC()
 		expect := Status{
 			Connected: true,
 			LastAck:   lastSuccess,
-			heartbeatTimeout: defaultIncomingHeartbeatTimeout,
 		}
 		require.Equal(t, expect, status)
 	})

@@ -159,10 +170,9 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
 		sequence++

 		expect := Status{
 			Connected:      false,
 			DisconnectTime: it.base.Add(time.Duration(sequence) * time.Second).UTC(),
 			LastAck:        lastSuccess,
-			heartbeatTimeout: defaultIncomingHeartbeatTimeout,
 		}
 		status, ok := tracker.StreamStatus(peerID)
 		require.True(t, ok)

@@ -174,9 +184,8 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
 		require.NoError(t, err)

 		expect := Status{
 			Connected: true,
 			LastAck:   lastSuccess,
-			heartbeatTimeout: defaultIncomingHeartbeatTimeout,
 			// DisconnectTime gets cleared on re-connect.
 		}
|
||||||
}
|
}
|
||||||
|
|
||||||
run := func(t *testing.T, tc testCase) {
|
run := func(t *testing.T, tc testCase) {
|
||||||
tracker := NewTracker()
|
tracker := NewTracker(defaultIncomingHeartbeatTimeout)
|
||||||
if tc.setup != nil {
|
if tc.setup != nil {
|
||||||
tc.setup(t, tracker)
|
tc.setup(t, tracker)
|
||||||
}
|
}
|
||||||
|
|
|
@ -280,16 +280,6 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s
|
||||||
}
|
}
|
||||||
snap.Roots = roots
|
snap.Roots = roots
|
||||||
|
|
||||||
case strings.HasPrefix(u.CorrelationID, peerTrustBundleIDPrefix):
|
|
||||||
resp, ok := u.Result.(*pbpeering.TrustBundleReadResponse)
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("invalid type for response: %T", u.Result)
|
|
||||||
}
|
|
||||||
peer := strings.TrimPrefix(u.CorrelationID, peerTrustBundleIDPrefix)
|
|
||||||
if resp.Bundle != nil {
|
|
||||||
snap.ConnectProxy.UpstreamPeerTrustBundles.Set(peer, resp.Bundle)
|
|
||||||
}
|
|
||||||
|
|
||||||
case u.CorrelationID == peeringTrustBundlesWatchID:
|
case u.CorrelationID == peeringTrustBundlesWatchID:
|
||||||
resp, ok := u.Result.(*pbpeering.TrustBundleListByServiceResponse)
|
resp, ok := u.Result.(*pbpeering.TrustBundleListByServiceResponse)
|
||||||
if !ok {
|
if !ok {
|
||||||
|
@ -369,6 +359,17 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s
|
||||||
// Clean up data
|
// Clean up data
|
||||||
//
|
//
|
||||||
|
|
||||||
|
peeredChainTargets := make(map[UpstreamID]struct{})
|
||||||
|
for _, discoChain := range snap.ConnectProxy.DiscoveryChain {
|
||||||
|
for _, target := range discoChain.Targets {
|
||||||
|
if target.Peer == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
uid := NewUpstreamIDFromTargetID(target.ID)
|
||||||
|
peeredChainTargets[uid] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
validPeerNames := make(map[string]struct{})
|
validPeerNames := make(map[string]struct{})
|
||||||
|
|
||||||
// Iterate through all known endpoints and remove references to upstream IDs that weren't in the update
|
// Iterate through all known endpoints and remove references to upstream IDs that weren't in the update
|
||||||
|
@ -383,6 +384,11 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s
|
||||||
validPeerNames[uid.Peer] = struct{}{}
|
validPeerNames[uid.Peer] = struct{}{}
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
// Peered upstream came from a discovery chain target
|
||||||
|
if _, ok := peeredChainTargets[uid]; ok {
|
||||||
|
validPeerNames[uid.Peer] = struct{}{}
|
||||||
|
return true
|
||||||
|
}
|
||||||
snap.ConnectProxy.PeerUpstreamEndpoints.CancelWatch(uid)
|
snap.ConnectProxy.PeerUpstreamEndpoints.CancelWatch(uid)
|
||||||
return true
|
return true
|
||||||
})
|
})
|
||||||
|
@ -463,8 +469,14 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if _, ok := seenUpstreams[uid]; !ok {
|
if _, ok := seenUpstreams[uid]; !ok {
|
||||||
for _, cancelFn := range targets {
|
for targetID, cancelFn := range targets {
|
||||||
cancelFn()
|
cancelFn()
|
||||||
|
|
||||||
|
targetUID := NewUpstreamIDFromTargetID(targetID)
|
||||||
|
if targetUID.Peer != "" {
|
||||||
|
snap.ConnectProxy.PeerUpstreamEndpoints.CancelWatch(targetUID)
|
||||||
|
snap.ConnectProxy.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
delete(snap.ConnectProxy.WatchedUpstreams, uid)
|
delete(snap.ConnectProxy.WatchedUpstreams, uid)
|
||||||
}
|
}
|
||||||
|
|
|
@@ -5,7 +5,9 @@ import (
 	"fmt"

 	cachetype "github.com/hashicorp/consul/agent/cache-types"
+	"github.com/hashicorp/consul/agent/proxycfg/internal/watch"
 	"github.com/hashicorp/consul/agent/structs"
+	"github.com/hashicorp/consul/proto/pbpeering"
 )

 type handlerIngressGateway struct {

@@ -66,6 +68,9 @@ func (s *handlerIngressGateway) initialize(ctx context.Context) (ConfigSnapshot,
 	snap.IngressGateway.WatchedGateways = make(map[UpstreamID]map[string]context.CancelFunc)
 	snap.IngressGateway.WatchedGatewayEndpoints = make(map[UpstreamID]map[string]structs.CheckServiceNodes)
 	snap.IngressGateway.Listeners = make(map[IngressListenerKey]structs.IngressListener)
+	snap.IngressGateway.UpstreamPeerTrustBundles = watch.NewMap[string, *pbpeering.PeeringTrustBundle]()
+	snap.IngressGateway.PeerUpstreamEndpoints = watch.NewMap[UpstreamID, structs.CheckServiceNodes]()
+	snap.IngressGateway.PeerUpstreamEndpointsUseHostnames = make(map[UpstreamID]struct{})
 	return snap, nil
 }

@@ -152,6 +157,12 @@ func (s *handlerIngressGateway) handleUpdate(ctx context.Context, u UpdateEvent,
 			delete(snap.IngressGateway.WatchedUpstreams[uid], targetID)
 			delete(snap.IngressGateway.WatchedUpstreamEndpoints[uid], targetID)
 			cancelUpstreamFn()
+
+			targetUID := NewUpstreamIDFromTargetID(targetID)
+			if targetUID.Peer != "" {
+				snap.IngressGateway.PeerUpstreamEndpoints.CancelWatch(targetUID)
+				snap.IngressGateway.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer)
+			}
 		}

 		cancelFn()
@@ -814,6 +814,18 @@ func (s *ConfigSnapshot) MeshConfigTLSOutgoing() *structs.MeshDirectionalTLSConf
 	return mesh.TLS.Outgoing
 }

+func (s *ConfigSnapshot) ToConfigSnapshotUpstreams() (*ConfigSnapshotUpstreams, error) {
+	switch s.Kind {
+	case structs.ServiceKindConnectProxy:
+		return &s.ConnectProxy.ConfigSnapshotUpstreams, nil
+	case structs.ServiceKindIngressGateway:
+		return &s.IngressGateway.ConfigSnapshotUpstreams, nil
+	default:
+		// This is a coherence check and should never fail
+		return nil, fmt.Errorf("No upstream snapshot for gateway mode %q", s.Kind)
+	}
+}
+
 func (u *ConfigSnapshotUpstreams) UpstreamPeerMeta(uid UpstreamID) structs.PeeringServiceMeta {
 	nodes, _ := u.PeerUpstreamEndpoints.Get(uid)
 	if len(nodes) == 0 {
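The new helper replaces the ad-hoc kind switch that `handleUpdateUpstreams` used to carry (see the later hunk in upstreams.go). A minimal sketch of the intended call pattern inside the proxycfg package, assuming the types shown above:

```go
// a sketch: resolve peer endpoints for an upstream regardless of snapshot kind
func peerEndpointsFor(snap *ConfigSnapshot, uid UpstreamID) (structs.CheckServiceNodes, error) {
	upstreams, err := snap.ToConfigSnapshotUpstreams()
	if err != nil {
		// Mesh and terminating gateways carry no upstream snapshot.
		return nil, err
	}
	nodes, _ := upstreams.PeerUpstreamEndpoints.Get(uid)
	return nodes, nil
}
```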
@@ -493,6 +493,11 @@ func TestState_WatchesAndUpdates(t *testing.T) {
 					Mode: structs.MeshGatewayModeNone,
 				},
 			},
+			structs.Upstream{
+				DestinationType: structs.UpstreamDestTypeService,
+				DestinationName: "api-failover-to-peer",
+				LocalBindPort:   10007,
+			},
 			structs.Upstream{
 				DestinationType: structs.UpstreamDestTypeService,
 				DestinationName: "api-dc2",

@@ -552,6 +557,16 @@ func TestState_WatchesAndUpdates(t *testing.T) {
 					Mode: structs.MeshGatewayModeNone,
 				},
 			}),
+			fmt.Sprintf("discovery-chain:%s-failover-to-peer", apiUID.String()): genVerifyDiscoveryChainWatch(&structs.DiscoveryChainRequest{
+				Name:                 "api-failover-to-peer",
+				EvaluateInDatacenter: "dc1",
+				EvaluateInNamespace:  "default",
+				EvaluateInPartition:  "default",
+				Datacenter:           "dc1",
+				OverrideMeshGateway: structs.MeshGatewayConfig{
+					Mode: meshGatewayProxyConfigValue,
+				},
+			}),
 			fmt.Sprintf("discovery-chain:%s-dc2", apiUID.String()): genVerifyDiscoveryChainWatch(&structs.DiscoveryChainRequest{
 				Name:                 "api-dc2",
 				EvaluateInDatacenter: "dc1",

@@ -639,6 +654,26 @@ func TestState_WatchesAndUpdates(t *testing.T) {
 				},
 				Err: nil,
 			},
+			{
+				CorrelationID: fmt.Sprintf("discovery-chain:%s-failover-to-peer", apiUID.String()),
+				Result: &structs.DiscoveryChainResponse{
+					Chain: discoverychain.TestCompileConfigEntries(t, "api-failover-to-peer", "default", "default", "dc1", "trustdomain.consul",
+						func(req *discoverychain.CompileRequest) {
+							req.OverrideMeshGateway.Mode = meshGatewayProxyConfigValue
+						}, &structs.ServiceResolverConfigEntry{
+							Kind: structs.ServiceResolver,
+							Name: "api-failover-to-peer",
+							Failover: map[string]structs.ServiceResolverFailover{
+								"*": {
+									Targets: []structs.ServiceResolverFailoverTarget{
+										{Peer: "cluster-01"},
+									},
+								},
+							},
+						}),
+				},
+				Err: nil,
+			},
 		},
 		verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) {
 			require.True(t, snap.Valid())

@@ -646,15 +681,18 @@ func TestState_WatchesAndUpdates(t *testing.T) {
 			require.Equal(t, indexedRoots, snap.Roots)

 			require.Equal(t, issuedCert, snap.ConnectProxy.Leaf)
-			require.Len(t, snap.ConnectProxy.DiscoveryChain, 5, "%+v", snap.ConnectProxy.DiscoveryChain)
-			require.Len(t, snap.ConnectProxy.WatchedUpstreams, 5, "%+v", snap.ConnectProxy.WatchedUpstreams)
-			require.Len(t, snap.ConnectProxy.WatchedUpstreamEndpoints, 5, "%+v", snap.ConnectProxy.WatchedUpstreamEndpoints)
-			require.Len(t, snap.ConnectProxy.WatchedGateways, 5, "%+v", snap.ConnectProxy.WatchedGateways)
-			require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 5, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints)
+			require.Len(t, snap.ConnectProxy.DiscoveryChain, 6, "%+v", snap.ConnectProxy.DiscoveryChain)
+			require.Len(t, snap.ConnectProxy.WatchedUpstreams, 6, "%+v", snap.ConnectProxy.WatchedUpstreams)
+			require.Len(t, snap.ConnectProxy.WatchedUpstreamEndpoints, 6, "%+v", snap.ConnectProxy.WatchedUpstreamEndpoints)
+			require.Len(t, snap.ConnectProxy.WatchedGateways, 6, "%+v", snap.ConnectProxy.WatchedGateways)
+			require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 6, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints)

 			require.Len(t, snap.ConnectProxy.WatchedServiceChecks, 0, "%+v", snap.ConnectProxy.WatchedServiceChecks)
 			require.Len(t, snap.ConnectProxy.PreparedQueryEndpoints, 0, "%+v", snap.ConnectProxy.PreparedQueryEndpoints)

+			require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.PeerUpstreamEndpoints.Len())
+			require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.UpstreamPeerTrustBundles.Len())
+
 			require.True(t, snap.ConnectProxy.IntentionsSet)
 			require.Equal(t, ixnMatch, snap.ConnectProxy.Intentions)
 			require.True(t, snap.ConnectProxy.MeshConfigSet)

@@ -667,6 +705,7 @@ func TestState_WatchesAndUpdates(t *testing.T) {
 				fmt.Sprintf("upstream-target:api-failover-remote.default.default.dc2:%s-failover-remote?dc=dc2", apiUID.String()): genVerifyServiceSpecificRequest("api-failover-remote", "", "dc2", true),
 				fmt.Sprintf("upstream-target:api-failover-local.default.default.dc2:%s-failover-local?dc=dc2", apiUID.String()):   genVerifyServiceSpecificRequest("api-failover-local", "", "dc2", true),
 				fmt.Sprintf("upstream-target:api-failover-direct.default.default.dc2:%s-failover-direct?dc=dc2", apiUID.String()): genVerifyServiceSpecificRequest("api-failover-direct", "", "dc2", true),
+				upstreamPeerWatchIDPrefix + fmt.Sprintf("%s-failover-to-peer?peer=cluster-01", apiUID.String()):                   genVerifyServiceSpecificPeeredRequest("api-failover-to-peer", "", "", "cluster-01", true),
 				fmt.Sprintf("mesh-gateway:dc2:%s-failover-remote?dc=dc2", apiUID.String()):                                        genVerifyGatewayWatch("dc2"),
 				fmt.Sprintf("mesh-gateway:dc1:%s-failover-local?dc=dc2", apiUID.String()):                                         genVerifyGatewayWatch("dc1"),
 			},

@@ -676,15 +715,18 @@ func TestState_WatchesAndUpdates(t *testing.T) {
 			require.Equal(t, indexedRoots, snap.Roots)

 			require.Equal(t, issuedCert, snap.ConnectProxy.Leaf)
-			require.Len(t, snap.ConnectProxy.DiscoveryChain, 5, "%+v", snap.ConnectProxy.DiscoveryChain)
-			require.Len(t, snap.ConnectProxy.WatchedUpstreams, 5, "%+v", snap.ConnectProxy.WatchedUpstreams)
-			require.Len(t, snap.ConnectProxy.WatchedUpstreamEndpoints, 5, "%+v", snap.ConnectProxy.WatchedUpstreamEndpoints)
-			require.Len(t, snap.ConnectProxy.WatchedGateways, 5, "%+v", snap.ConnectProxy.WatchedGateways)
-			require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 5, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints)
+			require.Len(t, snap.ConnectProxy.DiscoveryChain, 6, "%+v", snap.ConnectProxy.DiscoveryChain)
+			require.Len(t, snap.ConnectProxy.WatchedUpstreams, 6, "%+v", snap.ConnectProxy.WatchedUpstreams)
+			require.Len(t, snap.ConnectProxy.WatchedUpstreamEndpoints, 6, "%+v", snap.ConnectProxy.WatchedUpstreamEndpoints)
+			require.Len(t, snap.ConnectProxy.WatchedGateways, 6, "%+v", snap.ConnectProxy.WatchedGateways)
+			require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 6, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints)

 			require.Len(t, snap.ConnectProxy.WatchedServiceChecks, 0, "%+v", snap.ConnectProxy.WatchedServiceChecks)
 			require.Len(t, snap.ConnectProxy.PreparedQueryEndpoints, 0, "%+v", snap.ConnectProxy.PreparedQueryEndpoints)

+			require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.PeerUpstreamEndpoints.Len())
+			require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.UpstreamPeerTrustBundles.Len())
+
 			require.True(t, snap.ConnectProxy.IntentionsSet)
 			require.Equal(t, ixnMatch, snap.ConnectProxy.Intentions)
 		},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestUpstreamNodesPeerCluster01(t testing.T) structs.CheckServiceNodes {
|
||||||
|
peer := "cluster-01"
|
||||||
|
service := structs.TestNodeServiceWithNameInPeer(t, "web", peer)
|
||||||
|
return structs.CheckServiceNodes{
|
||||||
|
structs.CheckServiceNode{
|
||||||
|
Node: &structs.Node{
|
||||||
|
ID: "test1",
|
||||||
|
Node: "test1",
|
||||||
|
Address: "10.40.1.1",
|
||||||
|
PeerName: peer,
|
||||||
|
},
|
||||||
|
Service: service,
|
||||||
|
},
|
||||||
|
structs.CheckServiceNode{
|
||||||
|
Node: &structs.Node{
|
||||||
|
ID: "test2",
|
||||||
|
Node: "test2",
|
||||||
|
Address: "10.40.1.2",
|
||||||
|
PeerName: peer,
|
||||||
|
},
|
||||||
|
Service: service,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestUpstreamNodesInStatusDC2(t testing.T, status string) structs.CheckServiceNodes {
|
func TestUpstreamNodesInStatusDC2(t testing.T, status string) structs.CheckServiceNodes {
|
||||||
return structs.CheckServiceNodes{
|
return structs.CheckServiceNodes{
|
||||||
structs.CheckServiceNode{
|
structs.CheckServiceNode{
|
||||||
|
|
|
@ -8,6 +8,7 @@ import (
|
||||||
"github.com/hashicorp/consul/agent/connect"
|
"github.com/hashicorp/consul/agent/connect"
|
||||||
"github.com/hashicorp/consul/agent/consul/discoverychain"
|
"github.com/hashicorp/consul/agent/consul/discoverychain"
|
||||||
"github.com/hashicorp/consul/agent/structs"
|
"github.com/hashicorp/consul/agent/structs"
|
||||||
|
"github.com/hashicorp/consul/proto/pbpeering"
|
||||||
)
|
)
|
||||||
|
|
||||||
func setupTestVariationConfigEntriesAndSnapshot(
|
func setupTestVariationConfigEntriesAndSnapshot(
|
||||||
|
@@ -72,6 +73,24 @@ func setupTestVariationConfigEntriesAndSnapshot(
 				Nodes: TestGatewayNodesDC2(t),
 			},
 		})
+	case "failover-to-cluster-peer":
+		events = append(events, UpdateEvent{
+			CorrelationID: "peer-trust-bundle:cluster-01",
+			Result: &pbpeering.TrustBundleReadResponse{
+				Bundle: &pbpeering.PeeringTrustBundle{
+					PeerName:          "peer1",
+					TrustDomain:       "peer1.domain",
+					ExportedPartition: "peer1ap",
+					RootPEMs:          []string{"peer1-root-1"},
+				},
+			},
+		})
+		events = append(events, UpdateEvent{
+			CorrelationID: "upstream-peer:db?peer=cluster-01",
+			Result: &structs.IndexedCheckServiceNodes{
+				Nodes: TestUpstreamNodesPeerCluster01(t),
+			},
+		})
 	case "failover-through-double-remote-gateway-triggered":
 		events = append(events, UpdateEvent{
 			CorrelationID: "upstream-target:db.default.default.dc1:" + dbUID.String(),

@@ -255,6 +274,21 @@ func setupTestVariationDiscoveryChain(
 				},
 			},
 		)
+	case "failover-to-cluster-peer":
+		entries = append(entries,
+			&structs.ServiceResolverConfigEntry{
+				Kind:           structs.ServiceResolver,
+				Name:           "db",
+				ConnectTimeout: 33 * time.Second,
+				Failover: map[string]structs.ServiceResolverFailover{
+					"*": {
+						Targets: []structs.ServiceResolverFailoverTarget{
+							{Peer: "cluster-01"},
+						},
+					},
+				},
+			},
+		)
 	case "failover-through-double-remote-gateway-triggered":
 		fallthrough
 	case "failover-through-double-remote-gateway":
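These fixtures drive the new failover path: a service-resolver whose failover targets name a cluster peer rather than another datacenter. A sketch of such a config entry in Go, mirroring the fixture above; the service and peer names are illustrative:

```go
// a minimal sketch of a resolver that fails over to a peered cluster
func failoverToPeerEntry() *structs.ServiceResolverConfigEntry {
	return &structs.ServiceResolverConfigEntry{
		Kind: structs.ServiceResolver,
		Name: "db",
		Failover: map[string]structs.ServiceResolverFailover{
			// "*" covers every subset of the service.
			"*": {
				Targets: []structs.ServiceResolverFailoverTarget{
					{Peer: "cluster-01"}, // fail over to services imported from the peer
				},
			},
		},
	}
}
```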
@@ -9,7 +9,9 @@ import (
 	"github.com/mitchellh/mapstructure"

 	"github.com/hashicorp/consul/acl"
+	cachetype "github.com/hashicorp/consul/agent/cache-types"
 	"github.com/hashicorp/consul/agent/structs"
+	"github.com/hashicorp/consul/proto/pbpeering"
 )

 type handlerUpstreams struct {

@@ -21,9 +23,10 @@ func (s *handlerUpstreams) handleUpdateUpstreams(ctx context.Context, u UpdateEv
 		return fmt.Errorf("error filling agent cache: %v", u.Err)
 	}

-	upstreamsSnapshot := &snap.ConnectProxy.ConfigSnapshotUpstreams
-	if snap.Kind == structs.ServiceKindIngressGateway {
-		upstreamsSnapshot = &snap.IngressGateway.ConfigSnapshotUpstreams
+	upstreamsSnapshot, err := snap.ToConfigSnapshotUpstreams()
+	if err != nil {
+		return err
 	}

 	switch {
@@ -98,19 +101,16 @@ func (s *handlerUpstreams) handleUpdateUpstreams(ctx context.Context, u UpdateEv
 
 		uid := UpstreamIDFromString(uidString)
 
-		filteredNodes := hostnameEndpoints(
-			s.logger,
-			GatewayKey{ /*empty so it never matches*/ },
-			resp.Nodes,
-		)
-		if len(filteredNodes) > 0 {
-			if set := upstreamsSnapshot.PeerUpstreamEndpoints.Set(uid, filteredNodes); set {
-				upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames[uid] = struct{}{}
-			}
-		} else {
-			if set := upstreamsSnapshot.PeerUpstreamEndpoints.Set(uid, resp.Nodes); set {
-				delete(upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames, uid)
-			}
-		}
+		s.setPeerEndpoints(upstreamsSnapshot, uid, resp.Nodes)
+
+	case strings.HasPrefix(u.CorrelationID, peerTrustBundleIDPrefix):
+		resp, ok := u.Result.(*pbpeering.TrustBundleReadResponse)
+		if !ok {
+			return fmt.Errorf("invalid type for response: %T", u.Result)
+		}
+		peer := strings.TrimPrefix(u.CorrelationID, peerTrustBundleIDPrefix)
+		if resp.Bundle != nil {
+			upstreamsSnapshot.UpstreamPeerTrustBundles.Set(peer, resp.Bundle)
+		}
 
 	case strings.HasPrefix(u.CorrelationID, "upstream-target:"):
@@ -216,6 +216,23 @@ func removeColonPrefix(s string) (string, string, bool) {
 	return s[0:idx], s[idx+1:], true
 }
 
+func (s *handlerUpstreams) setPeerEndpoints(upstreamsSnapshot *ConfigSnapshotUpstreams, uid UpstreamID, nodes structs.CheckServiceNodes) {
+	filteredNodes := hostnameEndpoints(
+		s.logger,
+		GatewayKey{ /*empty so it never matches*/ },
+		nodes,
+	)
+	if len(filteredNodes) > 0 {
+		if set := upstreamsSnapshot.PeerUpstreamEndpoints.Set(uid, filteredNodes); set {
+			upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames[uid] = struct{}{}
+		}
+	} else {
+		if set := upstreamsSnapshot.PeerUpstreamEndpoints.Set(uid, nodes); set {
+			delete(upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames, uid)
+		}
+	}
+}
+
 func (s *handlerUpstreams) resetWatchesFromChain(
 	ctx context.Context,
 	uid UpstreamID,
@@ -255,6 +272,12 @@ func (s *handlerUpstreams) resetWatchesFromChain(
 		delete(snap.WatchedUpstreams[uid], targetID)
 		delete(snap.WatchedUpstreamEndpoints[uid], targetID)
 		cancelFn()
+
+		targetUID := NewUpstreamIDFromTargetID(targetID)
+		if targetUID.Peer != "" {
+			snap.PeerUpstreamEndpoints.CancelWatch(targetUID)
+			snap.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer)
+		}
 	}
 
 	var (
@@ -274,6 +297,7 @@ func (s *handlerUpstreams) resetWatchesFromChain(
 			service:    target.Service,
 			filter:     target.Subset.Filter,
 			datacenter: target.Datacenter,
+			peer:       target.Peer,
 			entMeta:    target.GetEnterpriseMetadata(),
 		}
 		err := s.watchUpstreamTarget(ctx, snap, opts)
@@ -384,6 +408,7 @@ type targetWatchOpts struct {
 	service    string
 	filter     string
 	datacenter string
+	peer       string
 	entMeta    *acl.EnterpriseMeta
 }
 
@@ -397,11 +422,17 @@ func (s *handlerUpstreams) watchUpstreamTarget(ctx context.Context, snap *Config
 	var finalMeta acl.EnterpriseMeta
 	finalMeta.Merge(opts.entMeta)
 
-	correlationID := "upstream-target:" + opts.chainID + ":" + opts.upstreamID.String()
+	uid := opts.upstreamID
+	correlationID := "upstream-target:" + opts.chainID + ":" + uid.String()
+
+	if opts.peer != "" {
+		uid = NewUpstreamIDFromTargetID(opts.chainID)
+		correlationID = upstreamPeerWatchIDPrefix + uid.String()
+	}
 
 	ctx, cancel := context.WithCancel(ctx)
 	err := s.dataSources.Health.Notify(ctx, &structs.ServiceSpecificRequest{
-		PeerName:   opts.upstreamID.Peer,
+		PeerName:   opts.peer,
 		Datacenter: opts.datacenter,
 		QueryOptions: structs.QueryOptions{
 			Token: s.token,
@@ -422,6 +453,31 @@ func (s *handlerUpstreams) watchUpstreamTarget(ctx context.Context, snap *Config
 	}
 	snap.WatchedUpstreams[opts.upstreamID][opts.chainID] = cancel
 
+	if uid.Peer == "" {
+		return nil
+	}
+
+	if ok := snap.PeerUpstreamEndpoints.IsWatched(uid); !ok {
+		snap.PeerUpstreamEndpoints.InitWatch(uid, cancel)
+	}
+
+	// Check whether a watch for this peer exists to avoid duplicates.
+	if ok := snap.UpstreamPeerTrustBundles.IsWatched(uid.Peer); !ok {
+		peerCtx, cancel := context.WithCancel(ctx)
+		if err := s.dataSources.TrustBundle.Notify(peerCtx, &cachetype.TrustBundleReadRequest{
+			Request: &pbpeering.TrustBundleReadRequest{
+				Name:      uid.Peer,
+				Partition: uid.PartitionOrDefault(),
+			},
+			QueryOptions: structs.QueryOptions{Token: s.token},
+		}, peerTrustBundleIDPrefix+uid.Peer, s.ch); err != nil {
+			cancel()
+			return fmt.Errorf("error while watching trust bundle for peer %q: %w", uid.Peer, err)
+		}
+
+		snap.UpstreamPeerTrustBundles.InitWatch(uid.Peer, cancel)
+	}
+
 	return nil
 }
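The `IsWatched`/`InitWatch` pairing above guards against registering a second trust-bundle watch when several upstream targets share the same peer, and `CancelWatch` (used in the reset path earlier) is its inverse. A minimal sketch of that idiom reduced to a bare map — the real container in proxycfg also carries the watched data alongside each cancel function:

```go
// Illustrative only: the IsWatched/InitWatch/CancelWatch idiom with a
// plain map keyed by peer name.
type watchSet struct {
	cancels map[string]context.CancelFunc // one entry per active watch
}

func (w *watchSet) IsWatched(key string) bool {
	_, ok := w.cancels[key]
	return ok
}

func (w *watchSet) InitWatch(key string, cancel context.CancelFunc) {
	w.cancels[key] = cancel
}

func (w *watchSet) CancelWatch(key string) {
	if cancel, ok := w.cancels[key]; ok {
		cancel() // stops the underlying Notify goroutine
		delete(w.cancels, key)
	}
}
```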
@@ -726,11 +726,12 @@ func (s *Server) PeeringDelete(ctx context.Context, req *pbpeering.PeeringDelete
 		return nil, err
 	}
 
-	if !existing.IsActive() {
+	if existing == nil || existing.State == pbpeering.PeeringState_DELETING {
 		// Return early when the Peering doesn't exist or is already marked for deletion.
 		// We don't return nil because the pb will fail to marshal.
 		return &pbpeering.PeeringDeleteResponse{}, nil
 	}
 
 	// We are using a write request due to needing to perform a deferred deletion.
 	// The peering gets marked for deletion by setting the DeletedAt field,
 	// and a leader routine will handle deleting the peering.
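This is the fix for the TERMINATED-peering bug called out in the changelog: `IsActive()` also reports false for TERMINATED peerings, so the old guard returned early and the peering was never marked for deletion. The new condition only short-circuits the two genuinely no-op cases. In miniature (the body of `IsActive` is not shown in this diff; this contrast is an assumption for illustration):

```go
// Assumed-for-illustration contrast between the two guards:
// old: `!existing.IsActive()` skipped nil, DELETING, and TERMINATED
//      peerings, making TERMINATED peerings impossible to delete/recreate.
// new: skip only when there is nothing to do.
func shouldSkipDelete(p *pbpeering.Peering) bool {
	return p == nil || p.State == pbpeering.PeeringState_DELETING
}
```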
@@ -621,38 +621,50 @@ func TestPeeringService_Read_ACLEnforcement(t *testing.T) {
 }
 
 func TestPeeringService_Delete(t *testing.T) {
-	// TODO(peering): see note on newTestServer, refactor to not use this
-	s := newTestServer(t, nil)
-
-	p := &pbpeering.Peering{
-		ID:                  testUUID(t),
-		Name:                "foo",
-		State:               pbpeering.PeeringState_ESTABLISHING,
-		PeerCAPems:          nil,
-		PeerServerName:      "test",
-		PeerServerAddresses: []string{"addr1"},
-	}
-	err := s.Server.FSM().State().PeeringWrite(10, &pbpeering.PeeringWriteRequest{Peering: p})
-	require.NoError(t, err)
-	require.Nil(t, p.DeletedAt)
-	require.True(t, p.IsActive())
-
-	client := pbpeering.NewPeeringServiceClient(s.ClientConn(t))
-
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-	t.Cleanup(cancel)
-
-	_, err = client.PeeringDelete(ctx, &pbpeering.PeeringDeleteRequest{Name: "foo"})
-	require.NoError(t, err)
-
-	retry.Run(t, func(r *retry.R) {
-		_, resp, err := s.Server.FSM().State().PeeringRead(nil, state.Query{Value: "foo"})
-		require.NoError(r, err)
-
-		// Initially the peering will be marked for deletion but eventually the leader
-		// routine will clean it up.
-		require.Nil(r, resp)
-	})
+	tt := map[string]pbpeering.PeeringState{
+		"active peering":     pbpeering.PeeringState_ACTIVE,
+		"terminated peering": pbpeering.PeeringState_TERMINATED,
+	}
+
+	for name, overrideState := range tt {
+		t.Run(name, func(t *testing.T) {
+			// TODO(peering): see note on newTestServer, refactor to not use this
+			s := newTestServer(t, nil)
+
+			// A pointer is kept for the following peering so that we can modify the object without another PeeringWrite.
+			p := &pbpeering.Peering{
+				ID:                  testUUID(t),
+				Name:                "foo",
+				PeerCAPems:          nil,
+				PeerServerName:      "test",
+				PeerServerAddresses: []string{"addr1"},
+			}
+			err := s.Server.FSM().State().PeeringWrite(10, &pbpeering.PeeringWriteRequest{Peering: p})
+			require.NoError(t, err)
+			require.Nil(t, p.DeletedAt)
+			require.True(t, p.IsActive())
+
+			// Overwrite the peering state to simulate deleting from a non-initial state.
+			p.State = overrideState
+
+			client := pbpeering.NewPeeringServiceClient(s.ClientConn(t))
+
+			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+			t.Cleanup(cancel)
+
+			_, err = client.PeeringDelete(ctx, &pbpeering.PeeringDeleteRequest{Name: "foo"})
+			require.NoError(t, err)
+
+			retry.Run(t, func(r *retry.R) {
+				_, resp, err := s.Server.FSM().State().PeeringRead(nil, state.Query{Value: "foo"})
+				require.NoError(r, err)
+
+				// Initially the peering will be marked for deletion but eventually the leader
+				// routine will clean it up.
+				require.Nil(r, resp)
+			})
+		})
+	}
 }
 
 func TestPeeringService_Delete_ACLEnforcement(t *testing.T) {
@@ -127,9 +127,20 @@ func (a *Agent) sidecarServiceFromNodeService(ns *structs.NodeService, token str
 	if err != nil {
 		return nil, nil, "", err
 	}
-	// Setup default check if none given
+	// Setup default check if none given.
 	if len(checks) < 1 {
-		checks = sidecarDefaultChecks(ns.ID, sidecar.Proxy.LocalServiceAddress, sidecar.Port)
+		// The check should use the sidecar's address because it makes a request to the sidecar.
+		// If the sidecar's address is empty, we fall back to the address of the local service, as set in
+		// sidecar.Proxy.LocalServiceAddress, in the hope that the proxy is also accessible on that address
+		// (which in most cases it is because it's running as a sidecar in the same network).
+		// We could instead fall back to the address of the service as set by (ns.Address), but I've kept it using
+		// sidecar.Proxy.LocalServiceAddress so as to not change things too much in the
+		// process of fixing #14433.
+		checkAddress := sidecar.Address
+		if checkAddress == "" {
+			checkAddress = sidecar.Proxy.LocalServiceAddress
+		}
+		checks = sidecarDefaultChecks(ns.ID, checkAddress, sidecar.Port)
 	}
 
 	return sidecar, checks, token, nil
@@ -202,14 +213,11 @@ func (a *Agent) sidecarPortFromServiceID(sidecarCompoundServiceID structs.Servic
 	return sidecarPort, nil
 }
 
-func sidecarDefaultChecks(serviceID string, localServiceAddress string, port int) []*structs.CheckType {
-	// Setup default check if none given
+func sidecarDefaultChecks(serviceID string, address string, port int) []*structs.CheckType {
 	return []*structs.CheckType{
 		{
 			Name: "Connect Sidecar Listening",
-			// Default to localhost rather than agent/service public IP. The checks
-			// can always be overridden if a non-loopback IP is needed.
-			TCP: ipaddr.FormatAddressPort(localServiceAddress, port),
+			TCP:      ipaddr.FormatAddressPort(address, port),
 			Interval: 10 * time.Second,
 		},
 		{
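Taken together, the two hunks above mean the default TCP check now dials, in order of preference, the sidecar's own address and then the proxy's local service address. A condensed restatement of that selection (the `127.0.0.1` case exercised in the tests below comes from `LocalServiceAddress` defaulting to loopback elsewhere in the agent; that default is assumed here, and the helper name is illustrative):

```go
// Condensed restatement of the fallback implemented above.
func sidecarCheckAddress(sidecar *structs.NodeService) string {
	if sidecar.Address != "" {
		return sidecar.Address // the check dials the sidecar itself
	}
	// Fall back to the address the proxy forwards to, which is usually
	// reachable because the proxy runs alongside the service.
	return sidecar.Proxy.LocalServiceAddress
}
```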
@@ -215,6 +215,141 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) {
 			token:   "foo",
 			wantErr: "reserved for internal use",
 		},
+		{
+			name: "uses proxy address for check",
+			sd: &structs.ServiceDefinition{
+				ID:   "web1",
+				Name: "web",
+				Port: 1111,
+				Connect: &structs.ServiceConnect{
+					SidecarService: &structs.ServiceDefinition{
+						Address: "123.123.123.123",
+						Proxy: &structs.ConnectProxyConfig{
+							LocalServiceAddress: "255.255.255.255",
+						},
+					},
+				},
+				Address: "255.255.255.255",
+			},
+			token: "foo",
+			wantNS: &structs.NodeService{
+				EnterpriseMeta:             *structs.DefaultEnterpriseMetaInDefaultPartition(),
+				Kind:                       structs.ServiceKindConnectProxy,
+				ID:                         "web1-sidecar-proxy",
+				Service:                    "web-sidecar-proxy",
+				Port:                       2222,
+				Address:                    "123.123.123.123",
+				LocallyRegisteredAsSidecar: true,
+				Proxy: structs.ConnectProxyConfig{
+					DestinationServiceName: "web",
+					DestinationServiceID:   "web1",
+					LocalServiceAddress:    "255.255.255.255",
+					LocalServicePort:       1111,
+				},
+			},
+			wantChecks: []*structs.CheckType{
+				{
+					Name:     "Connect Sidecar Listening",
+					TCP:      "123.123.123.123:2222",
+					Interval: 10 * time.Second,
+				},
+				{
+					Name:         "Connect Sidecar Aliasing web1",
+					AliasService: "web1",
+				},
+			},
+			wantToken: "foo",
+		},
+		{
+			name: "uses proxy.local_service_address for check if proxy address is empty",
+			sd: &structs.ServiceDefinition{
+				ID:   "web1",
+				Name: "web",
+				Port: 1111,
+				Connect: &structs.ServiceConnect{
+					SidecarService: &structs.ServiceDefinition{
+						Address: "", // Proxy address empty.
+						Proxy: &structs.ConnectProxyConfig{
+							LocalServiceAddress: "1.2.3.4",
+						},
+					},
+				},
+				Address: "", // Service address empty.
+			},
+			token: "foo",
+			wantNS: &structs.NodeService{
+				EnterpriseMeta:             *structs.DefaultEnterpriseMetaInDefaultPartition(),
+				Kind:                       structs.ServiceKindConnectProxy,
+				ID:                         "web1-sidecar-proxy",
+				Service:                    "web-sidecar-proxy",
+				Port:                       2222,
+				Address:                    "",
+				LocallyRegisteredAsSidecar: true,
+				Proxy: structs.ConnectProxyConfig{
+					DestinationServiceName: "web",
+					DestinationServiceID:   "web1",
+					LocalServiceAddress:    "1.2.3.4",
+					LocalServicePort:       1111,
+				},
+			},
+			wantChecks: []*structs.CheckType{
+				{
+					Name:     "Connect Sidecar Listening",
+					TCP:      "1.2.3.4:2222",
+					Interval: 10 * time.Second,
+				},
+				{
+					Name:         "Connect Sidecar Aliasing web1",
+					AliasService: "web1",
+				},
+			},
+			wantToken: "foo",
+		},
+		{
+			name: "uses 127.0.0.1 for check if proxy and proxy.local_service_address are empty",
+			sd: &structs.ServiceDefinition{
+				ID:   "web1",
+				Name: "web",
+				Port: 1111,
+				Connect: &structs.ServiceConnect{
+					SidecarService: &structs.ServiceDefinition{
+						Address: "",
+						Proxy: &structs.ConnectProxyConfig{
+							LocalServiceAddress: "",
+						},
+					},
+				},
+				Address: "",
+			},
+			token: "foo",
+			wantNS: &structs.NodeService{
+				EnterpriseMeta:             *structs.DefaultEnterpriseMetaInDefaultPartition(),
+				Kind:                       structs.ServiceKindConnectProxy,
+				ID:                         "web1-sidecar-proxy",
+				Service:                    "web-sidecar-proxy",
+				Port:                       2222,
+				Address:                    "",
+				LocallyRegisteredAsSidecar: true,
+				Proxy: structs.ConnectProxyConfig{
+					DestinationServiceName: "web",
+					DestinationServiceID:   "web1",
+					LocalServiceAddress:    "127.0.0.1",
+					LocalServicePort:       1111,
+				},
+			},
+			wantChecks: []*structs.CheckType{
+				{
+					Name:     "Connect Sidecar Listening",
+					TCP:      "127.0.0.1:2222",
+					Interval: 10 * time.Second,
+				},
+				{
+					Name:         "Connect Sidecar Aliasing web1",
+					AliasService: "web1",
+				},
+			},
+			wantToken: "foo",
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
@@ -3,12 +3,13 @@ package structs
 import (
 	"errors"
 	"fmt"
-	"github.com/miekg/dns"
 	"net"
 	"strconv"
 	"strings"
 	"time"
 
+	"github.com/miekg/dns"
+
 	"github.com/hashicorp/go-multierror"
 	"github.com/mitchellh/hashstructure"
 	"github.com/mitchellh/mapstructure"
@@ -362,6 +363,13 @@ func (e *ProxyConfigEntry) Normalize() error {
 	}
 
 	e.Kind = ProxyDefaults
+
+	// proxy default config only accepts global configs
+	// this check is replicated in normalize() and validate(),
+	// since validate is not called by all the endpoints (e.g., delete)
+	if e.Name != "" && e.Name != ProxyConfigGlobal {
+		return fmt.Errorf("invalid name (%q), only %q is supported", e.Name, ProxyConfigGlobal)
+	}
 	e.Name = ProxyConfigGlobal
 
 	e.EnterpriseMeta.Normalize()
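Because `Validate` is not invoked by every endpoint (delete, notably), the name restriction has to live in both methods, as the comment says. The counterpart on the validate side would be roughly the following — a sketch only, since the actual `Validate` body is not part of this diff:

```go
// Sketch of the replicated guard on the Validate side (assumed shape;
// the helper name is illustrative).
func (e *ProxyConfigEntry) validateName() error {
	if e.Name != "" && e.Name != ProxyConfigGlobal {
		return fmt.Errorf("invalid name (%q), only %q is supported", e.Name, ProxyConfigGlobal)
	}
	return nil
}
```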
@@ -961,6 +969,11 @@ type PassiveHealthCheck struct {
 	// MaxFailures is the count of consecutive failures that results in a host
 	// being removed from the pool.
 	MaxFailures uint32 `json:",omitempty" alias:"max_failures"`
+
+	// EnforcingConsecutive5xx is the % chance that a host will be actually ejected
+	// when an outlier status is detected through consecutive 5xx.
+	// This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.
+	EnforcingConsecutive5xx *uint32 `json:",omitempty" alias:"enforcing_consecutive_5xx"`
 }
 
 func (chk *PassiveHealthCheck) Clone() *PassiveHealthCheck {
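As a usage sketch, the new knob rides along with the existing passive health check fields in an upstream's config map, mirroring the `custom-passive-healthcheck` test case added later in this diff; keys follow the `alias` struct tags above:

```go
// Sketch: an upstream config map exercising the new field. Values are
// float64 because that is how the JSON decoder hands them to the parser.
upstreamConfig := map[string]interface{}{
	"passive_health_check": map[string]interface{}{
		"max_failures":              float64(5),
		"interval":                  float64(10),
		"enforcing_consecutive_5xx": float64(80), // eject only 80% of the time
	},
}
_ = upstreamConfig
```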
@@ -2754,8 +2754,9 @@ func TestUpstreamConfig_MergeInto(t *testing.T) {
 					MaxConcurrentRequests: intPointer(12),
 				},
 				"passive_health_check": &PassiveHealthCheck{
 					MaxFailures: 13,
 					Interval:    14 * time.Second,
+					EnforcingConsecutive5xx: uintPointer(80),
 				},
 				"mesh_gateway": MeshGatewayConfig{Mode: MeshGatewayModeLocal},
 			},
@@ -2770,8 +2771,9 @@ func TestUpstreamConfig_MergeInto(t *testing.T) {
 					MaxConcurrentRequests: intPointer(12),
 				},
 				"passive_health_check": &PassiveHealthCheck{
 					MaxFailures: 13,
 					Interval:    14 * time.Second,
+					EnforcingConsecutive5xx: uintPointer(80),
 				},
 				"mesh_gateway": MeshGatewayConfig{Mode: MeshGatewayModeLocal},
 			},
@@ -2944,6 +2946,28 @@ func TestParseUpstreamConfig(t *testing.T) {
 		}
 	}
 }
+
+func TestProxyConfigEntry(t *testing.T) {
+	cases := map[string]configEntryTestcase{
+		"proxy config name provided is not global": {
+			entry: &ProxyConfigEntry{
+				Name: "foo",
+			},
+			normalizeErr: `invalid name ("foo"), only "global" is supported`,
+		},
+		"proxy config has no name": {
+			entry: &ProxyConfigEntry{
+				Name: "",
+			},
+			expected: &ProxyConfigEntry{
+				Name:           ProxyConfigGlobal,
+				Kind:           ProxyDefaults,
+				EnterpriseMeta: *acl.DefaultEnterpriseMeta(),
+			},
+		},
+	}
+	testConfigEntryNormalizeAndValidate(t, cases)
+}
 
 func requireContainsLower(t *testing.T, haystack, needle string) {
 	t.Helper()
 	require.Contains(t, strings.ToLower(haystack), strings.ToLower(needle))
@@ -3046,3 +3070,7 @@ func testConfigEntryNormalizeAndValidate(t *testing.T, cases map[string]configEn
 		})
 	}
 }
+
+func uintPointer(v uint32) *uint32 {
+	return &v
+}
@@ -53,6 +53,28 @@ func TestNodeServiceWithName(t testing.T, name string) *NodeService {
 	}
 }
 
+const peerTrustDomain = "1c053652-8512-4373-90cf-5a7f6263a994.consul"
+
+func TestNodeServiceWithNameInPeer(t testing.T, name string, peer string) *NodeService {
+	service := "payments"
+	return &NodeService{
+		Kind:    ServiceKindTypical,
+		Service: name,
+		Port:    8080,
+		Connect: ServiceConnect{
+			PeerMeta: &PeeringServiceMeta{
+				SNI: []string{
+					service + ".default.default." + peer + ".external." + peerTrustDomain,
+				},
+				SpiffeID: []string{
+					"spiffe://" + peerTrustDomain + "/ns/default/dc/" + peer + "-dc/svc/" + service,
+				},
+				Protocol: "tcp",
+			},
+		},
+	}
+}
+
 // TestNodeServiceProxy returns a *NodeService representing a valid
 // Connect proxy.
 func TestNodeServiceProxy(t testing.T) *NodeService {
@@ -26,7 +26,7 @@ func TestUpstreams(t testing.T) Upstreams {
 			Config: map[string]interface{}{
 				// Float because this is how it is decoded by JSON decoder so this
 				// enables the value returned to be compared directly to a decoded JSON
-				// response without spurios type loss.
+				// response without spurious type loss.
 				"connect_timeout_ms": float64(1000),
 			},
 		},
@@ -185,6 +185,7 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) (
 				Address:         node.Address,
 				Datacenter:      node.Datacenter,
 				TaggedAddresses: node.TaggedAddresses,
+				PeerName:        node.PeerName,
 				Meta:            node.Meta,
 				RaftIndex: structs.RaftIndex{
 					ModifyIndex: node.ModifyIndex,
@@ -207,6 +208,7 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) (
 			Service: structs.NodeService{
 				ID:      svc.ID,
 				Service: svc.Service,
+				Kind:    structs.ServiceKind(svc.Kind),
 				Tags:    svc.Tags,
 				Address: svc.Address,
 				Meta:    svc.Meta,
@@ -226,6 +228,39 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) (
 					},
 				},
 			}
+
+			if svc.Proxy != nil {
+				out.Service.Service.Proxy = structs.ConnectProxyConfig{}
+				t := &out.Service.Service.Proxy
+				if svc.Proxy.DestinationServiceName != "" {
+					t.DestinationServiceName = svc.Proxy.DestinationServiceName
+				}
+				if svc.Proxy.DestinationServiceID != "" {
+					t.DestinationServiceID = svc.Proxy.DestinationServiceID
+				}
+				if svc.Proxy.LocalServiceAddress != "" {
+					t.LocalServiceAddress = svc.Proxy.LocalServiceAddress
+				}
+				if svc.Proxy.LocalServicePort != 0 {
+					t.LocalServicePort = svc.Proxy.LocalServicePort
+				}
+				if svc.Proxy.LocalServiceSocketPath != "" {
+					t.LocalServiceSocketPath = svc.Proxy.LocalServiceSocketPath
+				}
+				if svc.Proxy.MeshGateway.Mode != "" {
+					t.MeshGateway.Mode = structs.MeshGatewayMode(svc.Proxy.MeshGateway.Mode)
+				}
+
+				if svc.Proxy.TransparentProxy != nil {
+					if svc.Proxy.TransparentProxy.DialedDirectly {
+						t.TransparentProxy.DialedDirectly = svc.Proxy.TransparentProxy.DialedDirectly
+					}
+
+					if svc.Proxy.TransparentProxy.OutboundListenerPort != 0 {
+						t.TransparentProxy.OutboundListenerPort = svc.Proxy.TransparentProxy.OutboundListenerPort
+					}
+				}
+			}
 			opsRPC = append(opsRPC, out)
 
 		case in.Check != nil:
@@ -265,6 +300,8 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) (
 				ServiceID:   check.ServiceID,
 				ServiceName: check.ServiceName,
 				ServiceTags: check.ServiceTags,
+				PeerName:    check.PeerName,
+				ExposedPort: check.ExposedPort,
 				Definition: structs.HealthCheckDefinition{
 					HTTP:          check.Definition.HTTP,
 					TLSServerName: check.Definition.TLSServerName,
@@ -585,6 +585,7 @@ func TestTxnEndpoint_UpdateCheck(t *testing.T) {
 	"Output": "success",
 	"ServiceID": "",
 	"ServiceName": "",
+	"ExposedPort": 5678,
 	"Definition": {
 		"IntervalDuration": "15s",
 		"TimeoutDuration": "15s",
|
||||||
req, _ := http.NewRequest("PUT", "/v1/txn", buf)
|
req, _ := http.NewRequest("PUT", "/v1/txn", buf)
|
||||||
resp := httptest.NewRecorder()
|
resp := httptest.NewRecorder()
|
||||||
obj, err := a.srv.Txn(resp, req)
|
obj, err := a.srv.Txn(resp, req)
|
||||||
if err != nil {
|
require.NoError(t, err)
|
||||||
t.Fatalf("err: %v", err)
|
require.Equal(t, 200, resp.Code, resp.Body)
|
||||||
}
|
|
||||||
if resp.Code != 200 {
|
|
||||||
t.Fatalf("expected 200, got %d", resp.Code)
|
|
||||||
}
|
|
||||||
|
|
||||||
txnResp, ok := obj.(structs.TxnResponse)
|
txnResp, ok := obj.(structs.TxnResponse)
|
||||||
if !ok {
|
if !ok {
|
||||||
|
@@ -662,12 +659,13 @@ func TestTxnEndpoint_UpdateCheck(t *testing.T) {
 			},
 			&structs.TxnResult{
 				Check: &structs.HealthCheck{
 					Node:    a.config.NodeName,
 					CheckID: "nodecheck",
 					Name:    "Node http check",
 					Status:  api.HealthPassing,
 					Notes:   "Http based health check",
 					Output:  "success",
+					ExposedPort: 5678,
 					Definition: structs.HealthCheckDefinition{
 						Interval: 15 * time.Second,
 						Timeout:  15 * time.Second,
@@ -686,3 +684,117 @@ func TestTxnEndpoint_UpdateCheck(t *testing.T) {
 	}
 	assert.Equal(t, expected, txnResp)
 }
+
+func TestTxnEndpoint_NodeService(t *testing.T) {
+	if testing.Short() {
+		t.Skip("too slow for testing.Short")
+	}
+
+	t.Parallel()
+	a := NewTestAgent(t, "")
+	defer a.Shutdown()
+	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
+
+	// Make sure the fields of a check are handled correctly when both creating and
+	// updating, and test both sets of duration fields to ensure backwards compatibility.
+	buf := bytes.NewBuffer([]byte(fmt.Sprintf(`
+[
+	{
+		"Service": {
+			"Verb": "set",
+			"Node": "%s",
+			"Service": {
+				"Service": "test",
+				"Port": 4444
+			}
+		}
+	},
+	{
+		"Service": {
+			"Verb": "set",
+			"Node": "%s",
+			"Service": {
+				"Service": "test-sidecar-proxy",
+				"Port": 20000,
+				"Kind": "connect-proxy",
+				"Proxy": {
+					"DestinationServiceName": "test",
+					"DestinationServiceID": "test",
+					"LocalServiceAddress": "127.0.0.1",
+					"LocalServicePort": 4444,
+					"upstreams": [
+						{
+							"DestinationName": "fake-backend",
+							"LocalBindPort": 25001
+						}
+					]
+				}
+			}
+		}
+	}
+]
+`, a.config.NodeName, a.config.NodeName)))
+	req, _ := http.NewRequest("PUT", "/v1/txn", buf)
+	resp := httptest.NewRecorder()
+	obj, err := a.srv.Txn(resp, req)
+	require.NoError(t, err)
+	require.Equal(t, 200, resp.Code)
+
+	txnResp, ok := obj.(structs.TxnResponse)
+	if !ok {
+		t.Fatalf("bad type: %T", obj)
+	}
+	require.Equal(t, 2, len(txnResp.Results))
+
+	index := txnResp.Results[0].Service.ModifyIndex
+	expected := structs.TxnResponse{
+		Results: structs.TxnResults{
+			&structs.TxnResult{
+				Service: &structs.NodeService{
+					Service: "test",
+					ID:      "test",
+					Port:    4444,
+					Weights: &structs.Weights{
+						Passing: 1,
+						Warning: 1,
+					},
+					RaftIndex: structs.RaftIndex{
+						CreateIndex: index,
+						ModifyIndex: index,
+					},
+					EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
+				},
+			},
+			&structs.TxnResult{
+				Service: &structs.NodeService{
+					Service: "test-sidecar-proxy",
+					ID:      "test-sidecar-proxy",
+					Port:    20000,
+					Kind:    "connect-proxy",
+					Weights: &structs.Weights{
+						Passing: 1,
+						Warning: 1,
+					},
+					Proxy: structs.ConnectProxyConfig{
+						DestinationServiceName: "test",
+						DestinationServiceID:   "test",
+						LocalServiceAddress:    "127.0.0.1",
+						LocalServicePort:       4444,
+					},
+					TaggedAddresses: map[string]structs.ServiceAddress{
+						"consul-virtual": {
+							Address: "240.0.0.1",
+							Port:    20000,
+						},
+					},
+					RaftIndex: structs.RaftIndex{
+						CreateIndex: index,
+						ModifyIndex: index,
+					},
+					EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
+				},
+			},
+		},
+	}
+	assert.Equal(t, expected, txnResp)
+}
@@ -211,7 +211,9 @@ func (s *HTTPHandlers) UIServices(resp http.ResponseWriter, req *http.Request) (
 	if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
 		return nil, nil
 	}
+	if peer := req.URL.Query().Get("peer"); peer != "" {
+		args.PeerName = peer
+	}
 	if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil {
 		return nil, err
 	}
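With this change the UI services endpoint accepts an optional `peer` query parameter and scopes the listing to that peer's imported services. A hedged client-side sketch — the endpoint path follows the handler above, while the agent address is an assumption:

```go
// Illustrative request against the UI services endpoint with a peer filter.
resp, err := http.Get("http://127.0.0.1:8500/v1/internal/ui/services?peer=cluster-01")
if err != nil {
	log.Fatal(err)
}
defer resp.Body.Close()
```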
@@ -88,29 +88,26 @@ func (s *ResourceGenerator) clustersFromSnapshotConnectProxy(cfgSnap *proxycfg.C
 		clusters = append(clusters, passthroughs...)
 	}
 
-	// NOTE: Any time we skip a chain below we MUST also skip that discovery chain in endpoints.go
-	// so that the sets of endpoints generated matches the sets of clusters.
-	for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
+	getUpstream := func(uid proxycfg.UpstreamID) (*structs.Upstream, bool) {
 		upstream := cfgSnap.ConnectProxy.UpstreamConfig[uid]
 
 		explicit := upstream.HasLocalPortOrSocket()
 		implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid)
-		if !implicit && !explicit {
-			// Discovery chain is not associated with a known explicit or implicit upstream so it is skipped.
-			continue
-		}
+		return upstream, !implicit && !explicit
+	}
 
-		chainEndpoints, ok := cfgSnap.ConnectProxy.WatchedUpstreamEndpoints[uid]
-		if !ok {
-			// this should not happen
-			return nil, fmt.Errorf("no endpoint map for upstream %q", uid)
+	// NOTE: Any time we skip a chain below we MUST also skip that discovery chain in endpoints.go
+	// so that the sets of endpoints generated matches the sets of clusters.
+	for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
+		upstream, skip := getUpstream(uid)
+		if skip {
+			continue
 		}
 
 		upstreamClusters, err := s.makeUpstreamClustersForDiscoveryChain(
 			uid,
 			upstream,
 			chain,
-			chainEndpoints,
 			cfgSnap,
 			false,
 		)
@@ -127,18 +124,15 @@ func (s *ResourceGenerator) clustersFromSnapshotConnectProxy(cfgSnap *proxycfg.C
 	// upstream in endpoints.go so that the sets of endpoints generated matches
 	// the sets of clusters.
 	for _, uid := range cfgSnap.ConnectProxy.PeeredUpstreamIDs() {
-		upstreamCfg := cfgSnap.ConnectProxy.UpstreamConfig[uid]
-
-		explicit := upstreamCfg.HasLocalPortOrSocket()
-		implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid)
-		if !implicit && !explicit {
-			// Not associated with a known explicit or implicit upstream so it is skipped.
+		upstream, skip := getUpstream(uid)
+		if skip {
 			continue
 		}
 
 		peerMeta := cfgSnap.ConnectProxy.UpstreamPeerMeta(uid)
+		cfg := s.getAndModifyUpstreamConfigForPeeredListener(uid, upstream, peerMeta)
 
-		upstreamCluster, err := s.makeUpstreamClusterForPeerService(uid, upstreamCfg, peerMeta, cfgSnap)
+		upstreamCluster, err := s.makeUpstreamClusterForPeerService(uid, cfg, peerMeta, cfgSnap)
 		if err != nil {
 			return nil, err
 		}
@@ -652,17 +646,10 @@ func (s *ResourceGenerator) clustersFromSnapshotIngressGateway(cfgSnap *proxycfg
 			return nil, fmt.Errorf("no discovery chain for upstream %q", uid)
 		}
 
-		chainEndpoints, ok := cfgSnap.IngressGateway.WatchedUpstreamEndpoints[uid]
-		if !ok {
-			// this should not happen
-			return nil, fmt.Errorf("no endpoint map for upstream %q", uid)
-		}
-
 		upstreamClusters, err := s.makeUpstreamClustersForDiscoveryChain(
 			uid,
 			&u,
 			chain,
-			chainEndpoints,
 			cfgSnap,
 			false,
 		)
@@ -745,7 +732,7 @@ func (s *ResourceGenerator) makeAppCluster(cfgSnap *proxycfg.ConfigSnapshot, nam
 
 func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
 	uid proxycfg.UpstreamID,
-	upstream *structs.Upstream,
+	upstreamConfig structs.UpstreamConfig,
 	peerMeta structs.PeeringServiceMeta,
 	cfgSnap *proxycfg.ConfigSnapshot,
 ) (*envoy_cluster_v3.Cluster, error) {
@@ -754,16 +741,21 @@ func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
 		err error
 	)
 
-	cfg := s.getAndModifyUpstreamConfigForPeeredListener(uid, upstream, peerMeta)
-	if cfg.EnvoyClusterJSON != "" {
-		c, err = makeClusterFromUserConfig(cfg.EnvoyClusterJSON)
+	if upstreamConfig.EnvoyClusterJSON != "" {
+		c, err = makeClusterFromUserConfig(upstreamConfig.EnvoyClusterJSON)
 		if err != nil {
 			return c, err
 		}
 		// In the happy path don't return yet as we need to inject TLS config still.
 	}
 
-	tbs, ok := cfgSnap.ConnectProxy.UpstreamPeerTrustBundles.Get(uid.Peer)
+	upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
+
+	if err != nil {
+		return c, err
+	}
+
+	tbs, ok := upstreamsSnapshot.UpstreamPeerTrustBundles.Get(uid.Peer)
 	if !ok {
 		// this should never happen since we loop through upstreams with
 		// set trust bundles
@@ -772,22 +764,29 @@ func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
 
 	clusterName := generatePeeredClusterName(uid, tbs)
 
+	outlierDetection := ToOutlierDetection(upstreamConfig.PassiveHealthCheck)
+	// We can't rely on health checks for services on cluster peers because they
+	// don't take into account service resolvers, splitters and routers. Setting
+	// MaxEjectionPercent to 100% gives outlier detection the power to eject the
+	// entire cluster.
+	outlierDetection.MaxEjectionPercent = &wrappers.UInt32Value{Value: 100}
+
 	s.Logger.Trace("generating cluster for", "cluster", clusterName)
 	if c == nil {
 		c = &envoy_cluster_v3.Cluster{
 			Name:           clusterName,
-			ConnectTimeout: durationpb.New(time.Duration(cfg.ConnectTimeoutMs) * time.Millisecond),
+			ConnectTimeout: durationpb.New(time.Duration(upstreamConfig.ConnectTimeoutMs) * time.Millisecond),
 			CommonLbConfig: &envoy_cluster_v3.Cluster_CommonLbConfig{
 				HealthyPanicThreshold: &envoy_type_v3.Percent{
 					Value: 0, // disable panic threshold
 				},
 			},
 			CircuitBreakers: &envoy_cluster_v3.CircuitBreakers{
-				Thresholds: makeThresholdsIfNeeded(cfg.Limits),
+				Thresholds: makeThresholdsIfNeeded(upstreamConfig.Limits),
 			},
-			OutlierDetection: ToOutlierDetection(cfg.PassiveHealthCheck),
+			OutlierDetection: outlierDetection,
 		}
-		if cfg.Protocol == "http2" || cfg.Protocol == "grpc" {
+		if upstreamConfig.Protocol == "http2" || upstreamConfig.Protocol == "grpc" {
 			if err := s.setHttp2ProtocolOptions(c); err != nil {
 				return c, err
 			}
|
||||||
false, /*onlyPassing*/
|
false, /*onlyPassing*/
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
rootPEMs := cfgSnap.RootPEMs()
|
rootPEMs := cfgSnap.RootPEMs()
|
||||||
if uid.Peer != "" {
|
if uid.Peer != "" {
|
||||||
tbs, _ := cfgSnap.ConnectProxy.UpstreamPeerTrustBundles.Get(uid.Peer)
|
tbs, _ := upstreamsSnapshot.UpstreamPeerTrustBundles.Get(uid.Peer)
|
||||||
rootPEMs = tbs.ConcatenatedRootPEMs()
|
rootPEMs = tbs.ConcatenatedRootPEMs()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -961,7 +959,6 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
 	uid proxycfg.UpstreamID,
 	upstream *structs.Upstream,
 	chain *structs.CompiledDiscoveryChain,
-	chainEndpoints map[string]structs.CheckServiceNodes,
 	cfgSnap *proxycfg.ConfigSnapshot,
 	forMeshGateway bool,
 ) ([]*envoy_cluster_v3.Cluster, error) {
@@ -978,7 +975,15 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
 		upstreamConfigMap = upstream.Config
 	}
 
-	cfg, err := structs.ParseUpstreamConfigNoDefaults(upstreamConfigMap)
+	upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
+
+	// Mesh gateways are exempt because upstreamsSnapshot is only used for
+	// cluster peering targets and transitive failover/redirects are unsupported.
+	if err != nil && !forMeshGateway {
+		return nil, fmt.Errorf("No upstream snapshot for gateway mode %q", cfgSnap.Kind)
+	}
+
+	rawUpstreamConfig, err := structs.ParseUpstreamConfigNoDefaults(upstreamConfigMap)
 	if err != nil {
 		// Don't hard fail on a config typo, just warn. The parse func returns
 		// default config if there is an error so it's safe to continue.
@@ -986,13 +991,28 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
 			"error", err)
 	}
 
+	finalizeUpstreamConfig := func(cfg structs.UpstreamConfig, connectTimeout time.Duration) structs.UpstreamConfig {
+		if cfg.Protocol == "" {
+			cfg.Protocol = chain.Protocol
+		}
+
+		if cfg.Protocol == "" {
+			cfg.Protocol = "tcp"
+		}
+
+		if cfg.ConnectTimeoutMs == 0 {
+			cfg.ConnectTimeoutMs = int(connectTimeout / time.Millisecond)
+		}
+		return cfg
+	}
+
 	var escapeHatchCluster *envoy_cluster_v3.Cluster
 	if !forMeshGateway {
-		if cfg.EnvoyClusterJSON != "" {
+		if rawUpstreamConfig.EnvoyClusterJSON != "" {
 			if chain.Default {
 				// If you haven't done anything to setup the discovery chain, then
 				// you can use the envoy_cluster_json escape hatch.
-				escapeHatchCluster, err = makeClusterFromUserConfig(cfg.EnvoyClusterJSON)
+				escapeHatchCluster, err = makeClusterFromUserConfig(rawUpstreamConfig.EnvoyClusterJSON)
 				if err != nil {
 					return nil, err
 				}
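`finalizeUpstreamConfig` replaces the old per-cluster `proto` fallback chain (removed further down) with a single resolution: explicit upstream config wins, then the chain's protocol, then `"tcp"`, and the connect timeout falls back to the resolver node's timeout. A worked example under those rules (values are illustrative):

```go
// Worked example of the precedence implemented by finalizeUpstreamConfig,
// evaluated inside the function where `chain` is in scope.
raw := structs.UpstreamConfig{} // user set neither protocol nor timeout
// Suppose chain.Protocol == "http" for this discovery chain.
cfg := finalizeUpstreamConfig(raw, 33*time.Second)
// cfg.Protocol == "http"         (taken from the chain)
// cfg.ConnectTimeoutMs == 33000  (taken from the resolver's ConnectTimeout)
```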
@@ -1006,14 +1026,20 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
 
 	var out []*envoy_cluster_v3.Cluster
 	for _, node := range chain.Nodes {
-		if node.Type != structs.DiscoveryGraphNodeTypeResolver {
+		switch {
+		case node == nil:
+			return nil, fmt.Errorf("impossible to process a nil node")
+		case node.Type != structs.DiscoveryGraphNodeTypeResolver:
 			continue
+		case node.Resolver == nil:
+			return nil, fmt.Errorf("impossible to process a non-resolver node")
 		}
 		failover := node.Resolver.Failover
 		// These variables are prefixed with primary to avoid shadowing bugs.
 		primaryTargetID := node.Resolver.Target
 		primaryTarget := chain.Targets[primaryTargetID]
 		primaryClusterName := CustomizeClusterName(primaryTarget.Name, chain)
+		upstreamConfig := finalizeUpstreamConfig(rawUpstreamConfig, node.Resolver.ConnectTimeout)
 		if forMeshGateway {
 			primaryClusterName = meshGatewayExportedClusterNamePrefix + primaryClusterName
 		}
@@ -1026,22 +1052,38 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
 			continue
 		}
 
-		type targetClusterOptions struct {
+		type targetClusterOption struct {
 			targetID    string
 			clusterName string
 		}
 
 		// Construct the information required to make target clusters. When
 		// failover is configured, create the aggregate cluster.
-		var targetClustersOptions []targetClusterOptions
+		var targetClustersOptions []targetClusterOption
 		if failover != nil && !forMeshGateway {
 			var failoverClusterNames []string
 			for _, tid := range append([]string{primaryTargetID}, failover.Targets...) {
 				target := chain.Targets[tid]
-				clusterName := CustomizeClusterName(target.Name, chain)
+				clusterName := target.Name
+				targetUID := proxycfg.NewUpstreamIDFromTargetID(tid)
+				if targetUID.Peer != "" {
+					tbs, ok := upstreamsSnapshot.UpstreamPeerTrustBundles.Get(targetUID.Peer)
+					// We can't generate a cluster for a peer target without the trust
+					// bundle. The trust bundle should be ready soon.
+					if !ok {
+						s.Logger.Debug("peer trust bundle not ready for discovery chain target",
+							"peer", targetUID.Peer,
+							"target", tid,
+						)
+						continue
+					}
+
+					clusterName = generatePeeredClusterName(targetUID, tbs)
+				}
+				clusterName = CustomizeClusterName(clusterName, chain)
 				clusterName = failoverClusterNamePrefix + clusterName
 
-				targetClustersOptions = append(targetClustersOptions, targetClusterOptions{
+				targetClustersOptions = append(targetClustersOptions, targetClusterOption{
 					targetID:    tid,
 					clusterName: clusterName,
 				})
@@ -1070,7 +1112,7 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
 
 			out = append(out, c)
 		} else {
-			targetClustersOptions = append(targetClustersOptions, targetClusterOptions{
+			targetClustersOptions = append(targetClustersOptions, targetClusterOption{
 				targetID:    primaryTargetID,
 				clusterName: primaryClusterName,
 			})
@@ -1089,11 +1131,20 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
 				Datacenter: target.Datacenter,
 				Service:    target.Service,
 			}.URI().String()
-			if uid.Peer != "" {
-				return nil, fmt.Errorf("impossible to get a peer discovery chain")
+			targetUID := proxycfg.NewUpstreamIDFromTargetID(targetInfo.targetID)
+			s.Logger.Debug("generating cluster for", "cluster", targetInfo.clusterName)
+			if targetUID.Peer != "" {
+				peerMeta := upstreamsSnapshot.UpstreamPeerMeta(targetUID)
+				upstreamCluster, err := s.makeUpstreamClusterForPeerService(targetUID, upstreamConfig, peerMeta, cfgSnap)
+				if err != nil {
+					continue
+				}
+				// Override the cluster name to include the failover-target~ prefix.
+				upstreamCluster.Name = targetInfo.clusterName
+				out = append(out, upstreamCluster)
+				continue
 			}
 
-			s.Logger.Trace("generating cluster for", "cluster", targetInfo.clusterName)
 			c := &envoy_cluster_v3.Cluster{
 				Name:        targetInfo.clusterName,
 				AltStatName: targetInfo.clusterName,
@@ -1114,9 +1165,9 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
 				},
 				// TODO(peering): make circuit breakers or outlier detection work?
 				CircuitBreakers: &envoy_cluster_v3.CircuitBreakers{
-					Thresholds: makeThresholdsIfNeeded(cfg.Limits),
+					Thresholds: makeThresholdsIfNeeded(upstreamConfig.Limits),
 				},
-				OutlierDetection: ToOutlierDetection(cfg.PassiveHealthCheck),
+				OutlierDetection: ToOutlierDetection(upstreamConfig.PassiveHealthCheck),
 			}
 
 			var lb *structs.LoadBalancer
@@ -1127,19 +1178,7 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
 				return nil, fmt.Errorf("failed to apply load balancer configuration to cluster %q: %v", targetInfo.clusterName, err)
 			}
 
-			var proto string
-			if !forMeshGateway {
-				proto = cfg.Protocol
-			}
-			if proto == "" {
-				proto = chain.Protocol
-			}
-
-			if proto == "" {
-				proto = "tcp"
-			}
-
-			if proto == "http2" || proto == "grpc" {
+			if upstreamConfig.Protocol == "http2" || upstreamConfig.Protocol == "grpc" {
 				if err := s.setHttp2ProtocolOptions(c); err != nil {
 					return nil, err
 				}
@@ -1148,7 +1187,7 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
 			configureTLS := true
 			if forMeshGateway {
 				// We only initiate TLS if we're doing an L7 proxy.
-				configureTLS = structs.IsProtocolHTTPLike(proto)
+				configureTLS = structs.IsProtocolHTTPLike(upstreamConfig.Protocol)
 			}
 
 			if configureTLS {
|
||||||
proxycfg.NewUpstreamIDFromServiceName(svc),
|
proxycfg.NewUpstreamIDFromServiceName(svc),
|
||||||
nil,
|
nil,
|
||||||
chain,
|
chain,
|
||||||
nil,
|
|
||||||
cfgSnap,
|
cfgSnap,
|
||||||
true,
|
true,
|
||||||
)
|
)
|
||||||
|
|
|
@@ -169,6 +169,18 @@ func TestClustersFromSnapshot(t *testing.T) {
 				}, nil)
 			},
 		},
+		{
+			name: "custom-passive-healthcheck",
+			create: func(t testinf.T) *proxycfg.ConfigSnapshot {
+				return proxycfg.TestConfigSnapshot(t, func(ns *structs.NodeService) {
+					ns.Proxy.Upstreams[0].Config["passive_health_check"] = map[string]interface{}{
+						"enforcing_consecutive_5xx": float64(80),
+						"max_failures":              float64(5),
+						"interval":                  float64(10),
+					}
+				}, nil)
+			},
+		},
 		{
 			name: "custom-max-inbound-connections",
 			create: func(t testinf.T) *proxycfg.ConfigSnapshot {
@@ -257,6 +269,12 @@ func TestClustersFromSnapshot(t *testing.T) {
 				return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover", nil, nil)
 			},
 		},
+		{
+			name: "connect-proxy-with-chain-and-failover-to-cluster-peer",
+			create: func(t testinf.T) *proxycfg.ConfigSnapshot {
+				return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover-to-cluster-peer", nil, nil)
+			},
+		},
 		{
 			name: "connect-proxy-with-tcp-chain-failover-through-remote-gateway",
 			create: func(t testinf.T) *proxycfg.ConfigSnapshot {
@@ -495,6 +513,13 @@ func TestClustersFromSnapshot(t *testing.T) {
 					"failover", nil, nil, nil)
 			},
 		},
+		{
+			name: "ingress-with-chain-and-failover-to-cluster-peer",
+			create: func(t testinf.T) *proxycfg.ConfigSnapshot {
+				return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp",
+					"failover-to-cluster-peer", nil, nil, nil)
+			},
+		},
 		{
 			name: "ingress-with-tcp-chain-failover-through-remote-gateway",
 			create: func(t testinf.T) *proxycfg.ConfigSnapshot {
@@ -27,6 +27,12 @@ type ProxyConfig struct {
 	// Note: This escape hatch is compatible with the discovery chain.
 	PublicListenerJSON string `mapstructure:"envoy_public_listener_json"`

+	// ListenerTracingJSON is a complete override ("escape hatch") for the
+	// listeners tracing configuration.
+	//
+	// Note: This escape hatch is compatible with the discovery chain.
+	ListenerTracingJSON string `mapstructure:"envoy_listener_tracing_json"`
+
 	// LocalClusterJSON is a complete override ("escape hatch") for the
 	// local application cluster.
 	//
@@ -168,5 +174,10 @@ func ToOutlierDetection(p *structs.PassiveHealthCheck) *envoy_cluster_v3.Outlier
 	if p.MaxFailures != 0 {
 		od.Consecutive_5Xx = &wrappers.UInt32Value{Value: p.MaxFailures}
 	}
+
+	if p.EnforcingConsecutive5xx != nil {
+		od.EnforcingConsecutive_5Xx = &wrappers.UInt32Value{Value: *p.EnforcingConsecutive5xx}
+	}
+
 	return od
 }
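A minimal sketch of the new mapping, using only the fields visible in the hunk above (the `passiveHealthCheck` struct is a hypothetical stand-in for `structs.PassiveHealthCheck`, not the real type). The pointer check matters: the enforcement percentage is only set when the user configured one, so Envoy's default of 100 still applies otherwise.

```go
package main

import (
	"fmt"

	envoy_cluster_v3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
	"github.com/golang/protobuf/ptypes/wrappers"
)

// Hypothetical stand-in for structs.PassiveHealthCheck.
type passiveHealthCheck struct {
	MaxFailures             uint32
	EnforcingConsecutive5xx *uint32
}

func toOutlierDetection(p *passiveHealthCheck) *envoy_cluster_v3.OutlierDetection {
	od := &envoy_cluster_v3.OutlierDetection{}
	if p == nil {
		return od
	}
	if p.MaxFailures != 0 {
		od.Consecutive_5Xx = &wrappers.UInt32Value{Value: p.MaxFailures}
	}
	// Only override Envoy's default enforcement when explicitly configured.
	if p.EnforcingConsecutive5xx != nil {
		od.EnforcingConsecutive_5Xx = &wrappers.UInt32Value{Value: *p.EnforcingConsecutive5xx}
	}
	return od
}

func main() {
	pct := uint32(80)
	od := toOutlierDetection(&passiveHealthCheck{MaxFailures: 5, EnforcingConsecutive5xx: &pct})
	fmt.Println(od.Consecutive_5Xx.Value, od.EnforcingConsecutive_5Xx.Value) // 5 80
}
```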
@@ -50,14 +50,19 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.
 		cfgSnap.ConnectProxy.PeerUpstreamEndpoints.Len()+
 			len(cfgSnap.ConnectProxy.WatchedUpstreamEndpoints))

-	// NOTE: Any time we skip a chain below we MUST also skip that discovery chain in clusters.go
-	// so that the sets of endpoints generated matches the sets of clusters.
-	for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
+	getUpstream := func(uid proxycfg.UpstreamID) (*structs.Upstream, bool) {
 		upstream := cfgSnap.ConnectProxy.UpstreamConfig[uid]

 		explicit := upstream.HasLocalPortOrSocket()
 		implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid)
-		if !implicit && !explicit {
+		return upstream, !implicit && !explicit
+	}
+
+	// NOTE: Any time we skip a chain below we MUST also skip that discovery chain in clusters.go
+	// so that the sets of endpoints generated matches the sets of clusters.
+	for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
+		upstream, skip := getUpstream(uid)
+		if skip {
 			// Discovery chain is not associated with a known explicit or implicit upstream so it is skipped.
 			continue
 		}
@@ -70,6 +75,7 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.
 		es, err := s.endpointsFromDiscoveryChain(
 			uid,
 			chain,
+			cfgSnap,
 			cfgSnap.Locality,
 			upstreamConfigMap,
 			cfgSnap.ConnectProxy.WatchedUpstreamEndpoints[uid],
@@ -86,12 +92,9 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.
 	// upstream in clusters.go so that the sets of endpoints generated matches
 	// the sets of clusters.
 	for _, uid := range cfgSnap.ConnectProxy.PeeredUpstreamIDs() {
-		upstreamCfg := cfgSnap.ConnectProxy.UpstreamConfig[uid]
-
-		explicit := upstreamCfg.HasLocalPortOrSocket()
-		implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid)
-		if !implicit && !explicit {
-			// Not associated with a known explicit or implicit upstream so it is skipped.
+		_, skip := getUpstream(uid)
+		if skip {
+			// Discovery chain is not associated with a known explicit or implicit upstream so it is skipped.
 			continue
 		}

@@ -104,22 +107,14 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.
 		clusterName := generatePeeredClusterName(uid, tbs)

-		// Also skip peer instances with a hostname as their address. EDS
-		// cannot resolve hostnames, so we provide them through CDS instead.
-		if _, ok := cfgSnap.ConnectProxy.PeerUpstreamEndpointsUseHostnames[uid]; ok {
-			continue
+		loadAssignment, err := s.makeUpstreamLoadAssignmentForPeerService(cfgSnap, clusterName, uid)
+
+		if err != nil {
+			return nil, err
 		}

-		endpoints, ok := cfgSnap.ConnectProxy.PeerUpstreamEndpoints.Get(uid)
-		if ok {
-			la := makeLoadAssignment(
-				clusterName,
-				[]loadAssignmentEndpointGroup{
-					{Endpoints: endpoints},
-				},
-				proxycfg.GatewayKey{ /*empty so it never matches*/ },
-			)
-			resources = append(resources, la)
+		if loadAssignment != nil {
+			resources = append(resources, loadAssignment)
 		}
 	}

@@ -375,6 +370,7 @@ func (s *ResourceGenerator) endpointsFromSnapshotIngressGateway(cfgSnap *proxycf
 		es, err := s.endpointsFromDiscoveryChain(
 			uid,
 			cfgSnap.IngressGateway.DiscoveryChain[uid],
+			cfgSnap,
 			proxycfg.GatewayKey{Datacenter: cfgSnap.Datacenter, Partition: u.DestinationPartition},
 			u.Config,
 			cfgSnap.IngressGateway.WatchedUpstreamEndpoints[uid],
@@ -412,9 +408,38 @@ func makePipeEndpoint(path string) *envoy_endpoint_v3.LbEndpoint {
 	}
 }

+func (s *ResourceGenerator) makeUpstreamLoadAssignmentForPeerService(cfgSnap *proxycfg.ConfigSnapshot, clusterName string, uid proxycfg.UpstreamID) (*envoy_endpoint_v3.ClusterLoadAssignment, error) {
+	var la *envoy_endpoint_v3.ClusterLoadAssignment
+
+	upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
+	if err != nil {
+		return la, err
+	}
+
+	// Also skip peer instances with a hostname as their address. EDS
+	// cannot resolve hostnames, so we provide them through CDS instead.
+	if _, ok := upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames[uid]; ok {
+		return la, nil
+	}
+
+	endpoints, ok := upstreamsSnapshot.PeerUpstreamEndpoints.Get(uid)
+	if !ok {
+		return nil, nil
+	}
+	la = makeLoadAssignment(
+		clusterName,
+		[]loadAssignmentEndpointGroup{
+			{Endpoints: endpoints},
+		},
+		proxycfg.GatewayKey{ /*empty so it never matches*/ },
+	)
+	return la, nil
+}
+
 func (s *ResourceGenerator) endpointsFromDiscoveryChain(
 	uid proxycfg.UpstreamID,
 	chain *structs.CompiledDiscoveryChain,
+	cfgSnap *proxycfg.ConfigSnapshot,
 	gatewayKey proxycfg.GatewayKey,
 	upstreamConfigMap map[string]interface{},
 	upstreamEndpoints map[string]structs.CheckServiceNodes,
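A note on the helper's contract, since the call sites depend on it: `makeUpstreamLoadAssignmentForPeerService` returns a nil `ClusterLoadAssignment` with a nil error both when the peer's endpoints use hostnames (EDS cannot resolve hostnames, so those clusters are filled in through CDS) and when the endpoints have not yet arrived in the snapshot. A nil result therefore means "nothing to emit yet", not a failure, and callers skip it rather than error out:

```go
// Call-site pattern (excerpted from the hunks above, not new code):
loadAssignment, err := s.makeUpstreamLoadAssignmentForPeerService(cfgSnap, clusterName, uid)
if err != nil {
	return nil, err
}
if loadAssignment != nil { // nil means "skip": hostname-based or not-yet-watched endpoints
	resources = append(resources, loadAssignment)
}
```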
@@ -432,6 +457,14 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
 		upstreamConfigMap = make(map[string]interface{}) // TODO:needed?
 	}

+	upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
+
+	// Mesh gateways are exempt because upstreamsSnapshot is only used for
+	// cluster peering targets and transitive failover/redirects are unsupported.
+	if err != nil && !forMeshGateway {
+		return nil, fmt.Errorf("No upstream snapshot for gateway mode %q", cfgSnap.Kind)
+	}
+
 	var resources []proto.Message

 	var escapeHatchCluster *envoy_cluster_v3.Cluster
@@ -465,8 +498,15 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
 		if node.Type != structs.DiscoveryGraphNodeTypeResolver {
 			continue
 		}
+		primaryTargetID := node.Resolver.Target
 		failover := node.Resolver.Failover

+		type targetLoadAssignmentOption struct {
+			targetID    string
+			clusterName string
+		}
+		var targetLoadAssignmentOptions []targetLoadAssignmentOption
+
 		var numFailoverTargets int
 		if failover != nil {
 			numFailoverTargets = len(failover.Targets)
@@ -474,66 +514,84 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
 		clusterNamePrefix := ""
 		if numFailoverTargets > 0 && !forMeshGateway {
 			clusterNamePrefix = failoverClusterNamePrefix
-			for _, failTargetID := range failover.Targets {
-				target := chain.Targets[failTargetID]
-				endpointGroup, valid := makeLoadAssignmentEndpointGroup(
-					chain.Targets,
-					upstreamEndpoints,
-					gatewayEndpoints,
-					failTargetID,
-					gatewayKey,
-					forMeshGateway,
-				)
-				if !valid {
-					continue // skip the failover target if we're still populating the snapshot
-				}
-
-				clusterName := CustomizeClusterName(target.Name, chain)
+			for _, targetID := range append([]string{primaryTargetID}, failover.Targets...) {
+				target := chain.Targets[targetID]
+				clusterName := target.Name
+				targetUID := proxycfg.NewUpstreamIDFromTargetID(targetID)
+				if targetUID.Peer != "" {
+					tbs, ok := upstreamsSnapshot.UpstreamPeerTrustBundles.Get(targetUID.Peer)
+					// We can't generate cluster on peers without the trust bundle. The
+					// trust bundle should be ready soon.
+					if !ok {
+						s.Logger.Debug("peer trust bundle not ready for discovery chain target",
+							"peer", targetUID.Peer,
+							"target", targetID,
+						)
+						continue
+					}
+
+					clusterName = generatePeeredClusterName(targetUID, tbs)
+				}
+				clusterName = CustomizeClusterName(clusterName, chain)
 				clusterName = failoverClusterNamePrefix + clusterName
 				if escapeHatchCluster != nil {
 					clusterName = escapeHatchCluster.Name
 				}

-				s.Logger.Debug("generating endpoints for", "cluster", clusterName)
-
-				la := makeLoadAssignment(
-					clusterName,
-					[]loadAssignmentEndpointGroup{endpointGroup},
-					gatewayKey,
-				)
-				resources = append(resources, la)
+				targetLoadAssignmentOptions = append(targetLoadAssignmentOptions, targetLoadAssignmentOption{
+					targetID:    targetID,
+					clusterName: clusterName,
+				})
 			}
-		}
-
-		targetID := node.Resolver.Target
-
-		target := chain.Targets[targetID]
-		clusterName := CustomizeClusterName(target.Name, chain)
-		clusterName = clusterNamePrefix + clusterName
-		if escapeHatchCluster != nil {
-			clusterName = escapeHatchCluster.Name
-		}
-		if forMeshGateway {
-			clusterName = meshGatewayExportedClusterNamePrefix + clusterName
-		}
-		s.Logger.Debug("generating endpoints for", "cluster", clusterName)
-		endpointGroup, valid := makeLoadAssignmentEndpointGroup(
-			chain.Targets,
-			upstreamEndpoints,
-			gatewayEndpoints,
-			targetID,
-			gatewayKey,
-			forMeshGateway,
-		)
-		if !valid {
-			continue // skip the cluster if we're still populating the snapshot
-		}
-
-		la := makeLoadAssignment(
-			clusterName,
-			[]loadAssignmentEndpointGroup{endpointGroup},
-			gatewayKey,
-		)
-		resources = append(resources, la)
+		} else {
+			target := chain.Targets[primaryTargetID]
+			clusterName := CustomizeClusterName(target.Name, chain)
+			clusterName = clusterNamePrefix + clusterName
+			if escapeHatchCluster != nil {
+				clusterName = escapeHatchCluster.Name
+			}
+			if forMeshGateway {
+				clusterName = meshGatewayExportedClusterNamePrefix + clusterName
+			}
+			targetLoadAssignmentOptions = append(targetLoadAssignmentOptions, targetLoadAssignmentOption{
+				targetID:    primaryTargetID,
+				clusterName: clusterName,
+			})
+		}

+		for _, targetInfo := range targetLoadAssignmentOptions {
+			s.Logger.Debug("generating endpoints for", "cluster", targetInfo.clusterName)
+			targetUID := proxycfg.NewUpstreamIDFromTargetID(targetInfo.targetID)
+			if targetUID.Peer != "" {
+				loadAssignment, err := s.makeUpstreamLoadAssignmentForPeerService(cfgSnap, targetInfo.clusterName, targetUID)
+				if err != nil {
+					return nil, err
+				}
+				if loadAssignment != nil {
+					resources = append(resources, loadAssignment)
+				}
+				continue
+			}
+
+			endpointGroup, valid := makeLoadAssignmentEndpointGroup(
+				chain.Targets,
+				upstreamEndpoints,
+				gatewayEndpoints,
+				targetInfo.targetID,
+				gatewayKey,
+				forMeshGateway,
+			)
+			if !valid {
+				continue // skip the cluster if we're still populating the snapshot
+			}
+
+			la := makeLoadAssignment(
+				targetInfo.clusterName,
+				[]loadAssignmentEndpointGroup{endpointGroup},
+				gatewayKey,
+			)
+			resources = append(resources, la)
+		}
 	}

 	return resources, nil
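The shape of this refactor: the primary target now joins the failover targets in a single list (`append([]string{primaryTargetID}, failover.Targets...)`), and every entry becomes a `failover-target~`-prefixed EDS cluster behind the `envoy.clusters.aggregate` cluster visible in the golden files below; peered targets get endpoints via `makeUpstreamLoadAssignmentForPeerService`, local ones via `makeLoadAssignmentEndpointGroup`. A minimal sketch (not Consul code) of how the aggregate cluster list is assembled, using names taken from the golden files:

```go
package main

import "fmt"

// Matches the prefix applied in the loop above.
const failoverClusterNamePrefix = "failover-target~"

func main() {
	primary := "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
	failover := []string{"db.default.cluster-01.external.peer1.domain"}

	// One failover-target~ cluster per target, primary first.
	var aggregate []string
	for _, name := range append([]string{primary}, failover...) {
		aggregate = append(aggregate, failoverClusterNamePrefix+name)
	}
	fmt.Println(aggregate)
}
```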
@@ -586,6 +644,7 @@ func (s *ResourceGenerator) makeExportedUpstreamEndpointsForMeshGateway(cfgSnap
 		clusterEndpoints, err := s.endpointsFromDiscoveryChain(
 			proxycfg.NewUpstreamIDFromServiceName(svc),
 			chain,
+			cfgSnap,
 			cfgSnap.Locality,
 			nil,
 			chainEndpoints,
@@ -640,11 +699,12 @@ func makeLoadAssignment(clusterName string, endpointGroups []loadAssignmentEndpo
 			healthStatus = endpointGroup.OverrideHealth
 		}

+		endpoint := &envoy_endpoint_v3.Endpoint{
+			Address: makeAddress(addr, port),
+		}
 		es = append(es, &envoy_endpoint_v3.LbEndpoint{
 			HostIdentifier: &envoy_endpoint_v3.LbEndpoint_Endpoint{
-				Endpoint: &envoy_endpoint_v3.Endpoint{
-					Address: makeAddress(addr, port),
-				},
+				Endpoint: endpoint,
 			},
 			HealthStatus:        healthStatus,
 			LoadBalancingWeight: makeUint32Value(weight),
@@ -284,6 +284,12 @@ func TestEndpointsFromSnapshot(t *testing.T) {
 			return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover", nil, nil)
 		},
 	},
+	{
+		name: "connect-proxy-with-chain-and-failover-to-cluster-peer",
+		create: func(t testinf.T) *proxycfg.ConfigSnapshot {
+			return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover-to-cluster-peer", nil, nil)
+		},
+	},
 	{
 		name: "connect-proxy-with-tcp-chain-failover-through-remote-gateway",
 		create: func(t testinf.T) *proxycfg.ConfigSnapshot {
@@ -396,6 +402,13 @@ func TestEndpointsFromSnapshot(t *testing.T) {
 				"failover", nil, nil, nil)
 		},
 	},
+	{
+		name: "ingress-with-chain-and-failover-to-cluster-peer",
+		create: func(t testinf.T) *proxycfg.ConfigSnapshot {
+			return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp",
+				"failover-to-cluster-peer", nil, nil, nil)
+		},
+	},
 	{
 		name: "ingress-with-tcp-chain-failover-through-remote-gateway",
 		create: func(t testinf.T) *proxycfg.ConfigSnapshot {
@@ -3,7 +3,6 @@ package xds
 import (
 	"errors"
 	"fmt"
-	envoy_extensions_filters_listener_http_inspector_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/http_inspector/v3"
 	"net"
 	"net/url"
 	"regexp"
@@ -12,6 +11,8 @@ import (
 	"strings"
 	"time"

+	envoy_extensions_filters_listener_http_inspector_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/http_inspector/v3"
+
 	envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
 	envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
 	envoy_route_v3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
@@ -107,6 +108,19 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.
 		}
 	}

+	proxyCfg, err := ParseProxyConfig(cfgSnap.Proxy.Config)
+	if err != nil {
+		// Don't hard fail on a config typo, just warn. The parse func returns
+		// default config if there is an error so it's safe to continue.
+		s.Logger.Warn("failed to parse Connect.Proxy.Config", "error", err)
+	}
+	var tracing *envoy_http_v3.HttpConnectionManager_Tracing
+	if proxyCfg.ListenerTracingJSON != "" {
+		if tracing, err = makeTracingFromUserConfig(proxyCfg.ListenerTracingJSON); err != nil {
+			s.Logger.Warn("failed to parse ListenerTracingJSON config", "error", err)
+		}
+	}
+
 	for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
 		upstreamCfg := cfgSnap.ConnectProxy.UpstreamConfig[uid]

@@ -153,6 +167,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.
 				filterName: filterName,
 				protocol:   cfg.Protocol,
 				useRDS:     useRDS,
+				tracing:    tracing,
 			})
 			if err != nil {
 				return nil, err
@@ -178,6 +193,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.
 				filterName: filterName,
 				protocol:   cfg.Protocol,
 				useRDS:     useRDS,
+				tracing:    tracing,
 			})
 			if err != nil {
 				return nil, err
@@ -249,6 +265,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.
 				filterName: routeName,
 				protocol:   svcConfig.Protocol,
 				useRDS:     true,
+				tracing:    tracing,
 			})
 			if err != nil {
 				return err
@@ -265,6 +282,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.
 				clusterName: clusterName,
 				filterName:  clusterName,
 				protocol:    svcConfig.Protocol,
+				tracing:     tracing,
 			})
 			if err != nil {
 				return err
@@ -376,6 +394,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.
 				protocol:   cfg.Protocol,
 				useRDS:     false,
 				statPrefix: "upstream_peered.",
+				tracing:    tracing,
 			})
 			if err != nil {
 				return nil, err
@@ -533,6 +552,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.
 				filterName: uid.EnvoyID(),
 				routeName:  uid.EnvoyID(),
 				protocol:   cfg.Protocol,
+				tracing:    tracing,
 			})
 			if err != nil {
 				return nil, err
@@ -1188,12 +1208,20 @@ func (s *ResourceGenerator) makeInboundListener(cfgSnap *proxycfg.ConfigSnapshot

 	l = makePortListener(name, addr, port, envoy_core_v3.TrafficDirection_INBOUND)

+	var tracing *envoy_http_v3.HttpConnectionManager_Tracing
+	if cfg.ListenerTracingJSON != "" {
+		if tracing, err = makeTracingFromUserConfig(cfg.ListenerTracingJSON); err != nil {
+			s.Logger.Warn("failed to parse ListenerTracingJSON config", "error", err)
+		}
+	}
+
 	filterOpts := listenerFilterOpts{
 		protocol:         cfg.Protocol,
 		filterName:       name,
 		routeName:        name,
 		cluster:          LocalAppClusterName,
 		requestTimeoutMs: cfg.LocalRequestTimeoutMs,
+		tracing:          tracing,
 	}
 	if useHTTPFilter {
 		filterOpts.httpAuthzFilter, err = makeRBACHTTPFilter(
@@ -1310,6 +1338,7 @@ func (s *ResourceGenerator) makeExposedCheckListener(cfgSnap *proxycfg.ConfigSna
 		statPrefix:      "",
 		routePath:       path.Path,
 		httpAuthzFilter: nil,
+		// in the exposed check listener we don't set the tracing configuration
 	}
 	f, err := makeListenerFilter(opts)
 	if err != nil {
@@ -1542,6 +1571,19 @@ func (s *ResourceGenerator) makeFilterChainTerminatingGateway(cfgSnap *proxycfg.
 		filterChain.Filters = append(filterChain.Filters, authFilter)
 	}

+	proxyCfg, err := ParseProxyConfig(cfgSnap.Proxy.Config)
+	if err != nil {
+		// Don't hard fail on a config typo, just warn. The parse func returns
+		// default config if there is an error so it's safe to continue.
+		s.Logger.Warn("failed to parse Connect.Proxy.Config", "error", err)
+	}
+	var tracing *envoy_http_v3.HttpConnectionManager_Tracing
+	if proxyCfg.ListenerTracingJSON != "" {
+		if tracing, err = makeTracingFromUserConfig(proxyCfg.ListenerTracingJSON); err != nil {
+			s.Logger.Warn("failed to parse ListenerTracingJSON config", "error", err)
+		}
+	}
+
 	// Lastly we setup the actual proxying component. For L4 this is a straight
 	// tcp proxy. For L7 this is a very hands-off HTTP proxy just to inject an
 	// HTTP filter to do intention checks here instead.
@@ -1552,6 +1594,7 @@ func (s *ResourceGenerator) makeFilterChainTerminatingGateway(cfgSnap *proxycfg.
 		cluster:    tgtwyOpts.cluster,
 		statPrefix: "upstream.",
 		routePath:  "",
+		tracing:    tracing,
 	}

 	if useHTTPFilter {
@@ -1798,6 +1841,7 @@ type filterChainOpts struct {
 	statPrefix           string
 	forwardClientDetails bool
 	forwardClientPolicy  envoy_http_v3.HttpConnectionManager_ForwardClientCertDetails
+	tracing              *envoy_http_v3.HttpConnectionManager_Tracing
 }

 func (s *ResourceGenerator) makeUpstreamFilterChain(opts filterChainOpts) (*envoy_listener_v3.FilterChain, error) {
@@ -1813,6 +1857,7 @@ func (s *ResourceGenerator) makeUpstreamFilterChain(opts filterChainOpts) (*envo
 		statPrefix:           opts.statPrefix,
 		forwardClientDetails: opts.forwardClientDetails,
 		forwardClientPolicy:  opts.forwardClientPolicy,
+		tracing:              opts.tracing,
 	})
 	if err != nil {
 		return nil, err
@@ -1955,6 +2000,7 @@ type listenerFilterOpts struct {
 	httpAuthzFilter      *envoy_http_v3.HttpFilter
 	forwardClientDetails bool
 	forwardClientPolicy  envoy_http_v3.HttpConnectionManager_ForwardClientCertDetails
+	tracing              *envoy_http_v3.HttpConnectionManager_Tracing
 }

 func makeListenerFilter(opts listenerFilterOpts) (*envoy_listener_v3.Filter, error) {
@@ -2014,6 +2060,19 @@ func makeStatPrefix(prefix, filterName string) string {
 	return fmt.Sprintf("%s%s", prefix, strings.Replace(filterName, ":", "_", -1))
 }

+func makeTracingFromUserConfig(configJSON string) (*envoy_http_v3.HttpConnectionManager_Tracing, error) {
+	// The Type field is present, so decode it as an any.Any
+	var any any.Any
+	if err := jsonpb.UnmarshalString(configJSON, &any); err != nil {
+		return nil, err
+	}
+	var t envoy_http_v3.HttpConnectionManager_Tracing
+	if err := proto.Unmarshal(any.Value, &t); err != nil {
+		return nil, err
+	}
+	return &t, nil
+}
+
 func makeHTTPFilter(opts listenerFilterOpts) (*envoy_listener_v3.Filter, error) {
 	router, err := makeEnvoyHTTPFilter("envoy.filters.http.router", &envoy_http_router_v3.Router{})
 	if err != nil {
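Why the two-step decode in `makeTracingFromUserConfig`: the user-supplied JSON carries an `@type` field, so jsonpb first resolves it as a protobuf `Any` (serializing the payload into `Any.Value`), and the binary payload is then unmarshaled into the concrete tracing message. A standalone sketch of the same flow, assuming the go-control-plane and golang/protobuf import paths shown in this diff:

```go
package main

import (
	"fmt"

	envoy_http_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
	"github.com/golang/protobuf/jsonpb"
	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/any"
)

func main() {
	configJSON := `{
	  "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing",
	  "random_sampling": {"value": 10}
	}`

	// Step 1: the "@type" field makes this JSON a protobuf Any.
	var a any.Any
	if err := jsonpb.UnmarshalString(configJSON, &a); err != nil {
		panic(err)
	}

	// Step 2: unpack the Any's binary payload into the concrete tracing config.
	var t envoy_http_v3.HttpConnectionManager_Tracing
	if err := proto.Unmarshal(a.Value, &t); err != nil {
		panic(err)
	}
	fmt.Println(t.RandomSampling.GetValue()) // 10
}
```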
@@ -2034,6 +2093,10 @@ func makeHTTPFilter(opts listenerFilterOpts) (*envoy_listener_v3.Filter, error)
 		},
 	}

+	if opts.tracing != nil {
+		cfg.Tracing = opts.tracing
+	}
+
 	if opts.useRDS {
 		if opts.cluster != "" {
 			return nil, fmt.Errorf("cannot specify cluster name when using RDS")
@@ -772,6 +772,15 @@ func TestListenersFromSnapshot(t *testing.T) {
 		name:   "transparent-proxy-terminating-gateway",
 		create: proxycfg.TestConfigSnapshotTransparentProxyTerminatingGatewayCatalogDestinationsOnly,
 	},
+	{
+		name: "custom-trace-listener",
+		create: func(t testinf.T) *proxycfg.ConfigSnapshot {
+			return proxycfg.TestConfigSnapshot(t, func(ns *structs.NodeService) {
+				ns.Proxy.Config["protocol"] = "http"
+				ns.Proxy.Config["envoy_listener_tracing_json"] = customTraceJSON(t)
+			}, nil)
+		},
+	},
 }

 latestEnvoyVersion := proxysupport.EnvoyVersions[0]
@@ -947,6 +956,40 @@ func customHTTPListenerJSON(t testinf.T, opts customHTTPListenerJSONOptions) str
 	return buf.String()
 }

+func customTraceJSON(t testinf.T) string {
+	t.Helper()
+	return `
+	{
+        "@type" : "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing",
+        "provider" : {
+          "name" : "envoy.tracers.zipkin",
+          "typed_config" : {
+            "@type" : "type.googleapis.com/envoy.config.trace.v3.ZipkinConfig",
+            "collector_cluster" : "otelcolector",
+            "collector_endpoint" : "/api/v2/spans",
+            "collector_endpoint_version" : "HTTP_JSON",
+            "shared_span_context" : false
+          }
+        },
+        "custom_tags" : [
+          {
+            "tag" : "custom_header",
+            "request_header" : {
+              "name" : "x-custom-traceid",
+              "default_value" : ""
+            }
+          },
+          {
+            "tag" : "alloc_id",
+            "environment" : {
+              "name" : "NOMAD_ALLOC_ID"
+            }
+          }
+        ]
+	}
+	`
+}
+
 type configFetcherFunc func() string

 var _ ConfigFetcher = (configFetcherFunc)(nil)
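For context, this escape hatch rides in the proxy's `Proxy.Config` under `envoy_listener_tracing_json`, exactly as the test above sets it via `ns.Proxy.Config`. A hedged sketch of wiring it from the operator side with the public `api` client; the registration fields are from the api package, but treat the exact shape (names, port, payload) as illustrative assumptions rather than a reference:

```go
package main

import "github.com/hashicorp/consul/api"

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}

	reg := &api.AgentServiceRegistration{
		Kind: api.ServiceKindConnectProxy,
		Name: "web-sidecar-proxy", // hypothetical service name
		Port: 21000,
		Proxy: &api.AgentServiceConnectProxyConfig{
			DestinationServiceName: "web",
			Config: map[string]interface{}{
				"protocol": "http",
				// Same payload shape as customTraceJSON above.
				"envoy_listener_tracing_json": `{
				  "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing",
				  "provider": {
				    "name": "envoy.tracers.zipkin",
				    "typed_config": {
				      "@type": "type.googleapis.com/envoy.config.trace.v3.ZipkinConfig",
				      "collector_cluster": "zipkin",
				      "collector_endpoint": "/api/v2/spans",
				      "collector_endpoint_version": "HTTP_JSON"
				    }
				  }
				}`,
			},
		},
	}
	if err := client.Agent().ServiceRegister(reg); err != nil {
		panic(err)
	}
}
```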
agent/xds/testdata/clusters/connect-proxy-with-chain-and-failover-to-cluster-peer.latest.golden (new file, vendored)
@@ -0,0 +1,219 @@
{
  "versionInfo": "00000001",
  "resources": [
    {
      "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
      "name": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
      "altStatName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
      "clusterType": {
        "name": "envoy.clusters.aggregate",
        "typedConfig": {
          "@type": "type.googleapis.com/envoy.extensions.clusters.aggregate.v3.ClusterConfig",
          "clusters": [
            "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
            "failover-target~db.default.cluster-01.external.peer1.domain"
          ]
        }
      },
      "connectTimeout": "33s",
      "lbPolicy": "CLUSTER_PROVIDED"
    },
    {
      "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
      "name": "failover-target~db.default.cluster-01.external.peer1.domain",
      "type": "EDS",
      "edsClusterConfig": {
        "edsConfig": {
          "ads": {

          },
          "resourceApiVersion": "V3"
        }
      },
      "connectTimeout": "1s",
      "circuitBreakers": {

      },
      "outlierDetection": {
        "maxEjectionPercent": 100
      },
      "commonLbConfig": {
        "healthyPanicThreshold": {

        }
      },
      "transportSocket": {
        "name": "tls",
        "typedConfig": {
          "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
          "commonTlsContext": {
            "tlsParams": {

            },
            "tlsCertificates": [
              {
                "certificateChain": {
                  "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
                },
                "privateKey": {
                  "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
                }
              }
            ],
            "validationContext": {
              "trustedCa": {
                "inlineString": "peer1-root-1\n"
              },
              "matchSubjectAltNames": [
                {
                  "exact": "spiffe://1c053652-8512-4373-90cf-5a7f6263a994.consul/ns/default/dc/cluster-01-dc/svc/payments"
                }
              ]
            }
          },
          "sni": "payments.default.default.cluster-01.external.1c053652-8512-4373-90cf-5a7f6263a994.consul"
        }
      }
    },
    {
      "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
      "name": "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
      "altStatName": "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
      "type": "EDS",
      "edsClusterConfig": {
        "edsConfig": {
          "ads": {

          },
          "resourceApiVersion": "V3"
        }
      },
      "connectTimeout": "33s",
      "circuitBreakers": {

      },
      "outlierDetection": {

      },
      "commonLbConfig": {
        "healthyPanicThreshold": {

        }
      },
      "transportSocket": {
        "name": "tls",
        "typedConfig": {
          "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
          "commonTlsContext": {
            "tlsParams": {

            },
            "tlsCertificates": [
              {
                "certificateChain": {
                  "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
                },
                "privateKey": {
                  "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
                }
              }
            ],
            "validationContext": {
              "trustedCa": {
                "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
              },
              "matchSubjectAltNames": [
                {
                  "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/db"
                }
              ]
            }
          },
          "sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
        }
      }
    },
    {
      "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
      "name": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul",
      "type": "EDS",
      "edsClusterConfig": {
        "edsConfig": {
          "ads": {

          },
          "resourceApiVersion": "V3"
        }
      },
      "connectTimeout": "5s",
      "circuitBreakers": {

      },
      "outlierDetection": {

      },
      "transportSocket": {
        "name": "tls",
        "typedConfig": {
          "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
          "commonTlsContext": {
            "tlsParams": {

            },
            "tlsCertificates": [
              {
                "certificateChain": {
                  "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
                },
                "privateKey": {
                  "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
                }
              }
            ],
            "validationContext": {
              "trustedCa": {
                "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
              },
              "matchSubjectAltNames": [
                {
                  "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/geo-cache-target"
                },
                {
                  "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc2/svc/geo-cache-target"
                }
              ]
            }
          },
          "sni": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul"
        }
      }
    },
    {
      "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
      "name": "local_app",
      "type": "STATIC",
      "connectTimeout": "5s",
      "loadAssignment": {
        "clusterName": "local_app",
        "endpoints": [
          {
            "lbEndpoints": [
              {
                "endpoint": {
                  "address": {
                    "socketAddress": {
                      "address": "127.0.0.1",
                      "portValue": 8080
                    }
                  }
                }
              }
            ]
          }
        ]
      }
    }
  ],
  "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
  "nonce": "00000001"
}
@@ -58,7 +58,7 @@
   "dnsRefreshRate": "10s",
   "dnsLookupFamily": "V4_ONLY",
   "outlierDetection": {
-
+    "maxEjectionPercent": 100
   },
   "commonLbConfig": {
     "healthyPanicThreshold": {
@@ -115,7 +115,7 @@
   },
   "outlierDetection": {
-
+    "maxEjectionPercent": 100
   },
   "commonLbConfig": {
     "healthyPanicThreshold": {
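These two golden hunks are the test-fixture fallout of the xds change noted in the changelog: clusters for peered services now pin `max_ejection_percent` to 100 so every failing peer endpoint can be ejected (Envoy's default cap is 10%). A minimal sketch of the setting they reflect, using the go-control-plane types from this diff:

```go
package main

import (
	"fmt"

	envoy_cluster_v3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	// For peered upstreams the generator raises the ejection cap from
	// Envoy's default of 10% to 100%.
	od := &envoy_cluster_v3.OutlierDetection{
		MaxEjectionPercent: &wrappers.UInt32Value{Value: 100},
	}
	fmt.Println(od.MaxEjectionPercent.GetValue()) // 100
}
```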
@@ -0,0 +1,147 @@
{
  "versionInfo": "00000001",
  "resources": [
    {
      "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
      "name": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
      "altStatName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
      "type": "EDS",
      "edsClusterConfig": {
        "edsConfig": {
          "ads": {

          },
          "resourceApiVersion": "V3"
        }
      },
      "connectTimeout": "5s",
      "circuitBreakers": {

      },
      "outlierDetection": {
        "consecutive5xx": 5,
        "interval": "0.000000010s",
        "enforcingConsecutive5xx": 80
      },
      "commonLbConfig": {
        "healthyPanicThreshold": {

        }
      },
      "transportSocket": {
        "name": "tls",
        "typedConfig": {
          "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
          "commonTlsContext": {
            "tlsParams": {

            },
            "tlsCertificates": [
              {
                "certificateChain": {
                  "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
                },
                "privateKey": {
                  "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
                }
              }
            ],
            "validationContext": {
              "trustedCa": {
                "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
              },
              "matchSubjectAltNames": [
                {
                  "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/db"
                }
              ]
            }
          },
          "sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
        }
      }
    },
    {
      "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
      "name": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul",
      "type": "EDS",
      "edsClusterConfig": {
        "edsConfig": {
          "ads": {

          },
          "resourceApiVersion": "V3"
        }
      },
      "connectTimeout": "5s",
      "circuitBreakers": {

      },
      "outlierDetection": {

      },
      "transportSocket": {
        "name": "tls",
        "typedConfig": {
          "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
          "commonTlsContext": {
            "tlsParams": {

            },
            "tlsCertificates": [
              {
                "certificateChain": {
                  "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
                },
                "privateKey": {
                  "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
                }
              }
            ],
            "validationContext": {
              "trustedCa": {
                "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
              },
              "matchSubjectAltNames": [
                {
                  "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/geo-cache-target"
                },
                {
                  "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc2/svc/geo-cache-target"
                }
              ]
            }
          },
          "sni": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul"
        }
      }
    },
    {
      "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
      "name": "local_app",
      "type": "STATIC",
      "connectTimeout": "5s",
      "loadAssignment": {
        "clusterName": "local_app",
        "endpoints": [
          {
            "lbEndpoints": [
              {
                "endpoint": {
                  "address": {
                    "socketAddress": {
                      "address": "127.0.0.1",
                      "portValue": 8080
                    }
                  }
                }
              }
            ]
          }
        ]
      }
    }
  ],
  "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
  "nonce": "00000001"
}
agent/xds/testdata/clusters/ingress-with-chain-and-failover-to-cluster-peer.latest.golden (vendored, new file)
@@ -0,0 +1,139 @@
{
  "versionInfo": "00000001",
  "resources": [
    {
      "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
      "name": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
      "altStatName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
      "clusterType": {
        "name": "envoy.clusters.aggregate",
        "typedConfig": {
          "@type": "type.googleapis.com/envoy.extensions.clusters.aggregate.v3.ClusterConfig",
          "clusters": [
            "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
            "failover-target~db.default.cluster-01.external.peer1.domain"
          ]
        }
      },
      "connectTimeout": "33s",
      "lbPolicy": "CLUSTER_PROVIDED"
    },
    {
      "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
      "name": "failover-target~db.default.cluster-01.external.peer1.domain",
      "type": "EDS",
      "edsClusterConfig": {
        "edsConfig": {
          "ads": {

          },
          "resourceApiVersion": "V3"
        }
      },
      "connectTimeout": "33s",
      "circuitBreakers": {

      },
      "outlierDetection": {
        "maxEjectionPercent": 100
      },
      "commonLbConfig": {
        "healthyPanicThreshold": {

        }
      },
      "transportSocket": {
        "name": "tls",
        "typedConfig": {
          "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
          "commonTlsContext": {
            "tlsParams": {

            },
            "tlsCertificates": [
              {
                "certificateChain": {
                  "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
                },
                "privateKey": {
                  "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
                }
              }
            ],
            "validationContext": {
              "trustedCa": {
                "inlineString": "peer1-root-1\n"
              },
              "matchSubjectAltNames": [
                {
                  "exact": "spiffe://1c053652-8512-4373-90cf-5a7f6263a994.consul/ns/default/dc/cluster-01-dc/svc/payments"
                }
              ]
            }
          },
          "sni": "payments.default.default.cluster-01.external.1c053652-8512-4373-90cf-5a7f6263a994.consul"
        }
      }
    },
    {
      "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
      "name": "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
      "altStatName": "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
      "type": "EDS",
      "edsClusterConfig": {
        "edsConfig": {
          "ads": {

          },
          "resourceApiVersion": "V3"
        }
      },
      "connectTimeout": "33s",
      "circuitBreakers": {

      },
      "outlierDetection": {

      },
      "commonLbConfig": {
        "healthyPanicThreshold": {

        }
      },
      "transportSocket": {
        "name": "tls",
        "typedConfig": {
          "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
          "commonTlsContext": {
            "tlsParams": {

            },
            "tlsCertificates": [
              {
                "certificateChain": {
                  "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
                },
                "privateKey": {
                  "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
                }
              }
            ],
            "validationContext": {
              "trustedCa": {
                "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
              },
              "matchSubjectAltNames": [
                {
                  "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/db"
                }
              ]
            }
          },
          "sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
        }
      }
    }
  ],
  "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
  "nonce": "00000001"
}
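The golden file above shows how failover to a cluster peer is rendered: an `envoy.clusters.aggregate` cluster fronts one EDS cluster per failover target, and Envoy tries them in order. Below is a minimal sketch of the service-resolver entry that drives this shape, using the repo's Go API client. The service name "db" and peer name "cluster-01" mirror the test fixtures; the `"*"` catch-all subset key is an assumption for illustration.

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Send "db" traffic to the local datacenter first; if its endpoints are
	// ejected, fail over to the "db" service exported by the cluster peer
	// known locally as "cluster-01".
	entry := &api.ServiceResolverConfigEntry{
		Kind: api.ServiceResolver,
		Name: "db",
		Failover: map[string]api.ServiceResolverFailover{
			"*": {
				Targets: []api.ServiceResolverFailoverTarget{
					{Peer: "cluster-01"},
				},
			},
		},
	}

	if _, _, err := client.ConfigEntries().Set(entry, nil); err != nil {
		log.Fatal(err)
	}
}
```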
@@ -18,7 +18,7 @@

       },
       "outlierDetection": {
-
+        "maxEjectionPercent": 100
       },
       "commonLbConfig": {
         "healthyPanicThreshold": {
@@ -75,7 +75,7 @@

       },
       "outlierDetection": {
-
+        "maxEjectionPercent": 100
       },
       "commonLbConfig": {
         "healthyPanicThreshold": {
@@ -157,7 +157,7 @@

       },
       "outlierDetection": {
-
+        "maxEjectionPercent": 100
       },
       "commonLbConfig": {
         "healthyPanicThreshold": {
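These hunks flip `outlierDetection` from an empty object to `maxEjectionPercent: 100` for peered upstreams. Envoy's default `max_ejection_percent` is 10%, which would keep a failing failover target partially populated and prevent traffic from ever draining to the next target; 100% lets outlier detection eject every endpoint of a peered service. A sketch of the corresponding go-control-plane value follows; it mirrors the golden output and is not Consul's actual code path.

```go
package main

import (
	"fmt"

	envoy_cluster_v3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Allow outlier detection to eject up to 100% of a peered cluster's
	// endpoints instead of Envoy's 10% default.
	od := &envoy_cluster_v3.OutlierDetection{
		MaxEjectionPercent: wrapperspb.UInt32(100),
	}
	fmt.Println(od.GetMaxEjectionPercent().GetValue()) // 100
}
```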
agent/xds/testdata/endpoints/connect-proxy-with-chain-and-failover-to-cluster-peer.latest.golden (vendored, new file)
@@ -0,0 +1,109 @@
{
  "versionInfo": "00000001",
  "resources": [
    {
      "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
      "clusterName": "failover-target~db.default.cluster-01.external.peer1.domain",
      "endpoints": [
        {
          "lbEndpoints": [
            {
              "endpoint": {
                "address": {
                  "socketAddress": {
                    "address": "10.40.1.1",
                    "portValue": 8080
                  }
                }
              },
              "healthStatus": "HEALTHY",
              "loadBalancingWeight": 1
            },
            {
              "endpoint": {
                "address": {
                  "socketAddress": {
                    "address": "10.40.1.2",
                    "portValue": 8080
                  }
                }
              },
              "healthStatus": "HEALTHY",
              "loadBalancingWeight": 1
            }
          ]
        }
      ]
    },
    {
      "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
      "clusterName": "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
      "endpoints": [
        {
          "lbEndpoints": [
            {
              "endpoint": {
                "address": {
                  "socketAddress": {
                    "address": "10.10.1.1",
                    "portValue": 8080
                  }
                }
              },
              "healthStatus": "HEALTHY",
              "loadBalancingWeight": 1
            },
            {
              "endpoint": {
                "address": {
                  "socketAddress": {
                    "address": "10.10.1.2",
                    "portValue": 8080
                  }
                }
              },
              "healthStatus": "HEALTHY",
              "loadBalancingWeight": 1
            }
          ]
        }
      ]
    },
    {
      "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
      "clusterName": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul",
      "endpoints": [
        {
          "lbEndpoints": [
            {
              "endpoint": {
                "address": {
                  "socketAddress": {
                    "address": "10.10.1.1",
                    "portValue": 8080
                  }
                }
              },
              "healthStatus": "HEALTHY",
              "loadBalancingWeight": 1
            },
            {
              "endpoint": {
                "address": {
                  "socketAddress": {
                    "address": "10.20.1.2",
                    "portValue": 8080
                  }
                }
              },
              "healthStatus": "HEALTHY",
              "loadBalancingWeight": 1
            }
          ]
        }
      ]
    }
  ],
  "typeUrl": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
  "nonce": "00000001"
}
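Each failover target gets its own ClusterLoadAssignment here; the peered target's addresses (10.40.x) are imported over the peering rather than discovered locally. Imported instances can also be inspected directly through the health API. A sketch follows, assuming the `Peer` field on `api.QueryOptions` that the peering work introduced (it maps to the `?peer=` query parameter):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// List healthy "db" instances imported from the peer "cluster-01".
	entries, _, err := client.Health().Service("db", "", true,
		&api.QueryOptions{Peer: "cluster-01"})
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Println(e.Service.Address, e.Service.Port)
	}
}
```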
agent/xds/testdata/endpoints/ingress-with-chain-and-failover-to-cluster-peer.latest.golden (vendored, new file)
@@ -0,0 +1,75 @@
{
  "versionInfo": "00000001",
  "resources": [
    {
      "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
      "clusterName": "failover-target~db.default.cluster-01.external.peer1.domain",
      "endpoints": [
        {
          "lbEndpoints": [
            {
              "endpoint": {
                "address": {
                  "socketAddress": {
                    "address": "10.40.1.1",
                    "portValue": 8080
                  }
                }
              },
              "healthStatus": "HEALTHY",
              "loadBalancingWeight": 1
            },
            {
              "endpoint": {
                "address": {
                  "socketAddress": {
                    "address": "10.40.1.2",
                    "portValue": 8080
                  }
                }
              },
              "healthStatus": "HEALTHY",
              "loadBalancingWeight": 1
            }
          ]
        }
      ]
    },
    {
      "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
      "clusterName": "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
      "endpoints": [
        {
          "lbEndpoints": [
            {
              "endpoint": {
                "address": {
                  "socketAddress": {
                    "address": "10.10.1.1",
                    "portValue": 8080
                  }
                }
              },
              "healthStatus": "HEALTHY",
              "loadBalancingWeight": 1
            },
            {
              "endpoint": {
                "address": {
                  "socketAddress": {
                    "address": "10.10.1.2",
                    "portValue": 8080
                  }
                }
              },
              "healthStatus": "HEALTHY",
              "loadBalancingWeight": 1
            }
          ]
        }
      ]
    }
  ],
  "typeUrl": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
  "nonce": "00000001"
}
@@ -0,0 +1,180 @@
{
  "versionInfo": "00000001",
  "resources": [
    {
      "@type": "type.googleapis.com/envoy.config.listener.v3.Listener",
      "name": "db:127.0.0.1:9191",
      "address": {
        "socketAddress": {
          "address": "127.0.0.1",
          "portValue": 9191
        }
      },
      "filterChains": [
        {
          "filters": [
            {
              "name": "envoy.filters.network.tcp_proxy",
              "typedConfig": {
                "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy",
                "statPrefix": "upstream.db.default.default.dc1",
                "cluster": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
              }
            }
          ]
        }
      ],
      "trafficDirection": "OUTBOUND"
    },
    {
      "@type": "type.googleapis.com/envoy.config.listener.v3.Listener",
      "name": "prepared_query:geo-cache:127.10.10.10:8181",
      "address": {
        "socketAddress": {
          "address": "127.10.10.10",
          "portValue": 8181
        }
      },
      "filterChains": [
        {
          "filters": [
            {
              "name": "envoy.filters.network.tcp_proxy",
              "typedConfig": {
                "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy",
                "statPrefix": "upstream.prepared_query_geo-cache",
                "cluster": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul"
              }
            }
          ]
        }
      ],
      "trafficDirection": "OUTBOUND"
    },
    {
      "@type": "type.googleapis.com/envoy.config.listener.v3.Listener",
      "name": "public_listener:0.0.0.0:9999",
      "address": {
        "socketAddress": {
          "address": "0.0.0.0",
          "portValue": 9999
        }
      },
      "filterChains": [
        {
          "filters": [
            {
              "name": "envoy.filters.network.http_connection_manager",
              "typedConfig": {
                "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager",
                "statPrefix": "public_listener",
                "routeConfig": {
                  "name": "public_listener",
                  "virtualHosts": [
                    {
                      "name": "public_listener",
                      "domains": [
                        "*"
                      ],
                      "routes": [
                        {
                          "match": {
                            "prefix": "/"
                          },
                          "route": {
                            "cluster": "local_app"
                          }
                        }
                      ]
                    }
                  ]
                },
                "httpFilters": [
                  {
                    "name": "envoy.filters.http.rbac",
                    "typedConfig": {
                      "@type": "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC",
                      "rules": {

                      }
                    }
                  },
                  {
                    "name": "envoy.filters.http.router",
                    "typedConfig": {
                      "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"
                    }
                  }
                ],
                "tracing": {
                  "customTags": [
                    {
                      "tag": "custom_header",
                      "requestHeader": {
                        "name": "x-custom-traceid"
                      }
                    },
                    {
                      "tag": "alloc_id",
                      "environment": {
                        "name": "NOMAD_ALLOC_ID"
                      }
                    }
                  ],
                  "provider": {
                    "name": "envoy.tracers.zipkin",
                    "typedConfig": {
                      "@type": "type.googleapis.com/envoy.config.trace.v3.ZipkinConfig",
                      "collectorCluster": "otelcolector",
                      "collectorEndpoint": "/api/v2/spans",
                      "sharedSpanContext": false,
                      "collectorEndpointVersion": "HTTP_JSON"
                    }
                  }
                },
                "forwardClientCertDetails": "APPEND_FORWARD",
                "setCurrentClientCertDetails": {
                  "subject": true,
                  "cert": true,
                  "chain": true,
                  "dns": true,
                  "uri": true
                }
              }
            }
          ],
          "transportSocket": {
            "name": "tls",
            "typedConfig": {
              "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext",
              "commonTlsContext": {
                "tlsParams": {

                },
                "tlsCertificates": [
                  {
                    "certificateChain": {
                      "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
                    },
                    "privateKey": {
                      "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
                    }
                  }
                ],
                "validationContext": {
                  "trustedCa": {
                    "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
                  }
                }
              },
              "requireClientCertificate": true
            }
          }
        }
      ],
      "trafficDirection": "INBOUND"
    }
  ],
  "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener",
  "nonce": "00000001"
}
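The public_listener above carries the new connection-manager-level tracing stanza: two custom tags and a Zipkin provider pointed at an `otelcolector` cluster. For readers mapping the JSON back to types, here is a sketch of the same stanza expressed with go-control-plane protos; it mirrors the golden output (all values come from the fixture) and is not Consul's actual implementation.

```go
package main

import (
	"fmt"

	envoy_trace_v3 "github.com/envoyproxy/go-control-plane/envoy/config/trace/v3"
	envoy_http_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
	envoy_tracing_v3 "github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3"
	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// The Zipkin provider config, packed as a typed Any exactly as it
	// appears under "provider.typedConfig" in the golden file.
	zipkin, err := anypb.New(&envoy_trace_v3.ZipkinConfig{
		CollectorCluster:         "otelcolector",
		CollectorEndpoint:        "/api/v2/spans",
		SharedSpanContext:        wrapperspb.Bool(false),
		CollectorEndpointVersion: envoy_trace_v3.ZipkinConfig_HTTP_JSON,
	})
	if err != nil {
		panic(err)
	}

	tracing := &envoy_http_v3.HttpConnectionManager_Tracing{
		// One tag sourced from a request header, one from an env var.
		CustomTags: []*envoy_tracing_v3.CustomTag{
			{
				Tag: "custom_header",
				Type: &envoy_tracing_v3.CustomTag_RequestHeader{
					RequestHeader: &envoy_tracing_v3.CustomTag_Header{Name: "x-custom-traceid"},
				},
			},
			{
				Tag: "alloc_id",
				Type: &envoy_tracing_v3.CustomTag_Environment_{
					Environment: &envoy_tracing_v3.CustomTag_Environment{Name: "NOMAD_ALLOC_ID"},
				},
			},
		},
		Provider: &envoy_trace_v3.Tracing_Http{
			Name:       "envoy.tracers.zipkin",
			ConfigType: &envoy_trace_v3.Tracing_Http_TypedConfig{TypedConfig: zipkin},
		},
	}
	fmt.Println(protojson.Format(tracing))
}
```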
@@ -363,7 +363,7 @@ func TestAPI_AgentServicesWithFilterOpts(t *testing.T) {
 	}
 	require.NoError(t, agent.ServiceRegister(reg))

-	opts := &QueryOptions{Namespace: splitDefaultNamespace}
+	opts := &QueryOptions{Namespace: defaultNamespace}
 	services, err := agent.ServicesWithFilterOpts("foo in Tags", opts)
 	require.NoError(t, err)
 	require.Len(t, services, 1)
@@ -791,8 +791,8 @@ func TestAPI_AgentService(t *testing.T) {
 			Warning: 1,
 		},
 		Meta:       map[string]string{},
-		Namespace:  splitDefaultNamespace,
-		Partition:  splitDefaultPartition,
+		Namespace:  defaultNamespace,
+		Partition:  defaultPartition,
 		Datacenter: "dc1",
 	}
 	require.Equal(t, expect, got)
@@ -932,7 +932,7 @@ func TestAPI_AgentUpdateTTLOpts(t *testing.T) {
 		}
 	}

-	opts := &QueryOptions{Namespace: splitDefaultNamespace}
+	opts := &QueryOptions{Namespace: defaultNamespace}

 	if err := agent.UpdateTTLOpts("service:foo", "foo", HealthWarning, opts); err != nil {
 		t.Fatalf("err: %v", err)
@@ -1007,7 +1007,7 @@ func TestAPI_AgentChecksWithFilterOpts(t *testing.T) {
 	reg.TTL = "15s"
 	require.NoError(t, agent.CheckRegister(reg))

-	opts := &QueryOptions{Namespace: splitDefaultNamespace}
+	opts := &QueryOptions{Namespace: defaultNamespace}
 	checks, err := agent.ChecksWithFilterOpts("Name == foo", opts)
 	require.NoError(t, err)
 	require.Len(t, checks, 1)
@@ -1382,7 +1382,7 @@ func TestAPI_ServiceMaintenanceOpts(t *testing.T) {
 	}

 	// Specify namespace in query option
-	opts := &QueryOptions{Namespace: splitDefaultNamespace}
+	opts := &QueryOptions{Namespace: defaultNamespace}

 	// Enable maintenance mode
 	if err := agent.EnableServiceMaintenanceOpts("redis", "broken", opts); err != nil {
@@ -1701,7 +1701,7 @@ func TestAPI_AgentHealthServiceOpts(t *testing.T) {
 	requireServiceHealthID := func(t *testing.T, serviceID, expected string, shouldExist bool) {
 		msg := fmt.Sprintf("service id:%s, shouldExist:%v, expectedStatus:%s : bad %%s", serviceID, shouldExist, expected)

-		opts := &QueryOptions{Namespace: splitDefaultNamespace}
+		opts := &QueryOptions{Namespace: defaultNamespace}
 		state, out, err := agent.AgentHealthServiceByIDOpts(serviceID, opts)
 		require.Nil(t, err, msg, "err")
 		require.Equal(t, expected, state, msg, "state")
@@ -1715,7 +1715,7 @@ func TestAPI_AgentHealthServiceOpts(t *testing.T) {
 	requireServiceHealthName := func(t *testing.T, serviceName, expected string, shouldExist bool) {
 		msg := fmt.Sprintf("service name:%s, shouldExist:%v, expectedStatus:%s : bad %%s", serviceName, shouldExist, expected)

-		opts := &QueryOptions{Namespace: splitDefaultNamespace}
+		opts := &QueryOptions{Namespace: defaultNamespace}
 		state, outs, err := agent.AgentHealthServiceByNameOpts(serviceName, opts)
 		require.Nil(t, err, msg, "err")
 		require.Equal(t, expected, state, msg, "state")
@@ -20,6 +20,7 @@ type Node struct {
 	CreateIndex uint64
 	ModifyIndex uint64
 	Partition   string `json:",omitempty"`
+	PeerName    string `json:",omitempty"`
 }

 type ServiceAddress struct {
@@ -51,7 +51,7 @@ func TestAPI_CatalogNodes(t *testing.T) {
 	want := &Node{
 		ID:         s.Config.NodeID,
 		Node:       s.Config.NodeName,
-		Partition:  splitDefaultPartition,
+		Partition:  defaultPartition,
 		Address:    "127.0.0.1",
 		Datacenter: "dc1",
 		TaggedAddresses: map[string]string{
@@ -1144,8 +1144,8 @@ func TestAPI_CatalogGatewayServices_Terminating(t *testing.T) {
 	expect := []*GatewayService{
 		{
-			Service:     CompoundServiceName{Name: "api", Namespace: splitDefaultNamespace, Partition: splitDefaultPartition},
-			Gateway:     CompoundServiceName{Name: "terminating", Namespace: splitDefaultNamespace, Partition: splitDefaultPartition},
+			Service:     CompoundServiceName{Name: "api", Namespace: defaultNamespace, Partition: defaultPartition},
+			Gateway:     CompoundServiceName{Name: "terminating", Namespace: defaultNamespace, Partition: defaultPartition},
 			GatewayKind: ServiceKindTerminatingGateway,
 			CAFile:      "api/ca.crt",
 			CertFile:    "api/client.crt",
@@ -1153,8 +1153,8 @@ func TestAPI_CatalogGatewayServices_Terminating(t *testing.T) {
 			SNI: "my-domain",
 		},
 		{
-			Service:     CompoundServiceName{Name: "redis", Namespace: splitDefaultNamespace, Partition: splitDefaultPartition},
-			Gateway:     CompoundServiceName{Name: "terminating", Namespace: splitDefaultNamespace, Partition: splitDefaultPartition},
+			Service:     CompoundServiceName{Name: "redis", Namespace: defaultNamespace, Partition: defaultPartition},
+			Gateway:     CompoundServiceName{Name: "terminating", Namespace: defaultNamespace, Partition: defaultPartition},
 			GatewayKind: ServiceKindTerminatingGateway,
 			CAFile:      "ca.crt",
 			CertFile:    "client.crt",
@@ -1212,15 +1212,15 @@ func TestAPI_CatalogGatewayServices_Ingress(t *testing.T) {
 	expect := []*GatewayService{
 		{
-			Service:     CompoundServiceName{Name: "api", Namespace: splitDefaultNamespace, Partition: splitDefaultPartition},
-			Gateway:     CompoundServiceName{Name: "ingress", Namespace: splitDefaultNamespace, Partition: splitDefaultPartition},
+			Service:     CompoundServiceName{Name: "api", Namespace: defaultNamespace, Partition: defaultPartition},
+			Gateway:     CompoundServiceName{Name: "ingress", Namespace: defaultNamespace, Partition: defaultPartition},
 			GatewayKind: ServiceKindIngressGateway,
 			Protocol:    "tcp",
 			Port:        8888,
 		},
 		{
-			Service:     CompoundServiceName{Name: "redis", Namespace: splitDefaultNamespace, Partition: splitDefaultPartition},
-			Gateway:     CompoundServiceName{Name: "ingress", Namespace: splitDefaultNamespace, Partition: splitDefaultPartition},
+			Service:     CompoundServiceName{Name: "redis", Namespace: defaultNamespace, Partition: defaultPartition},
+			Gateway:     CompoundServiceName{Name: "ingress", Namespace: defaultNamespace, Partition: defaultPartition},
 			GatewayKind: ServiceKindIngressGateway,
 			Protocol:    "tcp",
 			Port:        9999,
@@ -196,6 +196,11 @@ type PassiveHealthCheck struct {
 	// MaxFailures is the count of consecutive failures that results in a host
 	// being removed from the pool.
 	MaxFailures uint32 `alias:"max_failures"`
+
+	// EnforcingConsecutive5xx is the % chance that a host will be actually ejected
+	// when an outlier status is detected through consecutive 5xx.
+	// This setting can be used to disable ejection or to ramp it up slowly.
+	EnforcingConsecutive5xx uint32 `json:",omitempty" alias:"enforcing_consecutive_5xx"`
 }

 // UpstreamLimits describes the limits that are associated with a specific
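The new `EnforcingConsecutive5xx` knob reaches Envoy through service-defaults upstream configuration. A minimal sketch with the Go API client follows; the service name "web" and the specific values are illustrative, not taken from the diff.

```go
package main

import (
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	entry := &api.ServiceConfigEntry{
		Kind:     api.ServiceDefaults,
		Name:     "web",
		Protocol: "http",
		UpstreamConfig: &api.UpstreamConfiguration{
			Defaults: &api.UpstreamConfig{
				PassiveHealthCheck: &api.PassiveHealthCheck{
					Interval:    10 * time.Second,
					MaxFailures: 5,
					// Eject a host on only 50% of qualifying consecutive-5xx
					// detections, ramping ejection in gradually.
					EnforcingConsecutive5xx: 50,
				},
			},
		},
	}

	if _, _, err := client.ConfigEntries().Set(entry, nil); err != nil {
		log.Fatal(err)
	}
}
```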
@@ -139,8 +139,8 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
 			entry: &ServiceResolverConfigEntry{
 				Kind:      ServiceResolver,
 				Name:      "test-failover",
-				Partition: splitDefaultPartition,
-				Namespace: splitDefaultNamespace,
+				Partition: defaultPartition,
+				Namespace: defaultNamespace,
 				DefaultSubset: "v1",
 				Subsets: map[string]ServiceResolverSubset{
 					"v1": {
@@ -159,7 +159,7 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
 					},
 					"v1": {
 						Service:   "alternate",
-						Namespace: splitDefaultNamespace,
+						Namespace: defaultNamespace,
 					},
 					"v3": {
 						Targets: []ServiceResolverFailoverTarget{
@@ -182,12 +182,12 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
 			entry: &ServiceResolverConfigEntry{
 				Kind:      ServiceResolver,
 				Name:      "test-redirect",
-				Partition: splitDefaultPartition,
-				Namespace: splitDefaultNamespace,
+				Partition: defaultPartition,
+				Namespace: defaultNamespace,
 				Redirect: &ServiceResolverRedirect{
 					Service:       "test-failover",
 					ServiceSubset: "v2",
-					Namespace:     splitDefaultNamespace,
+					Namespace:     defaultNamespace,
 					Datacenter:    "d",
 				},
 			},
@@ -198,8 +198,8 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
 			entry: &ServiceResolverConfigEntry{
 				Kind:      ServiceResolver,
 				Name:      "test-redirect",
-				Partition: splitDefaultPartition,
-				Namespace: splitDefaultNamespace,
+				Partition: defaultPartition,
+				Namespace: defaultNamespace,
 				Redirect: &ServiceResolverRedirect{
 					Service: "test-failover",
 					Peer:    "cluster-01",
@@ -212,14 +212,14 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
 			entry: &ServiceSplitterConfigEntry{
 				Kind:      ServiceSplitter,
 				Name:      "test-split",
-				Partition: splitDefaultPartition,
-				Namespace: splitDefaultNamespace,
+				Partition: defaultPartition,
+				Namespace: defaultNamespace,
 				Splits: []ServiceSplit{
 					{
 						Weight:        90,
 						Service:       "test-failover",
 						ServiceSubset: "v1",
-						Namespace:     splitDefaultNamespace,
+						Namespace:     defaultNamespace,
 						RequestHeaders: &HTTPHeaderModifiers{
 							Set: map[string]string{
 								"x-foo": "bar",
@@ -232,7 +232,7 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
 					{
 						Weight:    10,
 						Service:   "test-redirect",
-						Namespace: splitDefaultNamespace,
+						Namespace: defaultNamespace,
 					},
 				},
 				Meta: map[string]string{
@@ -247,8 +247,8 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
 			entry: &ServiceRouterConfigEntry{
 				Kind:      ServiceRouter,
 				Name:      "test-route",
-				Partition: splitDefaultPartition,
-				Namespace: splitDefaultNamespace,
+				Partition: defaultPartition,
+				Namespace: defaultNamespace,
 				Routes: []ServiceRoute{
 					{
 						Match: &ServiceRouteMatch{
@@ -265,8 +265,8 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) {
 						Destination: &ServiceRouteDestination{
 							Service:        "test-failover",
 							ServiceSubset:  "v2",
-							Namespace:      splitDefaultNamespace,
-							Partition:      splitDefaultPartition,
+							Namespace:      defaultNamespace,
+							Partition:      defaultPartition,
 							PrefixRewrite:  "/",
 							RequestTimeout: 5 * time.Second,
 							NumRetries:     5,
@@ -358,8 +358,8 @@ func TestAPI_ConfigEntry_ServiceResolver_LoadBalancer(t *testing.T) {
 			entry: &ServiceResolverConfigEntry{
 				Kind:      ServiceResolver,
 				Name:      "test-least-req",
-				Partition: splitDefaultPartition,
-				Namespace: splitDefaultNamespace,
+				Partition: defaultPartition,
+				Namespace: defaultNamespace,
 				LoadBalancer: &LoadBalancer{
 					Policy:             "least_request",
 					LeastRequestConfig: &LeastRequestConfig{ChoiceCount: 10},
@@ -372,8 +372,8 @@ func TestAPI_ConfigEntry_ServiceResolver_LoadBalancer(t *testing.T) {
 			entry: &ServiceResolverConfigEntry{
 				Kind:      ServiceResolver,
 				Name:      "test-ring-hash",
-				Namespace: splitDefaultNamespace,
-				Partition: splitDefaultPartition,
+				Namespace: defaultNamespace,
+				Partition: defaultPartition,
 				LoadBalancer: &LoadBalancer{
 					Policy: "ring_hash",
 					RingHashConfig: &RingHashConfig{
@@ -57,7 +57,7 @@ type ServiceConsumer struct {
 func (e *ExportedServicesConfigEntry) GetKind() string            { return ExportedServices }
 func (e *ExportedServicesConfigEntry) GetName() string            { return e.Name }
 func (e *ExportedServicesConfigEntry) GetPartition() string       { return e.Name }
-func (e *ExportedServicesConfigEntry) GetNamespace() string       { return splitDefaultNamespace }
+func (e *ExportedServicesConfigEntry) GetNamespace() string       { return "" }
 func (e *ExportedServicesConfigEntry) GetMeta() map[string]string { return e.Meta }
 func (e *ExportedServicesConfigEntry) GetCreateIndex() uint64     { return e.CreateIndex }
 func (e *ExportedServicesConfigEntry) GetModifyIndex() uint64     { return e.ModifyIndex }
@@ -17,7 +17,7 @@ func TestAPI_ConfigEntries_ExportedServices(t *testing.T) {
 	testutil.RunStep(t, "set and get", func(t *testing.T) {
 		exports := &ExportedServicesConfigEntry{
 			Name:      PartitionDefaultName,
-			Partition: splitDefaultPartition,
+			Partition: defaultPartition,
 			Meta: map[string]string{
 				"gir": "zim",
 			},
@@ -48,7 +48,7 @@ func TestAPI_ConfigEntries_ExportedServices(t *testing.T) {
 			Services: []ExportedService{
 				{
 					Name:      "db",
-					Namespace: splitDefaultNamespace,
+					Namespace: defaultNamespace,
 					Consumers: []ServiceConsumer{
 						{
 							PeerName: "alpha",
@@ -60,7 +60,7 @@ func TestAPI_ConfigEntries_ExportedServices(t *testing.T) {
 				"foo": "bar",
 				"gir": "zim",
 			},
-			Partition: splitDefaultPartition,
+			Partition: defaultPartition,
 		}

 		_, wm, err := entries.Set(updated, nil)
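For context, the exported-services entry these tests exercise is what makes a service visible to a peer in the first place. A minimal sketch with the Go API client, mirroring the fixture's "db" service and "alpha" peer:

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Export "db" to the cluster peer known locally as "alpha".
	exports := &api.ExportedServicesConfigEntry{
		Name: api.PartitionDefaultName,
		Services: []api.ExportedService{
			{
				Name: "db",
				Consumers: []api.ServiceConsumer{
					{PeerName: "alpha"},
				},
			},
		},
	}

	if _, _, err := client.ConfigEntries().Set(exports, nil); err != nil {
		log.Fatal(err)
	}
}
```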
@@ -215,8 +215,8 @@ func TestAPI_ConfigEntries(t *testing.T) {
 			"foo": "bar",
 			"gir": "zim",
 		},
-		Partition: splitDefaultPartition,
-		Namespace: splitDefaultNamespace,
+		Partition: defaultPartition,
+		Namespace: defaultNamespace,
 	}
 	ce := c.ConfigEntries()

@@ -87,7 +87,7 @@ func TestAPI_CoordinateUpdate(t *testing.T) {
 	newCoord.Height = 0.5
 	entry := &CoordinateEntry{
 		Node:      node,
-		Partition: splitDefaultPartition,
+		Partition: defaultPartition,
 		Coord:     newCoord,
 	}
 	_, err = coord.Update(entry, nil)
@@ -45,6 +45,8 @@ type HealthCheck struct {
 	Type        string
 	Namespace   string `json:",omitempty"`
 	Partition   string `json:",omitempty"`
+	ExposedPort int
+	PeerName    string `json:",omitempty"`

 	Definition HealthCheckDefinition
@@ -176,8 +178,7 @@ type HealthChecks []*HealthCheck
 // attached, this function determines the best representative of the status as
 // as single string using the following heuristic:
 //
 //	maintenance > critical > warning > passing
-//
 func (c HealthChecks) AggregatedStatus() string {
 	var passing, warning, critical, maintenance bool
 	for _, check := range c {
@@ -223,8 +223,8 @@ func TestAPI_HealthChecks(t *testing.T) {
 			ServiceName: "foo",
 			ServiceTags: []string{"bar"},
 			Type:        "ttl",
-			Partition:   splitDefaultPartition,
-			Namespace:   splitDefaultNamespace,
+			Partition:   defaultPartition,
+			Namespace:   defaultNamespace,
 		},
 	}

@@ -6,5 +6,5 @@ package api
 // The following defaults return "default" in enterprise and "" in OSS.
 // This constant is useful when a default value is needed for an
 // operation that will reject non-empty values in OSS.
-const splitDefaultNamespace = ""
-const splitDefaultPartition = ""
+const defaultNamespace = ""
+const defaultPartition = ""
@@ -17,6 +17,9 @@ type QueryFailoverOptions struct {
 	Targets []QueryFailoverTarget
 }

+// Deprecated: use QueryFailoverOptions instead.
+type QueryDatacenterOptions = QueryFailoverOptions
+
 type QueryFailoverTarget struct {
 	// PeerName specifies a peer to try during failover.
 	PeerName string
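Because this is a type alias rather than a new type, callers written against the old name keep compiling unchanged. For example:

```go
package main

import "github.com/hashicorp/consul/api"

func main() {
	// The deprecated name and the new name are interchangeable.
	var opts api.QueryDatacenterOptions = api.QueryFailoverOptions{
		Datacenters: []string{"dc2"},
	}
	_ = opts
}
```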
@@ -67,6 +67,7 @@ const (
 	KVLock         KVOp = "lock"
 	KVUnlock       KVOp = "unlock"
 	KVGet          KVOp = "get"
+	KVGetOrEmpty   KVOp = "get-or-empty"
 	KVGetTree      KVOp = "get-tree"
 	KVCheckSession KVOp = "check-session"
 	KVCheckIndex   KVOp = "check-index"
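A sketch of the new verb through the Go API client's transaction interface (the key name is illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	ops := api.TxnOps{
		{KV: &api.KVTxnOp{Verb: api.KVGetOrEmpty, Key: "maybe/missing"}},
	}

	ok, resp, _, err := client.Txn().Txn(ops, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Unlike "get", a missing key does not fail the transaction; the
	// result simply carries an empty value.
	fmt.Println(ok, len(resp.Results))
}
```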
@@ -187,7 +187,7 @@ func TestAPI_ClientTxn(t *testing.T) {
 					CreateIndex: ret.Results[0].KV.CreateIndex,
 					ModifyIndex: ret.Results[0].KV.ModifyIndex,
 					Namespace:   ret.Results[0].KV.Namespace,
-					Partition:   splitDefaultPartition,
+					Partition:   defaultPartition,
 				},
 			},
 			&TxnResult{
@@ -199,14 +199,14 @@ func TestAPI_ClientTxn(t *testing.T) {
 					CreateIndex: ret.Results[1].KV.CreateIndex,
 					ModifyIndex: ret.Results[1].KV.ModifyIndex,
 					Namespace:   ret.Results[0].KV.Namespace,
-					Partition:   splitDefaultPartition,
+					Partition:   defaultPartition,
 				},
 			},
 			&TxnResult{
 				Node: &Node{
 					ID:          nodeID,
 					Node:        "foo",
-					Partition:   splitDefaultPartition,
+					Partition:   defaultPartition,
 					Address:     "2.2.2.2",
 					Datacenter:  "dc1",
 					CreateIndex: ret.Results[2].Node.CreateIndex,
@@ -218,8 +218,8 @@ func TestAPI_ClientTxn(t *testing.T) {
 					ID:          "foo1",
 					CreateIndex: ret.Results[3].Service.CreateIndex,
 					ModifyIndex: ret.Results[3].Service.CreateIndex,
-					Partition:   splitDefaultPartition,
-					Namespace:   splitDefaultNamespace,
+					Partition:   defaultPartition,
+					Namespace:   defaultNamespace,
 				},
 			},
 			&TxnResult{
@@ -237,8 +237,8 @@ func TestAPI_ClientTxn(t *testing.T) {
 						DeregisterCriticalServiceAfterDuration: 20 * time.Second,
 					},
 					Type:        "tcp",
-					Partition:   splitDefaultPartition,
-					Namespace:   splitDefaultNamespace,
+					Partition:   defaultPartition,
+					Namespace:   defaultNamespace,
 					CreateIndex: ret.Results[4].Check.CreateIndex,
 					ModifyIndex: ret.Results[4].Check.CreateIndex,
 				},
@@ -258,8 +258,8 @@ func TestAPI_ClientTxn(t *testing.T) {
 						DeregisterCriticalServiceAfterDuration: 160 * time.Second,
 					},
 					Type:        "tcp",
-					Partition:   splitDefaultPartition,
-					Namespace:   splitDefaultNamespace,
+					Partition:   defaultPartition,
+					Namespace:   defaultNamespace,
 					CreateIndex: ret.Results[4].Check.CreateIndex,
 					ModifyIndex: ret.Results[4].Check.CreateIndex,
 				},
@@ -279,8 +279,8 @@ func TestAPI_ClientTxn(t *testing.T) {
 						DeregisterCriticalServiceAfterDuration: 20 * time.Second,
 					},
 					Type:        "udp",
-					Partition:   splitDefaultPartition,
-					Namespace:   splitDefaultNamespace,
+					Partition:   defaultPartition,
+					Namespace:   defaultNamespace,
 					CreateIndex: ret.Results[4].Check.CreateIndex,
 					ModifyIndex: ret.Results[4].Check.CreateIndex,
 				},
@@ -300,8 +300,8 @@ func TestAPI_ClientTxn(t *testing.T) {
 						DeregisterCriticalServiceAfterDuration: 20 * time.Second,
 					},
 					Type:        "udp",
-					Partition:   splitDefaultPartition,
-					Namespace:   splitDefaultNamespace,
+					Partition:   defaultPartition,
+					Namespace:   defaultNamespace,
 					CreateIndex: ret.Results[4].Check.CreateIndex,
 					ModifyIndex: ret.Results[4].Check.CreateIndex,
 				},
@@ -342,14 +342,14 @@ func TestAPI_ClientTxn(t *testing.T) {
 					CreateIndex: ret.Results[0].KV.CreateIndex,
 					ModifyIndex: ret.Results[0].KV.ModifyIndex,
 					Namespace:   ret.Results[0].KV.Namespace,
-					Partition:   splitDefaultPartition,
+					Partition:   defaultPartition,
 				},
 			},
 			&TxnResult{
 				Node: &Node{
 					ID:        s.Config.NodeID,
 					Node:      s.Config.NodeName,
-					Partition: splitDefaultPartition,
+					Partition: defaultPartition,
 					Address:   "127.0.0.1",
 					Datacenter: "dc1",
 					TaggedAddresses: map[string]string{
@@ -0,0 +1,5 @@
ARG CONSUL_IMAGE_VERSION=latest
FROM consul:${CONSUL_IMAGE_VERSION}
RUN apk update && apk add iptables
ARG TARGETARCH
COPY linux_${TARGETARCH}/consul /bin/consul
@@ -98,6 +98,10 @@ func (f *HTTPFlags) Datacenter() string {
 	return f.datacenter.String()
 }

+func (f *HTTPFlags) Partition() string {
+	return f.partition.String()
+}
+
 func (f *HTTPFlags) Stale() bool {
 	if f.stale.v == nil {
 		return false
@@ -0,0 +1,91 @@
package delete

import (
	"context"
	"flag"
	"fmt"

	"github.com/mitchellh/cli"

	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/command/flags"
)

func New(ui cli.Ui) *cmd {
	c := &cmd{UI: ui}
	c.init()
	return c
}

type cmd struct {
	UI    cli.Ui
	flags *flag.FlagSet
	http  *flags.HTTPFlags
	help  string

	name string
}

func (c *cmd) init() {
	c.flags = flag.NewFlagSet("", flag.ContinueOnError)

	c.flags.StringVar(&c.name, "name", "", "(Required) The local name assigned to the peer cluster.")

	c.http = &flags.HTTPFlags{}
	flags.Merge(c.flags, c.http.ClientFlags())
	flags.Merge(c.flags, c.http.PartitionFlag())
	c.help = flags.Usage(help, c.flags)
}

func (c *cmd) Run(args []string) int {
	if err := c.flags.Parse(args); err != nil {
		return 1
	}

	if c.name == "" {
		c.UI.Error("Missing the required -name flag")
		return 1
	}

	client, err := c.http.APIClient()
	if err != nil {
		c.UI.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err))
		return 1
	}

	peerings := client.Peerings()

	_, err = peerings.Delete(context.Background(), c.name, &api.WriteOptions{})
	if err != nil {
		c.UI.Error(fmt.Sprintf("Error deleting peering for %s: %v", c.name, err))
		return 1
	}

	c.UI.Info(fmt.Sprintf("Successfully submitted peering connection, %s, for deletion", c.name))
	return 0
}

func (c *cmd) Synopsis() string {
	return synopsis
}

func (c *cmd) Help() string {
	return flags.Usage(c.help, nil)
}

const (
	synopsis = "Delete a peering connection"
	help     = `
Usage: consul peering delete [options] -name <peer name>

  Delete a peering connection. Consul deletes all data imported from the peer
  in the background. The peering connection is removed after all associated
  data has been deleted. Operators can still read the peering connections
  while the data is being removed. A 'DeletedAt' field will be populated with
  the timestamp of when the peering was marked for deletion.

Example:

    $ consul peering delete -name west-dc
`
)
|
@ -0,0 +1,70 @@
|
||||||
|
package delete
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/mitchellh/cli"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/agent"
|
||||||
|
"github.com/hashicorp/consul/api"
|
||||||
|
"github.com/hashicorp/consul/testrpc"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestDeleteCommand_noTabs(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
if strings.ContainsRune(New(cli.NewMockUi()).Help(), '\t') {
|
||||||
|
t.Fatal("help has tabs")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDeleteCommand(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("too slow for testing.Short")
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
acceptor := agent.NewTestAgent(t, ``)
|
||||||
|
t.Cleanup(func() { _ = acceptor.Shutdown() })
|
||||||
|
|
||||||
|
testrpc.WaitForTestAgent(t, acceptor.RPC, "dc1")
|
||||||
|
|
||||||
|
acceptingClient := acceptor.Client()
|
||||||
|
|
||||||
|
t.Run("name is required", func(t *testing.T) {
|
||||||
|
ui := cli.NewMockUi()
|
||||||
|
cmd := New(ui)
|
||||||
|
|
||||||
|
args := []string{
|
||||||
|
"-http-addr=" + acceptor.HTTPAddr(),
|
||||||
|
}
|
||||||
|
|
||||||
|
code := cmd.Run(args)
|
||||||
|
require.Equal(t, 1, code, "err: %s", ui.ErrorWriter.String())
|
||||||
|
require.Contains(t, ui.ErrorWriter.String(), "Missing the required -name flag")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("delete connection", func(t *testing.T) {
|
||||||
|
|
||||||
|
req := api.PeeringGenerateTokenRequest{PeerName: "foo"}
|
||||||
|
_, _, err := acceptingClient.Peerings().GenerateToken(context.Background(), req, &api.WriteOptions{})
|
||||||
|
require.NoError(t, err, "Could not generate peering token at acceptor")
|
||||||
|
|
||||||
|
ui := cli.NewMockUi()
|
||||||
|
cmd := New(ui)
|
||||||
|
|
||||||
|
args := []string{
|
||||||
|
"-http-addr=" + acceptor.HTTPAddr(),
|
||||||
|
"-name=foo",
|
||||||
|
}
|
||||||
|
|
||||||
|
code := cmd.Run(args)
|
||||||
|
require.Equal(t, 0, code)
|
||||||
|
output := ui.OutputWriter.String()
|
||||||
|
require.Contains(t, output, "Success")
|
||||||
|
})
|
||||||
|
}
|
|
@@ -0,0 +1,109 @@
package establish

import (
	"context"
	"flag"
	"fmt"

	"github.com/mitchellh/cli"

	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/command/flags"
)

func New(ui cli.Ui) *cmd {
	c := &cmd{UI: ui}
	c.init()
	return c
}

type cmd struct {
	UI    cli.Ui
	flags *flag.FlagSet
	http  *flags.HTTPFlags
	help  string

	name         string
	peeringToken string
	meta         map[string]string
}

func (c *cmd) init() {
	c.flags = flag.NewFlagSet("", flag.ContinueOnError)

	c.flags.StringVar(&c.name, "name", "", "(Required) The local name assigned to the peer cluster.")

	c.flags.StringVar(&c.peeringToken, "peering-token", "", "(Required) The peering token from the accepting cluster.")

	c.flags.Var((*flags.FlagMapValue)(&c.meta), "meta",
		"Metadata to associate with the peering, formatted as key=value. This flag "+
			"may be specified multiple times to set multiple meta fields.")

	c.http = &flags.HTTPFlags{}
	flags.Merge(c.flags, c.http.ClientFlags())
	flags.Merge(c.flags, c.http.PartitionFlag())
	c.help = flags.Usage(help, c.flags)
}

func (c *cmd) Run(args []string) int {
	if err := c.flags.Parse(args); err != nil {
		return 1
	}

	if c.name == "" {
		c.UI.Error("Missing the required -name flag")
		return 1
	}

	if c.peeringToken == "" {
		c.UI.Error("Missing the required -peering-token flag")
		return 1
	}

	client, err := c.http.APIClient()
	if err != nil {
		c.UI.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err))
		return 1
	}

	peerings := client.Peerings()

	req := api.PeeringEstablishRequest{
		PeerName:     c.name,
		PeeringToken: c.peeringToken,
		Partition:    c.http.Partition(),
		Meta:         c.meta,
	}

	_, _, err = peerings.Establish(context.Background(), req, &api.WriteOptions{})
	if err != nil {
		c.UI.Error(fmt.Sprintf("Error establishing peering for %s: %v", req.PeerName, err))
		return 1
	}

	c.UI.Info(fmt.Sprintf("Successfully established peering connection with %s", req.PeerName))
	return 0
}

func (c *cmd) Synopsis() string {
	return synopsis
}

func (c *cmd) Help() string {
	return flags.Usage(c.help, nil)
}

const (
	synopsis = "Establish a peering connection"
	help     = `
Usage: consul peering establish [options] -name <peer name> -peering-token <token>

  Establish a peering connection. The name provided will be used locally by
  this cluster to refer to the peering connection. The peering token can
  only be used once to establish the connection.

Example:

    $ consul peering establish -name west-dc -peering-token <token>
`
)
|
@ -0,0 +1,127 @@
|
||||||
|
package establish
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/mitchellh/cli"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/agent"
|
||||||
|
"github.com/hashicorp/consul/api"
|
||||||
|
"github.com/hashicorp/consul/testrpc"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestEstablishCommand_noTabs(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
if strings.ContainsRune(New(cli.NewMockUi()).Help(), '\t') {
|
||||||
|
t.Fatal("help has tabs")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEstablishCommand(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("too slow for testing.Short")
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
acceptor := agent.NewTestAgent(t, ``)
|
||||||
|
t.Cleanup(func() { _ = acceptor.Shutdown() })
|
||||||
|
|
||||||
|
dialer := agent.NewTestAgent(t, ``)
|
||||||
|
t.Cleanup(func() { _ = dialer.Shutdown() })
|
||||||
|
|
||||||
|
testrpc.WaitForTestAgent(t, acceptor.RPC, "dc1")
|
||||||
|
testrpc.WaitForTestAgent(t, dialer.RPC, "dc1")
|
||||||
|
|
||||||
|
acceptingClient := acceptor.Client()
|
||||||
|
dialingClient := dialer.Client()
|
||||||
|
|
||||||
|
t.Run("name is required", func(t *testing.T) {
|
||||||
|
ui := cli.NewMockUi()
|
||||||
|
cmd := New(ui)
|
||||||
|
|
||||||
|
args := []string{
|
||||||
|
"-http-addr=" + dialer.HTTPAddr(),
|
||||||
|
"-peering-token=1234abcde",
|
||||||
|
}
|
||||||
|
|
||||||
|
code := cmd.Run(args)
|
||||||
|
require.Equal(t, 1, code, "err: %s", ui.ErrorWriter.String())
|
||||||
|
require.Contains(t, ui.ErrorWriter.String(), "Missing the required -name flag")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("peering token is required", func(t *testing.T) {
|
||||||
|
ui := cli.NewMockUi()
|
||||||
|
cmd := New(ui)
|
||||||
|
|
||||||
|
args := []string{
|
||||||
|
"-http-addr=" + dialer.HTTPAddr(),
|
||||||
|
"-name=bar",
|
||||||
|
}
|
||||||
|
|
||||||
|
code := cmd.Run(args)
|
||||||
|
require.Equal(t, 1, code, "err: %s", ui.ErrorWriter.String())
|
||||||
|
require.Contains(t, ui.ErrorWriter.String(), "Missing the required -peering-token flag")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("establish connection", func(t *testing.T) {
|
||||||
|
// Grab the token from the acceptor
|
||||||
|
req := api.PeeringGenerateTokenRequest{PeerName: "foo"}
|
||||||
|
res, _, err := acceptingClient.Peerings().GenerateToken(context.Background(), req, &api.WriteOptions{})
|
||||||
|
require.NoError(t, err, "Could not generate peering token at acceptor")
|
||||||
|
|
||||||
|
ui := cli.NewMockUi()
|
||||||
|
cmd := New(ui)
|
||||||
|
|
||||||
|
args := []string{
|
||||||
|
"-http-addr=" + dialer.HTTPAddr(),
|
||||||
|
"-name=bar",
|
||||||
|
fmt.Sprintf("-peering-token=%s", res.PeeringToken),
|
||||||
|
}
|
||||||
|
|
||||||
|
code := cmd.Run(args)
|
||||||
|
require.Equal(t, 0, code)
|
||||||
|
output := ui.OutputWriter.String()
|
||||||
|
require.Contains(t, output, "Success")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("establish connection with options", func(t *testing.T) {
|
||||||
|
// Grab the token from the acceptor
|
||||||
|
req := api.PeeringGenerateTokenRequest{PeerName: "foo"}
|
||||||
|
res, _, err := acceptingClient.Peerings().GenerateToken(context.Background(), req, &api.WriteOptions{})
|
||||||
|
require.NoError(t, err, "Could not generate peering token at acceptor")
|
||||||
|
|
||||||
|
ui := cli.NewMockUi()
|
||||||
|
cmd := New(ui)
|
||||||
|
|
||||||
|
args := []string{
|
||||||
|
"-http-addr=" + dialer.HTTPAddr(),
|
||||||
|
"-name=bar",
|
||||||
|
fmt.Sprintf("-peering-token=%s", res.PeeringToken),
|
||||||
|
"-meta=env=production",
|
||||||
|
"-meta=region=us-west-1",
|
||||||
|
}
|
||||||
|
|
||||||
|
code := cmd.Run(args)
|
||||||
|
require.Equal(t, 0, code)
|
||||||
|
output := ui.OutputWriter.String()
|
||||||
|
require.Contains(t, output, "Success")
|
||||||
|
|
||||||
|
//Meta
|
||||||
|
peering, _, err := dialingClient.Peerings().Read(context.Background(), "bar", &api.QueryOptions{})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
actual, ok := peering.Meta["env"]
|
||||||
|
require.True(t, ok)
|
||||||
|
require.Equal(t, "production", actual)
|
||||||
|
|
||||||
|
actual, ok = peering.Meta["region"]
|
||||||
|
require.True(t, ok)
|
||||||
|
require.Equal(t, "us-west-1", actual)
|
||||||
|
})
|
||||||
|
}
|
|
@ -0,0 +1,139 @@
package generate

import (
    "context"
    "encoding/json"
    "flag"
    "fmt"
    "strings"

    "github.com/mitchellh/cli"

    "github.com/hashicorp/consul/api"
    "github.com/hashicorp/consul/command/flags"
    "github.com/hashicorp/consul/command/peering"
)

func New(ui cli.Ui) *cmd {
    c := &cmd{UI: ui}
    c.init()
    return c
}

type cmd struct {
    UI    cli.Ui
    flags *flag.FlagSet
    http  *flags.HTTPFlags
    help  string

    name              string
    externalAddresses []string
    meta              map[string]string
    format            string
}

func (c *cmd) init() {
    c.flags = flag.NewFlagSet("", flag.ContinueOnError)

    c.flags.StringVar(&c.name, "name", "", "(Required) The local name assigned to the peer cluster.")

    c.flags.Var((*flags.FlagMapValue)(&c.meta), "meta",
        "Metadata to associate with the peering, formatted as key=value. This flag "+
            "may be specified multiple times to set multiple metadata fields.")

    c.flags.Var((*flags.AppendSliceValue)(&c.externalAddresses), "server-external-addresses",
        "A list of addresses to put into the generated token, formatted as a comma-separated list. "+
            "Addresses are in the form <host or IP>:port. "+
            "This can be used to specify load balancers or external IPs that reach the servers from "+
            "the dialing side, and will override any server addresses obtained from the \"consul\" service.")

    c.flags.StringVar(
        &c.format,
        "format",
        peering.PeeringFormatPretty,
        fmt.Sprintf("Output format {%s} (default: %s)", strings.Join(peering.GetSupportedFormats(), "|"), peering.PeeringFormatPretty),
    )

    c.http = &flags.HTTPFlags{}
    flags.Merge(c.flags, c.http.ClientFlags())
    flags.Merge(c.flags, c.http.PartitionFlag())
    c.help = flags.Usage(help, c.flags)
}

func (c *cmd) Run(args []string) int {
    if err := c.flags.Parse(args); err != nil {
        return 1
    }

    if c.name == "" {
        c.UI.Error("Missing the required -name flag")
        return 1
    }

    if !peering.FormatIsValid(c.format) {
        c.UI.Error(fmt.Sprintf("Invalid format, valid formats are {%s}", strings.Join(peering.GetSupportedFormats(), "|")))
        return 1
    }

    client, err := c.http.APIClient()
    if err != nil {
        c.UI.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err))
        return 1
    }

    peerings := client.Peerings()

    req := api.PeeringGenerateTokenRequest{
        PeerName:                c.name,
        Partition:               c.http.Partition(),
        Meta:                    c.meta,
        ServerExternalAddresses: c.externalAddresses,
    }

    res, _, err := peerings.GenerateToken(context.Background(), req, &api.WriteOptions{})
    if err != nil {
        c.UI.Error(fmt.Sprintf("Error generating peering token for %s: %v", req.PeerName, err))
        return 1
    }

    if c.format == peering.PeeringFormatJSON {
        output, err := json.Marshal(res)
        if err != nil {
            c.UI.Error(fmt.Sprintf("Error marshalling JSON: %s", err))
            return 1
        }
        c.UI.Output(string(output))
        return 0
    }

    c.UI.Info(res.PeeringToken)
    return 0
}

func (c *cmd) Synopsis() string {
    return synopsis
}

func (c *cmd) Help() string {
    return flags.Usage(c.help, nil)
}

const (
    synopsis = "Generate a peering token"
    help     = `
Usage: consul peering generate-token [options] -name <peer name>

  Generate a peering token. The name provided will be used locally by
  this cluster to refer to the peering connection. Re-generating a token
  for a given name will not interrupt any active connection, but will
  invalidate any unused token for that name.

Example:

    $ consul peering generate-token -name west-dc

Example using a load balancer in front of Consul servers:

    $ consul peering generate-token -name west-dc -server-external-addresses load-balancer.elb.us-west-1.amazonaws.com:8502
`
)
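The generated token is a base64-encoded JSON document, which is what the tests below decode to assert on fields such as ServerName. A minimal sketch of generating and inspecting a token through the Go API client (the agent address and peer name are placeholders):

```go
package main

import (
    "context"
    "encoding/base64"
    "fmt"
    "log"

    "github.com/hashicorp/consul/api"
)

func main() {
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        log.Fatal(err)
    }

    req := api.PeeringGenerateTokenRequest{PeerName: "west-dc"}
    res, _, err := client.Peerings().GenerateToken(context.Background(), req, &api.WriteOptions{})
    if err != nil {
        log.Fatal(err)
    }

    // The token is opaque to callers, but under the hood it is base64-encoded JSON.
    raw, err := base64.StdEncoding.DecodeString(res.PeeringToken)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("token payload: %s\n", raw)
}
```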
@ -0,0 +1,141 @@
package generate

import (
    "context"
    "encoding/base64"
    "encoding/json"
    "strings"
    "testing"

    "github.com/mitchellh/cli"
    "github.com/stretchr/testify/require"

    "github.com/hashicorp/consul/agent"
    "github.com/hashicorp/consul/api"
    "github.com/hashicorp/consul/testrpc"
)

func TestGenerateCommand_noTabs(t *testing.T) {
    t.Parallel()

    if strings.ContainsRune(New(cli.NewMockUi()).Help(), '\t') {
        t.Fatal("help has tabs")
    }
}

func TestGenerateCommand(t *testing.T) {
    if testing.Short() {
        t.Skip("too slow for testing.Short")
    }

    t.Parallel()

    a := agent.NewTestAgent(t, ``)
    t.Cleanup(func() { _ = a.Shutdown() })
    testrpc.WaitForTestAgent(t, a.RPC, "dc1")

    client := a.Client()

    t.Run("name is required", func(t *testing.T) {
        ui := cli.NewMockUi()
        cmd := New(ui)

        args := []string{
            "-http-addr=" + a.HTTPAddr(),
        }

        code := cmd.Run(args)
        require.Equal(t, 1, code, "err: %s", ui.ErrorWriter.String())
        require.Contains(t, ui.ErrorWriter.String(), "Missing the required -name flag")
    })

    t.Run("invalid format", func(t *testing.T) {
        ui := cli.NewMockUi()
        cmd := New(ui)

        args := []string{
            "-http-addr=" + a.HTTPAddr(),
            "-name=foo",
            "-format=toml",
        }

        code := cmd.Run(args)
        require.Equal(t, 1, code, "exited successfully when it should have failed")
        output := ui.ErrorWriter.String()
        require.Contains(t, output, "Invalid format")
    })

    t.Run("generate token", func(t *testing.T) {
        ui := cli.NewMockUi()
        cmd := New(ui)

        args := []string{
            "-http-addr=" + a.HTTPAddr(),
            "-name=foo",
        }

        code := cmd.Run(args)
        require.Equal(t, 0, code)
        token, err := base64.StdEncoding.DecodeString(ui.OutputWriter.String())
        require.NoError(t, err, "error decoding token")
        require.Contains(t, string(token), "\"ServerName\":\"server.dc1.consul\"")
    })

    t.Run("generate token with options", func(t *testing.T) {
        ui := cli.NewMockUi()
        cmd := New(ui)

        args := []string{
            "-http-addr=" + a.HTTPAddr(),
            "-name=bar",
            "-server-external-addresses=1.2.3.4,5.6.7.8",
            "-meta=env=production",
            "-meta=region=us-east-1",
        }

        code := cmd.Run(args)
        require.Equal(t, 0, code)
        token, err := base64.StdEncoding.DecodeString(ui.OutputWriter.String())
        require.NoError(t, err, "error decoding token")
        require.Contains(t, string(token), "\"ServerName\":\"server.dc1.consul\"")

        // ServerExternalAddresses
        require.Contains(t, string(token), "1.2.3.4")
        require.Contains(t, string(token), "5.6.7.8")

        // Meta
        peering, _, err := client.Peerings().Read(context.Background(), "bar", &api.QueryOptions{})
        require.NoError(t, err)

        actual, ok := peering.Meta["env"]
        require.True(t, ok)
        require.Equal(t, "production", actual)

        actual, ok = peering.Meta["region"]
        require.True(t, ok)
        require.Equal(t, "us-east-1", actual)
    })

    t.Run("generate token with json output", func(t *testing.T) {
        ui := cli.NewMockUi()
        cmd := New(ui)

        args := []string{
            "-http-addr=" + a.HTTPAddr(),
            "-name=baz",
            "-format=json",
        }

        code := cmd.Run(args)
        require.Equal(t, 0, code)
        output := ui.OutputWriter.Bytes()

        var outputRes api.PeeringGenerateTokenResponse
        require.NoError(t, json.Unmarshal(output, &outputRes))

        token, err := base64.StdEncoding.DecodeString(outputRes.PeeringToken)
        require.NoError(t, err, "error decoding token")
        require.Contains(t, string(token), "\"ServerName\":\"server.dc1.consul\"")
    })
}
@ -0,0 +1,139 @@
package list

import (
    "context"
    "encoding/json"
    "flag"
    "fmt"
    "sort"
    "strings"

    "github.com/mitchellh/cli"
    "github.com/ryanuber/columnize"

    "github.com/hashicorp/consul/api"
    "github.com/hashicorp/consul/command/flags"
    "github.com/hashicorp/consul/command/peering"
)

func New(ui cli.Ui) *cmd {
    c := &cmd{UI: ui}
    c.init()
    return c
}

type cmd struct {
    UI    cli.Ui
    flags *flag.FlagSet
    http  *flags.HTTPFlags
    help  string

    format string
}

func (c *cmd) init() {
    c.flags = flag.NewFlagSet("", flag.ContinueOnError)

    c.flags.StringVar(
        &c.format,
        "format",
        peering.PeeringFormatPretty,
        fmt.Sprintf("Output format {%s} (default: %s)", strings.Join(peering.GetSupportedFormats(), "|"), peering.PeeringFormatPretty),
    )

    c.http = &flags.HTTPFlags{}
    flags.Merge(c.flags, c.http.ClientFlags())
    flags.Merge(c.flags, c.http.PartitionFlag())
    c.help = flags.Usage(help, c.flags)
}

func (c *cmd) Run(args []string) int {
    if err := c.flags.Parse(args); err != nil {
        return 1
    }

    if !peering.FormatIsValid(c.format) {
        c.UI.Error(fmt.Sprintf("Invalid format, valid formats are {%s}", strings.Join(peering.GetSupportedFormats(), "|")))
        return 1
    }

    client, err := c.http.APIClient()
    if err != nil {
        c.UI.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err))
        return 1
    }

    peerings := client.Peerings()

    res, _, err := peerings.List(context.Background(), &api.QueryOptions{})
    if err != nil {
        c.UI.Error(fmt.Sprintf("Error listing peerings: %s", err))
        return 1
    }

    list := peeringList(res)
    sort.Sort(list)

    if c.format == peering.PeeringFormatJSON {
        output, err := json.Marshal(list)
        if err != nil {
            c.UI.Error(fmt.Sprintf("Error marshalling JSON: %s", err))
            return 1
        }
        c.UI.Output(string(output))
        return 0
    }

    if len(res) == 0 {
        c.UI.Info("There are no peering connections.")
        return 0
    }

    result := make([]string, 0, len(list))
    header := "Name\x1fState\x1fImported Svcs\x1fExported Svcs\x1fMeta"
    result = append(result, header)
    for _, peer := range list {
        metaPairs := make([]string, 0, len(peer.Meta))
        for k, v := range peer.Meta {
            metaPairs = append(metaPairs, fmt.Sprintf("%s=%s", k, v))
        }
        meta := strings.Join(metaPairs, ",")
        line := fmt.Sprintf("%s\x1f%s\x1f%d\x1f%d\x1f%s",
            peer.Name, peer.State, peer.ImportedServiceCount, peer.ExportedServiceCount, meta)
        result = append(result, line)
    }

    output := columnize.Format(result, &columnize.Config{Delim: string([]byte{0x1f})})
    c.UI.Output(output)

    return 0
}

func (c *cmd) Synopsis() string {
    return synopsis
}

func (c *cmd) Help() string {
    return flags.Usage(c.help, nil)
}

const (
    synopsis = "List peering connections"
    help     = `
Usage: consul peering list [options]

  List all peering connections. The results will be filtered according
  to ACL policy configuration.

Example:

    $ consul peering list
`
)

// peeringList applies sort.Interface to a list of peering connections for sorting by name.
type peeringList []*api.Peering

func (d peeringList) Len() int           { return len(d) }
func (d peeringList) Less(i, j int) bool { return d[i].Name < d[j].Name }
func (d peeringList) Swap(i, j int)      { d[i], d[j] = d[j], d[i] }
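The pretty-print path joins each column with the ASCII unit separator (0x1f) before handing the rows to columnize, so cell values may safely contain spaces and commas. A standalone sketch of the same formatting approach, using made-up sample rows:

```go
package main

import (
    "fmt"

    "github.com/ryanuber/columnize"
)

func main() {
    // Rows use 0x1f as the column delimiter, as the list command does.
    rows := []string{
        "Name\x1fState\x1fImported Svcs\x1fExported Svcs\x1fMeta",
        "east-dc\x1fACTIVE\x1f3\x1f1\x1fenv=production",
        "west-dc\x1fPENDING\x1f0\x1f0\x1f",
    }

    out := columnize.Format(rows, &columnize.Config{Delim: string([]byte{0x1f})})
    fmt.Println(out)
}
```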
@ -0,0 +1,133 @@
package list

import (
    "context"
    "encoding/json"
    "strings"
    "testing"

    "github.com/mitchellh/cli"
    "github.com/stretchr/testify/require"

    "github.com/hashicorp/consul/agent"
    "github.com/hashicorp/consul/api"
    "github.com/hashicorp/consul/testrpc"
)

func TestListCommand_noTabs(t *testing.T) {
    t.Parallel()

    if strings.ContainsRune(New(cli.NewMockUi()).Help(), '\t') {
        t.Fatal("help has tabs")
    }
}

func TestListCommand(t *testing.T) {
    if testing.Short() {
        t.Skip("too slow for testing.Short")
    }

    t.Parallel()

    acceptor := agent.NewTestAgent(t, ``)
    t.Cleanup(func() { _ = acceptor.Shutdown() })

    testrpc.WaitForTestAgent(t, acceptor.RPC, "dc1")

    acceptingClient := acceptor.Client()

    t.Run("invalid format", func(t *testing.T) {
        ui := cli.NewMockUi()
        cmd := New(ui)

        args := []string{
            "-http-addr=" + acceptor.HTTPAddr(),
            "-format=toml",
        }

        code := cmd.Run(args)
        require.Equal(t, 1, code, "exited successfully when it should have failed")
        output := ui.ErrorWriter.String()
        require.Contains(t, output, "Invalid format")
    })

    t.Run("no results - pretty", func(t *testing.T) {
        ui := cli.NewMockUi()
        cmd := New(ui)

        args := []string{
            "-http-addr=" + acceptor.HTTPAddr(),
        }

        code := cmd.Run(args)
        require.Equal(t, 0, code)
        output := ui.OutputWriter.String()
        require.Contains(t, output, "no peering connections")
    })

    t.Run("two results for pretty print", func(t *testing.T) {
        generateReq := api.PeeringGenerateTokenRequest{PeerName: "foo"}
        _, _, err := acceptingClient.Peerings().GenerateToken(context.Background(), generateReq, &api.WriteOptions{})
        require.NoError(t, err, "Could not generate peering token at acceptor for \"foo\"")

        generateReq = api.PeeringGenerateTokenRequest{PeerName: "bar"}
        _, _, err = acceptingClient.Peerings().GenerateToken(context.Background(), generateReq, &api.WriteOptions{})
        require.NoError(t, err, "Could not generate peering token at acceptor for \"bar\"")

        ui := cli.NewMockUi()
        cmd := New(ui)

        args := []string{
            "-http-addr=" + acceptor.HTTPAddr(),
        }

        code := cmd.Run(args)
        require.Equal(t, 0, code)
        output := ui.OutputWriter.String()
        require.Equal(t, 3, strings.Count(output, "\n")) // There should be three lines, including the header

        lines := strings.Split(output, "\n")

        require.Contains(t, lines[0], "Name")
        require.Contains(t, lines[1], "bar")
        require.Contains(t, lines[2], "foo")
    })

    t.Run("no results - json", func(t *testing.T) {
        ui := cli.NewMockUi()
        cmd := New(ui)

        args := []string{
            "-http-addr=" + acceptor.HTTPAddr(),
            "-format=json",
        }

        code := cmd.Run(args)
        require.Equal(t, 0, code)
        output := ui.OutputWriter.String()
        require.Contains(t, output, "[]")
    })

    t.Run("two results for JSON print", func(t *testing.T) {
        ui := cli.NewMockUi()
        cmd := New(ui)

        args := []string{
            "-http-addr=" + acceptor.HTTPAddr(),
            "-format=json",
        }

        code := cmd.Run(args)
        require.Equal(t, 0, code)
        output := ui.OutputWriter.Bytes()

        var outputList []*api.Peering
        require.NoError(t, json.Unmarshal(output, &outputList))

        require.Len(t, outputList, 2)
        require.Equal(t, "bar", outputList[0].Name)
        require.Equal(t, "foo", outputList[1].Name)
    })
}
@ -0,0 +1,69 @@
package peering

import (
    "github.com/mitchellh/cli"

    "github.com/hashicorp/consul/command/flags"
)

const (
    PeeringFormatJSON   = "json"
    PeeringFormatPretty = "pretty"
)

func GetSupportedFormats() []string {
    return []string{PeeringFormatJSON, PeeringFormatPretty}
}

func FormatIsValid(f string) bool {
    return f == PeeringFormatPretty || f == PeeringFormatJSON
}

func New() *cmd {
    return &cmd{}
}

type cmd struct{}

func (c *cmd) Run(args []string) int {
    return cli.RunResultHelp
}

func (c *cmd) Synopsis() string {
    return synopsis
}

func (c *cmd) Help() string {
    return flags.Usage(help, nil)
}

const synopsis = "Create and manage peering connections between Consul clusters"

const help = `
Usage: consul peering <subcommand> [options] [args]

  This command has subcommands for interacting with Cluster Peering
  connections. Here are some simple examples; more detailed examples
  are available in the subcommands and the documentation.

  Generate a peering token:

      $ consul peering generate-token -name west-dc

  Establish a peering connection:

      $ consul peering establish -name east-dc -peering-token <token>

  List all the local peering connections:

      $ consul peering list

  Print the status of a peering connection:

      $ consul peering read -name west-dc

  Delete and close a peering connection:

      $ consul peering delete -name west-dc

  For more examples, ask for subcommand help or view the documentation.
`
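Putting the subcommands together, the typical workflow is: generate a token on the accepting cluster, then establish the connection from the dialing cluster. A minimal sketch of that flow through the Go API client, assuming two reachable agents at hypothetical addresses:

```go
package main

import (
    "context"
    "log"

    "github.com/hashicorp/consul/api"
)

func main() {
    // Hypothetical agent addresses for the two clusters.
    acceptor, err := api.NewClient(&api.Config{Address: "acceptor.example.com:8500"})
    if err != nil {
        log.Fatal(err)
    }
    dialer, err := api.NewClient(&api.Config{Address: "dialer.example.com:8500"})
    if err != nil {
        log.Fatal(err)
    }

    // Step 1: generate a token on the accepting cluster
    // (equivalent to `consul peering generate-token`).
    genReq := api.PeeringGenerateTokenRequest{PeerName: "east-dc"}
    genRes, _, err := acceptor.Peerings().GenerateToken(context.Background(), genReq, &api.WriteOptions{})
    if err != nil {
        log.Fatal(err)
    }

    // Step 2: establish the connection from the dialing cluster
    // (equivalent to `consul peering establish`).
    estReq := api.PeeringEstablishRequest{
        PeerName:     "west-dc",
        PeeringToken: genRes.PeeringToken,
    }
    if _, _, err := dialer.Peerings().Establish(context.Background(), estReq, &api.WriteOptions{}); err != nil {
        log.Fatal(err)
    }
}
```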
@ -0,0 +1,164 @@
package read

import (
    "bytes"
    "context"
    "encoding/json"
    "flag"
    "fmt"
    "strings"
    "time"

    "github.com/mitchellh/cli"

    "github.com/hashicorp/consul/api"
    "github.com/hashicorp/consul/command/flags"
    "github.com/hashicorp/consul/command/peering"
)

func New(ui cli.Ui) *cmd {
    c := &cmd{UI: ui}
    c.init()
    return c
}

type cmd struct {
    UI    cli.Ui
    flags *flag.FlagSet
    http  *flags.HTTPFlags
    help  string

    name   string
    format string
}

func (c *cmd) init() {
    c.flags = flag.NewFlagSet("", flag.ContinueOnError)

    c.flags.StringVar(&c.name, "name", "", "(Required) The local name assigned to the peer cluster.")

    c.flags.StringVar(
        &c.format,
        "format",
        peering.PeeringFormatPretty,
        fmt.Sprintf("Output format {%s} (default: %s)", strings.Join(peering.GetSupportedFormats(), "|"), peering.PeeringFormatPretty),
    )

    c.http = &flags.HTTPFlags{}
    flags.Merge(c.flags, c.http.ClientFlags())
    flags.Merge(c.flags, c.http.PartitionFlag())
    c.help = flags.Usage(help, c.flags)
}

func (c *cmd) Run(args []string) int {
    if err := c.flags.Parse(args); err != nil {
        return 1
    }

    if c.name == "" {
        c.UI.Error("Missing the required -name flag")
        return 1
    }

    if !peering.FormatIsValid(c.format) {
        c.UI.Error(fmt.Sprintf("Invalid format, valid formats are {%s}", strings.Join(peering.GetSupportedFormats(), "|")))
        return 1
    }

    client, err := c.http.APIClient()
    if err != nil {
        c.UI.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err))
        return 1
    }

    peerings := client.Peerings()

    res, _, err := peerings.Read(context.Background(), c.name, &api.QueryOptions{})
    if err != nil {
        c.UI.Error(fmt.Sprintf("Error reading peering: %s", err))
        return 1
    }

    if res == nil {
        c.UI.Error(fmt.Sprintf("No peering with name %s found.", c.name))
        return 1
    }

    if c.format == peering.PeeringFormatJSON {
        output, err := json.Marshal(res)
        if err != nil {
            c.UI.Error(fmt.Sprintf("Error marshalling JSON: %s", err))
            return 1
        }
        c.UI.Output(string(output))
        return 0
    }

    c.UI.Output(formatPeering(res))

    return 0
}

func formatPeering(peering *api.Peering) string {
    var buffer bytes.Buffer

    buffer.WriteString(fmt.Sprintf("Name: %s\n", peering.Name))
    buffer.WriteString(fmt.Sprintf("ID: %s\n", peering.ID))
    if peering.Partition != "" {
        buffer.WriteString(fmt.Sprintf("Partition: %s\n", peering.Partition))
    }
    if peering.DeletedAt != nil {
        buffer.WriteString(fmt.Sprintf("DeletedAt: %s\n", peering.DeletedAt.Format(time.RFC3339)))
    }
    buffer.WriteString(fmt.Sprintf("State: %s\n", peering.State))
    if len(peering.Meta) > 0 {
        buffer.WriteString("Meta:\n")
        for k, v := range peering.Meta {
            buffer.WriteString(fmt.Sprintf(" %s=%s\n", k, v))
        }
    }

    buffer.WriteString("\n")
    buffer.WriteString(fmt.Sprintf("Peer ID: %s\n", peering.PeerID))
    buffer.WriteString(fmt.Sprintf("Peer Server Name: %s\n", peering.PeerServerName))
    buffer.WriteString(fmt.Sprintf("Peer CA Pems: %d\n", len(peering.PeerCAPems)))
    if len(peering.PeerServerAddresses) > 0 {
        buffer.WriteString("Peer Server Addresses:\n")
        for _, v := range peering.PeerServerAddresses {
            buffer.WriteString(fmt.Sprintf(" %s\n", v))
        }
    }

    buffer.WriteString("\n")
    buffer.WriteString(fmt.Sprintf("Imported Services: %d\n", peering.ImportedServiceCount))
    buffer.WriteString(fmt.Sprintf("Exported Services: %d\n", peering.ExportedServiceCount))

    buffer.WriteString("\n")
    buffer.WriteString(fmt.Sprintf("Create Index: %d\n", peering.CreateIndex))
    buffer.WriteString(fmt.Sprintf("Modify Index: %d\n", peering.ModifyIndex))

    return buffer.String()
}

func (c *cmd) Synopsis() string {
    return synopsis
}

func (c *cmd) Help() string {
    return flags.Usage(c.help, nil)
}

const (
    synopsis = "Read a peering connection"
    help     = `
Usage: consul peering read [options] -name <peer name>

  Read a peering connection with the provided name. If one is not found,
  the command will exit with a non-zero code. The result will be filtered
  according to ACL policy configuration.

Example:

    $ consul peering read -name west-dc
`
)
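A minimal sketch of the same read path through the Go API client; note that Read returns a nil Peering rather than an error when the name is unknown, which is why the command above checks for nil explicitly (the agent address and peer name are placeholders):

```go
package main

import (
    "context"
    "fmt"
    "log"

    "github.com/hashicorp/consul/api"
)

func main() {
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        log.Fatal(err)
    }

    // Read returns a nil result, not an error, when no peering has this name.
    res, _, err := client.Peerings().Read(context.Background(), "west-dc", &api.QueryOptions{})
    if err != nil {
        log.Fatal(err)
    }
    if res == nil {
        log.Fatal("no peering with name west-dc found")
    }

    fmt.Printf("%s: state=%s imported=%d exported=%d\n",
        res.Name, res.State, res.ImportedServiceCount, res.ExportedServiceCount)
}
```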
@ -0,0 +1,135 @@
package read

import (
    "context"
    "encoding/json"
    "strings"
    "testing"

    "github.com/mitchellh/cli"
    "github.com/stretchr/testify/require"

    "github.com/hashicorp/consul/agent"
    "github.com/hashicorp/consul/api"
    "github.com/hashicorp/consul/testrpc"
)

func TestReadCommand_noTabs(t *testing.T) {
    t.Parallel()

    if strings.ContainsRune(New(cli.NewMockUi()).Help(), '\t') {
        t.Fatal("help has tabs")
    }
}

func TestReadCommand(t *testing.T) {
    if testing.Short() {
        t.Skip("too slow for testing.Short")
    }

    t.Parallel()

    acceptor := agent.NewTestAgent(t, ``)
    t.Cleanup(func() { _ = acceptor.Shutdown() })

    testrpc.WaitForTestAgent(t, acceptor.RPC, "dc1")

    acceptingClient := acceptor.Client()

    t.Run("no name flag", func(t *testing.T) {
        ui := cli.NewMockUi()
        cmd := New(ui)

        args := []string{
            "-http-addr=" + acceptor.HTTPAddr(),
        }

        code := cmd.Run(args)
        require.Equal(t, 1, code, "err: %s", ui.ErrorWriter.String())
        require.Contains(t, ui.ErrorWriter.String(), "Missing the required -name flag")
    })

    t.Run("invalid format", func(t *testing.T) {
        ui := cli.NewMockUi()
        cmd := New(ui)

        args := []string{
            "-http-addr=" + acceptor.HTTPAddr(),
            "-name=foo",
            "-format=toml",
        }

        code := cmd.Run(args)
        require.Equal(t, 1, code, "exited successfully when it should have failed")
        output := ui.ErrorWriter.String()
        require.Contains(t, output, "Invalid format")
    })

    t.Run("peering does not exist", func(t *testing.T) {
        ui := cli.NewMockUi()
        cmd := New(ui)

        args := []string{
            "-http-addr=" + acceptor.HTTPAddr(),
            "-name=foo",
        }

        code := cmd.Run(args)
        require.Equal(t, 1, code, "err: %s", ui.ErrorWriter.String())
        require.Contains(t, ui.ErrorWriter.String(), "No peering with name")
    })

    t.Run("read with pretty print", func(t *testing.T) {
        generateReq := api.PeeringGenerateTokenRequest{
            PeerName: "foo",
            Meta: map[string]string{
                "env": "production",
            },
        }
        _, _, err := acceptingClient.Peerings().GenerateToken(context.Background(), generateReq, &api.WriteOptions{})
        require.NoError(t, err, "Could not generate peering token at acceptor for \"foo\"")

        ui := cli.NewMockUi()
        cmd := New(ui)

        args := []string{
            "-http-addr=" + acceptor.HTTPAddr(),
            "-name=foo",
        }

        code := cmd.Run(args)
        require.Equal(t, 0, code)
        output := ui.OutputWriter.String()
        require.Greater(t, strings.Count(output, "\n"), 0) // Check that the output is not empty

        // Spot check some fields and values
        require.Contains(t, output, "foo")
        require.Contains(t, output, api.PeeringStatePending)
        require.Contains(t, output, "env=production")
        require.Contains(t, output, "Imported Services")
        require.Contains(t, output, "Exported Services")
    })

    t.Run("read with json", func(t *testing.T) {
        ui := cli.NewMockUi()
        cmd := New(ui)

        args := []string{
            "-http-addr=" + acceptor.HTTPAddr(),
            "-name=foo",
            "-format=json",
        }

        code := cmd.Run(args)
        require.Equal(t, 0, code)
        output := ui.OutputWriter.Bytes()

        var outputPeering api.Peering
        require.NoError(t, json.Unmarshal(output, &outputPeering))

        require.Equal(t, "foo", outputPeering.Name)
        require.Equal(t, "production", outputPeering.Meta["env"])
    })
}
@ -96,6 +96,12 @@ import (
 	operraft "github.com/hashicorp/consul/command/operator/raft"
 	operraftlist "github.com/hashicorp/consul/command/operator/raft/listpeers"
 	operraftremove "github.com/hashicorp/consul/command/operator/raft/removepeer"
+	"github.com/hashicorp/consul/command/peering"
+	peerdelete "github.com/hashicorp/consul/command/peering/delete"
+	peerestablish "github.com/hashicorp/consul/command/peering/establish"
+	peergenerate "github.com/hashicorp/consul/command/peering/generate"
+	peerlist "github.com/hashicorp/consul/command/peering/list"
+	peerread "github.com/hashicorp/consul/command/peering/read"
 	"github.com/hashicorp/consul/command/reload"
 	"github.com/hashicorp/consul/command/rtt"
 	"github.com/hashicorp/consul/command/services"

@ -214,6 +220,12 @@ func RegisteredCommands(ui cli.Ui) map[string]mcli.CommandFactory {
 	entry{"operator raft", func(cli.Ui) (cli.Command, error) { return operraft.New(), nil }},
 	entry{"operator raft list-peers", func(ui cli.Ui) (cli.Command, error) { return operraftlist.New(ui), nil }},
 	entry{"operator raft remove-peer", func(ui cli.Ui) (cli.Command, error) { return operraftremove.New(ui), nil }},
+	entry{"peering", func(cli.Ui) (cli.Command, error) { return peering.New(), nil }},
+	entry{"peering delete", func(ui cli.Ui) (cli.Command, error) { return peerdelete.New(ui), nil }},
+	entry{"peering generate-token", func(ui cli.Ui) (cli.Command, error) { return peergenerate.New(ui), nil }},
+	entry{"peering establish", func(ui cli.Ui) (cli.Command, error) { return peerestablish.New(ui), nil }},
+	entry{"peering list", func(ui cli.Ui) (cli.Command, error) { return peerlist.New(ui), nil }},
+	entry{"peering read", func(ui cli.Ui) (cli.Command, error) { return peerread.New(ui), nil }},
 	entry{"reload", func(ui cli.Ui) (cli.Command, error) { return reload.New(ui), nil }},
 	entry{"rtt", func(ui cli.Ui) (cli.Command, error) { return rtt.New(ui), nil }},
 	entry{"services", func(cli.Ui) (cli.Command, error) { return services.New(), nil }},
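These entries are command factories keyed by the full subcommand path; a runner such as mitchellh/cli dispatches on the leading arguments to pick the matching factory. A minimal standalone sketch of the same pattern (the command and names here are illustrative, not Consul's actual wiring, which additionally threads a ui value through each factory):

```go
package main

import (
    "fmt"
    "log"
    "os"

    "github.com/mitchellh/cli"
)

type echoCmd struct{}

func (c *echoCmd) Run(args []string) int { fmt.Println("echo:", args); return 0 }
func (c *echoCmd) Synopsis() string      { return "Print arguments" }
func (c *echoCmd) Help() string          { return "Usage: demo echo [args]" }

func main() {
    app := cli.NewCLI("demo", "0.0.1")
    app.Args = os.Args[1:]
    // Factories keyed by subcommand path, mirroring the RegisteredCommands map.
    app.Commands = map[string]cli.CommandFactory{
        "echo": func() (cli.Command, error) { return &echoCmd{}, nil },
    }

    status, err := app.Run()
    if err != nil {
        log.Println(err)
    }
    os.Exit(status)
}
```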
@ -57,7 +57,7 @@ func AssertElementsMatch[V any](
 		}
 	}

-	if len(outX) == len(outY) && len(outX) == len(listX) {
+	if len(outX) == len(outY) && len(listX) == len(listY) {
 		return // matches
 	}
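Judging from this hunk alone, the old early-return could declare a match once every element of listX had found a partner, even if the other list was longer; requiring the two input lists to have equal length closes that gap. A tiny standalone illustration of why the length check matters for order-insensitive comparison (not the repo's actual helper):

```go
package main

import "fmt"

// elementsMatch reports whether two slices contain the same elements,
// regardless of order. The length check comes first: without it,
// []int{1, 2} would "match" []int{1, 2, 2} once every element of the
// first slice found a partner in the second.
func elementsMatch(x, y []int) bool {
    if len(x) != len(y) {
        return false
    }
    used := make([]bool, len(y))
    for _, vx := range x {
        found := false
        for j, vy := range y {
            if !used[j] && vx == vy {
                used[j] = true
                found = true
                break
            }
        }
        if !found {
            return false
        }
    }
    return true
}

func main() {
    fmt.Println(elementsMatch([]int{1, 2}, []int{2, 1}))    // true
    fmt.Println(elementsMatch([]int{1, 2}, []int{1, 2, 2})) // false
}
```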