mirror of https://github.com/hashicorp/consul
Merge branch 'master' of github.com:hashicorp/consul into consul-documentation-update
commit
498a698ffa
|
@ -0,0 +1,9 @@
|
|||
```release-note:improvement
|
||||
agent: Save exposed Envoy ports to the agent's state when `Expose.Checks` is true in proxy's configuration.
|
||||
```
|
||||
```release-note:improvement
|
||||
api: Add `ExposedPort` to the health check API resource.
|
||||
```
|
||||
```release-note:improvement
|
||||
command: Exclude exposed Envoy ports from traffic redirection when providing `-proxy-id` and `Expose.Checks` is set.
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:improvement
|
||||
ui: Show a message to explain that health checks may be out of date if the serf health check is in a critical state
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:improvement
|
||||
connect: update supported envoy versions to 1.18.3, 1.17.3, 1.16.4, and 1.15.5
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
server: ensure that central service config flattening properly resets the state each time
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
agent: ensure we hash the non-deprecated upstream fields on ServiceConfigRequest
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:feature
|
||||
xds: emit a labeled gauge of connected xDS streams by version
|
||||
```
|
|
@ -0,0 +1,6 @@
|
|||
```release-note:bug
|
||||
areas: **(Enterprise only)** Revert to the 10s dial timeout used before connection pooling was introduced in 1.7.3.
|
||||
```
|
||||
```release-note:improvement
|
||||
areas: **(Enterprise only)** Use server agent's gossip_wan config when setting memberlist configuration for network areas. Previously they used memberlist's WAN defaults.
|
||||
```
|
|
@ -36,7 +36,7 @@ steps:
|
|||
install-gotestsum: &install-gotestsum
|
||||
name: install gotestsum
|
||||
environment:
|
||||
GOTESTSUM_RELEASE: 0.6.0
|
||||
GOTESTSUM_RELEASE: 1.6.4
|
||||
command: |
|
||||
url=https://github.com/gotestyourself/gotestsum/releases/download
|
||||
curl -sSL "${url}/v${GOTESTSUM_RELEASE}/gotestsum_${GOTESTSUM_RELEASE}_linux_amd64.tar.gz" | \
|
||||
|
@ -767,14 +767,14 @@ jobs:
|
|||
command: make test-coverage-ci
|
||||
- run: *notify-slack-failure
|
||||
|
||||
envoy-integration-test-1_15_4: &ENVOY_TESTS
|
||||
envoy-integration-test-1_15_5: &ENVOY_TESTS
|
||||
docker:
|
||||
# We only really need bash and docker-compose which is installed on all
|
||||
# Circle images but pick Go since we have to pick one of them.
|
||||
- image: *GOLANG_IMAGE
|
||||
parallelism: 2
|
||||
environment:
|
||||
ENVOY_VERSION: "1.15.4"
|
||||
ENVOY_VERSION: "1.15.5"
|
||||
steps: &ENVOY_INTEGRATION_TEST_STEPS
|
||||
- checkout
|
||||
# Get go binary from workspace
|
||||
|
@ -807,32 +807,32 @@ jobs:
|
|||
path: *TEST_RESULTS_DIR
|
||||
- run: *notify-slack-failure
|
||||
|
||||
envoy-integration-test-1_15_4-v2compat:
|
||||
envoy-integration-test-1_15_5-v2compat:
|
||||
<<: *ENVOY_TESTS
|
||||
environment:
|
||||
ENVOY_VERSION: "1.15.4"
|
||||
ENVOY_VERSION: "1.15.5"
|
||||
TEST_V2_XDS: "1"
|
||||
|
||||
envoy-integration-test-1_16_3:
|
||||
envoy-integration-test-1_16_4:
|
||||
<<: *ENVOY_TESTS
|
||||
environment:
|
||||
ENVOY_VERSION: "1.16.3"
|
||||
ENVOY_VERSION: "1.16.4"
|
||||
|
||||
envoy-integration-test-1_16_3-v2compat:
|
||||
envoy-integration-test-1_16_4-v2compat:
|
||||
<<: *ENVOY_TESTS
|
||||
environment:
|
||||
ENVOY_VERSION: "1.16.3"
|
||||
ENVOY_VERSION: "1.16.4"
|
||||
TEST_V2_XDS: "1"
|
||||
|
||||
envoy-integration-test-1_17_2:
|
||||
envoy-integration-test-1_17_3:
|
||||
<<: *ENVOY_TESTS
|
||||
environment:
|
||||
ENVOY_VERSION: "1.17.2"
|
||||
ENVOY_VERSION: "1.17.3"
|
||||
|
||||
envoy-integration-test-1_18_2:
|
||||
envoy-integration-test-1_18_3:
|
||||
<<: *ENVOY_TESTS
|
||||
environment:
|
||||
ENVOY_VERSION: "1.18.2"
|
||||
ENVOY_VERSION: "1.18.3"
|
||||
|
||||
# run integration tests for the connect ca providers
|
||||
test-connect-ca-providers:
|
||||
|
@ -1054,22 +1054,22 @@ workflows:
|
|||
- nomad-integration-0_8:
|
||||
requires:
|
||||
- dev-build
|
||||
- envoy-integration-test-1_15_4:
|
||||
- envoy-integration-test-1_15_5:
|
||||
requires:
|
||||
- dev-build
|
||||
- envoy-integration-test-1_15_4-v2compat:
|
||||
- envoy-integration-test-1_15_5-v2compat:
|
||||
requires:
|
||||
- dev-build
|
||||
- envoy-integration-test-1_16_3:
|
||||
- envoy-integration-test-1_16_4:
|
||||
requires:
|
||||
- dev-build
|
||||
- envoy-integration-test-1_16_3-v2compat:
|
||||
- envoy-integration-test-1_16_4-v2compat:
|
||||
requires:
|
||||
- dev-build
|
||||
- envoy-integration-test-1_17_2:
|
||||
- envoy-integration-test-1_17_3:
|
||||
requires:
|
||||
- dev-build
|
||||
- envoy-integration-test-1_18_2:
|
||||
- envoy-integration-test-1_18_3:
|
||||
requires:
|
||||
- dev-build
|
||||
|
||||
|
|
|
@ -2555,6 +2555,7 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType,
|
|||
return err
|
||||
}
|
||||
http.ProxyHTTP = httpInjectAddr(http.HTTP, proxy.Address, port)
|
||||
check.ExposedPort = port
|
||||
}
|
||||
|
||||
http.Start()
|
||||
|
@ -2624,6 +2625,7 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType,
|
|||
return err
|
||||
}
|
||||
grpc.ProxyGRPC = grpcInjectAddr(grpc.GRPC, proxy.Address, port)
|
||||
check.ExposedPort = port
|
||||
}
|
||||
|
||||
grpc.Start()
|
||||
|
@ -3809,6 +3811,8 @@ func (a *Agent) rerouteExposedChecks(serviceID structs.ServiceID, proxyAddr stri
|
|||
return err
|
||||
}
|
||||
c.ProxyHTTP = httpInjectAddr(c.HTTP, proxyAddr, port)
|
||||
hc := a.State.Check(cid)
|
||||
hc.ExposedPort = port
|
||||
}
|
||||
for cid, c := range a.checkGRPCs {
|
||||
if c.ServiceID != serviceID {
|
||||
|
@ -3819,6 +3823,8 @@ func (a *Agent) rerouteExposedChecks(serviceID structs.ServiceID, proxyAddr stri
|
|||
return err
|
||||
}
|
||||
c.ProxyGRPC = grpcInjectAddr(c.GRPC, proxyAddr, port)
|
||||
hc := a.State.Check(cid)
|
||||
hc.ExposedPort = port
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -3831,12 +3837,16 @@ func (a *Agent) resetExposedChecks(serviceID structs.ServiceID) {
|
|||
for cid, c := range a.checkHTTPs {
|
||||
if c.ServiceID == serviceID {
|
||||
c.ProxyHTTP = ""
|
||||
hc := a.State.Check(cid)
|
||||
hc.ExposedPort = 0
|
||||
ids = append(ids, cid)
|
||||
}
|
||||
}
|
||||
for cid, c := range a.checkGRPCs {
|
||||
if c.ServiceID == serviceID {
|
||||
c.ProxyGRPC = ""
|
||||
hc := a.State.Check(cid)
|
||||
hc.ExposedPort = 0
|
||||
ids = append(ids, cid)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1389,6 +1389,7 @@ func TestAgent_IndexChurn(t *testing.T) {
|
|||
// verifyIndexChurn registers some things and runs anti-entropy a bunch of times
|
||||
// in a row to make sure there are no index bumps.
|
||||
func verifyIndexChurn(t *testing.T, tags []string) {
|
||||
t.Helper()
|
||||
a := NewTestAgent(t, "")
|
||||
defer a.Shutdown()
|
||||
|
||||
|
@ -4299,8 +4300,8 @@ func TestAgent_RerouteExistingHTTPChecks(t *testing.T) {
|
|||
t.Fatalf("failed to add svc: %v", err)
|
||||
}
|
||||
|
||||
// Register a proxy and expose HTTP checks
|
||||
// This should trigger setting ProxyHTTP and ProxyGRPC in the checks
|
||||
// Register a proxy and expose HTTP checks.
|
||||
// This should trigger setting ProxyHTTP and ProxyGRPC in the checks.
|
||||
proxy := &structs.NodeService{
|
||||
Kind: "connect-proxy",
|
||||
ID: "web-proxy",
|
||||
|
@ -4324,36 +4325,30 @@ func TestAgent_RerouteExistingHTTPChecks(t *testing.T) {
|
|||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
chks := a.ServiceHTTPBasedChecks(structs.NewServiceID("web", nil))
|
||||
require.Equal(r, chks[0].ProxyHTTP, "http://localhost:21500/mypath?query")
|
||||
})
|
||||
|
||||
got := chks[0].ProxyHTTP
|
||||
if got == "" {
|
||||
r.Fatal("proxyHTTP addr not set in check")
|
||||
}
|
||||
|
||||
want := "http://localhost:21500/mypath?query"
|
||||
if got != want {
|
||||
r.Fatalf("unexpected proxy addr in check, want: %s, got: %s", want, got)
|
||||
}
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
hc := a.State.Check(structs.NewCheckID("http", nil))
|
||||
require.Equal(r, hc.ExposedPort, 21500)
|
||||
})
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
chks := a.ServiceHTTPBasedChecks(structs.NewServiceID("web", nil))
|
||||
|
||||
// Will be at a later index than HTTP check because of the fetching order in ServiceHTTPBasedChecks
|
||||
got := chks[1].ProxyGRPC
|
||||
if got == "" {
|
||||
r.Fatal("ProxyGRPC addr not set in check")
|
||||
}
|
||||
|
||||
// Node that this relies on listener ports auto-incrementing in a.listenerPortLocked
|
||||
want := "localhost:21501/myservice"
|
||||
if got != want {
|
||||
r.Fatalf("unexpected proxy addr in check, want: %s, got: %s", want, got)
|
||||
}
|
||||
// GRPC check will be at a later index than HTTP check because of the fetching order in ServiceHTTPBasedChecks.
|
||||
// Note that this relies on listener ports auto-incrementing in a.listenerPortLocked.
|
||||
require.Equal(r, chks[1].ProxyGRPC, "localhost:21501/myservice")
|
||||
})
|
||||
|
||||
// Re-register a proxy and disable exposing HTTP checks
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
hc := a.State.Check(structs.NewCheckID("grpc", nil))
|
||||
require.Equal(r, hc.ExposedPort, 21501)
|
||||
})
|
||||
|
||||
// Re-register a proxy and disable exposing HTTP checks.
|
||||
// This should trigger resetting ProxyHTTP and ProxyGRPC to empty strings
|
||||
// and reset saved exposed ports in the agent's state.
|
||||
proxy = &structs.NodeService{
|
||||
Kind: "connect-proxy",
|
||||
ID: "web-proxy",
|
||||
|
@ -4377,21 +4372,24 @@ func TestAgent_RerouteExistingHTTPChecks(t *testing.T) {
|
|||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
chks := a.ServiceHTTPBasedChecks(structs.NewServiceID("web", nil))
|
||||
require.Empty(r, chks[0].ProxyHTTP, "ProxyHTTP addr was not reset")
|
||||
})
|
||||
|
||||
got := chks[0].ProxyHTTP
|
||||
if got != "" {
|
||||
r.Fatal("ProxyHTTP addr was not reset")
|
||||
}
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
hc := a.State.Check(structs.NewCheckID("http", nil))
|
||||
require.Equal(r, hc.ExposedPort, 0)
|
||||
})
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
chks := a.ServiceHTTPBasedChecks(structs.NewServiceID("web", nil))
|
||||
|
||||
// Will be at a later index than HTTP check because of the fetching order in ServiceHTTPBasedChecks
|
||||
got := chks[1].ProxyGRPC
|
||||
if got != "" {
|
||||
r.Fatal("ProxyGRPC addr was not reset")
|
||||
}
|
||||
// Will be at a later index than HTTP check because of the fetching order in ServiceHTTPBasedChecks.
|
||||
require.Empty(r, chks[1].ProxyGRPC, "ProxyGRPC addr was not reset")
|
||||
})
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
hc := a.State.Check(structs.NewCheckID("grpc", nil))
|
||||
require.Equal(r, hc.ExposedPort, 0)
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -4480,31 +4478,24 @@ func TestAgent_RerouteNewHTTPChecks(t *testing.T) {
|
|||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
chks := a.ServiceHTTPBasedChecks(structs.NewServiceID("web", nil))
|
||||
require.Equal(r, chks[0].ProxyHTTP, "http://localhost:21500/mypath?query")
|
||||
})
|
||||
|
||||
got := chks[0].ProxyHTTP
|
||||
if got == "" {
|
||||
r.Fatal("ProxyHTTP addr not set in check")
|
||||
}
|
||||
|
||||
want := "http://localhost:21500/mypath?query"
|
||||
if got != want {
|
||||
r.Fatalf("unexpected proxy addr in http check, want: %s, got: %s", want, got)
|
||||
}
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
hc := a.State.Check(structs.NewCheckID("http", nil))
|
||||
require.Equal(r, hc.ExposedPort, 21500)
|
||||
})
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
chks := a.ServiceHTTPBasedChecks(structs.NewServiceID("web", nil))
|
||||
|
||||
// Will be at a later index than HTTP check because of the fetching order in ServiceHTTPBasedChecks
|
||||
got := chks[1].ProxyGRPC
|
||||
if got == "" {
|
||||
r.Fatal("ProxyGRPC addr not set in check")
|
||||
}
|
||||
// GRPC check will be at a later index than HTTP check because of the fetching order in ServiceHTTPBasedChecks.
|
||||
require.Equal(r, chks[1].ProxyGRPC, "localhost:21501/myservice")
|
||||
})
|
||||
|
||||
want := "localhost:21501/myservice"
|
||||
if got != want {
|
||||
r.Fatalf("unexpected proxy addr in grpc check, want: %s, got: %s", want, got)
|
||||
}
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
hc := a.State.Check(structs.NewCheckID("grpc", nil))
|
||||
require.Equal(r, hc.ExposedPort, 21501)
|
||||
})
|
||||
}
|
||||
|
||||
|
|
|
@ -10,6 +10,11 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
uuid "github.com/hashicorp/go-uuid"
|
||||
msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gopkg.in/square/go-jose.v2/jwt"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/consul/authmethod/kubeauth"
|
||||
"github.com/hashicorp/consul/agent/consul/authmethod/testauth"
|
||||
|
@ -18,10 +23,6 @@ import (
|
|||
"github.com/hashicorp/consul/sdk/freeport"
|
||||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
uuid "github.com/hashicorp/go-uuid"
|
||||
msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gopkg.in/square/go-jose.v2/jwt"
|
||||
)
|
||||
|
||||
func TestACLEndpoint_Bootstrap(t *testing.T) {
|
||||
|
@ -4981,7 +4982,7 @@ func TestACLEndpoint_Login_with_TokenLocality(t *testing.T) {
|
|||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
go t.Parallel()
|
||||
t.Parallel()
|
||||
|
||||
_, s1, codec := testACLServerWithConfig(t, func(c *Config) {
|
||||
c.ACLTokenMinExpirationTTL = 10 * time.Millisecond
|
||||
|
|
|
@ -323,8 +323,9 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
|
|||
&args.QueryOptions,
|
||||
&reply.QueryMeta,
|
||||
func(ws memdb.WatchSet, state *state.Store) error {
|
||||
reply.Reset()
|
||||
reply.MeshGateway.Mode = structs.MeshGatewayModeDefault
|
||||
var thisReply structs.ServiceConfigResponse
|
||||
|
||||
thisReply.MeshGateway.Mode = structs.MeshGatewayModeDefault
|
||||
// TODO(freddy) Refactor this into smaller set of state store functions
|
||||
// Pass the WatchSet to both the service and proxy config lookups. If either is updated during the
|
||||
// blocking query, this function will be rerun and these state store lookups will both be current.
|
||||
|
@ -349,11 +350,11 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
|
|||
if err != nil {
|
||||
return fmt.Errorf("failed to copy global proxy-defaults: %v", err)
|
||||
}
|
||||
reply.ProxyConfig = mapCopy.(map[string]interface{})
|
||||
reply.Mode = proxyConf.Mode
|
||||
reply.TransparentProxy = proxyConf.TransparentProxy
|
||||
reply.MeshGateway = proxyConf.MeshGateway
|
||||
reply.Expose = proxyConf.Expose
|
||||
thisReply.ProxyConfig = mapCopy.(map[string]interface{})
|
||||
thisReply.Mode = proxyConf.Mode
|
||||
thisReply.TransparentProxy = proxyConf.TransparentProxy
|
||||
thisReply.MeshGateway = proxyConf.MeshGateway
|
||||
thisReply.Expose = proxyConf.Expose
|
||||
|
||||
// Extract the global protocol from proxyConf for upstream configs.
|
||||
rawProtocol := proxyConf.Config["protocol"]
|
||||
|
@ -369,7 +370,7 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reply.Index = index
|
||||
thisReply.Index = index
|
||||
|
||||
var serviceConf *structs.ServiceConfigEntry
|
||||
if serviceEntry != nil {
|
||||
|
@ -378,25 +379,25 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
|
|||
return fmt.Errorf("invalid service config type %T", serviceEntry)
|
||||
}
|
||||
if serviceConf.Expose.Checks {
|
||||
reply.Expose.Checks = true
|
||||
thisReply.Expose.Checks = true
|
||||
}
|
||||
if len(serviceConf.Expose.Paths) >= 1 {
|
||||
reply.Expose.Paths = serviceConf.Expose.Paths
|
||||
thisReply.Expose.Paths = serviceConf.Expose.Paths
|
||||
}
|
||||
if serviceConf.MeshGateway.Mode != structs.MeshGatewayModeDefault {
|
||||
reply.MeshGateway.Mode = serviceConf.MeshGateway.Mode
|
||||
thisReply.MeshGateway.Mode = serviceConf.MeshGateway.Mode
|
||||
}
|
||||
if serviceConf.Protocol != "" {
|
||||
if reply.ProxyConfig == nil {
|
||||
reply.ProxyConfig = make(map[string]interface{})
|
||||
if thisReply.ProxyConfig == nil {
|
||||
thisReply.ProxyConfig = make(map[string]interface{})
|
||||
}
|
||||
reply.ProxyConfig["protocol"] = serviceConf.Protocol
|
||||
thisReply.ProxyConfig["protocol"] = serviceConf.Protocol
|
||||
}
|
||||
if serviceConf.TransparentProxy.OutboundListenerPort != 0 {
|
||||
reply.TransparentProxy.OutboundListenerPort = serviceConf.TransparentProxy.OutboundListenerPort
|
||||
thisReply.TransparentProxy.OutboundListenerPort = serviceConf.TransparentProxy.OutboundListenerPort
|
||||
}
|
||||
if serviceConf.Mode != structs.ProxyModeDefault {
|
||||
reply.Mode = serviceConf.Mode
|
||||
thisReply.Mode = serviceConf.Mode
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -414,13 +415,14 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
|
|||
|
||||
// Check the args and the resolved value. If it was exclusively set via a config entry, then args.Mode
|
||||
// will never be transparent because the service config request does not use the resolved value.
|
||||
tproxy = args.Mode == structs.ProxyModeTransparent || reply.Mode == structs.ProxyModeTransparent
|
||||
tproxy = args.Mode == structs.ProxyModeTransparent || thisReply.Mode == structs.ProxyModeTransparent
|
||||
)
|
||||
|
||||
// The upstreams passed as arguments to this endpoint are the upstreams explicitly defined in a proxy registration.
|
||||
// If no upstreams were passed, then we should only return the resolved config if the proxy is in transparent mode.
|
||||
// Otherwise we would return a resolved upstream config to a proxy with no configured upstreams.
|
||||
if noUpstreamArgs && !tproxy {
|
||||
*reply = thisReply
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -534,25 +536,28 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
|
|||
|
||||
// don't allocate the slices just to not fill them
|
||||
if len(usConfigs) == 0 {
|
||||
*reply = thisReply
|
||||
return nil
|
||||
}
|
||||
|
||||
if legacyUpstreams {
|
||||
// For legacy upstreams we return a map that is only keyed on the string ID, since they precede namespaces
|
||||
reply.UpstreamConfigs = make(map[string]map[string]interface{})
|
||||
thisReply.UpstreamConfigs = make(map[string]map[string]interface{})
|
||||
|
||||
for us, conf := range usConfigs {
|
||||
reply.UpstreamConfigs[us.ID] = conf
|
||||
thisReply.UpstreamConfigs[us.ID] = conf
|
||||
}
|
||||
|
||||
} else {
|
||||
reply.UpstreamIDConfigs = make(structs.OpaqueUpstreamConfigs, 0, len(usConfigs))
|
||||
thisReply.UpstreamIDConfigs = make(structs.OpaqueUpstreamConfigs, 0, len(usConfigs))
|
||||
|
||||
for us, conf := range usConfigs {
|
||||
reply.UpstreamIDConfigs = append(reply.UpstreamIDConfigs,
|
||||
thisReply.UpstreamIDConfigs = append(thisReply.UpstreamIDConfigs,
|
||||
structs.OpaqueUpstreamConfig{Upstream: us, Config: conf})
|
||||
}
|
||||
}
|
||||
|
||||
*reply = thisReply
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
|
|
@ -1420,6 +1420,9 @@ func TestConfigEntry_ResolveServiceConfig_Blocking(t *testing.T) {
|
|||
// of the blocking query does NOT bleed over into the next run. Concretely
|
||||
// in this test the data present in the initial proxy-defaults should not
|
||||
// be present when we are woken up due to proxy-defaults being deleted.
|
||||
//
|
||||
// This test does not pertain to upstreams, see:
|
||||
// TestConfigEntry_ResolveServiceConfig_Upstreams_Blocking
|
||||
|
||||
state := s1.fsm.State()
|
||||
require.NoError(state.EnsureConfigEntry(1, &structs.ProxyConfigEntry{
|
||||
|
@ -1571,6 +1574,205 @@ func TestConfigEntry_ResolveServiceConfig_Blocking(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestConfigEntry_ResolveServiceConfig_Upstreams_Blocking(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
|
||||
dir1, s1 := testServer(t)
|
||||
defer os.RemoveAll(dir1)
|
||||
defer s1.Shutdown()
|
||||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
// The main thing this should test is that information from one iteration
|
||||
// of the blocking query does NOT bleed over into the next run. Concretely
|
||||
// in this test the data present in the initial proxy-defaults should not
|
||||
// be present when we are woken up due to proxy-defaults being deleted.
|
||||
//
|
||||
// This test is about fields in upstreams, see:
|
||||
// TestConfigEntry_ResolveServiceConfig_Blocking
|
||||
|
||||
state := s1.fsm.State()
|
||||
require.NoError(t, state.EnsureConfigEntry(1, &structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: "foo",
|
||||
Protocol: "http",
|
||||
}))
|
||||
require.NoError(t, state.EnsureConfigEntry(2, &structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: "bar",
|
||||
Protocol: "http",
|
||||
}))
|
||||
|
||||
var index uint64
|
||||
|
||||
runStep(t, "foo and bar should be both http", func(t *testing.T) {
|
||||
// Verify that we get the results of service-defaults for 'foo' and 'bar'.
|
||||
var out structs.ServiceConfigResponse
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig",
|
||||
&structs.ServiceConfigRequest{
|
||||
Name: "foo",
|
||||
Datacenter: "dc1",
|
||||
UpstreamIDs: []structs.ServiceID{
|
||||
structs.NewServiceID("bar", nil),
|
||||
structs.NewServiceID("other", nil),
|
||||
},
|
||||
},
|
||||
&out,
|
||||
))
|
||||
|
||||
expected := structs.ServiceConfigResponse{
|
||||
ProxyConfig: map[string]interface{}{
|
||||
"protocol": "http",
|
||||
},
|
||||
UpstreamIDConfigs: []structs.OpaqueUpstreamConfig{
|
||||
{
|
||||
Upstream: structs.NewServiceID("bar", nil),
|
||||
Config: map[string]interface{}{
|
||||
"protocol": "http",
|
||||
},
|
||||
},
|
||||
},
|
||||
QueryMeta: out.QueryMeta, // don't care
|
||||
}
|
||||
|
||||
require.Equal(t, expected, out)
|
||||
index = out.Index
|
||||
})
|
||||
|
||||
runStep(t, "blocking query for foo wakes on bar entry delete", func(t *testing.T) {
|
||||
// Now setup a blocking query for 'foo' while we erase the
|
||||
// service-defaults for bar.
|
||||
|
||||
// Async cause a change
|
||||
start := time.Now()
|
||||
go func() {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
err := state.DeleteConfigEntry(index+1,
|
||||
structs.ServiceDefaults,
|
||||
"bar",
|
||||
nil,
|
||||
)
|
||||
if err != nil {
|
||||
t.Errorf("delete config entry failed: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Re-run the query
|
||||
var out structs.ServiceConfigResponse
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig",
|
||||
&structs.ServiceConfigRequest{
|
||||
Name: "foo",
|
||||
Datacenter: "dc1",
|
||||
UpstreamIDs: []structs.ServiceID{
|
||||
structs.NewServiceID("bar", nil),
|
||||
structs.NewServiceID("other", nil),
|
||||
},
|
||||
QueryOptions: structs.QueryOptions{
|
||||
MinQueryIndex: index,
|
||||
MaxQueryTime: time.Second,
|
||||
},
|
||||
},
|
||||
&out,
|
||||
))
|
||||
|
||||
// Should block at least 100ms
|
||||
require.True(t, time.Since(start) >= 100*time.Millisecond, "too fast")
|
||||
|
||||
// Check the indexes
|
||||
require.Equal(t, out.Index, index+1)
|
||||
|
||||
expected := structs.ServiceConfigResponse{
|
||||
ProxyConfig: map[string]interface{}{
|
||||
"protocol": "http",
|
||||
},
|
||||
QueryMeta: out.QueryMeta, // don't care
|
||||
}
|
||||
|
||||
require.Equal(t, expected, out)
|
||||
index = out.Index
|
||||
})
|
||||
|
||||
runStep(t, "foo should be http and bar should be unset", func(t *testing.T) {
|
||||
// Verify that we get the results of service-defaults for just 'foo'.
|
||||
var out structs.ServiceConfigResponse
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig",
|
||||
&structs.ServiceConfigRequest{
|
||||
Name: "foo",
|
||||
Datacenter: "dc1",
|
||||
UpstreamIDs: []structs.ServiceID{
|
||||
structs.NewServiceID("bar", nil),
|
||||
structs.NewServiceID("other", nil),
|
||||
},
|
||||
},
|
||||
&out,
|
||||
))
|
||||
|
||||
expected := structs.ServiceConfigResponse{
|
||||
ProxyConfig: map[string]interface{}{
|
||||
"protocol": "http",
|
||||
},
|
||||
QueryMeta: out.QueryMeta, // don't care
|
||||
}
|
||||
|
||||
require.Equal(t, expected, out)
|
||||
index = out.Index
|
||||
})
|
||||
|
||||
runStep(t, "blocking query for foo wakes on foo entry delete", func(t *testing.T) {
|
||||
// Now setup a blocking query for 'foo' while we erase the
|
||||
// service-defaults for foo.
|
||||
|
||||
// Async cause a change
|
||||
start := time.Now()
|
||||
go func() {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
err := state.DeleteConfigEntry(index+1,
|
||||
structs.ServiceDefaults,
|
||||
"foo",
|
||||
nil,
|
||||
)
|
||||
if err != nil {
|
||||
t.Errorf("delete config entry failed: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Re-run the query
|
||||
var out structs.ServiceConfigResponse
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig",
|
||||
&structs.ServiceConfigRequest{
|
||||
Name: "foo",
|
||||
Datacenter: "dc1",
|
||||
UpstreamIDs: []structs.ServiceID{
|
||||
structs.NewServiceID("bar", nil),
|
||||
structs.NewServiceID("other", nil),
|
||||
},
|
||||
QueryOptions: structs.QueryOptions{
|
||||
MinQueryIndex: index,
|
||||
MaxQueryTime: time.Second,
|
||||
},
|
||||
},
|
||||
&out,
|
||||
))
|
||||
|
||||
// Should block at least 100ms
|
||||
require.True(t, time.Since(start) >= 100*time.Millisecond, "too fast")
|
||||
|
||||
// Check the indexes
|
||||
require.Equal(t, out.Index, index+1)
|
||||
|
||||
expected := structs.ServiceConfigResponse{
|
||||
QueryMeta: out.QueryMeta, // don't care
|
||||
}
|
||||
|
||||
require.Equal(t, expected, out)
|
||||
index = out.Index
|
||||
})
|
||||
}
|
||||
|
||||
func TestConfigEntry_ResolveServiceConfig_UpstreamProxyDefaultsProtocol(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
|
@ -1848,3 +2050,10 @@ func TestConfigEntry_ProxyDefaultsExposeConfig(t *testing.T) {
|
|||
require.True(t, ok)
|
||||
require.Equal(t, expose, proxyConf.Expose)
|
||||
}
|
||||
|
||||
func runStep(t *testing.T, name string, fn func(t *testing.T)) {
|
||||
t.Helper()
|
||||
if !t.Run(name, fn) {
|
||||
t.FailNow()
|
||||
}
|
||||
}
|
||||
|
|
|
@ -26,6 +26,7 @@ import (
|
|||
"github.com/hashicorp/consul/agent/router"
|
||||
"github.com/hashicorp/consul/agent/submatview"
|
||||
"github.com/hashicorp/consul/agent/token"
|
||||
"github.com/hashicorp/consul/agent/xds"
|
||||
"github.com/hashicorp/consul/ipaddr"
|
||||
"github.com/hashicorp/consul/lib"
|
||||
"github.com/hashicorp/consul/logging"
|
||||
|
@ -195,6 +196,7 @@ func getPrometheusDefs(cfg lib.TelemetryConfig) ([]prometheus.GaugeDefinition, [
|
|||
consul.RPCGauges,
|
||||
consul.SessionGauges,
|
||||
grpc.StatsGauges,
|
||||
xds.StatsGauges,
|
||||
usagemetrics.Gauges,
|
||||
consul.ReplicationGauges,
|
||||
Gauges,
|
||||
|
|
|
@ -641,11 +641,13 @@ func (r *ServiceConfigRequest) CacheInfo() cache.RequestInfo {
|
|||
v, err := hashstructure.Hash(struct {
|
||||
Name string
|
||||
EnterpriseMeta EnterpriseMeta
|
||||
Upstreams []string `hash:"set"`
|
||||
Upstreams []string `hash:"set"`
|
||||
UpstreamIDs []ServiceID `hash:"set"`
|
||||
}{
|
||||
Name: r.Name,
|
||||
EnterpriseMeta: r.EnterpriseMeta,
|
||||
Upstreams: r.Upstreams,
|
||||
UpstreamIDs: r.UpstreamIDs,
|
||||
}, nil)
|
||||
if err == nil {
|
||||
// If there is an error, we don't set the key. A blank key forces
|
||||
|
@ -966,12 +968,6 @@ type ServiceConfigResponse struct {
|
|||
QueryMeta
|
||||
}
|
||||
|
||||
func (r *ServiceConfigResponse) Reset() {
|
||||
r.ProxyConfig = nil
|
||||
r.UpstreamConfigs = nil
|
||||
r.MeshGateway = MeshGatewayConfig{}
|
||||
}
|
||||
|
||||
// MarshalBinary writes ServiceConfigResponse as msgpack encoded. It's only here
|
||||
// because we need custom decoding of the raw interface{} values.
|
||||
func (r *ServiceConfigResponse) MarshalBinary() (data []byte, err error) {
|
||||
|
|
|
@ -11,6 +11,7 @@ import (
|
|||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/consul/agent/cache"
|
||||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
)
|
||||
|
||||
|
@ -1366,6 +1367,118 @@ func TestDecodeConfigEntry(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestServiceConfigRequest(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
req ServiceConfigRequest
|
||||
mutate func(req *ServiceConfigRequest)
|
||||
want *cache.RequestInfo
|
||||
wantSame bool
|
||||
}{
|
||||
{
|
||||
name: "basic params",
|
||||
req: ServiceConfigRequest{
|
||||
QueryOptions: QueryOptions{Token: "foo"},
|
||||
Datacenter: "dc1",
|
||||
},
|
||||
want: &cache.RequestInfo{
|
||||
Token: "foo",
|
||||
Datacenter: "dc1",
|
||||
},
|
||||
wantSame: true,
|
||||
},
|
||||
{
|
||||
name: "name should be considered",
|
||||
req: ServiceConfigRequest{
|
||||
Name: "web",
|
||||
},
|
||||
mutate: func(req *ServiceConfigRequest) {
|
||||
req.Name = "db"
|
||||
},
|
||||
wantSame: false,
|
||||
},
|
||||
{
|
||||
name: "legacy upstreams should be different",
|
||||
req: ServiceConfigRequest{
|
||||
Name: "web",
|
||||
Upstreams: []string{"foo"},
|
||||
},
|
||||
mutate: func(req *ServiceConfigRequest) {
|
||||
req.Upstreams = []string{"foo", "bar"}
|
||||
},
|
||||
wantSame: false,
|
||||
},
|
||||
{
|
||||
name: "legacy upstreams should not depend on order",
|
||||
req: ServiceConfigRequest{
|
||||
Name: "web",
|
||||
Upstreams: []string{"bar", "foo"},
|
||||
},
|
||||
mutate: func(req *ServiceConfigRequest) {
|
||||
req.Upstreams = []string{"foo", "bar"}
|
||||
},
|
||||
wantSame: true,
|
||||
},
|
||||
{
|
||||
name: "upstreams should be different",
|
||||
req: ServiceConfigRequest{
|
||||
Name: "web",
|
||||
UpstreamIDs: []ServiceID{
|
||||
NewServiceID("foo", nil),
|
||||
},
|
||||
},
|
||||
mutate: func(req *ServiceConfigRequest) {
|
||||
req.UpstreamIDs = []ServiceID{
|
||||
NewServiceID("foo", nil),
|
||||
NewServiceID("bar", nil),
|
||||
}
|
||||
},
|
||||
wantSame: false,
|
||||
},
|
||||
{
|
||||
name: "upstreams should not depend on order",
|
||||
req: ServiceConfigRequest{
|
||||
Name: "web",
|
||||
UpstreamIDs: []ServiceID{
|
||||
NewServiceID("bar", nil),
|
||||
NewServiceID("foo", nil),
|
||||
},
|
||||
},
|
||||
mutate: func(req *ServiceConfigRequest) {
|
||||
req.UpstreamIDs = []ServiceID{
|
||||
NewServiceID("foo", nil),
|
||||
NewServiceID("bar", nil),
|
||||
}
|
||||
},
|
||||
wantSame: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
info := tc.req.CacheInfo()
|
||||
if tc.mutate != nil {
|
||||
tc.mutate(&tc.req)
|
||||
}
|
||||
afterInfo := tc.req.CacheInfo()
|
||||
|
||||
// Check key matches or not
|
||||
if tc.wantSame {
|
||||
require.Equal(t, info, afterInfo)
|
||||
} else {
|
||||
require.NotEqual(t, info, afterInfo)
|
||||
}
|
||||
|
||||
if tc.want != nil {
|
||||
// Reset key since we don't care about the actual hash value as long as
|
||||
// it does/doesn't change appropriately (asserted with wantSame above).
|
||||
info.Key = ""
|
||||
require.Equal(t, *tc.want, info)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestServiceConfigResponse_MsgPack(t *testing.T) {
|
||||
// TODO(banks) lib.MapWalker doesn't actually fix the map[interface{}] issue
|
||||
// it claims to in docs yet. When it does uncomment those cases below.
|
||||
|
|
|
@ -1400,7 +1400,7 @@ type NodeServiceList struct {
|
|||
Services []*NodeService
|
||||
}
|
||||
|
||||
// HealthCheck represents a single check on a given node
|
||||
// HealthCheck represents a single check on a given node.
|
||||
type HealthCheck struct {
|
||||
Node string
|
||||
CheckID types.CheckID // Unique per-node ID
|
||||
|
@ -1413,6 +1413,10 @@ type HealthCheck struct {
|
|||
ServiceTags []string // optional service tags
|
||||
Type string // Check type: http/ttl/tcp/etc
|
||||
|
||||
// ExposedPort is the port of the exposed Envoy listener representing the
|
||||
// HTTP or GRPC health check of the service.
|
||||
ExposedPort int
|
||||
|
||||
Definition HealthCheckDefinition `bexpr:"-"`
|
||||
|
||||
EnterpriseMeta `hcl:",squash" mapstructure:",squash" bexpr:"-"`
|
||||
|
|
|
@ -9,7 +9,7 @@ import (
|
|||
"sync"
|
||||
"testing"
|
||||
|
||||
bexpr "github.com/hashicorp/go-bexpr"
|
||||
"github.com/hashicorp/go-bexpr"
|
||||
"github.com/mitchellh/pointerstructure"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
@ -545,6 +545,11 @@ var expectedFieldConfigHealthCheck bexpr.FieldConfigurations = bexpr.FieldConfig
|
|||
SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual, bexpr.MatchIn, bexpr.MatchNotIn, bexpr.MatchMatches, bexpr.MatchNotMatches},
|
||||
StructFieldName: "Type",
|
||||
},
|
||||
"ExposedPort": &bexpr.FieldConfiguration{
|
||||
CoerceFn: bexpr.CoerceInt,
|
||||
SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
|
||||
StructFieldName: "ExposedPort",
|
||||
},
|
||||
}
|
||||
|
||||
var expectedFieldConfigCheckServiceNode bexpr.FieldConfigurations = bexpr.FieldConfigurations{
|
||||
|
|
|
@ -16,10 +16,8 @@ import (
|
|||
"time"
|
||||
|
||||
metrics "github.com/armon/go-metrics"
|
||||
"github.com/hashicorp/errwrap"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
uuid "github.com/hashicorp/go-uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/config"
|
||||
|
@ -99,6 +97,7 @@ func NewTestAgent(t *testing.T, hcl string) *TestAgent {
|
|||
func StartTestAgent(t *testing.T, a TestAgent) *TestAgent {
|
||||
t.Helper()
|
||||
retry.RunWith(retry.ThreeTimes(), t, func(r *retry.R) {
|
||||
t.Helper()
|
||||
if err := a.Start(t); err != nil {
|
||||
r.Fatal(err)
|
||||
}
|
||||
|
@ -130,7 +129,7 @@ func TestConfigHCL(nodeID string) string {
|
|||
|
||||
// Start starts a test agent. It returns an error if the agent could not be started.
|
||||
// If no error is returned, the caller must call Shutdown() when finished.
|
||||
func (a *TestAgent) Start(t *testing.T) (err error) {
|
||||
func (a *TestAgent) Start(t *testing.T) error {
|
||||
t.Helper()
|
||||
if a.Agent != nil {
|
||||
return fmt.Errorf("TestAgent already started")
|
||||
|
@ -187,7 +186,9 @@ func (a *TestAgent) Start(t *testing.T) (err error) {
|
|||
return result, err
|
||||
}
|
||||
bd, err := NewBaseDeps(loader, logOutput)
|
||||
require.NoError(t, err)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create base deps: %w", err)
|
||||
}
|
||||
|
||||
bd.Logger = logger
|
||||
bd.MetricsHandler = metrics.NewInmemSink(1*time.Second, time.Minute)
|
||||
|
@ -215,8 +216,8 @@ func (a *TestAgent) Start(t *testing.T) (err error) {
|
|||
|
||||
if err := a.waitForUp(); err != nil {
|
||||
a.Shutdown()
|
||||
t.Logf("Error while waiting for test agent to start: %v", err)
|
||||
return errwrap.Wrapf(name+": {{err}}", err)
|
||||
a.Agent = nil
|
||||
return fmt.Errorf("error waiting for test agent to start: %w", err)
|
||||
}
|
||||
|
||||
a.dns = a.dnsServers[0]
|
||||
|
@ -280,17 +281,6 @@ func (a *TestAgent) waitForUp() error {
|
|||
// Shutdown stops the agent and removes the data directory if it is
|
||||
// managed by the test agent.
|
||||
func (a *TestAgent) Shutdown() error {
|
||||
/* Removed this because it was breaking persistence tests where we would
|
||||
persist a service and load it through a new agent with the same data-dir.
|
||||
Not sure if we still need this for other things, everywhere we manually make
|
||||
a data dir we already do 'defer os.RemoveAll()'
|
||||
defer func() {
|
||||
if a.DataDir != "" {
|
||||
os.RemoveAll(a.DataDir)
|
||||
}
|
||||
}()*/
|
||||
|
||||
// already shut down
|
||||
if a.Agent == nil {
|
||||
return nil
|
||||
}
|
||||
|
|
File diff suppressed because one or more lines are too long
|
@ -31,6 +31,8 @@ type ADSDeltaStream = envoy_discovery_v3.AggregatedDiscoveryService_DeltaAggrega
|
|||
|
||||
// DeltaAggregatedResources implements envoy_discovery_v3.AggregatedDiscoveryServiceServer
|
||||
func (s *Server) DeltaAggregatedResources(stream ADSDeltaStream) error {
|
||||
defer s.activeStreams.Increment("v3")()
|
||||
|
||||
// a channel for receiving incoming requests
|
||||
reqCh := make(chan *envoy_discovery_v3.DeltaDiscoveryRequest)
|
||||
reqStop := int32(0)
|
||||
|
|
|
@ -59,6 +59,8 @@ func TestServer_DeltaAggregatedResources_v3_BasicProtocol_TCP(t *testing.T) {
|
|||
// Check no response sent yet
|
||||
assertDeltaChanBlocked(t, envoy.deltaStream.sendCh)
|
||||
|
||||
requireProtocolVersionGauge(t, scenario, "v3", 1)
|
||||
|
||||
// Deliver a new snapshot (tcp with one tcp upstream)
|
||||
mgr.DeliverConfig(t, sid, snap)
|
||||
|
||||
|
|
|
@ -110,7 +110,7 @@ func TestDetermineSupportedProxyFeaturesFromString(t *testing.T) {
|
|||
|
||||
// Insert a bunch of valid versions.
|
||||
for _, v := range []string{
|
||||
"1.15.0", "1.15.1", "1.15.2", "1.15.3", "1.15.4",
|
||||
"1.15.0", "1.15.1", "1.15.2", "1.15.3", "1.15.4", "1.15.5",
|
||||
} {
|
||||
cases[v] = testcase{expect: supportedProxyFeatures{
|
||||
GatewaysNeedStubClusterWhenEmptyWithIncrementalXDS: true,
|
||||
|
@ -118,9 +118,9 @@ func TestDetermineSupportedProxyFeaturesFromString(t *testing.T) {
|
|||
}}
|
||||
}
|
||||
for _, v := range []string{
|
||||
"1.16.0", "1.16.1", "1.16.2", "1.16.3",
|
||||
"1.17.0", "1.17.1", "1.17.2",
|
||||
"1.18.0", "1.18.1", "1.18.2",
|
||||
"1.16.0", "1.16.1", "1.16.2", "1.16.3", "1.16.4",
|
||||
"1.17.0", "1.17.1", "1.17.2", "1.17.3",
|
||||
"1.18.0", "1.18.1", "1.18.2", "1.18.3",
|
||||
} {
|
||||
cases[v] = testcase{expect: supportedProxyFeatures{}}
|
||||
}
|
||||
|
|
|
@ -7,13 +7,13 @@ package proxysupport
|
|||
//
|
||||
// see: https://www.consul.io/docs/connect/proxies/envoy#supported-versions
|
||||
var EnvoyVersions = []string{
|
||||
"1.18.2",
|
||||
"1.17.2",
|
||||
"1.16.3",
|
||||
"1.15.4",
|
||||
"1.18.3",
|
||||
"1.17.3",
|
||||
"1.16.4",
|
||||
"1.15.5",
|
||||
}
|
||||
|
||||
var EnvoyVersionsV2 = []string{
|
||||
"1.16.3",
|
||||
"1.15.4",
|
||||
"1.16.4",
|
||||
"1.15.5",
|
||||
}
|
||||
|
|
|
@ -11,6 +11,8 @@ import (
|
|||
envoy_discovery_v2 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v2"
|
||||
envoy_discovery_v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/armon/go-metrics/prometheus"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
|
@ -25,6 +27,13 @@ import (
|
|||
"github.com/hashicorp/consul/tlsutil"
|
||||
)
|
||||
|
||||
var StatsGauges = []prometheus.GaugeDefinition{
|
||||
{
|
||||
Name: []string{"xds", "server", "streams"},
|
||||
Help: "Measures the number of active xDS streams handled by the server split by protocol version.",
|
||||
},
|
||||
}
|
||||
|
||||
// ADSStream is a shorter way of referring to this thing...
|
||||
type ADSStream = envoy_discovery_v3.AggregatedDiscoveryService_StreamAggregatedResourcesServer
|
||||
type ADSStream_v2 = envoy_discovery_v2.AggregatedDiscoveryService_StreamAggregatedResourcesServer
|
||||
|
@ -141,6 +150,36 @@ type Server struct {
|
|||
AuthCheckFrequency time.Duration
|
||||
|
||||
DisableV2Protocol bool
|
||||
|
||||
activeStreams activeStreamCounters
|
||||
}
|
||||
|
||||
// activeStreamCounters simply encapsulates two counters accessed atomically to
|
||||
// ensure alignment is correct.
|
||||
type activeStreamCounters struct {
|
||||
xDSv3 uint64
|
||||
xDSv2 uint64
|
||||
}
|
||||
|
||||
func (c *activeStreamCounters) Increment(xdsVersion string) func() {
|
||||
var counter *uint64
|
||||
switch xdsVersion {
|
||||
case "v3":
|
||||
counter = &c.xDSv3
|
||||
case "v2":
|
||||
counter = &c.xDSv2
|
||||
default:
|
||||
return func() {}
|
||||
}
|
||||
|
||||
labels := []metrics.Label{{Name: "version", Value: xdsVersion}}
|
||||
|
||||
count := atomic.AddUint64(counter, 1)
|
||||
metrics.SetGaugeWithLabels([]string{"xds", "server", "streams"}, float32(count), labels)
|
||||
return func() {
|
||||
count := atomic.AddUint64(counter, ^uint64(0))
|
||||
metrics.SetGaugeWithLabels([]string{"xds", "server", "streams"}, float32(count), labels)
|
||||
}
|
||||
}
|
||||
|
||||
func NewServer(
|
||||
|
@ -171,6 +210,8 @@ func (s *Server) StreamAggregatedResources(stream ADSStream) error {
|
|||
|
||||
// Deprecated: remove when xDS v2 is no longer supported
|
||||
func (s *Server) streamAggregatedResources(stream ADSStream) error {
|
||||
defer s.activeStreams.Increment("v2")()
|
||||
|
||||
// Note: despite dealing entirely in v3 protobufs, this function is
|
||||
// exclusively used from the xDS v2 shim RPC handler, so the logging below
|
||||
// will refer to it as "v2".
|
||||
|
|
|
@ -43,6 +43,8 @@ func TestServer_StreamAggregatedResources_v2_BasicProtocol_TCP(t *testing.T) {
|
|||
// Check no response sent yet
|
||||
assertChanBlocked(t, envoy.stream.sendCh)
|
||||
|
||||
requireProtocolVersionGauge(t, scenario, "v2", 1)
|
||||
|
||||
// Deliver a new snapshot
|
||||
snap := newTestSnapshot(t, nil, "")
|
||||
mgr.DeliverConfig(t, sid, snap)
|
||||
|
|
|
@ -19,6 +19,7 @@ import (
|
|||
envoy_discovery_v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
|
||||
envoy_type_v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
"github.com/golang/protobuf/ptypes/wrappers"
|
||||
|
@ -118,6 +119,7 @@ type testServerScenario struct {
|
|||
server *Server
|
||||
mgr *testManager
|
||||
envoy *TestEnvoy
|
||||
sink *metrics.InmemSink
|
||||
errCh <-chan error
|
||||
}
|
||||
|
||||
|
@ -155,6 +157,17 @@ func newTestServerScenarioInner(
|
|||
envoy.Close()
|
||||
})
|
||||
|
||||
sink := metrics.NewInmemSink(1*time.Minute, 1*time.Minute)
|
||||
cfg := metrics.DefaultConfig("consul.xds.test")
|
||||
cfg.EnableHostname = false
|
||||
cfg.EnableRuntimeMetrics = false
|
||||
metrics.NewGlobal(cfg, sink)
|
||||
|
||||
t.Cleanup(func() {
|
||||
sink := &metrics.BlackholeSink{}
|
||||
metrics.NewGlobal(cfg, sink)
|
||||
})
|
||||
|
||||
s := NewServer(
|
||||
testutil.Logger(t),
|
||||
mgr,
|
||||
|
@ -178,6 +191,7 @@ func newTestServerScenarioInner(
|
|||
server: s,
|
||||
mgr: mgr,
|
||||
envoy: envoy,
|
||||
sink: sink,
|
||||
errCh: errCh,
|
||||
}
|
||||
}
|
||||
|
@ -647,3 +661,23 @@ func runStep(t *testing.T, name string, fn func(t *testing.T)) {
|
|||
t.FailNow()
|
||||
}
|
||||
}
|
||||
|
||||
func requireProtocolVersionGauge(
|
||||
t *testing.T,
|
||||
scenario *testServerScenario,
|
||||
xdsVersion string,
|
||||
expected int,
|
||||
) {
|
||||
data := scenario.sink.Data()
|
||||
require.Len(t, data, 1)
|
||||
|
||||
item := data[0]
|
||||
require.Len(t, item.Gauges, 1)
|
||||
|
||||
val, ok := item.Gauges["consul.xds.test.xds.server.streams;version="+xdsVersion]
|
||||
require.True(t, ok)
|
||||
|
||||
require.Equal(t, "consul.xds.test.xds.server.streams", val.Name)
|
||||
require.Equal(t, expected, int(val.Value))
|
||||
require.Equal(t, []metrics.Label{{Name: "version", Value: xdsVersion}}, val.Labels)
|
||||
}
|
||||
|
|
|
@ -63,6 +63,7 @@ type AgentCheck struct {
|
|||
ServiceID string
|
||||
ServiceName string
|
||||
Type string
|
||||
ExposedPort int
|
||||
Definition HealthCheckDefinition
|
||||
Namespace string `json:",omitempty"`
|
||||
}
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"envoy_version": "1.18.2"
|
||||
"envoy_version": "1.18.3"
|
||||
}
|
||||
},
|
||||
"static_resources": {
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"envoy_version": "1.18.2"
|
||||
"envoy_version": "1.18.3"
|
||||
}
|
||||
},
|
||||
"static_resources": {
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"envoy_version": "1.18.2"
|
||||
"envoy_version": "1.18.3"
|
||||
}
|
||||
},
|
||||
"static_resources": {
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"envoy_version": "1.18.2"
|
||||
"envoy_version": "1.18.3"
|
||||
}
|
||||
},
|
||||
"static_resources": {
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"envoy_version": "1.18.2"
|
||||
"envoy_version": "1.18.3"
|
||||
}
|
||||
},
|
||||
"static_resources": {
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"envoy_version": "1.18.2"
|
||||
"envoy_version": "1.18.3"
|
||||
}
|
||||
},
|
||||
"static_resources": {
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"envoy_version": "1.18.2"
|
||||
"envoy_version": "1.18.3"
|
||||
}
|
||||
},
|
||||
"static_resources": {
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"envoy_version": "1.18.2"
|
||||
"envoy_version": "1.18.3"
|
||||
}
|
||||
},
|
||||
"static_resources": {
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"envoy_version": "1.18.2"
|
||||
"envoy_version": "1.18.3"
|
||||
}
|
||||
},
|
||||
"static_resources": {
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"envoy_version": "1.18.2"
|
||||
"envoy_version": "1.18.3"
|
||||
}
|
||||
},
|
||||
"static_resources": {
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"envoy_version": "1.18.2"
|
||||
"envoy_version": "1.18.3"
|
||||
}
|
||||
},
|
||||
"static_resources": {
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
"id": "ingress-gateway",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"envoy_version": "1.18.2"
|
||||
"envoy_version": "1.18.3"
|
||||
}
|
||||
},
|
||||
"static_resources": {
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
"id": "ingress-gateway",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"envoy_version": "1.18.2"
|
||||
"envoy_version": "1.18.3"
|
||||
}
|
||||
},
|
||||
"static_resources": {
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
"id": "my-gateway-123",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"envoy_version": "1.18.2"
|
||||
"envoy_version": "1.18.3"
|
||||
}
|
||||
},
|
||||
"static_resources": {
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
"id": "my-gateway",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"envoy_version": "1.18.2"
|
||||
"envoy_version": "1.18.3"
|
||||
}
|
||||
},
|
||||
"static_resources": {
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
"id": "ingress-gateway-1",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"envoy_version": "1.18.2"
|
||||
"envoy_version": "1.18.3"
|
||||
}
|
||||
},
|
||||
"static_resources": {
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"envoy_version": "1.18.2"
|
||||
"envoy_version": "1.18.3"
|
||||
}
|
||||
},
|
||||
"static_resources": {
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"envoy_version": "1.18.2"
|
||||
"envoy_version": "1.18.3"
|
||||
}
|
||||
},
|
||||
"static_resources": {
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"envoy_version": "1.18.2"
|
||||
"envoy_version": "1.18.3"
|
||||
}
|
||||
},
|
||||
"static_resources": {
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"envoy_version": "1.18.2"
|
||||
"envoy_version": "1.18.3"
|
||||
}
|
||||
},
|
||||
"static_resources": {
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"envoy_version": "1.18.2"
|
||||
"envoy_version": "1.18.3"
|
||||
}
|
||||
},
|
||||
"static_resources": {
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"envoy_version": "1.18.2"
|
||||
"envoy_version": "1.18.3"
|
||||
}
|
||||
},
|
||||
"static_resources": {
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
"id": "test-proxy",
|
||||
"metadata": {
|
||||
"namespace": "default",
|
||||
"envoy_version": "1.18.2"
|
||||
"envoy_version": "1.18.3"
|
||||
}
|
||||
},
|
||||
"static_resources": {
|
||||
|
|
|
@ -197,6 +197,21 @@ func (c *cmd) generateConfigFromFlags() (iptables.Config, error) {
|
|||
cfg.ExcludeInboundPorts = append(cfg.ExcludeInboundPorts, strconv.Itoa(exposePath.ListenerPort))
|
||||
}
|
||||
}
|
||||
|
||||
// Exclude any exposed health check ports when Proxy.Expose.Checks is true.
|
||||
if svc.Proxy.Expose.Checks {
|
||||
// Get the health checks of the destination service.
|
||||
checks, err := c.client.Agent().ChecksWithFilter(fmt.Sprintf("ServiceName == %q", svc.Proxy.DestinationServiceName))
|
||||
if err != nil {
|
||||
return iptables.Config{}, err
|
||||
}
|
||||
|
||||
for _, check := range checks {
|
||||
if check.ExposedPort != 0 {
|
||||
cfg.ExcludeInboundPorts = append(cfg.ExcludeInboundPorts, strconv.Itoa(check.ExposedPort))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, port := range c.excludeInboundPorts {
|
||||
|
|
|
@ -62,11 +62,11 @@ func TestGenerateConfigFromFlags(t *testing.T) {
|
|||
t.Parallel()
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
command func() cmd
|
||||
proxyService *api.AgentServiceRegistration
|
||||
expCfg iptables.Config
|
||||
expError string
|
||||
name string
|
||||
command func() cmd
|
||||
consulServices []api.AgentServiceRegistration
|
||||
expCfg iptables.Config
|
||||
expError string
|
||||
}{
|
||||
{
|
||||
"proxyID with service port provided",
|
||||
|
@ -77,14 +77,16 @@ func TestGenerateConfigFromFlags(t *testing.T) {
|
|||
c.proxyID = "test-proxy-id"
|
||||
return c
|
||||
},
|
||||
&api.AgentServiceRegistration{
|
||||
Kind: api.ServiceKindConnectProxy,
|
||||
ID: "test-proxy-id",
|
||||
Name: "test-proxy",
|
||||
Port: 20000,
|
||||
Address: "1.1.1.1",
|
||||
Proxy: &api.AgentServiceConnectProxyConfig{
|
||||
DestinationServiceName: "foo",
|
||||
[]api.AgentServiceRegistration{
|
||||
{
|
||||
Kind: api.ServiceKindConnectProxy,
|
||||
ID: "test-proxy-id",
|
||||
Name: "test-proxy",
|
||||
Port: 20000,
|
||||
Address: "1.1.1.1",
|
||||
Proxy: &api.AgentServiceConnectProxyConfig{
|
||||
DestinationServiceName: "foo",
|
||||
},
|
||||
},
|
||||
},
|
||||
iptables.Config{
|
||||
|
@ -103,16 +105,18 @@ func TestGenerateConfigFromFlags(t *testing.T) {
|
|||
c.proxyID = "test-proxy-id"
|
||||
return c
|
||||
},
|
||||
&api.AgentServiceRegistration{
|
||||
Kind: api.ServiceKindConnectProxy,
|
||||
ID: "test-proxy-id",
|
||||
Name: "test-proxy",
|
||||
Port: 20000,
|
||||
Address: "1.1.1.1",
|
||||
Proxy: &api.AgentServiceConnectProxyConfig{
|
||||
DestinationServiceName: "foo",
|
||||
Config: map[string]interface{}{
|
||||
"bind_port": 21000,
|
||||
[]api.AgentServiceRegistration{
|
||||
{
|
||||
Kind: api.ServiceKindConnectProxy,
|
||||
ID: "test-proxy-id",
|
||||
Name: "test-proxy",
|
||||
Port: 20000,
|
||||
Address: "1.1.1.1",
|
||||
Proxy: &api.AgentServiceConnectProxyConfig{
|
||||
DestinationServiceName: "foo",
|
||||
Config: map[string]interface{}{
|
||||
"bind_port": 21000,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -132,16 +136,18 @@ func TestGenerateConfigFromFlags(t *testing.T) {
|
|||
c.proxyID = "test-proxy-id"
|
||||
return c
|
||||
},
|
||||
&api.AgentServiceRegistration{
|
||||
Kind: api.ServiceKindConnectProxy,
|
||||
ID: "test-proxy-id",
|
||||
Name: "test-proxy",
|
||||
Port: 20000,
|
||||
Address: "1.1.1.1",
|
||||
Proxy: &api.AgentServiceConnectProxyConfig{
|
||||
DestinationServiceName: "foo",
|
||||
Config: map[string]interface{}{
|
||||
"bind_port": "21000",
|
||||
[]api.AgentServiceRegistration{
|
||||
{
|
||||
Kind: api.ServiceKindConnectProxy,
|
||||
ID: "test-proxy-id",
|
||||
Name: "test-proxy",
|
||||
Port: 20000,
|
||||
Address: "1.1.1.1",
|
||||
Proxy: &api.AgentServiceConnectProxyConfig{
|
||||
DestinationServiceName: "foo",
|
||||
Config: map[string]interface{}{
|
||||
"bind_port": "21000",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -161,16 +167,18 @@ func TestGenerateConfigFromFlags(t *testing.T) {
|
|||
c.proxyID = "test-proxy-id"
|
||||
return c
|
||||
},
|
||||
&api.AgentServiceRegistration{
|
||||
Kind: api.ServiceKindConnectProxy,
|
||||
ID: "test-proxy-id",
|
||||
Name: "test-proxy",
|
||||
Port: 20000,
|
||||
Address: "1.1.1.1",
|
||||
Proxy: &api.AgentServiceConnectProxyConfig{
|
||||
DestinationServiceName: "foo",
|
||||
Config: map[string]interface{}{
|
||||
"bind_port": "invalid",
|
||||
[]api.AgentServiceRegistration{
|
||||
{
|
||||
Kind: api.ServiceKindConnectProxy,
|
||||
ID: "test-proxy-id",
|
||||
Name: "test-proxy",
|
||||
Port: 20000,
|
||||
Address: "1.1.1.1",
|
||||
Proxy: &api.AgentServiceConnectProxyConfig{
|
||||
DestinationServiceName: "foo",
|
||||
Config: map[string]interface{}{
|
||||
"bind_port": "invalid",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -186,16 +194,18 @@ func TestGenerateConfigFromFlags(t *testing.T) {
|
|||
c.proxyID = "test-proxy-id"
|
||||
return c
|
||||
},
|
||||
&api.AgentServiceRegistration{
|
||||
Kind: api.ServiceKindConnectProxy,
|
||||
ID: "test-proxy-id",
|
||||
Name: "test-proxy",
|
||||
Port: 20000,
|
||||
Address: "1.1.1.1",
|
||||
Proxy: &api.AgentServiceConnectProxyConfig{
|
||||
DestinationServiceName: "foo",
|
||||
TransparentProxy: &api.TransparentProxyConfig{
|
||||
OutboundListenerPort: 21000,
|
||||
[]api.AgentServiceRegistration{
|
||||
{
|
||||
Kind: api.ServiceKindConnectProxy,
|
||||
ID: "test-proxy-id",
|
||||
Name: "test-proxy",
|
||||
Port: 20000,
|
||||
Address: "1.1.1.1",
|
||||
Proxy: &api.AgentServiceConnectProxyConfig{
|
||||
DestinationServiceName: "foo",
|
||||
TransparentProxy: &api.TransparentProxyConfig{
|
||||
OutboundListenerPort: 21000,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -228,11 +238,13 @@ func TestGenerateConfigFromFlags(t *testing.T) {
|
|||
c.proxyID = "test-proxy-id"
|
||||
return c
|
||||
},
|
||||
&api.AgentServiceRegistration{
|
||||
ID: "test-proxy-id",
|
||||
Name: "test-proxy",
|
||||
Port: 20000,
|
||||
Address: "1.1.1.1",
|
||||
[]api.AgentServiceRegistration{
|
||||
{
|
||||
ID: "test-proxy-id",
|
||||
Name: "test-proxy",
|
||||
Port: 20000,
|
||||
Address: "1.1.1.1",
|
||||
},
|
||||
},
|
||||
iptables.Config{},
|
||||
"service test-proxy-id is not a proxy service",
|
||||
|
@ -357,16 +369,18 @@ func TestGenerateConfigFromFlags(t *testing.T) {
|
|||
c.proxyID = "test-proxy-id"
|
||||
return c
|
||||
},
|
||||
&api.AgentServiceRegistration{
|
||||
Kind: api.ServiceKindConnectProxy,
|
||||
ID: "test-proxy-id",
|
||||
Name: "test-proxy",
|
||||
Port: 20000,
|
||||
Address: "1.1.1.1",
|
||||
Proxy: &api.AgentServiceConnectProxyConfig{
|
||||
DestinationServiceName: "foo",
|
||||
Config: map[string]interface{}{
|
||||
"envoy_prometheus_bind_addr": "0.0.0.0:9000",
|
||||
[]api.AgentServiceRegistration{
|
||||
{
|
||||
Kind: api.ServiceKindConnectProxy,
|
||||
ID: "test-proxy-id",
|
||||
Name: "test-proxy",
|
||||
Port: 20000,
|
||||
Address: "1.1.1.1",
|
||||
Proxy: &api.AgentServiceConnectProxyConfig{
|
||||
DestinationServiceName: "foo",
|
||||
Config: map[string]interface{}{
|
||||
"envoy_prometheus_bind_addr": "0.0.0.0:9000",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -387,16 +401,18 @@ func TestGenerateConfigFromFlags(t *testing.T) {
|
|||
c.proxyID = "test-proxy-id"
|
||||
return c
|
||||
},
|
||||
&api.AgentServiceRegistration{
|
||||
Kind: api.ServiceKindConnectProxy,
|
||||
ID: "test-proxy-id",
|
||||
Name: "test-proxy",
|
||||
Port: 20000,
|
||||
Address: "1.1.1.1",
|
||||
Proxy: &api.AgentServiceConnectProxyConfig{
|
||||
DestinationServiceName: "foo",
|
||||
Config: map[string]interface{}{
|
||||
"envoy_prometheus_bind_addr": "9000",
|
||||
[]api.AgentServiceRegistration{
|
||||
{
|
||||
Kind: api.ServiceKindConnectProxy,
|
||||
ID: "test-proxy-id",
|
||||
Name: "test-proxy",
|
||||
Port: 20000,
|
||||
Address: "1.1.1.1",
|
||||
Proxy: &api.AgentServiceConnectProxyConfig{
|
||||
DestinationServiceName: "foo",
|
||||
Config: map[string]interface{}{
|
||||
"envoy_prometheus_bind_addr": "9000",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -412,16 +428,18 @@ func TestGenerateConfigFromFlags(t *testing.T) {
|
|||
c.proxyID = "test-proxy-id"
|
||||
return c
|
||||
},
|
||||
&api.AgentServiceRegistration{
|
||||
Kind: api.ServiceKindConnectProxy,
|
||||
ID: "test-proxy-id",
|
||||
Name: "test-proxy",
|
||||
Port: 20000,
|
||||
Address: "1.1.1.1",
|
||||
Proxy: &api.AgentServiceConnectProxyConfig{
|
||||
DestinationServiceName: "foo",
|
||||
Config: map[string]interface{}{
|
||||
"envoy_stats_bind_addr": "0.0.0.0:8000",
|
||||
[]api.AgentServiceRegistration{
|
||||
{
|
||||
Kind: api.ServiceKindConnectProxy,
|
||||
ID: "test-proxy-id",
|
||||
Name: "test-proxy",
|
||||
Port: 20000,
|
||||
Address: "1.1.1.1",
|
||||
Proxy: &api.AgentServiceConnectProxyConfig{
|
||||
DestinationServiceName: "foo",
|
||||
Config: map[string]interface{}{
|
||||
"envoy_stats_bind_addr": "0.0.0.0:8000",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -442,16 +460,18 @@ func TestGenerateConfigFromFlags(t *testing.T) {
|
|||
c.proxyID = "test-proxy-id"
|
||||
return c
|
||||
},
|
||||
&api.AgentServiceRegistration{
|
||||
Kind: api.ServiceKindConnectProxy,
|
||||
ID: "test-proxy-id",
|
||||
Name: "test-proxy",
|
||||
Port: 20000,
|
||||
Address: "1.1.1.1",
|
||||
Proxy: &api.AgentServiceConnectProxyConfig{
|
||||
DestinationServiceName: "foo",
|
||||
Config: map[string]interface{}{
|
||||
"envoy_stats_bind_addr": "8000",
|
||||
[]api.AgentServiceRegistration{
|
||||
{
|
||||
Kind: api.ServiceKindConnectProxy,
|
||||
ID: "test-proxy-id",
|
||||
Name: "test-proxy",
|
||||
Port: 20000,
|
||||
Address: "1.1.1.1",
|
||||
Proxy: &api.AgentServiceConnectProxyConfig{
|
||||
DestinationServiceName: "foo",
|
||||
Config: map[string]interface{}{
|
||||
"envoy_stats_bind_addr": "8000",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -467,20 +487,22 @@ func TestGenerateConfigFromFlags(t *testing.T) {
|
|||
c.proxyID = "test-proxy-id"
|
||||
return c
|
||||
},
|
||||
&api.AgentServiceRegistration{
|
||||
Kind: api.ServiceKindConnectProxy,
|
||||
ID: "test-proxy-id",
|
||||
Name: "test-proxy",
|
||||
Port: 20000,
|
||||
Address: "1.1.1.1",
|
||||
Proxy: &api.AgentServiceConnectProxyConfig{
|
||||
DestinationServiceName: "foo",
|
||||
Expose: api.ExposeConfig{
|
||||
Paths: []api.ExposePath{
|
||||
{
|
||||
ListenerPort: 23000,
|
||||
LocalPathPort: 8080,
|
||||
Path: "/health",
|
||||
[]api.AgentServiceRegistration{
|
||||
{
|
||||
Kind: api.ServiceKindConnectProxy,
|
||||
ID: "test-proxy-id",
|
||||
Name: "test-proxy",
|
||||
Port: 20000,
|
||||
Address: "1.1.1.1",
|
||||
Proxy: &api.AgentServiceConnectProxyConfig{
|
||||
DestinationServiceName: "foo",
|
||||
Expose: api.ExposeConfig{
|
||||
Paths: []api.ExposePath{
|
||||
{
|
||||
ListenerPort: 23000,
|
||||
LocalPathPort: 8080,
|
||||
Path: "/health",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -494,12 +516,63 @@ func TestGenerateConfigFromFlags(t *testing.T) {
|
|||
},
|
||||
"",
|
||||
},
|
||||
{
|
||||
"proxy config has expose paths with checks set to true",
|
||||
func() cmd {
|
||||
var c cmd
|
||||
c.init()
|
||||
c.proxyUID = "1234"
|
||||
c.proxyID = "test-proxy-id"
|
||||
return c
|
||||
},
|
||||
[]api.AgentServiceRegistration{
|
||||
{
|
||||
ID: "foo-id",
|
||||
Name: "foo",
|
||||
Port: 8080,
|
||||
Address: "1.1.1.1",
|
||||
Checks: []*api.AgentServiceCheck{
|
||||
{
|
||||
Name: "http",
|
||||
HTTP: "1.1.1.1:8080/health",
|
||||
Interval: "10s",
|
||||
},
|
||||
{
|
||||
Name: "grpc",
|
||||
GRPC: "1.1.1.1:8081",
|
||||
Interval: "10s",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Kind: api.ServiceKindConnectProxy,
|
||||
ID: "test-proxy-id",
|
||||
Name: "test-proxy",
|
||||
Port: 20000,
|
||||
Address: "1.1.1.1",
|
||||
Proxy: &api.AgentServiceConnectProxyConfig{
|
||||
DestinationServiceName: "foo",
|
||||
DestinationServiceID: "foo-id",
|
||||
Expose: api.ExposeConfig{
|
||||
Checks: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
iptables.Config{
|
||||
ProxyUserID: "1234",
|
||||
ProxyInboundPort: 20000,
|
||||
ProxyOutboundPort: iptables.DefaultTProxyOutboundPort,
|
||||
ExcludeInboundPorts: []string{"21500", "21501"},
|
||||
},
|
||||
"",
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
cmd := c.command()
|
||||
if c.proxyService != nil {
|
||||
if c.consulServices != nil {
|
||||
testServer, err := testutil.NewTestServerConfigT(t, nil)
|
||||
require.NoError(t, err)
|
||||
testServer.WaitForLeader(t)
|
||||
|
@ -507,11 +580,12 @@ func TestGenerateConfigFromFlags(t *testing.T) {
|
|||
|
||||
client, err := api.NewClient(&api.Config{Address: testServer.HTTPAddr})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = client.Agent().ServiceRegister(c.proxyService)
|
||||
require.NoError(t, err)
|
||||
|
||||
cmd.client = client
|
||||
|
||||
for _, service := range c.consulServices {
|
||||
err = client.Agent().ServiceRegister(&service)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
} else {
|
||||
client, err := api.NewClient(&api.Config{Address: "not-reachable"})
|
||||
require.NoError(t, err)
|
||||
|
@ -522,7 +596,7 @@ func TestGenerateConfigFromFlags(t *testing.T) {
|
|||
|
||||
if c.expError == "" {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, c.expCfg, cfg)
|
||||
require.EqualValues(t, c.expCfg, cfg)
|
||||
} else {
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), c.expError)
|
||||
|
|
1
go.mod
1
go.mod
|
@ -30,7 +30,6 @@ require (
|
|||
github.com/google/tcpproxy v0.0.0-20180808230851-dfa16c61dad2
|
||||
github.com/hashicorp/consul/api v1.8.0
|
||||
github.com/hashicorp/consul/sdk v0.7.0
|
||||
github.com/hashicorp/errwrap v1.0.0
|
||||
github.com/hashicorp/go-bexpr v0.1.2
|
||||
github.com/hashicorp/go-checkpoint v0.5.0
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1
|
||||
|
|
|
@ -80,6 +80,7 @@ func HealthCheckToStructs(s HealthCheck) structs.HealthCheck {
|
|||
t.ServiceName = s.ServiceName
|
||||
t.ServiceTags = s.ServiceTags
|
||||
t.Type = s.Type
|
||||
t.ExposedPort = int(s.ExposedPort)
|
||||
t.Definition = HealthCheckDefinitionToStructs(s.Definition)
|
||||
t.EnterpriseMeta = EnterpriseMetaToStructs(s.EnterpriseMeta)
|
||||
t.RaftIndex = RaftIndexToStructs(s.RaftIndex)
|
||||
|
@ -97,6 +98,7 @@ func NewHealthCheckFromStructs(t structs.HealthCheck) HealthCheck {
|
|||
s.ServiceName = t.ServiceName
|
||||
s.ServiceTags = t.ServiceTags
|
||||
s.Type = t.Type
|
||||
s.ExposedPort = int32(t.ExposedPort)
|
||||
s.Definition = NewHealthCheckDefinitionFromStructs(t.Definition)
|
||||
s.EnterpriseMeta = NewEnterpriseMetaFromStructs(t.EnterpriseMeta)
|
||||
s.RaftIndex = NewRaftIndexFromStructs(t.RaftIndex)
|
||||
|
|
|
@ -52,6 +52,8 @@ type HealthCheck struct {
|
|||
pbcommon.RaftIndex `protobuf:"bytes,11,opt,name=RaftIndex,proto3,embedded=RaftIndex" json:"RaftIndex"`
|
||||
// mog: func-to=EnterpriseMetaToStructs func-from=NewEnterpriseMetaFromStructs
|
||||
EnterpriseMeta pbcommon.EnterpriseMeta `protobuf:"bytes,13,opt,name=EnterpriseMeta,proto3" json:"EnterpriseMeta"`
|
||||
// mog: func-to=int func-from=int32
|
||||
ExposedPort int32 `protobuf:"varint,14,opt,name=ExposedPort,proto3" json:"ExposedPort,omitempty"`
|
||||
}
|
||||
|
||||
func (m *HealthCheck) Reset() { *m = HealthCheck{} }
|
||||
|
@ -285,72 +287,73 @@ func init() {
|
|||
func init() { proto.RegisterFile("proto/pbservice/healthcheck.proto", fileDescriptor_8a6f7448747c9fbe) }
|
||||
|
||||
var fileDescriptor_8a6f7448747c9fbe = []byte{
|
||||
// 1031 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x4d, 0x73, 0xdb, 0x44,
|
||||
0x18, 0xb6, 0xe2, 0x7c, 0x69, 0x9d, 0xa4, 0xc9, 0x36, 0x0d, 0xdb, 0xb4, 0xa3, 0x98, 0xc0, 0xc1,
|
||||
0x0c, 0x41, 0x9e, 0x31, 0x03, 0x03, 0xcc, 0x00, 0x13, 0xdb, 0x6d, 0x62, 0x26, 0x09, 0x46, 0x16,
|
||||
0x3d, 0x70, 0x53, 0xe4, 0xb5, 0xad, 0xb1, 0xac, 0xf5, 0xac, 0x56, 0x99, 0x98, 0x2b, 0x7f, 0xa0,
|
||||
0xc7, 0xfe, 0x07, 0xfe, 0x48, 0x8e, 0x39, 0x72, 0x0a, 0x90, 0xfc, 0x0b, 0x4e, 0xcc, 0xbe, 0x2b,
|
||||
0x39, 0x72, 0xad, 0x62, 0xd3, 0x69, 0x4f, 0x7e, 0x3f, 0x77, 0xb5, 0xef, 0xfb, 0x3c, 0x4f, 0x82,
|
||||
0x3e, 0x1c, 0x72, 0x26, 0x58, 0x79, 0x78, 0x1e, 0x52, 0x7e, 0xe1, 0xb9, 0xb4, 0xdc, 0xa3, 0x8e,
|
||||
0x2f, 0x7a, 0x6e, 0x8f, 0xba, 0x7d, 0x13, 0x72, 0x58, 0x1f, 0x27, 0x77, 0x8d, 0x2e, 0x63, 0x5d,
|
||||
0x9f, 0x96, 0x21, 0x71, 0x1e, 0x75, 0xca, 0xed, 0x88, 0x3b, 0xc2, 0x63, 0x81, 0x2a, 0xdd, 0x7d,
|
||||
0x92, 0x9c, 0xe6, 0xb2, 0xc1, 0x80, 0x05, 0x65, 0xf5, 0x13, 0x27, 0xb7, 0xbb, 0xac, 0xcb, 0x54,
|
||||
0x81, 0xb4, 0x54, 0x74, 0xff, 0xb7, 0x45, 0x54, 0x38, 0x86, 0x3b, 0x6b, 0xf2, 0x4e, 0x8c, 0xd1,
|
||||
0xe2, 0x19, 0x6b, 0x53, 0xa2, 0x15, 0xb5, 0x92, 0x6e, 0x81, 0x8d, 0x8f, 0xd0, 0x0a, 0x24, 0x1b,
|
||||
0x75, 0xb2, 0x20, 0xc3, 0xd5, 0xcf, 0xfe, 0xb9, 0xd9, 0xfb, 0xa4, 0xeb, 0x89, 0x5e, 0x74, 0x6e,
|
||||
0xba, 0x6c, 0x50, 0xee, 0x39, 0x61, 0xcf, 0x73, 0x19, 0x1f, 0x96, 0x5d, 0x16, 0x84, 0x91, 0x5f,
|
||||
0x16, 0xa3, 0x21, 0x0d, 0xcd, 0xb8, 0xc9, 0x4a, 0xba, 0xe1, 0x70, 0x67, 0x40, 0x49, 0x3e, 0x3e,
|
||||
0xdc, 0x19, 0x50, 0xbc, 0x83, 0x96, 0x5b, 0xc2, 0x11, 0x51, 0x48, 0x16, 0x21, 0x1a, 0x7b, 0x78,
|
||||
0x1b, 0x2d, 0x9d, 0x31, 0x41, 0x43, 0xb2, 0x04, 0x61, 0xe5, 0xc8, 0xea, 0x1f, 0x23, 0x31, 0x8c,
|
||||
0x04, 0x59, 0x56, 0xd5, 0xca, 0xc3, 0x4f, 0x91, 0xde, 0x52, 0x43, 0x6a, 0xd4, 0xc9, 0x0a, 0xa4,
|
||||
0xee, 0x03, 0xb8, 0x88, 0x0a, 0xb1, 0x03, 0xd7, 0xaf, 0x42, 0x3e, 0x1d, 0x4a, 0x55, 0xd8, 0x4e,
|
||||
0x37, 0x24, 0x7a, 0x31, 0x9f, 0xaa, 0x90, 0x21, 0xf9, 0xed, 0xf6, 0x68, 0x48, 0xc9, 0x9a, 0xfa,
|
||||
0x76, 0x69, 0xe3, 0xe7, 0x08, 0xd5, 0x69, 0xc7, 0x0b, 0x3c, 0xb9, 0x03, 0x82, 0x8a, 0x5a, 0xa9,
|
||||
0x50, 0x29, 0x9a, 0xe3, 0x7d, 0x99, 0xa9, 0xc1, 0xde, 0xd7, 0x55, 0x17, 0xaf, 0x6e, 0xf6, 0x72,
|
||||
0x56, 0xaa, 0x13, 0x7f, 0x8d, 0x74, 0xcb, 0xe9, 0x88, 0x46, 0xd0, 0xa6, 0x97, 0xa4, 0x00, 0xc7,
|
||||
0x6c, 0x99, 0xf1, 0xf2, 0xc6, 0x89, 0xea, 0xaa, 0xec, 0xbb, 0xbe, 0xd9, 0xd3, 0xac, 0xfb, 0x6a,
|
||||
0x5c, 0x47, 0x1b, 0xcf, 0x02, 0x41, 0xf9, 0x90, 0x7b, 0x21, 0x3d, 0xa5, 0xc2, 0x21, 0xeb, 0xd0,
|
||||
0xbf, 0x93, 0xf4, 0x4f, 0x66, 0xe3, 0xcb, 0x5f, 0xeb, 0xd9, 0xff, 0x08, 0x40, 0xd0, 0xa6, 0xfc,
|
||||
0x85, 0xe3, 0x47, 0x54, 0xce, 0x1e, 0x0c, 0xa2, 0xc1, 0x1c, 0x94, 0xb3, 0xff, 0x72, 0x05, 0x3d,
|
||||
0xca, 0x7c, 0x91, 0x9c, 0xcd, 0xb1, 0x6d, 0x37, 0x13, 0xd0, 0x48, 0x1b, 0x7f, 0x8c, 0xd6, 0xed,
|
||||
0x93, 0x96, 0x9c, 0x20, 0xe5, 0x30, 0xf5, 0x87, 0x90, 0x9c, 0x0c, 0x26, 0x55, 0x7d, 0x6f, 0xf8,
|
||||
0x82, 0x72, 0xaf, 0x33, 0x02, 0x80, 0xad, 0x5a, 0x93, 0x41, 0xfc, 0x03, 0x5a, 0x56, 0x9f, 0x47,
|
||||
0xf2, 0xc5, 0x7c, 0xa9, 0x50, 0x39, 0x98, 0x35, 0x63, 0x53, 0x95, 0x3f, 0x0b, 0x04, 0x1f, 0xc5,
|
||||
0x4f, 0x8e, 0x4f, 0x90, 0x08, 0x3a, 0xa5, 0xa2, 0xc7, 0xda, 0x09, 0xde, 0x94, 0x27, 0xdf, 0x50,
|
||||
0x65, 0xed, 0x11, 0xc1, 0xea, 0x0d, 0xd2, 0xc6, 0x9b, 0x28, 0x6f, 0xd7, 0x9a, 0x31, 0x02, 0xa5,
|
||||
0x89, 0xbf, 0x47, 0xab, 0x0d, 0x39, 0xba, 0x0b, 0xc7, 0x07, 0x04, 0x16, 0x2a, 0x8f, 0x4d, 0x45,
|
||||
0x4a, 0x33, 0x21, 0xa5, 0x59, 0x8f, 0x49, 0xa9, 0x16, 0xf6, 0xea, 0xcf, 0x3d, 0xcd, 0x1a, 0x37,
|
||||
0xc9, 0x07, 0x2b, 0xc8, 0x9e, 0x3a, 0x97, 0x2d, 0xef, 0x57, 0x4a, 0xf4, 0xa2, 0x56, 0x5a, 0xb7,
|
||||
0x26, 0x83, 0xf8, 0x5b, 0xb4, 0x62, 0x7b, 0x03, 0xca, 0x22, 0x01, 0x60, 0x9e, 0xf3, 0x96, 0xa4,
|
||||
0x07, 0xf7, 0x91, 0x51, 0xa7, 0x9c, 0x76, 0xbd, 0x50, 0x50, 0x5e, 0xe3, 0x9e, 0xf0, 0x5c, 0xc7,
|
||||
0x8f, 0xc1, 0x7c, 0xd8, 0x11, 0x94, 0x03, 0x05, 0xe6, 0x3c, 0x75, 0xc6, 0x51, 0xd8, 0x40, 0xa8,
|
||||
0xe5, 0x72, 0x6f, 0x28, 0x0e, 0x79, 0x37, 0x24, 0x08, 0x10, 0x93, 0x8a, 0xe0, 0x03, 0xb4, 0x55,
|
||||
0x67, 0x6e, 0x9f, 0xf2, 0x1a, 0x0b, 0x84, 0xe3, 0x05, 0x94, 0x37, 0xea, 0x00, 0x72, 0xdd, 0x9a,
|
||||
0x4e, 0x48, 0xe8, 0xb5, 0x7a, 0xd4, 0xf7, 0x63, 0x9e, 0x29, 0x47, 0x2e, 0xed, 0xb8, 0xd2, 0x6c,
|
||||
0x9c, 0x1d, 0x91, 0x6d, 0xb5, 0x34, 0xe5, 0xc9, 0xa5, 0x1d, 0x59, 0xcd, 0x1a, 0x60, 0x5e, 0xb7,
|
||||
0xc0, 0x96, 0xdf, 0x23, 0x7f, 0x7f, 0x0e, 0xa9, 0x7d, 0xd2, 0x22, 0x1b, 0x80, 0xa7, 0x54, 0x44,
|
||||
0x4a, 0xc5, 0xa1, 0xef, 0x39, 0x21, 0xc8, 0xdc, 0x03, 0x25, 0x15, 0xe3, 0x00, 0xde, 0x47, 0x6b,
|
||||
0xe0, 0xc4, 0x4f, 0x24, 0x9b, 0x50, 0x30, 0x11, 0xc3, 0x5f, 0xa0, 0xbc, 0x6d, 0x9f, 0x90, 0xad,
|
||||
0xf9, 0x67, 0x28, 0xeb, 0x77, 0x7f, 0x4a, 0x48, 0x06, 0xb0, 0x94, 0xe0, 0xea, 0xd3, 0x51, 0xcc,
|
||||
0x19, 0x69, 0xe2, 0x03, 0xb4, 0x74, 0x01, 0xb4, 0x5b, 0x88, 0x29, 0x3c, 0x81, 0xf2, 0x84, 0x9d,
|
||||
0x96, 0x2a, 0xfa, 0x66, 0xe1, 0x2b, 0x6d, 0xff, 0x77, 0x1d, 0xe9, 0x00, 0x7d, 0x90, 0xa3, 0x94,
|
||||
0x4e, 0x6b, 0xef, 0x44, 0xa7, 0x17, 0x32, 0x75, 0x3a, 0x9f, 0xad, 0xd3, 0x8b, 0x69, 0x9d, 0x9e,
|
||||
0x04, 0xc5, 0xd2, 0x14, 0x28, 0x12, 0xc5, 0x58, 0x4e, 0x29, 0xc6, 0x77, 0x63, 0x96, 0x6f, 0x03,
|
||||
0xcb, 0xd3, 0x4a, 0x3a, 0x7e, 0xe4, 0x5c, 0xcc, 0x5e, 0xc9, 0x64, 0xf6, 0xee, 0x34, 0xb3, 0x57,
|
||||
0xb3, 0x99, 0xad, 0xbf, 0x0d, 0xb3, 0x27, 0x70, 0x85, 0x66, 0xe1, 0xaa, 0x90, 0x81, 0xab, 0x4c,
|
||||
0xa6, 0xac, 0xcd, 0x64, 0xca, 0x7a, 0x36, 0x53, 0x9e, 0x66, 0x32, 0x65, 0xe3, 0x8d, 0x4c, 0x79,
|
||||
0x30, 0xc5, 0x94, 0x29, 0x09, 0x7f, 0x32, 0x97, 0x84, 0x6f, 0x66, 0x49, 0x78, 0x4a, 0xd1, 0xb6,
|
||||
0xde, 0x42, 0xd1, 0x62, 0xca, 0xe1, 0xff, 0x47, 0x39, 0x5c, 0x41, 0xdb, 0xad, 0xc8, 0x75, 0x69,
|
||||
0x18, 0x56, 0x69, 0x87, 0x71, 0xda, 0x74, 0xc2, 0xd0, 0x0b, 0xba, 0xe4, 0x51, 0x51, 0x2b, 0x2d,
|
||||
0x59, 0x99, 0x39, 0xfc, 0x25, 0xda, 0x79, 0xee, 0x78, 0x7e, 0xc4, 0x69, 0x9c, 0x48, 0x54, 0x8f,
|
||||
0xec, 0x40, 0xd7, 0x1b, 0xb2, 0x72, 0xff, 0x4d, 0xce, 0x2e, 0x47, 0x80, 0xeb, 0x0f, 0xd4, 0xfe,
|
||||
0xc7, 0x81, 0x71, 0x16, 0x96, 0x40, 0x52, 0x59, 0xd8, 0xc4, 0x6c, 0xc1, 0x7e, 0xf8, 0xee, 0x04,
|
||||
0x7b, 0xea, 0x4f, 0xd0, 0x63, 0x78, 0xd7, 0x64, 0xf0, 0x3d, 0xa8, 0x55, 0xf5, 0xf4, 0xea, 0x6f,
|
||||
0x23, 0x77, 0x75, 0x6b, 0x68, 0xd7, 0xb7, 0x86, 0xf6, 0xd7, 0xad, 0xa1, 0xbd, 0xbc, 0x33, 0x72,
|
||||
0xaf, 0xee, 0x8c, 0xdc, 0xf5, 0x9d, 0x91, 0xfb, 0xe3, 0xce, 0xc8, 0xfd, 0xf2, 0xe9, 0x7f, 0x89,
|
||||
0xd5, 0x6b, 0xff, 0x2a, 0x9f, 0x2f, 0x43, 0xe0, 0xf3, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x5b,
|
||||
0x4c, 0x80, 0x35, 0x44, 0x0b, 0x00, 0x00,
|
||||
// 1051 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x52, 0xe3, 0x46,
|
||||
0x17, 0xb5, 0x30, 0x3f, 0x56, 0x1b, 0x18, 0xe8, 0x61, 0xf8, 0x7a, 0x98, 0x29, 0xe1, 0x8f, 0x64,
|
||||
0x41, 0x2a, 0x44, 0xae, 0x22, 0x95, 0x54, 0x92, 0xaa, 0x24, 0x85, 0x31, 0x03, 0x4e, 0x01, 0x71,
|
||||
0x64, 0x65, 0x16, 0xd9, 0x09, 0xb9, 0x6d, 0xab, 0x2c, 0xab, 0x55, 0xad, 0x16, 0x85, 0xf3, 0x14,
|
||||
0xb3, 0x9c, 0x17, 0xc8, 0x2a, 0x2f, 0xc2, 0x92, 0x65, 0x56, 0x24, 0x81, 0xb7, 0xc8, 0x2a, 0xd5,
|
||||
0xb7, 0x25, 0x23, 0x8f, 0x35, 0xb1, 0x33, 0x35, 0x59, 0xd1, 0xf7, 0xdc, 0x7b, 0xbb, 0xd5, 0x7d,
|
||||
0xcf, 0x39, 0x06, 0xfd, 0x3f, 0xe4, 0x4c, 0xb0, 0x6a, 0x78, 0x11, 0x51, 0x7e, 0xe9, 0xb9, 0xb4,
|
||||
0xda, 0xa3, 0x8e, 0x2f, 0x7a, 0x6e, 0x8f, 0xba, 0x7d, 0x13, 0x72, 0x58, 0x1f, 0x25, 0xb7, 0x8c,
|
||||
0x2e, 0x63, 0x5d, 0x9f, 0x56, 0x21, 0x71, 0x11, 0x77, 0xaa, 0xed, 0x98, 0x3b, 0xc2, 0x63, 0x81,
|
||||
0x2a, 0xdd, 0x7a, 0x96, 0xee, 0xe6, 0xb2, 0xc1, 0x80, 0x05, 0x55, 0xf5, 0x27, 0x49, 0x6e, 0x74,
|
||||
0x59, 0x97, 0xa9, 0x02, 0xb9, 0x52, 0xe8, 0xce, 0x2f, 0xf3, 0xa8, 0x7c, 0x02, 0x67, 0x1e, 0xca,
|
||||
0x33, 0x31, 0x46, 0xf3, 0xe7, 0xac, 0x4d, 0x89, 0x56, 0xd1, 0x76, 0x75, 0x0b, 0xd6, 0xf8, 0x18,
|
||||
0x2d, 0x41, 0xb2, 0x51, 0x27, 0x73, 0x12, 0xae, 0x7d, 0xf2, 0xd7, 0xed, 0xf6, 0x47, 0x5d, 0x4f,
|
||||
0xf4, 0xe2, 0x0b, 0xd3, 0x65, 0x83, 0x6a, 0xcf, 0x89, 0x7a, 0x9e, 0xcb, 0x78, 0x58, 0x75, 0x59,
|
||||
0x10, 0xc5, 0x7e, 0x55, 0x0c, 0x43, 0x1a, 0x99, 0x49, 0x93, 0x95, 0x76, 0xc3, 0xe6, 0xce, 0x80,
|
||||
0x92, 0x62, 0xb2, 0xb9, 0x33, 0xa0, 0x78, 0x13, 0x2d, 0xb6, 0x84, 0x23, 0xe2, 0x88, 0xcc, 0x03,
|
||||
0x9a, 0x44, 0x78, 0x03, 0x2d, 0x9c, 0x33, 0x41, 0x23, 0xb2, 0x00, 0xb0, 0x0a, 0x64, 0xf5, 0xf7,
|
||||
0xb1, 0x08, 0x63, 0x41, 0x16, 0x55, 0xb5, 0x8a, 0xf0, 0x73, 0xa4, 0xb7, 0xd4, 0x23, 0x35, 0xea,
|
||||
0x64, 0x09, 0x52, 0x0f, 0x00, 0xae, 0xa0, 0x72, 0x12, 0xc0, 0xf1, 0x25, 0xc8, 0x67, 0xa1, 0x4c,
|
||||
0x85, 0xed, 0x74, 0x23, 0xa2, 0x57, 0x8a, 0x99, 0x0a, 0x09, 0xc9, 0x6f, 0xb7, 0x87, 0x21, 0x25,
|
||||
0xcb, 0xea, 0xdb, 0xe5, 0x1a, 0xbf, 0x40, 0xa8, 0x4e, 0x3b, 0x5e, 0xe0, 0xc9, 0x19, 0x10, 0x54,
|
||||
0xd1, 0x76, 0xcb, 0xfb, 0x15, 0x73, 0x34, 0x2f, 0x33, 0xf3, 0xb0, 0x0f, 0x75, 0xb5, 0xf9, 0xeb,
|
||||
0xdb, 0xed, 0x82, 0x95, 0xe9, 0xc4, 0x5f, 0x22, 0xdd, 0x72, 0x3a, 0xa2, 0x11, 0xb4, 0xe9, 0x15,
|
||||
0x29, 0xc3, 0x36, 0xeb, 0x66, 0x32, 0xbc, 0x51, 0xa2, 0x56, 0x92, 0x7d, 0x37, 0xb7, 0xdb, 0x9a,
|
||||
0xf5, 0x50, 0x8d, 0xeb, 0x68, 0xf5, 0x28, 0x10, 0x94, 0x87, 0xdc, 0x8b, 0xe8, 0x19, 0x15, 0x0e,
|
||||
0x59, 0x81, 0xfe, 0xcd, 0xb4, 0x7f, 0x3c, 0x9b, 0x1c, 0xfe, 0x46, 0x8f, 0xbc, 0xfe, 0xd1, 0x55,
|
||||
0xc8, 0x22, 0xda, 0x6e, 0x32, 0x2e, 0xc8, 0x6a, 0x45, 0xdb, 0x5d, 0xb0, 0xb2, 0xd0, 0xce, 0x07,
|
||||
0x40, 0x93, 0x36, 0xe5, 0x2f, 0x1d, 0x3f, 0xa6, 0x72, 0x3a, 0xb0, 0x20, 0x1a, 0xbc, 0x94, 0x0a,
|
||||
0x76, 0x5e, 0x2d, 0xa1, 0x27, 0xb9, 0x77, 0x96, 0xaf, 0x77, 0x62, 0xdb, 0xcd, 0x94, 0x56, 0x72,
|
||||
0x8d, 0x3f, 0x44, 0x2b, 0xf6, 0x69, 0x4b, 0xbe, 0x31, 0xe5, 0x30, 0x97, 0xc7, 0x90, 0x1c, 0x07,
|
||||
0xd3, 0xaa, 0xbe, 0x17, 0xbe, 0xa4, 0xdc, 0xeb, 0x0c, 0x81, 0x82, 0x25, 0x6b, 0x1c, 0xc4, 0xdf,
|
||||
0xa1, 0x45, 0xf5, 0x79, 0xa4, 0x58, 0x29, 0xee, 0x96, 0xf7, 0xf7, 0xa6, 0x4d, 0xc1, 0x54, 0xe5,
|
||||
0x47, 0x81, 0xe0, 0xc3, 0xe4, 0x51, 0x92, 0x1d, 0x24, 0xc7, 0xce, 0xa8, 0xe8, 0xb1, 0x76, 0xca,
|
||||
0x48, 0x15, 0xc9, 0x3b, 0xd4, 0x58, 0x7b, 0x48, 0xb0, 0xba, 0x83, 0x5c, 0xe3, 0x35, 0x54, 0xb4,
|
||||
0x0f, 0x9b, 0x09, 0x47, 0xe5, 0x12, 0x7f, 0x8b, 0x4a, 0x0d, 0xf9, 0xb8, 0x97, 0x8e, 0x0f, 0x1c,
|
||||
0x2d, 0xef, 0x3f, 0x35, 0x95, 0x6c, 0xcd, 0x54, 0xb6, 0x66, 0x3d, 0x91, 0xad, 0x1a, 0xe9, 0xeb,
|
||||
0xdf, 0xb7, 0x35, 0x6b, 0xd4, 0x24, 0x2f, 0xac, 0x48, 0x7d, 0xe6, 0x5c, 0xb5, 0xbc, 0x9f, 0x29,
|
||||
0xd1, 0x2b, 0xda, 0xee, 0x8a, 0x35, 0x0e, 0xe2, 0xaf, 0xd1, 0x92, 0xed, 0x0d, 0x28, 0x8b, 0x05,
|
||||
0xd0, 0x7d, 0xc6, 0x53, 0xd2, 0x1e, 0xdc, 0x47, 0x46, 0x9d, 0x72, 0xda, 0xf5, 0x22, 0x41, 0xf9,
|
||||
0x21, 0xf7, 0x84, 0xe7, 0x3a, 0x7e, 0x42, 0xf7, 0x83, 0x8e, 0xa0, 0x1c, 0x44, 0x32, 0xe3, 0xae,
|
||||
0x53, 0xb6, 0xc2, 0x06, 0x42, 0x2d, 0x97, 0x7b, 0xa1, 0x38, 0xe0, 0xdd, 0x88, 0x20, 0x60, 0x4c,
|
||||
0x06, 0xc1, 0x7b, 0x68, 0xbd, 0xce, 0xdc, 0x3e, 0xe5, 0x87, 0x2c, 0x10, 0x8e, 0x17, 0x50, 0xde,
|
||||
0xa8, 0x83, 0x0c, 0x74, 0x6b, 0x32, 0x21, 0xa9, 0xd7, 0xea, 0x51, 0xdf, 0x4f, 0x94, 0xa8, 0x02,
|
||||
0x39, 0xb4, 0x93, 0xfd, 0x66, 0xe3, 0xfc, 0x98, 0x6c, 0xa8, 0xa1, 0xa9, 0x48, 0x0e, 0xed, 0xd8,
|
||||
0x6a, 0x1e, 0x82, 0x2a, 0x74, 0x0b, 0xd6, 0xf2, 0x7b, 0xe4, 0xdf, 0x1f, 0x23, 0x6a, 0x9f, 0xb6,
|
||||
0x80, 0xec, 0x25, 0x2b, 0x83, 0x48, 0x33, 0x39, 0xf0, 0x3d, 0x27, 0x02, 0x23, 0x7c, 0xa4, 0xcc,
|
||||
0x64, 0x04, 0xe0, 0x1d, 0xb4, 0x0c, 0x41, 0x72, 0x45, 0xb2, 0x06, 0x05, 0x63, 0x18, 0xfe, 0x0c,
|
||||
0x15, 0x6d, 0xfb, 0x94, 0xac, 0xcf, 0xfe, 0x86, 0xb2, 0x7e, 0xeb, 0x87, 0x54, 0x64, 0x40, 0x4b,
|
||||
0x49, 0xae, 0x3e, 0x1d, 0x26, 0x9a, 0x91, 0x4b, 0xbc, 0x87, 0x16, 0x2e, 0x41, 0x76, 0x73, 0x89,
|
||||
0xc8, 0xc7, 0x58, 0x9e, 0xaa, 0xd3, 0x52, 0x45, 0x5f, 0xcd, 0x7d, 0xa1, 0xed, 0xfc, 0xaa, 0x23,
|
||||
0x1d, 0xa8, 0x0f, 0x86, 0x95, 0x71, 0x72, 0xed, 0xbd, 0x38, 0xf9, 0x5c, 0xae, 0x93, 0x17, 0xf3,
|
||||
0x9d, 0x7c, 0x3e, 0xeb, 0xe4, 0xe3, 0xa4, 0x58, 0x98, 0x20, 0x45, 0xea, 0x18, 0x8b, 0x19, 0xc7,
|
||||
0xf8, 0x66, 0xa4, 0xf2, 0x0d, 0x50, 0x79, 0xd6, 0x6b, 0x47, 0x97, 0x9c, 0x49, 0xd9, 0x4b, 0xb9,
|
||||
0xca, 0xde, 0x9a, 0x54, 0x76, 0x29, 0x5f, 0xd9, 0xfa, 0xbb, 0x28, 0x7b, 0x8c, 0x57, 0x68, 0x1a,
|
||||
0xaf, 0xca, 0x39, 0xbc, 0xca, 0x55, 0xca, 0xf2, 0x54, 0xa5, 0xac, 0xe4, 0x2b, 0xe5, 0x79, 0xae,
|
||||
0x52, 0x56, 0xdf, 0xaa, 0x94, 0x47, 0x13, 0x4a, 0x99, 0xb0, 0xf0, 0x67, 0x33, 0x59, 0xf8, 0x5a,
|
||||
0x9e, 0x85, 0x67, 0x1c, 0x6d, 0xfd, 0x1d, 0x1c, 0x2d, 0x91, 0x1c, 0xfe, 0x77, 0x92, 0xc3, 0xfb,
|
||||
0x68, 0xa3, 0x15, 0xbb, 0x2e, 0x8d, 0xa2, 0x1a, 0xed, 0x30, 0x4e, 0x9b, 0x4e, 0x14, 0x79, 0x41,
|
||||
0x97, 0x3c, 0x81, 0x9f, 0xc0, 0xdc, 0x1c, 0xfe, 0x1c, 0x6d, 0xbe, 0x70, 0x3c, 0x3f, 0xe6, 0x34,
|
||||
0x49, 0xa4, 0xae, 0x47, 0x36, 0xa1, 0xeb, 0x2d, 0x59, 0x39, 0xff, 0x26, 0x67, 0x57, 0x43, 0xe0,
|
||||
0xf5, 0xff, 0xd4, 0xfc, 0x47, 0xc0, 0x28, 0x0b, 0x43, 0x20, 0x99, 0x2c, 0x4c, 0x62, 0xba, 0x61,
|
||||
0x3f, 0x7e, 0x7f, 0x86, 0x3d, 0xf1, 0x13, 0xf4, 0x14, 0xee, 0x35, 0x0e, 0xfe, 0x07, 0x6e, 0x55,
|
||||
0x3b, 0xbb, 0xfe, 0xd3, 0x28, 0x5c, 0xdf, 0x19, 0xda, 0xcd, 0x9d, 0xa1, 0xfd, 0x71, 0x67, 0x68,
|
||||
0xaf, 0xee, 0x8d, 0xc2, 0xeb, 0x7b, 0xa3, 0x70, 0x73, 0x6f, 0x14, 0x7e, 0xbb, 0x37, 0x0a, 0x3f,
|
||||
0x7d, 0xfc, 0x4f, 0x66, 0xf5, 0xc6, 0x3f, 0xd3, 0x17, 0x8b, 0x00, 0x7c, 0xfa, 0x77, 0x00, 0x00,
|
||||
0x00, 0xff, 0xff, 0x9d, 0xa1, 0x76, 0xfc, 0x66, 0x0b, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *HealthCheck) Marshal() (dAtA []byte, err error) {
|
||||
|
@ -373,6 +376,11 @@ func (m *HealthCheck) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
|||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.ExposedPort != 0 {
|
||||
i = encodeVarintHealthcheck(dAtA, i, uint64(m.ExposedPort))
|
||||
i--
|
||||
dAtA[i] = 0x70
|
||||
}
|
||||
{
|
||||
size, err := m.EnterpriseMeta.MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
|
@ -1043,6 +1051,9 @@ func (m *HealthCheck) Size() (n int) {
|
|||
}
|
||||
l = m.EnterpriseMeta.Size()
|
||||
n += 1 + l + sovHealthcheck(uint64(l))
|
||||
if m.ExposedPort != 0 {
|
||||
n += 1 + sovHealthcheck(uint64(m.ExposedPort))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
|
@ -1715,6 +1726,25 @@ func (m *HealthCheck) Unmarshal(dAtA []byte) error {
|
|||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 14:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field ExposedPort", wireType)
|
||||
}
|
||||
m.ExposedPort = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowHealthcheck
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.ExposedPort |= int32(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipHealthcheck(dAtA[iNdEx:])
|
||||
|
|
|
@ -41,6 +41,9 @@ message HealthCheck {
|
|||
|
||||
// mog: func-to=EnterpriseMetaToStructs func-from=NewEnterpriseMetaFromStructs
|
||||
common.EnterpriseMeta EnterpriseMeta = 13 [(gogoproto.nullable) = false];
|
||||
|
||||
// mog: func-to=int func-from=int32
|
||||
int32 ExposedPort = 14;
|
||||
}
|
||||
|
||||
message HeaderValue {
|
||||
|
|
|
@ -99,6 +99,7 @@ func decorate(s string) string {
|
|||
}
|
||||
|
||||
func Run(t Failer, f func(r *R)) {
|
||||
t.Helper()
|
||||
run(DefaultFailer(), t, f)
|
||||
}
|
||||
|
||||
|
|
|
@ -9,7 +9,7 @@ readonly HASHICORP_DOCKER_PROXY="docker.mirror.hashicorp.services"
|
|||
# DEBUG=1 enables set -x for this script so echos every command run
|
||||
DEBUG=${DEBUG:-}
|
||||
|
||||
OLD_XDSV2_AWARE_CONSUL_VERSION="${OLD_XDSV2_AWARE_CONSUL_VERSION:-"${HASHICORP_DOCKER_PROXY}/library/consul:1.9.4"}"
|
||||
OLD_XDSV2_AWARE_CONSUL_VERSION="${OLD_XDSV2_AWARE_CONSUL_VERSION:-"${HASHICORP_DOCKER_PROXY}/library/consul:1.9.5"}"
|
||||
export OLD_XDSV2_AWARE_CONSUL_VERSION
|
||||
|
||||
# TEST_V2_XDS=1 causes it to do just the 'consul connect envoy' part using
|
||||
|
@ -18,7 +18,7 @@ TEST_V2_XDS=${TEST_V2_XDS:-}
|
|||
export TEST_V2_XDS
|
||||
|
||||
# ENVOY_VERSION to run each test against
|
||||
ENVOY_VERSION=${ENVOY_VERSION:-"1.18.2"}
|
||||
ENVOY_VERSION=${ENVOY_VERSION:-"1.18.3"}
|
||||
export ENVOY_VERSION
|
||||
|
||||
if [ ! -z "$DEBUG" ] ; then
|
||||
|
|
|
@ -0,0 +1,33 @@
|
|||
# Consul::HealthCheck::List
|
||||
|
||||
A presentational component for rendering HealthChecks.
|
||||
|
||||
```hbs preview-template
|
||||
<figure>
|
||||
<figcaption>Grab some mock data...</figcaption>
|
||||
|
||||
<DataSource @src="/default/dc-1/node/my-node" as |source|>
|
||||
<figure>
|
||||
<figcaption>but only show a max of 2 items for docs purposes</figcaption>
|
||||
|
||||
<Consul::HealthCheck::List
|
||||
@items={{slice 0 2 source.data.Checks}}
|
||||
/>
|
||||
|
||||
</figure>
|
||||
</DataSource>
|
||||
|
||||
</figure>
|
||||
```
|
||||
|
||||
## Arguments
|
||||
|
||||
| Argument/Attribute | Type | Default | Description |
|
||||
| --- | --- | --- | --- |
|
||||
| `items` | `array` | | An array of HealthChecks |
|
||||
|
||||
## See
|
||||
|
||||
- [Template Source Code](./index.hbs)
|
||||
|
||||
---
|
|
@ -25,7 +25,7 @@
|
|||
<dl>
|
||||
<dt>Type</dt>
|
||||
<dd data-health-check-type>
|
||||
{{or item.Type 'serf'}}
|
||||
{{item.Type}}
|
||||
{{#if item.Exposed}}
|
||||
<em
|
||||
data-test-exposed="true"
|
||||
|
|
|
@ -0,0 +1,49 @@
|
|||
# Notice
|
||||
|
||||
Presentational component for informational/warning/error banners/notices.
|
||||
|
||||
|
||||
```hbs preview-template
|
||||
<Notice
|
||||
@type={{this.type}}
|
||||
as |notice|>
|
||||
<notice.Header>
|
||||
<h3>Header</h3>
|
||||
</notice.Header>
|
||||
<notice.Body>
|
||||
<p>
|
||||
Body
|
||||
</p>
|
||||
</notice.Body>
|
||||
<notice.Footer>
|
||||
<p>
|
||||
<a href="">Footer link</a>
|
||||
</p>
|
||||
</notice.Footer>
|
||||
</Notice>
|
||||
|
||||
<figure>
|
||||
<figcaption>Provide a widget to change the <code>@type</code></figcaption>
|
||||
|
||||
<select
|
||||
onchange={{action (mut this.type) value="target.value"}}
|
||||
>
|
||||
<option>info</option>
|
||||
<option>warning</option>
|
||||
<option>error</option>
|
||||
</select>
|
||||
|
||||
</figure>
|
||||
```
|
||||
|
||||
## Arguments
|
||||
|
||||
| Argument/Attribute | Type | Default | Description |
|
||||
| --- | --- | --- | --- |
|
||||
| `type` | `String` | `info` | Type of notice [info\|warning\|error] |
|
||||
|
||||
## See
|
||||
|
||||
- [Template Source Code](./index.hbs)
|
||||
|
||||
---
|
|
@ -1,46 +0,0 @@
|
|||
import { Meta, Story, Canvas } from '@storybook/addon-docs/blocks';
|
||||
import { hbs } from 'ember-cli-htmlbars';
|
||||
|
||||
<Meta title="Components/Notice" component="Notice" />
|
||||
|
||||
# Notice
|
||||
|
||||
<Canvas>
|
||||
<Story name="Basic"
|
||||
argTypes={{
|
||||
type: {
|
||||
defaultValue: 'success',
|
||||
control: {
|
||||
type: 'select',
|
||||
options: [
|
||||
'success',
|
||||
'warning',
|
||||
'info',
|
||||
'highlight',
|
||||
]
|
||||
}
|
||||
}
|
||||
}}
|
||||
>{(args) => ({
|
||||
template: hbs`<Notice
|
||||
@type={{type}}
|
||||
as |notice|>
|
||||
<notice.Header>
|
||||
<h3>Header</h3>
|
||||
</notice.Header>
|
||||
<notice.Body>
|
||||
<p>
|
||||
Body
|
||||
</p>
|
||||
</notice.Body>
|
||||
<notice.Footer>
|
||||
<p>
|
||||
Footer
|
||||
</p>
|
||||
</notice.Footer>
|
||||
</Notice>`,
|
||||
context: args
|
||||
})}
|
||||
</Story>
|
||||
</Canvas>
|
||||
|
|
@ -1,5 +1,5 @@
|
|||
<div
|
||||
class="notice {{@type}}"
|
||||
class="notice {{or @type 'info'}}"
|
||||
...attributes
|
||||
>
|
||||
{{yield (hash
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
* Simple replacing decorator, with the primary usecase for avoiding null API
|
||||
* errors by decorating model attributes: @replace(null, []) @attr() Tags;
|
||||
*/
|
||||
const replace = (find, replace) => (target, propertyKey, desc) => {
|
||||
export const replace = (find, replace) => (target, propertyKey, desc) => {
|
||||
return {
|
||||
get: function() {
|
||||
const value = desc.get.apply(this, arguments);
|
||||
|
|
|
@ -9,7 +9,7 @@ export default {
|
|||
node: (item, value) => item.Kind === value,
|
||||
},
|
||||
check: {
|
||||
serf: (item, value) => item.Type === '',
|
||||
serf: (item, value) => item.Type === value,
|
||||
script: (item, value) => item.Type === value,
|
||||
http: (item, value) => item.Type === value,
|
||||
tcp: (item, value) => item.Type === value,
|
||||
|
|
|
@ -2,27 +2,29 @@ import Fragment from 'ember-data-model-fragments/fragment';
|
|||
import { array } from 'ember-data-model-fragments/attributes';
|
||||
import { attr } from '@ember-data/model';
|
||||
import { computed } from '@ember/object';
|
||||
import { replace, nullValue } from 'consul-ui/decorators/replace';
|
||||
|
||||
export const schema = {
|
||||
Status: {
|
||||
allowedValues: ['passing', 'warning', 'critical'],
|
||||
},
|
||||
Type: {
|
||||
allowedValues: ['', 'script', 'http', 'tcp', 'ttl', 'docker', 'grpc', 'alias'],
|
||||
allowedValues: ['serf', 'script', 'http', 'tcp', 'ttl', 'docker', 'grpc', 'alias'],
|
||||
},
|
||||
};
|
||||
|
||||
export default class HealthCheck extends Fragment {
|
||||
@attr('string') Name;
|
||||
@attr('string') CheckID;
|
||||
@attr('string') Type;
|
||||
// an empty Type means its the Consul serf Check
|
||||
@replace('', 'serf') @attr('string') Type;
|
||||
@attr('string') Status;
|
||||
@attr('string') Notes;
|
||||
@attr('string') Output;
|
||||
@attr('string') ServiceName;
|
||||
@attr('string') ServiceID;
|
||||
@attr('string') Node;
|
||||
@array('string') ServiceTags;
|
||||
@nullValue([]) @array('string') ServiceTags;
|
||||
@attr() Definition; // {}
|
||||
|
||||
// Exposed is only set correct if this Check is accessed via instance.MeshChecks
|
||||
|
|
|
@ -9,5 +9,5 @@ export default {
|
|||
ID: item => item.Service.ID || '',
|
||||
Notes: item => item.Notes,
|
||||
Output: item => item.Output,
|
||||
ServiceTags: item => asArray(item.ServiceTags || []),
|
||||
ServiceTags: item => asArray(item.ServiceTags),
|
||||
};
|
||||
|
|
|
@ -39,13 +39,13 @@ html.is-debug body > .brand-loader {
|
|||
background-color: white;
|
||||
margin-bottom: 2rem;
|
||||
}
|
||||
ol,
|
||||
ul {
|
||||
> ol,
|
||||
> ul {
|
||||
list-style-position: outside;
|
||||
margin-bottom: 1rem;
|
||||
margin-left: 2rem;
|
||||
}
|
||||
ul {
|
||||
> ul {
|
||||
list-style-type: disc;
|
||||
}
|
||||
}
|
||||
|
@ -74,11 +74,18 @@ html.is-debug body > .brand-loader {
|
|||
color: var(--gray-400);
|
||||
font-style: italic;
|
||||
}
|
||||
figcaption code {
|
||||
@extend %inline-code;
|
||||
}
|
||||
figure > [type='text'] {
|
||||
border: 1px solid var(--gray-999);
|
||||
width: 100%;
|
||||
padding: 0.5rem;
|
||||
}
|
||||
figure > select {
|
||||
border: 1px solid var(--gray-999);
|
||||
padding: 0.5rem;
|
||||
}
|
||||
}
|
||||
// &__snippets__tabs__button {
|
||||
// display: none;
|
||||
|
|
|
@ -47,6 +47,26 @@ as |route|>
|
|||
@filter={{filters}}
|
||||
/>
|
||||
{{/if}}
|
||||
{{#let (find-by "Type" "serf" items) as |serf|}}
|
||||
{{#if (and serf (eq serf.Status "critical"))}}
|
||||
<Notice
|
||||
data-test-critical-serf-notice
|
||||
@type="warning"
|
||||
as |notice|>
|
||||
<notice.Header>
|
||||
<h2>
|
||||
{{t "routes.dc.nodes.show.healthchecks.critical-serf-notice.header"}}
|
||||
</h2>
|
||||
</notice.Header>
|
||||
<notice.Body>
|
||||
{{t
|
||||
"routes.dc.nodes.show.healthchecks.critical-serf-notice.body"
|
||||
htmlSafe=true
|
||||
}}
|
||||
</notice.Body>
|
||||
</Notice>
|
||||
{{/if}}
|
||||
{{/let}}
|
||||
<DataCollection
|
||||
@type="health-check"
|
||||
@sort={{sort.value}}
|
||||
|
@ -62,9 +82,10 @@ as |route|>
|
|||
<collection.Empty>
|
||||
<EmptyState>
|
||||
<BlockSlot @name="body">
|
||||
<p>
|
||||
This node has no health checks{{#if (gt items.length 0)}} matching that search{{/if}}.
|
||||
</p>
|
||||
{{t "routes.dc.nodes.show.healthchecks.empty"
|
||||
items=items.length
|
||||
htmlSafe=true
|
||||
}}
|
||||
</BlockSlot>
|
||||
</EmptyState>
|
||||
</collection.Empty>
|
||||
|
|
|
@ -44,6 +44,26 @@ as |route|>
|
|||
/>
|
||||
{{/if}}
|
||||
|
||||
{{#let (find-by "Type" "serf" items) as |serf|}}
|
||||
{{#if (and serf (eq serf.Status "critical"))}}
|
||||
<Notice
|
||||
data-test-critical-serf-notice
|
||||
@type="warning"
|
||||
as |notice|>
|
||||
<notice.Header>
|
||||
<h2>
|
||||
{{t "routes.dc.services.instance.healthchecks.critical-serf-notice.header"}}
|
||||
</h2>
|
||||
</notice.Header>
|
||||
<notice.Body>
|
||||
{{t
|
||||
"routes.dc.services.instance.healthchecks.critical-serf-notice.body"
|
||||
htmlSafe=true
|
||||
}}
|
||||
</notice.Body>
|
||||
</Notice>
|
||||
{{/if}}
|
||||
{{/let}}
|
||||
<DataCollection
|
||||
@type="health-check"
|
||||
@sort={{sort.value}}
|
||||
|
@ -59,9 +79,10 @@ as |route|>
|
|||
<collection.Empty>
|
||||
<EmptyState>
|
||||
<BlockSlot @name="body">
|
||||
<p>
|
||||
This instance has no health checks{{#if (gt items.length 0)}} matching that search{{/if}}.
|
||||
</p>
|
||||
{{t "routes.dc.services.instance.healthchecks.empty"
|
||||
items=items.length
|
||||
htmlSafe=true
|
||||
}}
|
||||
</BlockSlot>
|
||||
</EmptyState>
|
||||
</collection.Empty>
|
||||
|
|
|
@ -8,13 +8,19 @@ module.exports = function(defaults) {
|
|||
|
||||
const env = EmberApp.env();
|
||||
const prodlike = ['production', 'staging'];
|
||||
const sourcemaps = !['production'].includes(env);
|
||||
|
||||
const trees = {};
|
||||
const addons = {};
|
||||
const outputPaths = {};
|
||||
let excludeFiles = [];
|
||||
|
||||
const sourcemaps = !['production'].includes(env);
|
||||
const babel = {
|
||||
plugins: [
|
||||
'@babel/plugin-proposal-object-rest-spread',
|
||||
],
|
||||
sourceMaps: sourcemaps ? 'inline' : false,
|
||||
}
|
||||
|
||||
// setup up different build configuration depending on environment
|
||||
if(!['test'].includes(env)) {
|
||||
|
@ -46,6 +52,13 @@ module.exports = function(defaults) {
|
|||
}
|
||||
}
|
||||
}
|
||||
if(['production'].includes(env)) {
|
||||
// everything apart from production is 'debug', including test
|
||||
// which means this and everything it affects is never tested
|
||||
babel.plugins.push(
|
||||
['strip-function-call', {'strip': ['Ember.runInDebug']}]
|
||||
)
|
||||
}
|
||||
//
|
||||
|
||||
trees.app = new Funnel('app', {
|
||||
|
@ -69,10 +82,7 @@ module.exports = function(defaults) {
|
|||
'ember-cli-math-helpers': {
|
||||
only: ['div'],
|
||||
},
|
||||
babel: {
|
||||
plugins: ['@babel/plugin-proposal-object-rest-spread'],
|
||||
sourceMaps: sourcemaps ? 'inline' : false,
|
||||
},
|
||||
babel: babel,
|
||||
autoImport: {
|
||||
// allows use of a CSP without 'unsafe-eval' directive
|
||||
forbidEval: true,
|
||||
|
|
|
@ -71,6 +71,7 @@
|
|||
"babel-loader": "^8.1.0",
|
||||
"babel-plugin-ember-modules-api-polyfill": "^3.2.0",
|
||||
"babel-plugin-htmlbars-inline-precompile": "^5.0.0",
|
||||
"babel-plugin-strip-function-call": "^1.0.2",
|
||||
"base64-js": "^1.3.0",
|
||||
"broccoli-asset-rev": "^3.0.0",
|
||||
"broccoli-funnel": "^3.0.3",
|
||||
|
|
|
@ -2,8 +2,9 @@
|
|||
Feature: dc / nodes / show: Show node
|
||||
Background:
|
||||
Given 1 datacenter model with the value "dc1"
|
||||
# 2 nodes are required for the RTT tab to be visible
|
||||
Scenario: Given 2 nodes all the tabs are visible and clickable
|
||||
Given 2 node models from yaml
|
||||
Given 2 node models
|
||||
When I visit the node page for yaml
|
||||
---
|
||||
dc: dc1
|
||||
|
|
|
@ -0,0 +1,40 @@
|
|||
@setupApplicationTest
|
||||
Feature: dc / nodes / show / health-checks
|
||||
Background:
|
||||
Given 1 datacenter model with the value "dc1"
|
||||
Scenario: A failing serf check
|
||||
Given 1 node model from yaml
|
||||
---
|
||||
ID: node-0
|
||||
Checks:
|
||||
- Type: ''
|
||||
Name: Serf Health Status
|
||||
CheckID: serfHealth
|
||||
Status: critical
|
||||
Output: ouch
|
||||
---
|
||||
When I visit the node page for yaml
|
||||
---
|
||||
dc: dc1
|
||||
node: node-0
|
||||
---
|
||||
And I see healthChecksIsSelected on the tabs
|
||||
And I see criticalSerfNotice on the tabs.healthChecksTab
|
||||
Scenario: A passing serf check
|
||||
Given 1 node model from yaml
|
||||
---
|
||||
ID: node-0
|
||||
Checks:
|
||||
- Type: ''
|
||||
Name: Serf Health Status
|
||||
CheckID: serfHealth
|
||||
Status: passing
|
||||
Output: Agent alive and reachable
|
||||
---
|
||||
When I visit the node page for yaml
|
||||
---
|
||||
dc: dc1
|
||||
node: node-0
|
||||
---
|
||||
And I see healthChecksIsSelected on the tabs
|
||||
And I don't see criticalSerfNotice on the tabs.healthChecksTab
|
|
@ -0,0 +1,66 @@
|
|||
@setupApplicationTest
|
||||
Feature: dc / services / instances / health-checks
|
||||
Background:
|
||||
Given 1 datacenter model with the value "dc1"
|
||||
And 1 proxy model from yaml
|
||||
---
|
||||
- ServiceProxy:
|
||||
DestinationServiceName: service-1
|
||||
DestinationServiceID: ~
|
||||
---
|
||||
Scenario: A failing serf check
|
||||
Given 2 instance models from yaml
|
||||
---
|
||||
- Service:
|
||||
ID: service-0-with-id
|
||||
Node:
|
||||
Node: node-0
|
||||
- Service:
|
||||
ID: service-1-with-id
|
||||
Node:
|
||||
Node: another-node
|
||||
Checks:
|
||||
- Type: ''
|
||||
Name: Serf Health Status
|
||||
CheckID: serfHealth
|
||||
Status: critical
|
||||
Output: ouch
|
||||
---
|
||||
When I visit the instance page for yaml
|
||||
---
|
||||
dc: dc1
|
||||
service: service-0
|
||||
node: another-node
|
||||
id: service-1-with-id
|
||||
---
|
||||
Then the url should be /dc1/services/service-0/instances/another-node/service-1-with-id/health-checks
|
||||
And I see healthChecksIsSelected on the tabs
|
||||
And I see criticalSerfNotice on the tabs.healthChecksTab
|
||||
Scenario: A passing serf check
|
||||
Given 2 instance models from yaml
|
||||
---
|
||||
- Service:
|
||||
ID: service-0-with-id
|
||||
Node:
|
||||
Node: node-0
|
||||
- Service:
|
||||
ID: service-1-with-id
|
||||
Node:
|
||||
Node: another-node
|
||||
Checks:
|
||||
- Type: ''
|
||||
Name: Serf Health Status
|
||||
CheckID: serfHealth
|
||||
Status: passing
|
||||
Output: Agent alive and reachable
|
||||
---
|
||||
When I visit the instance page for yaml
|
||||
---
|
||||
dc: dc1
|
||||
service: service-0
|
||||
node: another-node
|
||||
id: service-1-with-id
|
||||
---
|
||||
Then the url should be /dc1/services/service-0/instances/another-node/service-1-with-id/health-checks
|
||||
And I see healthChecksIsSelected on the tabs
|
||||
And I don't see criticalSerfNotice on the tabs.healthChecksTab
|
|
@ -0,0 +1,10 @@
|
|||
import steps from '../../../steps';
|
||||
|
||||
// step definitions that are shared between features should be moved to the
|
||||
// tests/acceptance/steps/steps.js file
|
||||
|
||||
export default function(assert) {
|
||||
return steps(assert).then('I should find a file', function() {
|
||||
assert.ok(true, this.step);
|
||||
});
|
||||
}
|
|
@ -0,0 +1,10 @@
|
|||
import steps from '../../../steps';
|
||||
|
||||
// step definitions that are shared between features should be moved to the
|
||||
// tests/acceptance/steps/steps.js file
|
||||
|
||||
export default function(assert) {
|
||||
return steps(assert).then('I should find a file', function() {
|
||||
assert.ok(true, this.step);
|
||||
});
|
||||
}
|
|
@ -164,6 +164,7 @@ export default {
|
|||
visitable,
|
||||
alias,
|
||||
attribute,
|
||||
isPresent,
|
||||
collection,
|
||||
text,
|
||||
tabgroup,
|
||||
|
@ -177,7 +178,9 @@ export default {
|
|||
visitable,
|
||||
deletable,
|
||||
clickable,
|
||||
alias,
|
||||
attribute,
|
||||
isPresent,
|
||||
collection,
|
||||
tabgroup,
|
||||
text,
|
||||
|
|
|
@ -2,13 +2,15 @@ export default function(
|
|||
visitable,
|
||||
deletable,
|
||||
clickable,
|
||||
alias,
|
||||
attribute,
|
||||
present,
|
||||
collection,
|
||||
tabs,
|
||||
text,
|
||||
healthChecks
|
||||
) {
|
||||
return {
|
||||
const page = {
|
||||
visit: visitable('/:dc/nodes/:node'),
|
||||
tabs: tabs('tab', [
|
||||
'health-checks',
|
||||
|
@ -17,7 +19,7 @@ export default function(
|
|||
'lock-sessions',
|
||||
'metadata',
|
||||
]),
|
||||
healthChecks: healthChecks(),
|
||||
healthChecks: alias('tabs.healthChecksTab.healthChecks'),
|
||||
services: collection('.consul-service-instance-list > ul > li:not(:first-child)', {
|
||||
name: text('[data-test-service-name]'),
|
||||
port: attribute('data-test-service-port', '[data-test-service-port]'),
|
||||
|
@ -31,4 +33,9 @@ export default function(
|
|||
}),
|
||||
metadata: collection('.consul-metadata-list [data-test-tabular-row]', {}),
|
||||
};
|
||||
page.tabs.healthChecksTab = {
|
||||
criticalSerfNotice: present('[data-test-critical-serf-notice]'),
|
||||
healthChecks: healthChecks(),
|
||||
};
|
||||
return page;
|
||||
}
|
||||
|
|
|
@ -2,13 +2,14 @@ export default function(
|
|||
visitable,
|
||||
alias,
|
||||
attribute,
|
||||
present,
|
||||
collection,
|
||||
text,
|
||||
tabs,
|
||||
upstreams,
|
||||
healthChecks
|
||||
) {
|
||||
return {
|
||||
const page = {
|
||||
visit: visitable('/:dc/services/:service/instances/:node/:id'),
|
||||
externalSource: attribute('data-test-external-source', '[data-test-external-source]', {
|
||||
scope: '.title',
|
||||
|
@ -26,4 +27,9 @@ export default function(
|
|||
}),
|
||||
metadata: collection('.metadata [data-test-tabular-row]', {}),
|
||||
};
|
||||
page.tabs.healthChecksTab = {
|
||||
criticalSerfNotice: present('[data-test-critical-serf-notice]'),
|
||||
healthChecks: healthChecks(),
|
||||
};
|
||||
return page;
|
||||
}
|
||||
|
|
|
@ -1,4 +1,20 @@
|
|||
dc:
|
||||
nodes:
|
||||
show:
|
||||
healthchecks:
|
||||
empty: |
|
||||
<p>
|
||||
This node has no health checks{items, select,
|
||||
0 {}
|
||||
other { matching that search}
|
||||
}.
|
||||
</p>
|
||||
critical-serf-notice:
|
||||
header: Failing serf check
|
||||
body: |
|
||||
<p>
|
||||
This node has a failing serf node check. The health statuses shown on this page are the statuses as they were known before the node became unreachable.
|
||||
</p>
|
||||
services:
|
||||
show:
|
||||
upstreams:
|
||||
|
@ -7,6 +23,20 @@ dc:
|
|||
Upstreams are services that may receive traffic from this gateway. If you are not using Consul DNS, please make sure your <code>Host:</code> header uses the correct domain name for the gateway to correctly proxy to its upstreams. Learn more about configuring gateways in our <a href="{CONSUL_DOCS_URL}/connect/ingress-gateways" target="_blank" rel="noopener noreferrer">documentation</a>.
|
||||
</p>
|
||||
instance:
|
||||
healthchecks:
|
||||
empty: |
|
||||
<p>
|
||||
This instance has no health checks{items, select,
|
||||
0 {}
|
||||
other { matching that search}
|
||||
}.
|
||||
</p>
|
||||
critical-serf-notice:
|
||||
header: Failing serf check
|
||||
body: |
|
||||
<p>
|
||||
This instance has a failing serf node check. The health statuses shown on this page are the statuses as they were known before the node became unreachable.
|
||||
</p>
|
||||
upstreams:
|
||||
tproxy-mode:
|
||||
header: Transparent proxy mode
|
||||
|
|
|
@ -2746,6 +2746,11 @@ babel-plugin-polyfill-regenerator@^0.1.2:
|
|||
dependencies:
|
||||
"@babel/helper-define-polyfill-provider" "^0.1.5"
|
||||
|
||||
babel-plugin-strip-function-call@^1.0.2:
|
||||
version "1.0.2"
|
||||
resolved "https://registry.yarnpkg.com/babel-plugin-strip-function-call/-/babel-plugin-strip-function-call-1.0.2.tgz#374a68b5648e16e2b6d1effd280c3abc88648e3a"
|
||||
integrity sha1-N0potWSOFuK20e/9KAw6vIhkjjo=
|
||||
|
||||
babel-plugin-syntax-async-functions@^6.8.0:
|
||||
version "6.13.0"
|
||||
resolved "https://registry.yarnpkg.com/babel-plugin-syntax-async-functions/-/babel-plugin-syntax-async-functions-6.13.0.tgz#cad9cad1191b5ad634bf30ae0872391e0647be95"
|
||||
|
|
|
@ -74,7 +74,7 @@ proxy configuration needed.
|
|||
allowed to access by [Connect intentions](/docs/connect/intentions).
|
||||
|
||||
- `-envoy-version` - The version of envoy that is being started. Default is
|
||||
`1.18.2`. This is required so that the correct configuration can be generated.
|
||||
`1.18.3`. This is required so that the correct configuration can be generated.
|
||||
|
||||
- `-no-central-config` - By default the proxy's bootstrap configuration can be
|
||||
customized centrally. This requires that the command run on the same agent
|
||||
|
|
|
@ -417,7 +417,8 @@ These metrics are used to monitor the health of the Consul servers.
|
|||
| `consul.grpc.server.connection.count` | Counts the number of new gRPC connections received by the server. | connections | counter |
|
||||
| `consul.grpc.server.connections` | Measures the number of active gRPC connections open on the server. | connections | gauge |
|
||||
| `consul.grpc.server.stream.count` | Counts the number of new gRPC streams received by the server. | streams | counter |
|
||||
| `consul.grpc.server.streams` | Measures the number of active gRPC streams handled by the server. | streams | guage |
|
||||
| `consul.grpc.server.streams` | Measures the number of active gRPC streams handled by the server. | streams | gauge |
|
||||
| `consul.xds.server.streams` | Measures the number of active xDS streams handled by the server split by protocol version. | streams | gauge |
|
||||
|
||||
## Cluster Health
|
||||
|
||||
|
|
|
@ -34,8 +34,8 @@ compatible Envoy versions.
|
|||
|
||||
| Consul Version | Compatible Envoy Versions |
|
||||
| ------------------- | -------------------------------- |
|
||||
| 1.10.x | 1.18.2, 1.17.2, 1.16.3, 1.15.4 |
|
||||
| 1.9.x | 1.16.3, 1.15.4, 1.14.7‡, 1.13.7‡ |
|
||||
| 1.10.x | 1.18.3, 1.17.3, 1.16.4, 1.15.5 |
|
||||
| 1.9.x | 1.16.4, 1.15.5, 1.14.7‡, 1.13.7‡ |
|
||||
| 1.8.x | 1.14.7, 1.13.7, 1.12.7, 1.11.2 |
|
||||
| 1.7.x | 1.13.7, 1.12.7, 1.11.2, 1.10.0\* |
|
||||
| 1.6.x, 1.5.3, 1.5.2 | 1.11.1, 1.10.0, 1.9.1, 1.8.0† |
|
||||
|
|
|
@ -20,70 +20,81 @@ must be the [primary](/docs/k8s/installation/multi-cluster/kubernetes#primary-da
|
|||
If your primary datacenter is running on Kubernetes, use the Helm config from the
|
||||
[Primary Datacenter](/docs/k8s/installation/multi-cluster/kubernetes#primary-datacenter) section to install Consul.
|
||||
|
||||
Once installed, and with the `ProxyDefaults` [resource created](/docs/k8s/installation/multi-cluster/kubernetes#proxydefaults),
|
||||
Once installed on Kubernetes, and with the `ProxyDefaults` [resource created](/docs/k8s/installation/multi-cluster/kubernetes#proxydefaults),
|
||||
you'll need to export the following information from the primary Kubernetes cluster:
|
||||
|
||||
* The certificate authority cert:
|
||||
- Certificate authority cert and key (in order to create SSL certs for VMs)
|
||||
- External addresses of Kubernetes mesh gateways
|
||||
- Replication ACL token
|
||||
- Gossip encryption key
|
||||
|
||||
```sh
|
||||
kubectl get secrets/consul-ca-cert --template='{{index .data "tls.crt" }}' |
|
||||
base64 -D > consul-agent-ca.pem
|
||||
```
|
||||
The following sections detail how to export this data.
|
||||
|
||||
* The certificate authority signing key:
|
||||
### Certificates
|
||||
|
||||
```sh
|
||||
kubectl get secrets/consul-ca-key --template='{{index .data "tls.key" }}' |
|
||||
base64 -D > consul-agent-ca-key.pem
|
||||
```
|
||||
1. Retrieve the certificate authority cert:
|
||||
|
||||
With the `consul-agent-ca.pem` and `consul-agent-ca-key.pem` files you can
|
||||
create certificates for your servers and clients running on VMs that share the
|
||||
same certificate authority as your Kubernetes servers.
|
||||
```sh
|
||||
kubectl get secrets/consul-ca-cert --template='{{index .data "tls.crt" }}' |
|
||||
base64 -D > consul-agent-ca.pem
|
||||
```
|
||||
|
||||
You can use the `consul tls` commands to generate those certificates:
|
||||
1. And the certificate authority signing key:
|
||||
|
||||
```sh
|
||||
# NOTE: consul-agent-ca.pem and consul-agent-ca-key.pem must be in the current
|
||||
# directory.
|
||||
$ consul tls cert create -server -dc=vm-dc -node <node_name>
|
||||
==> WARNING: Server Certificates grants authority to become a
|
||||
server and access all state in the cluster including root keys
|
||||
and all ACL tokens. Do not distribute them to production hosts
|
||||
that are not server nodes. Store them as securely as CA keys.
|
||||
==> Using consul-agent-ca.pem and consul-agent-ca-key.pem
|
||||
==> Saved vm-dc-server-consul-0.pem
|
||||
==> Saved vm-dc-server-consul-0-key.pem
|
||||
```
|
||||
```sh
|
||||
kubectl get secrets/consul-ca-key --template='{{index .data "tls.key" }}' |
|
||||
base64 -D > consul-agent-ca-key.pem
|
||||
```
|
||||
|
||||
-> Note the `-node` option in the above command. This should be same as the node name of the [Consul Agent](https://www.consul.io/docs/agent#running-an-agent). This is a [requirement](https://www.consul.io/docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways#tls) for Consul Federation to work. Alternatively, if you plan to use the same certificate and key pair on all your Consul server nodes, or you don't know the nodename in advance, use `-node "*"` instead.
|
||||
Not satisfying this requirement would result in the following error in the Consul Server logs:
|
||||
`[ERROR] agent.server.rpc: TLS handshake failed: conn=from= error="remote error: tls: bad certificate"`
|
||||
1. With the `consul-agent-ca.pem` and `consul-agent-ca-key.pem` files you can
|
||||
create certificates for your servers and clients running on VMs that share the
|
||||
same certificate authority as your Kubernetes servers.
|
||||
|
||||
See the help for output of `consul tls cert create -h` to see more options
|
||||
for generating server certificates.
|
||||
You can use the `consul tls` commands to generate those certificates:
|
||||
|
||||
These certificates can be used in your server config file:
|
||||
```sh
|
||||
# NOTE: consul-agent-ca.pem and consul-agent-ca-key.pem must be in the current
|
||||
# directory.
|
||||
$ consul tls cert create -server -dc=vm-dc -node <node_name>
|
||||
==> WARNING: Server Certificates grants authority to become a
|
||||
server and access all state in the cluster including root keys
|
||||
and all ACL tokens. Do not distribute them to production hosts
|
||||
that are not server nodes. Store them as securely as CA keys.
|
||||
==> Using consul-agent-ca.pem and consul-agent-ca-key.pem
|
||||
==> Saved vm-dc-server-consul-0.pem
|
||||
==> Saved vm-dc-server-consul-0-key.pem
|
||||
```
|
||||
|
||||
```hcl
|
||||
# server.hcl
|
||||
cert_file = "vm-dc-server-consul-0.pem"
|
||||
key_file = "vm-dc-server-consul-0-key.pem"
|
||||
ca_file = "consul-agent-ca.pem"
|
||||
```
|
||||
-> Note the `-node` option in the above command. This should be same as the node name of the [Consul Agent](https://www.consul.io/docs/agent#running-an-agent). This is a [requirement](https://www.consul.io/docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways#tls) for Consul Federation to work. Alternatively, if you plan to use the same certificate and key pair on all your Consul server nodes, or you don't know the nodename in advance, use `-node "*"` instead.
|
||||
Not satisfying this requirement would result in the following error in the Consul Server logs:
|
||||
`[ERROR] agent.server.rpc: TLS handshake failed: conn=from= error="remote error: tls: bad certificate"`
|
||||
|
||||
For clients, you can generate TLS certs with:
|
||||
See the help for output of `consul tls cert create -h` to see more options
|
||||
for generating server certificates.
|
||||
|
||||
```shell-session
|
||||
$ consul tls cert create -client
|
||||
==> Using consul-agent-ca.pem and consul-agent-ca-key.pem
|
||||
==> Saved dc1-client-consul-0.pem
|
||||
==> Saved dc1-client-consul-0-key.pem
|
||||
```
|
||||
1. These certificates can be used in your server config file:
|
||||
|
||||
Or use the [auto_encrypt](/docs/agent/options#auto_encrypt) feature.
|
||||
```hcl
|
||||
# server.hcl
|
||||
cert_file = "vm-dc-server-consul-0.pem"
|
||||
key_file = "vm-dc-server-consul-0-key.pem"
|
||||
ca_file = "consul-agent-ca.pem"
|
||||
```
|
||||
|
||||
1. The WAN addresses of the mesh gateways:
|
||||
1. For clients, you can generate TLS certs with:
|
||||
|
||||
```shell-session
|
||||
$ consul tls cert create -client
|
||||
==> Using consul-agent-ca.pem and consul-agent-ca-key.pem
|
||||
==> Saved dc1-client-consul-0.pem
|
||||
==> Saved dc1-client-consul-0-key.pem
|
||||
```
|
||||
|
||||
Or use the [auto_encrypt](/docs/agent/options#auto_encrypt) feature.
|
||||
|
||||
### Mesh Gateway Addresses
|
||||
|
||||
Retrieve the WAN addresses of the mesh gateways:
|
||||
|
||||
```shell-session
|
||||
$ kubectl exec statefulset/consul-server -- sh -c \
|
||||
|
@ -108,7 +119,9 @@ setting:
|
|||
primary_gateways = ["1.2.3.4:443"]
|
||||
```
|
||||
|
||||
1. If ACLs are enabled, you'll also need the replication ACL token:
|
||||
### Replication ACL Token
|
||||
|
||||
If ACLs are enabled, you'll also need the replication ACL token:
|
||||
|
||||
```shell-session
|
||||
$ kubectl get secrets/consul-acl-replication-acl-token --template='{{.data.token}}'
|
||||
|
@ -116,25 +129,31 @@ e7924dd1-dc3f-f644-da54-81a73ba0a178
|
|||
```
|
||||
|
||||
This token will be used in the server config for the replication token.
|
||||
You must also create your own agent policy and token.
|
||||
|
||||
```hcl
|
||||
acls {
|
||||
tokens {
|
||||
agent = "<your agent token>"
|
||||
replication = "e7924dd1-dc3f-f644-da54-81a73ba0a178"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
1. If gossip encryption is enabled, you'll need the key as well. The command
|
||||
to retrieve the key will depend on which Kubernetes secret you've stored it in.
|
||||
-> **NOTE:** You'll also need to set up additional ACL tokens as needed by the
|
||||
ACL system. See tutorial [Secure Consul with Access Control Lists (ACLs)](https://learn.hashicorp.com/tutorials/consul/access-control-setup-production#apply-individual-tokens-to-agents)
|
||||
for more information.
|
||||
|
||||
This key will be used in server and client configs for the `encrypt` setting:
|
||||
### Gossip Encryption Key
|
||||
|
||||
```hcl
|
||||
encrypt = "uF+GsbI66cuWU21kiXLze5JLEX5j4iDFlDTb0ZWNpDI="
|
||||
```
|
||||
If gossip encryption is enabled, you'll need the key as well. The command
|
||||
to retrieve the key will depend on which Kubernetes secret you've stored it in.
|
||||
|
||||
This key will be used in server and client configs for the `encrypt` setting:
|
||||
|
||||
```hcl
|
||||
encrypt = "uF+GsbI66cuWU21kiXLze5JLEX5j4iDFlDTb0ZWNpDI="
|
||||
```
|
||||
|
||||
### Final Configuration
|
||||
|
||||
A final example server config file might look like:
|
||||
|
||||
|
@ -192,41 +211,41 @@ You'll need:
|
|||
be routable from the Kubernetes cluster.
|
||||
1. If ACLs are enabled you must create an ACL replication token with the following rules:
|
||||
|
||||
```hcl
|
||||
acl = "write"
|
||||
operator = "write"
|
||||
agent_prefix "" {
|
||||
policy = "read"
|
||||
}
|
||||
node_prefix "" {
|
||||
policy = "write"
|
||||
}
|
||||
service_prefix "" {
|
||||
policy = "read"
|
||||
intentions = "read"
|
||||
}
|
||||
```
|
||||
```hcl
|
||||
acl = "write"
|
||||
operator = "write"
|
||||
agent_prefix "" {
|
||||
policy = "read"
|
||||
}
|
||||
node_prefix "" {
|
||||
policy = "write"
|
||||
}
|
||||
service_prefix "" {
|
||||
policy = "read"
|
||||
intentions = "read"
|
||||
}
|
||||
```
|
||||
|
||||
This token is used for ACL replication and for automatic ACL management in Kubernetes.
|
||||
This token is used for ACL replication and for automatic ACL management in Kubernetes.
|
||||
|
||||
If you're running Consul Enterprise you'll need the rules:
|
||||
If you're running Consul Enterprise you'll need the rules:
|
||||
|
||||
```hcl
|
||||
acl = "write"
|
||||
operator = "write"
|
||||
agent_prefix "" {
|
||||
policy = "read"
|
||||
}
|
||||
node_prefix "" {
|
||||
policy = "write"
|
||||
}
|
||||
namespace_prefix "" {
|
||||
service_prefix "" {
|
||||
policy = "read"
|
||||
intentions = "read"
|
||||
}
|
||||
}
|
||||
```
|
||||
```hcl
|
||||
acl = "write"
|
||||
operator = "write"
|
||||
agent_prefix "" {
|
||||
policy = "read"
|
||||
}
|
||||
node_prefix "" {
|
||||
policy = "write"
|
||||
}
|
||||
namespace_prefix "" {
|
||||
service_prefix "" {
|
||||
policy = "read"
|
||||
intentions = "read"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
1. If gossip encryption is enabled, you'll need the key.
|
||||
|
||||
|
@ -293,11 +312,11 @@ gateways running on VMs.
|
|||
With your config file ready to go, follow our [Installation Guide](/docs/k8s/installation/install)
|
||||
to install Consul on your secondary cluster(s).
|
||||
|
||||
## Next Steps
|
||||
|
||||
After installation, if you're using consul-helm 0.30.0+, [create the
|
||||
`ProxyDefaults` resource](/docs/k8s/installation/multi-cluster/kubernetes#proxydefaults)
|
||||
to allow traffic between datacenters.
|
||||
|
||||
Follow the [Verifying Federation](/docs/k8s/installation/multi-cluster/kubernetes#verifying-federation)
|
||||
## Next Steps
|
||||
|
||||
In both cases (Kubernetes as primary or secondary), after installation, follow the [Verifying Federation](/docs/k8s/installation/multi-cluster/kubernetes#verifying-federation)
|
||||
section to verify that federation is working as expected.
|
||||
|
|
|
@ -32,7 +32,7 @@ export default function NetworkInfrastructureAutomationPage() {
|
|||
textSplit={{
|
||||
heading: 'Automated Firewalling',
|
||||
content:
|
||||
'Using Consul-Terraform-Sync to automate security updates, organizations can elevating their security posture and adopt fine-grained access policies.',
|
||||
'Using Consul-Terraform-Sync to automate security updates, organizations can elevate their security posture and adopt fine-grained access policies.',
|
||||
textSide: 'left',
|
||||
links: [
|
||||
{
|
||||
|
|
Loading…
Reference in New Issue