mirror of https://github.com/hashicorp/consul
Terminating Gateways Proxy Configuration (#7631)
commit c1dc2f12f7
@ -776,12 +776,13 @@ func TestInternal_TerminatingGatewayServices(t *testing.T) {
KeyFile: "",
},
{
Service: structs.NewServiceID("redis", nil),
Gateway: structs.NewServiceID("gateway", nil),
GatewayKind: structs.ServiceKindTerminatingGateway,
CAFile: "ca.crt",
CertFile: "client.crt",
KeyFile: "client.key",
Service: structs.NewServiceID("redis", nil),
Gateway: structs.NewServiceID("gateway", nil),
GatewayKind: structs.ServiceKindTerminatingGateway,
CAFile: "ca.crt",
CertFile: "client.crt",
KeyFile: "client.key",
FromWildcard: true,
},
}
@ -1039,7 +1039,7 @@ func (s *Store) serviceNodes(ws memdb.WatchSet, serviceName string, connect bool
// to the mesh with a mix of sidecars and gateways until all its instances have a sidecar.
if connect {
// Look up gateway nodes associated with the service
_, nodes, chs, err := s.serviceGatewayNodes(tx, serviceName, structs.ServiceKindTerminatingGateway, entMeta)
_, nodes, chs, err := s.serviceGatewayNodes(tx, ws, serviceName, structs.ServiceKindTerminatingGateway, entMeta)
if err != nil {
return 0, nil, fmt.Errorf("failed gateway nodes lookup: %v", err)
}
@ -1461,12 +1461,25 @@ func (s *Store) deleteServiceTxn(tx *memdb.Txn, idx uint64, nodeName, serviceID
return err
}

// Clean up association between service name and gateways
if _, err := tx.DeleteAll(gatewayServicesTableName, "service", structs.NewServiceID(svc.ServiceName, entMeta)); err != nil {
return fmt.Errorf("failed to truncate gateway services table: %v", err)
// Clean up association between service name and gateways if needed
gateways, err := s.serviceGateways(tx, svc.ServiceName, &svc.EnterpriseMeta)
if err != nil {
return fmt.Errorf("failed gateway lookup for %q: %s", svc.ServiceName, err)
}
if err := indexUpdateMaxTxn(tx, idx, gatewayServicesTableName); err != nil {
return fmt.Errorf("failed updating gateway-services index: %v", err)
for mapping := gateways.Next(); mapping != nil; mapping = gateways.Next() {
if gs, ok := mapping.(*structs.GatewayService); ok && gs != nil {
// Only delete if association was created by a wildcard specifier.
// Otherwise the service was specified in the config entry, and the association should be maintained
// for when the service is re-registered
if gs.FromWildcard {
if err := tx.Delete(gatewayServicesTableName, gs); err != nil {
return fmt.Errorf("failed to truncate gateway services table: %v", err)
}
if err := indexUpdateMaxTxn(tx, idx, gatewayServicesTableName); err != nil {
return fmt.Errorf("failed updating gateway-services index: %v", err)
}
}
}
}
}
} else {
@ -1943,7 +1956,7 @@ func (s *Store) CheckConnectServiceNodes(ws memdb.WatchSet, serviceName string,
func (s *Store) CheckIngressServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *structs.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) {
tx := s.db.Txn(false)
defer tx.Abort()
maxIdx, nodes, watchChs, err := s.serviceGatewayNodes(tx, serviceName, structs.ServiceKindIngressGateway, entMeta)
maxIdx, nodes, watchChs, err := s.serviceGatewayNodes(tx, ws, serviceName, structs.ServiceKindIngressGateway, entMeta)
if err != nil {
return 0, nil, fmt.Errorf("failed gateway nodes lookup: %v", err)
}
@ -2024,7 +2037,7 @@ func (s *Store) checkServiceNodesTxn(tx *memdb.Txn, ws memdb.WatchSet, serviceNa
// to the mesh with a mix of sidecars and gateways until all its instances have a sidecar.
if connect {
// Look up gateway nodes associated with the service
_, nodes, _, err := s.serviceGatewayNodes(tx, serviceName, structs.ServiceKindTerminatingGateway, entMeta)
_, nodes, _, err := s.serviceGatewayNodes(tx, ws, serviceName, structs.ServiceKindTerminatingGateway, entMeta)
if err != nil {
return 0, nil, fmt.Errorf("failed gateway nodes lookup: %v", err)
}
@ -2548,7 +2561,10 @@ func (s *Store) updateGatewayNamespace(tx *memdb.Txn, idx uint64, service *struc
}

mapping := service.Clone()

mapping.Service = structs.NewServiceID(sn.ServiceName, &service.Service.EnterpriseMeta)
mapping.FromWildcard = true

err = s.updateGatewayService(tx, idx, mapping)
if err != nil {
return err
@ -2612,7 +2628,9 @@ func (s *Store) checkGatewayWildcardsAndUpdate(tx *memdb.Txn, idx uint64, svc *s

// Copy the wildcard mapping and modify it
gatewaySvc := wildcardSvc.Clone()

gatewaySvc.Service = structs.NewServiceID(svc.Service, &svc.EnterpriseMeta)
gatewaySvc.FromWildcard = true

if err = s.updateGatewayService(tx, idx, gatewaySvc); err != nil {
return fmt.Errorf("Failed to associate service %q with gateway %q", gatewaySvc.Service.String(), gatewaySvc.Gateway.String())
@ -2635,13 +2653,17 @@ func (s *Store) gatewayServices(tx *memdb.Txn, name string, entMeta *structs.Ent
// TODO(ingress): How to handle index rolling back when a config entry is
// deleted that references a service?
// We might need something like the service_last_extinction index?
func (s *Store) serviceGatewayNodes(tx *memdb.Txn, service string, kind structs.ServiceKind, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, []<-chan struct{}, error) {
func (s *Store) serviceGatewayNodes(tx *memdb.Txn, ws memdb.WatchSet, service string, kind structs.ServiceKind, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, []<-chan struct{}, error) {
// Look up gateway name associated with the service
gws, err := s.serviceGateways(tx, service, entMeta)
if err != nil {
return 0, nil, nil, fmt.Errorf("failed gateway lookup: %s", err)
}

// Adding this channel to the WatchSet means that the watch will fire if a config entry targeting the service is added.
// Otherwise, if there's no associated gateway, then no watch channel would be returned
ws.Add(gws.WatchCh())

var ret structs.ServiceNodes
var watchChans []<-chan struct{}
var maxIdx uint64
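The signature change above threads the caller's memdb.WatchSet into serviceGatewayNodes so a blocking query can wake up when a gateway association for the service appears. A minimal, self-contained sketch of that watch mechanism using go-memdb directly (the channel and timing below are illustrative; only NewWatchSet, Add, and Watch are the real library API):

package main

import (
	"fmt"
	"time"

	memdb "github.com/hashicorp/go-memdb"
)

func main() {
	// A WatchSet collects channels that close when watched data changes.
	ws := memdb.NewWatchSet()

	// Any channel can be added; iterators returned by memdb queries expose
	// one via WatchCh(), which is what ws.Add(gws.WatchCh()) relies on above.
	ch := make(chan struct{})
	ws.Add(ch)

	// Simulate a write that invalidates the watched data.
	go func() {
		time.Sleep(10 * time.Millisecond)
		close(ch)
	}()

	// Watch returns true only if the timeout fires before any channel does.
	timedOut := ws.Watch(time.After(time.Second))
	fmt.Println("timed out:", timedOut) // prints "timed out: false"
}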
@ -3014,16 +3014,16 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) {
|
|||
setupFn: nil,
|
||||
svc: "test",
|
||||
wantBeforeResLen: 0,
|
||||
// Only the connect index iterator is watched
|
||||
wantBeforeWatchSetSize: 1,
|
||||
// The connect index and gateway-services iterators are watched
|
||||
wantBeforeWatchSetSize: 2,
|
||||
updateFn: func(s *Store) {
|
||||
testRegisterService(t, s, 4, "node1", "test")
|
||||
},
|
||||
shouldFire: false,
|
||||
wantAfterIndex: 4, // No results falls back to global service index
|
||||
wantAfterResLen: 0,
|
||||
// Only the connect index iterator is watched
|
||||
wantAfterWatchSetSize: 1,
|
||||
// The connect index and gateway-services iterators are watched
|
||||
wantAfterWatchSetSize: 2,
|
||||
},
|
||||
{
|
||||
name: "not affected by non-connect-enabled target service de-registration",
|
||||
|
@ -3032,8 +3032,8 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) {
|
|||
},
|
||||
svc: "test",
|
||||
wantBeforeResLen: 0,
|
||||
// Only the connect index iterator is watched
|
||||
wantBeforeWatchSetSize: 1,
|
||||
// The connect index and gateway-services iterators are watched
|
||||
wantBeforeWatchSetSize: 2,
|
||||
updateFn: func(s *Store) {
|
||||
require.NoError(t, s.DeleteService(5, "node1", "test", nil))
|
||||
},
|
||||
|
@ -3044,25 +3044,25 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) {
|
|||
shouldFire: false,
|
||||
wantAfterIndex: 5, // No results falls back to global service index
|
||||
wantAfterResLen: 0,
|
||||
// Only the connect index iterator is watched
|
||||
wantAfterWatchSetSize: 1,
|
||||
// The connect index and gateway-services iterators are watched
|
||||
wantAfterWatchSetSize: 2,
|
||||
},
|
||||
{
|
||||
name: "unblocks on first connect-native service registration",
|
||||
setupFn: nil,
|
||||
svc: "test",
|
||||
wantBeforeResLen: 0,
|
||||
// Only the connect index iterator is watched
|
||||
wantBeforeWatchSetSize: 1,
|
||||
// The connect index and gateway-services iterators are watched
|
||||
wantBeforeWatchSetSize: 2,
|
||||
updateFn: func(s *Store) {
|
||||
testRegisterConnectNativeService(t, s, 4, "node1", "test")
|
||||
},
|
||||
shouldFire: true,
|
||||
wantAfterIndex: 4,
|
||||
wantAfterResLen: 1,
|
||||
// Should take the optimized path where we only watch the service index
|
||||
// and the connect index iterator.
|
||||
wantAfterWatchSetSize: 2,
|
||||
// Should take the optimized path where we only watch the service index,
|
||||
// connect index iterator, and gateway-services iterator.
|
||||
wantAfterWatchSetSize: 3,
|
||||
},
|
||||
{
|
||||
name: "unblocks on subsequent connect-native service registration",
|
||||
|
@ -3071,18 +3071,18 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) {
|
|||
},
|
||||
svc: "test",
|
||||
wantBeforeResLen: 1,
|
||||
// Should take the optimized path where we only watch the service index
|
||||
// and the connect index iterator.
|
||||
wantBeforeWatchSetSize: 2,
|
||||
// Should take the optimized path where we only watch the service index,
|
||||
// connect index iterator, and gateway-services iterator.
|
||||
wantBeforeWatchSetSize: 3,
|
||||
updateFn: func(s *Store) {
|
||||
testRegisterConnectNativeService(t, s, 5, "node2", "test")
|
||||
},
|
||||
shouldFire: true,
|
||||
wantAfterIndex: 5,
|
||||
wantAfterResLen: 2,
|
||||
// Should take the optimized path where we only watch the service index
|
||||
// and the connect index iterator.
|
||||
wantAfterWatchSetSize: 2,
|
||||
// Should take the optimized path where we only watch the service index,
|
||||
// connect index iterator, and gateway-services iterator.
|
||||
wantAfterWatchSetSize: 3,
|
||||
},
|
||||
{
|
||||
name: "unblocks on connect-native service de-registration",
|
||||
|
@ -3092,18 +3092,18 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) {
|
|||
},
|
||||
svc: "test",
|
||||
wantBeforeResLen: 2,
|
||||
// Should take the optimized path where we only watch the service index
|
||||
// and the connect index iterator.
|
||||
wantBeforeWatchSetSize: 2,
|
||||
// Should take the optimized path where we only watch the service index,
|
||||
// connect index iterator, and gateway-services iterator.
|
||||
wantBeforeWatchSetSize: 3,
|
||||
updateFn: func(s *Store) {
|
||||
require.NoError(t, s.DeleteService(6, "node2", "test", nil))
|
||||
},
|
||||
shouldFire: true,
|
||||
wantAfterIndex: 6,
|
||||
wantAfterResLen: 1,
|
||||
// Should take the optimized path where we only watch the service index
|
||||
// and the connect index iterator.
|
||||
wantAfterWatchSetSize: 2,
|
||||
// Should take the optimized path where we only watch the service index,
|
||||
// connect index iterator, and gateway-services iterator.
|
||||
wantAfterWatchSetSize: 3,
|
||||
},
|
||||
{
|
||||
name: "unblocks on last connect-native service de-registration",
|
||||
|
@ -3112,34 +3112,34 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) {
|
|||
},
|
||||
svc: "test",
|
||||
wantBeforeResLen: 1,
|
||||
// Should take the optimized path where we only watch the service index
|
||||
// and the connect index iterator.
|
||||
wantBeforeWatchSetSize: 2,
|
||||
// Should take the optimized path where we only watch the service index,
|
||||
// connect index iterator, and gateway-services iterator.
|
||||
wantBeforeWatchSetSize: 3,
|
||||
updateFn: func(s *Store) {
|
||||
require.NoError(t, s.DeleteService(6, "node1", "test", nil))
|
||||
},
|
||||
shouldFire: true,
|
||||
wantAfterIndex: 6,
|
||||
wantAfterResLen: 0,
|
||||
// Only the connect index iterator is watched
|
||||
wantAfterWatchSetSize: 1,
|
||||
// The connect index and gateway-services iterators are watched
|
||||
wantAfterWatchSetSize: 2,
|
||||
},
|
||||
{
|
||||
name: "unblocks on first proxy service registration",
|
||||
setupFn: nil,
|
||||
svc: "test",
|
||||
wantBeforeResLen: 0,
|
||||
// Only the connect index iterator is watched
|
||||
wantBeforeWatchSetSize: 1,
|
||||
// The connect index and gateway-services iterators are watched
|
||||
wantBeforeWatchSetSize: 2,
|
||||
updateFn: func(s *Store) {
|
||||
testRegisterSidecarProxy(t, s, 4, "node1", "test")
|
||||
},
|
||||
shouldFire: true,
|
||||
wantAfterIndex: 4,
|
||||
wantAfterResLen: 1,
|
||||
// Should take the optimized path where we only watch the service index
|
||||
// and the connect index iterator.
|
||||
wantAfterWatchSetSize: 2,
|
||||
// Should take the optimized path where we only watch the service index,
|
||||
// connect index iterator, and gateway-services iterator.
|
||||
wantAfterWatchSetSize: 3,
|
||||
},
|
||||
{
|
||||
name: "unblocks on subsequent proxy service registration",
|
||||
|
@ -3148,18 +3148,18 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) {
|
|||
},
|
||||
svc: "test",
|
||||
wantBeforeResLen: 1,
|
||||
// Should take the optimized path where we only watch the service index
|
||||
// and the connect index iterator.
|
||||
wantBeforeWatchSetSize: 2,
|
||||
// Should take the optimized path where we only watch the service index,
|
||||
// connect index iterator, and gateway-services iterator.
|
||||
wantBeforeWatchSetSize: 3,
|
||||
updateFn: func(s *Store) {
|
||||
testRegisterSidecarProxy(t, s, 5, "node2", "test")
|
||||
},
|
||||
shouldFire: true,
|
||||
wantAfterIndex: 5,
|
||||
wantAfterResLen: 2,
|
||||
// Should take the optimized path where we only watch the service index
|
||||
// and the connect index iterator.
|
||||
wantAfterWatchSetSize: 2,
|
||||
// Should take the optimized path where we only watch the service index,
|
||||
// connect index iterator, and gateway-services iterator.
|
||||
wantAfterWatchSetSize: 3,
|
||||
},
|
||||
{
|
||||
name: "unblocks on proxy service de-registration",
|
||||
|
@ -3169,18 +3169,18 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) {
|
|||
},
|
||||
svc: "test",
|
||||
wantBeforeResLen: 2,
|
||||
// Should take the optimized path where we only watch the service index
|
||||
// and the connect index iterator.
|
||||
wantBeforeWatchSetSize: 2,
|
||||
// Should take the optimized path where we only watch the service index,
|
||||
// connect index iterator, and gateway-services iterator.
|
||||
wantBeforeWatchSetSize: 3,
|
||||
updateFn: func(s *Store) {
|
||||
require.NoError(t, s.DeleteService(6, "node2", "test-sidecar-proxy", nil))
|
||||
},
|
||||
shouldFire: true,
|
||||
wantAfterIndex: 6,
|
||||
wantAfterResLen: 1,
|
||||
// Should take the optimized path where we only watch the service index
|
||||
// and the connect index iterator.
|
||||
wantAfterWatchSetSize: 2,
|
||||
// Should take the optimized path where we only watch the service index,
|
||||
// connect index iterator, and gateway-services iterator.
|
||||
wantAfterWatchSetSize: 3,
|
||||
},
|
||||
{
|
||||
name: "unblocks on last proxy service de-registration",
|
||||
|
@ -3189,17 +3189,17 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) {
|
|||
},
|
||||
svc: "test",
|
||||
wantBeforeResLen: 1,
|
||||
// Should take the optimized path where we only watch the service index
|
||||
// and the connect index iterator.
|
||||
wantBeforeWatchSetSize: 2,
|
||||
// Should take the optimized path where we only watch the service index,
|
||||
// connect index iterator, and gateway-services iterator.
|
||||
wantBeforeWatchSetSize: 3,
|
||||
updateFn: func(s *Store) {
|
||||
require.NoError(t, s.DeleteService(6, "node1", "test-sidecar-proxy", nil))
|
||||
},
|
||||
shouldFire: true,
|
||||
wantAfterIndex: 6,
|
||||
wantAfterResLen: 0,
|
||||
// Only the connect index iterator is watched
|
||||
wantAfterWatchSetSize: 1,
|
||||
// The connect index and gateway-services iterators are watched
|
||||
wantAfterWatchSetSize: 2,
|
||||
},
|
||||
{
|
||||
name: "unblocks on connect-native service health check change",
|
||||
|
@ -3209,18 +3209,18 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) {
|
|||
},
|
||||
svc: "test",
|
||||
wantBeforeResLen: 1,
|
||||
// Should take the optimized path where we only watch the service index
|
||||
// and the connect index iterator.
|
||||
wantBeforeWatchSetSize: 2,
|
||||
// Should take the optimized path where we only watch the service index,
|
||||
// connect index iterator, and gateway-services iterator.
|
||||
wantBeforeWatchSetSize: 3,
|
||||
updateFn: func(s *Store) {
|
||||
testRegisterCheck(t, s, 7, "node1", "test", "check1", "critical")
|
||||
},
|
||||
shouldFire: true,
|
||||
wantAfterIndex: 7,
|
||||
wantAfterResLen: 1, // critical filtering doesn't happen in the state store method.
|
||||
// Should take the optimized path where we only watch the service index
|
||||
// and the connect index iterator.
|
||||
wantAfterWatchSetSize: 2,
|
||||
// Should take the optimized path where we only watch the service index,
|
||||
// connect index iterator, and gateway-services iterator.
|
||||
wantAfterWatchSetSize: 3,
|
||||
},
|
||||
{
|
||||
name: "unblocks on proxy service health check change",
|
||||
|
@ -3230,18 +3230,18 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) {
|
|||
},
|
||||
svc: "test",
|
||||
wantBeforeResLen: 1,
|
||||
// Should take the optimized path where we only watch the service index
|
||||
// and the connect index iterator.
|
||||
wantBeforeWatchSetSize: 2,
|
||||
// Should take the optimized path where we only watch the service index,
|
||||
// connect index iterator, and gateway-services iterator.
|
||||
wantBeforeWatchSetSize: 3,
|
||||
updateFn: func(s *Store) {
|
||||
testRegisterCheck(t, s, 7, "node1", "test-sidecar-proxy", "check1", "critical")
|
||||
},
|
||||
shouldFire: true,
|
||||
wantAfterIndex: 7,
|
||||
wantAfterResLen: 1, // critical filtering doesn't happen in the state store method.
|
||||
// Should take the optimized path where we only watch the service index
|
||||
// and the connect index iterator.
|
||||
wantAfterWatchSetSize: 2,
|
||||
// Should take the optimized path where we only watch the service index,
|
||||
// connect index iterator, and gateway-services iterator.
|
||||
wantAfterWatchSetSize: 3,
|
||||
},
|
||||
{
|
||||
name: "unblocks on connect-native node health check change",
|
||||
|
@ -3251,18 +3251,18 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) {
|
|||
},
|
||||
svc: "test",
|
||||
wantBeforeResLen: 1,
|
||||
// Should take the optimized path where we only watch the service index
|
||||
// and the connect index iterator.
|
||||
wantBeforeWatchSetSize: 2,
|
||||
// Should take the optimized path where we only watch the service index,
|
||||
// connect index iterator, and gateway-services iterator.
|
||||
wantBeforeWatchSetSize: 3,
|
||||
updateFn: func(s *Store) {
|
||||
testRegisterCheck(t, s, 7, "node1", "", "check1", "critical")
|
||||
},
|
||||
shouldFire: true,
|
||||
wantAfterIndex: 7,
|
||||
wantAfterResLen: 1, // critical filtering doesn't happen in the state store method.
|
||||
// Should take the optimized path where we only watch the service index
|
||||
// and the connect index iterator.
|
||||
wantAfterWatchSetSize: 2,
|
||||
// Should take the optimized path where we only watch the service index,
|
||||
// connect index iterator, and gateway-services iterator.
|
||||
wantAfterWatchSetSize: 3,
|
||||
},
|
||||
{
|
||||
name: "unblocks on proxy service health check change",
|
||||
|
@ -3272,18 +3272,18 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) {
|
|||
},
|
||||
svc: "test",
|
||||
wantBeforeResLen: 1,
|
||||
// Should take the optimized path where we only watch the service index
|
||||
// and the connect index iterator.
|
||||
wantBeforeWatchSetSize: 2,
|
||||
// Should take the optimized path where we only watch the service index,
|
||||
// connect index iterator, and gateway-services iterator.
|
||||
wantBeforeWatchSetSize: 3,
|
||||
updateFn: func(s *Store) {
|
||||
testRegisterCheck(t, s, 7, "node1", "", "check1", "critical")
|
||||
},
|
||||
shouldFire: true,
|
||||
wantAfterIndex: 7,
|
||||
wantAfterResLen: 1, // critical filtering doesn't happen in the state store method.
|
||||
// Should take the optimized path where we only watch the service index
|
||||
// and the connect index iterator.
|
||||
wantAfterWatchSetSize: 2,
|
||||
// Should take the optimized path where we only watch the service index,
|
||||
// connect index iterator, and gateway-services iterator.
|
||||
wantAfterWatchSetSize: 3,
|
||||
},
|
||||
{
|
||||
// See https://github.com/hashicorp/consul/issues/5506. The issue is cause
|
||||
|
@ -3302,18 +3302,18 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) {
|
|||
},
|
||||
svc: "test",
|
||||
wantBeforeResLen: 1,
|
||||
// Should take the optimized path where we only watch the service index
|
||||
// and the connect index iterator.
|
||||
wantBeforeWatchSetSize: 2,
|
||||
// Should take the optimized path where we only watch the service index,
|
||||
// connect index iterator, and gateway-services iterator.
|
||||
wantBeforeWatchSetSize: 3,
|
||||
updateFn: func(s *Store) {
|
||||
testRegisterCheck(t, s, 7, "node1", "test-sidecar-proxy", "check1", "critical")
|
||||
},
|
||||
shouldFire: true,
|
||||
wantAfterIndex: 7,
|
||||
wantAfterResLen: 1, // critical filtering doesn't happen in the state store method.
|
||||
// Should take the optimized path where we only watch the service index
|
||||
// and the connect index iterator.
|
||||
wantAfterWatchSetSize: 2,
|
||||
// Should take the optimized path where we only watch the service index,
|
||||
// connect index iterator, and gateway-services iterator.
|
||||
wantAfterWatchSetSize: 3,
|
||||
},
|
||||
{
|
||||
// See https://github.com/hashicorp/consul/issues/5506. This is the edge
|
||||
|
@ -3324,9 +3324,9 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) {
|
|||
},
|
||||
svc: "test",
|
||||
wantBeforeResLen: 1,
|
||||
// Should take the optimized path where we only watch the service index
|
||||
// and the connect index iterator.
|
||||
wantBeforeWatchSetSize: 2,
|
||||
// Should take the optimized path where we only watch the service index,
|
||||
// connect index iterator, and gateway-services iterator.
|
||||
wantBeforeWatchSetSize: 3,
|
||||
updateFn: func(s *Store) {
|
||||
// Register a new result with a different service name; it could be another
// proxy with a different name, but a native instance works too.
|
||||
|
@ -3335,9 +3335,9 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) {
|
|||
shouldFire: true,
|
||||
wantAfterIndex: 5,
|
||||
wantAfterResLen: 2,
|
||||
// Should take the optimized path where we only watch the two service
// indexes and the connect index iterator.
|
||||
wantAfterWatchSetSize: 3,
|
||||
// Should take the optimized path where we only watch the service indexes,
|
||||
// connect index iterator, and gateway-services iterator.
|
||||
wantAfterWatchSetSize: 4,
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -3566,23 +3566,14 @@ func TestStateStore_CheckConnectServiceNodes_Gateways(t *testing.T) {
|
|||
assert.Nil(s.EnsureService(14, "bar", &structs.NodeService{ID: "db2", Service: "db", Tags: []string{"replica"}, Address: "", Port: 8001}))
|
||||
assert.False(watchFired(ws))
|
||||
|
||||
// Register a sidecar and a gateway for db
|
||||
assert.Nil(s.EnsureService(15, "foo", &structs.NodeService{Kind: structs.ServiceKindConnectProxy, ID: "proxy", Service: "proxy", Proxy: structs.ConnectProxyConfig{DestinationServiceName: "db"}, Port: 8000}))
|
||||
assert.True(watchFired(ws))
|
||||
// Register node and service checks
|
||||
testRegisterCheck(t, s, 15, "foo", "", "check1", api.HealthPassing)
|
||||
testRegisterCheck(t, s, 16, "bar", "", "check2", api.HealthPassing)
|
||||
testRegisterCheck(t, s, 17, "foo", "db", "check3", api.HealthPassing)
|
||||
assert.False(watchFired(ws))
|
||||
|
||||
assert.Nil(s.EnsureService(16, "bar", &structs.NodeService{Kind: structs.ServiceKindTerminatingGateway, ID: "gateway", Service: "gateway", Port: 443}))
|
||||
assert.True(watchFired(ws))
|
||||
|
||||
// Register node checks
|
||||
testRegisterCheck(t, s, 17, "foo", "", "check1", api.HealthPassing)
|
||||
testRegisterCheck(t, s, 18, "bar", "", "check2", api.HealthPassing)
|
||||
|
||||
// Register checks against the services.
|
||||
testRegisterCheck(t, s, 19, "foo", "db", "check3", api.HealthPassing)
|
||||
testRegisterCheck(t, s, 20, "bar", "gateway", "check4", api.HealthPassing)
|
||||
|
||||
// Associate gateway with db
|
||||
assert.Nil(s.EnsureConfigEntry(21, &structs.TerminatingGatewayConfigEntry{
|
||||
// Watch should fire when a gateway is associated with the service, even if the gateway doesn't exist yet
|
||||
assert.Nil(s.EnsureConfigEntry(18, &structs.TerminatingGatewayConfigEntry{
|
||||
Kind: "terminating-gateway",
|
||||
Name: "gateway",
|
||||
Services: []structs.LinkedService{
|
||||
|
@ -3593,11 +3584,23 @@ func TestStateStore_CheckConnectServiceNodes_Gateways(t *testing.T) {
|
|||
}, nil))
|
||||
assert.True(watchFired(ws))
|
||||
|
||||
// Watch should fire when a gateway is added
|
||||
assert.Nil(s.EnsureService(19, "bar", &structs.NodeService{Kind: structs.ServiceKindTerminatingGateway, ID: "gateway", Service: "gateway", Port: 443}))
|
||||
assert.True(watchFired(ws))
|
||||
|
||||
// Watch should fire when a check is added to the gateway
|
||||
testRegisterCheck(t, s, 20, "bar", "gateway", "check4", api.HealthPassing)
|
||||
assert.True(watchFired(ws))
|
||||
|
||||
// Watch should fire when a different connect service is registered for db
|
||||
assert.Nil(s.EnsureService(21, "foo", &structs.NodeService{Kind: structs.ServiceKindConnectProxy, ID: "proxy", Service: "proxy", Proxy: structs.ConnectProxyConfig{DestinationServiceName: "db"}, Port: 8000}))
|
||||
assert.True(watchFired(ws))
|
||||
|
||||
// Read everything back.
|
||||
ws = memdb.NewWatchSet()
|
||||
idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil)
|
||||
assert.Nil(err)
|
||||
assert.Equal(idx, uint64(20))
|
||||
assert.Equal(idx, uint64(21))
|
||||
assert.Len(nodes, 2)
|
||||
|
||||
// Check sidecar
|
||||
|
@ -4559,12 +4562,13 @@ func TestStateStore_GatewayServices_Terminating(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
Service: structs.NewServiceID("redis", nil),
|
||||
Gateway: structs.NewServiceID("gateway", nil),
|
||||
GatewayKind: structs.ServiceKindTerminatingGateway,
|
||||
CAFile: "ca.crt",
|
||||
CertFile: "client.crt",
|
||||
KeyFile: "client.key",
|
||||
Service: structs.NewServiceID("redis", nil),
|
||||
Gateway: structs.NewServiceID("gateway", nil),
|
||||
GatewayKind: structs.ServiceKindTerminatingGateway,
|
||||
CAFile: "ca.crt",
|
||||
CertFile: "client.crt",
|
||||
KeyFile: "client.key",
|
||||
FromWildcard: true,
|
||||
RaftIndex: structs.RaftIndex{
|
||||
CreateIndex: 23,
|
||||
ModifyIndex: 23,
|
||||
|
@ -4656,18 +4660,20 @@ func TestStateStore_GatewayServices_Terminating(t *testing.T) {
|
|||
|
||||
expect = structs.GatewayServices{
|
||||
{
|
||||
Service: structs.NewServiceID("api", nil),
|
||||
Gateway: structs.NewServiceID("gateway2", nil),
|
||||
GatewayKind: structs.ServiceKindTerminatingGateway,
|
||||
Service: structs.NewServiceID("api", nil),
|
||||
Gateway: structs.NewServiceID("gateway2", nil),
|
||||
GatewayKind: structs.ServiceKindTerminatingGateway,
|
||||
FromWildcard: true,
|
||||
RaftIndex: structs.RaftIndex{
|
||||
CreateIndex: 26,
|
||||
ModifyIndex: 26,
|
||||
},
|
||||
},
|
||||
{
|
||||
Service: structs.NewServiceID("db", nil),
|
||||
Gateway: structs.NewServiceID("gateway2", nil),
|
||||
GatewayKind: structs.ServiceKindTerminatingGateway,
|
||||
Service: structs.NewServiceID("db", nil),
|
||||
Gateway: structs.NewServiceID("gateway2", nil),
|
||||
GatewayKind: structs.ServiceKindTerminatingGateway,
|
||||
FromWildcard: true,
|
||||
RaftIndex: structs.RaftIndex{
|
||||
CreateIndex: 26,
|
||||
ModifyIndex: 26,
|
||||
|
@ -4686,6 +4692,154 @@ func TestStateStore_GatewayServices_Terminating(t *testing.T) {
|
|||
assert.Len(t, out, 0)
|
||||
}
|
||||
|
||||
func TestStateStore_GatewayServices_ServiceDeletion(t *testing.T) {
|
||||
s := testStateStore(t)
|
||||
|
||||
// Listing with no results returns an empty list.
|
||||
ws := memdb.NewWatchSet()
|
||||
idx, nodes, err := s.GatewayServices(ws, "gateway", nil)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, idx, uint64(0))
|
||||
assert.Len(t, nodes, 0)
|
||||
|
||||
// Create some nodes
|
||||
assert.Nil(t, s.EnsureNode(10, &structs.Node{Node: "foo", Address: "127.0.0.1"}))
|
||||
assert.Nil(t, s.EnsureNode(11, &structs.Node{Node: "bar", Address: "127.0.0.2"}))
|
||||
assert.Nil(t, s.EnsureNode(12, &structs.Node{Node: "baz", Address: "127.0.0.2"}))
|
||||
|
||||
// Typical services and some consul services spread across two nodes
|
||||
assert.Nil(t, s.EnsureService(13, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: nil, Address: "", Port: 5000}))
|
||||
assert.Nil(t, s.EnsureService(14, "foo", &structs.NodeService{ID: "api", Service: "api", Tags: nil, Address: "", Port: 5000}))
|
||||
|
||||
// Register two gateways
|
||||
assert.Nil(t, s.EnsureService(17, "bar", &structs.NodeService{Kind: structs.ServiceKindTerminatingGateway, ID: "gateway", Service: "gateway", Port: 443}))
|
||||
assert.Nil(t, s.EnsureService(18, "baz", &structs.NodeService{Kind: structs.ServiceKindTerminatingGateway, ID: "other-gateway", Service: "other-gateway", Port: 443}))
|
||||
|
||||
// Associate the first gateway with db
|
||||
assert.Nil(t, s.EnsureConfigEntry(19, &structs.TerminatingGatewayConfigEntry{
|
||||
Kind: "terminating-gateway",
|
||||
Name: "gateway",
|
||||
Services: []structs.LinkedService{
|
||||
{
|
||||
Name: "db",
|
||||
CAFile: "my_ca.pem",
|
||||
},
|
||||
},
|
||||
}, nil))
|
||||
assert.True(t, watchFired(ws))
|
||||
|
||||
// Associate the other gateway with a wildcard
|
||||
assert.Nil(t, s.EnsureConfigEntry(20, &structs.TerminatingGatewayConfigEntry{
|
||||
Kind: "terminating-gateway",
|
||||
Name: "other-gateway",
|
||||
Services: []structs.LinkedService{
|
||||
{
|
||||
Name: "*",
|
||||
},
|
||||
},
|
||||
}, nil))
|
||||
assert.True(t, watchFired(ws))
|
||||
|
||||
// Read everything back for first gateway.
|
||||
ws = memdb.NewWatchSet()
|
||||
idx, out, err := s.GatewayServices(ws, "gateway", nil)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, idx, uint64(20))
|
||||
assert.Len(t, out, 1)
|
||||
|
||||
expect := structs.GatewayServices{
|
||||
{
|
||||
Service: structs.NewServiceID("db", nil),
|
||||
Gateway: structs.NewServiceID("gateway", nil),
|
||||
GatewayKind: structs.ServiceKindTerminatingGateway,
|
||||
CAFile: "my_ca.pem",
|
||||
RaftIndex: structs.RaftIndex{
|
||||
CreateIndex: 19,
|
||||
ModifyIndex: 19,
|
||||
},
|
||||
},
|
||||
}
|
||||
assert.Equal(t, expect, out)
|
||||
|
||||
// Read everything back for other gateway.
|
||||
otherWS := memdb.NewWatchSet()
|
||||
idx, out, err = s.GatewayServices(otherWS, "other-gateway", nil)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, idx, uint64(20))
|
||||
assert.Len(t, out, 2)
|
||||
|
||||
expect = structs.GatewayServices{
|
||||
{
|
||||
Service: structs.NewServiceID("api", nil),
|
||||
Gateway: structs.NewServiceID("other-gateway", nil),
|
||||
GatewayKind: structs.ServiceKindTerminatingGateway,
|
||||
FromWildcard: true,
|
||||
RaftIndex: structs.RaftIndex{
|
||||
CreateIndex: 20,
|
||||
ModifyIndex: 20,
|
||||
},
|
||||
},
|
||||
{
|
||||
Service: structs.NewServiceID("db", nil),
|
||||
Gateway: structs.NewServiceID("other-gateway", nil),
|
||||
GatewayKind: structs.ServiceKindTerminatingGateway,
|
||||
FromWildcard: true,
|
||||
RaftIndex: structs.RaftIndex{
|
||||
CreateIndex: 20,
|
||||
ModifyIndex: 20,
|
||||
},
|
||||
},
|
||||
}
|
||||
assert.Equal(t, expect, out)
|
||||
|
||||
// Delete a service specified directly.
|
||||
assert.Nil(t, s.DeleteService(20, "foo", "db", nil))
|
||||
|
||||
// Only the watch for other-gateway should fire, since its association to db came from a wildcard
|
||||
assert.False(t, watchFired(ws))
|
||||
assert.True(t, watchFired(otherWS))
|
||||
|
||||
// db should remain in the original gateway
|
||||
idx, out, err = s.GatewayServices(ws, "gateway", nil)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, idx, uint64(20))
|
||||
assert.Len(t, out, 1)
|
||||
|
||||
expect = structs.GatewayServices{
|
||||
{
|
||||
Service: structs.NewServiceID("db", nil),
|
||||
Gateway: structs.NewServiceID("gateway", nil),
|
||||
GatewayKind: structs.ServiceKindTerminatingGateway,
|
||||
CAFile: "my_ca.pem",
|
||||
RaftIndex: structs.RaftIndex{
|
||||
CreateIndex: 19,
|
||||
ModifyIndex: 19,
|
||||
},
|
||||
},
|
||||
}
|
||||
assert.Equal(t, expect, out)
|
||||
|
||||
// db should not have been deleted from the other gateway
|
||||
idx, out, err = s.GatewayServices(ws, "other-gateway", nil)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, idx, uint64(20))
|
||||
assert.Len(t, out, 1)
|
||||
|
||||
expect = structs.GatewayServices{
|
||||
{
|
||||
Service: structs.NewServiceID("api", nil),
|
||||
Gateway: structs.NewServiceID("other-gateway", nil),
|
||||
GatewayKind: structs.ServiceKindTerminatingGateway,
|
||||
FromWildcard: true,
|
||||
RaftIndex: structs.RaftIndex{
|
||||
CreateIndex: 20,
|
||||
ModifyIndex: 20,
|
||||
},
|
||||
},
|
||||
}
|
||||
assert.Equal(t, expect, out)
|
||||
}
|
||||
|
||||
func TestStateStore_CheckIngressServiceNodes(t *testing.T) {
|
||||
s := testStateStore(t)
|
||||
ws := setupIngressState(t, s)
|
||||
|
|
|
@ -134,6 +134,7 @@ func (m *Manager) syncState() {
services := m.State.Services(structs.WildcardEnterpriseMeta())
for sid, svc := range services {
if svc.Kind != structs.ServiceKindConnectProxy &&
svc.Kind != structs.ServiceKindTerminatingGateway &&
svc.Kind != structs.ServiceKindMeshGateway &&
svc.Kind != structs.ServiceKindIngressGateway {
continue
@ -2,7 +2,6 @@ package proxycfg

import (
"context"

"github.com/hashicorp/consul/agent/structs"
"github.com/mitchellh/copystructure"
)
@ -57,11 +56,63 @@ func (c *configSnapshotConnectProxy) IsEmpty() bool {
len(c.PreparedQueryEndpoints) == 0
}

type configSnapshotTerminatingGateway struct {
// WatchedServices is a map of service id to a cancel function. This cancel
// function is tied to the watch of linked service instances for the given
// id. If the linked services watch would indicate the removal of
// a service altogether we then cancel watching that service for its endpoints.
WatchedServices map[structs.ServiceID]context.CancelFunc

// WatchedIntentions is a map of service id to a cancel function.
// This cancel function is tied to the watch of intentions for linked services.
// As with WatchedServices, intention watches will be cancelled when services
// are no longer linked to the gateway.
WatchedIntentions map[structs.ServiceID]context.CancelFunc

// WatchedLeaves is a map of ServiceID to a cancel function.
// This cancel function is tied to the watch of leaf certs for linked services.
// As with WatchedServices, leaf watches will be cancelled when services
// are no longer linked to the gateway.
WatchedLeaves map[structs.ServiceID]context.CancelFunc

// ServiceLeaves is a map of ServiceID to a leaf cert.
// Terminating gateways will present different certificates depending
// on the service that the caller is trying to reach.
ServiceLeaves map[structs.ServiceID]*structs.IssuedCert

// WatchedResolvers is a map of ServiceID to a cancel function.
// This cancel function is tied to the watch of resolvers for linked services.
// As with WatchedServices, resolver watches will be cancelled when services
// are no longer linked to the gateway.
WatchedResolvers map[structs.ServiceID]context.CancelFunc

// ServiceResolvers is a map of service id to an associated
// service-resolver config entry for that service.
ServiceResolvers map[structs.ServiceID]*structs.ServiceResolverConfigEntry

// ServiceGroups is a map of service id to the service instances of that
// service in the local datacenter.
ServiceGroups map[structs.ServiceID]structs.CheckServiceNodes
}

func (c *configSnapshotTerminatingGateway) IsEmpty() bool {
if c == nil {
return true
}
return len(c.ServiceLeaves) == 0 &&
len(c.WatchedLeaves) == 0 &&
len(c.WatchedIntentions) == 0 &&
len(c.ServiceGroups) == 0 &&
len(c.WatchedServices) == 0 &&
len(c.ServiceResolvers) == 0 &&
len(c.WatchedResolvers) == 0
}
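Each Watched* map above pairs a service ID with a context.CancelFunc so a watch can be torn down when the service is no longer linked to the gateway. A small stand-alone sketch of that bookkeeping pattern, with a hypothetical serviceID type and a placeholder watch goroutine standing in for Consul's real cache watches:

package main

import (
	"context"
	"fmt"
	"time"
)

// serviceID stands in for structs.ServiceID; the real maps are keyed by the
// service's ID and namespace.
type serviceID string

// reconcileWatches starts one watch per desired service and cancels watches
// for services that are no longer linked, mirroring how WatchedServices,
// WatchedLeaves, etc. are maintained when a gateway-services update arrives.
func reconcileWatches(ctx context.Context, watched map[serviceID]context.CancelFunc, desired []serviceID) {
	want := make(map[serviceID]struct{})
	for _, sid := range desired {
		want[sid] = struct{}{}
		if _, ok := watched[sid]; !ok {
			childCtx, cancel := context.WithCancel(ctx)
			go func(sid serviceID) {
				<-childCtx.Done() // placeholder for a real blocking watch
				fmt.Println("watch cancelled for", sid)
			}(sid)
			watched[sid] = cancel
		}
	}
	// Anything we were watching that is no longer desired gets cancelled.
	for sid, cancel := range watched {
		if _, ok := want[sid]; !ok {
			cancel()
			delete(watched, sid)
		}
	}
}

func main() {
	watched := make(map[serviceID]context.CancelFunc)
	reconcileWatches(context.Background(), watched, []serviceID{"db", "billing"})
	reconcileWatches(context.Background(), watched, []serviceID{"db"}) // drops "billing"
	time.Sleep(50 * time.Millisecond)                                  // let the cancelled watch report
}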
type configSnapshotMeshGateway struct {
|
||||
// WatchedServices is a map of service id to a cancel function. This cancel
|
||||
// function is tied to the watch of connect enabled services for the given
|
||||
// id. If the main datacenter services watch would indicate the removal of
|
||||
// a service all together we then cancel watching that service for its
|
||||
// a service altogether we then cancel watching that service for its
|
||||
// connect endpoints.
|
||||
WatchedServices map[structs.ServiceID]context.CancelFunc
|
||||
|
||||
|
@ -177,6 +228,9 @@ type ConfigSnapshot struct {
|
|||
// connect-proxy specific
|
||||
ConnectProxy configSnapshotConnectProxy
|
||||
|
||||
// terminating-gateway specific
|
||||
TerminatingGateway configSnapshotTerminatingGateway
|
||||
|
||||
// mesh-gateway specific
|
||||
MeshGateway configSnapshotMeshGateway
|
||||
|
||||
|
@ -191,6 +245,8 @@ func (s *ConfigSnapshot) Valid() bool {
|
|||
switch s.Kind {
|
||||
case structs.ServiceKindConnectProxy:
|
||||
return s.Roots != nil && s.ConnectProxy.Leaf != nil
|
||||
case structs.ServiceKindTerminatingGateway:
|
||||
return s.Roots != nil
|
||||
case structs.ServiceKindMeshGateway:
|
||||
if s.ServiceMeta[structs.MetaWANFederationKey] == "1" {
|
||||
if len(s.MeshGateway.ConsulServers) == 0 {
|
||||
|
@ -221,6 +277,10 @@ func (s *ConfigSnapshot) Clone() (*ConfigSnapshot, error) {
|
|||
case structs.ServiceKindConnectProxy:
|
||||
snap.ConnectProxy.WatchedUpstreams = nil
|
||||
snap.ConnectProxy.WatchedGateways = nil
|
||||
case structs.ServiceKindTerminatingGateway:
|
||||
snap.TerminatingGateway.WatchedServices = nil
|
||||
snap.TerminatingGateway.WatchedIntentions = nil
|
||||
snap.TerminatingGateway.WatchedLeaves = nil
|
||||
case structs.ServiceKindMeshGateway:
|
||||
snap.MeshGateway.WatchedDatacenters = nil
|
||||
snap.MeshGateway.WatchedServices = nil
|
||||
|
|
|
@ -33,6 +33,10 @@ const (
|
|||
datacentersWatchID = "datacenters"
|
||||
serviceResolversWatchID = "service-resolvers"
|
||||
gatewayServicesWatchID = "gateway-services"
|
||||
externalServiceIDPrefix = "external-service:"
|
||||
serviceLeafIDPrefix = "service-leaf:"
|
||||
serviceResolverIDPrefix = "service-resolver:"
|
||||
serviceIntentionsIDPrefix = "service-intentions:"
|
||||
svcChecksWatchIDPrefix = cachetype.ServiceHTTPChecksName + ":"
|
||||
serviceIDPrefix = string(structs.UpstreamDestTypeService) + ":"
|
||||
preparedQueryIDPrefix = string(structs.UpstreamDestTypePreparedQuery) + ":"
|
||||
|
@ -109,10 +113,11 @@ func copyProxyConfig(ns *structs.NodeService) (structs.ConnectProxyConfig, error
|
|||
func newState(ns *structs.NodeService, token string) (*state, error) {
|
||||
switch ns.Kind {
|
||||
case structs.ServiceKindConnectProxy:
|
||||
case structs.ServiceKindTerminatingGateway:
|
||||
case structs.ServiceKindMeshGateway:
|
||||
case structs.ServiceKindIngressGateway:
|
||||
default:
|
||||
return nil, errors.New("not a connect-proxy, mesh-gateway, or ingress-gateway")
|
||||
return nil, errors.New("not a connect-proxy, terminating-gateway, mesh-gateway, or ingress-gateway")
|
||||
}
|
||||
|
||||
proxyCfg, err := copyProxyConfig(ns)
|
||||
|
@ -184,6 +189,8 @@ func (s *state) initWatches() error {
|
|||
switch s.kind {
|
||||
case structs.ServiceKindConnectProxy:
|
||||
return s.initWatchesConnectProxy()
|
||||
case structs.ServiceKindTerminatingGateway:
|
||||
return s.initWatchesTerminatingGateway()
|
||||
case structs.ServiceKindMeshGateway:
|
||||
return s.initWatchesMeshGateway()
|
||||
case structs.ServiceKindIngressGateway:
|
||||
|
@ -359,6 +366,36 @@ func parseReducedUpstreamConfig(m map[string]interface{}) (reducedUpstreamConfig
|
|||
return cfg, err
|
||||
}
|
||||
|
||||
// initWatchesTerminatingGateway sets up the initial watches needed based on the terminating-gateway registration
|
||||
func (s *state) initWatchesTerminatingGateway() error {
|
||||
// Watch for root changes
|
||||
err := s.cache.Notify(s.ctx, cachetype.ConnectCARootName, &structs.DCSpecificRequest{
|
||||
Datacenter: s.source.Datacenter,
|
||||
QueryOptions: structs.QueryOptions{Token: s.token},
|
||||
Source: *s.source,
|
||||
}, rootsWatchID, s.ch)
|
||||
if err != nil {
|
||||
s.logger.Named(logging.TerminatingGateway).
|
||||
Error("failed to register watch for root changes", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Watch for the terminating-gateway's linked services
|
||||
err = s.cache.Notify(s.ctx, cachetype.GatewayServicesName, &structs.ServiceSpecificRequest{
|
||||
Datacenter: s.source.Datacenter,
|
||||
QueryOptions: structs.QueryOptions{Token: s.token},
|
||||
ServiceName: s.service,
|
||||
EnterpriseMeta: s.proxyID.EnterpriseMeta,
|
||||
}, gatewayServicesWatchID, s.ch)
|
||||
if err != nil {
|
||||
s.logger.Named(logging.TerminatingGateway).
|
||||
Error("failed to register watch for linked services", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// initWatchesMeshGateway sets up the watches needed based on the current mesh gateway registration
|
||||
func (s *state) initWatchesMeshGateway() error {
|
||||
// Watch for root changes
|
||||
|
@ -498,7 +535,14 @@ func (s *state) initialConfigSnapshot() ConfigSnapshot {
|
|||
snap.ConnectProxy.WatchedGatewayEndpoints = make(map[string]map[string]structs.CheckServiceNodes)
|
||||
snap.ConnectProxy.WatchedServiceChecks = make(map[structs.ServiceID][]structs.CheckType)
|
||||
snap.ConnectProxy.PreparedQueryEndpoints = make(map[string]structs.CheckServiceNodes)
|
||||
|
||||
case structs.ServiceKindTerminatingGateway:
|
||||
snap.TerminatingGateway.WatchedServices = make(map[structs.ServiceID]context.CancelFunc)
|
||||
snap.TerminatingGateway.WatchedLeaves = make(map[structs.ServiceID]context.CancelFunc)
|
||||
snap.TerminatingGateway.WatchedIntentions = make(map[structs.ServiceID]context.CancelFunc)
|
||||
snap.TerminatingGateway.WatchedResolvers = make(map[structs.ServiceID]context.CancelFunc)
|
||||
snap.TerminatingGateway.ServiceLeaves = make(map[structs.ServiceID]*structs.IssuedCert)
|
||||
snap.TerminatingGateway.ServiceGroups = make(map[structs.ServiceID]structs.CheckServiceNodes)
|
||||
snap.TerminatingGateway.ServiceResolvers = make(map[structs.ServiceID]*structs.ServiceResolverConfigEntry)
|
||||
case structs.ServiceKindMeshGateway:
|
||||
snap.MeshGateway.WatchedServices = make(map[structs.ServiceID]context.CancelFunc)
|
||||
snap.MeshGateway.WatchedDatacenters = make(map[string]context.CancelFunc)
|
||||
|
@ -611,6 +655,8 @@ func (s *state) handleUpdate(u cache.UpdateEvent, snap *ConfigSnapshot) error {
|
|||
switch s.kind {
|
||||
case structs.ServiceKindConnectProxy:
|
||||
return s.handleUpdateConnectProxy(u, snap)
|
||||
case structs.ServiceKindTerminatingGateway:
|
||||
return s.handleUpdateTerminatingGateway(u, snap)
|
||||
case structs.ServiceKindMeshGateway:
|
||||
return s.handleUpdateMeshGateway(u, snap)
|
||||
case structs.ServiceKindIngressGateway:
|
||||
|
@ -633,7 +679,7 @@ func (s *state) handleUpdateConnectProxy(u cache.UpdateEvent, snap *ConfigSnapsh
|
|||
}
|
||||
snap.Roots = roots
|
||||
case u.CorrelationID == intentionsWatchID:
|
||||
// Not in snapshot currently, no op
|
||||
// no-op: Intentions don't get stored in the snapshot, calls to ConnectAuthorize will fetch them from the cache
|
||||
|
||||
case strings.HasPrefix(u.CorrelationID, "upstream:"+preparedQueryIDPrefix):
|
||||
resp, ok := u.Result.(*structs.PreparedQueryExecuteResponse)
|
||||
|
@ -842,6 +888,219 @@ func (s *state) resetWatchesFromChain(
|
|||
return nil
|
||||
}
|
||||
|
||||
func (s *state) handleUpdateTerminatingGateway(u cache.UpdateEvent, snap *ConfigSnapshot) error {
|
||||
if u.Err != nil {
|
||||
return fmt.Errorf("error filling agent cache: %v", u.Err)
|
||||
}
|
||||
logger := s.logger.Named(logging.TerminatingGateway)
|
||||
|
||||
switch {
|
||||
case u.CorrelationID == rootsWatchID:
|
||||
roots, ok := u.Result.(*structs.IndexedCARoots)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid type for response: %T", u.Result)
|
||||
}
|
||||
snap.Roots = roots
|
||||
|
||||
// Update watches based on the current list of services associated with the terminating-gateway
|
||||
case u.CorrelationID == gatewayServicesWatchID:
|
||||
services, ok := u.Result.(*structs.IndexedGatewayServices)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid type for response: %T", u.Result)
|
||||
}
|
||||
|
||||
svcMap := make(map[structs.ServiceID]struct{})
|
||||
for _, svc := range services.Services {
|
||||
// Make sure to add every service to this map, we use it to cancel watches below.
|
||||
svcMap[svc.Service] = struct{}{}
|
||||
|
||||
// Watch the health endpoint to discover endpoints for the service
|
||||
if _, ok := snap.TerminatingGateway.WatchedServices[svc.Service]; !ok {
|
||||
ctx, cancel := context.WithCancel(s.ctx)
|
||||
err := s.cache.Notify(ctx, cachetype.HealthServicesName, &structs.ServiceSpecificRequest{
|
||||
Datacenter: s.source.Datacenter,
|
||||
QueryOptions: structs.QueryOptions{Token: s.token},
|
||||
ServiceName: svc.Service.ID,
|
||||
EnterpriseMeta: svc.Service.EnterpriseMeta,
|
||||
|
||||
// The gateway acts as the service's proxy, so we do NOT want to discover other proxies
|
||||
Connect: false,
|
||||
}, externalServiceIDPrefix+svc.Service.String(), s.ch)
|
||||
|
||||
if err != nil {
|
||||
logger.Error("failed to register watch for external-service",
|
||||
"service", svc.Service.String(),
|
||||
"error", err,
|
||||
)
|
||||
cancel()
|
||||
return err
|
||||
}
|
||||
snap.TerminatingGateway.WatchedServices[svc.Service] = cancel
|
||||
}
|
||||
|
||||
// Watch intentions with this service as their destination
|
||||
// The gateway will enforce intentions for connections to the service
|
||||
if _, ok := snap.TerminatingGateway.WatchedIntentions[svc.Service]; !ok {
|
||||
ctx, cancel := context.WithCancel(s.ctx)
|
||||
err := s.cache.Notify(ctx, cachetype.IntentionMatchName, &structs.IntentionQueryRequest{
|
||||
Datacenter: s.source.Datacenter,
|
||||
QueryOptions: structs.QueryOptions{Token: s.token},
|
||||
Match: &structs.IntentionQueryMatch{
|
||||
Type: structs.IntentionMatchDestination,
|
||||
Entries: []structs.IntentionMatchEntry{
|
||||
{
|
||||
Namespace: svc.Service.NamespaceOrDefault(),
|
||||
Name: svc.Service.ID,
|
||||
},
|
||||
},
|
||||
},
|
||||
}, serviceIntentionsIDPrefix+svc.Service.String(), s.ch)
|
||||
|
||||
if err != nil {
|
||||
logger.Error("failed to register watch for service-intentions",
|
||||
"service", svc.Service.String(),
|
||||
"error", err,
|
||||
)
|
||||
cancel()
|
||||
return err
|
||||
}
|
||||
snap.TerminatingGateway.WatchedIntentions[svc.Service] = cancel
|
||||
}
|
||||
|
||||
// Watch leaf certificate for the service
|
||||
// This cert is used to terminate mTLS connections on the service's behalf
|
||||
if _, ok := snap.TerminatingGateway.WatchedLeaves[svc.Service]; !ok {
|
||||
ctx, cancel := context.WithCancel(s.ctx)
|
||||
err := s.cache.Notify(ctx, cachetype.ConnectCALeafName, &cachetype.ConnectCALeafRequest{
|
||||
Datacenter: s.source.Datacenter,
|
||||
Token: s.token,
|
||||
Service: svc.Service.ID,
|
||||
EnterpriseMeta: svc.Service.EnterpriseMeta,
|
||||
}, serviceLeafIDPrefix+svc.Service.String(), s.ch)
|
||||
|
||||
if err != nil {
|
||||
logger.Error("failed to register watch for a service-leaf",
|
||||
"service", svc.Service.String(),
|
||||
"error", err,
|
||||
)
|
||||
cancel()
|
||||
return err
|
||||
}
|
||||
snap.TerminatingGateway.WatchedLeaves[svc.Service] = cancel
|
||||
}
|
||||
|
||||
// Watch service resolvers for the service
|
||||
// These are used to create clusters and endpoints for the service subsets
|
||||
if _, ok := snap.TerminatingGateway.WatchedResolvers[svc.Service]; !ok {
|
||||
ctx, cancel := context.WithCancel(s.ctx)
|
||||
err := s.cache.Notify(ctx, cachetype.ConfigEntriesName, &structs.ConfigEntryQuery{
|
||||
Datacenter: s.source.Datacenter,
|
||||
QueryOptions: structs.QueryOptions{Token: s.token},
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: svc.Service.ID,
|
||||
EnterpriseMeta: svc.Service.EnterpriseMeta,
|
||||
}, serviceResolverIDPrefix+svc.Service.String(), s.ch)
|
||||
|
||||
if err != nil {
|
||||
logger.Error("failed to register watch for a service-resolver",
|
||||
"service", svc.Service.String(),
|
||||
"error", err,
|
||||
)
|
||||
cancel()
|
||||
return err
|
||||
}
|
||||
snap.TerminatingGateway.WatchedResolvers[svc.Service] = cancel
|
||||
}
|
||||
}
|
||||
|
||||
// Cancel service instance watches for services that were not in the update
|
||||
for sid, cancelFn := range snap.TerminatingGateway.WatchedServices {
|
||||
if _, ok := svcMap[sid]; !ok {
|
||||
logger.Debug("canceling watch for service", "service", sid.String())
|
||||
delete(snap.TerminatingGateway.WatchedServices, sid)
|
||||
delete(snap.TerminatingGateway.ServiceGroups, sid)
|
||||
cancelFn()
|
||||
}
|
||||
}
|
||||
|
||||
// Cancel leaf cert watches for services that were not in the update
|
||||
for sid, cancelFn := range snap.TerminatingGateway.WatchedLeaves {
|
||||
if _, ok := svcMap[sid]; !ok {
|
||||
logger.Debug("canceling watch for leaf cert", "service", sid.String())
|
||||
delete(snap.TerminatingGateway.WatchedLeaves, sid)
|
||||
delete(snap.TerminatingGateway.ServiceLeaves, sid)
|
||||
cancelFn()
|
||||
}
|
||||
}
|
||||
|
||||
// Cancel service-resolver watches for services that were not in the update
|
||||
for sid, cancelFn := range snap.TerminatingGateway.WatchedResolvers {
|
||||
if _, ok := svcMap[sid]; !ok {
|
||||
logger.Debug("canceling watch for service-resolver", "service", sid.String())
|
||||
delete(snap.TerminatingGateway.WatchedResolvers, sid)
|
||||
delete(snap.TerminatingGateway.ServiceResolvers, sid)
|
||||
cancelFn()
|
||||
}
|
||||
}
|
||||
|
||||
// Cancel intention watches for services that were not in the update
|
||||
for sid, cancelFn := range snap.TerminatingGateway.WatchedIntentions {
|
||||
if _, ok := svcMap[sid]; !ok {
|
||||
logger.Debug("canceling watch for intention", "service", sid.String())
|
||||
delete(snap.TerminatingGateway.WatchedIntentions, sid)
|
||||
|
||||
// No additional deletions needed, since intentions aren't stored in snapshot
|
||||
|
||||
cancelFn()
|
||||
}
|
||||
}
|
||||
|
||||
case strings.HasPrefix(u.CorrelationID, externalServiceIDPrefix):
|
||||
resp, ok := u.Result.(*structs.IndexedCheckServiceNodes)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid type for response: %T", u.Result)
|
||||
}
|
||||
|
||||
sid := structs.ServiceIDFromString(strings.TrimPrefix(u.CorrelationID, externalServiceIDPrefix))
|
||||
|
||||
if len(resp.Nodes) > 0 {
|
||||
snap.TerminatingGateway.ServiceGroups[sid] = resp.Nodes
|
||||
} else if _, ok := snap.TerminatingGateway.ServiceGroups[sid]; ok {
|
||||
delete(snap.TerminatingGateway.ServiceGroups, sid)
|
||||
}
|
||||
|
||||
// Store leaf cert for watched service
|
||||
case strings.HasPrefix(u.CorrelationID, serviceLeafIDPrefix):
|
||||
leaf, ok := u.Result.(*structs.IssuedCert)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid type for response: %T", u.Result)
|
||||
}
|
||||
|
||||
sid := structs.ServiceIDFromString(strings.TrimPrefix(u.CorrelationID, serviceLeafIDPrefix))
|
||||
snap.TerminatingGateway.ServiceLeaves[sid] = leaf
|
||||
|
||||
case strings.HasPrefix(u.CorrelationID, "service-resolver:"):
|
||||
configEntries, ok := u.Result.(*structs.IndexedConfigEntries)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid type for response: %T", u.Result)
|
||||
}
|
||||
// There should only ever be one entry for a service resolver within a namespace
|
||||
if len(configEntries.Entries) == 1 {
|
||||
if resolver, ok := configEntries.Entries[0].(*structs.ServiceResolverConfigEntry); ok {
|
||||
snap.TerminatingGateway.ServiceResolvers[structs.NewServiceID(resolver.Name, &resolver.EnterpriseMeta)] = resolver
|
||||
}
|
||||
}
|
||||
|
||||
case strings.HasPrefix(u.CorrelationID, serviceIntentionsIDPrefix):
|
||||
// no-op: Intentions don't get stored in the snapshot, calls to ConnectAuthorize will fetch them from the cache
|
||||
|
||||
default:
|
||||
// do nothing
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
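handleUpdateTerminatingGateway above routes each cache update by its correlation-ID prefix and recovers the service ID from the suffix. A stand-alone sketch of that dispatch pattern, with illustrative prefixes and a hypothetical handler in place of the agent's real types:

package main

import (
	"fmt"
	"strings"
)

const (
	externalServicePrefix = "external-service:" // same shape as externalServiceIDPrefix above
	serviceLeafPrefix     = "service-leaf:"
)

// dispatch picks a handler based on the correlation-ID prefix and hands the
// suffix (the encoded service ID) to it, mirroring the switch in
// handleUpdateTerminatingGateway.
func dispatch(correlationID string) string {
	switch {
	case strings.HasPrefix(correlationID, externalServicePrefix):
		sid := strings.TrimPrefix(correlationID, externalServicePrefix)
		return "store service instances for " + sid
	case strings.HasPrefix(correlationID, serviceLeafPrefix):
		sid := strings.TrimPrefix(correlationID, serviceLeafPrefix)
		return "store leaf cert for " + sid
	default:
		return "ignore"
	}
}

func main() {
	fmt.Println(dispatch("external-service:db"))
	fmt.Println(dispatch("service-leaf:db"))
	fmt.Println(dispatch("datacenters"))
}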
func (s *state) handleUpdateMeshGateway(u cache.UpdateEvent, snap *ConfigSnapshot) error {
|
||||
if u.Err != nil {
|
||||
return fmt.Errorf("error filling agent cache: %v", u.Err)
|
||||
|
@ -900,6 +1159,8 @@ func (s *state) handleUpdateMeshGateway(u cache.UpdateEvent, snap *ConfigSnapsho
|
|||
for sid, cancelFn := range snap.MeshGateway.WatchedServices {
|
||||
if _, ok := svcMap[sid]; !ok {
|
||||
meshLogger.Debug("canceling watch for service", "service", sid.String())
|
||||
// TODO (gateways) Should the sid also be deleted from snap.MeshGateway.ServiceGroups?
|
||||
// Do those endpoints get cleaned up some other way?
|
||||
delete(snap.MeshGateway.WatchedServices, sid)
|
||||
cancelFn()
|
||||
}
|
||||
|
|
|
@ -206,6 +206,18 @@ func genVerifyLeafWatch(expectedService string, expectedDatacenter string) verif
|
|||
}
|
||||
}
|
||||
|
||||
func genVerifyResolverWatch(expectedService, expectedDatacenter, expectedKind string) verifyWatchRequest {
|
||||
return func(t testing.TB, cacheType string, request cache.Request) {
|
||||
require.Equal(t, cachetype.ConfigEntriesName, cacheType)
|
||||
|
||||
reqReal, ok := request.(*structs.ConfigEntryQuery)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, expectedDatacenter, reqReal.Datacenter)
|
||||
require.Equal(t, expectedService, reqReal.Name)
|
||||
require.Equal(t, expectedKind, reqReal.Kind)
|
||||
}
|
||||
}
|
||||
|
||||
func genVerifyIntentionWatch(expectedService string, expectedDatacenter string) verifyWatchRequest {
|
||||
return func(t testing.TB, cacheType string, request cache.Request) {
|
||||
require.Equal(t, cachetype.IntentionMatchName, cacheType)
|
||||
|
@ -597,7 +609,7 @@ func TestState_WatchesAndUpdates(t *testing.T) {
|
|||
},
|
||||
},
|
||||
verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) {
|
||||
require.True(t, snap.Valid(), "gateway with empty service list is vaild")
|
||||
require.True(t, snap.Valid(), "gateway with empty service list is valid")
|
||||
require.True(t, snap.ConnectProxy.IsEmpty())
|
||||
require.Equal(t, indexedRoots, snap.Roots)
|
||||
require.Empty(t, snap.MeshGateway.WatchedServices)
|
||||
|
@ -639,7 +651,7 @@ func TestState_WatchesAndUpdates(t *testing.T) {
|
|||
},
|
||||
},
|
||||
verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) {
|
||||
require.True(t, snap.Valid(), "gateway with service list is vaild")
|
||||
require.True(t, snap.Valid(), "gateway with service list is valid")
|
||||
require.Len(t, snap.MeshGateway.WatchedServices, 1)
|
||||
require.True(t, snap.MeshGateway.WatchedServicesSet)
|
||||
},
|
||||
|
@ -658,7 +670,7 @@ func TestState_WatchesAndUpdates(t *testing.T) {
|
|||
},
|
||||
},
|
||||
verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) {
|
||||
require.True(t, snap.Valid(), "gateway with service list is vaild")
|
||||
require.True(t, snap.Valid(), "gateway with service list is valid")
|
||||
require.Len(t, snap.MeshGateway.WatchedServices, 2)
|
||||
require.True(t, snap.MeshGateway.WatchedServicesSet)
|
||||
},
|
||||
|
@ -798,6 +810,252 @@ func TestState_WatchesAndUpdates(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
"terminating-gateway-initial": testCase{
|
||||
ns: structs.NodeService{
|
||||
Kind: structs.ServiceKindTerminatingGateway,
|
||||
ID: "terminating-gateway",
|
||||
Service: "terminating-gateway",
|
||||
Address: "10.0.1.1",
|
||||
},
|
||||
sourceDC: "dc1",
|
||||
stages: []verificationStage{
|
||||
verificationStage{
|
||||
requiredWatches: map[string]verifyWatchRequest{
|
||||
rootsWatchID: genVerifyRootsWatch("dc1"),
|
||||
gatewayServicesWatchID: genVerifyServiceSpecificRequest(gatewayServicesWatchID,
|
||||
"terminating-gateway", "", "dc1", false),
|
||||
},
|
||||
verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) {
|
||||
require.False(t, snap.Valid(), "gateway without root is not valid")
|
||||
require.True(t, snap.ConnectProxy.IsEmpty())
|
||||
require.True(t, snap.MeshGateway.IsEmpty())
|
||||
require.True(t, snap.IngressGateway.IsEmpty())
|
||||
},
|
||||
},
|
||||
verificationStage{
|
||||
events: []cache.UpdateEvent{
|
||||
rootWatchEvent(),
|
||||
},
|
||||
verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) {
|
||||
require.True(t, snap.Valid(), "gateway without services is valid")
|
||||
require.True(t, snap.ConnectProxy.IsEmpty())
|
||||
require.True(t, snap.MeshGateway.IsEmpty())
|
||||
require.True(t, snap.IngressGateway.IsEmpty())
|
||||
require.True(t, snap.TerminatingGateway.IsEmpty())
|
||||
require.Equal(t, indexedRoots, snap.Roots)
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"terminating-gateway-handle-update": testCase{
|
||||
ns: structs.NodeService{
|
||||
Kind: structs.ServiceKindTerminatingGateway,
|
||||
ID: "terminating-gateway",
|
||||
Service: "terminating-gateway",
|
||||
Address: "10.0.1.1",
|
||||
},
|
||||
sourceDC: "dc1",
|
||||
stages: []verificationStage{
|
||||
verificationStage{
|
||||
requiredWatches: map[string]verifyWatchRequest{
|
||||
rootsWatchID: genVerifyRootsWatch("dc1"),
|
||||
gatewayServicesWatchID: genVerifyServiceSpecificRequest(gatewayServicesWatchID,
|
||||
"terminating-gateway", "", "dc1", false),
|
||||
},
|
||||
events: []cache.UpdateEvent{
|
||||
rootWatchEvent(),
|
||||
cache.UpdateEvent{
|
||||
CorrelationID: gatewayServicesWatchID,
|
||||
Result: &structs.IndexedGatewayServices{
|
||||
Services: structs.GatewayServices{
|
||||
{
|
||||
Service: structs.NewServiceID("db", nil),
|
||||
Gateway: structs.NewServiceID("terminating-gateway", nil),
|
||||
},
|
||||
},
|
||||
},
|
||||
Err: nil,
|
||||
},
|
||||
},
|
||||
verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) {
|
||||
require.True(t, snap.Valid(), "gateway with service list is valid")
|
||||
require.Len(t, snap.TerminatingGateway.WatchedServices, 1)
|
||||
},
|
||||
},
|
||||
verificationStage{
|
||||
events: []cache.UpdateEvent{
|
||||
cache.UpdateEvent{
|
||||
CorrelationID: gatewayServicesWatchID,
|
||||
Result: &structs.IndexedGatewayServices{
|
||||
Services: structs.GatewayServices{
|
||||
{
|
||||
Service: structs.NewServiceID("db", nil),
|
||||
Gateway: structs.NewServiceID("terminating-gateway", nil),
|
||||
},
|
||||
{
|
||||
Service: structs.NewServiceID("billing", nil),
|
||||
Gateway: structs.NewServiceID("terminating-gateway", nil),
|
||||
},
|
||||
},
|
||||
},
|
||||
Err: nil,
|
||||
},
|
||||
},
|
||||
verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) {
|
||||
db := structs.NewServiceID("db", nil)
|
||||
billing := structs.NewServiceID("billing", nil)
|
||||
|
||||
require.True(t, snap.Valid(), "gateway with service list is valid")
|
||||
require.Len(t, snap.TerminatingGateway.WatchedServices, 2)
|
||||
require.Contains(t, snap.TerminatingGateway.WatchedServices, db)
|
||||
require.Contains(t, snap.TerminatingGateway.WatchedServices, billing)
|
||||
|
||||
require.Len(t, snap.TerminatingGateway.WatchedIntentions, 2)
|
||||
require.Contains(t, snap.TerminatingGateway.WatchedIntentions, db)
|
||||
require.Contains(t, snap.TerminatingGateway.WatchedIntentions, billing)
|
||||
|
||||
require.Len(t, snap.TerminatingGateway.WatchedLeaves, 2)
|
||||
require.Contains(t, snap.TerminatingGateway.WatchedLeaves, db)
|
||||
require.Contains(t, snap.TerminatingGateway.WatchedLeaves, billing)
|
||||
|
||||
require.Len(t, snap.TerminatingGateway.WatchedResolvers, 2)
|
||||
require.Contains(t, snap.TerminatingGateway.WatchedResolvers, db)
|
||||
require.Contains(t, snap.TerminatingGateway.WatchedResolvers, billing)
|
||||
},
|
||||
},
|
||||
verificationStage{
|
||||
requiredWatches: map[string]verifyWatchRequest{
|
||||
"external-service:db": genVerifyServiceWatch("db", "", "dc1", false),
|
||||
},
|
||||
events: []cache.UpdateEvent{
|
||||
cache.UpdateEvent{
|
||||
CorrelationID: "external-service:db",
|
||||
Result: &structs.IndexedCheckServiceNodes{
|
||||
Nodes: structs.CheckServiceNodes{
|
||||
{
|
||||
Node: &structs.Node{
|
||||
Node: "node1",
|
||||
Address: "127.0.0.1",
|
||||
},
|
||||
Service: &structs.NodeService{
|
||||
ID: "db",
|
||||
Service: "db",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Err: nil,
|
||||
},
|
||||
},
|
||||
verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) {
|
||||
require.Len(t, snap.TerminatingGateway.ServiceGroups, 1)
|
||||
require.Equal(t, snap.TerminatingGateway.ServiceGroups[structs.NewServiceID("db", nil)],
|
||||
structs.CheckServiceNodes{
|
||||
{
|
||||
Node: &structs.Node{
|
||||
Node: "node1",
|
||||
Address: "127.0.0.1",
|
||||
},
|
||||
Service: &structs.NodeService{
|
||||
ID: "db",
|
||||
Service: "db",
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
},
|
||||
},
|
||||
verificationStage{
|
||||
requiredWatches: map[string]verifyWatchRequest{
|
||||
"service-leaf:db": genVerifyLeafWatch("db", "dc1"),
|
||||
},
|
||||
events: []cache.UpdateEvent{
|
||||
cache.UpdateEvent{
|
||||
CorrelationID: "service-leaf:db",
|
||||
Result: issuedCert,
|
||||
Err: nil,
|
||||
},
|
||||
},
|
||||
verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) {
|
||||
require.Equal(t, snap.TerminatingGateway.ServiceLeaves[structs.NewServiceID("db", nil)], issuedCert)
|
||||
},
|
||||
},
|
||||
verificationStage{
|
||||
requiredWatches: map[string]verifyWatchRequest{
|
||||
"service-resolver:db": genVerifyResolverWatch("db", "dc1", structs.ServiceResolver),
|
||||
},
|
||||
events: []cache.UpdateEvent{
|
||||
cache.UpdateEvent{
|
||||
CorrelationID: "service-resolver:db",
|
||||
Result: &structs.IndexedConfigEntries{
|
||||
Kind: structs.ServiceResolver,
|
||||
Entries: []structs.ConfigEntry{
|
||||
&structs.ServiceResolverConfigEntry{
|
||||
Name: "db",
|
||||
Kind: structs.ServiceResolver,
|
||||
Redirect: &structs.ServiceResolverRedirect{
|
||||
Service: "db",
|
||||
Datacenter: "dc2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Err: nil,
|
||||
},
|
||||
},
|
||||
verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) {
|
||||
want := &structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "db",
|
||||
Redirect: &structs.ServiceResolverRedirect{
|
||||
Service: "db",
|
||||
Datacenter: "dc2",
|
||||
},
|
||||
}
|
||||
require.Equal(t, want, snap.TerminatingGateway.ServiceResolvers[structs.NewServiceID("db", nil)])
|
||||
},
|
||||
},
|
||||
verificationStage{
|
||||
events: []cache.UpdateEvent{
|
||||
cache.UpdateEvent{
|
||||
CorrelationID: gatewayServicesWatchID,
|
||||
Result: &structs.IndexedGatewayServices{
|
||||
Services: structs.GatewayServices{
|
||||
{
|
||||
Service: structs.NewServiceID("billing", nil),
|
||||
Gateway: structs.NewServiceID("terminating-gateway", nil),
|
||||
},
|
||||
},
|
||||
},
|
||||
Err: nil,
|
||||
},
|
||||
},
|
||||
verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) {
|
||||
billing := structs.NewServiceID("billing", nil)
|
||||
|
||||
require.True(t, snap.Valid(), "gateway with service list is valid")
|
||||
|
||||
// All the watches should have been cancelled for db
|
||||
require.Len(t, snap.TerminatingGateway.WatchedServices, 1)
|
||||
require.Contains(t, snap.TerminatingGateway.WatchedServices, billing)
|
||||
|
||||
require.Len(t, snap.TerminatingGateway.WatchedIntentions, 1)
|
||||
require.Contains(t, snap.TerminatingGateway.WatchedIntentions, billing)
|
||||
|
||||
require.Len(t, snap.TerminatingGateway.WatchedLeaves, 1)
|
||||
require.Contains(t, snap.TerminatingGateway.WatchedLeaves, billing)
|
||||
|
||||
require.Len(t, snap.TerminatingGateway.WatchedResolvers, 1)
|
||||
require.Contains(t, snap.TerminatingGateway.WatchedResolvers, billing)
|
||||
|
||||
// There was no update event for billing's leaf/endpoints, so length is 0
|
||||
require.Len(t, snap.TerminatingGateway.ServiceGroups, 0)
|
||||
require.Len(t, snap.TerminatingGateway.ServiceLeaves, 0)
|
||||
require.Len(t, snap.TerminatingGateway.ServiceResolvers, 0)
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"connect-proxy": newConnectProxyCase(structs.MeshGatewayModeDefault),
|
||||
"connect-proxy-mesh-gateway-local": newConnectProxyCase(structs.MeshGatewayModeLocal),
|
||||
}
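Note: the per-service watches exercised above are keyed by correlation IDs of the form "<prefix>:<service>" ("external-service:db", "service-leaf:db", "service-resolver:db"). A hedged sketch of how such an ID can be split back into its service name; the helper below is illustrative only and not the actual state-machine dispatch code:

// Illustrative: recover the service portion of a correlation ID.
// Assumes: import "strings"
func serviceFromCorrelationID(id, prefix string) (string, bool) {
	if !strings.HasPrefix(id, prefix) {
		return "", false
	}
	return strings.TrimPrefix(id, prefix), true
}

// serviceFromCorrelationID("service-leaf:db", "service-leaf:") => "db", true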
|
||||
|
|
|
@ -3,7 +3,9 @@ package proxycfg
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
@ -1446,6 +1448,69 @@ func TestConfigSnapshotExposeConfig(t testing.T) *ConfigSnapshot {
|
|||
}
|
||||
}
|
||||
|
||||
func TestConfigSnapshotTerminatingGateway(t testing.T) *ConfigSnapshot {
|
||||
return testConfigSnapshotTerminatingGateway(t, true)
|
||||
}
|
||||
|
||||
func TestConfigSnapshotTerminatingGatewayNoServices(t testing.T) *ConfigSnapshot {
|
||||
return testConfigSnapshotTerminatingGateway(t, false)
|
||||
}
|
||||
|
||||
func testConfigSnapshotTerminatingGateway(t testing.T, populateServices bool) *ConfigSnapshot {
|
||||
roots, _ := TestCerts(t)
|
||||
|
||||
snap := &ConfigSnapshot{
|
||||
Kind: structs.ServiceKindTerminatingGateway,
|
||||
Service: "terminating-gateway",
|
||||
ProxyID: structs.NewServiceID("terminating-gateway", nil),
|
||||
Address: "1.2.3.4",
|
||||
TaggedAddresses: map[string]structs.ServiceAddress{
|
||||
structs.TaggedAddressWAN: structs.ServiceAddress{
|
||||
Address: "198.18.0.1",
|
||||
Port: 443,
|
||||
},
|
||||
},
|
||||
Port: 8443,
|
||||
Roots: roots,
|
||||
Datacenter: "dc1",
|
||||
}
|
||||
if populateServices {
|
||||
web := structs.NewServiceID("web", nil)
|
||||
webNodes := TestUpstreamNodes(t)
|
||||
webNodes[0].Service.Meta = map[string]string{
|
||||
"version": "1",
|
||||
}
|
||||
webNodes[1].Service.Meta = map[string]string{
|
||||
"version": "2",
|
||||
}
|
||||
|
||||
api := structs.NewServiceID("api", nil)
|
||||
apiNodes := TestUpstreamNodes(t)
|
||||
for i := 0; i < len(apiNodes); i++ {
|
||||
apiNodes[i].Service.Service = "api"
|
||||
apiNodes[i].Service.Port = 8081
|
||||
}
|
||||
|
||||
snap.TerminatingGateway = configSnapshotTerminatingGateway{
|
||||
ServiceGroups: map[structs.ServiceID]structs.CheckServiceNodes{
|
||||
web: webNodes,
|
||||
api: apiNodes,
|
||||
},
|
||||
}
|
||||
snap.TerminatingGateway.ServiceLeaves = map[structs.ServiceID]*structs.IssuedCert{
|
||||
structs.NewServiceID("web", nil): {
|
||||
CertPEM: golden(t, "test-leaf-cert"),
|
||||
PrivateKeyPEM: golden(t, "test-leaf-key"),
|
||||
},
|
||||
structs.NewServiceID("api", nil): {
|
||||
CertPEM: golden(t, "alt-test-leaf-cert"),
|
||||
PrivateKeyPEM: golden(t, "alt-test-leaf-key"),
|
||||
},
|
||||
}
|
||||
}
|
||||
return snap
|
||||
}
|
||||
|
||||
func TestConfigSnapshotGRPCExposeHTTP1(t testing.T) *ConfigSnapshot {
|
||||
return &ConfigSnapshot{
|
||||
Kind: structs.ServiceKindConnectProxy,
|
||||
|
@ -1572,3 +1637,14 @@ func (ct *ControllableCacheType) RegisterOptions() cache.RegisterOptions {
|
|||
RefreshTimeout: 10 * time.Minute,
|
||||
}
|
||||
}
|
||||
|
||||
// golden is used to read golden files stored in consul/agent/xds/testdata
|
||||
func golden(t testing.T, name string) string {
|
||||
t.Helper()
|
||||
|
||||
golden := filepath.Join("../xds/testdata", name+".golden")
|
||||
expected, err := ioutil.ReadFile(golden)
|
||||
require.NoError(t, err)
|
||||
|
||||
return string(expected)
|
||||
}
|
||||
|
|
|
@ -264,7 +264,7 @@ func (e *TerminatingGatewayConfigEntry) CanRead(authz acl.Authorizer) bool {
|
|||
var authzContext acl.AuthorizerContext
|
||||
e.FillAuthzContext(&authzContext)
|
||||
|
||||
return authz.OperatorRead(&authzContext) == acl.Allow
|
||||
return authz.ServiceRead(e.Name, &authzContext) == acl.Allow
|
||||
}
|
||||
|
||||
func (e *TerminatingGatewayConfigEntry) CanWrite(authz acl.Authorizer) bool {
|
||||
|
@ -292,13 +292,14 @@ func (e *TerminatingGatewayConfigEntry) GetEnterpriseMeta() *EnterpriseMeta {
|
|||
|
||||
// GatewayService is used to associate gateways with their linked services.
|
||||
type GatewayService struct {
|
||||
Gateway ServiceID
|
||||
Service ServiceID
|
||||
GatewayKind ServiceKind
|
||||
Port int
|
||||
CAFile string
|
||||
CertFile string
|
||||
KeyFile string
|
||||
Gateway ServiceID
|
||||
Service ServiceID
|
||||
GatewayKind ServiceKind
|
||||
Port int
|
||||
CAFile string
|
||||
CertFile string
|
||||
KeyFile string
|
||||
FromWildcard bool
|
||||
RaftIndex
|
||||
}
|
||||
|
||||
|
|
|
@ -30,6 +30,8 @@ func (s *Server) clustersFromSnapshot(cfgSnap *proxycfg.ConfigSnapshot, _ string
|
|||
switch cfgSnap.Kind {
|
||||
case structs.ServiceKindConnectProxy:
|
||||
return s.clustersFromSnapshotConnectProxy(cfgSnap)
|
||||
case structs.ServiceKindTerminatingGateway:
|
||||
return s.clustersFromSnapshotTerminatingGateway(cfgSnap)
|
||||
case structs.ServiceKindMeshGateway:
|
||||
return s.clustersFromSnapshotMeshGateway(cfgSnap)
|
||||
case structs.ServiceKindIngressGateway:
|
||||
|
@ -117,6 +119,12 @@ func makeExposeClusterName(destinationPort int) string {
|
|||
return fmt.Sprintf("exposed_cluster_%d", destinationPort)
|
||||
}
|
||||
|
||||
// clustersFromSnapshotTerminatingGateway returns the xDS API representation of the "clusters"
|
||||
// for a terminating gateway. This will include 1 cluster per service and service subset.
|
||||
func (s *Server) clustersFromSnapshotTerminatingGateway(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) {
|
||||
return s.clustersFromServicesAndResolvers(cfgSnap, cfgSnap.TerminatingGateway.ServiceGroups, cfgSnap.TerminatingGateway.ServiceResolvers)
|
||||
}
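Note: the cluster names produced for a terminating gateway are the Connect SNI names of each linked service and subset, built with the same connect.ServiceSNI call used below and matched by the listener filter chains and golden files later in this change. A rough sketch of the naming (the trust domain here is the placeholder UUID used by the test data):

// Assumes: import "github.com/hashicorp/consul/agent/connect"
trustDomain := "11111111-2222-3333-4444-555555555555.consul"
connect.ServiceSNI("web", "", "default", "dc1", trustDomain)
// => "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
connect.ServiceSNI("web", "v1", "default", "dc1", trustDomain)
// => "v1.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"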
|
||||
|
||||
// clustersFromSnapshotMeshGateway returns the xDS API representation of the "clusters"
|
||||
// for a mesh gateway. This will include 1 cluster per remote datacenter as well as
|
||||
// 1 cluster for each service subset.
|
||||
|
@ -133,7 +141,7 @@ func (s *Server) clustersFromSnapshotMeshGateway(cfgSnap *proxycfg.ConfigSnapsho
|
|||
}
|
||||
clusterName := connect.DatacenterSNI(dc, cfgSnap.Roots.TrustDomain)
|
||||
|
||||
cluster, err := s.makeMeshGatewayCluster(clusterName, cfgSnap)
|
||||
cluster, err := s.makeGatewayCluster(clusterName, cfgSnap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -145,7 +153,7 @@ func (s *Server) clustersFromSnapshotMeshGateway(cfgSnap *proxycfg.ConfigSnapsho
|
|||
for _, dc := range datacenters {
|
||||
clusterName := cfgSnap.ServerSNIFn(dc, "")
|
||||
|
||||
cluster, err := s.makeMeshGatewayCluster(clusterName, cfgSnap)
|
||||
cluster, err := s.makeGatewayCluster(clusterName, cfgSnap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -156,7 +164,7 @@ func (s *Server) clustersFromSnapshotMeshGateway(cfgSnap *proxycfg.ConfigSnapsho
|
|||
for _, srv := range cfgSnap.MeshGateway.ConsulServers {
|
||||
clusterName := cfgSnap.ServerSNIFn(cfgSnap.Datacenter, srv.Node.Node)
|
||||
|
||||
cluster, err := s.makeMeshGatewayCluster(clusterName, cfgSnap)
|
||||
cluster, err := s.makeGatewayCluster(clusterName, cfgSnap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -164,21 +172,37 @@ func (s *Server) clustersFromSnapshotMeshGateway(cfgSnap *proxycfg.ConfigSnapsho
|
|||
}
|
||||
}
|
||||
|
||||
// generate the per-service clusters
|
||||
for svc, _ := range cfgSnap.MeshGateway.ServiceGroups {
|
||||
// generate the per-service/subset clusters
|
||||
c, err := s.clustersFromServicesAndResolvers(cfgSnap, cfgSnap.MeshGateway.ServiceGroups, cfgSnap.MeshGateway.ServiceResolvers)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clusters = append(clusters, c...)
|
||||
|
||||
return clusters, nil
|
||||
}
|
||||
|
||||
func (s *Server) clustersFromServicesAndResolvers(
|
||||
cfgSnap *proxycfg.ConfigSnapshot,
|
||||
services map[structs.ServiceID]structs.CheckServiceNodes,
|
||||
resolvers map[structs.ServiceID]*structs.ServiceResolverConfigEntry) ([]proto.Message, error) {
|
||||
|
||||
clusters := make([]proto.Message, 0, len(services))
|
||||
|
||||
for svc, _ := range services {
|
||||
clusterName := connect.ServiceSNI(svc.ID, "", svc.NamespaceOrDefault(), cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain)
|
||||
resolver, hasResolver := cfgSnap.MeshGateway.ServiceResolvers[svc]
|
||||
resolver, hasResolver := resolvers[svc]
|
||||
|
||||
// Create the cluster for default/unnamed services
|
||||
var cluster *envoy.Cluster
|
||||
var err error
|
||||
if hasResolver {
|
||||
cluster, err = s.makeMeshGatewayClusterWithConnectTimeout(clusterName, cfgSnap, resolver.ConnectTimeout)
|
||||
cluster, err = s.makeGatewayClusterWithConnectTimeout(clusterName, cfgSnap, resolver.ConnectTimeout)
|
||||
} else {
|
||||
cluster, err = s.makeMeshGatewayCluster(clusterName, cfgSnap)
|
||||
cluster, err = s.makeGatewayCluster(clusterName, cfgSnap)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed to make %s cluster: %v", cfgSnap.Kind, err)
|
||||
}
|
||||
clusters = append(clusters, cluster)
|
||||
|
||||
|
@ -188,9 +212,9 @@ func (s *Server) clustersFromSnapshotMeshGateway(cfgSnap *proxycfg.ConfigSnapsho
|
|||
for subsetName := range resolver.Subsets {
|
||||
clusterName := connect.ServiceSNI(svc.ID, subsetName, svc.NamespaceOrDefault(), cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain)
|
||||
|
||||
cluster, err := s.makeMeshGatewayClusterWithConnectTimeout(clusterName, cfgSnap, resolver.ConnectTimeout)
|
||||
cluster, err := s.makeGatewayClusterWithConnectTimeout(clusterName, cfgSnap, resolver.ConnectTimeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed to make %s cluster: %v", cfgSnap.Kind, err)
|
||||
}
|
||||
clusters = append(clusters, cluster)
|
||||
}
|
||||
|
@ -325,7 +349,7 @@ func (s *Server) makeUpstreamClusterForPreparedQuery(upstream structs.Upstream,
|
|||
|
||||
// Enable TLS upstream with the configured client certificate.
|
||||
c.TlsContext = &envoyauth.UpstreamTlsContext{
|
||||
CommonTlsContext: makeCommonTLSContext(cfgSnap),
|
||||
CommonTlsContext: makeCommonTLSContext(cfgSnap, cfgSnap.Leaf()),
|
||||
Sni: sni,
|
||||
}
|
||||
|
||||
|
@ -436,7 +460,7 @@ func (s *Server) makeUpstreamClustersForDiscoveryChain(
|
|||
|
||||
// Enable TLS upstream with the configured client certificate.
|
||||
c.TlsContext = &envoyauth.UpstreamTlsContext{
|
||||
CommonTlsContext: makeCommonTLSContext(cfgSnap),
|
||||
CommonTlsContext: makeCommonTLSContext(cfgSnap, cfgSnap.Leaf()),
|
||||
Sni: sni,
|
||||
}
|
||||
|
||||
|
@ -504,20 +528,20 @@ func makeClusterFromUserConfig(configJSON string) (*envoy.Cluster, error) {
|
|||
return &c, err
|
||||
}
|
||||
|
||||
func (s *Server) makeMeshGatewayCluster(clusterName string, cfgSnap *proxycfg.ConfigSnapshot) (*envoy.Cluster, error) {
|
||||
return s.makeMeshGatewayClusterWithConnectTimeout(clusterName, cfgSnap, 0)
|
||||
func (s *Server) makeGatewayCluster(clusterName string, cfgSnap *proxycfg.ConfigSnapshot) (*envoy.Cluster, error) {
|
||||
return s.makeGatewayClusterWithConnectTimeout(clusterName, cfgSnap, 0)
|
||||
}
|
||||
|
||||
// makeMeshGatewayClusterWithConnectTimeout initializes a mesh gateway cluster
|
||||
// makeGatewayClusterWithConnectTimeout initializes a gateway cluster
|
||||
// with the specified connect timeout. If the timeout is 0, the connect timeout
|
||||
// defaults to use the mesh gateway timeout.
|
||||
func (s *Server) makeMeshGatewayClusterWithConnectTimeout(clusterName string, cfgSnap *proxycfg.ConfigSnapshot,
|
||||
// defaults to use the configured gateway timeout.
|
||||
func (s *Server) makeGatewayClusterWithConnectTimeout(clusterName string, cfgSnap *proxycfg.ConfigSnapshot,
|
||||
connectTimeout time.Duration) (*envoy.Cluster, error) {
|
||||
cfg, err := ParseGatewayConfig(cfgSnap.Proxy.Config)
|
||||
if err != nil {
|
||||
// Don't hard fail on a config typo, just warn. The parse func returns
|
||||
// default config if there is an error so it's safe to continue.
|
||||
s.Logger.Warn("failed to parse mesh gateway config", "error", err)
|
||||
s.Logger.Warn("failed to parse gateway config", "error", err)
|
||||
}
|
||||
|
||||
if connectTimeout <= 0 {
|
||||
|
|
|
@ -418,6 +418,73 @@ func TestClustersFromSnapshot(t *testing.T) {
|
|||
create: proxycfg.TestConfigSnapshotIngress_SplitterWithResolverRedirectMultiDC,
|
||||
setup: nil,
|
||||
},
|
||||
{
|
||||
name: "terminating-gateway",
|
||||
create: proxycfg.TestConfigSnapshotTerminatingGateway,
|
||||
setup: nil,
|
||||
},
|
||||
{
|
||||
name: "terminating-gateway-no-services",
|
||||
create: proxycfg.TestConfigSnapshotTerminatingGatewayNoServices,
|
||||
setup: nil,
|
||||
},
|
||||
{
|
||||
name: "terminating-gateway-service-subsets",
|
||||
create: proxycfg.TestConfigSnapshotTerminatingGateway,
|
||||
setup: func(snap *proxycfg.ConfigSnapshot) {
|
||||
snap.TerminatingGateway.ServiceResolvers = map[structs.ServiceID]*structs.ServiceResolverConfigEntry{
|
||||
structs.NewServiceID("web", nil): {
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "web",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"v1": {
|
||||
Filter: "Service.Meta.Version == 1",
|
||||
},
|
||||
"v2": {
|
||||
Filter: "Service.Meta.Version == 2",
|
||||
OnlyPassing: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "terminating-gateway-ignore-extra-resolvers",
|
||||
create: proxycfg.TestConfigSnapshotTerminatingGateway,
|
||||
setup: func(snap *proxycfg.ConfigSnapshot) {
|
||||
snap.TerminatingGateway.ServiceResolvers = map[structs.ServiceID]*structs.ServiceResolverConfigEntry{
|
||||
structs.NewServiceID("web", nil): {
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "web",
|
||||
DefaultSubset: "v2",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"v1": {
|
||||
Filter: "Service.Meta.Version == 1",
|
||||
},
|
||||
"v2": {
|
||||
Filter: "Service.Meta.Version == 2",
|
||||
OnlyPassing: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
structs.NewServiceID("notfound", nil): {
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "notfound",
|
||||
DefaultSubset: "v2",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"v1": {
|
||||
Filter: "Service.Meta.Version == 1",
|
||||
},
|
||||
"v2": {
|
||||
Filter: "Service.Meta.Version == 2",
|
||||
OnlyPassing: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
|
|
|
@ -3,7 +3,6 @@ package xds
|
|||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
envoy "github.com/envoyproxy/go-control-plane/envoy/api/v2"
|
||||
envoycore "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
|
||||
envoyendpoint "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint"
|
||||
|
@ -30,6 +29,8 @@ func (s *Server) endpointsFromSnapshot(cfgSnap *proxycfg.ConfigSnapshot, _ strin
|
|||
switch cfgSnap.Kind {
|
||||
case structs.ServiceKindConnectProxy:
|
||||
return s.endpointsFromSnapshotConnectProxy(cfgSnap)
|
||||
case structs.ServiceKindTerminatingGateway:
|
||||
return s.endpointsFromSnapshotTerminatingGateway(cfgSnap)
|
||||
case structs.ServiceKindMeshGateway:
|
||||
return s.endpointsFromSnapshotMeshGateway(cfgSnap)
|
||||
case structs.ServiceKindIngressGateway:
|
||||
|
@ -106,6 +107,10 @@ func (s *Server) filterSubsetEndpoints(subset *structs.ServiceResolverSubset, en
|
|||
return endpoints, nil
|
||||
}
|
||||
|
||||
func (s *Server) endpointsFromSnapshotTerminatingGateway(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) {
|
||||
return s.endpointsFromServicesAndResolvers(cfgSnap, cfgSnap.TerminatingGateway.ServiceGroups, cfgSnap.TerminatingGateway.ServiceResolvers)
|
||||
}
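Note: one ClusterLoadAssignment is emitted per linked service plus one per subset on its service-resolver; when the resolver sets a DefaultSubset, the unnamed assignment is replaced by that subset's endpoint group. A hedged sketch of the result for the "web" fixture with DefaultSubset "v2" (names and addresses mirror the terminating-gateway-default-service-subset golden file below):

// Illustrative mapping only.
assignments := map[string][]string{
	"web.default.dc1.internal.<trust-domain>.consul":    {"10.10.1.2:8080"}, // overridden by v2
	"v1.web.default.dc1.internal.<trust-domain>.consul": {"10.10.1.1:8080"},
	"v2.web.default.dc1.internal.<trust-domain>.consul": {"10.10.1.2:8080"},
}
_ = assignments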
|
||||
|
||||
func (s *Server) endpointsFromSnapshotMeshGateway(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) {
|
||||
datacenters := cfgSnap.MeshGateway.Datacenters()
|
||||
resources := make([]proto.Message, 0, len(datacenters)+len(cfgSnap.MeshGateway.ServiceGroups))
|
||||
|
@ -192,38 +197,53 @@ func (s *Server) endpointsFromSnapshotMeshGateway(cfgSnap *proxycfg.ConfigSnapsh
|
|||
}
|
||||
|
||||
// Generate the endpoints for each service and its subsets
|
||||
for svc, endpoints := range cfgSnap.MeshGateway.ServiceGroups {
|
||||
clusterEndpoints := make(map[string]loadAssignmentEndpointGroup)
|
||||
clusterEndpoints[UnnamedSubset] = loadAssignmentEndpointGroup{Endpoints: endpoints, OnlyPassing: false}
|
||||
e, err := s.endpointsFromServicesAndResolvers(cfgSnap, cfgSnap.MeshGateway.ServiceGroups, cfgSnap.MeshGateway.ServiceResolvers)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resources = append(resources, e...)
|
||||
|
||||
return resources, nil
|
||||
}
|
||||
|
||||
func (s *Server) endpointsFromServicesAndResolvers(
|
||||
cfgSnap *proxycfg.ConfigSnapshot,
|
||||
services map[structs.ServiceID]structs.CheckServiceNodes,
|
||||
resolvers map[structs.ServiceID]*structs.ServiceResolverConfigEntry) ([]proto.Message, error) {
|
||||
|
||||
resources := make([]proto.Message, 0, len(services))
|
||||
|
||||
// generate the endpoints for the linked service groups
|
||||
for svc, endpoints := range services {
|
||||
clusterEndpoints := make(map[string][]loadAssignmentEndpointGroup)
|
||||
clusterEndpoints[UnnamedSubset] = []loadAssignmentEndpointGroup{{Endpoints: endpoints, OnlyPassing: false}}
|
||||
|
||||
// Collect all of the loadAssignmentEndpointGroups for the various subsets. We do this before generating
|
||||
// the endpoints for the default/unnamed subset so that we can take into account the DefaultSubset on the
|
||||
// service-resolver which may prevent the default/unnamed cluster from creating endpoints for all service
|
||||
// instances.
|
||||
if resolver, hasResolver := cfgSnap.MeshGateway.ServiceResolvers[svc]; hasResolver {
|
||||
if resolver, hasResolver := resolvers[svc]; hasResolver {
|
||||
for subsetName, subset := range resolver.Subsets {
|
||||
subsetEndpoints, err := s.filterSubsetEndpoints(&subset, endpoints)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
group := loadAssignmentEndpointGroup{Endpoints: subsetEndpoints, OnlyPassing: subset.OnlyPassing}
|
||||
clusterEndpoints[subsetName] = group
|
||||
groups := []loadAssignmentEndpointGroup{{Endpoints: subsetEndpoints, OnlyPassing: subset.OnlyPassing}}
|
||||
clusterEndpoints[subsetName] = groups
|
||||
|
||||
// if this subset is the default then override the unnamed subset with this configuration
|
||||
if subsetName == resolver.DefaultSubset {
|
||||
clusterEndpoints[UnnamedSubset] = group
|
||||
clusterEndpoints[UnnamedSubset] = groups
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// now generate the load assignment for all subsets
|
||||
for subsetName, group := range clusterEndpoints {
|
||||
for subsetName, groups := range clusterEndpoints {
|
||||
clusterName := connect.ServiceSNI(svc.ID, subsetName, svc.NamespaceOrDefault(), cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain)
|
||||
la := makeLoadAssignment(
|
||||
clusterName,
|
||||
[]loadAssignmentEndpointGroup{
|
||||
group,
|
||||
},
|
||||
groups,
|
||||
cfgSnap.Datacenter,
|
||||
)
|
||||
resources = append(resources, la)
|
||||
|
|
|
@ -456,6 +456,86 @@ func Test_endpointsFromSnapshot(t *testing.T) {
|
|||
create: proxycfg.TestConfigSnapshotIngress_SplitterWithResolverRedirectMultiDC,
|
||||
setup: nil,
|
||||
},
|
||||
{
|
||||
name: "terminating-gateway",
|
||||
create: proxycfg.TestConfigSnapshotTerminatingGateway,
|
||||
setup: nil,
|
||||
},
|
||||
{
|
||||
name: "terminating-gateway-no-services",
|
||||
create: proxycfg.TestConfigSnapshotTerminatingGatewayNoServices,
|
||||
setup: nil,
|
||||
},
|
||||
{
|
||||
name: "terminating-gateway-service-subsets",
|
||||
create: proxycfg.TestConfigSnapshotTerminatingGateway,
|
||||
setup: func(snap *proxycfg.ConfigSnapshot) {
|
||||
snap.TerminatingGateway.ServiceResolvers = map[structs.ServiceID]*structs.ServiceResolverConfigEntry{
|
||||
structs.NewServiceID("web", nil): {
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "web",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"v1": {
|
||||
Filter: "Service.Meta.version == 1",
|
||||
},
|
||||
"v2": {
|
||||
Filter: "Service.Meta.version == 2",
|
||||
OnlyPassing: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
structs.NewServiceID("web", nil): {
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "web",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"v1": {
|
||||
Filter: "Service.Meta.version == 1",
|
||||
},
|
||||
"v2": {
|
||||
Filter: "Service.Meta.version == 2",
|
||||
OnlyPassing: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "terminating-gateway-default-service-subset",
|
||||
create: proxycfg.TestConfigSnapshotTerminatingGateway,
|
||||
setup: func(snap *proxycfg.ConfigSnapshot) {
|
||||
snap.TerminatingGateway.ServiceResolvers = map[structs.ServiceID]*structs.ServiceResolverConfigEntry{
|
||||
structs.NewServiceID("web", nil): &structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "web",
|
||||
DefaultSubset: "v2",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"v1": {
|
||||
Filter: "Service.Meta.version == 1",
|
||||
},
|
||||
"v2": {
|
||||
Filter: "Service.Meta.version == 2",
|
||||
OnlyPassing: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
structs.NewServiceID("web", nil): &structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "web",
|
||||
DefaultSubset: "v2",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"v1": {
|
||||
Filter: "Service.Meta.version == 1",
|
||||
},
|
||||
"v2": {
|
||||
Filter: "Service.Meta.version == 2",
|
||||
OnlyPassing: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/hashicorp/consul/logging"
|
||||
"net"
|
||||
"net/url"
|
||||
"regexp"
|
||||
|
@ -38,8 +39,10 @@ func (s *Server) listenersFromSnapshot(cfgSnap *proxycfg.ConfigSnapshot, token s
|
|||
switch cfgSnap.Kind {
|
||||
case structs.ServiceKindConnectProxy:
|
||||
return s.listenersFromSnapshotConnectProxy(cfgSnap, token)
|
||||
case structs.ServiceKindTerminatingGateway:
|
||||
return s.listenersFromSnapshotGateway(cfgSnap, token)
|
||||
case structs.ServiceKindMeshGateway:
|
||||
return s.listenersFromSnapshotMeshGateway(cfgSnap)
|
||||
return s.listenersFromSnapshotGateway(cfgSnap, token)
|
||||
case structs.ServiceKindIngressGateway:
|
||||
return s.listenersFromSnapshotIngressGateway(cfgSnap)
|
||||
default:
|
||||
|
@ -181,8 +184,8 @@ func parseCheckPath(check structs.CheckType) (structs.ExposePath, error) {
|
|||
return path, nil
|
||||
}
|
||||
|
||||
// listenersFromSnapshotMeshGateway returns the "listener" for a mesh-gateway service
|
||||
func (s *Server) listenersFromSnapshotMeshGateway(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) {
|
||||
// listenersFromSnapshotGateway returns the "listener" for a terminating-gateway or mesh-gateway service
|
||||
func (s *Server) listenersFromSnapshotGateway(cfgSnap *proxycfg.ConfigSnapshot, token string) ([]proto.Message, error) {
|
||||
cfg, err := ParseGatewayConfig(cfgSnap.Proxy.Config)
|
||||
if err != nil {
|
||||
// Don't hard fail on a config typo, just warn. The parse func returns
|
||||
|
@ -190,8 +193,14 @@ func (s *Server) listenersFromSnapshotMeshGateway(cfgSnap *proxycfg.ConfigSnapsh
|
|||
s.Logger.Warn("failed to parse Connect.Proxy.Config", "error", err)
|
||||
}
|
||||
|
||||
// TODO - prevent invalid configurations of binding to the same port/addr
|
||||
// twice including with the any addresses
|
||||
// Prevent invalid configurations of binding to the same port/addr twice
|
||||
// including via the wildcard (any) addresses
|
||||
type namedAddress struct {
|
||||
name string
|
||||
structs.ServiceAddress
|
||||
}
|
||||
seen := make(map[structs.ServiceAddress]bool)
|
||||
addrs := make([]namedAddress, 0)
|
||||
|
||||
var resources []proto.Message
|
||||
if !cfg.NoDefaultBind {
|
||||
|
@ -200,31 +209,60 @@ func (s *Server) listenersFromSnapshotMeshGateway(cfgSnap *proxycfg.ConfigSnapsh
|
|||
addr = "0.0.0.0"
|
||||
}
|
||||
|
||||
l, err := s.makeGatewayListener("default", addr, cfgSnap.Port, cfgSnap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
a := structs.ServiceAddress{
|
||||
Address: addr,
|
||||
Port: cfgSnap.Port,
|
||||
}
|
||||
if !seen[a] {
|
||||
addrs = append(addrs, namedAddress{name: "default", ServiceAddress: a})
|
||||
seen[a] = true
|
||||
}
|
||||
resources = append(resources, l)
|
||||
}
|
||||
|
||||
if cfg.BindTaggedAddresses {
|
||||
for name, addrCfg := range cfgSnap.TaggedAddresses {
|
||||
l, err := s.makeGatewayListener(name, addrCfg.Address, addrCfg.Port, cfgSnap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
a := structs.ServiceAddress{
|
||||
Address: addrCfg.Address,
|
||||
Port: addrCfg.Port,
|
||||
}
|
||||
if !seen[a] {
|
||||
addrs = append(addrs, namedAddress{name: name, ServiceAddress: a})
|
||||
seen[a] = true
|
||||
}
|
||||
resources = append(resources, l)
|
||||
}
|
||||
}
|
||||
|
||||
for name, addrCfg := range cfg.BindAddresses {
|
||||
l, err := s.makeGatewayListener(name, addrCfg.Address, addrCfg.Port, cfgSnap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
a := structs.ServiceAddress{
|
||||
Address: addrCfg.Address,
|
||||
Port: addrCfg.Port,
|
||||
}
|
||||
if !seen[a] {
|
||||
addrs = append(addrs, namedAddress{name: name, ServiceAddress: a})
|
||||
seen[a] = true
|
||||
}
|
||||
resources = append(resources, l)
|
||||
}
|
||||
|
||||
// Make listeners once deduplicated
|
||||
for _, a := range addrs {
|
||||
var l *envoy.Listener
|
||||
|
||||
switch cfgSnap.Kind {
|
||||
case structs.ServiceKindTerminatingGateway:
|
||||
l, err = s.makeTerminatingGatewayListener(a.name, a.Address, a.Port, cfgSnap, token)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case structs.ServiceKindMeshGateway:
|
||||
l, err = s.makeMeshGatewayListener(a.name, a.Address, a.Port, cfgSnap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if l != nil {
|
||||
resources = append(resources, l)
|
||||
}
|
||||
}
|
||||
return resources, err
|
||||
}
|
||||
|
||||
|
@ -339,9 +377,8 @@ func injectConnectFilters(cfgSnap *proxycfg.ConfigSnapshot, token string, listen
|
|||
listener.FilterChains[idx].Filters =
|
||||
append([]envoylistener.Filter{authFilter}, listener.FilterChains[idx].Filters...)
|
||||
|
||||
// Force our TLS for all filter chains on a public listener
|
||||
listener.FilterChains[idx].TlsContext = &envoyauth.DownstreamTlsContext{
|
||||
CommonTlsContext: makeCommonTLSContext(cfgSnap),
|
||||
CommonTlsContext: makeCommonTLSContext(cfgSnap, cfgSnap.Leaf()),
|
||||
RequireClientCertificate: &types.BoolValue{Value: true},
|
||||
}
|
||||
}
|
||||
|
@ -517,7 +554,101 @@ func (s *Server) makeUpstreamListenerIgnoreDiscoveryChain(
|
|||
return l, nil
|
||||
}
|
||||
|
||||
func (s *Server) makeGatewayListener(name, addr string, port int, cfgSnap *proxycfg.ConfigSnapshot) (*envoy.Listener, error) {
|
||||
func (s *Server) makeTerminatingGatewayListener(name, addr string, port int, cfgSnap *proxycfg.ConfigSnapshot, token string) (*envoy.Listener, error) {
|
||||
l := makeListener(name, addr, port)
|
||||
|
||||
tlsInspector, err := makeTLSInspectorListenerFilter()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
l.ListenerFilters = []envoylistener.ListenerFilter{tlsInspector}
|
||||
|
||||
// Make a FilterChain for each linked service
|
||||
// matched on its cluster (SNI) name.
|
||||
for svc, _ := range cfgSnap.TerminatingGateway.ServiceGroups {
|
||||
clusterName := connect.ServiceSNI(svc.ID, "", svc.NamespaceOrDefault(), cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain)
|
||||
resolver, hasResolver := cfgSnap.TerminatingGateway.ServiceResolvers[svc]
|
||||
|
||||
// Skip the service if we don't have a cert to present for mTLS
|
||||
if cert, ok := cfgSnap.TerminatingGateway.ServiceLeaves[svc]; !ok || cert == nil {
|
||||
// TODO (gateways) (freddy) Should the error suggest that the issue may be ACLs? (need service:write on service)
|
||||
s.Logger.Named(logging.TerminatingGateway).
|
||||
Error("no client certificate available for linked service, skipping filter chain creation",
|
||||
"service", svc.String(), "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
clusterChain, err := s.sniFilterChainTerminatingGateway(name, clusterName, token, svc, cfgSnap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to make filter chain for cluster %q: %v", clusterName, err)
|
||||
}
|
||||
l.FilterChains = append(l.FilterChains, clusterChain)
|
||||
|
||||
// if there is a service-resolver for this service then also setup subset filter chains for it
|
||||
if hasResolver {
|
||||
// generate 1 filter chain for each service subset
|
||||
for subsetName := range resolver.Subsets {
|
||||
clusterName := connect.ServiceSNI(svc.ID, subsetName, svc.NamespaceOrDefault(), cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain)
|
||||
|
||||
clusterChain, err := s.sniFilterChainTerminatingGateway(name, clusterName, token, svc, cfgSnap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to make filter chain for cluster %q: %v", clusterName, err)
|
||||
}
|
||||
l.FilterChains = append(l.FilterChains, clusterChain)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This fallback catch-all filter ensures a listener will be present for health checks to pass
|
||||
// Envoy will reset these connections since known endpoints are caught by filter chain matches above
|
||||
tcpProxy, err := makeTCPProxyFilter(name, "", "terminating_gateway_")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fallback := envoylistener.FilterChain{
|
||||
Filters: []envoylistener.Filter{
|
||||
{Name: "envoy.filters.network.sni_cluster"},
|
||||
tcpProxy,
|
||||
},
|
||||
}
|
||||
l.FilterChains = append(l.FilterChains, fallback)
|
||||
|
||||
return l, nil
|
||||
}
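Note: the gateway listener routes purely on TLS SNI; each linked service and subset gets its own filter chain and leaf certificate, with the final catch-all chain only there so health checks find a listener. A hedged sketch of a caller selecting the "web" chain by SNI, purely to illustrate the contract (host, port and certificate wiring are hypothetical, and the gateway requires a client certificate in practice):

// Illustrative only. Assumes: import "crypto/tls"
conf := &tls.Config{
	ServerName: "web.default.dc1.internal.<trust-domain>.consul",
	// Certificates: caller's Connect leaf; RootCAs: Connect CA roots.
}
conn, err := tls.Dial("tcp", "terminating-gateway.example.com:8443", conf)
if err == nil {
	defer conn.Close()
}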
|
||||
|
||||
func (s *Server) sniFilterChainTerminatingGateway(listener, cluster, token string, service structs.ServiceID,
|
||||
cfgSnap *proxycfg.ConfigSnapshot) (envoylistener.FilterChain, error) {
|
||||
|
||||
authFilter, err := makeExtAuthFilter(token)
|
||||
if err != nil {
|
||||
return envoylistener.FilterChain{}, err
|
||||
}
|
||||
sniCluster, err := makeSNIClusterFilter()
|
||||
if err != nil {
|
||||
return envoylistener.FilterChain{}, err
|
||||
}
|
||||
|
||||
// The cluster name here doesn't matter as the sni_cluster filter will fill it in for us.
|
||||
tcpProxy, err := makeTCPProxyFilter(listener, "", fmt.Sprintf("terminating_gateway_%s_", service.String()))
|
||||
if err != nil {
|
||||
return envoylistener.FilterChain{}, err
|
||||
}
|
||||
|
||||
return envoylistener.FilterChain{
|
||||
FilterChainMatch: makeSNIFilterChainMatch(cluster),
|
||||
Filters: []envoylistener.Filter{
|
||||
authFilter,
|
||||
sniCluster,
|
||||
tcpProxy,
|
||||
},
|
||||
TlsContext: &envoyauth.DownstreamTlsContext{
|
||||
CommonTlsContext: makeCommonTLSContext(cfgSnap, cfgSnap.TerminatingGateway.ServiceLeaves[service]),
|
||||
RequireClientCertificate: &types.BoolValue{Value: true},
|
||||
},
|
||||
}, err
|
||||
}
|
||||
|
||||
func (s *Server) makeMeshGatewayListener(name, addr string, port int, cfgSnap *proxycfg.ConfigSnapshot) (*envoy.Listener, error) {
|
||||
tlsInspector, err := makeTLSInspectorListenerFilter()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -711,11 +842,10 @@ func makeTLSInspectorListenerFilter() (envoylistener.ListenerFilter, error) {
|
|||
return envoylistener.ListenerFilter{Name: util.TlsInspector}, nil
|
||||
}
|
||||
|
||||
// TODO(rb): should this be dead code?
|
||||
func makeSNIFilterChainMatch(sniMatch string) (*envoylistener.FilterChainMatch, error) {
|
||||
func makeSNIFilterChainMatch(sniMatch string) *envoylistener.FilterChainMatch {
|
||||
return &envoylistener.FilterChainMatch{
|
||||
ServerNames: []string{sniMatch},
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
|
||||
func makeSNIClusterFilter() (envoylistener.Filter, error) {
|
||||
|
@ -881,7 +1011,7 @@ func makeFilter(name string, cfg proto.Message) (envoylistener.Filter, error) {
|
|||
}, nil
|
||||
}
|
||||
|
||||
func makeCommonTLSContext(cfgSnap *proxycfg.ConfigSnapshot) *envoyauth.CommonTlsContext {
|
||||
func makeCommonTLSContext(cfgSnap *proxycfg.ConfigSnapshot, leaf *structs.IssuedCert) *envoyauth.CommonTlsContext {
|
||||
// Concatenate all the root PEMs into one.
|
||||
// TODO(banks): verify this actually works with Envoy (docs are not clear).
|
||||
rootPEMS := ""
|
||||
|
@ -892,7 +1022,6 @@ func makeCommonTLSContext(cfgSnap *proxycfg.ConfigSnapshot) *envoyauth.CommonTls
|
|||
rootPEMS += root.RootCert
|
||||
}
|
||||
|
||||
leaf := cfgSnap.Leaf()
|
||||
return &envoyauth.CommonTlsContext{
|
||||
TlsParams: &envoyauth.TlsParameters{},
|
||||
TlsCertificates: []*envoyauth.TlsCertificate{
|
||||
|
|
|
@ -298,6 +298,78 @@ func TestListenersFromSnapshot(t *testing.T) {
|
|||
create: proxycfg.TestConfigSnapshotIngress_SplitterWithResolverRedirectMultiDC,
|
||||
setup: nil,
|
||||
},
|
||||
{
|
||||
name: "terminating-gateway",
|
||||
create: proxycfg.TestConfigSnapshotTerminatingGateway,
|
||||
setup: nil,
|
||||
},
|
||||
{
|
||||
name: "terminating-gateway-no-services",
|
||||
create: proxycfg.TestConfigSnapshotTerminatingGatewayNoServices,
|
||||
setup: nil,
|
||||
},
|
||||
{
|
||||
name: "terminating-gateway-custom-and-tagged-addresses",
|
||||
create: proxycfg.TestConfigSnapshotTerminatingGateway,
|
||||
setup: func(snap *proxycfg.ConfigSnapshot) {
|
||||
snap.Proxy.Config = map[string]interface{}{
|
||||
"envoy_gateway_no_default_bind": true,
|
||||
"envoy_gateway_bind_tagged_addresses": true,
|
||||
"envoy_gateway_bind_addresses": map[string]structs.ServiceAddress{
|
||||
"foo": {
|
||||
Address: "198.17.2.3",
|
||||
Port: 8080,
|
||||
},
|
||||
// This bind address should not get a listener due to deduplication
|
||||
"duplicate-of-tagged-wan-addr": {
|
||||
Address: "198.18.0.1",
|
||||
Port: 443,
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "terminating-gateway-service-subsets",
|
||||
create: proxycfg.TestConfigSnapshotTerminatingGateway,
|
||||
setup: func(snap *proxycfg.ConfigSnapshot) {
|
||||
snap.TerminatingGateway.ServiceResolvers = map[structs.ServiceID]*structs.ServiceResolverConfigEntry{
|
||||
structs.NewServiceID("web", nil): {
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "web",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"v1": {
|
||||
Filter: "Service.Meta.version == 1",
|
||||
},
|
||||
"v2": {
|
||||
Filter: "Service.Meta.version == 2",
|
||||
OnlyPassing: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
structs.NewServiceID("web", nil): {
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "web",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"v1": {
|
||||
Filter: "Service.Meta.version == 1",
|
||||
},
|
||||
"v2": {
|
||||
Filter: "Service.Meta.version == 2",
|
||||
OnlyPassing: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "terminating-gateway-no-api-cert",
|
||||
create: proxycfg.TestConfigSnapshotTerminatingGateway,
|
||||
setup: func(snap *proxycfg.ConfigSnapshot) {
|
||||
snap.TerminatingGateway.ServiceLeaves[structs.NewServiceID("api", nil)] = nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
|
@ -327,6 +399,19 @@ func TestListenersFromSnapshot(t *testing.T) {
|
|||
return listeners[i].(*envoy.Listener).Name < listeners[j].(*envoy.Listener).Name
|
||||
})
|
||||
|
||||
// For terminating gateways we create filter chain matches for services/subsets from the ServiceGroups map
|
||||
if snap.Kind == structs.ServiceKindTerminatingGateway {
|
||||
for i := 0; i < len(listeners); i++ {
|
||||
l := listeners[i].(*envoy.Listener)
|
||||
|
||||
// Sort chains by the matched name with the exception of the last one
|
||||
// The last chain is a fallback and does not have a FilterChainMatch
|
||||
sort.Slice(l.FilterChains[:len(l.FilterChains)-1], func(i, j int) bool {
|
||||
return l.FilterChains[i].FilterChainMatch.ServerNames[0] < l.FilterChains[j].FilterChainMatch.ServerNames[0]
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
require.NoError(err)
|
||||
r, err := createResponse(ListenerType, "00000001", "00000001", listeners)
|
||||
require.NoError(err)
|
||||
|
|
|
@ -219,8 +219,8 @@ func (s *Server) process(stream ADSStream, reqCh <-chan *envoy.DiscoveryRequest)
|
|||
resources: s.clustersFromSnapshot,
|
||||
stream: stream,
|
||||
allowEmptyFn: func(cfgSnap *proxycfg.ConfigSnapshot) bool {
|
||||
// Mesh gateways are allowed to inform CDS of no clusters.
|
||||
return cfgSnap.Kind == structs.ServiceKindMeshGateway
|
||||
// Mesh and Terminating gateways are allowed to inform CDS of no clusters.
|
||||
return cfgSnap.Kind == structs.ServiceKindMeshGateway || cfgSnap.Kind == structs.ServiceKindTerminatingGateway
|
||||
},
|
||||
},
|
||||
RouteType: {
|
||||
|
@ -262,12 +262,7 @@ func (s *Server) process(stream ADSStream, reqCh <-chan *envoy.DiscoveryRequest)
|
|||
if rule != nil && rule.ServiceWrite(cfgSnap.Proxy.DestinationServiceName, &authzContext) != acl.Allow {
|
||||
return status.Errorf(codes.PermissionDenied, "permission denied")
|
||||
}
|
||||
case structs.ServiceKindMeshGateway:
|
||||
cfgSnap.ProxyID.EnterpriseMeta.FillAuthzContext(&authzContext)
|
||||
if rule != nil && rule.ServiceWrite(cfgSnap.Service, &authzContext) != acl.Allow {
|
||||
return status.Errorf(codes.PermissionDenied, "permission denied")
|
||||
}
|
||||
case structs.ServiceKindIngressGateway:
|
||||
case structs.ServiceKindMeshGateway, structs.ServiceKindTerminatingGateway, structs.ServiceKindIngressGateway:
|
||||
cfgSnap.ProxyID.EnterpriseMeta.FillAuthzContext(&authzContext)
|
||||
if rule != nil && rule.ServiceWrite(cfgSnap.Service, &authzContext) != acl.Allow {
|
||||
return status.Errorf(codes.PermissionDenied, "permission denied")
|
||||
|
|
|
@ -0,0 +1,17 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIICnTCCAkKgAwIBAgIRAJrvEdaRAkSltrotd/l/j2cwCgYIKoZIzj0EAwIwgbgx
|
||||
CzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj
|
||||
bzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw
|
||||
FQYDVQQKEw5IYXNoaUNvcnAgSW5jLjE/MD0GA1UEAxM2Q29uc3VsIEFnZW50IENB
|
||||
IDk2NjM4NzM1MDkzNTU5NTIwNDk3MTQwOTU3MDY1MTc0OTg3NDMxMB4XDTIwMDQx
|
||||
NDIyMzE1MloXDTIxMDQxNDIyMzE1MlowHDEaMBgGA1UEAxMRc2VydmVyLmRjMS5j
|
||||
b25zdWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ4v0FoIYI0OWmxE2MR6w5l
|
||||
0pWGhc02RpsOPj/6RS1fmXMMu7JzPzwCmkGcR16RlwwhNFKCZsWpvAjVRHf/pTp+
|
||||
o4HHMIHEMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYB
|
||||
BQUHAwIwDAYDVR0TAQH/BAIwADApBgNVHQ4EIgQgk7kABFitAy3PluyNtmzYiC7H
|
||||
jSN8W/K/OXNJQAQAscMwKwYDVR0jBCQwIoAgNKbPPepvRHXSAPTc+a/BXBzFX1qJ
|
||||
y+Zi7qtjlFX7qtUwLQYDVR0RBCYwJIIRc2VydmVyLmRjMS5jb25zdWyCCWxvY2Fs
|
||||
aG9zdIcEfwAAATAKBggqhkjOPQQDAgNJADBGAiEAhP4HmN5BWysWTbQWClXaWUah
|
||||
LpBGFrvc/2cCQuyEZKsCIQD6JyYCYMArtWwZ4G499zktxrFlqfX14bqyONrxtA5I
|
||||
Dw==
|
||||
-----END CERTIFICATE-----
|
|
@ -0,0 +1,5 @@
|
|||
-----BEGIN EC PRIVATE KEY-----
|
||||
MHcCAQEEIE3KbKXHdsa0vvC1fysQaGdoJRgjRALIolI4XJanie+coAoGCCqGSM49
|
||||
AwEHoUQDQgAEOL9BaCGCNDlpsRNjEesOZdKVhoXNNkabDj4/+kUtX5lzDLuycz88
|
||||
AppBnEdekZcMITRSgmbFqbwI1UR3/6U6fg==
|
||||
-----END EC PRIVATE KEY-----
|
|
@ -0,0 +1,18 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIC6zCCApGgAwIBAgIQSLPxgLmgVh30JCHk+3oypzAKBggqhkjOPQQDAjCBuDEL
|
||||
MAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2Nv
|
||||
MRowGAYDVQQJExExMDEgU2Vjb25kIFN0cmVldDEOMAwGA1UEERMFOTQxMDUxFzAV
|
||||
BgNVBAoTDkhhc2hpQ29ycCBJbmMuMT8wPQYDVQQDEzZDb25zdWwgQWdlbnQgQ0Eg
|
||||
OTY2Mzg3MzUwOTM1NTk1MjA0OTcxNDA5NTcwNjUxNzQ5ODc0MzEwHhcNMjAwNDE0
|
||||
MjIzMTQ3WhcNMjUwNDEzMjIzMTQ3WjCBuDELMAkGA1UEBhMCVVMxCzAJBgNVBAgT
|
||||
AkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRowGAYDVQQJExExMDEgU2Vjb25k
|
||||
IFN0cmVldDEOMAwGA1UEERMFOTQxMDUxFzAVBgNVBAoTDkhhc2hpQ29ycCBJbmMu
|
||||
MT8wPQYDVQQDEzZDb25zdWwgQWdlbnQgQ0EgOTY2Mzg3MzUwOTM1NTk1MjA0OTcx
|
||||
NDA5NTcwNjUxNzQ5ODc0MzEwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAATwxYvc
|
||||
sS41PyFkQT/Ig8Wi0G7p38YcH8qCKL0irTwFou0MtFRbXLFzjfeSqjr9DbsZVFGf
|
||||
Bbo0WVXZBsgb2OETo3sweTAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB
|
||||
/zApBgNVHQ4EIgQgNKbPPepvRHXSAPTc+a/BXBzFX1qJy+Zi7qtjlFX7qtUwKwYD
|
||||
VR0jBCQwIoAgNKbPPepvRHXSAPTc+a/BXBzFX1qJy+Zi7qtjlFX7qtUwCgYIKoZI
|
||||
zj0EAwIDSAAwRQIhAIknhzVE0ygBcVOHx1dqmQsZklWLDpKlAL3KkDuj0dQ+AiBU
|
||||
OdRsTln8773X3OJkmAHhlgEKtWOVt9v+CtAZ8N6kww==
|
||||
-----END CERTIFICATE-----
|
71
agent/xds/testdata/clusters/terminating-gateway-ignore-extra-resolvers.golden
vendored
Normal file
|
@ -0,0 +1,71 @@
|
|||
{
|
||||
"versionInfo": "00000001",
|
||||
"resources": [
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
|
||||
"name": "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
|
||||
"type": "EDS",
|
||||
"edsClusterConfig": {
|
||||
"edsConfig": {
|
||||
"ads": {
|
||||
|
||||
}
|
||||
}
|
||||
},
|
||||
"connectTimeout": "5s",
|
||||
"outlierDetection": {
|
||||
|
||||
}
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
|
||||
"name": "v1.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
|
||||
"type": "EDS",
|
||||
"edsClusterConfig": {
|
||||
"edsConfig": {
|
||||
"ads": {
|
||||
|
||||
}
|
||||
}
|
||||
},
|
||||
"connectTimeout": "5s",
|
||||
"outlierDetection": {
|
||||
|
||||
}
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
|
||||
"name": "v2.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
|
||||
"type": "EDS",
|
||||
"edsClusterConfig": {
|
||||
"edsConfig": {
|
||||
"ads": {
|
||||
|
||||
}
|
||||
}
|
||||
},
|
||||
"connectTimeout": "5s",
|
||||
"outlierDetection": {
|
||||
|
||||
}
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
|
||||
"name": "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
|
||||
"type": "EDS",
|
||||
"edsClusterConfig": {
|
||||
"edsConfig": {
|
||||
"ads": {
|
||||
|
||||
}
|
||||
}
|
||||
},
|
||||
"connectTimeout": "5s",
|
||||
"outlierDetection": {
|
||||
|
||||
}
|
||||
}
|
||||
],
|
||||
"typeUrl": "type.googleapis.com/envoy.api.v2.Cluster",
|
||||
"nonce": "00000001"
|
||||
}
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
"versionInfo": "00000001",
|
||||
"resources": [
|
||||
],
|
||||
"typeUrl": "type.googleapis.com/envoy.api.v2.Cluster",
|
||||
"nonce": "00000001"
|
||||
}
|
|
@ -0,0 +1,71 @@
|
|||
{
|
||||
"versionInfo": "00000001",
|
||||
"resources": [
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
|
||||
"name": "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
|
||||
"type": "EDS",
|
||||
"edsClusterConfig": {
|
||||
"edsConfig": {
|
||||
"ads": {
|
||||
|
||||
}
|
||||
}
|
||||
},
|
||||
"connectTimeout": "5s",
|
||||
"outlierDetection": {
|
||||
|
||||
}
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
|
||||
"name": "v1.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
|
||||
"type": "EDS",
|
||||
"edsClusterConfig": {
|
||||
"edsConfig": {
|
||||
"ads": {
|
||||
|
||||
}
|
||||
}
|
||||
},
|
||||
"connectTimeout": "5s",
|
||||
"outlierDetection": {
|
||||
|
||||
}
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
|
||||
"name": "v2.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
|
||||
"type": "EDS",
|
||||
"edsClusterConfig": {
|
||||
"edsConfig": {
|
||||
"ads": {
|
||||
|
||||
}
|
||||
}
|
||||
},
|
||||
"connectTimeout": "5s",
|
||||
"outlierDetection": {
|
||||
|
||||
}
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
|
||||
"name": "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
|
||||
"type": "EDS",
|
||||
"edsClusterConfig": {
|
||||
"edsConfig": {
|
||||
"ads": {
|
||||
|
||||
}
|
||||
}
|
||||
},
|
||||
"connectTimeout": "5s",
|
||||
"outlierDetection": {
|
||||
|
||||
}
|
||||
}
|
||||
],
|
||||
"typeUrl": "type.googleapis.com/envoy.api.v2.Cluster",
|
||||
"nonce": "00000001"
|
||||
}
|
|
@ -0,0 +1,39 @@
|
|||
{
|
||||
"versionInfo": "00000001",
|
||||
"resources": [
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
|
||||
"name": "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
|
||||
"type": "EDS",
|
||||
"edsClusterConfig": {
|
||||
"edsConfig": {
|
||||
"ads": {
|
||||
|
||||
}
|
||||
}
|
||||
},
|
||||
"connectTimeout": "5s",
|
||||
"outlierDetection": {
|
||||
|
||||
}
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Cluster",
|
||||
"name": "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
|
||||
"type": "EDS",
|
||||
"edsClusterConfig": {
|
||||
"edsConfig": {
|
||||
"ads": {
|
||||
|
||||
}
|
||||
}
|
||||
},
|
||||
"connectTimeout": "5s",
|
||||
"outlierDetection": {
|
||||
|
||||
}
|
||||
}
|
||||
],
|
||||
"typeUrl": "type.googleapis.com/envoy.api.v2.Cluster",
|
||||
"nonce": "00000001"
|
||||
}
|
107
agent/xds/testdata/endpoints/terminating-gateway-default-service-subset.golden
vendored
Normal file
|
@ -0,0 +1,107 @@
|
|||
{
|
||||
"versionInfo": "00000001",
|
||||
"resources": [
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment",
|
||||
"clusterName": "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
|
||||
"endpoints": [
|
||||
{
|
||||
"lbEndpoints": [
|
||||
{
|
||||
"endpoint": {
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "10.10.1.1",
|
||||
"portValue": 8081
|
||||
}
|
||||
}
|
||||
},
|
||||
"healthStatus": "HEALTHY",
|
||||
"loadBalancingWeight": 1
|
||||
},
|
||||
{
|
||||
"endpoint": {
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "10.10.1.2",
|
||||
"portValue": 8081
|
||||
}
|
||||
}
|
||||
},
|
||||
"healthStatus": "HEALTHY",
|
||||
"loadBalancingWeight": 1
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment",
|
||||
"clusterName": "v1.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
|
||||
"endpoints": [
|
||||
{
|
||||
"lbEndpoints": [
|
||||
{
|
||||
"endpoint": {
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "10.10.1.1",
|
||||
"portValue": 8080
|
||||
}
|
||||
}
|
||||
},
|
||||
"healthStatus": "HEALTHY",
|
||||
"loadBalancingWeight": 1
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment",
|
||||
"clusterName": "v2.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
|
||||
"endpoints": [
|
||||
{
|
||||
"lbEndpoints": [
|
||||
{
|
||||
"endpoint": {
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "10.10.1.2",
|
||||
"portValue": 8080
|
||||
}
|
||||
}
|
||||
},
|
||||
"healthStatus": "HEALTHY",
|
||||
"loadBalancingWeight": 1
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment",
|
||||
"clusterName": "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
|
||||
"endpoints": [
|
||||
{
|
||||
"lbEndpoints": [
|
||||
{
|
||||
"endpoint": {
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "10.10.1.2",
|
||||
"portValue": 8080
|
||||
}
|
||||
}
|
||||
},
|
||||
"healthStatus": "HEALTHY",
|
||||
"loadBalancingWeight": 1
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"typeUrl": "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment",
|
||||
"nonce": "00000001"
|
||||
}
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
"versionInfo": "00000001",
|
||||
"resources": [
|
||||
],
|
||||
"typeUrl": "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment",
|
||||
"nonce": "00000001"
|
||||
}
|
|
@ -0,0 +1,119 @@
{
  "versionInfo": "00000001",
  "resources": [
    {
      "@type": "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment",
      "clusterName": "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
      "endpoints": [
        {
          "lbEndpoints": [
            {
              "endpoint": {
                "address": {
                  "socketAddress": {
                    "address": "10.10.1.1",
                    "portValue": 8081
                  }
                }
              },
              "healthStatus": "HEALTHY",
              "loadBalancingWeight": 1
            },
            {
              "endpoint": {
                "address": {
                  "socketAddress": {
                    "address": "10.10.1.2",
                    "portValue": 8081
                  }
                }
              },
              "healthStatus": "HEALTHY",
              "loadBalancingWeight": 1
            }
          ]
        }
      ]
    },
    {
      "@type": "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment",
      "clusterName": "v1.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
      "endpoints": [
        {
          "lbEndpoints": [
            {
              "endpoint": {
                "address": {
                  "socketAddress": {
                    "address": "10.10.1.1",
                    "portValue": 8080
                  }
                }
              },
              "healthStatus": "HEALTHY",
              "loadBalancingWeight": 1
            }
          ]
        }
      ]
    },
    {
      "@type": "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment",
      "clusterName": "v2.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
      "endpoints": [
        {
          "lbEndpoints": [
            {
              "endpoint": {
                "address": {
                  "socketAddress": {
                    "address": "10.10.1.2",
                    "portValue": 8080
                  }
                }
              },
              "healthStatus": "HEALTHY",
              "loadBalancingWeight": 1
            }
          ]
        }
      ]
    },
    {
      "@type": "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment",
      "clusterName": "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
      "endpoints": [
        {
          "lbEndpoints": [
            {
              "endpoint": {
                "address": {
                  "socketAddress": {
                    "address": "10.10.1.1",
                    "portValue": 8080
                  }
                }
              },
              "healthStatus": "HEALTHY",
              "loadBalancingWeight": 1
            },
            {
              "endpoint": {
                "address": {
                  "socketAddress": {
                    "address": "10.10.1.2",
                    "portValue": 8080
                  }
                }
              },
              "healthStatus": "HEALTHY",
              "loadBalancingWeight": 1
            }
          ]
        }
      ]
    }
  ],
  "typeUrl": "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment",
  "nonce": "00000001"
}
@ -0,0 +1,75 @@
{
  "versionInfo": "00000001",
  "resources": [
    {
      "@type": "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment",
      "clusterName": "api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
      "endpoints": [
        {
          "lbEndpoints": [
            {
              "endpoint": {
                "address": {
                  "socketAddress": {
                    "address": "10.10.1.1",
                    "portValue": 8081
                  }
                }
              },
              "healthStatus": "HEALTHY",
              "loadBalancingWeight": 1
            },
            {
              "endpoint": {
                "address": {
                  "socketAddress": {
                    "address": "10.10.1.2",
                    "portValue": 8081
                  }
                }
              },
              "healthStatus": "HEALTHY",
              "loadBalancingWeight": 1
            }
          ]
        }
      ]
    },
    {
      "@type": "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment",
      "clusterName": "web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
      "endpoints": [
        {
          "lbEndpoints": [
            {
              "endpoint": {
                "address": {
                  "socketAddress": {
                    "address": "10.10.1.1",
                    "portValue": 8080
                  }
                }
              },
              "healthStatus": "HEALTHY",
              "loadBalancingWeight": 1
            },
            {
              "endpoint": {
                "address": {
                  "socketAddress": {
                    "address": "10.10.1.2",
                    "portValue": 8080
                  }
                }
              },
              "healthStatus": "HEALTHY",
              "loadBalancingWeight": 1
            }
          ]
        }
      ]
    }
  ],
  "typeUrl": "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment",
  "nonce": "00000001"
}
305 agent/xds/testdata/listeners/terminating-gateway-custom-and-tagged-addresses.golden vendored Normal file
@ -0,0 +1,305 @@
|
|||
{
|
||||
"versionInfo": "00000001",
|
||||
"resources": [
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "foo:198.17.2.3:8080",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "198.17.2.3",
|
||||
"portValue": 8080
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"filterChainMatch": {
|
||||
"serverNames": [
|
||||
"api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
|
||||
]
|
||||
},
|
||||
"tlsContext": {
|
||||
"commonTlsContext": {
|
||||
"tlsParams": {
|
||||
|
||||
},
|
||||
"tlsCertificates": [
|
||||
{
|
||||
"certificateChain": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICnTCCAkKgAwIBAgIRAJrvEdaRAkSltrotd/l/j2cwCgYIKoZIzj0EAwIwgbgx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj\nbzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw\nFQYDVQQKEw5IYXNoaUNvcnAgSW5jLjE/MD0GA1UEAxM2Q29uc3VsIEFnZW50IENB\nIDk2NjM4NzM1MDkzNTU5NTIwNDk3MTQwOTU3MDY1MTc0OTg3NDMxMB4XDTIwMDQx\nNDIyMzE1MloXDTIxMDQxNDIyMzE1MlowHDEaMBgGA1UEAxMRc2VydmVyLmRjMS5j\nb25zdWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ4v0FoIYI0OWmxE2MR6w5l\n0pWGhc02RpsOPj/6RS1fmXMMu7JzPzwCmkGcR16RlwwhNFKCZsWpvAjVRHf/pTp+\no4HHMIHEMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYB\nBQUHAwIwDAYDVR0TAQH/BAIwADApBgNVHQ4EIgQgk7kABFitAy3PluyNtmzYiC7H\njSN8W/K/OXNJQAQAscMwKwYDVR0jBCQwIoAgNKbPPepvRHXSAPTc+a/BXBzFX1qJ\ny+Zi7qtjlFX7qtUwLQYDVR0RBCYwJIIRc2VydmVyLmRjMS5jb25zdWyCCWxvY2Fs\naG9zdIcEfwAAATAKBggqhkjOPQQDAgNJADBGAiEAhP4HmN5BWysWTbQWClXaWUah\nLpBGFrvc/2cCQuyEZKsCIQD6JyYCYMArtWwZ4G499zktxrFlqfX14bqyONrxtA5I\nDw==\n-----END CERTIFICATE-----\n"
|
||||
},
|
||||
"privateKey": {
|
||||
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIE3KbKXHdsa0vvC1fysQaGdoJRgjRALIolI4XJanie+coAoGCCqGSM49\nAwEHoUQDQgAEOL9BaCGCNDlpsRNjEesOZdKVhoXNNkabDj4/+kUtX5lzDLuycz88\nAppBnEdekZcMITRSgmbFqbwI1UR3/6U6fg==\n-----END EC PRIVATE KEY-----\n"
|
||||
}
|
||||
}
|
||||
],
|
||||
"validationContext": {
|
||||
"trustedCa": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
"requireClientCertificate": true
|
||||
},
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.ext_authz",
|
||||
"config": {
|
||||
"grpc_service": {
|
||||
"envoy_grpc": {
|
||||
"cluster_name": "local_agent"
|
||||
},
|
||||
"initial_metadata": [
|
||||
{
|
||||
"key": "x-consul-token",
|
||||
"value": "my-token"
|
||||
}
|
||||
]
|
||||
},
|
||||
"stat_prefix": "connect_authz"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "envoy.filters.network.sni_cluster"
|
||||
},
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "",
|
||||
"stat_prefix": "terminating_gateway_api_foo_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"filterChainMatch": {
|
||||
"serverNames": [
|
||||
"web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
|
||||
]
|
||||
},
|
||||
"tlsContext": {
|
||||
"commonTlsContext": {
|
||||
"tlsParams": {
|
||||
|
||||
},
|
||||
"tlsCertificates": [
|
||||
{
|
||||
"certificateChain": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
|
||||
},
|
||||
"privateKey": {
|
||||
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
|
||||
}
|
||||
}
|
||||
],
|
||||
"validationContext": {
|
||||
"trustedCa": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
"requireClientCertificate": true
|
||||
},
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.ext_authz",
|
||||
"config": {
|
||||
"grpc_service": {
|
||||
"envoy_grpc": {
|
||||
"cluster_name": "local_agent"
|
||||
},
|
||||
"initial_metadata": [
|
||||
{
|
||||
"key": "x-consul-token",
|
||||
"value": "my-token"
|
||||
}
|
||||
]
|
||||
},
|
||||
"stat_prefix": "connect_authz"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "envoy.filters.network.sni_cluster"
|
||||
},
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "",
|
||||
"stat_prefix": "terminating_gateway_web_foo_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.filters.network.sni_cluster"
|
||||
},
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "",
|
||||
"stat_prefix": "terminating_gateway_foo_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"listenerFilters": [
|
||||
{
|
||||
"name": "envoy.listener.tls_inspector"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "wan:198.18.0.1:443",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "198.18.0.1",
|
||||
"portValue": 443
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"filterChainMatch": {
|
||||
"serverNames": [
|
||||
"api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
|
||||
]
|
||||
},
|
||||
"tlsContext": {
|
||||
"commonTlsContext": {
|
||||
"tlsParams": {
|
||||
|
||||
},
|
||||
"tlsCertificates": [
|
||||
{
|
||||
"certificateChain": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICnTCCAkKgAwIBAgIRAJrvEdaRAkSltrotd/l/j2cwCgYIKoZIzj0EAwIwgbgx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj\nbzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw\nFQYDVQQKEw5IYXNoaUNvcnAgSW5jLjE/MD0GA1UEAxM2Q29uc3VsIEFnZW50IENB\nIDk2NjM4NzM1MDkzNTU5NTIwNDk3MTQwOTU3MDY1MTc0OTg3NDMxMB4XDTIwMDQx\nNDIyMzE1MloXDTIxMDQxNDIyMzE1MlowHDEaMBgGA1UEAxMRc2VydmVyLmRjMS5j\nb25zdWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ4v0FoIYI0OWmxE2MR6w5l\n0pWGhc02RpsOPj/6RS1fmXMMu7JzPzwCmkGcR16RlwwhNFKCZsWpvAjVRHf/pTp+\no4HHMIHEMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYB\nBQUHAwIwDAYDVR0TAQH/BAIwADApBgNVHQ4EIgQgk7kABFitAy3PluyNtmzYiC7H\njSN8W/K/OXNJQAQAscMwKwYDVR0jBCQwIoAgNKbPPepvRHXSAPTc+a/BXBzFX1qJ\ny+Zi7qtjlFX7qtUwLQYDVR0RBCYwJIIRc2VydmVyLmRjMS5jb25zdWyCCWxvY2Fs\naG9zdIcEfwAAATAKBggqhkjOPQQDAgNJADBGAiEAhP4HmN5BWysWTbQWClXaWUah\nLpBGFrvc/2cCQuyEZKsCIQD6JyYCYMArtWwZ4G499zktxrFlqfX14bqyONrxtA5I\nDw==\n-----END CERTIFICATE-----\n"
|
||||
},
|
||||
"privateKey": {
|
||||
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIE3KbKXHdsa0vvC1fysQaGdoJRgjRALIolI4XJanie+coAoGCCqGSM49\nAwEHoUQDQgAEOL9BaCGCNDlpsRNjEesOZdKVhoXNNkabDj4/+kUtX5lzDLuycz88\nAppBnEdekZcMITRSgmbFqbwI1UR3/6U6fg==\n-----END EC PRIVATE KEY-----\n"
|
||||
}
|
||||
}
|
||||
],
|
||||
"validationContext": {
|
||||
"trustedCa": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
"requireClientCertificate": true
|
||||
},
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.ext_authz",
|
||||
"config": {
|
||||
"grpc_service": {
|
||||
"envoy_grpc": {
|
||||
"cluster_name": "local_agent"
|
||||
},
|
||||
"initial_metadata": [
|
||||
{
|
||||
"key": "x-consul-token",
|
||||
"value": "my-token"
|
||||
}
|
||||
]
|
||||
},
|
||||
"stat_prefix": "connect_authz"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "envoy.filters.network.sni_cluster"
|
||||
},
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "",
|
||||
"stat_prefix": "terminating_gateway_api_wan_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"filterChainMatch": {
|
||||
"serverNames": [
|
||||
"web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
|
||||
]
|
||||
},
|
||||
"tlsContext": {
|
||||
"commonTlsContext": {
|
||||
"tlsParams": {
|
||||
|
||||
},
|
||||
"tlsCertificates": [
|
||||
{
|
||||
"certificateChain": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
|
||||
},
|
||||
"privateKey": {
|
||||
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
|
||||
}
|
||||
}
|
||||
],
|
||||
"validationContext": {
|
||||
"trustedCa": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
"requireClientCertificate": true
|
||||
},
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.ext_authz",
|
||||
"config": {
|
||||
"grpc_service": {
|
||||
"envoy_grpc": {
|
||||
"cluster_name": "local_agent"
|
||||
},
|
||||
"initial_metadata": [
|
||||
{
|
||||
"key": "x-consul-token",
|
||||
"value": "my-token"
|
||||
}
|
||||
]
|
||||
},
|
||||
"stat_prefix": "connect_authz"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "envoy.filters.network.sni_cluster"
|
||||
},
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "",
|
||||
"stat_prefix": "terminating_gateway_web_wan_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.filters.network.sni_cluster"
|
||||
},
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "",
|
||||
"stat_prefix": "terminating_gateway_wan_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"listenerFilters": [
|
||||
{
|
||||
"name": "envoy.listener.tls_inspector"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"typeUrl": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"nonce": "00000001"
|
||||
}
|
|
@ -0,0 +1,97 @@
|
|||
{
|
||||
"versionInfo": "00000001",
|
||||
"resources": [
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "default:1.2.3.4:8443",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "1.2.3.4",
|
||||
"portValue": 8443
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"filterChainMatch": {
|
||||
"serverNames": [
|
||||
"web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
|
||||
]
|
||||
},
|
||||
"tlsContext": {
|
||||
"commonTlsContext": {
|
||||
"tlsParams": {
|
||||
|
||||
},
|
||||
"tlsCertificates": [
|
||||
{
|
||||
"certificateChain": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
|
||||
},
|
||||
"privateKey": {
|
||||
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
|
||||
}
|
||||
}
|
||||
],
|
||||
"validationContext": {
|
||||
"trustedCa": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
"requireClientCertificate": true
|
||||
},
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.ext_authz",
|
||||
"config": {
|
||||
"grpc_service": {
|
||||
"envoy_grpc": {
|
||||
"cluster_name": "local_agent"
|
||||
},
|
||||
"initial_metadata": [
|
||||
{
|
||||
"key": "x-consul-token",
|
||||
"value": "my-token"
|
||||
}
|
||||
]
|
||||
},
|
||||
"stat_prefix": "connect_authz"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "envoy.filters.network.sni_cluster"
|
||||
},
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "",
|
||||
"stat_prefix": "terminating_gateway_web_default_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.filters.network.sni_cluster"
|
||||
},
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "",
|
||||
"stat_prefix": "terminating_gateway_default_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"listenerFilters": [
|
||||
{
|
||||
"name": "envoy.listener.tls_inspector"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"typeUrl": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"nonce": "00000001"
|
||||
}
|
|
@ -0,0 +1,38 @@
{
  "versionInfo": "00000001",
  "resources": [
    {
      "@type": "type.googleapis.com/envoy.api.v2.Listener",
      "name": "default:1.2.3.4:8443",
      "address": {
        "socketAddress": {
          "address": "1.2.3.4",
          "portValue": 8443
        }
      },
      "filterChains": [
        {
          "filters": [
            {
              "name": "envoy.filters.network.sni_cluster"
            },
            {
              "name": "envoy.tcp_proxy",
              "config": {
                "cluster": "",
                "stat_prefix": "terminating_gateway_default_tcp"
              }
            }
          ]
        }
      ],
      "listenerFilters": [
        {
          "name": "envoy.listener.tls_inspector"
        }
      ]
    }
  ],
  "typeUrl": "type.googleapis.com/envoy.api.v2.Listener",
  "nonce": "00000001"
}
@ -0,0 +1,274 @@
|
|||
{
|
||||
"versionInfo": "00000001",
|
||||
"resources": [
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "default:1.2.3.4:8443",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "1.2.3.4",
|
||||
"portValue": 8443
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"filterChainMatch": {
|
||||
"serverNames": [
|
||||
"api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
|
||||
]
|
||||
},
|
||||
"tlsContext": {
|
||||
"commonTlsContext": {
|
||||
"tlsParams": {
|
||||
|
||||
},
|
||||
"tlsCertificates": [
|
||||
{
|
||||
"certificateChain": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICnTCCAkKgAwIBAgIRAJrvEdaRAkSltrotd/l/j2cwCgYIKoZIzj0EAwIwgbgx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj\nbzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw\nFQYDVQQKEw5IYXNoaUNvcnAgSW5jLjE/MD0GA1UEAxM2Q29uc3VsIEFnZW50IENB\nIDk2NjM4NzM1MDkzNTU5NTIwNDk3MTQwOTU3MDY1MTc0OTg3NDMxMB4XDTIwMDQx\nNDIyMzE1MloXDTIxMDQxNDIyMzE1MlowHDEaMBgGA1UEAxMRc2VydmVyLmRjMS5j\nb25zdWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ4v0FoIYI0OWmxE2MR6w5l\n0pWGhc02RpsOPj/6RS1fmXMMu7JzPzwCmkGcR16RlwwhNFKCZsWpvAjVRHf/pTp+\no4HHMIHEMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYB\nBQUHAwIwDAYDVR0TAQH/BAIwADApBgNVHQ4EIgQgk7kABFitAy3PluyNtmzYiC7H\njSN8W/K/OXNJQAQAscMwKwYDVR0jBCQwIoAgNKbPPepvRHXSAPTc+a/BXBzFX1qJ\ny+Zi7qtjlFX7qtUwLQYDVR0RBCYwJIIRc2VydmVyLmRjMS5jb25zdWyCCWxvY2Fs\naG9zdIcEfwAAATAKBggqhkjOPQQDAgNJADBGAiEAhP4HmN5BWysWTbQWClXaWUah\nLpBGFrvc/2cCQuyEZKsCIQD6JyYCYMArtWwZ4G499zktxrFlqfX14bqyONrxtA5I\nDw==\n-----END CERTIFICATE-----\n"
|
||||
},
|
||||
"privateKey": {
|
||||
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIE3KbKXHdsa0vvC1fysQaGdoJRgjRALIolI4XJanie+coAoGCCqGSM49\nAwEHoUQDQgAEOL9BaCGCNDlpsRNjEesOZdKVhoXNNkabDj4/+kUtX5lzDLuycz88\nAppBnEdekZcMITRSgmbFqbwI1UR3/6U6fg==\n-----END EC PRIVATE KEY-----\n"
|
||||
}
|
||||
}
|
||||
],
|
||||
"validationContext": {
|
||||
"trustedCa": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
"requireClientCertificate": true
|
||||
},
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.ext_authz",
|
||||
"config": {
|
||||
"grpc_service": {
|
||||
"envoy_grpc": {
|
||||
"cluster_name": "local_agent"
|
||||
},
|
||||
"initial_metadata": [
|
||||
{
|
||||
"key": "x-consul-token",
|
||||
"value": "my-token"
|
||||
}
|
||||
]
|
||||
},
|
||||
"stat_prefix": "connect_authz"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "envoy.filters.network.sni_cluster"
|
||||
},
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "",
|
||||
"stat_prefix": "terminating_gateway_api_default_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"filterChainMatch": {
|
||||
"serverNames": [
|
||||
"v1.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
|
||||
]
|
||||
},
|
||||
"tlsContext": {
|
||||
"commonTlsContext": {
|
||||
"tlsParams": {
|
||||
|
||||
},
|
||||
"tlsCertificates": [
|
||||
{
|
||||
"certificateChain": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
|
||||
},
|
||||
"privateKey": {
|
||||
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
|
||||
}
|
||||
}
|
||||
],
|
||||
"validationContext": {
|
||||
"trustedCa": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
"requireClientCertificate": true
|
||||
},
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.ext_authz",
|
||||
"config": {
|
||||
"grpc_service": {
|
||||
"envoy_grpc": {
|
||||
"cluster_name": "local_agent"
|
||||
},
|
||||
"initial_metadata": [
|
||||
{
|
||||
"key": "x-consul-token",
|
||||
"value": "my-token"
|
||||
}
|
||||
]
|
||||
},
|
||||
"stat_prefix": "connect_authz"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "envoy.filters.network.sni_cluster"
|
||||
},
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "",
|
||||
"stat_prefix": "terminating_gateway_web_default_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"filterChainMatch": {
|
||||
"serverNames": [
|
||||
"v2.web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
|
||||
]
|
||||
},
|
||||
"tlsContext": {
|
||||
"commonTlsContext": {
|
||||
"tlsParams": {
|
||||
|
||||
},
|
||||
"tlsCertificates": [
|
||||
{
|
||||
"certificateChain": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
|
||||
},
|
||||
"privateKey": {
|
||||
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
|
||||
}
|
||||
}
|
||||
],
|
||||
"validationContext": {
|
||||
"trustedCa": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
"requireClientCertificate": true
|
||||
},
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.ext_authz",
|
||||
"config": {
|
||||
"grpc_service": {
|
||||
"envoy_grpc": {
|
||||
"cluster_name": "local_agent"
|
||||
},
|
||||
"initial_metadata": [
|
||||
{
|
||||
"key": "x-consul-token",
|
||||
"value": "my-token"
|
||||
}
|
||||
]
|
||||
},
|
||||
"stat_prefix": "connect_authz"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "envoy.filters.network.sni_cluster"
|
||||
},
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "",
|
||||
"stat_prefix": "terminating_gateway_web_default_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"filterChainMatch": {
|
||||
"serverNames": [
|
||||
"web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
|
||||
]
|
||||
},
|
||||
"tlsContext": {
|
||||
"commonTlsContext": {
|
||||
"tlsParams": {
|
||||
|
||||
},
|
||||
"tlsCertificates": [
|
||||
{
|
||||
"certificateChain": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
|
||||
},
|
||||
"privateKey": {
|
||||
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
|
||||
}
|
||||
}
|
||||
],
|
||||
"validationContext": {
|
||||
"trustedCa": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
"requireClientCertificate": true
|
||||
},
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.ext_authz",
|
||||
"config": {
|
||||
"grpc_service": {
|
||||
"envoy_grpc": {
|
||||
"cluster_name": "local_agent"
|
||||
},
|
||||
"initial_metadata": [
|
||||
{
|
||||
"key": "x-consul-token",
|
||||
"value": "my-token"
|
||||
}
|
||||
]
|
||||
},
|
||||
"stat_prefix": "connect_authz"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "envoy.filters.network.sni_cluster"
|
||||
},
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "",
|
||||
"stat_prefix": "terminating_gateway_web_default_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.filters.network.sni_cluster"
|
||||
},
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "",
|
||||
"stat_prefix": "terminating_gateway_default_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"listenerFilters": [
|
||||
{
|
||||
"name": "envoy.listener.tls_inspector"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"typeUrl": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"nonce": "00000001"
|
||||
}
|
|
@ -0,0 +1,156 @@
|
|||
{
|
||||
"versionInfo": "00000001",
|
||||
"resources": [
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "default:1.2.3.4:8443",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "1.2.3.4",
|
||||
"portValue": 8443
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"filterChainMatch": {
|
||||
"serverNames": [
|
||||
"api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
|
||||
]
|
||||
},
|
||||
"tlsContext": {
|
||||
"commonTlsContext": {
|
||||
"tlsParams": {
|
||||
|
||||
},
|
||||
"tlsCertificates": [
|
||||
{
|
||||
"certificateChain": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICnTCCAkKgAwIBAgIRAJrvEdaRAkSltrotd/l/j2cwCgYIKoZIzj0EAwIwgbgx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNj\nbzEaMBgGA1UECRMRMTAxIFNlY29uZCBTdHJlZXQxDjAMBgNVBBETBTk0MTA1MRcw\nFQYDVQQKEw5IYXNoaUNvcnAgSW5jLjE/MD0GA1UEAxM2Q29uc3VsIEFnZW50IENB\nIDk2NjM4NzM1MDkzNTU5NTIwNDk3MTQwOTU3MDY1MTc0OTg3NDMxMB4XDTIwMDQx\nNDIyMzE1MloXDTIxMDQxNDIyMzE1MlowHDEaMBgGA1UEAxMRc2VydmVyLmRjMS5j\nb25zdWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ4v0FoIYI0OWmxE2MR6w5l\n0pWGhc02RpsOPj/6RS1fmXMMu7JzPzwCmkGcR16RlwwhNFKCZsWpvAjVRHf/pTp+\no4HHMIHEMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYB\nBQUHAwIwDAYDVR0TAQH/BAIwADApBgNVHQ4EIgQgk7kABFitAy3PluyNtmzYiC7H\njSN8W/K/OXNJQAQAscMwKwYDVR0jBCQwIoAgNKbPPepvRHXSAPTc+a/BXBzFX1qJ\ny+Zi7qtjlFX7qtUwLQYDVR0RBCYwJIIRc2VydmVyLmRjMS5jb25zdWyCCWxvY2Fs\naG9zdIcEfwAAATAKBggqhkjOPQQDAgNJADBGAiEAhP4HmN5BWysWTbQWClXaWUah\nLpBGFrvc/2cCQuyEZKsCIQD6JyYCYMArtWwZ4G499zktxrFlqfX14bqyONrxtA5I\nDw==\n-----END CERTIFICATE-----\n"
|
||||
},
|
||||
"privateKey": {
|
||||
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIE3KbKXHdsa0vvC1fysQaGdoJRgjRALIolI4XJanie+coAoGCCqGSM49\nAwEHoUQDQgAEOL9BaCGCNDlpsRNjEesOZdKVhoXNNkabDj4/+kUtX5lzDLuycz88\nAppBnEdekZcMITRSgmbFqbwI1UR3/6U6fg==\n-----END EC PRIVATE KEY-----\n"
|
||||
}
|
||||
}
|
||||
],
|
||||
"validationContext": {
|
||||
"trustedCa": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
"requireClientCertificate": true
|
||||
},
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.ext_authz",
|
||||
"config": {
|
||||
"grpc_service": {
|
||||
"envoy_grpc": {
|
||||
"cluster_name": "local_agent"
|
||||
},
|
||||
"initial_metadata": [
|
||||
{
|
||||
"key": "x-consul-token",
|
||||
"value": "my-token"
|
||||
}
|
||||
]
|
||||
},
|
||||
"stat_prefix": "connect_authz"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "envoy.filters.network.sni_cluster"
|
||||
},
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "",
|
||||
"stat_prefix": "terminating_gateway_api_default_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"filterChainMatch": {
|
||||
"serverNames": [
|
||||
"web.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
|
||||
]
|
||||
},
|
||||
"tlsContext": {
|
||||
"commonTlsContext": {
|
||||
"tlsParams": {
|
||||
|
||||
},
|
||||
"tlsCertificates": [
|
||||
{
|
||||
"certificateChain": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
|
||||
},
|
||||
"privateKey": {
|
||||
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
|
||||
}
|
||||
}
|
||||
],
|
||||
"validationContext": {
|
||||
"trustedCa": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
"requireClientCertificate": true
|
||||
},
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.ext_authz",
|
||||
"config": {
|
||||
"grpc_service": {
|
||||
"envoy_grpc": {
|
||||
"cluster_name": "local_agent"
|
||||
},
|
||||
"initial_metadata": [
|
||||
{
|
||||
"key": "x-consul-token",
|
||||
"value": "my-token"
|
||||
}
|
||||
]
|
||||
},
|
||||
"stat_prefix": "connect_authz"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "envoy.filters.network.sni_cluster"
|
||||
},
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "",
|
||||
"stat_prefix": "terminating_gateway_web_default_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.filters.network.sni_cluster"
|
||||
},
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "",
|
||||
"stat_prefix": "terminating_gateway_default_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"listenerFilters": [
|
||||
{
|
||||
"name": "envoy.listener.tls_inspector"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"typeUrl": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"nonce": "00000001"
|
||||
}
|
|
@ -1,52 +1,53 @@
package logging

const (
    ACL                string = "acl"
    Agent              string = "agent"
    AntiEntropy        string = "anti_entropy"
    AutoEncrypt        string = "auto_encrypt"
    Autopilot          string = "autopilot"
    AWS                string = "aws"
    Azure              string = "azure"
    CA                 string = "ca"
    CentralConfig      string = "central_config"
    ConfigEntry        string = "config_entry"
    Connect            string = "connect"
    Consul             string = "consul"
    ConsulClient       string = "client"
    ConsulServer       string = "server"
    Coordinate         string = "coordinate"
    DNS                string = "dns"
    Envoy              string = "envoy"
    FederationState    string = "federation_state"
    FSM                string = "fsm"
    GatewayLocator     string = "gateway_locator"
    HTTP               string = "http"
    Intentions         string = "intentions"
    Internal           string = "internal"
    KV                 string = "kvs"
    LAN                string = "lan"
    Leader             string = "leader"
    Legacy             string = "legacy"
    License            string = "license"
    Manager            string = "manager"
    Memberlist         string = "memberlist"
    MeshGateway        string = "mesh_gateway"
    Namespace          string = "namespace"
    Operator           string = "operator"
    PreparedQuery      string = "prepared_query"
    Proxy              string = "proxy"
    ProxyConfig        string = "proxycfg"
    Raft               string = "raft"
    Replication        string = "replication"
    Router             string = "router"
    RPC                string = "rpc"
    Serf               string = "serf"
    Session            string = "session"
    Sentinel           string = "sentinel"
    Snapshot           string = "snapshot"
    TerminatingGateway string = "terminating_gateway"
    TLSUtil            string = "tlsutil"
    Transaction        string = "txn"
    WAN                string = "wan"
    Watch              string = "watch"
)
@ -0,0 +1,14 @@
enable_central_service_config = true

config_entries {
  bootstrap {
    kind = "terminating-gateway"
    name = "terminating-gateway"

    services = [
      {
        name = "s2"
      }
    ]
  }
}
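Outside of this test harness, a config entry like the bootstrap block above would normally be applied with Consul's config CLI. A minimal sketch, assuming the entry has been saved to a local file (the file name is only an illustrative placeholder, not part of this change):

#!/bin/bash
# Apply the terminating-gateway config entry and read it back to confirm it
# was stored. "terminating-gateway.hcl" is a hypothetical file containing the
# same kind/name/services block shown above.
set -euo pipefail

consul config write terminating-gateway.hcl
consul config read -kind terminating-gateway -name terminating-gateway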
@ -0,0 +1,5 @@
services {
  name = "terminating-gateway"
  kind = "terminating-gateway"
  port = 8443
}
@ -0,0 +1,9 @@
#!/bin/bash

set -euo pipefail

# wait for bootstrap to apply config entries
wait_for_config_entry terminating-gateway terminating-gateway

gen_envoy_bootstrap terminating-gateway 20000 primary true
gen_envoy_bootstrap s1 19000
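For context, gen_envoy_bootstrap is a helper from this integration-test framework and its implementation is not shown in this diff. A rough sketch of the general approach it wraps, assuming a gateway registered as "terminating-gateway" and the 1.8-era CLI flags; the flags, ports, and output path below are illustrative assumptions rather than the helper's actual code:

#!/bin/bash
# Hypothetical equivalent of generating an Envoy bootstrap config for the
# gateway by hand and writing it where the compose file expects it.
set -euo pipefail

consul connect envoy -gateway=terminating -register \
  -service terminating-gateway -admin-bind 127.0.0.1:20000 \
  -bootstrap > /workdir/primary/envoy/terminating-gateway-bootstrap.json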
@ -0,0 +1,4 @@
#!/bin/bash

# There is no sidecar proxy for s2, since the terminating gateway acts as the proxy
export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 terminating-gateway-primary"
@ -0,0 +1,33 @@
#!/usr/bin/env bats

load helpers

@test "terminating proxy admin is up on :20000" {
  retry_default curl -f -s localhost:20000/stats -o /dev/null
}

@test "s1 proxy admin is up on :19000" {
  retry_default curl -f -s localhost:19000/stats -o /dev/null
}

@test "terminating-gateway-primary listener is up on :8443" {
  retry_default nc -z localhost:8443
}

@test "terminating-gateway should have healthy endpoints for s2" {
  assert_upstream_has_endpoints_in_status 127.0.0.1:20000 s2 HEALTHY 1
}

@test "s1 upstream should have healthy endpoints for s2" {
  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2.default.primary HEALTHY 1
}

@test "s1 upstream should be able to connect to s2" {
  run retry_default curl -s -f -d hello localhost:5000
  [ "$status" -eq 0 ]
  [ "$output" = "hello" ]
}

@test "terminating-gateway is used for the upstream connection" {
  assert_envoy_metric_at_least 127.0.0.1:20000 "s2.default.primary.*cx_total" 1
}
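The assertions above ultimately query the Envoy admin API on the gateway. A quick manual equivalent, assuming the same admin port used in these tests:

#!/bin/bash
# Inspect what the bats helpers check: per-endpoint health for the s2 cluster
# and the upstream connection counters. /clusters and /stats are standard
# Envoy admin endpoints; the grep patterns are illustrative.
set -euo pipefail

curl -s localhost:20000/clusters | grep 's2.default.primary'
curl -s localhost:20000/stats | grep 's2.default.primary.*cx_total'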
@ -0,0 +1,4 @@
#!/bin/bash

snapshot_envoy_admin localhost:20000 terminating-gateway primary || true
snapshot_envoy_admin localhost:19000 s1 primary || true
@ -0,0 +1,37 @@
config_entries {
  bootstrap {
    kind = "terminating-gateway"
    name = "terminating-gateway"

    services = [
      {
        name = "s2"
      }
    ]
  }

  bootstrap {
    kind = "proxy-defaults"
    name = "global"

    config {
      protocol = "http"
    }
  }

  bootstrap {
    kind = "service-resolver"
    name = "s2"

    default_subset = "v1"

    subsets = {
      "v1" = {
        filter = "Service.Meta.version == v1"
      }
      "v2" = {
        filter = "Service.Meta.version == v2"
      }
    }
  }
}
@ -0,0 +1,9 @@
services {
  name = "terminating-gateway"
  kind = "terminating-gateway"
  port = 8443

  meta {
    version = "v1"
  }
}
@ -0,0 +1,9 @@
services {
  id   = "s2-v1"
  name = "s2"
  port = 8182

  meta {
    version = "v1"
  }
}
@ -0,0 +1,9 @@
services {
  id   = "s2-v2"
  name = "s2"
  port = 8183

  meta {
    version = "v2"
  }
}
@ -0,0 +1,12 @@
#!/bin/bash

set -euo pipefail

# wait for bootstrap to apply config entries
wait_for_config_entry terminating-gateway terminating-gateway
wait_for_config_entry proxy-defaults global
wait_for_config_entry service-resolver s2

# terminating gateway will act as s2's proxy
gen_envoy_bootstrap s1 19000
gen_envoy_bootstrap terminating-gateway 20000 primary true
@ -0,0 +1,8 @@
#!/bin/bash

# There is no sidecar proxy for s2-v1, since the terminating gateway acts as the proxy
export REQUIRED_SERVICES="
s1 s1-sidecar-proxy
s2-v1
terminating-gateway-primary
"
@ -0,0 +1,40 @@
#!/usr/bin/env bats

load helpers

@test "s1 proxy admin is up on :19000" {
  retry_default curl -f -s localhost:19000/stats -o /dev/null
}

@test "terminating proxy admin is up on :20000" {
  retry_default curl -f -s localhost:20000/stats -o /dev/null
}

@test "terminating-gateway-primary listener is up on :8443" {
  retry_default nc -z localhost:8443
}

@test "s1 proxy listener should be up and have right cert" {
  assert_proxy_presents_cert_uri localhost:21000 s1
}

@test "s1 upstream should have healthy endpoints for v1.s2" {
  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 v1.s2 HEALTHY 1
}

@test "terminating-gateway should have healthy endpoints for v1.s2" {
  assert_upstream_has_endpoints_in_status 127.0.0.1:20000 v1.s2 HEALTHY 1
}

@test "terminating-gateway should have healthy endpoints for v2.s2" {
  assert_upstream_has_endpoints_in_status 127.0.0.1:20000 v2.s2 HEALTHY 1
}

@test "s1 upstream should be able to connect to s2-v1 via terminating-gateway" {
  assert_expected_fortio_name s2-v1
}

@test "terminating-gateway is used for the upstream connection" {
  assert_envoy_metric_at_least 127.0.0.1:20000 "v1.s2.default.primary.*cx_total" 1
}
@ -0,0 +1,2 @@
bind_addr = "0.0.0.0"
advertise_addr = "{{ GetInterfaceIP \"eth0\" }}"
@ -0,0 +1,5 @@
services {
  name = "terminating-gateway"
  kind = "terminating-gateway"
  port = 4431
}
@ -0,0 +1 @@
# We don't want an s1 service
@ -0,0 +1 @@
# We don't want an s2 service
@ -0,0 +1,5 @@
#!/bin/bash

set -eEuo pipefail

gen_envoy_bootstrap terminating-gateway 19000 primary true
@ -0,0 +1,3 @@
#!/bin/bash

export REQUIRED_SERVICES="terminating-gateway-primary"
@ -0,0 +1,11 @@
#!/usr/bin/env bats

load helpers

@test "terminating-gateway-primary proxy admin is up on :19000" {
  retry_default curl -f -s localhost:19000/stats -o /dev/null
}

@test "terminating-gateway-primary listener is up on :4431" {
  retry_default nc -z localhost:4431
}
@ -580,6 +580,23 @@ services:
      - *workdir-volume
    network_mode: service:consul-primary

  terminating-gateway-primary:
    depends_on:
      - consul-primary
    image: "envoyproxy/envoy:v${ENVOY_VERSION}"
    command:
      - "envoy"
      - "-c"
      - "/workdir/primary/envoy/terminating-gateway-bootstrap.json"
      - "-l"
      - "debug"
      - "--disable-hot-restart"
      - "--drain-time-s"
      - "1"
    volumes:
      - *workdir-volume
    network_mode: service:consul-primary

  verify-primary:
    depends_on:
      - consul-primary