2023-03-28 18:39:22 +00:00
// Copyright (c) HashiCorp, Inc.
2023-08-11 13:12:13 +00:00
// SPDX-License-Identifier: BUSL-1.1
2023-03-28 18:39:22 +00:00
2018-10-03 18:18:55 +00:00
package xds
import (
"errors"
"fmt"
2019-09-26 02:55:52 +00:00
"net"
"net/url"
"regexp"
2021-02-05 22:28:07 +00:00
"sort"
2019-09-26 02:55:52 +00:00
"strconv"
2019-04-29 16:27:57 +00:00
"strings"
2021-01-25 19:50:00 +00:00
"time"
2018-10-03 18:18:55 +00:00
2021-02-26 22:23:15 +00:00
envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
envoy_route_v3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
2022-04-18 16:36:07 +00:00
envoy_grpc_http1_bridge_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/grpc_http1_bridge/v3"
2021-04-29 20:22:03 +00:00
envoy_grpc_stats_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/grpc_stats/v3"
2022-04-18 16:36:07 +00:00
envoy_http_router_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3"
2022-12-22 20:18:15 +00:00
envoy_extensions_filters_listener_http_inspector_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/http_inspector/v3"
2022-04-18 16:36:07 +00:00
envoy_original_dst_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/original_dst/v3"
envoy_tls_inspector_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/tls_inspector/v3"
2022-05-19 17:06:13 +00:00
envoy_connection_limit_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/connection_limit/v3"
2021-02-26 22:23:15 +00:00
envoy_http_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
2022-04-18 16:36:07 +00:00
envoy_sni_cluster_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/sni_cluster/v3"
2021-02-26 22:23:15 +00:00
envoy_tcp_proxy_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/tcp_proxy/v3"
envoy_tls_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
envoy_type_v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
2023-08-15 18:57:07 +00:00
"github.com/hashicorp/consul/agent/xds/config"
"github.com/hashicorp/consul/agent/xds/naming"
"github.com/hashicorp/consul/agent/xds/platform"
2021-02-22 21:00:15 +00:00
2022-12-22 20:18:15 +00:00
"github.com/hashicorp/go-hclog"
2023-01-11 14:39:10 +00:00
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
2022-06-03 21:42:50 +00:00
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/wrapperspb"
2021-02-22 21:00:15 +00:00
2022-06-01 20:31:37 +00:00
"github.com/hashicorp/consul/acl"
2019-08-19 18:03:03 +00:00
"github.com/hashicorp/consul/agent/connect"
2018-10-03 18:18:55 +00:00
"github.com/hashicorp/consul/agent/proxycfg"
"github.com/hashicorp/consul/agent/structs"
2022-12-22 20:18:15 +00:00
"github.com/hashicorp/consul/agent/xds/accesslogs"
2023-08-17 18:43:21 +00:00
"github.com/hashicorp/consul/agent/xds/response"
2023-02-06 17:14:35 +00:00
"github.com/hashicorp/consul/envoyextensions/xdscommon"
2022-06-01 20:31:37 +00:00
"github.com/hashicorp/consul/lib"
2022-06-15 19:36:18 +00:00
"github.com/hashicorp/consul/lib/stringslice"
2023-02-17 21:14:46 +00:00
"github.com/hashicorp/consul/proto/private/pbpeering"
2021-04-29 20:22:03 +00:00
"github.com/hashicorp/consul/sdk/iptables"
2022-06-01 20:31:37 +00:00
"github.com/hashicorp/consul/types"
2018-10-03 18:18:55 +00:00
)
2019-06-24 19:05:36 +00:00
// listenersFromSnapshot returns the xDS API representation of the "listeners" in the snapshot.
2021-04-29 18:54:05 +00:00
func ( s * ResourceGenerator ) listenersFromSnapshot ( cfgSnap * proxycfg . ConfigSnapshot ) ( [ ] proto . Message , error ) {
2018-10-03 18:18:55 +00:00
if cfgSnap == nil {
return nil , errors . New ( "nil config given" )
}
2019-06-24 19:05:36 +00:00
switch cfgSnap . Kind {
case structs . ServiceKindConnectProxy :
2021-04-29 18:54:05 +00:00
return s . listenersFromSnapshotConnectProxy ( cfgSnap )
2023-02-08 21:52:12 +00:00
case structs . ServiceKindTerminatingGateway ,
structs . ServiceKindMeshGateway ,
structs . ServiceKindIngressGateway ,
structs . ServiceKindAPIGateway :
2021-04-29 18:54:05 +00:00
return s . listenersFromSnapshotGateway ( cfgSnap )
2019-06-24 19:05:36 +00:00
default :
return nil , fmt . Errorf ( "Invalid service kind: %v" , cfgSnap . Kind )
}
}
2019-06-18 00:52:01 +00:00
// listenersFromSnapshotConnectProxy returns the "listeners" for a connect proxy service
2021-04-29 18:54:05 +00:00
func ( s * ResourceGenerator ) listenersFromSnapshotConnectProxy ( cfgSnap * proxycfg . ConfigSnapshot ) ( [ ] proto . Message , error ) {
2021-03-17 19:40:49 +00:00
resources := make ( [ ] proto . Message , 1 )
2018-10-03 18:18:55 +00:00
var err error
2021-03-17 01:22:26 +00:00
// Configure inbound listener.
2023-01-06 17:13:40 +00:00
resources [ 0 ] , err = s . makeInboundListener ( cfgSnap , xdscommon . PublicListenerName )
2018-10-03 18:18:55 +00:00
if err != nil {
return nil , err
}
2019-07-02 03:10:51 +00:00
2021-04-12 15:35:14 +00:00
// This outboundListener is exclusively used when transparent proxy mode is active.
2021-03-17 19:40:49 +00:00
// In that situation there is a single listener where we are redirecting outbound traffic,
// and each upstream gets a filter chain attached to that listener.
var outboundListener * envoy_listener_v3 . Listener
2021-04-12 15:35:14 +00:00
if cfgSnap . Proxy . Mode == structs . ProxyModeTransparent {
port := iptables . DefaultTProxyOutboundPort
if cfgSnap . Proxy . TransparentProxy . OutboundListenerPort != 0 {
port = cfgSnap . Proxy . TransparentProxy . OutboundListenerPort
}
2022-04-18 16:36:07 +00:00
originalDstFilter , err := makeEnvoyListenerFilter ( "envoy.filters.listener.original_dst" , & envoy_original_dst_v3 . OriginalDst { } )
if err != nil {
return nil , err
}
2022-12-22 20:18:15 +00:00
opts := makeListenerOpts {
2023-01-27 19:43:16 +00:00
name : xdscommon . OutboundListenerName ,
2022-12-22 20:18:15 +00:00
accessLogs : cfgSnap . Proxy . AccessLogs ,
addr : "127.0.0.1" ,
port : port ,
direction : envoy_core_v3 . TrafficDirection_OUTBOUND ,
logger : s . Logger ,
}
outboundListener = makeListener ( opts )
2021-03-17 19:40:49 +00:00
outboundListener . FilterChains = make ( [ ] * envoy_listener_v3 . FilterChain , 0 )
2022-07-14 18:45:51 +00:00
2021-06-08 19:18:41 +00:00
outboundListener . ListenerFilters = [ ] * envoy_listener_v3 . ListenerFilter {
2022-04-18 16:36:07 +00:00
// The original_dst filter is a listener filter that recovers the original destination
// address before the iptables redirection. This filter is needed for transparent
// proxies because they route to upstreams using filter chains that match on the
// destination IP address. If the filter is not present, no chain will match.
originalDstFilter ,
2021-06-08 19:18:41 +00:00
}
2021-03-17 19:40:49 +00:00
}
2023-08-15 18:57:07 +00:00
proxyCfg , err := config . ParseProxyConfig ( cfgSnap . Proxy . Config )
2022-08-02 06:52:48 +00:00
if err != nil {
// Don't hard fail on a config typo, just warn. The parse func returns
// default config if there is an error so it's safe to continue.
s . Logger . Warn ( "failed to parse Connect.Proxy.Config" , "error" , err )
}
var tracing * envoy_http_v3 . HttpConnectionManager_Tracing
2022-08-30 06:36:06 +00:00
if proxyCfg . ListenerTracingJSON != "" {
if tracing , err = makeTracingFromUserConfig ( proxyCfg . ListenerTracingJSON ) ; err != nil {
s . Logger . Warn ( "failed to parse ListenerTracingJSON config" , "error" , err )
2022-08-02 06:52:48 +00:00
}
}
2022-09-09 17:58:28 +00:00
upstreamsSnapshot , err := cfgSnap . ToConfigSnapshotUpstreams ( )
if err != nil {
return nil , err
}
2021-12-13 22:30:49 +00:00
2022-09-09 17:58:28 +00:00
for uid , chain := range cfgSnap . ConnectProxy . DiscoveryChain {
2023-01-18 19:43:53 +00:00
upstreamCfg , skip := cfgSnap . ConnectProxy . GetUpstream ( uid , & cfgSnap . ProxyID . EnterpriseMeta )
2022-09-09 17:58:28 +00:00
if skip {
2021-12-13 22:30:49 +00:00
// Discovery chain is not associated with a known explicit or implicit upstream so it is skipped.
continue
}
2022-01-20 16:12:04 +00:00
cfg := s . getAndModifyUpstreamConfigForListener ( uid , upstreamCfg , chain )
2021-03-17 19:40:49 +00:00
// If escape hatch is present, create a listener from it and move on to the next
2021-03-18 03:13:40 +00:00
if cfg . EnvoyListenerJSON != "" {
upstreamListener , err := makeListenerFromUserConfig ( cfg . EnvoyListenerJSON )
2021-03-17 19:40:49 +00:00
if err != nil {
return nil , err
}
resources = append ( resources , upstreamListener )
continue
}
Update filter chain creation for sidecar/ingress listeners (#11245)
The duo of `makeUpstreamFilterChainForDiscoveryChain` and `makeListenerForDiscoveryChain` were really hard to reason about, and led to concealing a bug in their branching logic. There were several issues here:
- They tried to accomplish too much: determining filter name, cluster name, and whether RDS should be used.
- They embedded logic to handle significantly different kinds of upstream listeners (passthrough, prepared query, typical services, and catch-all)
- They needed to coalesce different data sources (Upstream and CompiledDiscoveryChain)
Rather than handling all of those tasks inside of these functions, this PR pulls out the RDS/clusterName/filterName logic.
This refactor also fixed a bug with the handling of [UpstreamDefaults](https://www.consul.io/docs/connect/config-entries/service-defaults#defaults). These defaults get stored as UpstreamConfig in the proxy snapshot with a DestinationName of "*", since they apply to all upstreams. However, this wildcard destination name must not be used when creating the name of the associated upstream cluster. The coalescing logic in the original functions here was in some situations creating clusters with a `*.` prefix, which is not a valid destination.
2021-11-09 21:43:51 +00:00
// RDS, Envoy's Route Discovery Service, is only used for HTTP services with a customized discovery chain.
2022-03-30 15:04:18 +00:00
useRDS := chain . Protocol != "tcp" && ! chain . Default
Update filter chain creation for sidecar/ingress listeners (#11245)
The duo of `makeUpstreamFilterChainForDiscoveryChain` and `makeListenerForDiscoveryChain` were really hard to reason about, and led to concealing a bug in their branching logic. There were several issues here:
- They tried to accomplish too much: determining filter name, cluster name, and whether RDS should be used.
- They embedded logic to handle significantly different kinds of upstream listeners (passthrough, prepared query, typical services, and catch-all)
- They needed to coalesce different data sources (Upstream and CompiledDiscoveryChain)
Rather than handling all of those tasks inside of these functions, this PR pulls out the RDS/clusterName/filterName logic.
This refactor also fixed a bug with the handling of [UpstreamDefaults](https://www.consul.io/docs/connect/config-entries/service-defaults#defaults). These defaults get stored as UpstreamConfig in the proxy snapshot with a DestinationName of "*", since they apply to all upstreams. However, this wildcard destination name must not be used when creating the name of the associated upstream cluster. The coalescing logic in the original functions here was in some situations creating clusters with a `*.` prefix, which is not a valid destination.
2021-11-09 21:43:51 +00:00
2023-02-23 16:32:32 +00:00
var clusterName string
Update filter chain creation for sidecar/ingress listeners (#11245)
The duo of `makeUpstreamFilterChainForDiscoveryChain` and `makeListenerForDiscoveryChain` were really hard to reason about, and led to concealing a bug in their branching logic. There were several issues here:
- They tried to accomplish too much: determining filter name, cluster name, and whether RDS should be used.
- They embedded logic to handle significantly different kinds of upstream listeners (passthrough, prepared query, typical services, and catch-all)
- They needed to coalesce different data sources (Upstream and CompiledDiscoveryChain)
Rather than handling all of those tasks inside of these functions, this PR pulls out the RDS/clusterName/filterName logic.
This refactor also fixed a bug with the handling of [UpstreamDefaults](https://www.consul.io/docs/connect/config-entries/service-defaults#defaults). These defaults get stored as UpstreamConfig in the proxy snapshot with a DestinationName of "*", since they apply to all upstreams. However, this wildcard destination name must not be used when creating the name of the associated upstream cluster. The coalescing logic in the original functions here was in some situations creating clusters with a `*.` prefix, which is not a valid destination.
2021-11-09 21:43:51 +00:00
if ! useRDS {
// When not using RDS we must generate a cluster name to attach to the filter chain.
// With RDS, cluster names get attached to the dynamic routes instead.
target , err := simpleChainTarget ( chain )
if err != nil {
return nil , err
}
2022-09-09 17:58:28 +00:00
2023-07-20 22:02:21 +00:00
clusterName = s . getTargetClusterName ( upstreamsSnapshot , chain , target . ID , false )
2023-02-23 16:32:32 +00:00
if clusterName == "" {
2022-09-09 17:58:28 +00:00
continue
}
Update filter chain creation for sidecar/ingress listeners (#11245)
The duo of `makeUpstreamFilterChainForDiscoveryChain` and `makeListenerForDiscoveryChain` were really hard to reason about, and led to concealing a bug in their branching logic. There were several issues here:
- They tried to accomplish too much: determining filter name, cluster name, and whether RDS should be used.
- They embedded logic to handle significantly different kinds of upstream listeners (passthrough, prepared query, typical services, and catch-all)
- They needed to coalesce different data sources (Upstream and CompiledDiscoveryChain)
Rather than handling all of those tasks inside of these functions, this PR pulls out the RDS/clusterName/filterName logic.
This refactor also fixed a bug with the handling of [UpstreamDefaults](https://www.consul.io/docs/connect/config-entries/service-defaults#defaults). These defaults get stored as UpstreamConfig in the proxy snapshot with a DestinationName of "*", since they apply to all upstreams. However, this wildcard destination name must not be used when creating the name of the associated upstream cluster. The coalescing logic in the original functions here was in some situations creating clusters with a `*.` prefix, which is not a valid destination.
2021-11-09 21:43:51 +00:00
}
filterName := fmt . Sprintf ( "%s.%s.%s.%s" , chain . ServiceName , chain . Namespace , chain . Partition , chain . Datacenter )
2021-03-26 20:00:44 +00:00
// Generate the upstream listeners for when they are explicitly set with a local bind port or socket path
Update filter chain creation for sidecar/ingress listeners (#11245)
The duo of `makeUpstreamFilterChainForDiscoveryChain` and `makeListenerForDiscoveryChain` were really hard to reason about, and led to concealing a bug in their branching logic. There were several issues here:
- They tried to accomplish too much: determining filter name, cluster name, and whether RDS should be used.
- They embedded logic to handle significantly different kinds of upstream listeners (passthrough, prepared query, typical services, and catch-all)
- They needed to coalesce different data sources (Upstream and CompiledDiscoveryChain)
Rather than handling all of those tasks inside of these functions, this PR pulls out the RDS/clusterName/filterName logic.
This refactor also fixed a bug with the handling of [UpstreamDefaults](https://www.consul.io/docs/connect/config-entries/service-defaults#defaults). These defaults get stored as UpstreamConfig in the proxy snapshot with a DestinationName of "*", since they apply to all upstreams. However, this wildcard destination name must not be used when creating the name of the associated upstream cluster. The coalescing logic in the original functions here was in some situations creating clusters with a `*.` prefix, which is not a valid destination.
2021-11-09 21:43:51 +00:00
if upstreamCfg != nil && upstreamCfg . HasLocalPortOrSocket ( ) {
filterChain , err := s . makeUpstreamFilterChain ( filterChainOpts {
2022-12-22 20:18:15 +00:00
accessLogs : & cfgSnap . Proxy . AccessLogs ,
2022-01-20 16:12:04 +00:00
routeName : uid . EnvoyID ( ) ,
2023-02-23 16:32:32 +00:00
clusterName : clusterName ,
Update filter chain creation for sidecar/ingress listeners (#11245)
The duo of `makeUpstreamFilterChainForDiscoveryChain` and `makeListenerForDiscoveryChain` were really hard to reason about, and led to concealing a bug in their branching logic. There were several issues here:
- They tried to accomplish too much: determining filter name, cluster name, and whether RDS should be used.
- They embedded logic to handle significantly different kinds of upstream listeners (passthrough, prepared query, typical services, and catch-all)
- They needed to coalesce different data sources (Upstream and CompiledDiscoveryChain)
Rather than handling all of those tasks inside of these functions, this PR pulls out the RDS/clusterName/filterName logic.
This refactor also fixed a bug with the handling of [UpstreamDefaults](https://www.consul.io/docs/connect/config-entries/service-defaults#defaults). These defaults get stored as UpstreamConfig in the proxy snapshot with a DestinationName of "*", since they apply to all upstreams. However, this wildcard destination name must not be used when creating the name of the associated upstream cluster. The coalescing logic in the original functions here was in some situations creating clusters with a `*.` prefix, which is not a valid destination.
2021-11-09 21:43:51 +00:00
filterName : filterName ,
protocol : cfg . Protocol ,
useRDS : useRDS ,
2022-08-02 06:52:48 +00:00
tracing : tracing ,
Update filter chain creation for sidecar/ingress listeners (#11245)
The duo of `makeUpstreamFilterChainForDiscoveryChain` and `makeListenerForDiscoveryChain` were really hard to reason about, and led to concealing a bug in their branching logic. There were several issues here:
- They tried to accomplish too much: determining filter name, cluster name, and whether RDS should be used.
- They embedded logic to handle significantly different kinds of upstream listeners (passthrough, prepared query, typical services, and catch-all)
- They needed to coalesce different data sources (Upstream and CompiledDiscoveryChain)
Rather than handling all of those tasks inside of these functions, this PR pulls out the RDS/clusterName/filterName logic.
This refactor also fixed a bug with the handling of [UpstreamDefaults](https://www.consul.io/docs/connect/config-entries/service-defaults#defaults). These defaults get stored as UpstreamConfig in the proxy snapshot with a DestinationName of "*", since they apply to all upstreams. However, this wildcard destination name must not be used when creating the name of the associated upstream cluster. The coalescing logic in the original functions here was in some situations creating clusters with a `*.` prefix, which is not a valid destination.
2021-11-09 21:43:51 +00:00
} )
2021-03-17 19:40:49 +00:00
if err != nil {
return nil , err
}
2022-12-22 20:18:15 +00:00
opts := makeListenerOpts {
name : uid . EnvoyID ( ) ,
accessLogs : cfgSnap . Proxy . AccessLogs ,
direction : envoy_core_v3 . TrafficDirection_OUTBOUND ,
logger : s . Logger ,
upstream : upstreamCfg ,
}
upstreamListener := makeListener ( opts )
2022-09-26 16:29:06 +00:00
s . injectConnectionBalanceConfig ( cfg . BalanceOutboundConnections , upstreamListener )
2021-03-17 19:40:49 +00:00
upstreamListener . FilterChains = [ ] * envoy_listener_v3 . FilterChain {
filterChain ,
}
resources = append ( resources , upstreamListener )
// Avoid creating filter chains below for upstreams that have dedicated listeners
continue
2019-07-02 03:10:51 +00:00
}
2021-05-04 14:45:19 +00:00
// The rest of this loop is used exclusively for transparent proxies.
// Below we create a filter chain per upstream, rather than a listener per upstream
// as we do for explicit upstreams above.
Update filter chain creation for sidecar/ingress listeners (#11245)
The duo of `makeUpstreamFilterChainForDiscoveryChain` and `makeListenerForDiscoveryChain` were really hard to reason about, and led to concealing a bug in their branching logic. There were several issues here:
- They tried to accomplish too much: determining filter name, cluster name, and whether RDS should be used.
- They embedded logic to handle significantly different kinds of upstream listeners (passthrough, prepared query, typical services, and catch-all)
- They needed to coalesce different data sources (Upstream and CompiledDiscoveryChain)
Rather than handling all of those tasks inside of these functions, this PR pulls out the RDS/clusterName/filterName logic.
This refactor also fixed a bug with the handling of [UpstreamDefaults](https://www.consul.io/docs/connect/config-entries/service-defaults#defaults). These defaults get stored as UpstreamConfig in the proxy snapshot with a DestinationName of "*", since they apply to all upstreams. However, this wildcard destination name must not be used when creating the name of the associated upstream cluster. The coalescing logic in the original functions here was in some situations creating clusters with a `*.` prefix, which is not a valid destination.
2021-11-09 21:43:51 +00:00
filterChain , err := s . makeUpstreamFilterChain ( filterChainOpts {
2022-12-22 20:18:15 +00:00
accessLogs : & cfgSnap . Proxy . AccessLogs ,
2022-01-20 16:12:04 +00:00
routeName : uid . EnvoyID ( ) ,
2023-02-23 16:32:32 +00:00
clusterName : clusterName ,
Update filter chain creation for sidecar/ingress listeners (#11245)
The duo of `makeUpstreamFilterChainForDiscoveryChain` and `makeListenerForDiscoveryChain` were really hard to reason about, and led to concealing a bug in their branching logic. There were several issues here:
- They tried to accomplish too much: determining filter name, cluster name, and whether RDS should be used.
- They embedded logic to handle significantly different kinds of upstream listeners (passthrough, prepared query, typical services, and catch-all)
- They needed to coalesce different data sources (Upstream and CompiledDiscoveryChain)
Rather than handling all of those tasks inside of these functions, this PR pulls out the RDS/clusterName/filterName logic.
This refactor also fixed a bug with the handling of [UpstreamDefaults](https://www.consul.io/docs/connect/config-entries/service-defaults#defaults). These defaults get stored as UpstreamConfig in the proxy snapshot with a DestinationName of "*", since they apply to all upstreams. However, this wildcard destination name must not be used when creating the name of the associated upstream cluster. The coalescing logic in the original functions here was in some situations creating clusters with a `*.` prefix, which is not a valid destination.
2021-11-09 21:43:51 +00:00
filterName : filterName ,
protocol : cfg . Protocol ,
useRDS : useRDS ,
2022-08-02 06:52:48 +00:00
tracing : tracing ,
Update filter chain creation for sidecar/ingress listeners (#11245)
The duo of `makeUpstreamFilterChainForDiscoveryChain` and `makeListenerForDiscoveryChain` were really hard to reason about, and led to concealing a bug in their branching logic. There were several issues here:
- They tried to accomplish too much: determining filter name, cluster name, and whether RDS should be used.
- They embedded logic to handle significantly different kinds of upstream listeners (passthrough, prepared query, typical services, and catch-all)
- They needed to coalesce different data sources (Upstream and CompiledDiscoveryChain)
Rather than handling all of those tasks inside of these functions, this PR pulls out the RDS/clusterName/filterName logic.
This refactor also fixed a bug with the handling of [UpstreamDefaults](https://www.consul.io/docs/connect/config-entries/service-defaults#defaults). These defaults get stored as UpstreamConfig in the proxy snapshot with a DestinationName of "*", since they apply to all upstreams. However, this wildcard destination name must not be used when creating the name of the associated upstream cluster. The coalescing logic in the original functions here was in some situations creating clusters with a `*.` prefix, which is not a valid destination.
2021-11-09 21:43:51 +00:00
} )
2018-10-03 18:18:55 +00:00
if err != nil {
return nil , err
}
2021-03-17 19:40:49 +00:00
2022-01-20 16:12:04 +00:00
endpoints := cfgSnap . ConnectProxy . WatchedUpstreamEndpoints [ uid ] [ chain . ID ( ) ]
2021-03-17 19:40:49 +00:00
uniqueAddrs := make ( map [ string ] struct { } )
2023-05-17 18:18:39 +00:00
if chain . Partition == cfgSnap . ProxyID . PartitionOrDefault ( ) {
for _ , ip := range chain . AutoVirtualIPs {
uniqueAddrs [ ip ] = struct { } { }
}
for _ , ip := range chain . ManualVirtualIPs {
uniqueAddrs [ ip ] = struct { } { }
}
}
2021-05-04 14:45:19 +00:00
// Match on the virtual IP for the upstream service (identified by the chain's ID).
// We do not match on all endpoints here since it would lead to load balancing across
// all instances when any instance address is dialed.
for _ , e := range endpoints {
2022-01-12 20:08:49 +00:00
if e . Service . Kind == structs . ServiceKind ( structs . TerminatingGateway ) {
key := structs . ServiceGatewayVirtualIPTag ( chain . CompoundServiceName ( ) )
if vip := e . Service . TaggedAddresses [ key ] ; vip . Address != "" {
uniqueAddrs [ vip . Address ] = struct { } { }
}
continue
}
2021-12-01 06:03:08 +00:00
if vip := e . Service . TaggedAddresses [ structs . TaggedAddressVirtualIP ] ; vip . Address != "" {
2021-05-04 14:45:19 +00:00
uniqueAddrs [ vip . Address ] = struct { } { }
2021-03-17 19:40:49 +00:00
}
2021-12-01 06:03:08 +00:00
// The virtualIPTag is used by consul-k8s to store the ClusterIP for a service.
// We only match on this virtual IP if the upstream is in the proxy's partition.
// This is because the IP is not guaranteed to be unique across k8s clusters.
2022-04-05 21:10:06 +00:00
if acl . EqualPartitions ( e . Node . PartitionOrDefault ( ) , cfgSnap . ProxyID . PartitionOrDefault ( ) ) {
2023-08-15 18:57:07 +00:00
if vip := e . Service . TaggedAddresses [ naming . VirtualIPTag ] ; vip . Address != "" {
2021-12-01 06:03:08 +00:00
uniqueAddrs [ vip . Address ] = struct { } { }
}
}
2021-03-17 19:40:49 +00:00
}
2021-12-01 06:03:08 +00:00
if len ( uniqueAddrs ) > 2 {
s . Logger . Debug ( "detected multiple virtual IPs for an upstream, all will be used to match traffic" ,
2022-01-20 16:12:04 +00:00
"upstream" , uid , "ip_count" , len ( uniqueAddrs ) )
2021-05-04 14:45:19 +00:00
}
2021-03-17 19:40:49 +00:00
// For every potential address we collected, create the appropriate address prefix to match on.
// In this case we are matching on exact addresses, so the prefix is the address itself,
// and the prefix length is based on whether it's IPv4 or IPv6.
2021-06-09 20:34:17 +00:00
filterChain . FilterChainMatch = makeFilterChainMatchFromAddrs ( uniqueAddrs )
2021-03-17 19:40:49 +00:00
2021-06-09 20:34:17 +00:00
// Only attach the filter chain if there are addresses to match on
if filterChain . FilterChainMatch != nil && len ( filterChain . FilterChainMatch . PrefixRanges ) > 0 {
outboundListener . FilterChains = append ( outboundListener . FilterChains , filterChain )
}
}
2022-07-18 21:10:06 +00:00
requiresTLSInspector := false
2022-08-01 18:12:43 +00:00
requiresHTTPInspector := false
2022-07-14 18:45:51 +00:00
2022-08-01 18:12:43 +00:00
configuredPorts := make ( map [ int ] interface { } )
2022-07-14 18:45:51 +00:00
err = cfgSnap . ConnectProxy . DestinationsUpstream . ForEachKeyE ( func ( uid proxycfg . UpstreamID ) error {
2022-07-18 21:10:06 +00:00
svcConfig , ok := cfgSnap . ConnectProxy . DestinationsUpstream . Get ( uid )
if ! ok || svcConfig == nil {
return nil
}
2022-07-14 18:45:51 +00:00
2022-08-01 18:12:43 +00:00
if structs . IsProtocolHTTPLike ( svcConfig . Protocol ) {
if _ , ok := configuredPorts [ svcConfig . Destination . Port ] ; ok {
return nil
}
configuredPorts [ svcConfig . Destination . Port ] = struct { } { }
2022-09-27 13:49:28 +00:00
const name = "~http" // name used for the shared route name
2022-08-01 18:12:43 +00:00
routeName := clusterNameForDestination ( cfgSnap , name , fmt . Sprintf ( "%d" , svcConfig . Destination . Port ) , svcConfig . NamespaceOrDefault ( ) , svcConfig . PartitionOrDefault ( ) )
2022-07-14 18:45:51 +00:00
filterChain , err := s . makeUpstreamFilterChain ( filterChainOpts {
2022-12-22 20:18:15 +00:00
accessLogs : & cfgSnap . Proxy . AccessLogs ,
2022-08-01 18:12:43 +00:00
routeName : routeName ,
filterName : routeName ,
protocol : svcConfig . Protocol ,
useRDS : true ,
2022-08-02 06:52:48 +00:00
tracing : tracing ,
2022-07-14 18:45:51 +00:00
} )
if err != nil {
return err
}
2022-08-01 18:12:43 +00:00
filterChain . FilterChainMatch = makeFilterChainMatchFromAddressWithPort ( "" , svcConfig . Destination . Port )
2022-07-14 18:45:51 +00:00
outboundListener . FilterChains = append ( outboundListener . FilterChains , filterChain )
2022-08-01 18:12:43 +00:00
requiresHTTPInspector = true
} else {
for _ , address := range svcConfig . Destination . Addresses {
clusterName := clusterNameForDestination ( cfgSnap , uid . Name , address , uid . NamespaceOrDefault ( ) , uid . PartitionOrDefault ( ) )
2022-07-14 18:45:51 +00:00
2022-08-01 18:12:43 +00:00
filterChain , err := s . makeUpstreamFilterChain ( filterChainOpts {
2022-12-22 20:18:15 +00:00
accessLogs : & cfgSnap . Proxy . AccessLogs ,
2022-08-01 18:12:43 +00:00
routeName : uid . EnvoyID ( ) ,
clusterName : clusterName ,
filterName : clusterName ,
protocol : svcConfig . Protocol ,
2022-08-02 06:52:48 +00:00
tracing : tracing ,
2022-08-01 18:12:43 +00:00
} )
if err != nil {
return err
}
filterChain . FilterChainMatch = makeFilterChainMatchFromAddressWithPort ( address , svcConfig . Destination . Port )
outboundListener . FilterChains = append ( outboundListener . FilterChains , filterChain )
requiresTLSInspector = len ( filterChain . FilterChainMatch . ServerNames ) != 0 || requiresTLSInspector
}
2022-07-14 18:45:51 +00:00
}
return nil
} )
if err != nil {
return nil , err
}
2021-06-09 20:34:17 +00:00
2022-07-18 21:10:06 +00:00
if requiresTLSInspector {
2022-07-14 18:45:51 +00:00
tlsInspector , err := makeTLSInspectorListenerFilter ( )
if err != nil {
return nil , err
}
outboundListener . ListenerFilters = append ( outboundListener . ListenerFilters , tlsInspector )
}
2022-07-19 18:56:28 +00:00
2022-08-01 18:12:43 +00:00
if requiresHTTPInspector {
httpInspector , err := makeHTTPInspectorListenerFilter ( )
if err != nil {
return nil , err
}
outboundListener . ListenerFilters = append ( outboundListener . ListenerFilters , httpInspector )
}
2022-07-19 18:56:28 +00:00
// Looping over explicit and implicit upstreams is only needed for cross-peer
// because they do not have discovery chains.
2022-06-03 21:42:50 +00:00
for _ , uid := range cfgSnap . ConnectProxy . PeeredUpstreamIDs ( ) {
2023-01-18 19:43:53 +00:00
upstreamCfg , skip := cfgSnap . ConnectProxy . GetUpstream ( uid , & cfgSnap . ProxyID . EnterpriseMeta )
2022-09-09 17:58:28 +00:00
if skip {
2022-06-03 21:42:50 +00:00
// Not associated with a known explicit or implicit upstream so it is skipped.
continue
}
2023-01-03 16:44:08 +00:00
peerMeta , found := cfgSnap . ConnectProxy . UpstreamPeerMeta ( uid )
if ! found {
s . Logger . Warn ( "failed to fetch upstream peering metadata for listener" , "uid" , uid )
}
2022-06-03 21:42:50 +00:00
cfg := s . getAndModifyUpstreamConfigForPeeredListener ( uid , upstreamCfg , peerMeta )
// If escape hatch is present, create a listener from it and move on to the next
if cfg . EnvoyListenerJSON != "" {
upstreamListener , err := makeListenerFromUserConfig ( cfg . EnvoyListenerJSON )
if err != nil {
s . Logger . Error ( "failed to parse envoy_listener_json" ,
"upstream" , uid ,
"error" , err )
continue
}
resources = append ( resources , upstreamListener )
continue
}
2022-07-25 17:49:00 +00:00
tbs , ok := cfgSnap . ConnectProxy . UpstreamPeerTrustBundles . Get ( uid . Peer )
if ! ok {
// this should never happen since we loop through upstreams with
// set trust bundles
return nil , fmt . Errorf ( "trust bundle not ready for peer %s" , uid . Peer )
2022-06-03 21:42:50 +00:00
}
2022-07-25 17:49:00 +00:00
clusterName := generatePeeredClusterName ( uid , tbs )
2022-06-03 21:42:50 +00:00
// Generate the upstream listeners for when they are explicitly set with a local bind port or socket path
if upstreamCfg != nil && upstreamCfg . HasLocalPortOrSocket ( ) {
filterChain , err := s . makeUpstreamFilterChain ( filterChainOpts {
2022-12-22 20:18:15 +00:00
accessLogs : & cfgSnap . Proxy . AccessLogs ,
2022-06-03 21:42:50 +00:00
clusterName : clusterName ,
2022-07-25 17:49:00 +00:00
filterName : fmt . Sprintf ( "%s.%s.%s" ,
upstreamCfg . DestinationName ,
upstreamCfg . DestinationNamespace ,
upstreamCfg . DestinationPeer ) ,
routeName : uid . EnvoyID ( ) ,
protocol : cfg . Protocol ,
useRDS : false ,
statPrefix : "upstream_peered." ,
2022-06-03 21:42:50 +00:00
} )
if err != nil {
return nil , err
}
2022-12-22 20:18:15 +00:00
opts := makeListenerOpts {
name : uid . EnvoyID ( ) ,
accessLogs : cfgSnap . Proxy . AccessLogs ,
direction : envoy_core_v3 . TrafficDirection_OUTBOUND ,
logger : s . Logger ,
upstream : upstreamCfg ,
}
upstreamListener := makeListener ( opts )
2022-09-26 16:29:06 +00:00
s . injectConnectionBalanceConfig ( cfg . BalanceOutboundConnections , upstreamListener )
2022-06-03 21:42:50 +00:00
upstreamListener . FilterChains = [ ] * envoy_listener_v3 . FilterChain {
filterChain ,
}
resources = append ( resources , upstreamListener )
// Avoid creating filter chains below for upstreams that have dedicated listeners
continue
}
// The rest of this loop is used exclusively for transparent proxies.
// Below we create a filter chain per upstream, rather than a listener per upstream
// as we do for explicit upstreams above.
2022-07-19 18:56:28 +00:00
filterChain , err := s . makeUpstreamFilterChain ( filterChainOpts {
2022-12-22 20:18:15 +00:00
accessLogs : & cfgSnap . Proxy . AccessLogs ,
2022-07-19 18:56:28 +00:00
routeName : uid . EnvoyID ( ) ,
clusterName : clusterName ,
2022-07-25 17:49:00 +00:00
filterName : fmt . Sprintf ( "%s.%s.%s" ,
uid . Name ,
uid . NamespaceOrDefault ( ) ,
uid . Peer ) ,
protocol : cfg . Protocol ,
useRDS : false ,
statPrefix : "upstream_peered." ,
2022-08-02 06:52:48 +00:00
tracing : tracing ,
2022-07-19 18:56:28 +00:00
} )
if err != nil {
return nil , err
}
endpoints , _ := cfgSnap . ConnectProxy . PeerUpstreamEndpoints . Get ( uid )
uniqueAddrs := make ( map [ string ] struct { } )
// Match on the virtual IP for the upstream service (identified by the chain's ID).
// We do not match on all endpoints here since it would lead to load balancing across
// all instances when any instance address is dialed.
for _ , e := range endpoints {
if vip := e . Service . TaggedAddresses [ structs . TaggedAddressVirtualIP ] ; vip . Address != "" {
uniqueAddrs [ vip . Address ] = struct { } { }
}
// The virtualIPTag is used by consul-k8s to store the ClusterIP for a service.
// For services imported from a peer,the partition will be equal in all cases.
if acl . EqualPartitions ( e . Node . PartitionOrDefault ( ) , cfgSnap . ProxyID . PartitionOrDefault ( ) ) {
2023-08-15 18:57:07 +00:00
if vip := e . Service . TaggedAddresses [ naming . VirtualIPTag ] ; vip . Address != "" {
2022-07-19 18:56:28 +00:00
uniqueAddrs [ vip . Address ] = struct { } { }
}
}
}
if len ( uniqueAddrs ) > 2 {
s . Logger . Debug ( "detected multiple virtual IPs for an upstream, all will be used to match traffic" ,
"upstream" , uid , "ip_count" , len ( uniqueAddrs ) )
}
// For every potential address we collected, create the appropriate address prefix to match on.
// In this case we are matching on exact addresses, so the prefix is the address itself,
// and the prefix length is based on whether it's IPv4 or IPv6.
filterChain . FilterChainMatch = makeFilterChainMatchFromAddrs ( uniqueAddrs )
// Only attach the filter chain if there are addresses to match on
if filterChain . FilterChainMatch != nil && len ( filterChain . FilterChainMatch . PrefixRanges ) > 0 {
outboundListener . FilterChains = append ( outboundListener . FilterChains , filterChain )
}
2022-06-03 21:42:50 +00:00
}
2021-06-09 20:34:17 +00:00
if outboundListener != nil {
// Add a passthrough for every mesh endpoint that can be dialed directly,
// as opposed to via a virtual IP.
var passthroughChains [ ] * envoy_listener_v3 . FilterChain
2021-03-17 19:40:49 +00:00
2022-01-28 06:49:06 +00:00
for _ , targets := range cfgSnap . ConnectProxy . PassthroughUpstreams {
for tid , addrs := range targets {
2022-01-28 03:52:26 +00:00
uid := proxycfg . NewUpstreamIDFromTargetID ( tid )
2021-04-08 17:27:57 +00:00
2022-01-28 03:52:26 +00:00
sni := connect . ServiceSNI (
uid . Name , "" , uid . NamespaceOrDefault ( ) , uid . PartitionOrDefault ( ) , cfgSnap . Datacenter , cfgSnap . Roots . TrustDomain )
2021-04-08 17:27:57 +00:00
2022-01-28 06:49:06 +00:00
filterName := fmt . Sprintf ( "%s.%s.%s.%s" , uid . Name , uid . NamespaceOrDefault ( ) , uid . PartitionOrDefault ( ) , cfgSnap . Datacenter )
2021-03-17 19:40:49 +00:00
2022-01-28 03:52:26 +00:00
filterChain , err := s . makeUpstreamFilterChain ( filterChainOpts {
2022-12-22 20:18:15 +00:00
accessLogs : & cfgSnap . Proxy . AccessLogs ,
2022-01-28 03:52:26 +00:00
clusterName : "passthrough~" + sni ,
filterName : filterName ,
protocol : "tcp" ,
} )
if err != nil {
return nil , err
}
filterChain . FilterChainMatch = makeFilterChainMatchFromAddrs ( addrs )
passthroughChains = append ( passthroughChains , filterChain )
}
2021-03-17 19:40:49 +00:00
}
2021-06-09 20:34:17 +00:00
outboundListener . FilterChains = append ( outboundListener . FilterChains , passthroughChains ... )
2021-03-17 19:40:49 +00:00
// Filter chains are stable sorted to avoid draining if the list is provided out of order
sort . SliceStable ( outboundListener . FilterChains , func ( i , j int ) bool {
2022-07-14 18:45:51 +00:00
si := ""
sj := ""
if len ( outboundListener . FilterChains [ i ] . FilterChainMatch . PrefixRanges ) > 0 {
si += outboundListener . FilterChains [ i ] . FilterChainMatch . PrefixRanges [ 0 ] . AddressPrefix +
"/" + outboundListener . FilterChains [ i ] . FilterChainMatch . PrefixRanges [ 0 ] . PrefixLen . String ( ) +
":" + outboundListener . FilterChains [ i ] . FilterChainMatch . DestinationPort . String ( )
}
if len ( outboundListener . FilterChains [ i ] . FilterChainMatch . ServerNames ) > 0 {
2022-08-01 18:12:43 +00:00
si += outboundListener . FilterChains [ i ] . FilterChainMatch . ServerNames [ 0 ] +
":" + outboundListener . FilterChains [ i ] . FilterChainMatch . DestinationPort . String ( )
} else {
si += outboundListener . FilterChains [ i ] . FilterChainMatch . DestinationPort . String ( )
2022-07-14 18:45:51 +00:00
}
if len ( outboundListener . FilterChains [ j ] . FilterChainMatch . PrefixRanges ) > 0 {
sj += outboundListener . FilterChains [ j ] . FilterChainMatch . PrefixRanges [ 0 ] . AddressPrefix +
"/" + outboundListener . FilterChains [ j ] . FilterChainMatch . PrefixRanges [ 0 ] . PrefixLen . String ( ) +
":" + outboundListener . FilterChains [ j ] . FilterChainMatch . DestinationPort . String ( )
}
if len ( outboundListener . FilterChains [ j ] . FilterChainMatch . ServerNames ) > 0 {
2022-08-01 18:12:43 +00:00
sj += outboundListener . FilterChains [ j ] . FilterChainMatch . ServerNames [ 0 ] +
":" + outboundListener . FilterChains [ j ] . FilterChainMatch . DestinationPort . String ( )
} else {
sj += outboundListener . FilterChains [ j ] . FilterChainMatch . DestinationPort . String ( )
2022-07-14 18:45:51 +00:00
}
return si < sj
2021-03-17 19:40:49 +00:00
} )
2021-06-14 20:15:09 +00:00
// Add a catch-all filter chain that acts as a TCP proxy to destinations outside the mesh
2022-03-30 18:43:59 +00:00
if meshConf := cfgSnap . MeshConfig ( ) ; meshConf == nil ||
! meshConf . TransparentProxy . MeshDestinationsOnly {
2021-04-06 18:19:59 +00:00
Update filter chain creation for sidecar/ingress listeners (#11245)
The duo of `makeUpstreamFilterChainForDiscoveryChain` and `makeListenerForDiscoveryChain` were really hard to reason about, and led to concealing a bug in their branching logic. There were several issues here:
- They tried to accomplish too much: determining filter name, cluster name, and whether RDS should be used.
- They embedded logic to handle significantly different kinds of upstream listeners (passthrough, prepared query, typical services, and catch-all)
- They needed to coalesce different data sources (Upstream and CompiledDiscoveryChain)
Rather than handling all of those tasks inside of these functions, this PR pulls out the RDS/clusterName/filterName logic.
This refactor also fixed a bug with the handling of [UpstreamDefaults](https://www.consul.io/docs/connect/config-entries/service-defaults#defaults). These defaults get stored as UpstreamConfig in the proxy snapshot with a DestinationName of "*", since they apply to all upstreams. However, this wildcard destination name must not be used when creating the name of the associated upstream cluster. The coalescing logic in the original functions here was in some situations creating clusters with a `*.` prefix, which is not a valid destination.
2021-11-09 21:43:51 +00:00
filterChain , err := s . makeUpstreamFilterChain ( filterChainOpts {
2022-12-22 20:18:15 +00:00
accessLogs : & cfgSnap . Proxy . AccessLogs ,
2023-08-15 18:57:07 +00:00
clusterName : naming . OriginalDestinationClusterName ,
filterName : naming . OriginalDestinationClusterName ,
Update filter chain creation for sidecar/ingress listeners (#11245)
The duo of `makeUpstreamFilterChainForDiscoveryChain` and `makeListenerForDiscoveryChain` were really hard to reason about, and led to concealing a bug in their branching logic. There were several issues here:
- They tried to accomplish too much: determining filter name, cluster name, and whether RDS should be used.
- They embedded logic to handle significantly different kinds of upstream listeners (passthrough, prepared query, typical services, and catch-all)
- They needed to coalesce different data sources (Upstream and CompiledDiscoveryChain)
Rather than handling all of those tasks inside of these functions, this PR pulls out the RDS/clusterName/filterName logic.
This refactor also fixed a bug with the handling of [UpstreamDefaults](https://www.consul.io/docs/connect/config-entries/service-defaults#defaults). These defaults get stored as UpstreamConfig in the proxy snapshot with a DestinationName of "*", since they apply to all upstreams. However, this wildcard destination name must not be used when creating the name of the associated upstream cluster. The coalescing logic in the original functions here was in some situations creating clusters with a `*.` prefix, which is not a valid destination.
2021-11-09 21:43:51 +00:00
protocol : "tcp" ,
} )
2021-04-06 18:19:59 +00:00
if err != nil {
return nil , err
}
2022-07-14 18:45:51 +00:00
outboundListener . DefaultFilterChain = filterChain
2021-03-17 19:40:49 +00:00
}
2021-06-09 20:34:17 +00:00
// Only add the outbound listener if configured.
2022-07-14 18:45:51 +00:00
if len ( outboundListener . FilterChains ) > 0 || outboundListener . DefaultFilterChain != nil {
2021-06-09 20:34:17 +00:00
resources = append ( resources , outboundListener )
}
2021-03-17 19:40:49 +00:00
}
2021-03-17 21:32:52 +00:00
// Looping over explicit upstreams is only needed for prepared queries because they do not have discovery chains
2022-01-20 16:12:04 +00:00
for uid , u := range cfgSnap . ConnectProxy . UpstreamConfig {
2021-03-17 21:32:52 +00:00
if u . DestinationType != structs . UpstreamDestTypePreparedQuery {
2021-03-17 19:40:49 +00:00
continue
}
cfg , err := structs . ParseUpstreamConfig ( u . Config )
if err != nil {
// Don't hard fail on a config typo, just warn. The parse func returns
// default config if there is an error so it's safe to continue.
2022-01-20 16:12:04 +00:00
s . Logger . Warn ( "failed to parse" , "upstream" , uid , "error" , err )
2021-03-17 19:40:49 +00:00
}
2021-09-22 19:27:10 +00:00
// If escape hatch is present, create a listener from it and move on to the next
if cfg . EnvoyListenerJSON != "" {
upstreamListener , err := makeListenerFromUserConfig ( cfg . EnvoyListenerJSON )
if err != nil {
s . Logger . Error ( "failed to parse envoy_listener_json" ,
2022-01-20 16:12:04 +00:00
"upstream" , uid ,
2021-09-22 19:27:10 +00:00
"error" , err )
continue
}
resources = append ( resources , upstreamListener )
continue
}
2022-12-22 20:18:15 +00:00
opts := makeListenerOpts {
name : uid . EnvoyID ( ) ,
accessLogs : cfgSnap . Proxy . AccessLogs ,
direction : envoy_core_v3 . TrafficDirection_OUTBOUND ,
logger : s . Logger ,
upstream : u ,
}
upstreamListener := makeListener ( opts )
2022-09-26 16:29:06 +00:00
s . injectConnectionBalanceConfig ( cfg . BalanceOutboundConnections , upstreamListener )
2021-03-17 19:40:49 +00:00
Update filter chain creation for sidecar/ingress listeners (#11245)
The duo of `makeUpstreamFilterChainForDiscoveryChain` and `makeListenerForDiscoveryChain` were really hard to reason about, and led to concealing a bug in their branching logic. There were several issues here:
- They tried to accomplish too much: determining filter name, cluster name, and whether RDS should be used.
- They embedded logic to handle significantly different kinds of upstream listeners (passthrough, prepared query, typical services, and catch-all)
- They needed to coalesce different data sources (Upstream and CompiledDiscoveryChain)
Rather than handling all of those tasks inside of these functions, this PR pulls out the RDS/clusterName/filterName logic.
This refactor also fixed a bug with the handling of [UpstreamDefaults](https://www.consul.io/docs/connect/config-entries/service-defaults#defaults). These defaults get stored as UpstreamConfig in the proxy snapshot with a DestinationName of "*", since they apply to all upstreams. However, this wildcard destination name must not be used when creating the name of the associated upstream cluster. The coalescing logic in the original functions here was in some situations creating clusters with a `*.` prefix, which is not a valid destination.
2021-11-09 21:43:51 +00:00
filterChain , err := s . makeUpstreamFilterChain ( filterChainOpts {
// TODO (SNI partition) add partition for upstream SNI
2022-12-22 20:18:15 +00:00
accessLogs : & cfgSnap . Proxy . AccessLogs ,
Update filter chain creation for sidecar/ingress listeners (#11245)
The duo of `makeUpstreamFilterChainForDiscoveryChain` and `makeListenerForDiscoveryChain` were really hard to reason about, and led to concealing a bug in their branching logic. There were several issues here:
- They tried to accomplish too much: determining filter name, cluster name, and whether RDS should be used.
- They embedded logic to handle significantly different kinds of upstream listeners (passthrough, prepared query, typical services, and catch-all)
- They needed to coalesce different data sources (Upstream and CompiledDiscoveryChain)
Rather than handling all of those tasks inside of these functions, this PR pulls out the RDS/clusterName/filterName logic.
This refactor also fixed a bug with the handling of [UpstreamDefaults](https://www.consul.io/docs/connect/config-entries/service-defaults#defaults). These defaults get stored as UpstreamConfig in the proxy snapshot with a DestinationName of "*", since they apply to all upstreams. However, this wildcard destination name must not be used when creating the name of the associated upstream cluster. The coalescing logic in the original functions here was in some situations creating clusters with a `*.` prefix, which is not a valid destination.
2021-11-09 21:43:51 +00:00
clusterName : connect . UpstreamSNI ( u , "" , cfgSnap . Datacenter , cfgSnap . Roots . TrustDomain ) ,
2022-01-20 16:12:04 +00:00
filterName : uid . EnvoyID ( ) ,
routeName : uid . EnvoyID ( ) ,
Update filter chain creation for sidecar/ingress listeners (#11245)
The duo of `makeUpstreamFilterChainForDiscoveryChain` and `makeListenerForDiscoveryChain` were really hard to reason about, and led to concealing a bug in their branching logic. There were several issues here:
- They tried to accomplish too much: determining filter name, cluster name, and whether RDS should be used.
- They embedded logic to handle significantly different kinds of upstream listeners (passthrough, prepared query, typical services, and catch-all)
- They needed to coalesce different data sources (Upstream and CompiledDiscoveryChain)
Rather than handling all of those tasks inside of these functions, this PR pulls out the RDS/clusterName/filterName logic.
This refactor also fixed a bug with the handling of [UpstreamDefaults](https://www.consul.io/docs/connect/config-entries/service-defaults#defaults). These defaults get stored as UpstreamConfig in the proxy snapshot with a DestinationName of "*", since they apply to all upstreams. However, this wildcard destination name must not be used when creating the name of the associated upstream cluster. The coalescing logic in the original functions here was in some situations creating clusters with a `*.` prefix, which is not a valid destination.
2021-11-09 21:43:51 +00:00
protocol : cfg . Protocol ,
2022-08-02 06:52:48 +00:00
tracing : tracing ,
Update filter chain creation for sidecar/ingress listeners (#11245)
The duo of `makeUpstreamFilterChainForDiscoveryChain` and `makeListenerForDiscoveryChain` were really hard to reason about, and led to concealing a bug in their branching logic. There were several issues here:
- They tried to accomplish too much: determining filter name, cluster name, and whether RDS should be used.
- They embedded logic to handle significantly different kinds of upstream listeners (passthrough, prepared query, typical services, and catch-all)
- They needed to coalesce different data sources (Upstream and CompiledDiscoveryChain)
Rather than handling all of those tasks inside of these functions, this PR pulls out the RDS/clusterName/filterName logic.
This refactor also fixed a bug with the handling of [UpstreamDefaults](https://www.consul.io/docs/connect/config-entries/service-defaults#defaults). These defaults get stored as UpstreamConfig in the proxy snapshot with a DestinationName of "*", since they apply to all upstreams. However, this wildcard destination name must not be used when creating the name of the associated upstream cluster. The coalescing logic in the original functions here was in some situations creating clusters with a `*.` prefix, which is not a valid destination.
2021-11-09 21:43:51 +00:00
} )
2021-03-17 19:40:49 +00:00
if err != nil {
return nil , err
}
upstreamListener . FilterChains = [ ] * envoy_listener_v3 . FilterChain {
filterChain ,
}
resources = append ( resources , upstreamListener )
2018-10-03 18:18:55 +00:00
}
2019-09-26 02:55:52 +00:00
2020-01-28 23:50:41 +00:00
cfgSnap . Proxy . Expose . Finalize ( )
2019-09-26 02:55:52 +00:00
paths := cfgSnap . Proxy . Expose . Paths
// Add service health checks to the list of paths to create listeners for if needed
if cfgSnap . Proxy . Expose . Checks {
2020-01-24 15:04:58 +00:00
psid := structs . NewServiceID ( cfgSnap . Proxy . DestinationServiceID , & cfgSnap . ProxyID . EnterpriseMeta )
2022-06-06 14:15:33 +00:00
for _ , check := range cfgSnap . ConnectProxy . WatchedServiceChecks [ psid ] {
2019-09-26 02:55:52 +00:00
p , err := parseCheckPath ( check )
if err != nil {
2020-01-28 23:50:41 +00:00
s . Logger . Warn ( "failed to create listener for" , "check" , check . CheckID , "error" , err )
2019-09-26 02:55:52 +00:00
continue
}
paths = append ( paths , p )
}
}
// Configure additional listener for exposed check paths
for _ , path := range paths {
2023-01-06 17:13:40 +00:00
clusterName := xdscommon . LocalAppClusterName
2019-09-26 02:55:52 +00:00
if path . LocalPathPort != cfgSnap . Proxy . LocalServicePort {
clusterName = makeExposeClusterName ( path . LocalPathPort )
}
l , err := s . makeExposedCheckListener ( cfgSnap , clusterName , path )
if err != nil {
return nil , err
}
resources = append ( resources , l )
}
2018-10-03 18:18:55 +00:00
return resources , nil
}
2021-06-09 20:34:17 +00:00
func makeFilterChainMatchFromAddrs ( addrs map [ string ] struct { } ) * envoy_listener_v3 . FilterChainMatch {
ranges := make ( [ ] * envoy_core_v3 . CidrRange , 0 )
for addr := range addrs {
ip := net . ParseIP ( addr )
if ip == nil {
continue
}
pfxLen := uint32 ( 32 )
if ip . To4 ( ) == nil {
pfxLen = 128
}
ranges = append ( ranges , & envoy_core_v3 . CidrRange {
AddressPrefix : addr ,
2023-01-11 14:39:10 +00:00
PrefixLen : & wrapperspb . UInt32Value { Value : pfxLen } ,
2021-06-09 20:34:17 +00:00
} )
}
// The match rules are stable sorted to avoid draining if the list is provided out of order
sort . SliceStable ( ranges , func ( i , j int ) bool {
return ranges [ i ] . AddressPrefix < ranges [ j ] . AddressPrefix
} )
return & envoy_listener_v3 . FilterChainMatch {
PrefixRanges : ranges ,
}
}
2022-07-14 18:45:51 +00:00
func makeFilterChainMatchFromAddressWithPort ( address string , port int ) * envoy_listener_v3 . FilterChainMatch {
ranges := make ( [ ] * envoy_core_v3 . CidrRange , 0 )
ip := net . ParseIP ( address )
if ip == nil {
2022-08-01 18:12:43 +00:00
if address != "" {
return & envoy_listener_v3 . FilterChainMatch {
ServerNames : [ ] string { address } ,
2023-01-11 14:39:10 +00:00
DestinationPort : & wrapperspb . UInt32Value { Value : uint32 ( port ) } ,
2022-08-01 18:12:43 +00:00
}
}
2022-07-14 18:45:51 +00:00
return & envoy_listener_v3 . FilterChainMatch {
2023-01-11 14:39:10 +00:00
DestinationPort : & wrapperspb . UInt32Value { Value : uint32 ( port ) } ,
2022-07-14 18:45:51 +00:00
}
}
pfxLen := uint32 ( 32 )
if ip . To4 ( ) == nil {
pfxLen = 128
}
ranges = append ( ranges , & envoy_core_v3 . CidrRange {
AddressPrefix : address ,
2023-01-11 14:39:10 +00:00
PrefixLen : & wrapperspb . UInt32Value { Value : pfxLen } ,
2022-07-14 18:45:51 +00:00
} )
return & envoy_listener_v3 . FilterChainMatch {
PrefixRanges : ranges ,
2023-01-11 14:39:10 +00:00
DestinationPort : & wrapperspb . UInt32Value { Value : uint32 ( port ) } ,
2022-07-14 18:45:51 +00:00
}
}
2019-09-26 02:55:52 +00:00
// parseCheckPath derives an ExposePath from an HTTP or gRPC check definition:
// the path and local port come from the original check target, and the
// listener port comes from the corresponding proxied (Proxy*) target.
func parseCheckPath(check structs.CheckType) (structs.ExposePath, error) {
	var path structs.ExposePath

	if check.HTTP != "" {
		path.Protocol = "http"

		// Path and local port come from the original HTTP target URL.
		origURL, err := url.Parse(check.HTTP)
		if err != nil {
			return path, fmt.Errorf("failed to parse url '%s': %v", check.HTTP, err)
		}
		path.Path = origURL.Path

		_, rawPort, err := net.SplitHostPort(origURL.Host)
		if err != nil {
			return path, fmt.Errorf("failed to parse port from '%s': %v", check.HTTP, err)
		}
		if path.LocalPathPort, err = strconv.Atoi(rawPort); err != nil {
			return path, fmt.Errorf("failed to parse port from '%s': %v", check.HTTP, err)
		}

		// Listener port comes from the proxied HTTP target URL.
		proxiedURL, err := url.Parse(check.ProxyHTTP)
		if err != nil {
			return path, fmt.Errorf("failed to parse url '%s': %v", check.ProxyHTTP, err)
		}
		if _, rawPort, err = net.SplitHostPort(proxiedURL.Host); err != nil {
			return path, fmt.Errorf("failed to parse port from '%s': %v", check.ProxyHTTP, err)
		}
		if path.ListenerPort, err = strconv.Atoi(rawPort); err != nil {
			return path, fmt.Errorf("failed to parse port from '%s': %v", check.ProxyHTTP, err)
		}
	}

	if check.GRPC != "" {
		path.Path = "/grpc.health.v1.Health/Check"
		path.Protocol = "http2"

		// gRPC targets have the form host:port/service; only host:port is
		// needed here. Local port comes from the original target.
		hostPart := strings.SplitN(check.GRPC, "/", 2)[0]
		_, rawPort, err := net.SplitHostPort(hostPart)
		if err != nil {
			return path, fmt.Errorf("failed to split host/port from '%s': %v", check.GRPC, err)
		}
		if path.LocalPathPort, err = strconv.Atoi(rawPort); err != nil {
			return path, fmt.Errorf("failed to parse port from '%s': %v", check.GRPC, err)
		}

		// Listener port comes from the proxied gRPC target.
		proxyHostPart := strings.SplitN(check.ProxyGRPC, "/", 2)[0]
		if _, rawPort, err = net.SplitHostPort(proxyHostPart); err != nil {
			return path, fmt.Errorf("failed to split host/port from '%s': %v", check.ProxyGRPC, err)
		}
		if path.ListenerPort, err = strconv.Atoi(rawPort); err != nil {
			return path, fmt.Errorf("failed to parse port from '%s': %v", check.ProxyGRPC, err)
		}
	}

	path.ParsedFromCheck = true
	return path, nil
}
2020-04-13 16:33:01 +00:00
// listenersFromSnapshotGateway returns the "listener" for a terminating-gateway or mesh-gateway service
2021-04-29 18:54:05 +00:00
func ( s * ResourceGenerator ) listenersFromSnapshotGateway ( cfgSnap * proxycfg . ConfigSnapshot ) ( [ ] proto . Message , error ) {
2023-08-15 18:57:07 +00:00
cfg , err := config . ParseGatewayConfig ( cfgSnap . Proxy . Config )
2019-06-18 00:52:01 +00:00
if err != nil {
// Don't hard fail on a config typo, just warn. The parse func returns
// default config if there is an error so it's safe to continue.
2020-01-28 23:50:41 +00:00
s . Logger . Warn ( "failed to parse Connect.Proxy.Config" , "error" , err )
2019-06-18 00:52:01 +00:00
}
2021-02-05 22:28:07 +00:00
// We'll collect all of the desired listeners first, and deduplicate them later.
2020-04-13 16:33:01 +00:00
type namedAddress struct {
name string
structs . ServiceAddress
}
addrs := make ( [ ] namedAddress , 0 )
2019-06-18 00:52:01 +00:00
var resources [ ] proto . Message
if ! cfg . NoDefaultBind {
addr := cfgSnap . Address
if addr == "" {
addr = "0.0.0.0"
}
2020-04-13 16:33:01 +00:00
a := structs . ServiceAddress {
Address : addr ,
Port : cfgSnap . Port ,
}
2021-02-05 22:28:07 +00:00
addrs = append ( addrs , namedAddress { name : "default" , ServiceAddress : a } )
2019-06-18 00:52:01 +00:00
}
if cfg . BindTaggedAddresses {
for name , addrCfg := range cfgSnap . TaggedAddresses {
2020-04-13 16:33:01 +00:00
a := structs . ServiceAddress {
Address : addrCfg . Address ,
Port : addrCfg . Port ,
}
2021-02-05 22:28:07 +00:00
addrs = append ( addrs , namedAddress { name : name , ServiceAddress : a } )
2019-06-18 00:52:01 +00:00
}
}
for name , addrCfg := range cfg . BindAddresses {
2020-04-13 16:33:01 +00:00
a := structs . ServiceAddress {
Address : addrCfg . Address ,
Port : addrCfg . Port ,
}
2021-02-05 22:28:07 +00:00
addrs = append ( addrs , namedAddress { name : name , ServiceAddress : a } )
2019-06-18 00:52:01 +00:00
}
2021-02-05 22:28:07 +00:00
// Prevent invalid configurations of binding to the same port/addr twice
// including with the any addresses
//
// Sort the list and then if two items share a service address, take the
// first one to ensure we generate one listener per address and it's
// stable.
sort . Slice ( addrs , func ( i , j int ) bool {
return addrs [ i ] . name < addrs [ j ] . name
} )
// Make listeners and deduplicate on the fly.
seen := make ( map [ structs . ServiceAddress ] bool )
2020-04-13 16:33:01 +00:00
for _ , a := range addrs {
2021-02-05 22:28:07 +00:00
if seen [ a . ServiceAddress ] {
continue
}
seen [ a . ServiceAddress ] = true
2021-02-26 22:23:15 +00:00
var l * envoy_listener_v3 . Listener
2020-04-13 16:33:01 +00:00
switch cfgSnap . Kind {
case structs . ServiceKindTerminatingGateway :
2021-04-29 18:54:05 +00:00
l , err = s . makeTerminatingGatewayListener ( cfgSnap , a . name , a . Address , a . Port )
2020-04-13 16:33:01 +00:00
if err != nil {
return nil , err
}
2023-02-08 21:52:12 +00:00
case structs . ServiceKindAPIGateway :
xds: generate listeners directly from API gateway snapshot (#17398)
* API Gateway XDS Primitives, endpoints and clusters (#17002)
* XDS primitive generation for endpoints and clusters
Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com>
* server_test
* deleted extra file
* add missing parents to test
---------
Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com>
* Routes for API Gateway (#17158)
* XDS primitive generation for endpoints and clusters
Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com>
* server_test
* deleted extra file
* add missing parents to test
* checkpoint
* delete extra file
* httproute flattening code
* linting issue
* so close on this, calling for tonight
* unit test passing
* add in header manip to virtual host
* upstream rebuild commented out
* Use consistent upstream name whether or not we're rebuilding
* Start working through route naming logic
* Fix typos in test descriptions
* Simplify route naming logic
* Simplify RebuildHTTPRouteUpstream
* Merge additional compiled discovery chains instead of overwriting
* Use correct chain for flattened route, clean up + add TODOs
* Remove empty conditional branch
* Restore previous variable declaration
Limit the scope of this PR
* Clean up, improve TODO
* add logging, clean up todos
* clean up function
---------
Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com>
* checkpoint, skeleton, tests not passing
* checkpoint
* endpoints xds cluster configuration
* resources test fix
* fix reversion in resources_test
* checkpoint
* Update agent/proxycfg/api_gateway.go
Co-authored-by: John Maguire <john.maguire@hashicorp.com>
* unit tests passing
* gofmt
* add deterministic sorting to appease the unit test gods
* remove panic
* Find ready upstream matching listener instead of first in list
* Clean up, improve TODO
* Modify getReadyUpstreams to filter upstreams by listener (#17410)
Each listener would previously have all upstreams from any route that bound to the listener. This is problematic when a route bound to one listener also binds to other listeners and so includes upstreams for multiple listeners. The list for a given listener would then wind up including upstreams for other listeners.
* clean up todos, references to api gateway in listeners_ingress
* merge in Nathan's fix
* Update agent/consul/discoverychain/gateway.go
* cleanup current todos, remove snapshot manipulation from generation code
* Update agent/structs/config_entry_gateways.go
Co-authored-by: Thomas Eckert <teckert@hashicorp.com>
* Update agent/consul/discoverychain/gateway.go
Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com>
* Update agent/consul/discoverychain/gateway.go
Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com>
* Update agent/proxycfg/snapshot.go
Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com>
* clarified header comment for FlattenHTTPRoute, changed RebuildHTTPRouteUpstream to BuildHTTPRouteUpstream
* simplify cert logic
* Delete scratch
* revert route related changes in listener PR
* Update agent/consul/discoverychain/gateway.go
* Update agent/proxycfg/snapshot.go
* clean up uneeded extra lines in endpoints
---------
Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com>
Co-authored-by: John Maguire <john.maguire@hashicorp.com>
Co-authored-by: Thomas Eckert <teckert@hashicorp.com>
2023-05-22 21:36:29 +00:00
listeners , err := s . makeAPIGatewayListeners ( a . Address , cfgSnap )
2023-02-08 21:52:12 +00:00
if err != nil {
return nil , err
}
xds: generate listeners directly from API gateway snapshot (#17398)
* API Gateway XDS Primitives, endpoints and clusters (#17002)
* XDS primitive generation for endpoints and clusters
Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com>
* server_test
* deleted extra file
* add missing parents to test
---------
Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com>
* Routes for API Gateway (#17158)
* XDS primitive generation for endpoints and clusters
Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com>
* server_test
* deleted extra file
* add missing parents to test
* checkpoint
* delete extra file
* httproute flattening code
* linting issue
* so close on this, calling for tonight
* unit test passing
* add in header manip to virtual host
* upstream rebuild commented out
* Use consistent upstream name whether or not we're rebuilding
* Start working through route naming logic
* Fix typos in test descriptions
* Simplify route naming logic
* Simplify RebuildHTTPRouteUpstream
* Merge additional compiled discovery chains instead of overwriting
* Use correct chain for flattened route, clean up + add TODOs
* Remove empty conditional branch
* Restore previous variable declaration
Limit the scope of this PR
* Clean up, improve TODO
* add logging, clean up todos
* clean up function
---------
Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com>
* checkpoint, skeleton, tests not passing
* checkpoint
* endpoints xds cluster configuration
* resources test fix
* fix reversion in resources_test
* checkpoint
* Update agent/proxycfg/api_gateway.go
Co-authored-by: John Maguire <john.maguire@hashicorp.com>
* unit tests passing
* gofmt
* add deterministic sorting to appease the unit test gods
* remove panic
* Find ready upstream matching listener instead of first in list
* Clean up, improve TODO
* Modify getReadyUpstreams to filter upstreams by listener (#17410)
Each listener would previously have all upstreams from any route that bound to the listener. This is problematic when a route bound to one listener also binds to other listeners and so includes upstreams for multiple listeners. The list for a given listener would then wind up including upstreams for other listeners.
* clean up todos, references to api gateway in listeners_ingress
* merge in Nathan's fix
* Update agent/consul/discoverychain/gateway.go
* cleanup current todos, remove snapshot manipulation from generation code
* Update agent/structs/config_entry_gateways.go
Co-authored-by: Thomas Eckert <teckert@hashicorp.com>
* Update agent/consul/discoverychain/gateway.go
Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com>
* Update agent/consul/discoverychain/gateway.go
Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com>
* Update agent/proxycfg/snapshot.go
Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com>
* clarified header comment for FlattenHTTPRoute, changed RebuildHTTPRouteUpstream to BuildHTTPRouteUpstream
* simplify cert logic
* Delete scratch
* revert route related changes in listener PR
* Update agent/consul/discoverychain/gateway.go
* Update agent/proxycfg/snapshot.go
* clean up uneeded extra lines in endpoints
---------
Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com>
Co-authored-by: John Maguire <john.maguire@hashicorp.com>
Co-authored-by: Thomas Eckert <teckert@hashicorp.com>
2023-05-22 21:36:29 +00:00
2023-02-08 21:52:12 +00:00
resources = append ( resources , listeners ... )
2020-05-21 14:08:12 +00:00
case structs . ServiceKindIngressGateway :
listeners , err := s . makeIngressGatewayListeners ( a . Address , cfgSnap )
if err != nil {
return nil , err
}
resources = append ( resources , listeners ... )
2020-04-13 16:33:01 +00:00
case structs . ServiceKindMeshGateway :
l , err = s . makeMeshGatewayListener ( a . name , a . Address , a . Port , cfgSnap )
if err != nil {
return nil , err
}
}
2020-04-17 01:08:41 +00:00
if l != nil {
resources = append ( resources , l )
}
2020-04-13 16:33:01 +00:00
}
2019-06-18 00:52:01 +00:00
return resources , err
}
2018-10-03 18:18:55 +00:00
// makeListener returns a listener with name and bind details set. Filters must
// be added before it's useful.
//
// Note on names: Envoy listeners attempt graceful transitions of connections
// when their config changes but that means they can't have their bind address
// or port changed in a running instance. Since our users might choose to change
// a bind address or port for the public or upstream listeners, we need to
// encode those into the unique name for the listener such that if the user
// changes them, we actually create a whole new listener on the new address and
// port. Envoy should take care of closing the old one once it sees it's no
// longer in the config.
2022-12-22 20:18:15 +00:00
// makeListenerOpts collects the inputs needed to build a basic Envoy listener
// via makeListener/makeListenerWithDefault/makePipeListener.
type makeListenerOpts struct {
	// addr is the IP address to bind; defaults to 127.0.0.1 when empty.
	addr string
	// accessLogs is the proxy's access-log configuration applied to the listener.
	accessLogs structs.AccessLogsConfig
	// logger is used for non-fatal warnings (e.g. bad access-log config); may be nil.
	logger hclog.Logger
	// mode is the unix socket file mode (string form) for pipe listeners.
	mode string
	// name is the logical listener name; the bind address/port (or path) is
	// appended to form the final unique Envoy listener name.
	name string
	// path is the unix socket path for pipe listeners.
	path string
	// port is the TCP port to bind for address-based listeners.
	port int
	// direction tags the listener as inbound or outbound traffic.
	direction envoy_core_v3.TrafficDirection
	// upstream, when set, overrides addr/port (or path/mode) from its
	// LocalBind* fields.
	upstream *structs.Upstream
}
2022-12-22 20:18:15 +00:00
func makeListener ( opts makeListenerOpts ) * envoy_listener_v3 . Listener {
if opts . upstream != nil && opts . upstream . LocalBindPort == 0 && opts . upstream . LocalBindSocketPath != "" {
opts . path = opts . upstream . LocalBindSocketPath
opts . mode = opts . upstream . LocalBindSocketMode
return makePipeListener ( opts )
2018-10-03 18:18:55 +00:00
}
2022-12-22 20:18:15 +00:00
if opts . upstream != nil {
opts . port = opts . upstream . LocalBindPort
opts . addr = opts . upstream . LocalBindAddress
return makeListenerWithDefault ( opts )
}
return makeListenerWithDefault ( opts )
2018-10-03 18:18:55 +00:00
}
2022-12-22 20:18:15 +00:00
func makeListenerWithDefault ( opts makeListenerOpts ) * envoy_listener_v3 . Listener {
if opts . addr == "" {
opts . addr = "127.0.0.1"
}
accessLog , err := accesslogs . MakeAccessLogs ( & opts . accessLogs , true )
if err != nil && opts . logger != nil {
// Since access logging is non-essential for routing, warn and move on
opts . logger . Warn ( "error generating access log xds" , err )
}
return & envoy_listener_v3 . Listener {
Name : fmt . Sprintf ( "%s:%s:%d" , opts . name , opts . addr , opts . port ) ,
AccessLog : accessLog ,
2023-08-17 18:43:21 +00:00
Address : response . MakeAddress ( opts . addr , opts . port ) ,
2022-12-22 20:18:15 +00:00
TrafficDirection : opts . direction ,
2021-03-26 20:00:44 +00:00
}
}
2022-12-22 20:18:15 +00:00
func makePipeListener ( opts makeListenerOpts ) * envoy_listener_v3 . Listener {
2021-04-14 01:01:30 +00:00
// We've already validated this, so it should not fail.
2022-12-22 20:18:15 +00:00
modeInt , err := strconv . ParseUint ( opts . mode , 0 , 32 )
2021-04-14 01:01:30 +00:00
if err != nil {
2022-12-22 20:18:15 +00:00
modeInt = 0
}
accessLog , err := accesslogs . MakeAccessLogs ( & opts . accessLogs , true )
if err != nil && opts . logger != nil {
// Since access logging is non-essential for routing, warn and move on
opts . logger . Warn ( "error generating access log xds" , err )
2021-04-14 01:01:30 +00:00
}
2021-03-26 20:00:44 +00:00
return & envoy_listener_v3 . Listener {
2022-12-22 20:18:15 +00:00
Name : fmt . Sprintf ( "%s:%s" , opts . name , opts . path ) ,
AccessLog : accessLog ,
2023-08-17 18:43:21 +00:00
Address : response . MakePipeAddress ( opts . path , uint32 ( modeInt ) ) ,
2022-12-22 20:18:15 +00:00
TrafficDirection : opts . direction ,
2021-03-26 20:00:44 +00:00
}
}
2018-10-03 18:18:55 +00:00
// makeListenerFromUserConfig returns the listener config decoded from an
// arbitrary proto3 json format string or an error if it's invalid.
//
// For now we only support embedding in JSON strings because of the hcl parsing
2020-06-09 21:43:05 +00:00
// pain (see Background section in the comment for decode.HookWeakDecodeFromSlice).
// This may be fixed in decode.HookWeakDecodeFromSlice in the future.
2018-10-03 18:18:55 +00:00
//
// When we do that we can support just nesting the config directly into the
// JSON/hcl naturally but this is a stop-gap that gets us an escape hatch
// immediately. It's also probably not a bad thing to support long-term since
// any config generated by other systems will likely be in canonical protobuf
// from rather than our slight variant in JSON/hcl.
2021-02-26 22:23:15 +00:00
func makeListenerFromUserConfig ( configJSON string ) ( * envoy_listener_v3 . Listener , error ) {
2023-01-11 14:39:10 +00:00
// Type field is present so decode it as a anypb.Any
var any anypb . Any
if err := protojson . Unmarshal ( [ ] byte ( configJSON ) , & any ) ; err != nil {
2018-10-03 18:18:55 +00:00
return nil , err
}
2021-02-26 22:23:15 +00:00
var l envoy_listener_v3 . Listener
2021-02-22 21:00:15 +00:00
if err := proto . Unmarshal ( any . Value , & l ) ; err != nil {
return nil , err
2018-10-03 18:18:55 +00:00
}
2021-02-22 21:00:15 +00:00
return & l , nil
2018-10-03 18:18:55 +00:00
}
2022-09-26 16:29:06 +00:00
// injectConnectionBalanceConfig applies the configured connection balancing
// strategy to the listener. An empty balanceType keeps Envoy's default; any
// unrecognized value is logged and ignored.
func (s *ResourceGenerator) injectConnectionBalanceConfig(balanceType string, listener *envoy_listener_v3.Listener) {
	if balanceType == "" {
		// Default with no balancing.
		return
	}
	if balanceType != structs.ConnectionExactBalance {
		s.Logger.Warn("ignoring invalid connection balance option", "value", balanceType)
		return
	}
	listener.ConnectionBalanceConfig = &envoy_listener_v3.Listener_ConnectionBalanceConfig{
		BalanceType: &envoy_listener_v3.Listener_ConnectionBalanceConfig_ExactBalance_{},
	}
}
2020-08-27 17:20:58 +00:00
// Ensure that the first filter in each filter chain of a public listener is
// the authz filter to prevent unauthorized access.
2021-04-29 18:54:05 +00:00
func ( s * ResourceGenerator ) injectConnectFilters ( cfgSnap * proxycfg . ConfigSnapshot , listener * envoy_listener_v3 . Listener ) error {
2020-08-27 17:20:58 +00:00
authzFilter , err := makeRBACNetworkFilter (
cfgSnap . ConnectProxy . Intentions ,
cfgSnap . IntentionDefaultAllow ,
2022-06-29 15:29:54 +00:00
rbacLocalInfo {
trustDomain : cfgSnap . Roots . TrustDomain ,
datacenter : cfgSnap . Datacenter ,
partition : cfgSnap . ProxyID . PartitionOrDefault ( ) ,
} ,
2022-06-21 02:47:14 +00:00
cfgSnap . ConnectProxy . InboundPeerTrustBundles ,
2020-08-27 17:20:58 +00:00
)
2018-10-03 18:18:55 +00:00
if err != nil {
return err
}
2020-08-27 17:20:58 +00:00
2018-10-03 18:18:55 +00:00
for idx := range listener . FilterChains {
// Insert our authz filter before any others
listener . FilterChains [ idx ] . Filters =
2021-02-26 22:23:15 +00:00
append ( [ ] * envoy_listener_v3 . Filter {
2020-08-27 17:20:58 +00:00
authzFilter ,
} , listener . FilterChains [ idx ] . Filters ... )
}
return nil
}
2021-02-22 21:00:15 +00:00
const (
	// Envoy's HTTP connection manager filter has been registered under two
	// names; user-supplied configs may use either, so both are matched below.
	httpConnectionManagerOldName = "envoy.http_connection_manager"
	httpConnectionManagerNewName = "envoy.filters.network.http_connection_manager"
)
2020-08-27 17:20:58 +00:00
2021-06-14 22:20:27 +00:00
func extractRdsResourceNames ( listener * envoy_listener_v3 . Listener ) ( [ ] string , error ) {
var found [ ] string
for chainIdx , chain := range listener . FilterChains {
for filterIdx , filter := range chain . Filters {
if filter . Name != httpConnectionManagerNewName {
continue
}
tc , ok := filter . ConfigType . ( * envoy_listener_v3 . Filter_TypedConfig )
if ! ok {
return nil , fmt . Errorf (
"filter chain %d has a %q filter %d with an unsupported config type: %T" ,
chainIdx ,
filter . Name ,
filterIdx ,
filter . ConfigType ,
)
}
var hcm envoy_http_v3 . HttpConnectionManager
2023-01-11 14:39:10 +00:00
if err := tc . TypedConfig . UnmarshalTo ( & hcm ) ; err != nil {
2021-06-14 22:20:27 +00:00
return nil , err
}
if hcm . RouteSpecifier == nil {
continue
}
rds , ok := hcm . RouteSpecifier . ( * envoy_http_v3 . HttpConnectionManager_Rds )
if ! ok {
continue
}
if rds . Rds == nil {
continue
}
found = append ( found , rds . Rds . RouteConfigName )
}
}
return found , nil
}
2020-08-27 17:20:58 +00:00
// Locate the existing http connect manager L4 filter and inject our RBAC filter at the top.
2021-03-17 01:22:26 +00:00
func injectHTTPFilterOnFilterChains (
2021-02-26 22:23:15 +00:00
listener * envoy_listener_v3 . Listener ,
authzFilter * envoy_http_v3 . HttpFilter ,
2020-08-27 17:20:58 +00:00
) error {
for chainIdx , chain := range listener . FilterChains {
var (
2021-02-26 22:23:15 +00:00
hcmFilter * envoy_listener_v3 . Filter
2020-08-27 17:20:58 +00:00
hcmFilterIdx int
)
for filterIdx , filter := range chain . Filters {
2021-02-22 21:00:15 +00:00
if filter . Name == httpConnectionManagerOldName ||
2020-08-27 17:20:58 +00:00
filter . Name == httpConnectionManagerNewName {
hcmFilter = filter
hcmFilterIdx = filterIdx
break
}
}
if hcmFilter == nil {
return fmt . Errorf (
"filter chain %d lacks either a %q or %q filter" ,
chainIdx ,
2021-02-22 21:00:15 +00:00
httpConnectionManagerOldName ,
2020-08-27 17:20:58 +00:00
httpConnectionManagerNewName ,
)
}
2021-08-20 16:57:45 +00:00
var hcm envoy_http_v3 . HttpConnectionManager
2021-02-26 22:23:15 +00:00
tc , ok := hcmFilter . ConfigType . ( * envoy_listener_v3 . Filter_TypedConfig )
2021-02-22 21:00:15 +00:00
if ! ok {
2020-08-27 17:20:58 +00:00
return fmt . Errorf (
"filter chain %d has a %q filter with an unsupported config type: %T" ,
chainIdx ,
hcmFilter . Name ,
2021-02-22 21:00:15 +00:00
hcmFilter . ConfigType ,
2020-08-27 17:20:58 +00:00
)
}
2018-10-03 18:18:55 +00:00
2023-01-11 14:39:10 +00:00
if err := tc . TypedConfig . UnmarshalTo ( & hcm ) ; err != nil {
2021-02-22 21:00:15 +00:00
return err
}
2020-08-27 17:20:58 +00:00
// Insert our authz filter before any others
2021-02-26 22:23:15 +00:00
hcm . HttpFilters = append ( [ ] * envoy_http_v3 . HttpFilter {
2020-08-27 17:20:58 +00:00
authzFilter ,
} , hcm . HttpFilters ... )
// And persist the modified filter.
2021-02-22 21:00:15 +00:00
newFilter , err := makeFilter ( hcmFilter . Name , & hcm )
2020-08-27 17:20:58 +00:00
if err != nil {
return err
}
chain . Filters [ hcmFilterIdx ] = newFilter
}
return nil
}
2022-06-15 19:36:18 +00:00
// NOTE: This method MUST only be used for connect proxy public listeners,
// since TLS validation will be done against root certs for all peers
// that might dial this proxy.
func ( s * ResourceGenerator ) injectConnectTLSForPublicListener ( cfgSnap * proxycfg . ConfigSnapshot , listener * envoy_listener_v3 . Listener ) error {
transportSocket , err := createDownstreamTransportSocketForConnectTLS ( cfgSnap , cfgSnap . PeeringTrustBundles ( ) )
if err != nil {
return err
}
2020-08-27 17:20:58 +00:00
for idx := range listener . FilterChains {
2021-02-22 21:00:15 +00:00
listener . FilterChains [ idx ] . TransportSocket = transportSocket
2018-10-03 18:18:55 +00:00
}
return nil
}
2022-10-10 20:13:56 +00:00
// getAlpnProtocols maps a Consul service protocol name to the ALPN protocol
// IDs to advertise during the TLS handshake. Unrecognized protocols (including
// plain "tcp") yield nil, leaving ALPN unset.
func getAlpnProtocols(protocol string) []string {
	switch protocol {
	case "grpc", "http2":
		return []string{"h2", "http/1.1"}
	case "http":
		return []string{"http/1.1"}
	default:
		return nil
	}
}
2022-06-15 19:36:18 +00:00
// createDownstreamTransportSocketForConnectTLS builds the downstream (server
// side) TLS transport socket for Connect mTLS: leaf cert plus local root CA
// validation, requiring client certificates. When peer trust bundles are
// present, validation is delegated to the SPIFFE cert validator so client
// certs can be checked against per-trust-domain roots.
func createDownstreamTransportSocketForConnectTLS(cfgSnap *proxycfg.ConfigSnapshot, peerBundles []*pbpeering.PeeringTrustBundle) (*envoy_core_v3.TransportSocket, error) {
	// Only sidecar proxies and mesh gateways terminate Connect mTLS here.
	switch cfgSnap.Kind {
	case structs.ServiceKindConnectProxy:
	case structs.ServiceKindMeshGateway:
	default:
		return nil, fmt.Errorf("cannot inject peering trust bundles for kind %q", cfgSnap.Kind)
	}

	// Determine listener protocol type from configured service protocol. Don't hard fail on a config typo,
	// the parse func returns default config if there is an error, so it's safe to continue.
	cfg, _ := config.ParseProxyConfig(cfgSnap.Proxy.Config)

	// Create TLS validation context for mTLS with leaf certificate and root certs.
	tlsContext := makeCommonTLSContext(
		cfgSnap.Leaf(),
		cfgSnap.RootPEMs(),
		makeTLSParametersFromProxyTLSConfig(cfgSnap.MeshConfigTLSIncoming()),
	)

	if tlsContext != nil {
		// Configure alpn protocols on CommonTLSContext based on the service protocol.
		tlsContext.AlpnProtocols = getAlpnProtocols(cfg.Protocol)
	}

	// Inject peering trust bundles if this service is exported to peered clusters.
	// NOTE(review): if tlsContext were nil here (it can be, per the guard above)
	// while peerBundles is non-empty, the field access below would panic —
	// presumably callers only reach this branch with a leaf cert present; confirm.
	if len(peerBundles) > 0 {
		spiffeConfig, err := makeSpiffeValidatorConfig(
			cfgSnap.Roots.TrustDomain,
			cfgSnap.RootPEMs(),
			peerBundles,
		)
		if err != nil {
			return nil, err
		}

		typ, ok := tlsContext.ValidationContextType.(*envoy_tls_v3.CommonTlsContext_ValidationContext)
		if !ok {
			return nil, fmt.Errorf("unexpected type for TLS context validation: %T", tlsContext.ValidationContextType)
		}

		// makeCommonTLSContext injects the local trust domain's CA root certs as the TrustedCa.
		// We nil it out here since the local roots are included in the SPIFFE validator config.
		typ.ValidationContext.TrustedCa = nil
		typ.ValidationContext.CustomValidatorConfig = &envoy_core_v3.TypedExtensionConfig{
			// The typed config name is hard-coded because it is not available as a wellknown var in the control plane lib.
			Name:        "envoy.tls.cert_validator.spiffe",
			TypedConfig: spiffeConfig,
		}
	}

	return makeDownstreamTLSTransportSocket(&envoy_tls_v3.DownstreamTlsContext{
		CommonTlsContext: tlsContext,
		// Require and verify a client certificate: this is mutual TLS.
		RequireClientCertificate: &wrapperspb.BoolValue{Value: true},
	})
}
// SPIFFECertValidatorConfig is used to validate certificates from trust domains other than our own.
// With cluster peering we expect peered clusters to have independent certificate authorities.
// This means that we cannot use a single set of root CA certificates to validate client certificates for mTLS,
// but rather we need to validate against different roots depending on the trust domain of the certificate presented.
2023-01-11 14:39:10 +00:00
func makeSpiffeValidatorConfig ( trustDomain , roots string , peerBundles [ ] * pbpeering . PeeringTrustBundle ) ( * anypb . Any , error ) {
2022-06-01 20:31:37 +00:00
// Store the trust bundle for the local trust domain.
bundles := map [ string ] string { trustDomain : roots }
// Store the trust bundle for each trust domain of the peers this proxy is exported to.
// This allows us to validate traffic from other trust domains.
for _ , b := range peerBundles {
var pems string
for _ , pem := range b . RootPEMs {
pems += lib . EnsureTrailingNewline ( pem )
}
bundles [ b . TrustDomain ] = pems
}
cfg := & envoy_tls_v3 . SPIFFECertValidatorConfig {
TrustDomains : make ( [ ] * envoy_tls_v3 . SPIFFECertValidatorConfig_TrustDomain , 0 , len ( bundles ) ) ,
}
for domain , bundle := range bundles {
cfg . TrustDomains = append ( cfg . TrustDomains , & envoy_tls_v3 . SPIFFECertValidatorConfig_TrustDomain {
Name : domain ,
TrustBundle : & envoy_core_v3 . DataSource {
Specifier : & envoy_core_v3 . DataSource_InlineString {
InlineString : bundle ,
} ,
} ,
} )
}
// Sort the trust domains so that the output is stable.
// This benefits tests but also prevents Envoy from mistakenly thinking the listener
// changed and needs to be drained only because this ordering is different.
sort . Slice ( cfg . TrustDomains , func ( i int , j int ) bool {
return cfg . TrustDomains [ i ] . Name < cfg . TrustDomains [ j ] . Name
} )
2023-01-11 14:39:10 +00:00
return anypb . New ( cfg )
2022-06-01 20:31:37 +00:00
}
2021-04-29 18:54:05 +00:00
func ( s * ResourceGenerator ) makeInboundListener ( cfgSnap * proxycfg . ConfigSnapshot , name string ) ( proto . Message , error ) {
2021-02-26 22:23:15 +00:00
var l * envoy_listener_v3 . Listener
2018-10-03 18:18:55 +00:00
var err error
2023-08-15 18:57:07 +00:00
cfg , err := config . ParseProxyConfig ( cfgSnap . Proxy . Config )
2019-04-29 16:27:57 +00:00
if err != nil {
// Don't hard fail on a config typo, just warn. The parse func returns
// default config if there is an error so it's safe to continue.
2020-01-28 23:50:41 +00:00
s . Logger . Warn ( "failed to parse Connect.Proxy.Config" , "error" , err )
2019-04-29 16:27:57 +00:00
}
2021-03-17 01:22:26 +00:00
// This controls if we do L4 or L7 intention checks.
useHTTPFilter := structs . IsProtocolHTTPLike ( cfg . Protocol )
// Generate and return custom public listener from config if one was provided.
2019-04-29 16:27:57 +00:00
if cfg . PublicListenerJSON != "" {
l , err = makeListenerFromUserConfig ( cfg . PublicListenerJSON )
if err != nil {
2021-03-17 01:22:26 +00:00
return nil , err
2018-10-03 18:18:55 +00:00
}
2019-07-05 15:06:47 +00:00
2021-03-17 22:18:56 +00:00
// For HTTP-like services attach an RBAC http filter and do a best-effort insert
if useHTTPFilter {
httpAuthzFilter , err := makeRBACHTTPFilter (
cfgSnap . ConnectProxy . Intentions ,
cfgSnap . IntentionDefaultAllow ,
2022-06-29 15:29:54 +00:00
rbacLocalInfo {
trustDomain : cfgSnap . Roots . TrustDomain ,
datacenter : cfgSnap . Datacenter ,
partition : cfgSnap . ProxyID . PartitionOrDefault ( ) ,
} ,
2022-06-21 02:47:14 +00:00
cfgSnap . ConnectProxy . InboundPeerTrustBundles ,
Use JWT-auth filter in metadata mode & Delegate validation to RBAC filter (#18062)
### Description
<!-- Please describe why you're making this change, in plain English.
-->
- Currently the jwt-auth filter doesn't take into account the service
identity when validating jwt-auth, it only takes into account the path
and jwt provider during validation. This causes issues when multiple
source intentions restrict access to an endpoint with different JWT
providers.
- To fix these issues, rather than use the JWT auth filter for
validation, we use it in metadata mode and allow it to forward the
successful validated JWT token payload to the RBAC filter which will
make the decisions.
This PR ensures requests with and without JWT tokens successfully go
through the jwt-authn filter. The filter however only forwards the data
for successful/valid tokens. On the RBAC filter level, we check the
payload for claims and token issuer + existing rbac rules.
### Testing & Reproduction steps
<!--
* In the case of bugs, describe how to replicate
* If any manual tests were done, document the steps and the conditions
to replicate
* Call out any important/ relevant unit tests, e2e tests or integration
tests you have added or are adding
-->
- This test covers a multi level jwt requirements (requirements at top
level and permissions level). It also assumes you have envoy running,
you have a redis and a sidecar proxy service registered, and have a way
to generate jwks with jwt. I mostly use:
https://www.scottbrady91.com/tools/jwt for this.
- first write your proxy defaults
```
Kind = "proxy-defaults"
name = "global"
config {
protocol = "http"
}
```
- Create two providers
```
Kind = "jwt-provider"
Name = "auth0"
Issuer = "https://ronald.local"
JSONWebKeySet = {
Local = {
JWKS = "eyJrZXlzIjog....."
}
}
```
```
Kind = "jwt-provider"
Name = "okta"
Issuer = "https://ronald.local"
JSONWebKeySet = {
Local = {
JWKS = "eyJrZXlzIjogW3...."
}
}
```
- add a service intention
```
Kind = "service-intentions"
Name = "redis"
JWT = {
Providers = [
{
Name = "okta"
},
]
}
Sources = [
{
Name = "*"
Permissions = [{
Action = "allow"
HTTP = {
PathPrefix = "/workspace"
}
JWT = {
Providers = [
{
Name = "okta"
VerifyClaims = [
{
Path = ["aud"]
Value = "my_client_app"
},
{
Path = ["sub"]
Value = "5be86359073c434bad2da3932222dabe"
}
]
},
]
}
},
{
Action = "allow"
HTTP = {
PathPrefix = "/"
}
JWT = {
Providers = [
{
Name = "auth0"
},
]
}
}]
}
]
```
- generate 3 jwt tokens: 1 from auth0 jwks, 1 from okta jwks with
different claims than `/workspace` expects and 1 with correct claims
- connect to your envoy (change service and address as needed) to view
logs and potential errors. You can add: `-- --log-level debug` to see
what data is being forwarded
```
consul connect envoy -sidecar-for redis1 -grpc-addr 127.0.0.1:8502
```
- Make the following requests:
```
curl -s -H "Authorization: Bearer $Auth0_TOKEN" --insecure --cert leaf.cert --key leaf.key --cacert connect-ca.pem https://localhost:20000/workspace -v
RBAC filter denied
curl -s -H "Authorization: Bearer $Okta_TOKEN_with_wrong_claims" --insecure --cert leaf.cert --key leaf.key --cacert connect-ca.pem https://localhost:20000/workspace -v
RBAC filter denied
curl -s -H "Authorization: Bearer $Okta_TOKEN_with_correct_claims" --insecure --cert leaf.cert --key leaf.key --cacert connect-ca.pem https://localhost:20000/workspace -v
Successful request
```
### TODO
* [x] Update test coverage
* [ ] update integration tests (follow-up PR)
* [x] appropriate backport labels added
2023-07-17 15:32:49 +00:00
cfgSnap . JWTProviders ,
2021-03-17 22:18:56 +00:00
)
if err != nil {
return nil , err
}
// Try our best to inject the HTTP RBAC filter.
if err := injectHTTPFilterOnFilterChains ( l , httpAuthzFilter ) ; err != nil {
s . Logger . Warn (
"could not inject the HTTP RBAC filter to enforce intentions on user-provided " +
"'envoy_public_listener_json' config; falling back on the RBAC network filter instead" ,
"proxy" , cfgSnap . ProxyID ,
"error" , err ,
)
// If we get an error inject the RBAC network filter instead.
useHTTPFilter = false
}
}
2022-10-24 16:48:02 +00:00
err := s . finalizePublicListenerFromConfig ( l , cfgSnap , useHTTPFilter )
2021-03-17 01:22:26 +00:00
if err != nil {
return nil , fmt . Errorf ( "failed to attach Consul filters and TLS context to custom public listener: %v" , err )
2018-10-03 18:18:55 +00:00
}
2021-03-17 01:22:26 +00:00
return l , nil
}
2019-07-05 15:06:47 +00:00
2021-03-17 22:18:56 +00:00
// No JSON user config, use default listener address
2021-03-17 19:40:49 +00:00
// Default to listening on all addresses, but override with bind address if one is set.
2021-03-17 01:22:26 +00:00
addr := cfgSnap . Address
2021-03-17 19:40:49 +00:00
if addr == "" {
addr = "0.0.0.0"
}
2021-03-17 01:22:26 +00:00
if cfg . BindAddress != "" {
addr = cfg . BindAddress
}
2019-04-29 16:27:57 +00:00
2021-03-17 01:22:26 +00:00
// Override with bind port if one is set, otherwise default to
// proxy service's address
port := cfgSnap . Port
if cfg . BindPort != 0 {
port = cfg . BindPort
}
2020-08-27 17:20:58 +00:00
2022-12-22 20:18:15 +00:00
opts := makeListenerOpts {
name : name ,
accessLogs : cfgSnap . Proxy . AccessLogs ,
addr : addr ,
port : port ,
direction : envoy_core_v3 . TrafficDirection_INBOUND ,
logger : s . Logger ,
}
l = makeListener ( opts )
2022-09-26 16:29:06 +00:00
s . injectConnectionBalanceConfig ( cfg . BalanceInboundConnections , l )
2020-08-27 17:20:58 +00:00
2022-08-02 06:52:48 +00:00
var tracing * envoy_http_v3 . HttpConnectionManager_Tracing
2022-08-30 06:36:06 +00:00
if cfg . ListenerTracingJSON != "" {
if tracing , err = makeTracingFromUserConfig ( cfg . ListenerTracingJSON ) ; err != nil {
s . Logger . Warn ( "failed to parse ListenerTracingJSON config" , "error" , err )
2022-08-02 06:52:48 +00:00
}
}
2021-03-17 01:22:26 +00:00
filterOpts := listenerFilterOpts {
protocol : cfg . Protocol ,
filterName : name ,
routeName : name ,
2023-01-06 17:13:40 +00:00
cluster : xdscommon . LocalAppClusterName ,
2021-03-17 01:22:26 +00:00
requestTimeoutMs : cfg . LocalRequestTimeoutMs ,
2022-11-29 22:43:15 +00:00
idleTimeoutMs : cfg . LocalIdleTimeoutMs ,
2022-08-02 06:52:48 +00:00
tracing : tracing ,
2022-12-22 20:18:15 +00:00
accessLogs : & cfgSnap . Proxy . AccessLogs ,
logger : s . Logger ,
2021-03-17 01:22:26 +00:00
}
if useHTTPFilter {
Use JWT-auth filter in metadata mode & Delegate validation to RBAC filter (#18062)
### Description
<!-- Please describe why you're making this change, in plain English.
-->
- Currently the jwt-auth filter doesn't take into account the service
identity when validating jwt-auth, it only takes into account the path
and jwt provider during validation. This causes issues when multiple
source intentions restrict access to an endpoint with different JWT
providers.
- To fix these issues, rather than use the JWT auth filter for
validation, we use it in metadata mode and allow it to forward the
successful validated JWT token payload to the RBAC filter which will
make the decisions.
This PR ensures requests with and without JWT tokens successfully go
through the jwt-authn filter. The filter however only forwards the data
for successful/valid tokens. On the RBAC filter level, we check the
payload for claims and token issuer + existing rbac rules.
### Testing & Reproduction steps
<!--
* In the case of bugs, describe how to replicate
* If any manual tests were done, document the steps and the conditions
to replicate
* Call out any important/ relevant unit tests, e2e tests or integration
tests you have added or are adding
-->
- This test covers a multi level jwt requirements (requirements at top
level and permissions level). It also assumes you have envoy running,
you have a redis and a sidecar proxy service registered, and have a way
to generate jwks with jwt. I mostly use:
https://www.scottbrady91.com/tools/jwt for this.
- first write your proxy defaults
```
Kind = "proxy-defaults"
name = "global"
config {
protocol = "http"
}
```
- Create two providers
```
Kind = "jwt-provider"
Name = "auth0"
Issuer = "https://ronald.local"
JSONWebKeySet = {
Local = {
JWKS = "eyJrZXlzIjog....."
}
}
```
```
Kind = "jwt-provider"
Name = "okta"
Issuer = "https://ronald.local"
JSONWebKeySet = {
Local = {
JWKS = "eyJrZXlzIjogW3...."
}
}
```
- add a service intention
```
Kind = "service-intentions"
Name = "redis"
JWT = {
Providers = [
{
Name = "okta"
},
]
}
Sources = [
{
Name = "*"
Permissions = [{
Action = "allow"
HTTP = {
PathPrefix = "/workspace"
}
JWT = {
Providers = [
{
Name = "okta"
VerifyClaims = [
{
Path = ["aud"]
Value = "my_client_app"
},
{
Path = ["sub"]
Value = "5be86359073c434bad2da3932222dabe"
}
]
},
]
}
},
{
Action = "allow"
HTTP = {
PathPrefix = "/"
}
JWT = {
Providers = [
{
Name = "auth0"
},
]
}
}]
}
]
```
- generate 3 jwt tokens: 1 from auth0 jwks, 1 from okta jwks with
different claims than `/workspace` expects and 1 with correct claims
- connect to your envoy (change service and address as needed) to view
logs and potential errors. You can add: `-- --log-level debug` to see
what data is being forwarded
```
consul connect envoy -sidecar-for redis1 -grpc-addr 127.0.0.1:8502
```
- Make the following requests:
```
curl -s -H "Authorization: Bearer $Auth0_TOKEN" --insecure --cert leaf.cert --key leaf.key --cacert connect-ca.pem https://localhost:20000/workspace -v
RBAC filter denied
curl -s -H "Authorization: Bearer $Okta_TOKEN_with_wrong_claims" --insecure --cert leaf.cert --key leaf.key --cacert connect-ca.pem https://localhost:20000/workspace -v
RBAC filter denied
curl -s -H "Authorization: Bearer $Okta_TOKEN_with_correct_claims" --insecure --cert leaf.cert --key leaf.key --cacert connect-ca.pem https://localhost:20000/workspace -v
Successful request
```
### TODO
* [x] Update test coverage
* [ ] update integration tests (follow-up PR)
* [x] appropriate backport labels added
2023-07-17 15:32:49 +00:00
jwtFilter , err := makeJWTAuthFilter ( cfgSnap . JWTProviders , cfgSnap . ConnectProxy . Intentions )
if err != nil {
return nil , err
2023-05-19 22:14:16 +00:00
}
2023-01-06 17:13:40 +00:00
rbacFilter , err := makeRBACHTTPFilter (
2021-03-17 01:22:26 +00:00
cfgSnap . ConnectProxy . Intentions ,
cfgSnap . IntentionDefaultAllow ,
2022-06-29 15:29:54 +00:00
rbacLocalInfo {
trustDomain : cfgSnap . Roots . TrustDomain ,
datacenter : cfgSnap . Datacenter ,
partition : cfgSnap . ProxyID . PartitionOrDefault ( ) ,
} ,
2022-06-21 02:47:14 +00:00
cfgSnap . ConnectProxy . InboundPeerTrustBundles ,
Use JWT-auth filter in metadata mode & Delegate validation to RBAC filter (#18062)
### Description
<!-- Please describe why you're making this change, in plain English.
-->
- Currently the jwt-auth filter doesn't take into account the service
identity when validating jwt-auth, it only takes into account the path
and jwt provider during validation. This causes issues when multiple
source intentions restrict access to an endpoint with different JWT
providers.
- To fix these issues, rather than use the JWT auth filter for
validation, we use it in metadata mode and allow it to forward the
successful validated JWT token payload to the RBAC filter which will
make the decisions.
This PR ensures requests with and without JWT tokens successfully go
through the jwt-authn filter. The filter however only forwards the data
for successful/valid tokens. On the RBAC filter level, we check the
payload for claims and token issuer + existing rbac rules.
### Testing & Reproduction steps
<!--
* In the case of bugs, describe how to replicate
* If any manual tests were done, document the steps and the conditions
to replicate
* Call out any important/ relevant unit tests, e2e tests or integration
tests you have added or are adding
-->
- This test covers a multi level jwt requirements (requirements at top
level and permissions level). It also assumes you have envoy running,
you have a redis and a sidecar proxy service registered, and have a way
to generate jwks with jwt. I mostly use:
https://www.scottbrady91.com/tools/jwt for this.
- first write your proxy defaults
```
Kind = "proxy-defaults"
name = "global"
config {
protocol = "http"
}
```
- Create two providers
```
Kind = "jwt-provider"
Name = "auth0"
Issuer = "https://ronald.local"
JSONWebKeySet = {
Local = {
JWKS = "eyJrZXlzIjog....."
}
}
```
```
Kind = "jwt-provider"
Name = "okta"
Issuer = "https://ronald.local"
JSONWebKeySet = {
Local = {
JWKS = "eyJrZXlzIjogW3...."
}
}
```
- add a service intention
```
Kind = "service-intentions"
Name = "redis"
JWT = {
Providers = [
{
Name = "okta"
},
]
}
Sources = [
{
Name = "*"
Permissions = [{
Action = "allow"
HTTP = {
PathPrefix = "/workspace"
}
JWT = {
Providers = [
{
Name = "okta"
VerifyClaims = [
{
Path = ["aud"]
Value = "my_client_app"
},
{
Path = ["sub"]
Value = "5be86359073c434bad2da3932222dabe"
}
]
},
]
}
},
{
Action = "allow"
HTTP = {
PathPrefix = "/"
}
JWT = {
Providers = [
{
Name = "auth0"
},
]
}
}]
}
]
```
- generate 3 jwt tokens: 1 from auth0 jwks, 1 from okta jwks with
different claims than `/workspace` expects and 1 with correct claims
- connect to your envoy (change service and address as needed) to view
logs and potential errors. You can add: `-- --log-level debug` to see
what data is being forwarded
```
consul connect envoy -sidecar-for redis1 -grpc-addr 127.0.0.1:8502
```
- Make the following requests:
```
curl -s -H "Authorization: Bearer $Auth0_TOKEN" --insecure --cert leaf.cert --key leaf.key --cacert connect-ca.pem https://localhost:20000/workspace -v
RBAC filter denied
curl -s -H "Authorization: Bearer $Okta_TOKEN_with_wrong_claims" --insecure --cert leaf.cert --key leaf.key --cacert connect-ca.pem https://localhost:20000/workspace -v
RBAC filter denied
curl -s -H "Authorization: Bearer $Okta_TOKEN_with_correct_claims" --insecure --cert leaf.cert --key leaf.key --cacert connect-ca.pem https://localhost:20000/workspace -v
Successful request
```
### TODO
* [x] Update test coverage
* [ ] update integration tests (follow-up PR)
* [x] appropriate backport labels added
2023-07-17 15:32:49 +00:00
cfgSnap . JWTProviders ,
2021-03-17 01:22:26 +00:00
)
2018-10-03 18:18:55 +00:00
if err != nil {
2019-04-29 16:27:57 +00:00
return nil , err
2018-10-03 18:18:55 +00:00
}
2023-07-06 11:27:30 +00:00
filterOpts . httpAuthzFilters = [ ] * envoy_http_v3 . HttpFilter { }
2023-05-19 22:14:16 +00:00
if jwtFilter != nil {
filterOpts . httpAuthzFilters = append ( filterOpts . httpAuthzFilters , jwtFilter )
}
2023-07-06 11:27:30 +00:00
filterOpts . httpAuthzFilters = append ( filterOpts . httpAuthzFilters , rbacFilter )
2023-05-19 22:14:16 +00:00
2023-01-06 17:13:40 +00:00
meshConfig := cfgSnap . MeshConfig ( )
includeXFCC := meshConfig == nil || meshConfig . HTTP == nil || ! meshConfig . HTTP . SanitizeXForwardedClientCert
notGRPC := cfg . Protocol != "grpc"
if includeXFCC && notGRPC {
2022-04-26 20:46:29 +00:00
filterOpts . forwardClientDetails = true
filterOpts . forwardClientPolicy = envoy_http_v3 . HttpConnectionManager_APPEND_FORWARD
2023-01-06 17:13:40 +00:00
addMeta , err := parseXFCCToDynamicMetaHTTPFilter ( )
if err != nil {
return nil , err
}
filterOpts . httpAuthzFilters = append ( filterOpts . httpAuthzFilters , addMeta )
2022-04-26 20:46:29 +00:00
}
2021-03-17 01:22:26 +00:00
}
2022-08-24 18:13:10 +00:00
// If an inbound connect limit is set, inject a connection limit filter on each chain.
if cfg . MaxInboundConnections > 0 {
connectionLimitFilter , err := makeConnectionLimitFilter ( cfg . MaxInboundConnections )
if err != nil {
return nil , err
}
l . FilterChains = [ ] * envoy_listener_v3 . FilterChain {
{
Filters : [ ] * envoy_listener_v3 . Filter {
connectionLimitFilter ,
} ,
} ,
}
}
2021-03-17 01:22:26 +00:00
filter , err := makeListenerFilter ( filterOpts )
if err != nil {
return nil , err
}
2022-08-24 18:13:10 +00:00
if len ( l . FilterChains ) > 0 {
// The list of FilterChains has already been initialized
l . FilterChains [ 0 ] . Filters = append ( l . FilterChains [ 0 ] . Filters , filter )
} else {
l . FilterChains = [ ] * envoy_listener_v3 . FilterChain {
{
Filters : [ ] * envoy_listener_v3 . Filter {
filter ,
} ,
2018-10-03 18:18:55 +00:00
} ,
2022-08-24 18:13:10 +00:00
}
2021-03-17 01:22:26 +00:00
}
2022-10-24 16:48:02 +00:00
err = s . finalizePublicListenerFromConfig ( l , cfgSnap , useHTTPFilter )
2021-03-17 22:18:56 +00:00
if err != nil {
return nil , fmt . Errorf ( "failed to attach Consul filters and TLS context to custom public listener: %v" , err )
2021-03-17 01:22:26 +00:00
}
2020-08-27 17:20:58 +00:00
2023-04-19 19:45:00 +00:00
// When permissive mTLS mode is enabled, include an additional filter chain
// that matches on the `destination_port == <service port>`. Traffic sent
// directly to the service port is passed through to the application
// unmodified.
2023-05-24 17:01:17 +00:00
if cfgSnap . Proxy . Mode == structs . ProxyModeTransparent &&
cfgSnap . Proxy . MutualTLSMode == structs . MutualTLSModePermissive {
2023-04-19 19:45:00 +00:00
chain , err := makePermissiveFilterChain ( cfgSnap , filterOpts )
if err != nil {
return nil , fmt . Errorf ( "unable to add permissive mtls filter chain: %w" , err )
}
if chain == nil {
s . Logger . Debug ( "no service port defined for service in permissive mTLS mode; not adding filter chain for non-mTLS traffic" )
} else {
l . FilterChains = append ( l . FilterChains , chain )
// With tproxy, the REDIRECT iptables target rewrites the destination ip/port
// to the proxy ip/port (e.g. 127.0.0.1:20000) for incoming packets.
// We need the original_dst filter to recover the original destination address.
2023-05-24 17:01:17 +00:00
originalDstFilter , err := makeEnvoyListenerFilter ( "envoy.filters.listener.original_dst" , & envoy_original_dst_v3 . OriginalDst { } )
if err != nil {
return nil , err
}
l . ListenerFilters = append ( l . ListenerFilters , originalDstFilter )
2023-04-19 19:45:00 +00:00
}
}
2021-03-17 01:22:26 +00:00
return l , err
}
2023-04-19 19:45:00 +00:00
func makePermissiveFilterChain ( cfgSnap * proxycfg . ConfigSnapshot , opts listenerFilterOpts ) ( * envoy_listener_v3 . FilterChain , error ) {
servicePort := cfgSnap . Proxy . LocalServicePort
if servicePort <= 0 {
// No service port means the service does not accept incoming traffic, so
// the connect proxy does not need to listen for incoming non-mTLS traffic.
return nil , nil
}
opts . statPrefix += "permissive_"
filter , err := makeTCPProxyFilter ( opts )
if err != nil {
return nil , err
}
chain := & envoy_listener_v3 . FilterChain {
FilterChainMatch : & envoy_listener_v3 . FilterChainMatch {
DestinationPort : & wrapperspb . UInt32Value { Value : uint32 ( servicePort ) } ,
} ,
Filters : [ ] * envoy_listener_v3 . Filter { filter } ,
}
return chain , nil
}
// finalizePublicListenerFromConfig is used for best-effort injection of Consul filter-chains onto listeners.
// This includes L4 authorization filters and TLS context.
func (s *ResourceGenerator) finalizePublicListenerFromConfig(l *envoy_listener_v3.Listener, cfgSnap *proxycfg.ConfigSnapshot, useHTTPFilter bool) error {
	if !useHTTPFilter {
		// Best-effort injection of L4 intentions
		// NOTE(review): the error from injectConnectFilters is deliberately
		// discarded ("best-effort" per the doc comment above); the listener is
		// left without the authz filter in that case.
		if err := s.injectConnectFilters(cfgSnap, l); err != nil {
			return nil
		}
	}

	// Always apply TLS certificates
	// NOTE(review): this error is also swallowed, so this function currently
	// never returns a non-nil error and any error-wrapping at the call site is
	// dead code. Verify whether `return err` was intended here before changing.
	if err := s.injectConnectTLSForPublicListener(cfgSnap, l); err != nil {
		return nil
	}

	return nil
}
2021-04-29 18:54:05 +00:00
func ( s * ResourceGenerator ) makeExposedCheckListener ( cfgSnap * proxycfg . ConfigSnapshot , cluster string , path structs . ExposePath ) ( proto . Message , error ) {
2023-08-15 18:57:07 +00:00
cfg , err := config . ParseProxyConfig ( cfgSnap . Proxy . Config )
2019-09-26 02:55:52 +00:00
if err != nil {
// Don't hard fail on a config typo, just warn. The parse func returns
// default config if there is an error so it's safe to continue.
2020-01-28 23:50:41 +00:00
s . Logger . Warn ( "failed to parse Connect.Proxy.Config" , "error" , err )
2019-09-26 02:55:52 +00:00
}
// No user config, use default listener
addr := cfgSnap . Address
// Override with bind address if one is set, otherwise default to 0.0.0.0
if cfg . BindAddress != "" {
addr = cfg . BindAddress
} else if addr == "" {
addr = "0.0.0.0"
}
// Strip any special characters from path to make a valid and hopefully unique name
r := regexp . MustCompile ( ` [^a-zA-Z0-9]+ ` )
strippedPath := r . ReplaceAllString ( path . Path , "" )
listenerName := fmt . Sprintf ( "exposed_path_%s" , strippedPath )
2022-12-22 20:18:15 +00:00
listenerOpts := makeListenerOpts {
name : listenerName ,
accessLogs : cfgSnap . Proxy . AccessLogs ,
addr : addr ,
port : path . ListenerPort ,
direction : envoy_core_v3 . TrafficDirection_INBOUND ,
logger : s . Logger ,
}
l := makeListener ( listenerOpts )
2019-09-26 02:55:52 +00:00
filterName := fmt . Sprintf ( "exposed_path_filter_%s_%d" , strippedPath , path . ListenerPort )
2022-12-22 20:18:15 +00:00
filterOpts := listenerFilterOpts {
2023-01-06 17:13:40 +00:00
useRDS : false ,
protocol : path . Protocol ,
filterName : filterName ,
routeName : filterName ,
cluster : cluster ,
statPrefix : "" ,
routePath : path . Path ,
httpAuthzFilters : nil ,
accessLogs : & cfgSnap . Proxy . AccessLogs ,
logger : s . Logger ,
2022-08-30 06:36:06 +00:00
// in the exposed check listener we don't set the tracing configuration
2020-08-27 17:20:58 +00:00
}
2022-12-22 20:18:15 +00:00
f , err := makeListenerFilter ( filterOpts )
2019-09-26 02:55:52 +00:00
if err != nil {
return nil , err
}
2021-02-26 22:23:15 +00:00
chain := & envoy_listener_v3 . FilterChain {
Filters : [ ] * envoy_listener_v3 . Filter { f } ,
2019-09-26 02:55:52 +00:00
}
// For registered checks restrict traffic sources to localhost and Consul's advertise addr
if path . ParsedFromCheck {
// For the advertise addr we use a CidrRange that only matches one address
advertise := s . CfgFetcher . AdvertiseAddrLAN ( )
// Get prefix length based on whether address is ipv4 (32 bits) or ipv6 (128 bits)
advertiseLen := 32
ip := net . ParseIP ( advertise )
if ip != nil && strings . Contains ( advertise , ":" ) {
advertiseLen = 128
}
2021-02-26 22:23:15 +00:00
ranges := make ( [ ] * envoy_core_v3 . CidrRange , 0 , 3 )
2021-02-19 20:38:43 +00:00
ranges = append ( ranges ,
2023-01-11 14:39:10 +00:00
& envoy_core_v3 . CidrRange { AddressPrefix : "127.0.0.1" , PrefixLen : & wrapperspb . UInt32Value { Value : 8 } } ,
& envoy_core_v3 . CidrRange { AddressPrefix : advertise , PrefixLen : & wrapperspb . UInt32Value { Value : uint32 ( advertiseLen ) } } ,
2021-02-19 20:38:43 +00:00
)
2023-08-15 18:57:07 +00:00
if ok , err := platform . SupportsIPv6 ( ) ; err != nil {
2021-02-19 20:38:43 +00:00
return nil , err
} else if ok {
ranges = append ( ranges ,
2023-01-11 14:39:10 +00:00
& envoy_core_v3 . CidrRange { AddressPrefix : "::1" , PrefixLen : & wrapperspb . UInt32Value { Value : 128 } } ,
2021-02-19 20:38:43 +00:00
)
}
2021-02-26 22:23:15 +00:00
chain . FilterChainMatch = & envoy_listener_v3 . FilterChainMatch {
2021-02-19 20:38:43 +00:00
SourcePrefixRanges : ranges ,
2019-09-26 02:55:52 +00:00
}
}
2021-02-26 22:23:15 +00:00
l . FilterChains = [ ] * envoy_listener_v3 . FilterChain { chain }
2019-09-26 02:55:52 +00:00
return l , err
}
2021-04-29 18:54:05 +00:00
func ( s * ResourceGenerator ) makeTerminatingGatewayListener (
2020-07-09 22:04:51 +00:00
cfgSnap * proxycfg . ConfigSnapshot ,
name , addr string ,
port int ,
2021-02-26 22:23:15 +00:00
) ( * envoy_listener_v3 . Listener , error ) {
2022-12-22 20:18:15 +00:00
listenerOpts := makeListenerOpts {
name : name ,
accessLogs : cfgSnap . Proxy . AccessLogs ,
addr : addr ,
port : port ,
direction : envoy_core_v3 . TrafficDirection_INBOUND ,
logger : s . Logger ,
}
l := makeListener ( listenerOpts )
2020-04-13 16:33:01 +00:00
tlsInspector , err := makeTLSInspectorListenerFilter ( )
if err != nil {
return nil , err
}
2021-02-26 22:23:15 +00:00
l . ListenerFilters = [ ] * envoy_listener_v3 . ListenerFilter { tlsInspector }
2020-04-13 16:33:01 +00:00
// Make a FilterChain for each linked service
// Match on the cluster name,
2020-08-27 17:20:58 +00:00
for _ , svc := range cfgSnap . TerminatingGateway . ValidServices ( ) {
2021-09-01 14:35:39 +00:00
clusterName := connect . ServiceSNI ( svc . Name , "" , svc . NamespaceOrDefault ( ) , svc . PartitionOrDefault ( ) , cfgSnap . Datacenter , cfgSnap . Roots . TrustDomain )
2020-08-27 17:20:58 +00:00
// Resolvers are optional.
2020-04-14 14:59:23 +00:00
resolver , hasResolver := cfgSnap . TerminatingGateway . ServiceResolvers [ svc ]
2020-04-13 16:33:01 +00:00
2020-08-27 17:20:58 +00:00
intentions := cfgSnap . TerminatingGateway . Intentions [ svc ]
svcConfig := cfgSnap . TerminatingGateway . ServiceConfigs [ svc ]
2023-08-15 18:57:07 +00:00
cfg , err := config . ParseProxyConfig ( svcConfig . ProxyConfig )
2020-08-27 17:20:58 +00:00
if err != nil {
// Don't hard fail on a config typo, just warn. The parse func returns
// default config if there is an error so it's safe to continue.
2021-04-29 18:54:05 +00:00
s . Logger . Warn (
2020-08-27 17:20:58 +00:00
"failed to parse Connect.Proxy.Config for linked service" ,
"service" , svc . String ( ) ,
"error" , err ,
)
2020-04-17 01:04:14 +00:00
}
2022-07-18 21:10:06 +00:00
opts := terminatingGatewayFilterChainOpts {
cluster : clusterName ,
service : svc ,
intentions : intentions ,
protocol : cfg . Protocol ,
}
clusterChain , err := s . makeFilterChainTerminatingGateway ( cfgSnap , opts )
2020-04-13 16:33:01 +00:00
if err != nil {
2020-04-14 14:59:23 +00:00
return nil , fmt . Errorf ( "failed to make filter chain for cluster %q: %v" , clusterName , err )
2020-04-13 16:33:01 +00:00
}
2020-04-14 14:59:23 +00:00
l . FilterChains = append ( l . FilterChains , clusterChain )
2020-04-13 16:33:01 +00:00
2020-04-14 14:59:23 +00:00
// if there is a service-resolver for this service then also setup subset filter chains for it
if hasResolver {
// generate 1 filter chain for each service subset
for subsetName := range resolver . Subsets {
2021-09-01 14:35:39 +00:00
subsetClusterName := connect . ServiceSNI ( svc . Name , subsetName , svc . NamespaceOrDefault ( ) , svc . PartitionOrDefault ( ) , cfgSnap . Datacenter , cfgSnap . Roots . TrustDomain )
2020-08-27 17:20:58 +00:00
2022-07-18 21:10:06 +00:00
opts . cluster = subsetClusterName
subsetClusterChain , err := s . makeFilterChainTerminatingGateway ( cfgSnap , opts )
2020-04-14 14:59:23 +00:00
if err != nil {
2020-08-27 17:20:58 +00:00
return nil , fmt . Errorf ( "failed to make filter chain for cluster %q: %v" , subsetClusterName , err )
2020-04-14 14:59:23 +00:00
}
2020-08-27 17:20:58 +00:00
l . FilterChains = append ( l . FilterChains , subsetClusterChain )
2020-04-14 14:59:23 +00:00
}
2020-04-13 16:33:01 +00:00
}
}
2022-05-24 16:51:52 +00:00
for _ , svc := range cfgSnap . TerminatingGateway . ValidDestinations ( ) {
intentions := cfgSnap . TerminatingGateway . Intentions [ svc ]
svcConfig := cfgSnap . TerminatingGateway . ServiceConfigs [ svc ]
2023-08-15 18:57:07 +00:00
cfg , err := config . ParseProxyConfig ( svcConfig . ProxyConfig )
2022-05-24 16:51:52 +00:00
if err != nil {
// Don't hard fail on a config typo, just warn. The parse func returns
// default config if there is an error so it's safe to continue.
s . Logger . Warn (
"failed to parse Connect.Proxy.Config for linked destination" ,
"destination" , svc . String ( ) ,
"error" , err ,
)
}
var dest * structs . DestinationConfig
2022-07-14 18:45:51 +00:00
dest = & svcConfig . Destination
2022-07-18 21:10:06 +00:00
opts := terminatingGatewayFilterChainOpts {
service : svc ,
intentions : intentions ,
protocol : cfg . Protocol ,
port : dest . Port ,
2022-05-24 16:51:52 +00:00
}
2022-07-18 21:10:06 +00:00
for _ , address := range dest . Addresses {
clusterName := clusterNameForDestination ( cfgSnap , svc . Name , address , svc . NamespaceOrDefault ( ) , svc . PartitionOrDefault ( ) )
opts . cluster = clusterName
opts . address = address
clusterChain , err := s . makeFilterChainTerminatingGateway ( cfgSnap , opts )
if err != nil {
return nil , fmt . Errorf ( "failed to make filter chain for cluster %q: %v" , clusterName , err )
}
l . FilterChains = append ( l . FilterChains , clusterChain )
}
2022-05-24 16:51:52 +00:00
}
2021-02-08 16:19:57 +00:00
// Before we add the fallback, sort these chains by the matched name. All
// of these filter chains are independent, but envoy requires them to be in
// some order. If we put them in a random order then every xDS iteration
// envoy will force the listener to be replaced. Sorting these has no
// effect on how they operate, but it does mean that we won't churn
// listeners at idle.
sort . Slice ( l . FilterChains , func ( i , j int ) bool {
return l . FilterChains [ i ] . FilterChainMatch . ServerNames [ 0 ] < l . FilterChains [ j ] . FilterChainMatch . ServerNames [ 0 ]
} )
2020-04-14 21:13:25 +00:00
// This fallback catch-all filter ensures a listener will be present for health checks to pass
// Envoy will reset these connections since known endpoints are caught by filter chain matches above
2022-12-22 20:18:15 +00:00
filterOpts := listenerFilterOpts {
accessLogs : & cfgSnap . Proxy . AccessLogs ,
cluster : "" ,
filterName : name ,
logger : s . Logger ,
statPrefix : "terminating_gateway." ,
}
tcpProxy , err := makeTCPProxyFilter ( filterOpts )
2020-04-14 21:13:25 +00:00
if err != nil {
return nil , err
}
2022-04-18 16:36:07 +00:00
sniCluster , err := makeSNIClusterFilter ( )
if err != nil {
return nil , err
}
2021-02-26 22:23:15 +00:00
fallback := & envoy_listener_v3 . FilterChain {
Filters : [ ] * envoy_listener_v3 . Filter {
2022-04-18 16:36:07 +00:00
sniCluster ,
2020-04-14 21:13:25 +00:00
tcpProxy ,
} ,
}
l . FilterChains = append ( l . FilterChains , fallback )
2020-04-13 16:33:01 +00:00
return l , nil
}
// terminatingGatewayFilterChainOpts bundles the per-service (or
// per-destination) inputs needed to build one terminating-gateway filter
// chain via makeFilterChainTerminatingGateway.
type terminatingGatewayFilterChainOpts struct {
	cluster    string // Envoy cluster to proxy to; also used for the SNI filter-chain match
	service    structs.ServiceName
	intentions structs.SimplifiedIntentions
	protocol   string // service protocol; HTTP-like protocols switch intention enforcement from L4 to L7
	address    string // only valid for destination listeners
	port       int    // only valid for destination listeners
}
func ( s * ResourceGenerator ) makeFilterChainTerminatingGateway ( cfgSnap * proxycfg . ConfigSnapshot , tgtwyOpts terminatingGatewayFilterChainOpts ) ( * envoy_listener_v3 . FilterChain , error ) {
2021-02-26 22:23:15 +00:00
tlsContext := & envoy_tls_v3 . DownstreamTlsContext {
2022-06-01 21:53:52 +00:00
CommonTlsContext : makeCommonTLSContext (
2022-07-18 21:10:06 +00:00
cfgSnap . TerminatingGateway . ServiceLeaves [ tgtwyOpts . service ] ,
2022-06-01 21:53:52 +00:00
cfgSnap . RootPEMs ( ) ,
2022-03-30 18:43:59 +00:00
makeTLSParametersFromProxyTLSConfig ( cfgSnap . MeshConfigTLSIncoming ( ) ) ,
) ,
2023-01-11 14:39:10 +00:00
RequireClientCertificate : & wrapperspb . BoolValue { Value : true } ,
2021-02-22 21:00:15 +00:00
}
transportSocket , err := makeDownstreamTLSTransportSocket ( tlsContext )
if err != nil {
return nil , err
}
2022-07-14 18:45:51 +00:00
filterChain := & envoy_listener_v3 . FilterChain {
2022-07-18 21:10:06 +00:00
FilterChainMatch : makeSNIFilterChainMatch ( tgtwyOpts . cluster ) ,
2022-07-14 18:45:51 +00:00
Filters : make ( [ ] * envoy_listener_v3 . Filter , 0 , 3 ) ,
TransportSocket : transportSocket ,
2020-04-24 20:24:00 +00:00
}
2020-08-27 17:20:58 +00:00
// This controls if we do L4 or L7 intention checks.
2022-07-18 21:10:06 +00:00
useHTTPFilter := structs . IsProtocolHTTPLike ( tgtwyOpts . protocol )
2020-08-27 17:20:58 +00:00
// If this is L4, the first filter we setup is to do intention checks.
if ! useHTTPFilter {
authFilter , err := makeRBACNetworkFilter (
2022-07-18 21:10:06 +00:00
tgtwyOpts . intentions ,
2020-08-27 17:20:58 +00:00
cfgSnap . IntentionDefaultAllow ,
2022-06-29 15:29:54 +00:00
rbacLocalInfo {
trustDomain : cfgSnap . Roots . TrustDomain ,
datacenter : cfgSnap . Datacenter ,
partition : cfgSnap . ProxyID . PartitionOrDefault ( ) ,
} ,
2022-06-10 21:15:22 +00:00
nil , // TODO(peering): verify intentions w peers don't apply to terminatingGateway
2020-08-27 17:20:58 +00:00
)
if err != nil {
return nil , err
}
filterChain . Filters = append ( filterChain . Filters , authFilter )
2020-04-14 14:59:23 +00:00
}
2023-08-15 18:57:07 +00:00
proxyCfg , err := config . ParseProxyConfig ( cfgSnap . Proxy . Config )
2022-08-02 06:52:48 +00:00
if err != nil {
// Don't hard fail on a config typo, just warn. The parse func returns
// default config if there is an error so it's safe to continue.
s . Logger . Warn ( "failed to parse Connect.Proxy.Config" , "error" , err )
}
var tracing * envoy_http_v3 . HttpConnectionManager_Tracing
2022-08-30 06:36:06 +00:00
if proxyCfg . ListenerTracingJSON != "" {
if tracing , err = makeTracingFromUserConfig ( proxyCfg . ListenerTracingJSON ) ; err != nil {
s . Logger . Warn ( "failed to parse ListenerTracingJSON config" , "error" , err )
2022-08-02 06:52:48 +00:00
}
}
2020-08-27 17:20:58 +00:00
// Lastly we setup the actual proxying component. For L4 this is a straight
// tcp proxy. For L7 this is a very hands-off HTTP proxy just to inject an
// HTTP filter to do intention checks here instead.
opts := listenerFilterOpts {
2022-07-18 21:10:06 +00:00
protocol : tgtwyOpts . protocol ,
filterName : fmt . Sprintf ( "%s.%s.%s.%s" , tgtwyOpts . service . Name , tgtwyOpts . service . NamespaceOrDefault ( ) , tgtwyOpts . service . PartitionOrDefault ( ) , cfgSnap . Datacenter ) ,
routeName : tgtwyOpts . cluster , // Set cluster name for route config since each will have its own
cluster : tgtwyOpts . cluster ,
2022-07-14 18:45:51 +00:00
statPrefix : "upstream." ,
routePath : "" ,
2022-08-02 06:52:48 +00:00
tracing : tracing ,
2022-12-22 20:18:15 +00:00
accessLogs : & cfgSnap . Proxy . AccessLogs ,
logger : s . Logger ,
2020-08-27 17:20:58 +00:00
}
if useHTTPFilter {
var err error
2023-01-06 17:13:40 +00:00
rbacFilter , err := makeRBACHTTPFilter (
2022-07-18 21:10:06 +00:00
tgtwyOpts . intentions ,
2020-08-27 17:20:58 +00:00
cfgSnap . IntentionDefaultAllow ,
2022-06-29 15:29:54 +00:00
rbacLocalInfo {
trustDomain : cfgSnap . Roots . TrustDomain ,
datacenter : cfgSnap . Datacenter ,
partition : cfgSnap . ProxyID . PartitionOrDefault ( ) ,
} ,
2022-06-10 21:15:22 +00:00
nil , // TODO(peering): verify intentions w peers don't apply to terminatingGateway
Use JWT-auth filter in metadata mode & Delegate validation to RBAC filter (#18062)
### Description
<!-- Please describe why you're making this change, in plain English.
-->
- Currently the jwt-auth filter doesn't take into account the service
identity when validating jwt-auth, it only takes into account the path
and jwt provider during validation. This causes issues when multiple
source intentions restrict access to an endpoint with different JWT
providers.
- To fix these issues, rather than use the JWT auth filter for
validation, we use it in metadata mode and allow it to forward the
successful validated JWT token payload to the RBAC filter which will
make the decisions.
This PR ensures requests with and without JWT tokens successfully go
through the jwt-authn filter. The filter however only forwards the data
for successful/valid tokens. On the RBAC filter level, we check the
payload for claims and token issuer + existing rbac rules.
### Testing & Reproduction steps
<!--
* In the case of bugs, describe how to replicate
* If any manual tests were done, document the steps and the conditions
to replicate
* Call out any important/ relevant unit tests, e2e tests or integration
tests you have added or are adding
-->
- This test covers a multi level jwt requirements (requirements at top
level and permissions level). It also assumes you have envoy running,
you have a redis and a sidecar proxy service registered, and have a way
to generate jwks with jwt. I mostly use:
https://www.scottbrady91.com/tools/jwt for this.
- first write your proxy defaults
```
Kind = "proxy-defaults"
name = "global"
config {
protocol = "http"
}
```
- Create two providers
```
Kind = "jwt-provider"
Name = "auth0"
Issuer = "https://ronald.local"
JSONWebKeySet = {
Local = {
JWKS = "eyJrZXlzIjog....."
}
}
```
```
Kind = "jwt-provider"
Name = "okta"
Issuer = "https://ronald.local"
JSONWebKeySet = {
Local = {
JWKS = "eyJrZXlzIjogW3...."
}
}
```
- add a service intention
```
Kind = "service-intentions"
Name = "redis"
JWT = {
Providers = [
{
Name = "okta"
},
]
}
Sources = [
{
Name = "*"
Permissions = [{
Action = "allow"
HTTP = {
PathPrefix = "/workspace"
}
JWT = {
Providers = [
{
Name = "okta"
VerifyClaims = [
{
Path = ["aud"]
Value = "my_client_app"
},
{
Path = ["sub"]
Value = "5be86359073c434bad2da3932222dabe"
}
]
},
]
}
},
{
Action = "allow"
HTTP = {
PathPrefix = "/"
}
JWT = {
Providers = [
{
Name = "auth0"
},
]
}
}]
}
]
```
- generate 3 jwt tokens: 1 from auth0 jwks, 1 from okta jwks with
different claims than `/workspace` expects and 1 with correct claims
- connect to your envoy (change service and address as needed) to view
logs and potential errors. You can add: `-- --log-level debug` to see
what data is being forwarded
```
consul connect envoy -sidecar-for redis1 -grpc-addr 127.0.0.1:8502
```
- Make the following requests:
```
curl -s -H "Authorization: Bearer $Auth0_TOKEN" --insecure --cert leaf.cert --key leaf.key --cacert connect-ca.pem https://localhost:20000/workspace -v
RBAC filter denied
curl -s -H "Authorization: Bearer $Okta_TOKEN_with_wrong_claims" --insecure --cert leaf.cert --key leaf.key --cacert connect-ca.pem https://localhost:20000/workspace -v
RBAC filter denied
curl -s -H "Authorization: Bearer $Okta_TOKEN_with_correct_claims" --insecure --cert leaf.cert --key leaf.key --cacert connect-ca.pem https://localhost:20000/workspace -v
Successful request
```
### TODO
* [x] Update test coverage
* [ ] update integration tests (follow-up PR)
* [x] appropriate backport labels added
2023-07-17 15:32:49 +00:00
cfgSnap . JWTProviders ,
2020-08-27 17:20:58 +00:00
)
if err != nil {
return nil , err
}
2020-08-28 20:27:40 +00:00
2023-01-06 17:13:40 +00:00
opts . httpAuthzFilters = [ ] * envoy_http_v3 . HttpFilter { rbacFilter }
2020-08-28 20:27:40 +00:00
opts . cluster = ""
opts . useRDS = true
2022-04-26 20:46:29 +00:00
if meshConfig := cfgSnap . MeshConfig ( ) ; meshConfig == nil || meshConfig . HTTP == nil || ! meshConfig . HTTP . SanitizeXForwardedClientCert {
opts . forwardClientDetails = true
2022-05-03 20:57:57 +00:00
// This assumes that we have a client cert (mTLS) (implied by the context of this function)
opts . forwardClientPolicy = envoy_http_v3 . HttpConnectionManager_APPEND_FORWARD
2022-04-26 20:46:29 +00:00
}
2020-08-27 17:20:58 +00:00
}
filter , err := makeListenerFilter ( opts )
2020-04-14 14:59:23 +00:00
if err != nil {
2022-07-18 21:10:06 +00:00
s . Logger . Error ( "failed to make listener" , "cluster" , tgtwyOpts . cluster , "error" , err )
2020-06-23 20:19:56 +00:00
return nil , err
2020-04-14 14:59:23 +00:00
}
2020-08-27 17:20:58 +00:00
filterChain . Filters = append ( filterChain . Filters , filter )
2020-04-14 14:59:23 +00:00
2020-08-27 17:20:58 +00:00
return filterChain , nil
2020-04-14 14:59:23 +00:00
}
2021-04-29 18:54:05 +00:00
func ( s * ResourceGenerator ) makeMeshGatewayListener ( name , addr string , port int , cfgSnap * proxycfg . ConfigSnapshot ) ( * envoy_listener_v3 . Listener , error ) {
2019-06-18 00:52:01 +00:00
tlsInspector , err := makeTLSInspectorListenerFilter ( )
if err != nil {
return nil , err
}
sniCluster , err := makeSNIClusterFilter ( )
if err != nil {
return nil , err
}
// The cluster name here doesn't matter as the sni_cluster
// filter will fill it in for us.
2022-12-22 20:18:15 +00:00
filterOpts := listenerFilterOpts {
accessLogs : & cfgSnap . Proxy . AccessLogs ,
cluster : "" ,
filterName : name ,
logger : s . Logger ,
statPrefix : "mesh_gateway_local." ,
}
tcpProxy , err := makeTCPProxyFilter ( filterOpts )
2019-06-18 00:52:01 +00:00
if err != nil {
return nil , err
}
2021-02-26 22:23:15 +00:00
sniClusterChain := & envoy_listener_v3 . FilterChain {
Filters : [ ] * envoy_listener_v3 . Filter {
2019-06-18 00:52:01 +00:00
sniCluster ,
tcpProxy ,
} ,
}
2022-12-22 20:18:15 +00:00
opts := makeListenerOpts {
name : name ,
accessLogs : cfgSnap . Proxy . AccessLogs ,
addr : addr ,
port : port ,
direction : envoy_core_v3 . TrafficDirection_UNSPECIFIED ,
logger : s . Logger ,
}
l := makeListener ( opts )
2021-02-26 22:23:15 +00:00
l . ListenerFilters = [ ] * envoy_listener_v3 . ListenerFilter { tlsInspector }
2019-06-18 00:52:01 +00:00
2022-06-28 19:52:25 +00:00
for _ , svc := range cfgSnap . MeshGatewayValidExportedServices ( ) {
peerNames := cfgSnap . MeshGateway . ExportedServicesWithPeers [ svc ]
chain := cfgSnap . MeshGateway . DiscoveryChain [ svc ]
2022-06-06 19:20:41 +00:00
2022-06-28 19:52:25 +00:00
filterChain , err := s . makeMeshGatewayPeerFilterChain ( cfgSnap , svc , peerNames , chain )
2022-06-06 19:20:41 +00:00
if err != nil {
return nil , err
2022-06-28 19:52:25 +00:00
} else if filterChain == nil {
continue
2022-06-15 19:36:18 +00:00
}
l . FilterChains = append ( l . FilterChains , filterChain )
2022-06-06 19:20:41 +00:00
}
2021-10-27 18:36:44 +00:00
// We need 1 Filter Chain per remote cluster
2021-10-26 21:58:23 +00:00
keys := cfgSnap . MeshGateway . GatewayKeys ( )
2021-10-23 20:17:29 +00:00
for _ , key := range keys {
2021-10-24 15:51:55 +00:00
if key . Matches ( cfgSnap . Datacenter , cfgSnap . ProxyID . PartitionOrEmpty ( ) ) {
2020-03-09 20:59:02 +00:00
continue // skip local
}
2021-10-24 15:51:55 +00:00
2021-10-24 15:16:28 +00:00
clusterName := connect . GatewaySNI ( key . Datacenter , key . Partition , cfgSnap . Roots . TrustDomain )
2021-10-23 20:17:29 +00:00
filterName := fmt . Sprintf ( "%s.%s" , name , key . String ( ) )
2022-12-22 20:18:15 +00:00
filterOpts := listenerFilterOpts {
accessLogs : & cfgSnap . Proxy . AccessLogs ,
cluster : clusterName ,
filterName : filterName ,
logger : s . Logger ,
statPrefix : "mesh_gateway_remote." ,
}
dcTCPProxy , err := makeTCPProxyFilter ( filterOpts )
2019-06-18 00:52:01 +00:00
if err != nil {
return nil , err
}
2021-02-26 22:23:15 +00:00
l . FilterChains = append ( l . FilterChains , & envoy_listener_v3 . FilterChain {
FilterChainMatch : & envoy_listener_v3 . FilterChainMatch {
2019-06-18 00:52:01 +00:00
ServerNames : [ ] string { fmt . Sprintf ( "*.%s" , clusterName ) } ,
} ,
2021-02-26 22:23:15 +00:00
Filters : [ ] * envoy_listener_v3 . Filter {
2019-06-18 00:52:01 +00:00
dcTCPProxy ,
} ,
} )
}
2022-09-26 16:50:17 +00:00
// --------
// WAN Federation over mesh gateways
// --------
2021-10-26 22:10:30 +00:00
if cfgSnap . ProxyID . InDefaultPartition ( ) &&
2021-10-26 21:58:23 +00:00
cfgSnap . ServiceMeta [ structs . MetaWANFederationKey ] == "1" &&
cfgSnap . ServerSNIFn != nil {
2021-10-23 20:17:29 +00:00
for _ , key := range keys {
if key . Datacenter == cfgSnap . Datacenter {
2020-03-09 20:59:02 +00:00
continue // skip local
}
2021-10-23 20:17:29 +00:00
clusterName := cfgSnap . ServerSNIFn ( key . Datacenter , "" )
filterName := fmt . Sprintf ( "%s.%s" , name , key . String ( ) )
2022-12-22 20:18:15 +00:00
filterOpts := listenerFilterOpts {
accessLogs : & cfgSnap . Proxy . AccessLogs ,
cluster : clusterName ,
filterName : filterName ,
logger : s . Logger ,
statPrefix : "mesh_gateway_remote." ,
}
dcTCPProxy , err := makeTCPProxyFilter ( filterOpts )
2020-03-09 20:59:02 +00:00
if err != nil {
return nil , err
}
2021-02-26 22:23:15 +00:00
l . FilterChains = append ( l . FilterChains , & envoy_listener_v3 . FilterChain {
FilterChainMatch : & envoy_listener_v3 . FilterChainMatch {
2020-03-09 20:59:02 +00:00
ServerNames : [ ] string { fmt . Sprintf ( "*.%s" , clusterName ) } ,
} ,
2021-02-26 22:23:15 +00:00
Filters : [ ] * envoy_listener_v3 . Filter {
2020-03-09 20:59:02 +00:00
dcTCPProxy ,
} ,
} )
}
// Wildcard all flavors to each server.
2022-09-26 16:50:17 +00:00
servers , _ := cfgSnap . MeshGateway . WatchedLocalServers . Get ( structs . ConsulServiceName )
2022-09-23 01:24:13 +00:00
for _ , srv := range servers {
2020-03-09 20:59:02 +00:00
clusterName := cfgSnap . ServerSNIFn ( cfgSnap . Datacenter , srv . Node . Node )
2020-11-16 23:37:19 +00:00
filterName := fmt . Sprintf ( "%s.%s" , name , cfgSnap . Datacenter )
2022-12-22 20:18:15 +00:00
filterOpts := listenerFilterOpts {
accessLogs : & cfgSnap . Proxy . AccessLogs ,
cluster : clusterName ,
filterName : filterName ,
logger : s . Logger ,
statPrefix : "mesh_gateway_local_server." ,
}
dcTCPProxy , err := makeTCPProxyFilter ( filterOpts )
2020-03-09 20:59:02 +00:00
if err != nil {
return nil , err
}
2021-02-26 22:23:15 +00:00
l . FilterChains = append ( l . FilterChains , & envoy_listener_v3 . FilterChain {
FilterChainMatch : & envoy_listener_v3 . FilterChainMatch {
2022-09-23 03:14:25 +00:00
ServerNames : [ ] string { clusterName } ,
2020-03-09 20:59:02 +00:00
} ,
2021-02-26 22:23:15 +00:00
Filters : [ ] * envoy_listener_v3 . Filter {
2020-03-09 20:59:02 +00:00
dcTCPProxy ,
} ,
} )
}
}
2022-09-26 16:50:17 +00:00
// --------
// Peering control plane
// --------
// Create a single filter chain for local servers to be dialed by peers.
2022-09-23 03:14:25 +00:00
// When peering through gateways we load balance across the local servers. They cannot be addressed individually.
2022-09-26 16:50:17 +00:00
if cfgSnap . MeshConfig ( ) . PeerThroughMeshGateways ( ) {
servers , _ := cfgSnap . MeshGateway . WatchedLocalServers . Get ( structs . ConsulServiceName )
2022-09-23 03:14:25 +00:00
// Peering control-plane traffic can only ever be handled by the local leader.
// We avoid routing to read replicas since they will never be Raft voters.
if haveVoters ( servers ) {
clusterName := connect . PeeringServerSAN ( cfgSnap . Datacenter , cfgSnap . Roots . TrustDomain )
filterName := fmt . Sprintf ( "%s.%s" , name , cfgSnap . Datacenter )
2022-12-22 20:18:15 +00:00
filterOpts := listenerFilterOpts {
accessLogs : & cfgSnap . Proxy . AccessLogs ,
cluster : clusterName ,
filterName : filterName ,
logger : s . Logger ,
statPrefix : "mesh_gateway_local_peering_server." ,
}
filter , err := makeTCPProxyFilter ( filterOpts )
2022-09-23 03:14:25 +00:00
if err != nil {
return nil , err
}
l . FilterChains = append ( l . FilterChains , & envoy_listener_v3 . FilterChain {
FilterChainMatch : & envoy_listener_v3 . FilterChainMatch {
ServerNames : [ ] string { clusterName } ,
} ,
Filters : [ ] * envoy_listener_v3 . Filter {
filter ,
} ,
} )
}
}
2022-09-26 16:50:17 +00:00
// Create a filter chain per outbound peer server cluster. Listen for the SNI provided
// as the peer's ServerName.
var peerServerFilterChains [ ] * envoy_listener_v3 . FilterChain
for name := range cfgSnap . MeshGateway . PeerServers {
2022-12-22 20:18:15 +00:00
filterOpts := listenerFilterOpts {
accessLogs : & cfgSnap . Proxy . AccessLogs ,
cluster : name ,
filterName : name ,
logger : s . Logger ,
statPrefix : "mesh_gateway_remote_peering_servers." ,
}
dcTCPProxy , err := makeTCPProxyFilter ( filterOpts )
2022-09-26 16:50:17 +00:00
if err != nil {
return nil , err
}
peerServerFilterChains = append ( peerServerFilterChains , & envoy_listener_v3 . FilterChain {
FilterChainMatch : makeSNIFilterChainMatch ( name ) ,
Filters : [ ] * envoy_listener_v3 . Filter {
dcTCPProxy ,
} ,
} )
}
// Sort so the output is stable and the listener doesn't get drained
sort . Slice ( peerServerFilterChains , func ( i , j int ) bool {
return peerServerFilterChains [ i ] . FilterChainMatch . ServerNames [ 0 ] < peerServerFilterChains [ j ] . FilterChainMatch . ServerNames [ 0 ]
} )
l . FilterChains = append ( l . FilterChains , peerServerFilterChains ... )
2019-06-18 00:52:01 +00:00
// This needs to get tacked on at the end as it has no
// matching and will act as a catch all
l . FilterChains = append ( l . FilterChains , sniClusterChain )
return l , nil
}
2022-06-28 19:52:25 +00:00
func ( s * ResourceGenerator ) makeMeshGatewayPeerFilterChain (
cfgSnap * proxycfg . ConfigSnapshot ,
svc structs . ServiceName ,
peerNames [ ] string ,
chain * structs . CompiledDiscoveryChain ,
) ( * envoy_listener_v3 . FilterChain , error ) {
var (
useHTTPFilter = structs . IsProtocolHTTPLike ( chain . Protocol )
// RDS, Envoy's Route Discovery Service, is only used for HTTP services.
useRDS = useHTTPFilter
)
2022-06-29 15:29:54 +00:00
if useHTTPFilter && cfgSnap . MeshGateway . Leaf == nil {
return nil , nil // ignore; not ready
}
2022-06-28 19:52:25 +00:00
var clusterName string
if ! useRDS {
// When not using RDS we must generate a cluster name to attach to the filter chain.
// With RDS, cluster names get attached to the dynamic routes instead.
target , err := simpleChainTarget ( chain )
if err != nil {
return nil , err
}
2023-08-15 18:57:07 +00:00
clusterName = meshGatewayExportedClusterNamePrefix + naming . CustomizeClusterName ( target . Name , chain )
2022-06-28 19:52:25 +00:00
}
uid := proxycfg . NewUpstreamIDFromServiceName ( svc )
filterName := fmt . Sprintf ( "%s.%s.%s.%s" , chain . ServiceName , chain . Namespace , chain . Partition , chain . Datacenter )
filterChain , err := s . makeUpstreamFilterChain ( filterChainOpts {
2022-12-22 20:18:15 +00:00
accessLogs : & cfgSnap . Proxy . AccessLogs ,
2022-06-28 20:32:42 +00:00
routeName : uid . EnvoyID ( ) ,
clusterName : clusterName ,
filterName : filterName ,
protocol : chain . Protocol ,
useRDS : useRDS ,
statPrefix : "mesh_gateway_local_peered." ,
forwardClientDetails : true ,
forwardClientPolicy : envoy_http_v3 . HttpConnectionManager_SANITIZE_SET ,
2022-06-28 19:52:25 +00:00
} )
if err != nil {
return nil , err
}
var peeredServerNames [ ] string
for _ , peerName := range peerNames {
peeredSNI := connect . PeeredServiceSNI (
svc . Name ,
svc . NamespaceOrDefault ( ) ,
svc . PartitionOrDefault ( ) ,
peerName ,
cfgSnap . Roots . TrustDomain ,
)
peeredServerNames = append ( peeredServerNames , peeredSNI )
}
filterChain . FilterChainMatch = & envoy_listener_v3 . FilterChainMatch {
ServerNames : peeredServerNames ,
}
if useHTTPFilter {
// We only terminate TLS if we're doing an L7 proxy.
var peerBundles [ ] * pbpeering . PeeringTrustBundle
for _ , bundle := range cfgSnap . MeshGateway . PeeringTrustBundles {
if stringslice . Contains ( peerNames , bundle . PeerName ) {
peerBundles = append ( peerBundles , bundle )
}
}
peeredTransportSocket , err := createDownstreamTransportSocketForConnectTLS ( cfgSnap , peerBundles )
if err != nil {
return nil , err
}
filterChain . TransportSocket = peeredTransportSocket
}
return filterChain , nil
}
Update filter chain creation for sidecar/ingress listeners (#11245)
The duo of `makeUpstreamFilterChainForDiscoveryChain` and `makeListenerForDiscoveryChain` were really hard to reason about, and led to concealing a bug in their branching logic. There were several issues here:
- They tried to accomplish too much: determining filter name, cluster name, and whether RDS should be used.
- They embedded logic to handle significantly different kinds of upstream listeners (passthrough, prepared query, typical services, and catch-all)
- They needed to coalesce different data sources (Upstream and CompiledDiscoveryChain)
Rather than handling all of those tasks inside of these functions, this PR pulls out the RDS/clusterName/filterName logic.
This refactor also fixed a bug with the handling of [UpstreamDefaults](https://www.consul.io/docs/connect/config-entries/service-defaults#defaults). These defaults get stored as UpstreamConfig in the proxy snapshot with a DestinationName of "*", since they apply to all upstreams. However, this wildcard destination name must not be used when creating the name of the associated upstream cluster. The coalescing logic in the original functions here was in some situations creating clusters with a `*.` prefix, which is not a valid destination.
2021-11-09 21:43:51 +00:00
// filterChainOpts captures the inputs used by makeUpstreamFilterChain to
// construct a single Envoy filter chain for an upstream listener.
type filterChainOpts struct {
	// accessLogs is the listener access log configuration forwarded to the
	// generated network filter.
	accessLogs *structs.AccessLogsConfig
	// routeName is the RDS route name (or static route config name) for HTTP.
	routeName string
	// clusterName is the static target cluster; must be empty when useRDS is set.
	clusterName string
	filterName  string
	protocol    string
	// useRDS selects dynamic route discovery for HTTP-like protocols.
	useRDS bool
	// tlsContext, when non-nil, is wrapped into a downstream TLS transport socket.
	tlsContext *envoy_tls_v3.DownstreamTlsContext
	// statPrefix namespaces the filter's metrics; defaulted to "upstream." if empty.
	statPrefix string
	// forwardClientDetails enables forwarding of client certificate details.
	forwardClientDetails bool
	forwardClientPolicy  envoy_http_v3.HttpConnectionManager_ForwardClientCertDetails
	// tracing, when non-nil, overrides the HTTP connection manager's default tracing.
	tracing *envoy_http_v3.HttpConnectionManager_Tracing
}
2021-03-17 19:40:49 +00:00
Update filter chain creation for sidecar/ingress listeners (#11245)
The duo of `makeUpstreamFilterChainForDiscoveryChain` and `makeListenerForDiscoveryChain` were really hard to reason about, and led to concealing a bug in their branching logic. There were several issues here:
- They tried to accomplish too much: determining filter name, cluster name, and whether RDS should be used.
- They embedded logic to handle significantly different kinds of upstream listeners (passthrough, prepared query, typical services, and catch-all)
- They needed to coalesce different data sources (Upstream and CompiledDiscoveryChain)
Rather than handling all of those tasks inside of these functions, this PR pulls out the RDS/clusterName/filterName logic.
This refactor also fixed a bug with the handling of [UpstreamDefaults](https://www.consul.io/docs/connect/config-entries/service-defaults#defaults). These defaults get stored as UpstreamConfig in the proxy snapshot with a DestinationName of "*", since they apply to all upstreams. However, this wildcard destination name must not be used when creating the name of the associated upstream cluster. The coalescing logic in the original functions here was in some situations creating clusters with a `*.` prefix, which is not a valid destination.
2021-11-09 21:43:51 +00:00
func ( s * ResourceGenerator ) makeUpstreamFilterChain ( opts filterChainOpts ) ( * envoy_listener_v3 . FilterChain , error ) {
2022-06-28 19:52:25 +00:00
if opts . statPrefix == "" {
opts . statPrefix = "upstream."
}
Update filter chain creation for sidecar/ingress listeners (#11245)
The duo of `makeUpstreamFilterChainForDiscoveryChain` and `makeListenerForDiscoveryChain` were really hard to reason about, and led to concealing a bug in their branching logic. There were several issues here:
- They tried to accomplish too much: determining filter name, cluster name, and whether RDS should be used.
- They embedded logic to handle significantly different kinds of upstream listeners (passthrough, prepared query, typical services, and catch-all)
- They needed to coalesce different data sources (Upstream and CompiledDiscoveryChain)
Rather than handling all of those tasks inside of these functions, this PR pulls out the RDS/clusterName/filterName logic.
This refactor also fixed a bug with the handling of [UpstreamDefaults](https://www.consul.io/docs/connect/config-entries/service-defaults#defaults). These defaults get stored as UpstreamConfig in the proxy snapshot with a DestinationName of "*", since they apply to all upstreams. However, this wildcard destination name must not be used when creating the name of the associated upstream cluster. The coalescing logic in the original functions here was in some situations creating clusters with a `*.` prefix, which is not a valid destination.
2021-11-09 21:43:51 +00:00
filter , err := makeListenerFilter ( listenerFilterOpts {
2022-06-28 20:32:42 +00:00
useRDS : opts . useRDS ,
protocol : opts . protocol ,
filterName : opts . filterName ,
routeName : opts . routeName ,
cluster : opts . clusterName ,
statPrefix : opts . statPrefix ,
forwardClientDetails : opts . forwardClientDetails ,
forwardClientPolicy : opts . forwardClientPolicy ,
2022-08-02 06:52:48 +00:00
tracing : opts . tracing ,
2022-12-22 20:18:15 +00:00
accessLogs : opts . accessLogs ,
logger : s . Logger ,
Update filter chain creation for sidecar/ingress listeners (#11245)
The duo of `makeUpstreamFilterChainForDiscoveryChain` and `makeListenerForDiscoveryChain` were really hard to reason about, and led to concealing a bug in their branching logic. There were several issues here:
- They tried to accomplish too much: determining filter name, cluster name, and whether RDS should be used.
- They embedded logic to handle significantly different kinds of upstream listeners (passthrough, prepared query, typical services, and catch-all)
- They needed to coalesce different data sources (Upstream and CompiledDiscoveryChain)
Rather than handling all of those tasks inside of these functions, this PR pulls out the RDS/clusterName/filterName logic.
This refactor also fixed a bug with the handling of [UpstreamDefaults](https://www.consul.io/docs/connect/config-entries/service-defaults#defaults). These defaults get stored as UpstreamConfig in the proxy snapshot with a DestinationName of "*", since they apply to all upstreams. However, this wildcard destination name must not be used when creating the name of the associated upstream cluster. The coalescing logic in the original functions here was in some situations creating clusters with a `*.` prefix, which is not a valid destination.
2021-11-09 21:43:51 +00:00
} )
2021-03-17 19:40:49 +00:00
if err != nil {
return nil , err
}
Update filter chain creation for sidecar/ingress listeners (#11245)
The duo of `makeUpstreamFilterChainForDiscoveryChain` and `makeListenerForDiscoveryChain` were really hard to reason about, and led to concealing a bug in their branching logic. There were several issues here:
- They tried to accomplish too much: determining filter name, cluster name, and whether RDS should be used.
- They embedded logic to handle significantly different kinds of upstream listeners (passthrough, prepared query, typical services, and catch-all)
- They needed to coalesce different data sources (Upstream and CompiledDiscoveryChain)
Rather than handling all of those tasks inside of these functions, this PR pulls out the RDS/clusterName/filterName logic.
This refactor also fixed a bug with the handling of [UpstreamDefaults](https://www.consul.io/docs/connect/config-entries/service-defaults#defaults). These defaults get stored as UpstreamConfig in the proxy snapshot with a DestinationName of "*", since they apply to all upstreams. However, this wildcard destination name must not be used when creating the name of the associated upstream cluster. The coalescing logic in the original functions here was in some situations creating clusters with a `*.` prefix, which is not a valid destination.
2021-11-09 21:43:51 +00:00
transportSocket , err := makeDownstreamTLSTransportSocket ( opts . tlsContext )
2021-03-17 19:40:49 +00:00
if err != nil {
return nil , err
}
return & envoy_listener_v3 . FilterChain {
Filters : [ ] * envoy_listener_v3 . Filter {
filter ,
} ,
TransportSocket : transportSocket ,
} , nil
}
Update filter chain creation for sidecar/ingress listeners (#11245)
The duo of `makeUpstreamFilterChainForDiscoveryChain` and `makeListenerForDiscoveryChain` were really hard to reason about, and led to concealing a bug in their branching logic. There were several issues here:
- They tried to accomplish too much: determining filter name, cluster name, and whether RDS should be used.
- They embedded logic to handle significantly different kinds of upstream listeners (passthrough, prepared query, typical services, and catch-all)
- They needed to coalesce different data sources (Upstream and CompiledDiscoveryChain)
Rather than handling all of those tasks inside of these functions, this PR pulls out the RDS/clusterName/filterName logic.
This refactor also fixed a bug with the handling of [UpstreamDefaults](https://www.consul.io/docs/connect/config-entries/service-defaults#defaults). These defaults get stored as UpstreamConfig in the proxy snapshot with a DestinationName of "*", since they apply to all upstreams. However, this wildcard destination name must not be used when creating the name of the associated upstream cluster. The coalescing logic in the original functions here was in some situations creating clusters with a `*.` prefix, which is not a valid destination.
2021-11-09 21:43:51 +00:00
// simpleChainTarget returns the discovery target for a chain with a single node.
// A chain can have a single target if it is for a TCP service or an HTTP service without
// multiple splits/routes/failovers.
func simpleChainTarget ( chain * structs . CompiledDiscoveryChain ) ( * structs . DiscoveryTarget , error ) {
startNode := chain . Nodes [ chain . StartNode ]
if startNode == nil {
return nil , fmt . Errorf ( "missing first node in compiled discovery chain for: %s" , chain . ServiceName )
2019-07-08 11:48:48 +00:00
}
Update filter chain creation for sidecar/ingress listeners (#11245)
The duo of `makeUpstreamFilterChainForDiscoveryChain` and `makeListenerForDiscoveryChain` were really hard to reason about, and led to concealing a bug in their branching logic. There were several issues here:
- They tried to accomplish too much: determining filter name, cluster name, and whether RDS should be used.
- They embedded logic to handle significantly different kinds of upstream listeners (passthrough, prepared query, typical services, and catch-all)
- They needed to coalesce different data sources (Upstream and CompiledDiscoveryChain)
Rather than handling all of those tasks inside of these functions, this PR pulls out the RDS/clusterName/filterName logic.
This refactor also fixed a bug with the handling of [UpstreamDefaults](https://www.consul.io/docs/connect/config-entries/service-defaults#defaults). These defaults get stored as UpstreamConfig in the proxy snapshot with a DestinationName of "*", since they apply to all upstreams. However, this wildcard destination name must not be used when creating the name of the associated upstream cluster. The coalescing logic in the original functions here was in some situations creating clusters with a `*.` prefix, which is not a valid destination.
2021-11-09 21:43:51 +00:00
if startNode . Type != structs . DiscoveryGraphNodeTypeResolver {
return nil , fmt . Errorf ( "expected discovery chain with single node, found unexpected start node: %s" , startNode . Type )
2019-07-08 11:48:48 +00:00
}
Update filter chain creation for sidecar/ingress listeners (#11245)
The duo of `makeUpstreamFilterChainForDiscoveryChain` and `makeListenerForDiscoveryChain` were really hard to reason about, and led to concealing a bug in their branching logic. There were several issues here:
- They tried to accomplish too much: determining filter name, cluster name, and whether RDS should be used.
- They embedded logic to handle significantly different kinds of upstream listeners (passthrough, prepared query, typical services, and catch-all)
- They needed to coalesce different data sources (Upstream and CompiledDiscoveryChain)
Rather than handling all of those tasks inside of these functions, this PR pulls out the RDS/clusterName/filterName logic.
This refactor also fixed a bug with the handling of [UpstreamDefaults](https://www.consul.io/docs/connect/config-entries/service-defaults#defaults). These defaults get stored as UpstreamConfig in the proxy snapshot with a DestinationName of "*", since they apply to all upstreams. However, this wildcard destination name must not be used when creating the name of the associated upstream cluster. The coalescing logic in the original functions here was in some situations creating clusters with a `*.` prefix, which is not a valid destination.
2021-11-09 21:43:51 +00:00
targetID := startNode . Resolver . Target
return chain . Targets [ targetID ] , nil
2019-07-02 03:10:51 +00:00
}
2022-01-20 16:12:04 +00:00
func ( s * ResourceGenerator ) getAndModifyUpstreamConfigForListener (
uid proxycfg . UpstreamID ,
u * structs . Upstream ,
chain * structs . CompiledDiscoveryChain ,
) structs . UpstreamConfig {
2020-05-21 14:08:12 +00:00
var (
2021-03-09 05:10:27 +00:00
cfg structs . UpstreamConfig
2020-05-21 14:08:12 +00:00
err error
)
2021-03-17 19:40:49 +00:00
configMap := make ( map [ string ] interface { } )
if u != nil {
configMap = u . Config
}
2022-03-30 15:04:18 +00:00
if chain == nil || chain . Default {
2022-02-17 19:47:20 +00:00
cfg , err = structs . ParseUpstreamConfigNoDefaults ( configMap )
2020-05-21 14:08:12 +00:00
if err != nil {
// Don't hard fail on a config typo, just warn. The parse func returns
// default config if there is an error so it's safe to continue.
2022-01-20 16:12:04 +00:00
s . Logger . Warn ( "failed to parse" , "upstream" , uid , "error" , err )
2020-05-21 14:08:12 +00:00
}
} else {
// Use NoDefaults here so that we can set the protocol to the chain
// protocol if necessary
2021-03-17 19:40:49 +00:00
cfg , err = structs . ParseUpstreamConfigNoDefaults ( configMap )
2020-05-21 14:08:12 +00:00
if err != nil {
// Don't hard fail on a config typo, just warn. The parse func returns
// default config if there is an error so it's safe to continue.
2022-01-20 16:12:04 +00:00
s . Logger . Warn ( "failed to parse" , "upstream" , uid , "error" , err )
2020-05-21 14:08:12 +00:00
}
2021-03-15 20:12:57 +00:00
if cfg . EnvoyListenerJSON != "" {
2021-04-29 18:54:05 +00:00
s . Logger . Warn ( "ignoring escape hatch setting because already configured for" ,
2022-01-20 16:12:04 +00:00
"discovery chain" , chain . ServiceName , "upstream" , uid , "config" , "envoy_listener_json" )
2020-05-21 14:08:12 +00:00
// Remove from config struct so we don't use it later on
2021-03-15 20:12:57 +00:00
cfg . EnvoyListenerJSON = ""
2020-05-21 14:08:12 +00:00
}
2022-02-17 19:47:20 +00:00
}
protocol := cfg . Protocol
2022-07-14 18:45:51 +00:00
if chain != nil {
if protocol == "" {
protocol = chain . Protocol
}
if protocol == "" {
protocol = "tcp"
}
} else {
2022-02-17 19:47:20 +00:00
protocol = "tcp"
2020-05-21 14:08:12 +00:00
}
2022-02-17 19:47:20 +00:00
// set back on the config so that we can use it from return value
cfg . Protocol = protocol
2020-05-21 14:08:12 +00:00
return cfg
}
2022-06-03 21:42:50 +00:00
func ( s * ResourceGenerator ) getAndModifyUpstreamConfigForPeeredListener (
uid proxycfg . UpstreamID ,
u * structs . Upstream ,
peerMeta structs . PeeringServiceMeta ,
) structs . UpstreamConfig {
var (
cfg structs . UpstreamConfig
err error
)
configMap := make ( map [ string ] interface { } )
if u != nil {
configMap = u . Config
}
cfg , err = structs . ParseUpstreamConfigNoDefaults ( configMap )
if err != nil {
// Don't hard fail on a config typo, just warn. The parse func returns
// default config if there is an error so it's safe to continue.
s . Logger . Warn ( "failed to parse" , "upstream" , uid , "error" , err )
}
2023-01-18 19:43:53 +00:00
// Ignore the configured protocol for peer upstreams, since it is defined by the remote
// cluster, which we cannot control.
protocol := peerMeta . Protocol
2022-06-03 21:42:50 +00:00
if protocol == "" {
protocol = "tcp"
}
// set back on the config so that we can use it from return value
cfg . Protocol = protocol
if cfg . ConnectTimeoutMs == 0 {
cfg . ConnectTimeoutMs = 5000
}
2022-09-27 13:49:28 +00:00
if cfg . MeshGateway . Mode == "" && u != nil {
cfg . MeshGateway = u . MeshGateway
}
2022-06-03 21:42:50 +00:00
return cfg
}
2020-08-27 17:20:58 +00:00
// listenerFilterOpts holds the options for building the terminal network
// filter of a listener chain: a TCP proxy, or an HTTP connection manager for
// HTTP-like protocols (see makeListenerFilter).
type listenerFilterOpts struct {
	// All listener filters
	accessLogs *structs.AccessLogsConfig
	cluster    string // target cluster; required for TCP, forbidden with useRDS
	filterName string
	logger     hclog.Logger
	protocol   string
	statPrefix string

	// HTTP listener filter options
	forwardClientDetails bool
	forwardClientPolicy  envoy_http_v3.HttpConnectionManager_ForwardClientCertDetails
	// httpAuthzFilters are prepended to the HTTP filter chain when present.
	httpAuthzFilters []*envoy_http_v3.HttpFilter
	idleTimeoutMs    *int
	requestTimeoutMs *int
	routeName        string
	// routePath, when set, replaces the catch-all "/" prefix match.
	routePath string
	tracing   *envoy_http_v3.HttpConnectionManager_Tracing
	useRDS    bool
}
2019-09-26 02:55:52 +00:00
2021-02-26 22:23:15 +00:00
func makeListenerFilter ( opts listenerFilterOpts ) ( * envoy_listener_v3 . Filter , error ) {
2020-08-27 17:20:58 +00:00
switch opts . protocol {
2020-09-04 18:45:05 +00:00
case "grpc" , "http2" , "http" :
return makeHTTPFilter ( opts )
2019-04-29 16:27:57 +00:00
case "tcp" :
fallthrough
default :
2020-08-27 17:20:58 +00:00
if opts . useRDS {
2020-06-23 20:19:56 +00:00
return nil , fmt . Errorf ( "RDS is not compatible with the tcp proxy filter" )
2020-08-27 17:20:58 +00:00
} else if opts . cluster == "" {
2020-06-23 20:19:56 +00:00
return nil , fmt . Errorf ( "cluster name is required for a tcp proxy filter" )
2019-10-17 21:44:59 +00:00
}
2022-12-22 20:18:15 +00:00
return makeTCPProxyFilter ( opts )
2019-04-29 16:27:57 +00:00
}
}
2021-02-26 22:23:15 +00:00
// makeTLSInspectorListenerFilter returns the Envoy TLS inspector listener
// filter, which lets subsequent filter chains match on TLS attributes such as
// SNI server names.
func makeTLSInspectorListenerFilter() (*envoy_listener_v3.ListenerFilter, error) {
	return makeEnvoyListenerFilter("envoy.filters.listener.tls_inspector", &envoy_tls_inspector_v3.TlsInspector{})
}
2022-08-01 18:12:43 +00:00
// makeHTTPInspectorListenerFilter returns the Envoy HTTP inspector listener
// filter, used to detect whether inbound plaintext traffic is HTTP.
func makeHTTPInspectorListenerFilter() (*envoy_listener_v3.ListenerFilter, error) {
	return makeEnvoyListenerFilter("envoy.filters.listener.http_inspector", &envoy_extensions_filters_listener_http_inspector_v3.HttpInspector{})
}
2021-08-24 13:10:16 +00:00
func makeSNIFilterChainMatch ( sniMatches ... string ) * envoy_listener_v3 . FilterChainMatch {
2023-02-17 17:46:03 +00:00
if sniMatches == nil {
return nil
}
2021-02-26 22:23:15 +00:00
return & envoy_listener_v3 . FilterChainMatch {
2021-08-24 13:10:16 +00:00
ServerNames : sniMatches ,
2020-04-13 16:33:01 +00:00
}
2019-06-18 00:52:01 +00:00
}
2021-02-26 22:23:15 +00:00
// makeSNIClusterFilter returns the Envoy sni_cluster network filter, which
// forwards a connection to the cluster named by its SNI server name.
func makeSNIClusterFilter() (*envoy_listener_v3.Filter, error) {
	return makeFilter("envoy.filters.network.sni_cluster", &envoy_sni_cluster_v3.SniCluster{})
}
2022-12-22 20:18:15 +00:00
func makeTCPProxyFilter ( opts listenerFilterOpts ) ( * envoy_listener_v3 . Filter , error ) {
accessLogs , err := accesslogs . MakeAccessLogs ( opts . accessLogs , false )
if err != nil && opts . logger != nil {
opts . logger . Warn ( "could not make access log xds for tcp proxy" , err )
}
2021-02-26 22:23:15 +00:00
cfg := & envoy_tcp_proxy_v3 . TcpProxy {
2022-12-22 20:18:15 +00:00
AccessLog : accessLogs ,
ClusterSpecifier : & envoy_tcp_proxy_v3 . TcpProxy_Cluster { Cluster : opts . cluster } ,
StatPrefix : makeStatPrefix ( opts . statPrefix , opts . filterName ) ,
2018-10-03 18:18:55 +00:00
}
2021-02-22 21:00:15 +00:00
return makeFilter ( "envoy.filters.network.tcp_proxy" , cfg )
2018-10-03 18:18:55 +00:00
}
2022-05-19 17:06:13 +00:00
func makeConnectionLimitFilter ( limit int ) ( * envoy_listener_v3 . Filter , error ) {
cfg := & envoy_connection_limit_v3 . ConnectionLimit {
2022-08-24 18:13:10 +00:00
StatPrefix : "inbound_connection_limit" ,
2022-05-19 17:06:13 +00:00
MaxConnections : wrapperspb . UInt64 ( uint64 ( limit ) ) ,
}
return makeFilter ( "envoy.filters.network.connection_limit" , cfg )
}
2020-11-16 23:37:19 +00:00
// makeStatPrefix joins prefix and filterName into an Envoy stat prefix.
// Replace colons here because Envoy does that in the metrics for the actual
// clusters but doesn't in the stat prefix here while dashboards assume they
// will match.
func makeStatPrefix(prefix, filterName string) string {
	// Plain concatenation and ReplaceAll are clearer (and cheaper) than
	// fmt.Sprintf with strings.Replace(..., -1).
	return prefix + strings.ReplaceAll(filterName, ":", "_")
}
2022-08-02 06:52:48 +00:00
func makeTracingFromUserConfig ( configJSON string ) ( * envoy_http_v3 . HttpConnectionManager_Tracing , error ) {
2023-01-11 14:39:10 +00:00
// Type field is present so decode it as a anypb.Any
var any anypb . Any
if err := protojson . Unmarshal ( [ ] byte ( configJSON ) , & any ) ; err != nil {
2022-08-02 06:52:48 +00:00
return nil , err
}
var t envoy_http_v3 . HttpConnectionManager_Tracing
if err := proto . Unmarshal ( any . Value , & t ) ; err != nil {
return nil , err
}
return & t , nil
}
2021-02-26 22:23:15 +00:00
func makeHTTPFilter ( opts listenerFilterOpts ) ( * envoy_listener_v3 . Filter , error ) {
2022-04-18 16:36:07 +00:00
router , err := makeEnvoyHTTPFilter ( "envoy.filters.http.router" , & envoy_http_router_v3 . Router { } )
if err != nil {
return nil , err
}
2022-12-22 20:18:15 +00:00
accessLogs , err := accesslogs . MakeAccessLogs ( opts . accessLogs , false )
if err != nil && opts . logger != nil {
opts . logger . Warn ( "could not make access log xds for http connection manager" , err )
}
2021-02-26 22:23:15 +00:00
cfg := & envoy_http_v3 . HttpConnectionManager {
2022-12-22 20:18:15 +00:00
AccessLog : accessLogs ,
2020-11-16 23:37:19 +00:00
StatPrefix : makeStatPrefix ( opts . statPrefix , opts . filterName ) ,
2021-02-26 22:23:15 +00:00
CodecType : envoy_http_v3 . HttpConnectionManager_AUTO ,
HttpFilters : [ ] * envoy_http_v3 . HttpFilter {
2022-04-18 16:36:07 +00:00
router ,
2019-07-02 03:10:51 +00:00
} ,
2021-02-26 22:23:15 +00:00
Tracing : & envoy_http_v3 . HttpConnectionManager_Tracing {
2019-07-02 03:10:51 +00:00
// Don't trace any requests by default unless the client application
// explicitly propagates trace headers that indicate this should be
// sampled.
2021-02-26 22:23:15 +00:00
RandomSampling : & envoy_type_v3 . Percent { Value : 0.0 } ,
2019-07-02 03:10:51 +00:00
} ,
2023-07-20 20:24:43 +00:00
// Explicitly enable WebSocket upgrades for all HTTP listeners
UpgradeConfigs : [ ] * envoy_http_v3 . HttpConnectionManager_UpgradeConfig {
{ UpgradeType : "websocket" } ,
} ,
2019-07-02 03:10:51 +00:00
}
2022-08-02 06:52:48 +00:00
if opts . tracing != nil {
cfg . Tracing = opts . tracing
}
2020-09-04 18:45:05 +00:00
if opts . useRDS {
if opts . cluster != "" {
2020-06-23 20:19:56 +00:00
return nil , fmt . Errorf ( "cannot specify cluster name when using RDS" )
2019-07-02 03:10:51 +00:00
}
2021-02-26 22:23:15 +00:00
cfg . RouteSpecifier = & envoy_http_v3 . HttpConnectionManager_Rds {
Rds : & envoy_http_v3 . Rds {
2020-09-04 18:45:05 +00:00
RouteConfigName : opts . routeName ,
2021-02-26 22:23:15 +00:00
ConfigSource : & envoy_core_v3 . ConfigSource {
ResourceApiVersion : envoy_core_v3 . ApiVersion_V3 ,
ConfigSourceSpecifier : & envoy_core_v3 . ConfigSource_Ads {
Ads : & envoy_core_v3 . AggregatedConfigSource { } ,
2019-07-02 03:10:51 +00:00
} ,
} ,
} ,
}
} else {
2020-09-04 18:45:05 +00:00
if opts . cluster == "" {
2020-06-23 20:19:56 +00:00
return nil , fmt . Errorf ( "must specify cluster name when not using RDS" )
2019-07-02 03:10:51 +00:00
}
2021-01-25 19:50:00 +00:00
2021-02-26 22:23:15 +00:00
route := & envoy_route_v3 . Route {
Match : & envoy_route_v3 . RouteMatch {
PathSpecifier : & envoy_route_v3 . RouteMatch_Prefix {
2019-09-26 02:55:52 +00:00
Prefix : "/" ,
} ,
// TODO(banks) Envoy supports matching only valid GRPC
// requests which might be nice to add here for gRPC services
// but it's not supported in our current envoy SDK version
// although docs say it was supported by 1.8.0. Going to defer
// that until we've updated the deps.
} ,
2021-02-26 22:23:15 +00:00
Action : & envoy_route_v3 . Route_Route {
Route : & envoy_route_v3 . RouteAction {
ClusterSpecifier : & envoy_route_v3 . RouteAction_Cluster {
2020-09-04 18:45:05 +00:00
Cluster : opts . cluster ,
2019-09-26 02:55:52 +00:00
} ,
} ,
} ,
}
2021-01-25 19:50:00 +00:00
if opts . requestTimeoutMs != nil {
r := route . GetRoute ( )
2022-05-25 01:44:54 +00:00
r . Timeout = durationpb . New ( time . Duration ( * opts . requestTimeoutMs ) * time . Millisecond )
2021-01-25 19:50:00 +00:00
}
2022-11-29 22:43:15 +00:00
if opts . idleTimeoutMs != nil {
r := route . GetRoute ( )
r . IdleTimeout = durationpb . New ( time . Duration ( * opts . idleTimeoutMs ) * time . Millisecond )
}
2019-09-26 02:55:52 +00:00
// If a path is provided, do not match on a catch-all prefix
2020-09-04 18:45:05 +00:00
if opts . routePath != "" {
2021-02-26 22:23:15 +00:00
route . Match . PathSpecifier = & envoy_route_v3 . RouteMatch_Path { Path : opts . routePath }
2019-09-26 02:55:52 +00:00
}
2021-02-26 22:23:15 +00:00
cfg . RouteSpecifier = & envoy_http_v3 . HttpConnectionManager_RouteConfig {
RouteConfig : & envoy_route_v3 . RouteConfiguration {
2020-09-04 18:45:05 +00:00
Name : opts . routeName ,
2021-02-26 22:23:15 +00:00
VirtualHosts : [ ] * envoy_route_v3 . VirtualHost {
2019-09-26 02:55:52 +00:00
{
2020-09-04 18:45:05 +00:00
Name : opts . filterName ,
2019-04-29 16:27:57 +00:00
Domains : [ ] string { "*" } ,
2021-02-26 22:23:15 +00:00
Routes : [ ] * envoy_route_v3 . Route {
2019-09-26 02:55:52 +00:00
route ,
2019-04-29 16:27:57 +00:00
} ,
} ,
} ,
} ,
2019-07-02 03:10:51 +00:00
}
2019-04-29 16:27:57 +00:00
}
2020-09-04 18:45:05 +00:00
if opts . protocol == "http2" || opts . protocol == "grpc" {
2021-02-26 22:23:15 +00:00
cfg . Http2ProtocolOptions = & envoy_core_v3 . Http2ProtocolOptions { }
2019-04-29 16:27:57 +00:00
}
2022-04-26 20:46:29 +00:00
// Note the default leads to setting HttpConnectionManager_SANITIZE
if opts . forwardClientDetails {
cfg . ForwardClientCertDetails = opts . forwardClientPolicy
cfg . SetCurrentClientCertDetails = & envoy_http_v3 . HttpConnectionManager_SetCurrentClientCertDetails {
2023-01-11 14:39:10 +00:00
Subject : & wrapperspb . BoolValue { Value : true } ,
2022-04-26 20:46:29 +00:00
Cert : true ,
Chain : true ,
Dns : true ,
Uri : true ,
}
}
2020-08-27 17:20:58 +00:00
// Like injectConnectFilters for L4, here we ensure that the first filter
// (other than the "envoy.grpc_http1_bridge" filter) in the http filter
// chain of a public listener is the authz filter to prevent unauthorized
// access and that every filter chain uses our TLS certs.
2023-01-06 17:13:40 +00:00
if len ( opts . httpAuthzFilters ) > 0 {
cfg . HttpFilters = append ( opts . httpAuthzFilters , cfg . HttpFilters ... )
2020-08-27 17:20:58 +00:00
}
2020-09-04 18:45:05 +00:00
if opts . protocol == "grpc" {
2022-04-18 16:36:07 +00:00
grpcHttp1Bridge , err := makeEnvoyHTTPFilter (
"envoy.filters.http.grpc_http1_bridge" ,
& envoy_grpc_http1_bridge_v3 . Config { } ,
)
if err != nil {
return nil , err
}
2021-04-29 20:22:03 +00:00
// In envoy 1.14.x the default value "stats_for_all_methods=true" was
// deprecated, and was changed to "false" in 1.18.x. Avoid using the
// default. TODO: we may want to expose this to users somehow easily.
grpcStatsFilter , err := makeEnvoyHTTPFilter (
"envoy.filters.http.grpc_stats" ,
& envoy_grpc_stats_v3 . FilterConfig {
PerMethodStatSpecifier : & envoy_grpc_stats_v3 . FilterConfig_StatsForAllMethods {
2023-08-17 18:43:21 +00:00
StatsForAllMethods : response . MakeBoolValue ( true ) ,
2021-04-29 20:22:03 +00:00
} ,
} ,
)
if err != nil {
return nil , err
}
2022-04-18 16:36:07 +00:00
// Add grpc bridge before router and authz, and the stats in front of that.
2021-04-29 20:22:03 +00:00
cfg . HttpFilters = append ( [ ] * envoy_http_v3 . HttpFilter {
grpcStatsFilter ,
2022-04-18 16:36:07 +00:00
grpcHttp1Bridge ,
2021-04-29 20:22:03 +00:00
} , cfg . HttpFilters ... )
2019-04-29 16:27:57 +00:00
}
2021-02-22 21:00:15 +00:00
return makeFilter ( "envoy.filters.network.http_connection_manager" , cfg )
2019-04-29 16:27:57 +00:00
}
2022-04-18 16:36:07 +00:00
func makeEnvoyListenerFilter ( name string , cfg proto . Message ) ( * envoy_listener_v3 . ListenerFilter , error ) {
2023-01-11 14:39:10 +00:00
any , err := anypb . New ( cfg )
2022-04-18 16:36:07 +00:00
if err != nil {
return nil , err
}
return & envoy_listener_v3 . ListenerFilter {
Name : name ,
ConfigType : & envoy_listener_v3 . ListenerFilter_TypedConfig { TypedConfig : any } ,
} , nil
}
2021-02-26 22:23:15 +00:00
func makeFilter ( name string , cfg proto . Message ) ( * envoy_listener_v3 . Filter , error ) {
2023-01-11 14:39:10 +00:00
any , err := anypb . New ( cfg )
2021-02-22 21:00:15 +00:00
if err != nil {
return nil , err
2020-08-27 17:20:58 +00:00
}
2021-02-26 22:23:15 +00:00
return & envoy_listener_v3 . Filter {
Name : name ,
ConfigType : & envoy_listener_v3 . Filter_TypedConfig { TypedConfig : any } ,
} , nil
2018-10-03 18:18:55 +00:00
}
2021-02-26 22:23:15 +00:00
func makeEnvoyHTTPFilter ( name string , cfg proto . Message ) ( * envoy_http_v3 . HttpFilter , error ) {
2023-01-11 14:39:10 +00:00
any , err := anypb . New ( cfg )
2018-10-03 18:18:55 +00:00
if err != nil {
2020-06-23 20:19:56 +00:00
return nil , err
2018-10-03 18:18:55 +00:00
}
2021-02-26 22:23:15 +00:00
return & envoy_http_v3 . HttpFilter {
2019-06-07 12:10:43 +00:00
Name : name ,
2021-02-26 22:23:15 +00:00
ConfigType : & envoy_http_v3 . HttpFilter_TypedConfig { TypedConfig : any } ,
2018-10-03 18:18:55 +00:00
} , nil
}
2022-06-01 21:53:52 +00:00
func makeCommonTLSContext (
2022-03-30 18:43:59 +00:00
leaf * structs . IssuedCert ,
2022-06-01 21:53:52 +00:00
rootPEMs string ,
2022-03-30 18:43:59 +00:00
tlsParams * envoy_tls_v3 . TlsParameters ,
) * envoy_tls_v3 . CommonTlsContext {
2022-06-01 21:53:52 +00:00
if rootPEMs == "" {
2019-03-22 19:37:14 +00:00
return nil
}
2022-01-11 16:46:42 +00:00
if tlsParams == nil {
tlsParams = & envoy_tls_v3 . TlsParameters { }
}
2021-02-26 22:23:15 +00:00
return & envoy_tls_v3 . CommonTlsContext {
2022-01-11 16:46:42 +00:00
TlsParams : tlsParams ,
2021-02-26 22:23:15 +00:00
TlsCertificates : [ ] * envoy_tls_v3 . TlsCertificate {
2020-06-16 17:19:31 +00:00
{
2021-02-26 22:23:15 +00:00
CertificateChain : & envoy_core_v3 . DataSource {
Specifier : & envoy_core_v3 . DataSource_InlineString {
2022-06-01 21:53:52 +00:00
InlineString : lib . EnsureTrailingNewline ( leaf . CertPEM ) ,
2018-10-03 18:18:55 +00:00
} ,
} ,
2021-02-26 22:23:15 +00:00
PrivateKey : & envoy_core_v3 . DataSource {
Specifier : & envoy_core_v3 . DataSource_InlineString {
2022-06-01 21:53:52 +00:00
InlineString : lib . EnsureTrailingNewline ( leaf . PrivateKeyPEM ) ,
2018-10-03 18:18:55 +00:00
} ,
} ,
} ,
} ,
2021-02-26 22:23:15 +00:00
ValidationContextType : & envoy_tls_v3 . CommonTlsContext_ValidationContext {
ValidationContext : & envoy_tls_v3 . CertificateValidationContext {
2018-10-03 18:18:55 +00:00
// TODO(banks): later for L7 support we may need to configure ALPN here.
2021-02-26 22:23:15 +00:00
TrustedCa : & envoy_core_v3 . DataSource {
Specifier : & envoy_core_v3 . DataSource_InlineString {
2022-06-01 21:53:52 +00:00
InlineString : rootPEMs ,
2018-10-03 18:18:55 +00:00
} ,
} ,
} ,
} ,
}
}
2020-04-27 22:25:37 +00:00
2021-02-26 22:23:15 +00:00
func makeDownstreamTLSTransportSocket ( tlsContext * envoy_tls_v3 . DownstreamTlsContext ) ( * envoy_core_v3 . TransportSocket , error ) {
2021-02-22 21:00:15 +00:00
if tlsContext == nil {
return nil , nil
}
return makeTransportSocket ( "tls" , tlsContext )
}
2021-02-26 22:23:15 +00:00
func makeUpstreamTLSTransportSocket ( tlsContext * envoy_tls_v3 . UpstreamTlsContext ) ( * envoy_core_v3 . TransportSocket , error ) {
2021-02-22 21:00:15 +00:00
if tlsContext == nil {
return nil , nil
}
return makeTransportSocket ( "tls" , tlsContext )
}
2021-02-26 22:23:15 +00:00
func makeTransportSocket ( name string , config proto . Message ) ( * envoy_core_v3 . TransportSocket , error ) {
2023-01-11 14:39:10 +00:00
any , err := anypb . New ( config )
2021-02-22 21:00:15 +00:00
if err != nil {
return nil , err
}
2021-02-26 22:23:15 +00:00
return & envoy_core_v3 . TransportSocket {
2021-02-22 21:00:15 +00:00
Name : name ,
2021-02-26 22:23:15 +00:00
ConfigType : & envoy_core_v3 . TransportSocket_TypedConfig {
2021-02-22 21:00:15 +00:00
TypedConfig : any ,
} ,
} , nil
}
2021-02-26 22:23:15 +00:00
func makeCommonTLSContextFromFiles ( caFile , certFile , keyFile string ) * envoy_tls_v3 . CommonTlsContext {
ctx := envoy_tls_v3 . CommonTlsContext {
TlsParams : & envoy_tls_v3 . TlsParameters { } ,
2020-04-27 22:25:37 +00:00
}
// Verify certificate of peer if caFile is specified
if caFile != "" {
2021-02-26 22:23:15 +00:00
ctx . ValidationContextType = & envoy_tls_v3 . CommonTlsContext_ValidationContext {
ValidationContext : & envoy_tls_v3 . CertificateValidationContext {
TrustedCa : & envoy_core_v3 . DataSource {
Specifier : & envoy_core_v3 . DataSource_Filename {
2020-04-27 22:25:37 +00:00
Filename : caFile ,
} ,
} ,
} ,
}
}
// Present certificate for mTLS if cert and key files are specified
if certFile != "" && keyFile != "" {
2021-02-26 22:23:15 +00:00
ctx . TlsCertificates = [ ] * envoy_tls_v3 . TlsCertificate {
2020-04-27 22:25:37 +00:00
{
2021-02-26 22:23:15 +00:00
CertificateChain : & envoy_core_v3 . DataSource {
Specifier : & envoy_core_v3 . DataSource_Filename {
2020-04-27 22:25:37 +00:00
Filename : certFile ,
} ,
} ,
2021-02-26 22:23:15 +00:00
PrivateKey : & envoy_core_v3 . DataSource {
Specifier : & envoy_core_v3 . DataSource_Filename {
2020-04-27 22:25:37 +00:00
Filename : keyFile ,
} ,
} ,
} ,
}
}
return & ctx
}
2022-03-30 18:43:59 +00:00
// validateListenerTLSConfig rejects cipher-suite configuration when the
// minimum TLS version makes it meaningless. Configuring cipher suites is
// only applicable to connections negotiated via TLS 1.2 or earlier; other
// combinations shouldn't be possible as we validate them at input, but be
// resilient to bugs later.
func validateListenerTLSConfig(tlsMinVersion types.TLSVersion, cipherSuites []types.TLSCipherSuite) error {
	if len(cipherSuites) == 0 {
		return nil
	}
	if _, ok := tlsVersionsWithConfigurableCipherSuites[tlsMinVersion]; !ok {
		return fmt.Errorf("configuring CipherSuites is only applicable to connections negotiated with TLS 1.2 or earlier, TLSMinVersion is set to %s in config", tlsMinVersion)
	}
	return nil
}
// tlsVersionsWithConfigurableCipherSuites is the set of minimum TLS versions
// for which cipher-suite configuration is applicable (TLS 1.2 and earlier).
var tlsVersionsWithConfigurableCipherSuites = map[types.TLSVersion]struct{}{
	// Remove these two if Envoy ever sets TLS 1.3 as default minimum
	types.TLSVersionUnspecified: {},
	types.TLSVersionAuto:        {},

	types.TLSv1_0: {},
	types.TLSv1_1: {},
	types.TLSv1_2: {},
}
// makeTLSParametersFromProxyTLSConfig converts a mesh directional TLS config
// into Envoy TlsParameters. A nil config produces empty parameters so Envoy
// falls back to its defaults.
func makeTLSParametersFromProxyTLSConfig(tlsConf *structs.MeshDirectionalTLSConfig) *envoy_tls_v3.TlsParameters {
	if tlsConf == nil {
		return &envoy_tls_v3.TlsParameters{}
	}
	return makeTLSParametersFromTLSConfig(tlsConf.TLSMinVersion, tlsConf.TLSMaxVersion, tlsConf.CipherSuites)
}
// makeTLSParametersFromTLSConfig translates Consul TLS version and cipher
// suite settings into Envoy TlsParameters. Unspecified versions, or versions
// with no Envoy equivalent in envoyTLSVersions, are left unset so Envoy
// applies its own defaults.
func makeTLSParametersFromTLSConfig(
	tlsMinVersion types.TLSVersion,
	tlsMaxVersion types.TLSVersion,
	cipherSuites []types.TLSCipherSuite,
) *envoy_tls_v3.TlsParameters {
	params := &envoy_tls_v3.TlsParameters{}

	if tlsMinVersion != types.TLSVersionUnspecified {
		if v, ok := envoyTLSVersions[tlsMinVersion]; ok {
			params.TlsMinimumProtocolVersion = v
		}
	}
	if tlsMaxVersion != types.TLSVersionUnspecified {
		if v, ok := envoyTLSVersions[tlsMaxVersion]; ok {
			params.TlsMaximumProtocolVersion = v
		}
	}
	if len(cipherSuites) != 0 {
		params.CipherSuites = types.MarshalEnvoyTLSCipherSuiteStrings(cipherSuites)
	}

	return params
}
// envoyTLSVersions maps Consul TLS version identifiers to their Envoy
// TlsParameters protocol enum equivalents.
var envoyTLSVersions = map[types.TLSVersion]envoy_tls_v3.TlsParameters_TlsProtocol{
	types.TLSVersionAuto: envoy_tls_v3.TlsParameters_TLS_AUTO,
	types.TLSv1_0:        envoy_tls_v3.TlsParameters_TLSv1_0,
	types.TLSv1_1:        envoy_tls_v3.TlsParameters_TLSv1_1,
	types.TLSv1_2:        envoy_tls_v3.TlsParameters_TLSv1_2,
	types.TLSv1_3:        envoy_tls_v3.TlsParameters_TLSv1_3,
}