package proxycfg

import (
	"context"
	"fmt"
	"io/ioutil"
	"path"
	"path/filepath"
	"runtime"
	"sync"
	"sync/atomic"
	"time"

	"github.com/hashicorp/go-hclog"
	"github.com/mitchellh/go-testing-interface"
	"github.com/stretchr/testify/require"

	"github.com/hashicorp/consul/agent/cache"
	cachetype "github.com/hashicorp/consul/agent/cache-types"
	"github.com/hashicorp/consul/agent/connect"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/api"
)

// TestCacheTypes encapsulates all the different cache types proxycfg.State
// will watch/request, so that tests can control their responses.
type TestCacheTypes struct {
	roots             *ControllableCacheType
	leaf              *ControllableCacheType
	intentions        *ControllableCacheType
	health            *ControllableCacheType
	query             *ControllableCacheType
	compiledChain     *ControllableCacheType
	serviceHTTPChecks *ControllableCacheType
	configEntry       *ControllableCacheType
}

// NewTestCacheTypes creates a set of ControllableCacheTypes for all types that
// proxycfg will watch, suitable for testing a proxycfg.State or Manager.
func NewTestCacheTypes(t testing.T) *TestCacheTypes {
	t.Helper()
	ct := &TestCacheTypes{
		roots:             NewControllableCacheType(t),
		leaf:              NewControllableCacheType(t),
		intentions:        NewControllableCacheType(t),
		health:            NewControllableCacheType(t),
		query:             NewControllableCacheType(t),
		compiledChain:     NewControllableCacheType(t),
		serviceHTTPChecks: NewControllableCacheType(t),
		configEntry:       NewControllableCacheType(t),
	}
	ct.query.blocking = false
	return ct
}

// TestCacheWithTypes registers ControllableCacheTypes for all types that
// proxycfg will watch, suitable for testing a proxycfg.State or Manager.
func TestCacheWithTypes(t testing.T, types *TestCacheTypes) *cache.Cache {
	c := cache.New(cache.Options{})
	c.RegisterType(cachetype.ConnectCARootName, types.roots)
	c.RegisterType(cachetype.ConnectCALeafName, types.leaf)
	c.RegisterType(cachetype.IntentionMatchName, types.intentions)
	c.RegisterType(cachetype.HealthServicesName, types.health)
	c.RegisterType(cachetype.PreparedQueryName, types.query)
	c.RegisterType(cachetype.CompiledDiscoveryChainName, types.compiledChain)
	c.RegisterType(cachetype.ServiceHTTPChecksName, types.serviceHTTPChecks)
	c.RegisterType(cachetype.ConfigEntryName, types.configEntry)

	return c
}
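
// A minimal usage sketch (illustrative only, not part of the upstream file):
// a test can wire the controllable types into a cache and then feed it mock
// data via Set. The key passed to Set must match
// path.Join(req.CacheInfo().Key, req.CacheInfo().Datacenter) as computed in
// ControllableCacheType.Fetch; the keys below are hypothetical placeholders,
// not real cache keys.
//
//	types := NewTestCacheTypes(t)
//	c := TestCacheWithTypes(t, types)
//	roots, leaf := TestCerts(t)
//	types.roots.Set("roots/dc1", roots) // hypothetical key
//	types.leaf.Set("leaf/dc1", leaf)    // hypothetical key
//	_ = c // hand c to the Manager/State under test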

// TestCerts generates a CA and leaf certificate suitable for returning as mock
// CA root/leaf cache results.
func TestCerts(t testing.T) (*structs.IndexedCARoots, *structs.IssuedCert) {
	t.Helper()

	ca := connect.TestCA(t, nil)
	roots := &structs.IndexedCARoots{
		ActiveRootID: ca.ID,
		TrustDomain:  fmt.Sprintf("%s.consul", connect.TestClusterID),
		Roots:        []*structs.CARoot{ca},
	}
	return roots, TestLeafForCA(t, ca)
}

// TestLeafForCA generates a new leaf certificate signed by an existing CA,
// suitable for returning as a mock CA leaf cache response.
func TestLeafForCA(t testing.T, ca *structs.CARoot) *structs.IssuedCert {
	leafPEM, pkPEM := connect.TestLeaf(t, "web", ca)

	leafCert, err := connect.ParseCert(leafPEM)
	require.NoError(t, err)

	return &structs.IssuedCert{
		SerialNumber:  connect.EncodeSerialNumber(leafCert.SerialNumber),
		CertPEM:       leafPEM,
		PrivateKeyPEM: pkPEM,
		Service:       "web",
		ServiceURI:    leafCert.URIs[0].String(),
		ValidAfter:    leafCert.NotBefore,
		ValidBefore:   leafCert.NotAfter,
	}
}

// TestIntentions returns a sample intentions match result useful for mocking
// intention match cache results.
func TestIntentions() *structs.IndexedIntentionMatches {
	return &structs.IndexedIntentionMatches{
		Matches: []structs.Intentions{
			[]*structs.Intention{
				{
					ID:              "foo",
					SourceNS:        "default",
					SourceName:      "billing",
					DestinationNS:   "default",
					DestinationName: "web",
					Action:          structs.IntentionActionAllow,
				},
			},
		},
	}
}

// TestUpstreamNodes returns a sample service discovery result useful for
// mocking service discovery cache results.
func TestUpstreamNodes(t testing.T, service string) structs.CheckServiceNodes {
	return structs.CheckServiceNodes{
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "test1",
				Node:       "test1",
				Address:    "10.10.1.1",
				Datacenter: "dc1",
				Partition:  structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
			},
			Service: structs.TestNodeServiceWithName(t, service),
		},
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "test2",
				Node:       "test2",
				Address:    "10.10.1.2",
				Datacenter: "dc1",
				Partition:  structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
			},
			Service: structs.TestNodeServiceWithName(t, service),
		},
	}
}

// TestPreparedQueryNodes returns instances of a service spread across two datacenters.
// The service instance names use a "-target" suffix to ensure we don't use the
// prepared query's name for SAN validation.
// The name of prepared queries won't always match the name of the service they target.
func TestPreparedQueryNodes(t testing.T, query string) structs.CheckServiceNodes {
	nodes := structs.CheckServiceNodes{
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "test1",
				Node:       "test1",
				Address:    "10.10.1.1",
				Datacenter: "dc1",
			},
			Service: &structs.NodeService{
				Kind:    structs.ServiceKindConnectProxy,
				Service: query + "-sidecar-proxy",
				Port:    8080,
				Proxy: structs.ConnectProxyConfig{
					DestinationServiceName: query + "-target",
				},
			},
		},
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "test2",
				Node:       "test2",
				Address:    "10.20.1.2",
				Datacenter: "dc2",
			},
			Service: &structs.NodeService{
				Kind:    structs.ServiceKindTypical,
				Service: query + "-target",
				Port:    8080,
				Connect: structs.ServiceConnect{Native: true},
			},
		},
	}
	return nodes
}

func TestUpstreamNodesInStatus(t testing.T, status string) structs.CheckServiceNodes {
	return structs.CheckServiceNodes{
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "test1",
				Node:       "test1",
				Address:    "10.10.1.1",
				Datacenter: "dc1",
			},
			Service: structs.TestNodeService(t),
			Checks: structs.HealthChecks{
				&structs.HealthCheck{
					Node:        "test1",
					ServiceName: "web",
					Name:        "force",
					Status:      status,
				},
			},
		},
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "test2",
				Node:       "test2",
				Address:    "10.10.1.2",
				Datacenter: "dc1",
			},
			Service: structs.TestNodeService(t),
			Checks: structs.HealthChecks{
				&structs.HealthCheck{
					Node:        "test2",
					ServiceName: "web",
					Name:        "force",
					Status:      status,
				},
			},
		},
	}
}

func TestUpstreamNodesDC2(t testing.T) structs.CheckServiceNodes {
	return structs.CheckServiceNodes{
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "test1",
				Node:       "test1",
				Address:    "10.20.1.1",
				Datacenter: "dc2",
			},
			Service: structs.TestNodeService(t),
		},
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "test2",
				Node:       "test2",
				Address:    "10.20.1.2",
				Datacenter: "dc2",
			},
			Service: structs.TestNodeService(t),
		},
	}
}

func TestUpstreamNodesInStatusDC2(t testing.T, status string) structs.CheckServiceNodes {
	return structs.CheckServiceNodes{
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "test1",
				Node:       "test1",
				Address:    "10.20.1.1",
				Datacenter: "dc2",
			},
			Service: structs.TestNodeService(t),
			Checks: structs.HealthChecks{
				&structs.HealthCheck{
					Node:        "test1",
					ServiceName: "web",
					Name:        "force",
					Status:      status,
				},
			},
		},
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "test2",
				Node:       "test2",
				Address:    "10.20.1.2",
				Datacenter: "dc2",
			},
			Service: structs.TestNodeService(t),
			Checks: structs.HealthChecks{
				&structs.HealthCheck{
					Node:        "test2",
					ServiceName: "web",
					Name:        "force",
					Status:      status,
				},
			},
		},
	}
}

func TestUpstreamNodesAlternate(t testing.T) structs.CheckServiceNodes {
	return structs.CheckServiceNodes{
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "alt-test1",
				Node:       "alt-test1",
				Address:    "10.20.1.1",
				Datacenter: "dc1",
			},
			Service: structs.TestNodeService(t),
		},
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "alt-test2",
				Node:       "alt-test2",
				Address:    "10.20.1.2",
				Datacenter: "dc1",
			},
			Service: structs.TestNodeService(t),
		},
	}
}

func TestGatewayNodesDC1(t testing.T) structs.CheckServiceNodes {
	return structs.CheckServiceNodes{
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "mesh-gateway-1",
				Node:       "mesh-gateway",
				Address:    "10.10.1.1",
				Datacenter: "dc1",
			},
			Service: structs.TestNodeServiceMeshGatewayWithAddrs(t,
				"10.10.1.1", 8443,
				structs.ServiceAddress{Address: "10.10.1.1", Port: 8443},
				structs.ServiceAddress{Address: "198.118.1.1", Port: 443}),
		},
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "mesh-gateway-2",
				Node:       "mesh-gateway",
				Address:    "10.10.1.2",
				Datacenter: "dc1",
			},
			Service: structs.TestNodeServiceMeshGatewayWithAddrs(t,
				"10.10.1.2", 8443,
				structs.ServiceAddress{Address: "10.0.1.2", Port: 8443},
				structs.ServiceAddress{Address: "198.118.1.2", Port: 443}),
		},
	}
}

func TestGatewayNodesDC2(t testing.T) structs.CheckServiceNodes {
	return structs.CheckServiceNodes{
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "mesh-gateway-1",
				Node:       "mesh-gateway",
				Address:    "10.0.1.1",
				Datacenter: "dc2",
			},
			Service: structs.TestNodeServiceMeshGatewayWithAddrs(t,
				"10.0.1.1", 8443,
				structs.ServiceAddress{Address: "10.0.1.1", Port: 8443},
				structs.ServiceAddress{Address: "198.18.1.1", Port: 443}),
		},
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "mesh-gateway-2",
				Node:       "mesh-gateway",
				Address:    "10.0.1.2",
				Datacenter: "dc2",
			},
			Service: structs.TestNodeServiceMeshGatewayWithAddrs(t,
				"10.0.1.2", 8443,
				structs.ServiceAddress{Address: "10.0.1.2", Port: 8443},
				structs.ServiceAddress{Address: "198.18.1.2", Port: 443}),
		},
	}
}

func TestGatewayNodesDC3(t testing.T) structs.CheckServiceNodes {
	return structs.CheckServiceNodes{
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "mesh-gateway-1",
				Node:       "mesh-gateway",
				Address:    "10.30.1.1",
				Datacenter: "dc3",
			},
			Service: structs.TestNodeServiceMeshGatewayWithAddrs(t,
				"10.30.1.1", 8443,
				structs.ServiceAddress{Address: "10.0.1.1", Port: 8443},
				structs.ServiceAddress{Address: "198.38.1.1", Port: 443}),
		},
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "mesh-gateway-2",
				Node:       "mesh-gateway",
				Address:    "10.30.1.2",
				Datacenter: "dc3",
			},
			Service: structs.TestNodeServiceMeshGatewayWithAddrs(t,
				"10.30.1.2", 8443,
				structs.ServiceAddress{Address: "10.30.1.2", Port: 8443},
				structs.ServiceAddress{Address: "198.38.1.2", Port: 443}),
		},
	}
}

func TestGatewayNodesDC4Hostname(t testing.T) structs.CheckServiceNodes {
	return structs.CheckServiceNodes{
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "mesh-gateway-1",
				Node:       "mesh-gateway",
				Address:    "10.30.1.1",
				Datacenter: "dc4",
			},
			Service: structs.TestNodeServiceMeshGatewayWithAddrs(t,
				"10.30.1.1", 8443,
				structs.ServiceAddress{Address: "10.0.1.1", Port: 8443},
				structs.ServiceAddress{Address: "123.us-west-2.elb.notaws.com", Port: 443}),
		},
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "mesh-gateway-2",
				Node:       "mesh-gateway",
				Address:    "10.30.1.2",
				Datacenter: "dc4",
			},
			Service: structs.TestNodeServiceMeshGatewayWithAddrs(t,
				"10.30.1.2", 8443,
				structs.ServiceAddress{Address: "10.30.1.2", Port: 8443},
				structs.ServiceAddress{Address: "456.us-west-2.elb.notaws.com", Port: 443}),
		},
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "mesh-gateway-3",
				Node:       "mesh-gateway",
				Address:    "10.30.1.3",
				Datacenter: "dc4",
			},
			Service: structs.TestNodeServiceMeshGatewayWithAddrs(t,
				"10.30.1.3", 8443,
				structs.ServiceAddress{Address: "10.30.1.3", Port: 8443},
				structs.ServiceAddress{Address: "198.38.1.1", Port: 443}),
		},
	}
}

func TestGatewayNodesDC5Hostname(t testing.T) structs.CheckServiceNodes {
	return structs.CheckServiceNodes{
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "mesh-gateway-1",
				Node:       "mesh-gateway",
				Address:    "10.30.1.1",
				Datacenter: "dc5",
			},
			Service: structs.TestNodeServiceMeshGatewayWithAddrs(t,
				"10.30.1.1", 8443,
				structs.ServiceAddress{Address: "10.0.1.1", Port: 8443},
				structs.ServiceAddress{Address: "123.us-west-2.elb.notaws.com", Port: 443}),
		},
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "mesh-gateway-2",
				Node:       "mesh-gateway",
				Address:    "10.30.1.2",
				Datacenter: "dc5",
			},
			Service: structs.TestNodeServiceMeshGatewayWithAddrs(t,
				"10.30.1.2", 8443,
				structs.ServiceAddress{Address: "10.30.1.2", Port: 8443},
				structs.ServiceAddress{Address: "456.us-west-2.elb.notaws.com", Port: 443}),
		},
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "mesh-gateway-3",
				Node:       "mesh-gateway",
				Address:    "10.30.1.3",
				Datacenter: "dc5",
			},
			Service: structs.TestNodeServiceMeshGatewayWithAddrs(t,
				"10.30.1.3", 8443,
				structs.ServiceAddress{Address: "10.30.1.3", Port: 8443},
				structs.ServiceAddress{Address: "198.38.1.1", Port: 443}),
		},
	}
}

func TestGatewayNodesDC6Hostname(t testing.T) structs.CheckServiceNodes {
	return structs.CheckServiceNodes{
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "mesh-gateway-1",
				Node:       "mesh-gateway",
				Address:    "10.30.1.1",
				Datacenter: "dc6",
			},
			Service: structs.TestNodeServiceMeshGatewayWithAddrs(t,
				"10.30.1.1", 8443,
				structs.ServiceAddress{Address: "10.0.1.1", Port: 8443},
				structs.ServiceAddress{Address: "123.us-east-1.elb.notaws.com", Port: 443}),
			Checks: structs.HealthChecks{
				{
					Status: api.HealthCritical,
				},
			},
		},
	}
}

func TestGatewayServiceGroupBarDC1(t testing.T) structs.CheckServiceNodes {
	return structs.CheckServiceNodes{
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "bar-node-1",
				Node:       "bar-node-1",
				Address:    "10.1.1.4",
				Datacenter: "dc1",
			},
			Service: &structs.NodeService{
				Kind:    structs.ServiceKindConnectProxy,
				Service: "bar-sidecar-proxy",
				Address: "172.16.1.6",
				Port:    2222,
				Meta: map[string]string{
					"version": "1",
				},
				Proxy: structs.ConnectProxyConfig{
					DestinationServiceName: "bar",
					Upstreams:              structs.TestUpstreams(t),
				},
			},
		},
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "bar-node-2",
				Node:       "bar-node-2",
				Address:    "10.1.1.5",
				Datacenter: "dc1",
			},
			Service: &structs.NodeService{
				Kind:    structs.ServiceKindConnectProxy,
				Service: "bar-sidecar-proxy",
				Address: "172.16.1.7",
				Port:    2222,
				Meta: map[string]string{
					"version": "1",
				},
				Proxy: structs.ConnectProxyConfig{
					DestinationServiceName: "bar",
					Upstreams:              structs.TestUpstreams(t),
				},
			},
		},
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "bar-node-3",
				Node:       "bar-node-3",
				Address:    "10.1.1.6",
				Datacenter: "dc1",
			},
			Service: &structs.NodeService{
				Kind:    structs.ServiceKindConnectProxy,
				Service: "bar-sidecar-proxy",
				Address: "172.16.1.8",
				Port:    2222,
				Meta: map[string]string{
					"version": "2",
				},
				Proxy: structs.ConnectProxyConfig{
					DestinationServiceName: "bar",
					Upstreams:              structs.TestUpstreams(t),
				},
			},
		},
	}
}

func TestGatewayServiceGroupFooDC1(t testing.T) structs.CheckServiceNodes {
	return structs.CheckServiceNodes{
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "foo-node-1",
				Node:       "foo-node-1",
				Address:    "10.1.1.1",
				Datacenter: "dc1",
			},
			Service: &structs.NodeService{
				Kind:    structs.ServiceKindConnectProxy,
				Service: "foo-sidecar-proxy",
				Address: "172.16.1.3",
				Port:    2222,
				Meta: map[string]string{
					"version": "1",
				},
				Proxy: structs.ConnectProxyConfig{
					DestinationServiceName: "foo",
					Upstreams:              structs.TestUpstreams(t),
				},
			},
		},
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "foo-node-2",
				Node:       "foo-node-2",
				Address:    "10.1.1.2",
				Datacenter: "dc1",
			},
			Service: &structs.NodeService{
				Kind:    structs.ServiceKindConnectProxy,
				Service: "foo-sidecar-proxy",
				Address: "172.16.1.4",
				Port:    2222,
				Meta: map[string]string{
					"version": "1",
				},
				Proxy: structs.ConnectProxyConfig{
					DestinationServiceName: "foo",
					Upstreams:              structs.TestUpstreams(t),
				},
			},
		},
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "foo-node-3",
				Node:       "foo-node-3",
				Address:    "10.1.1.3",
				Datacenter: "dc1",
			},
			Service: &structs.NodeService{
				Kind:    structs.ServiceKindConnectProxy,
				Service: "foo-sidecar-proxy",
				Address: "172.16.1.5",
				Port:    2222,
				Meta: map[string]string{
					"version": "2",
				},
				Proxy: structs.ConnectProxyConfig{
					DestinationServiceName: "foo",
					Upstreams:              structs.TestUpstreams(t),
				},
			},
		},
		structs.CheckServiceNode{
			Node: &structs.Node{
				ID:         "foo-node-4",
				Node:       "foo-node-4",
				Address:    "10.1.1.7",
				Datacenter: "dc1",
			},
			Service: &structs.NodeService{
				Kind:    structs.ServiceKindConnectProxy,
				Service: "foo-sidecar-proxy",
				Address: "172.16.1.9",
				Port:    2222,
				Meta: map[string]string{
					"version": "2",
				},
				Proxy: structs.ConnectProxyConfig{
					DestinationServiceName: "foo",
					Upstreams:              structs.TestUpstreams(t),
				},
			},
			Checks: structs.HealthChecks{
				&structs.HealthCheck{
					Node:        "foo-node-4",
					ServiceName: "foo-sidecar-proxy",
					Name:        "proxy-alive",
					Status:      "warning",
				},
			},
		},
	}
}

type noopCacheNotifier struct{}

var _ CacheNotifier = (*noopCacheNotifier)(nil)

func (*noopCacheNotifier) Notify(_ context.Context, _ string, _ cache.Request, _ string, _ chan<- cache.UpdateEvent) error {
	return nil
}

type noopHealth struct{}

var _ Health = (*noopHealth)(nil)

func (*noopHealth) Notify(_ context.Context, _ structs.ServiceSpecificRequest, _ string, _ chan<- cache.UpdateEvent) error {
	return nil
}

// testConfigSnapshotFixture runs the normal proxycfg event machinery to
// assemble a ConfigSnapshot, ensuring that any test data built this way is
// actually a valid configuration.
//
// The provided ns argument is manipulated by the nsFn callback, if present,
// before it is used.
//
// The events provided in the updates slice are fed into the event machinery.
func testConfigSnapshotFixture(
	t testing.T,
	ns *structs.NodeService,
	nsFn func(ns *structs.NodeService),
	serverSNIFn ServerSNIFunc,
	updates []cache.UpdateEvent,
) *ConfigSnapshot {
	const token = ""

	if nsFn != nil {
		nsFn(ns)
	}

	config := stateConfig{
		logger: hclog.NewNullLogger(),
		source: &structs.QuerySource{
			Datacenter: "dc1",
		},
		cache:  &noopCacheNotifier{},
		health: &noopHealth{},
		dnsConfig: DNSConfig{ // TODO: make configurable
			Domain:    "consul",
			AltDomain: "",
		},
		serverSNIFn:           serverSNIFn,
		intentionDefaultAllow: false, // TODO: make configurable
	}
	s, err := newServiceInstanceFromNodeService(ns, token)
	if err != nil {
		t.Fatalf("err: %v", err)
		return nil
	}

	handler, err := newKindHandler(config, s, nil) // NOTE: nil channel
	if err != nil {
		t.Fatalf("err: %v", err)
		return nil
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	snap, err := handler.initialize(ctx)
	if err != nil {
		t.Fatalf("err: %v", err)
		return nil
	}

	for _, u := range updates {
		if err := handler.handleUpdate(ctx, u, &snap); err != nil {
			t.Fatalf("Failed to handle update from watch %q: %v", u.CorrelationID, err)
			return nil
		}
	}
	return &snap
}
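
// A minimal usage sketch (illustrative only, not part of the upstream file):
// a snapshot helper would typically build a connect-proxy NodeService and then
// replay the watch results it needs as update events. The correlation ID and
// payload below are hypothetical placeholders; real tests use the IDs that the
// kind handler registered for its watches.
//
//	snap := testConfigSnapshotFixture(t, ns, nil, nil, []cache.UpdateEvent{
//		{
//			CorrelationID: "roots", // hypothetical watch ID
//			Result:        roots,   // e.g. *structs.IndexedCARoots from TestCerts
//		},
//	})
//	_ = snap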

func testSpliceEvents(base, extra []cache.UpdateEvent) []cache.UpdateEvent {
	if len(extra) == 0 {
		return base
	}
	var (
		hasExtra      = make(map[string]cache.UpdateEvent)
		completeExtra = make(map[string]struct{})

		allEvents []cache.UpdateEvent
	)

	for _, e := range extra {
		hasExtra[e.CorrelationID] = e
	}

	// Override base events with extras if they share the same correlationID,
	// then put the rest of the extras at the end.
	for _, e := range base {
		if extraEvt, ok := hasExtra[e.CorrelationID]; ok {
			if extraEvt.Result != nil { // nil results are tombstones
				allEvents = append(allEvents, extraEvt)
			}
			completeExtra[e.CorrelationID] = struct{}{}
		} else {
			allEvents = append(allEvents, e)
		}
	}
	for _, e := range extra {
		if _, ok := completeExtra[e.CorrelationID]; !ok {
			allEvents = append(allEvents, e)
		}
	}
	return allEvents
}
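
// For example (illustrative only): splicing base events [A, B] with extra
// events [B', C] yields [A, B', C]; if B' has a nil Result it acts as a
// tombstone and B is dropped entirely, yielding [A, C].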

func testSpliceNodeServiceFunc(prev, next func(ns *structs.NodeService)) func(ns *structs.NodeService) {
	return func(ns *structs.NodeService) {
		if prev != nil {
			prev(ns)
		}
		next(ns)
	}
}

// ControllableCacheType is a cache.Type that simulates a typical blocking RPC
// but lets us easily control the responses and when they are delivered.
type ControllableCacheType struct {
	index uint64
	value sync.Map
	// A condvar is needed to trigger all blocking requests (there might be
	// multiple for the same type due to background refresh and timing issues)
	// when values change. Channels would make it nondeterministic which waiter
	// gets triggered, or would need extra locking to coordinate replacing them
	// after close, etc.
	triggerMu sync.Mutex
	trigger   *sync.Cond
	blocking  bool
	lastReq   atomic.Value
}

// NewControllableCacheType returns a cache.Type that can be controlled for
// testing.
func NewControllableCacheType(t testing.T) *ControllableCacheType {
	c := &ControllableCacheType{
		index:    5,
		blocking: true,
	}
	c.trigger = sync.NewCond(&c.triggerMu)
	return c
}

// Set sets the response value to be returned from subsequent cache gets for
// the type.
func (ct *ControllableCacheType) Set(key string, value interface{}) {
	atomic.AddUint64(&ct.index, 1)
	ct.value.Store(key, value)
	ct.triggerMu.Lock()
	ct.trigger.Broadcast()
	ct.triggerMu.Unlock()
}

// Fetch implements cache.Type. It simulates blocking or non-blocking queries.
func (ct *ControllableCacheType) Fetch(opts cache.FetchOptions, req cache.Request) (cache.FetchResult, error) {
	index := atomic.LoadUint64(&ct.index)

	ct.lastReq.Store(req)

	shouldBlock := ct.blocking && opts.MinIndex > 0 && opts.MinIndex == index
	if shouldBlock {
		// Wait for a return to be triggered. We ignore timeouts based on
		// opts.Timeout since in practice they will always be far longer than
		// our tests run for, and the caller can simulate a timeout by
		// triggering a return without changing the index or value.
		ct.triggerMu.Lock()
		ct.trigger.Wait()
		ct.triggerMu.Unlock()
	}

	info := req.CacheInfo()
	key := path.Join(info.Key, info.Datacenter) // omit token for testing purposes

	// Reload the index as it probably got bumped.
	index = atomic.LoadUint64(&ct.index)
	val, _ := ct.value.Load(key)

	if err, ok := val.(error); ok {
		return cache.FetchResult{
			Value: nil,
			Index: index,
		}, err
	}
	return cache.FetchResult{
		Value: val,
		Index: index,
	}, nil
}

func (ct *ControllableCacheType) RegisterOptions() cache.RegisterOptions {
	return cache.RegisterOptions{
		Refresh:          ct.blocking,
		SupportsBlocking: ct.blocking,
		QueryTimeout:     10 * time.Minute,
	}
}
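
// A minimal usage sketch (illustrative only, not part of the upstream file):
// after the first Fetch returns, a subsequent Fetch with MinIndex equal to the
// current index blocks on the condvar until Set bumps the index and
// broadcasts, at which point the new value is returned. Storing an error value
// via Set makes the next Fetch for that key return the error instead. The key
// below is a hypothetical placeholder; it must match
// path.Join(info.Key, info.Datacenter) for the request being fetched.
//
//	ct := NewControllableCacheType(t)
//	ct.Set("some-key/dc1", errors.New("boom")) // next Fetch for this key returns the error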

// golden is used to read golden files stored in consul/agent/xds/testdata.
func golden(t testing.T, name string) string {
	t.Helper()

	golden := filepath.Join(projectRoot(), "../", "/xds/testdata", name+".golden")
	expected, err := ioutil.ReadFile(golden)
	require.NoError(t, err)

	return string(expected)
}

func projectRoot() string {
	_, base, _, _ := runtime.Caller(0)
	return filepath.Dir(base)
}