mirror of https://github.com/hashicorp/consul
Backport of [API Gateway] Fix invalid cluster causing gateway programming delay into release/1.15.x (#16668)
* backport of commit 9ea73b3b8d (pull/16675/head)
* backport of commit d3cffdeb4d
* backport of commit 0848aac017
* backport of commit 90b5e39d2d
* Refactor and fix flaky tests * Fix bad merge * add file that was never backported * Fix bad merge again * fix duplicate method * remove extra import * backport a slew of testing library code * backport changes coinciding with library update * backport changes coinciding with library update --------- Co-authored-by: Andrew Stucki <andrew.stucki@hashicorp.com>
parent
bc111cb215
commit
eb63d46abb
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
gateways: Fixes a bug where API gateways using HTTP listeners were taking upwards of 15 seconds to get configured over xDS.
|
||||
```
|
|
@ -128,6 +128,29 @@ func (l *GatewayChainSynthesizer) Synthesize(chains ...*structs.CompiledDiscover
|
|||
return nil, nil, err
|
||||
}
|
||||
|
||||
node := compiled.Nodes[compiled.StartNode]
|
||||
if node.IsRouter() {
|
||||
resolverPrefix := structs.DiscoveryGraphNodeTypeResolver + ":" + node.Name
|
||||
|
||||
// clean out the clusters that will get added for the router
|
||||
for name := range compiled.Nodes {
|
||||
if strings.HasPrefix(name, resolverPrefix) {
|
||||
delete(compiled.Nodes, name)
|
||||
}
|
||||
}
|
||||
|
||||
// clean out the route rules that'll get added for the router
|
||||
filtered := []*structs.DiscoveryRoute{}
|
||||
for _, route := range node.Routes {
|
||||
if strings.HasPrefix(route.NextNode, resolverPrefix) {
|
||||
continue
|
||||
}
|
||||
filtered = append(filtered, route)
|
||||
}
|
||||
node.Routes = filtered
|
||||
}
|
||||
compiled.Nodes[compiled.StartNode] = node
|
||||
|
||||
// fix up the nodes for the terminal targets to either be a splitter or resolver if there is no splitter present
|
||||
for name, node := range compiled.Nodes {
|
||||
switch node.Type {
|
||||
|
|
|
@ -47,7 +47,7 @@ func TestGatewayChainSynthesizer_AddHTTPRoute(t *testing.T) {
|
|||
route structs.HTTPRouteConfigEntry
|
||||
expectedMatchesByHostname map[string][]hostnameMatch
|
||||
}{
|
||||
"no hostanames": {
|
||||
"no hostnames": {
|
||||
route: structs.HTTPRouteConfigEntry{
|
||||
Kind: structs.HTTPRoute,
|
||||
Name: "route",
|
||||
|
@ -539,15 +539,6 @@ func TestGatewayChainSynthesizer_Synthesize(t *testing.T) {
|
|||
Protocol: "http",
|
||||
StartNode: "router:gateway-suffix-9b9265b.default.default",
|
||||
Nodes: map[string]*structs.DiscoveryGraphNode{
|
||||
"resolver:gateway-suffix-9b9265b.default.default.dc1": {
|
||||
Type: "resolver",
|
||||
Name: "gateway-suffix-9b9265b.default.default.dc1",
|
||||
Resolver: &structs.DiscoveryResolver{
|
||||
Target: "gateway-suffix-9b9265b.default.default.dc1",
|
||||
Default: true,
|
||||
ConnectTimeout: 5000000000,
|
||||
},
|
||||
},
|
||||
"router:gateway-suffix-9b9265b.default.default": {
|
||||
Type: "router",
|
||||
Name: "gateway-suffix-9b9265b.default.default",
|
||||
|
@ -569,20 +560,6 @@ func TestGatewayChainSynthesizer_Synthesize(t *testing.T) {
|
|||
},
|
||||
},
|
||||
NextNode: "resolver:foo.default.default.dc1",
|
||||
}, {
|
||||
Definition: &structs.ServiceRoute{
|
||||
Match: &structs.ServiceRouteMatch{
|
||||
HTTP: &structs.ServiceRouteHTTPMatch{
|
||||
PathPrefix: "/",
|
||||
},
|
||||
},
|
||||
Destination: &structs.ServiceRouteDestination{
|
||||
Service: "gateway-suffix-9b9265b",
|
||||
Partition: "default",
|
||||
Namespace: "default",
|
||||
},
|
||||
},
|
||||
NextNode: "resolver:gateway-suffix-9b9265b.default.default.dc1",
|
||||
}},
|
||||
},
|
||||
"resolver:foo.default.default.dc1": {
|
||||
|
@ -704,15 +681,6 @@ func TestGatewayChainSynthesizer_ComplexChain(t *testing.T) {
|
|||
Protocol: "http",
|
||||
StartNode: "router:gateway-suffix-9b9265b.default.default",
|
||||
Nodes: map[string]*structs.DiscoveryGraphNode{
|
||||
"resolver:gateway-suffix-9b9265b.default.default.dc1": {
|
||||
Type: "resolver",
|
||||
Name: "gateway-suffix-9b9265b.default.default.dc1",
|
||||
Resolver: &structs.DiscoveryResolver{
|
||||
Target: "gateway-suffix-9b9265b.default.default.dc1",
|
||||
Default: true,
|
||||
ConnectTimeout: 5000000000,
|
||||
},
|
||||
},
|
||||
"resolver:service-one.default.default.dc1": {
|
||||
Type: "resolver",
|
||||
Name: "service-one.default.default.dc1",
|
||||
|
@ -770,20 +738,6 @@ func TestGatewayChainSynthesizer_ComplexChain(t *testing.T) {
|
|||
},
|
||||
},
|
||||
NextNode: "splitter:splitter-one.default.default",
|
||||
}, {
|
||||
Definition: &structs.ServiceRoute{
|
||||
Match: &structs.ServiceRouteMatch{
|
||||
HTTP: &structs.ServiceRouteHTTPMatch{
|
||||
PathPrefix: "/",
|
||||
},
|
||||
},
|
||||
Destination: &structs.ServiceRouteDestination{
|
||||
Service: "gateway-suffix-9b9265b",
|
||||
Partition: "default",
|
||||
Namespace: "default",
|
||||
},
|
||||
},
|
||||
NextNode: "resolver:gateway-suffix-9b9265b.default.default.dc1",
|
||||
}},
|
||||
},
|
||||
"splitter:splitter-one.default.default": {
|
||||
|
|
|
@ -11,6 +11,8 @@ import (
|
|||
envoy_route_v3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
|
||||
envoy_tls_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
|
||||
|
||||
"github.com/hashicorp/consul/agent/connect"
|
||||
"github.com/hashicorp/consul/agent/consul/discoverychain"
|
||||
"github.com/hashicorp/consul/agent/xds/testcommon"
|
||||
"github.com/hashicorp/consul/envoyextensions/xdscommon"
|
||||
|
||||
|
@ -175,7 +177,7 @@ func TestAllResourcesFromSnapshot(t *testing.T) {
|
|||
tests = append(tests, getMeshGatewayPeeringGoldenTestCases()...)
|
||||
tests = append(tests, getTrafficControlPeeringGoldenTestCases()...)
|
||||
tests = append(tests, getEnterpriseGoldenTestCases()...)
|
||||
tests = append(tests, getAPIGatewayGoldenTestCases()...)
|
||||
tests = append(tests, getAPIGatewayGoldenTestCases(t)...)
|
||||
|
||||
latestEnvoyVersion := xdscommon.EnvoyVersions[0]
|
||||
for _, envoyVersion := range xdscommon.EnvoyVersions {
|
||||
|
@ -314,7 +316,13 @@ AAJAMaoXmoYVdgXV+CPuBb2M4XCpuzLu3bcA2PXm5ipSyIgntMKwXV7r
|
|||
-----END CERTIFICATE-----`
|
||||
)
|
||||
|
||||
func getAPIGatewayGoldenTestCases() []goldenTestCase {
|
||||
func getAPIGatewayGoldenTestCases(t *testing.T) []goldenTestCase {
|
||||
t.Helper()
|
||||
|
||||
service := structs.NewServiceName("service", nil)
|
||||
serviceUID := proxycfg.NewUpstreamIDFromServiceName(service)
|
||||
serviceChain := discoverychain.TestCompileConfigEntries(t, "service", "default", "default", "dc1", connect.TestClusterID+".consul", nil)
|
||||
|
||||
return []goldenTestCase{
|
||||
{
|
||||
name: "api-gateway-with-tcp-route-and-inline-certificate",
|
||||
|
@ -362,5 +370,48 @@ func getAPIGatewayGoldenTestCases() []goldenTestCase {
|
|||
}}, nil)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "api-gateway-with-http-route-and-inline-certificate",
|
||||
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
|
||||
return proxycfg.TestConfigSnapshotAPIGateway(t, "default", nil, func(entry *structs.APIGatewayConfigEntry, bound *structs.BoundAPIGatewayConfigEntry) {
|
||||
entry.Listeners = []structs.APIGatewayListener{
|
||||
{
|
||||
Name: "listener",
|
||||
Protocol: structs.ListenerProtocolHTTP,
|
||||
Port: 8080,
|
||||
},
|
||||
}
|
||||
bound.Listeners = []structs.BoundAPIGatewayListener{
|
||||
{
|
||||
Name: "listener",
|
||||
Routes: []structs.ResourceReference{{
|
||||
Kind: structs.HTTPRoute,
|
||||
Name: "route",
|
||||
}},
|
||||
},
|
||||
}
|
||||
}, []structs.BoundRoute{
|
||||
&structs.HTTPRouteConfigEntry{
|
||||
Kind: structs.HTTPRoute,
|
||||
Name: "route",
|
||||
Rules: []structs.HTTPRouteRule{{
|
||||
Services: []structs.HTTPService{{
|
||||
Name: "service",
|
||||
}},
|
||||
}},
|
||||
},
|
||||
}, nil, []proxycfg.UpdateEvent{{
|
||||
CorrelationID: "discovery-chain:" + serviceUID.String(),
|
||||
Result: &structs.DiscoveryChainResponse{
|
||||
Chain: serviceChain,
|
||||
},
|
||||
}, {
|
||||
CorrelationID: "upstream-target:" + serviceChain.ID() + ":" + serviceUID.String(),
|
||||
Result: &structs.IndexedCheckServiceNodes{
|
||||
Nodes: proxycfg.TestUpstreamNodes(t, "service"),
|
||||
},
|
||||
}})
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
55
agent/xds/testdata/clusters/api-gateway-with-http-route-and-inline-certificate.latest.golden
vendored
Normal file
55
agent/xds/testdata/clusters/api-gateway-with-http-route-and-inline-certificate.latest.golden
vendored
Normal file
|
@ -0,0 +1,55 @@
|
|||
{
|
||||
"versionInfo": "00000001",
|
||||
"resources": [
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
|
||||
"name": "service.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
|
||||
"altStatName": "service.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
|
||||
"type": "EDS",
|
||||
"edsClusterConfig": {
|
||||
"edsConfig": {
|
||||
"ads": {},
|
||||
"resourceApiVersion": "V3"
|
||||
}
|
||||
},
|
||||
"connectTimeout": "5s",
|
||||
"circuitBreakers": {},
|
||||
"outlierDetection": {},
|
||||
"commonLbConfig": {
|
||||
"healthyPanicThreshold": {}
|
||||
},
|
||||
"transportSocket": {
|
||||
"name": "tls",
|
||||
"typedConfig": {
|
||||
"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
|
||||
"commonTlsContext": {
|
||||
"tlsParams": {},
|
||||
"tlsCertificates": [
|
||||
{
|
||||
"certificateChain": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
|
||||
},
|
||||
"privateKey": {
|
||||
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
|
||||
}
|
||||
}
|
||||
],
|
||||
"validationContext": {
|
||||
"trustedCa": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
|
||||
},
|
||||
"matchSubjectAltNames": [
|
||||
{
|
||||
"exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/service"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"sni": "service.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
|
||||
"nonce": "00000001"
|
||||
}
|
41
agent/xds/testdata/endpoints/api-gateway-with-http-route-and-inline-certificate.latest.golden
vendored
Normal file
41
agent/xds/testdata/endpoints/api-gateway-with-http-route-and-inline-certificate.latest.golden
vendored
Normal file
|
@ -0,0 +1,41 @@
|
|||
{
|
||||
"versionInfo": "00000001",
|
||||
"resources": [
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
|
||||
"clusterName": "service.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
|
||||
"endpoints": [
|
||||
{
|
||||
"lbEndpoints": [
|
||||
{
|
||||
"endpoint": {
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "10.10.1.1",
|
||||
"portValue": 8080
|
||||
}
|
||||
}
|
||||
},
|
||||
"healthStatus": "HEALTHY",
|
||||
"loadBalancingWeight": 1
|
||||
},
|
||||
{
|
||||
"endpoint": {
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "10.10.1.2",
|
||||
"portValue": 8080
|
||||
}
|
||||
}
|
||||
},
|
||||
"healthStatus": "HEALTHY",
|
||||
"loadBalancingWeight": 1
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"typeUrl": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
|
||||
"nonce": "00000001"
|
||||
}
|
49
agent/xds/testdata/listeners/api-gateway-with-http-route-and-inline-certificate.latest.golden
vendored
Normal file
49
agent/xds/testdata/listeners/api-gateway-with-http-route-and-inline-certificate.latest.golden
vendored
Normal file
|
@ -0,0 +1,49 @@
|
|||
{
|
||||
"versionInfo": "00000001",
|
||||
"resources": [
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.config.listener.v3.Listener",
|
||||
"name": "http:1.2.3.4:8080",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "1.2.3.4",
|
||||
"portValue": 8080
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.filters.network.http_connection_manager",
|
||||
"typedConfig": {
|
||||
"@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager",
|
||||
"statPrefix": "ingress_upstream_8080",
|
||||
"rds": {
|
||||
"configSource": {
|
||||
"ads": {},
|
||||
"resourceApiVersion": "V3"
|
||||
},
|
||||
"routeConfigName": "8080"
|
||||
},
|
||||
"httpFilters": [
|
||||
{
|
||||
"name": "envoy.filters.http.router",
|
||||
"typedConfig": {
|
||||
"@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"
|
||||
}
|
||||
}
|
||||
],
|
||||
"tracing": {
|
||||
"randomSampling": {}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"trafficDirection": "OUTBOUND"
|
||||
}
|
||||
],
|
||||
"typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener",
|
||||
"nonce": "00000001"
|
||||
}
|
31
agent/xds/testdata/routes/api-gateway-with-http-route-and-inline-certificate.latest.golden
vendored
Normal file
31
agent/xds/testdata/routes/api-gateway-with-http-route-and-inline-certificate.latest.golden
vendored
Normal file
|
@ -0,0 +1,31 @@
|
|||
{
|
||||
"versionInfo": "00000001",
|
||||
"resources": [
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration",
|
||||
"name": "8080",
|
||||
"virtualHosts": [
|
||||
{
|
||||
"name": "api-gateway-listener-9b9265b",
|
||||
"domains": [
|
||||
"*",
|
||||
"*:8080"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": {
|
||||
"prefix": "/"
|
||||
},
|
||||
"route": {
|
||||
"cluster": "service.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"validateClusters": true
|
||||
}
|
||||
],
|
||||
"typeUrl": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration",
|
||||
"nonce": "00000001"
|
||||
}
|
5
agent/xds/testdata/secrets/api-gateway-with-http-route-and-inline-certificate.latest.golden
vendored
Normal file
5
agent/xds/testdata/secrets/api-gateway-with-http-route-and-inline-certificate.latest.golden
vendored
Normal file
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"versionInfo": "00000001",
|
||||
"typeUrl": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret",
|
||||
"nonce": "00000001"
|
||||
}
|
|
@ -24,9 +24,9 @@ const (
|
|||
)
|
||||
|
||||
// CatalogServiceExists verifies the service name exists in the Consul catalog
|
||||
func CatalogServiceExists(t *testing.T, c *api.Client, svc string) {
|
||||
func CatalogServiceExists(t *testing.T, c *api.Client, svc string, opts *api.QueryOptions) {
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
services, _, err := c.Catalog().Service(svc, "", nil)
|
||||
services, _, err := c.Catalog().Service(svc, "", opts)
|
||||
if err != nil {
|
||||
r.Fatal("error reading service data")
|
||||
}
|
||||
|
@ -122,22 +122,22 @@ func ServiceLogContains(t *testing.T, service libservice.Service, target string)
|
|||
// has a `FORTIO_NAME` env variable set. This validates that the client is sending
|
||||
// traffic to the right envoy proxy.
|
||||
//
|
||||
// If reqHost is set, the Host field of the HTTP request will be set to its value.
|
||||
//
|
||||
// It retries with timeout defaultHTTPTimeout and wait defaultHTTPWait.
|
||||
func AssertFortioName(t *testing.T, urlbase string, name string) {
|
||||
func AssertFortioName(t *testing.T, urlbase string, name string, reqHost string) {
|
||||
t.Helper()
|
||||
var fortioNameRE = regexp.MustCompile(("\nFORTIO_NAME=(.+)\n"))
|
||||
client := &http.Client{
|
||||
Transport: &http.Transport{
|
||||
DisableKeepAlives: true,
|
||||
},
|
||||
}
|
||||
client := cleanhttp.DefaultClient()
|
||||
retry.RunWith(&retry.Timer{Timeout: defaultHTTPTimeout, Wait: defaultHTTPWait}, t, func(r *retry.R) {
|
||||
fullurl := fmt.Sprintf("%s/debug?env=dump", urlbase)
|
||||
t.Logf("making call to %s", fullurl)
|
||||
req, err := http.NewRequest("GET", fullurl, nil)
|
||||
if err != nil {
|
||||
r.Fatal("could not make request to service ", fullurl)
|
||||
}
|
||||
if reqHost != "" {
|
||||
req.Host = reqHost
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
|
|
|
@ -17,6 +17,7 @@ type Agent interface {
|
|||
NewClient(string, bool) (*api.Client, error)
|
||||
GetName() string
|
||||
GetAgentName() string
|
||||
GetPartition() string
|
||||
GetPod() testcontainers.Container
|
||||
ClaimAdminPort() (int, error)
|
||||
GetConfig() Config
|
||||
|
|
|
@ -245,6 +245,11 @@ func (b *Builder) Peering(enable bool) *Builder {
|
|||
return b
|
||||
}
|
||||
|
||||
func (b *Builder) Partition(name string) *Builder {
|
||||
b.conf.Set("partition", name)
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *Builder) RetryJoin(names ...string) *Builder {
|
||||
b.conf.Set("retry_join", names)
|
||||
return b
|
||||
|
|
|
@ -66,7 +66,7 @@ func NewN(t TestingT, conf Config, count int) (*Cluster, error) {
|
|||
func New(t TestingT, configs []Config, ports ...int) (*Cluster, error) {
|
||||
id, err := shortid.Generate()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not cluster id: %w", err)
|
||||
return nil, fmt.Errorf("could not generate cluster id: %w", err)
|
||||
}
|
||||
|
||||
name := fmt.Sprintf("consul-int-cluster-%s", id)
|
||||
|
@ -114,7 +114,7 @@ func (c *Cluster) AddN(conf Config, count int, join bool) error {
|
|||
return c.Add(configs, join)
|
||||
}
|
||||
|
||||
// Add starts an agent with the given configuration and joins it with the existing cluster
|
||||
// Add starts agents with the given configurations and joins them to the existing cluster
|
||||
func (c *Cluster) Add(configs []Config, serfJoin bool, ports ...int) (xe error) {
|
||||
if c.Index == 0 && !serfJoin {
|
||||
return fmt.Errorf("the first call to Cluster.Add must have serfJoin=true")
|
||||
|
@ -125,10 +125,10 @@ func (c *Cluster) Add(configs []Config, serfJoin bool, ports ...int) (xe error)
|
|||
// Each agent gets it's own area in the cluster scratch.
|
||||
conf.ScratchDir = filepath.Join(c.ScratchDir, strconv.Itoa(c.Index))
|
||||
if err := os.MkdirAll(conf.ScratchDir, 0777); err != nil {
|
||||
return err
|
||||
return fmt.Errorf("container %d: %w", idx, err)
|
||||
}
|
||||
if err := os.Chmod(conf.ScratchDir, 0777); err != nil {
|
||||
return err
|
||||
return fmt.Errorf("container %d: %w", idx, err)
|
||||
}
|
||||
|
||||
n, err := NewConsulContainer(
|
||||
|
@ -138,7 +138,7 @@ func (c *Cluster) Add(configs []Config, serfJoin bool, ports ...int) (xe error)
|
|||
ports...,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not add container index %d: %w", idx, err)
|
||||
return fmt.Errorf("container %d: %w", idx, err)
|
||||
}
|
||||
agents = append(agents, n)
|
||||
c.Index++
|
||||
|
@ -161,9 +161,11 @@ func (c *Cluster) Add(configs []Config, serfJoin bool, ports ...int) (xe error)
|
|||
func (c *Cluster) Join(agents []Agent) error {
|
||||
return c.join(agents, false)
|
||||
}
|
||||
|
||||
func (c *Cluster) JoinExternally(agents []Agent) error {
|
||||
return c.join(agents, true)
|
||||
}
|
||||
|
||||
func (c *Cluster) join(agents []Agent, skipSerfJoin bool) error {
|
||||
if len(agents) == 0 {
|
||||
return nil // no change
|
||||
|
@ -313,6 +315,16 @@ func (c *Cluster) StandardUpgrade(t *testing.T, ctx context.Context, targetVersi
|
|||
}
|
||||
t.Logf("The number of followers = %d", len(followers))
|
||||
|
||||
// NOTE: we only assert the number of agents in default partition
|
||||
// TODO: add partition to the cluster struct to assert partition size
|
||||
clusterSize := 0
|
||||
for _, agent := range c.Agents {
|
||||
if agent.GetPartition() == "" || agent.GetPartition() == "default" {
|
||||
clusterSize++
|
||||
}
|
||||
}
|
||||
t.Logf("The number of agents in default partition = %d", clusterSize)
|
||||
|
||||
upgradeFn := func(agent Agent, clientFactory func() (*api.Client, error)) error {
|
||||
config := agent.GetConfig()
|
||||
config.Version = targetVersion
|
||||
|
@ -347,8 +359,10 @@ func (c *Cluster) StandardUpgrade(t *testing.T, ctx context.Context, targetVersi
|
|||
return err
|
||||
}
|
||||
|
||||
// wait until the agent rejoin and leader is elected
|
||||
WaitForMembers(t, client, len(c.Agents))
|
||||
// wait until the agent rejoin and leader is elected; skip non-default agent
|
||||
if agent.GetPartition() == "" || agent.GetPartition() == "default" {
|
||||
WaitForMembers(t, client, clusterSize)
|
||||
}
|
||||
WaitForLeader(t, c, client)
|
||||
|
||||
return nil
|
||||
|
@ -476,7 +490,23 @@ func (c *Cluster) Servers() []Agent {
|
|||
return servers
|
||||
}
|
||||
|
||||
// Clients returns the handle to client agents
|
||||
// Clients returns the handle to client agents in provided partition
|
||||
func (c *Cluster) ClientsInPartition(partition string) []Agent {
|
||||
var clients []Agent
|
||||
|
||||
for _, n := range c.Agents {
|
||||
if n.IsServer() {
|
||||
continue
|
||||
}
|
||||
|
||||
if n.GetPartition() == partition {
|
||||
clients = append(clients, n)
|
||||
}
|
||||
}
|
||||
return clients
|
||||
}
|
||||
|
||||
// Clients returns the handle to client agents in all partitions
|
||||
func (c *Cluster) Clients() []Agent {
|
||||
var clients []Agent
|
||||
|
||||
|
|
|
@ -38,6 +38,7 @@ type consulContainerNode struct {
|
|||
container testcontainers.Container
|
||||
serverMode bool
|
||||
datacenter string
|
||||
partition string
|
||||
config Config
|
||||
podReq testcontainers.ContainerRequest
|
||||
consulReq testcontainers.ContainerRequest
|
||||
|
@ -228,6 +229,7 @@ func NewConsulContainer(ctx context.Context, config Config, cluster *Cluster, po
|
|||
container: consulContainer,
|
||||
serverMode: pc.Server,
|
||||
datacenter: pc.Datacenter,
|
||||
partition: pc.Partition,
|
||||
ctx: ctx,
|
||||
podReq: podReq,
|
||||
consulReq: consulReq,
|
||||
|
@ -318,6 +320,10 @@ func (c *consulContainerNode) GetDatacenter() string {
|
|||
return c.datacenter
|
||||
}
|
||||
|
||||
func (c *consulContainerNode) GetPartition() string {
|
||||
return c.partition
|
||||
}
|
||||
|
||||
func (c *consulContainerNode) IsServer() bool {
|
||||
return c.serverMode
|
||||
}
|
||||
|
@ -493,7 +499,7 @@ func startContainer(ctx context.Context, req testcontainers.ContainerRequest) (t
|
|||
})
|
||||
}
|
||||
|
||||
const pauseImage = "k8s.gcr.io/pause:3.3"
|
||||
const pauseImage = "registry.k8s.io/pause:3.3"
|
||||
|
||||
type containerOpts struct {
|
||||
configFile string
|
||||
|
@ -641,6 +647,7 @@ type parsedConfig struct {
|
|||
Datacenter string `json:"datacenter"`
|
||||
Server bool `json:"server"`
|
||||
Ports parsedPorts `json:"ports"`
|
||||
Partition string `json:"partition"`
|
||||
}
|
||||
|
||||
type parsedPorts struct {
|
||||
|
|
|
@ -142,18 +142,24 @@ func (g ConnectContainer) GetStatus() (string, error) {
|
|||
return state.Status, err
|
||||
}
|
||||
|
||||
type SidecarConfig struct {
|
||||
Name string
|
||||
ServiceID string
|
||||
Namespace string
|
||||
}
|
||||
|
||||
// NewConnectService returns a container that runs envoy sidecar, launched by
|
||||
// "consul connect envoy", for service name (serviceName) on the specified
|
||||
// node. The container exposes port serviceBindPort and envoy admin port
|
||||
// (19000) by mapping them onto host ports. The container's name has a prefix
|
||||
// combining datacenter and name.
|
||||
func NewConnectService(ctx context.Context, sidecarServiceName string, serviceID string, serviceBindPorts []int, node cluster.Agent) (*ConnectContainer, error) {
|
||||
func NewConnectService(ctx context.Context, sidecarCfg SidecarConfig, serviceBindPorts []int, node cluster.Agent) (*ConnectContainer, error) {
|
||||
nodeConfig := node.GetConfig()
|
||||
if nodeConfig.ScratchDir == "" {
|
||||
return nil, fmt.Errorf("node ScratchDir is required")
|
||||
}
|
||||
|
||||
namePrefix := fmt.Sprintf("%s-service-connect-%s", node.GetDatacenter(), sidecarServiceName)
|
||||
namePrefix := fmt.Sprintf("%s-service-connect-%s", node.GetDatacenter(), sidecarCfg.Name)
|
||||
containerName := utils.RandName(namePrefix)
|
||||
|
||||
envoyVersion := getEnvoyVersion()
|
||||
|
@ -181,8 +187,9 @@ func NewConnectService(ctx context.Context, sidecarServiceName string, serviceID
|
|||
Name: containerName,
|
||||
Cmd: []string{
|
||||
"consul", "connect", "envoy",
|
||||
"-sidecar-for", serviceID,
|
||||
"-sidecar-for", sidecarCfg.ServiceID,
|
||||
"-admin-bind", fmt.Sprintf("0.0.0.0:%d", internalAdminPort),
|
||||
"-namespace", sidecarCfg.Namespace,
|
||||
"--",
|
||||
"--log-level", envoyLogLevel,
|
||||
},
|
||||
|
@ -240,7 +247,7 @@ func NewConnectService(ctx context.Context, sidecarServiceName string, serviceID
|
|||
ip: info.IP,
|
||||
externalAdminPort: info.MappedPorts[adminPortStr].Int(),
|
||||
internalAdminPort: internalAdminPort,
|
||||
serviceName: sidecarServiceName,
|
||||
serviceName: sidecarCfg.Name,
|
||||
}
|
||||
|
||||
for _, port := range appPortStrs {
|
||||
|
@ -248,9 +255,9 @@ func NewConnectService(ctx context.Context, sidecarServiceName string, serviceID
|
|||
}
|
||||
|
||||
fmt.Printf("NewConnectService: name %s, mapped App Port %d, service bind port %v\n",
|
||||
serviceID, out.appPort, serviceBindPorts)
|
||||
sidecarCfg.ServiceID, out.appPort, serviceBindPorts)
|
||||
fmt.Printf("NewConnectService sidecar: name %s, mapped admin port %d, admin port %d\n",
|
||||
sidecarServiceName, out.externalAdminPort, internalAdminPort)
|
||||
sidecarCfg.Name, out.externalAdminPort, internalAdminPort)
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
|
|
@ -112,7 +112,6 @@ func (g gatewayContainer) GetPort(port int) (int, error) {
|
|||
return 0, fmt.Errorf("port does not exist")
|
||||
}
|
||||
return p, nil
|
||||
|
||||
}
|
||||
|
||||
func (g gatewayContainer) Restart() error {
|
||||
|
@ -140,13 +139,19 @@ func (g gatewayContainer) GetStatus() (string, error) {
|
|||
return state.Status, err
|
||||
}
|
||||
|
||||
func NewGatewayService(ctx context.Context, name string, kind string, node libcluster.Agent, ports ...int) (Service, error) {
|
||||
type GatewayConfig struct {
|
||||
Name string
|
||||
Kind string
|
||||
Namespace string
|
||||
}
|
||||
|
||||
func NewGatewayService(ctx context.Context, gwCfg GatewayConfig, node libcluster.Agent, ports ...int) (Service, error) {
|
||||
nodeConfig := node.GetConfig()
|
||||
if nodeConfig.ScratchDir == "" {
|
||||
return nil, fmt.Errorf("node ScratchDir is required")
|
||||
}
|
||||
|
||||
namePrefix := fmt.Sprintf("%s-service-gateway-%s", node.GetDatacenter(), name)
|
||||
namePrefix := fmt.Sprintf("%s-service-gateway-%s", node.GetDatacenter(), gwCfg.Name)
|
||||
containerName := utils.RandName(namePrefix)
|
||||
|
||||
envoyVersion := getEnvoyVersion()
|
||||
|
@ -174,9 +179,10 @@ func NewGatewayService(ctx context.Context, name string, kind string, node libcl
|
|||
Name: containerName,
|
||||
Cmd: []string{
|
||||
"consul", "connect", "envoy",
|
||||
fmt.Sprintf("-gateway=%s", kind),
|
||||
fmt.Sprintf("-gateway=%s", gwCfg.Kind),
|
||||
"-register",
|
||||
"-service", name,
|
||||
"-namespace", gwCfg.Namespace,
|
||||
"-service", gwCfg.Name,
|
||||
"-address", "{{ GetInterfaceIP \"eth0\" }}:8443",
|
||||
"-admin-bind", fmt.Sprintf("0.0.0.0:%d", adminPort),
|
||||
"--",
|
||||
|
@ -242,7 +248,7 @@ func NewGatewayService(ctx context.Context, name string, kind string, node libcl
|
|||
ip: info.IP,
|
||||
port: info.MappedPorts[portStr].Int(),
|
||||
adminPort: info.MappedPorts[adminPortStr].Int(),
|
||||
serviceName: name,
|
||||
serviceName: gwCfg.Name,
|
||||
portMappings: portMappings,
|
||||
}
|
||||
|
||||
|
|
|
@ -3,6 +3,9 @@ package service
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/consul/api"
|
||||
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
|
||||
|
@ -25,13 +28,14 @@ type SidecarService struct {
|
|||
}
|
||||
|
||||
type ServiceOpts struct {
|
||||
Name string
|
||||
ID string
|
||||
Meta map[string]string
|
||||
HTTPPort int
|
||||
GRPCPort int
|
||||
Checks Checks
|
||||
Connect SidecarService
|
||||
Name string
|
||||
ID string
|
||||
Meta map[string]string
|
||||
HTTPPort int
|
||||
GRPCPort int
|
||||
Checks Checks
|
||||
Connect SidecarService
|
||||
Namespace string
|
||||
}
|
||||
|
||||
// createAndRegisterStaticServerAndSidecar register the services and launch static-server containers
|
||||
|
@ -53,8 +57,12 @@ func createAndRegisterStaticServerAndSidecar(node libcluster.Agent, grpcPort int
|
|||
deferClean.Add(func() {
|
||||
_ = serverService.Terminate()
|
||||
})
|
||||
|
||||
serverConnectProxy, err := NewConnectService(context.Background(), fmt.Sprintf("%s-sidecar", svc.ID), svc.ID, []int{svc.Port}, node) // bindPort not used
|
||||
sidecarCfg := SidecarConfig{
|
||||
Name: fmt.Sprintf("%s-sidecar", svc.ID),
|
||||
ServiceID: svc.ID,
|
||||
Namespace: svc.Namespace,
|
||||
}
|
||||
serverConnectProxy, err := NewConnectService(context.Background(), sidecarCfg, []int{svc.Port}, node) // bindPort not used
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
@ -80,6 +88,7 @@ func CreateAndRegisterStaticServerAndSidecar(node libcluster.Agent, serviceOpts
|
|||
Proxy: &api.AgentServiceConnectProxyConfig{},
|
||||
},
|
||||
},
|
||||
Namespace: serviceOpts.Namespace,
|
||||
Check: &api.AgentServiceCheck{
|
||||
Name: "Static Server Listening",
|
||||
TCP: fmt.Sprintf("127.0.0.1:%d", serviceOpts.HTTPPort),
|
||||
|
@ -158,7 +167,12 @@ func CreateAndRegisterStaticClientSidecar(
|
|||
}
|
||||
|
||||
// Create a service and proxy instance
|
||||
clientConnectProxy, err := NewConnectService(context.Background(), fmt.Sprintf("%s-sidecar", StaticClientServiceName), StaticClientServiceName, []int{libcluster.ServiceUpstreamLocalBindPort}, node)
|
||||
sidecarCfg := SidecarConfig{
|
||||
Name: fmt.Sprintf("%s-sidecar", StaticClientServiceName),
|
||||
ServiceID: StaticClientServiceName,
|
||||
}
|
||||
|
||||
clientConnectProxy, err := NewConnectService(context.Background(), sidecarCfg, []int{libcluster.ServiceUpstreamLocalBindPort}, node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -171,3 +185,59 @@ func CreateAndRegisterStaticClientSidecar(
|
|||
|
||||
return clientConnectProxy, nil
|
||||
}
|
||||
|
||||
func ClientsCreate(t *testing.T, numClients int, image, version string, cluster *libcluster.Cluster) {
|
||||
opts := libcluster.BuildOptions{
|
||||
ConsulImageName: image,
|
||||
ConsulVersion: version,
|
||||
}
|
||||
ctx := libcluster.NewBuildContext(t, opts)
|
||||
|
||||
conf := libcluster.NewConfigBuilder(ctx).
|
||||
Client().
|
||||
ToAgentConfig(t)
|
||||
t.Logf("Cluster client config:\n%s", conf.JSON)
|
||||
|
||||
require.NoError(t, cluster.AddN(*conf, numClients, true))
|
||||
}
|
||||
|
||||
func ServiceCreate(t *testing.T, client *api.Client, serviceName string) uint64 {
|
||||
require.NoError(t, client.Agent().ServiceRegister(&api.AgentServiceRegistration{
|
||||
Name: serviceName,
|
||||
Port: 9999,
|
||||
Connect: &api.AgentServiceConnect{
|
||||
SidecarService: &api.AgentServiceRegistration{
|
||||
Port: 22005,
|
||||
},
|
||||
},
|
||||
}))
|
||||
|
||||
service, meta, err := client.Catalog().Service(serviceName, "", &api.QueryOptions{})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, service, 1)
|
||||
require.Equal(t, serviceName, service[0].ServiceName)
|
||||
require.Equal(t, 9999, service[0].ServicePort)
|
||||
|
||||
return meta.LastIndex
|
||||
}
|
||||
|
||||
func ServiceHealthBlockingQuery(client *api.Client, serviceName string, waitIndex uint64) (chan []*api.ServiceEntry, chan error) {
|
||||
var (
|
||||
ch = make(chan []*api.ServiceEntry, 1)
|
||||
errCh = make(chan error, 1)
|
||||
)
|
||||
go func() {
|
||||
opts := &api.QueryOptions{WaitIndex: waitIndex}
|
||||
service, q, err := client.Health().Service(serviceName, "", false, opts)
|
||||
if err == nil && q.QueryBackend != api.QueryBackendStreaming {
|
||||
err = fmt.Errorf("invalid backend for this test %s", q.QueryBackend)
|
||||
}
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
} else {
|
||||
ch <- service
|
||||
}
|
||||
}()
|
||||
|
||||
return ch, errCh
|
||||
}
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/testcontainers/testcontainers-go"
|
||||
|
||||
"github.com/hashicorp/consul/api"
|
||||
|
||||
|
@ -43,22 +44,36 @@ func BasicPeeringTwoClustersSetup(
|
|||
consulVersion string,
|
||||
peeringThroughMeshgateway bool,
|
||||
) (*BuiltCluster, *BuiltCluster) {
|
||||
// acceptingCluster, acceptingCtx, acceptingClient := NewPeeringCluster(t, "dc1", 3, consulVersion, true)
|
||||
acceptingCluster, acceptingCtx, acceptingClient := NewPeeringCluster(t, 3, &libcluster.BuildOptions{
|
||||
Datacenter: "dc1",
|
||||
ConsulVersion: consulVersion,
|
||||
InjectAutoEncryption: true,
|
||||
acceptingCluster, acceptingCtx, acceptingClient := NewCluster(t, &ClusterConfig{
|
||||
NumServers: 3,
|
||||
NumClients: 1,
|
||||
BuildOpts: &libcluster.BuildOptions{
|
||||
Datacenter: "dc1",
|
||||
ConsulVersion: consulVersion,
|
||||
InjectAutoEncryption: true,
|
||||
},
|
||||
ApplyDefaultProxySettings: true,
|
||||
})
|
||||
dialingCluster, dialingCtx, dialingClient := NewPeeringCluster(t, 1, &libcluster.BuildOptions{
|
||||
Datacenter: "dc2",
|
||||
ConsulVersion: consulVersion,
|
||||
InjectAutoEncryption: true,
|
||||
|
||||
dialingCluster, dialingCtx, dialingClient := NewCluster(t, &ClusterConfig{
|
||||
NumServers: 1,
|
||||
NumClients: 1,
|
||||
BuildOpts: &libcluster.BuildOptions{
|
||||
Datacenter: "dc2",
|
||||
ConsulVersion: consulVersion,
|
||||
InjectAutoEncryption: true,
|
||||
},
|
||||
ApplyDefaultProxySettings: true,
|
||||
})
|
||||
|
||||
// Create the mesh gateway for dataplane traffic and peering control plane traffic (if enabled)
|
||||
acceptingClusterGateway, err := libservice.NewGatewayService(context.Background(), "mesh", "mesh", acceptingCluster.Clients()[0])
|
||||
gwCfg := libservice.GatewayConfig{
|
||||
Name: "mesh",
|
||||
Kind: "mesh",
|
||||
}
|
||||
acceptingClusterGateway, err := libservice.NewGatewayService(context.Background(), gwCfg, acceptingCluster.Clients()[0])
|
||||
require.NoError(t, err)
|
||||
dialingClusterGateway, err := libservice.NewGatewayService(context.Background(), "mesh", "mesh", dialingCluster.Clients()[0])
|
||||
dialingClusterGateway, err := libservice.NewGatewayService(context.Background(), gwCfg, dialingCluster.Clients()[0])
|
||||
require.NoError(t, err)
|
||||
|
||||
// Enable peering control plane traffic through mesh gateway
|
||||
|
@ -69,7 +84,7 @@ func BasicPeeringTwoClustersSetup(
|
|||
},
|
||||
}
|
||||
configCluster := func(cli *api.Client) error {
|
||||
libassert.CatalogServiceExists(t, cli, "mesh")
|
||||
libassert.CatalogServiceExists(t, cli, "mesh", nil)
|
||||
ok, _, err := cli.ConfigEntries().Set(req, &api.WriteOptions{})
|
||||
if !ok {
|
||||
return fmt.Errorf("config entry is not set")
|
||||
|
@ -109,8 +124,8 @@ func BasicPeeringTwoClustersSetup(
|
|||
serverService, serverSidecarService, err = libservice.CreateAndRegisterStaticServerAndSidecar(clientNode, &serviceOpts)
|
||||
require.NoError(t, err)
|
||||
|
||||
libassert.CatalogServiceExists(t, acceptingClient, libservice.StaticServerServiceName)
|
||||
libassert.CatalogServiceExists(t, acceptingClient, "static-server-sidecar-proxy")
|
||||
libassert.CatalogServiceExists(t, acceptingClient, libservice.StaticServerServiceName, nil)
|
||||
libassert.CatalogServiceExists(t, acceptingClient, "static-server-sidecar-proxy", nil)
|
||||
|
||||
require.NoError(t, serverService.Export("default", AcceptingPeerName, acceptingClient))
|
||||
}
|
||||
|
@ -125,7 +140,7 @@ func BasicPeeringTwoClustersSetup(
|
|||
clientSidecarService, err = libservice.CreateAndRegisterStaticClientSidecar(clientNode, DialingPeerName, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
libassert.CatalogServiceExists(t, dialingClient, "static-client-sidecar-proxy")
|
||||
libassert.CatalogServiceExists(t, dialingClient, "static-client-sidecar-proxy", nil)
|
||||
|
||||
}
|
||||
|
||||
|
@ -133,7 +148,7 @@ func BasicPeeringTwoClustersSetup(
|
|||
libassert.AssertUpstreamEndpointStatus(t, adminPort, fmt.Sprintf("static-server.default.%s.external", DialingPeerName), "HEALTHY", 1)
|
||||
_, port := clientSidecarService.GetAddr()
|
||||
libassert.HTTPServiceEchoes(t, "localhost", port, "")
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server")
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), libservice.StaticServerServiceName, "")
|
||||
|
||||
return &BuiltCluster{
|
||||
Cluster: acceptingCluster,
|
||||
|
@ -151,95 +166,72 @@ func BasicPeeringTwoClustersSetup(
|
|||
}
|
||||
}
|
||||
|
||||
// NewDialingCluster creates a cluster for peering with a single dev agent
|
||||
// TODO: note: formerly called CreatingPeeringClusterAndSetup
|
||||
//
|
||||
// Deprecated: use NewPeeringCluster mostly
|
||||
func NewDialingCluster(
|
||||
t *testing.T,
|
||||
version string,
|
||||
dialingPeerName string,
|
||||
) (*libcluster.Cluster, *api.Client, libservice.Service) {
|
||||
t.Helper()
|
||||
t.Logf("creating the dialing cluster")
|
||||
|
||||
opts := libcluster.BuildOptions{
|
||||
Datacenter: "dc2",
|
||||
InjectAutoEncryption: true,
|
||||
InjectGossipEncryption: true,
|
||||
AllowHTTPAnyway: true,
|
||||
ConsulVersion: version,
|
||||
}
|
||||
ctx := libcluster.NewBuildContext(t, opts)
|
||||
|
||||
conf := libcluster.NewConfigBuilder(ctx).
|
||||
Peering(true).
|
||||
ToAgentConfig(t)
|
||||
t.Logf("dc2 server config: \n%s", conf.JSON)
|
||||
|
||||
cluster, err := libcluster.NewN(t, *conf, 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
node := cluster.Agents[0]
|
||||
client := node.GetClient()
|
||||
libcluster.WaitForLeader(t, cluster, client)
|
||||
libcluster.WaitForMembers(t, client, 1)
|
||||
|
||||
// Default Proxy Settings
|
||||
ok, err := utils.ApplyDefaultProxySettings(client)
|
||||
require.NoError(t, err)
|
||||
require.True(t, ok)
|
||||
|
||||
// Create the mesh gateway for dataplane traffic
|
||||
_, err = libservice.NewGatewayService(context.Background(), "mesh", "mesh", node)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a service and proxy instance
|
||||
clientProxyService, err := libservice.CreateAndRegisterStaticClientSidecar(node, dialingPeerName, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
libassert.CatalogServiceExists(t, client, "static-client-sidecar-proxy")
|
||||
|
||||
return cluster, client, clientProxyService
|
||||
type ClusterConfig struct {
|
||||
NumServers int
|
||||
NumClients int
|
||||
ApplyDefaultProxySettings bool
|
||||
BuildOpts *libcluster.BuildOptions
|
||||
Cmd string
|
||||
LogConsumer *TestLogConsumer
|
||||
Ports []int
|
||||
}
|
||||
|
||||
// NewPeeringCluster creates a cluster with peering enabled. It also creates
|
||||
// NewCluster creates a cluster with peering enabled. It also creates
|
||||
// and registers a mesh-gateway at the client agent. The API client returned is
|
||||
// pointed at the client agent.
|
||||
// - proxy-defaults.protocol = tcp
|
||||
func NewPeeringCluster(
|
||||
func NewCluster(
|
||||
t *testing.T,
|
||||
numServers int,
|
||||
buildOpts *libcluster.BuildOptions,
|
||||
config *ClusterConfig,
|
||||
) (*libcluster.Cluster, *libcluster.BuildContext, *api.Client) {
|
||||
require.NotEmpty(t, buildOpts.Datacenter)
|
||||
require.True(t, numServers > 0)
|
||||
var (
|
||||
cluster *libcluster.Cluster
|
||||
err error
|
||||
)
|
||||
require.NotEmpty(t, config.BuildOpts.Datacenter)
|
||||
require.True(t, config.NumServers > 0)
|
||||
|
||||
opts := libcluster.BuildOptions{
|
||||
Datacenter: buildOpts.Datacenter,
|
||||
InjectAutoEncryption: buildOpts.InjectAutoEncryption,
|
||||
Datacenter: config.BuildOpts.Datacenter,
|
||||
InjectAutoEncryption: config.BuildOpts.InjectAutoEncryption,
|
||||
InjectGossipEncryption: true,
|
||||
AllowHTTPAnyway: true,
|
||||
ConsulVersion: buildOpts.ConsulVersion,
|
||||
ACLEnabled: buildOpts.ACLEnabled,
|
||||
ConsulVersion: config.BuildOpts.ConsulVersion,
|
||||
ACLEnabled: config.BuildOpts.ACLEnabled,
|
||||
}
|
||||
ctx := libcluster.NewBuildContext(t, opts)
|
||||
|
||||
serverConf := libcluster.NewConfigBuilder(ctx).
|
||||
Bootstrap(numServers).
|
||||
Bootstrap(config.NumServers).
|
||||
Peering(true).
|
||||
ToAgentConfig(t)
|
||||
t.Logf("%s server config: \n%s", opts.Datacenter, serverConf.JSON)
|
||||
|
||||
cluster, err := libcluster.NewN(t, *serverConf, numServers)
|
||||
// optional
|
||||
if config.LogConsumer != nil {
|
||||
serverConf.LogConsumer = config.LogConsumer
|
||||
}
|
||||
|
||||
t.Logf("Cluster config:\n%s", serverConf.JSON)
|
||||
|
||||
// optional custom cmd
|
||||
if config.Cmd != "" {
|
||||
serverConf.Cmd = append(serverConf.Cmd, config.Cmd)
|
||||
}
|
||||
|
||||
if config.Ports != nil {
|
||||
cluster, err = libcluster.New(t, []libcluster.Config{*serverConf}, config.Ports...)
|
||||
} else {
|
||||
cluster, err = libcluster.NewN(t, *serverConf, config.NumServers)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
var retryJoin []string
|
||||
for i := 0; i < numServers; i++ {
|
||||
for i := 0; i < config.NumServers; i++ {
|
||||
retryJoin = append(retryJoin, fmt.Sprintf("agent-%d", i))
|
||||
}
|
||||
|
||||
// Add a stable client to register the service
|
||||
// Add numClients static clients to register the service
|
||||
configbuiilder := libcluster.NewConfigBuilder(ctx).
|
||||
Client().
|
||||
Peering(true).
|
||||
|
@ -247,18 +239,33 @@ func NewPeeringCluster(
|
|||
clientConf := configbuiilder.ToAgentConfig(t)
|
||||
t.Logf("%s client config: \n%s", opts.Datacenter, clientConf.JSON)
|
||||
|
||||
require.NoError(t, cluster.AddN(*clientConf, 1, true))
|
||||
require.NoError(t, cluster.AddN(*clientConf, config.NumClients, true))
|
||||
|
||||
// Use the client agent as the HTTP endpoint since we will not rotate it in many tests.
|
||||
clientNode := cluster.Agents[numServers]
|
||||
client := clientNode.GetClient()
|
||||
var client *api.Client
|
||||
if config.NumClients > 0 {
|
||||
clientNode := cluster.Agents[config.NumServers]
|
||||
client = clientNode.GetClient()
|
||||
} else {
|
||||
client = cluster.Agents[0].GetClient()
|
||||
}
|
||||
libcluster.WaitForLeader(t, cluster, client)
|
||||
libcluster.WaitForMembers(t, client, numServers+1)
|
||||
libcluster.WaitForMembers(t, client, config.NumServers+config.NumClients)
|
||||
|
||||
// Default Proxy Settings
|
||||
ok, err := utils.ApplyDefaultProxySettings(client)
|
||||
require.NoError(t, err)
|
||||
require.True(t, ok)
|
||||
if config.ApplyDefaultProxySettings {
|
||||
ok, err := utils.ApplyDefaultProxySettings(client)
|
||||
require.NoError(t, err)
|
||||
require.True(t, ok)
|
||||
}
|
||||
|
||||
return cluster, ctx, client
|
||||
}
|
||||
|
||||
type TestLogConsumer struct {
|
||||
Msgs []string
|
||||
}
|
||||
|
||||
func (g *TestLogConsumer) Accept(l testcontainers.Log) {
|
||||
g.Msgs = append(g.Msgs, string(l.Content))
|
||||
}
|
||||
|
|
|
@ -38,14 +38,14 @@ func CreateServices(t *testing.T, cluster *libcluster.Cluster) (libservice.Servi
|
|||
_, serverConnectProxy, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOpts)
|
||||
require.NoError(t, err)
|
||||
|
||||
libassert.CatalogServiceExists(t, client, fmt.Sprintf("%s-sidecar-proxy", libservice.StaticServerServiceName))
|
||||
libassert.CatalogServiceExists(t, client, libservice.StaticServerServiceName)
|
||||
libassert.CatalogServiceExists(t, client, fmt.Sprintf("%s-sidecar-proxy", libservice.StaticServerServiceName), nil)
|
||||
libassert.CatalogServiceExists(t, client, libservice.StaticServerServiceName, nil)
|
||||
|
||||
// Create a client proxy instance with the server as an upstream
|
||||
clientConnectProxy, err := libservice.CreateAndRegisterStaticClientSidecar(node, "", false)
|
||||
require.NoError(t, err)
|
||||
|
||||
libassert.CatalogServiceExists(t, client, fmt.Sprintf("%s-sidecar-proxy", libservice.StaticClientServiceName))
|
||||
libassert.CatalogServiceExists(t, client, fmt.Sprintf("%s-sidecar-proxy", libservice.StaticClientServiceName), nil)
|
||||
|
||||
return serverConnectProxy, clientConnectProxy
|
||||
}
|
||||
|
|
|
@ -34,7 +34,7 @@ func TestBasicConnectService(t *testing.T) {
|
|||
|
||||
libassert.AssertContainerState(t, clientService, "running")
|
||||
libassert.HTTPServiceEchoes(t, "localhost", port, "")
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server")
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server", "")
|
||||
}
|
||||
|
||||
func createCluster(t *testing.T) *libcluster.Cluster {
|
||||
|
@ -84,14 +84,14 @@ func createServices(t *testing.T, cluster *libcluster.Cluster) libservice.Servic
|
|||
_, _, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOpts)
|
||||
require.NoError(t, err)
|
||||
|
||||
libassert.CatalogServiceExists(t, client, "static-server-sidecar-proxy")
|
||||
libassert.CatalogServiceExists(t, client, libservice.StaticServerServiceName)
|
||||
libassert.CatalogServiceExists(t, client, "static-server-sidecar-proxy", nil)
|
||||
libassert.CatalogServiceExists(t, client, libservice.StaticServerServiceName, nil)
|
||||
|
||||
// Create a client proxy instance with the server as an upstream
|
||||
clientConnectProxy, err := libservice.CreateAndRegisterStaticClientSidecar(node, "", false)
|
||||
require.NoError(t, err)
|
||||
|
||||
libassert.CatalogServiceExists(t, client, "static-client-sidecar-proxy")
|
||||
libassert.CatalogServiceExists(t, client, "static-client-sidecar-proxy", nil)
|
||||
|
||||
return clientConnectProxy
|
||||
}
|
||||
|
|
|
@ -3,82 +3,121 @@ package gateways
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
|
||||
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
|
||||
libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
|
||||
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
|
||||
"github.com/hashicorp/go-cleanhttp"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
checkTimeout = 1 * time.Minute
|
||||
checkInterval = 1 * time.Second
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/consul/api"
|
||||
libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
|
||||
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
|
||||
libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
|
||||
libtopology "github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
|
||||
"github.com/hashicorp/go-cleanhttp"
|
||||
)
|
||||
|
||||
// Creates a gateway service and tests to see if it is routable
|
||||
func TestAPIGatewayCreate(t *testing.T) {
|
||||
t.Skip()
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
|
||||
gatewayName := randomName("gateway", 16)
|
||||
routeName := randomName("route", 16)
|
||||
serviceName := randomName("service", 16)
|
||||
listenerPortOne := 6000
|
||||
serviceHTTPPort := 6001
|
||||
serviceGRPCPort := 6002
|
||||
|
||||
cluster := createCluster(t, listenerPortOne)
|
||||
clusterConfig := &libtopology.ClusterConfig{
|
||||
NumServers: 1,
|
||||
NumClients: 1,
|
||||
BuildOpts: &libcluster.BuildOptions{
|
||||
Datacenter: "dc1",
|
||||
InjectAutoEncryption: true,
|
||||
InjectGossipEncryption: true,
|
||||
AllowHTTPAnyway: true,
|
||||
},
|
||||
Ports: []int{
|
||||
listenerPortOne,
|
||||
serviceHTTPPort,
|
||||
serviceGRPCPort,
|
||||
},
|
||||
}
|
||||
|
||||
cluster, _, _ := libtopology.NewCluster(t, clusterConfig)
|
||||
client := cluster.APIClient(0)
|
||||
|
||||
//setup
|
||||
namespace := getNamespace()
|
||||
if namespace != "" {
|
||||
ns := &api.Namespace{Name: namespace}
|
||||
_, _, err := client.Namespaces().Create(ns, nil)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// add api gateway config
|
||||
apiGateway := &api.APIGatewayConfigEntry{
|
||||
Kind: "api-gateway",
|
||||
Name: "api-gateway",
|
||||
Kind: api.APIGateway,
|
||||
Namespace: namespace,
|
||||
Name: gatewayName,
|
||||
Listeners: []api.APIGatewayListener{
|
||||
{
|
||||
Name: "listener",
|
||||
Port: listenerPortOne,
|
||||
Protocol: "tcp",
|
||||
},
|
||||
},
|
||||
}
|
||||
_, _, err := client.ConfigEntries().Set(apiGateway, nil)
|
||||
|
||||
require.NoError(t, cluster.ConfigEntryWrite(apiGateway))
|
||||
|
||||
_, _, err := libservice.CreateAndRegisterStaticServerAndSidecar(cluster.Agents[0], &libservice.ServiceOpts{
|
||||
ID: serviceName,
|
||||
Name: serviceName,
|
||||
Namespace: namespace,
|
||||
HTTPPort: serviceHTTPPort,
|
||||
GRPCPort: serviceGRPCPort,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
tcpRoute := &api.TCPRouteConfigEntry{
|
||||
Kind: "tcp-route",
|
||||
Name: "api-gateway-route",
|
||||
Kind: api.TCPRoute,
|
||||
Name: routeName,
|
||||
Namespace: namespace,
|
||||
Parents: []api.ResourceReference{
|
||||
{
|
||||
Kind: "api-gateway",
|
||||
Name: "api-gateway",
|
||||
Kind: api.APIGateway,
|
||||
Namespace: namespace,
|
||||
Name: gatewayName,
|
||||
},
|
||||
},
|
||||
Services: []api.TCPService{
|
||||
{
|
||||
Name: libservice.StaticServerServiceName,
|
||||
Namespace: namespace,
|
||||
Name: serviceName,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, _, err = client.ConfigEntries().Set(tcpRoute, nil)
|
||||
require.NoError(t, cluster.ConfigEntryWrite(tcpRoute))
|
||||
|
||||
// Create a gateway
|
||||
gatewayService, err := libservice.NewGatewayService(context.Background(), libservice.GatewayConfig{
|
||||
Kind: "api",
|
||||
Namespace: namespace,
|
||||
Name: gatewayName,
|
||||
}, cluster.Agents[0], listenerPortOne)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a client proxy instance with the server as an upstream
|
||||
_, gatewayService := createServices(t, cluster, listenerPortOne)
|
||||
|
||||
//make sure the gateway/route come online
|
||||
//make sure config entries have been properly created
|
||||
checkGatewayConfigEntry(t, client, "api-gateway", "")
|
||||
checkTCPRouteConfigEntry(t, client, "api-gateway-route", "")
|
||||
// make sure the gateway/route come online
|
||||
// make sure config entries have been properly created
|
||||
checkGatewayConfigEntry(t, client, gatewayName, namespace)
|
||||
checkTCPRouteConfigEntry(t, client, routeName, namespace)
|
||||
|
||||
port, err := gatewayService.GetPort(listenerPortOne)
|
||||
require.NoError(t, err)
|
||||
|
@ -102,71 +141,36 @@ func conditionStatusIsValue(typeName string, statusValue string, conditions []ap
|
|||
return false
|
||||
}
|
||||
|
||||
// TODO this code is just copy pasted from elsewhere, it is likely we will need to modify it some
|
||||
func createCluster(t *testing.T, ports ...int) *libcluster.Cluster {
|
||||
opts := libcluster.BuildOptions{
|
||||
InjectAutoEncryption: true,
|
||||
InjectGossipEncryption: true,
|
||||
AllowHTTPAnyway: true,
|
||||
}
|
||||
ctx := libcluster.NewBuildContext(t, opts)
|
||||
|
||||
conf := libcluster.NewConfigBuilder(ctx).
|
||||
ToAgentConfig(t)
|
||||
t.Logf("Cluster config:\n%s", conf.JSON)
|
||||
|
||||
configs := []libcluster.Config{*conf}
|
||||
|
||||
cluster, err := libcluster.New(t, configs, ports...)
|
||||
require.NoError(t, err)
|
||||
|
||||
node := cluster.Agents[0]
|
||||
client := node.GetClient()
|
||||
|
||||
libcluster.WaitForLeader(t, cluster, client)
|
||||
libcluster.WaitForMembers(t, client, 1)
|
||||
|
||||
// Default Proxy Settings
|
||||
ok, err := utils.ApplyDefaultProxySettings(client)
|
||||
require.NoError(t, err)
|
||||
require.True(t, ok)
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
return cluster
|
||||
}
|
||||
|
||||
func createGateway(gatewayName string, protocol string, listenerPort int) *api.APIGatewayConfigEntry {
|
||||
return &api.APIGatewayConfigEntry{
|
||||
Kind: api.APIGateway,
|
||||
Name: gatewayName,
|
||||
Listeners: []api.APIGatewayListener{
|
||||
{
|
||||
Name: "listener",
|
||||
Port: listenerPort,
|
||||
Protocol: protocol,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func checkGatewayConfigEntry(t *testing.T, client *api.Client, gatewayName string, namespace string) {
|
||||
t.Helper()
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
entry, _, err := client.ConfigEntries().Get(api.APIGateway, gatewayName, &api.QueryOptions{Namespace: namespace})
|
||||
require.NoError(t, err)
|
||||
if entry == nil {
|
||||
if err != nil {
|
||||
t.Log("error constructing request", err)
|
||||
return false
|
||||
}
|
||||
if entry == nil {
|
||||
t.Log("returned entry is nil")
|
||||
return false
|
||||
}
|
||||
|
||||
apiEntry := entry.(*api.APIGatewayConfigEntry)
|
||||
return isAccepted(apiEntry.Status.Conditions)
|
||||
}, time.Second*10, time.Second*1)
|
||||
}
|
||||
|
||||
func checkHTTPRouteConfigEntry(t *testing.T, client *api.Client, routeName string, namespace string) {
|
||||
t.Helper()
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
entry, _, err := client.ConfigEntries().Get(api.HTTPRoute, routeName, &api.QueryOptions{Namespace: namespace})
|
||||
require.NoError(t, err)
|
||||
if err != nil {
|
||||
t.Log("error constructing request", err)
|
||||
return false
|
||||
}
|
||||
if entry == nil {
|
||||
t.Log("returned entry is nil")
|
||||
return false
|
||||
}
|
||||
|
||||
|
@ -176,10 +180,16 @@ func checkHTTPRouteConfigEntry(t *testing.T, client *api.Client, routeName strin
|
|||
}
|
||||
|
||||
func checkTCPRouteConfigEntry(t *testing.T, client *api.Client, routeName string, namespace string) {
|
||||
t.Helper()
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
entry, _, err := client.ConfigEntries().Get(api.TCPRoute, routeName, &api.QueryOptions{Namespace: namespace})
|
||||
require.NoError(t, err)
|
||||
if err != nil {
|
||||
t.Log("error constructing request", err)
|
||||
return false
|
||||
}
|
||||
if entry == nil {
|
||||
t.Log("returned entry is nil")
|
||||
return false
|
||||
}
|
||||
|
||||
|
@ -188,39 +198,6 @@ func checkTCPRouteConfigEntry(t *testing.T, client *api.Client, routeName string
|
|||
}, time.Second*10, time.Second*1)
|
||||
}
|
||||
|
||||
func createService(t *testing.T, cluster *libcluster.Cluster, serviceOpts *libservice.ServiceOpts, containerArgs []string) libservice.Service {
|
||||
node := cluster.Agents[0]
|
||||
client := node.GetClient()
|
||||
// Create a service and proxy instance
|
||||
service, _, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOpts, containerArgs...)
|
||||
require.NoError(t, err)
|
||||
|
||||
libassert.CatalogServiceExists(t, client, serviceOpts.Name+"-sidecar-proxy")
|
||||
libassert.CatalogServiceExists(t, client, serviceOpts.Name)
|
||||
|
||||
return service
|
||||
|
||||
}
|
||||
func createServices(t *testing.T, cluster *libcluster.Cluster, ports ...int) (libservice.Service, libservice.Service) {
|
||||
node := cluster.Agents[0]
|
||||
client := node.GetClient()
|
||||
// Create a service and proxy instance
|
||||
serviceOpts := &libservice.ServiceOpts{
|
||||
Name: libservice.StaticServerServiceName,
|
||||
ID: "static-server",
|
||||
HTTPPort: 8080,
|
||||
GRPCPort: 8079,
|
||||
}
|
||||
|
||||
clientConnectProxy := createService(t, cluster, serviceOpts, nil)
|
||||
|
||||
gatewayService, err := libservice.NewGatewayService(context.Background(), "api-gateway", "api", cluster.Agents[0], ports...)
|
||||
require.NoError(t, err)
|
||||
libassert.CatalogServiceExists(t, client, "api-gateway")
|
||||
|
||||
return clientConnectProxy, gatewayService
|
||||
}
|
||||
|
||||
type checkOptions struct {
|
||||
debug bool
|
||||
statusCode int
|
||||
|
@ -229,26 +206,23 @@ type checkOptions struct {
|
|||
|
||||
// checkRoute, customized version of libassert.RouteEchos to allow for headers/distinguishing between the server instances
|
||||
func checkRoute(t *testing.T, port int, path string, headers map[string]string, expected checkOptions) {
|
||||
ip := "localhost"
|
||||
t.Helper()
|
||||
|
||||
if expected.testName != "" {
|
||||
t.Log("running " + expected.testName)
|
||||
}
|
||||
const phrase = "hello"
|
||||
|
||||
failer := func() *retry.Timer {
|
||||
return &retry.Timer{Timeout: time.Second * 60, Wait: time.Second * 60}
|
||||
}
|
||||
|
||||
client := cleanhttp.DefaultClient()
|
||||
|
||||
path = strings.TrimPrefix(path, "/")
|
||||
url := fmt.Sprintf("http://%s:%d/%s", ip, port, path)
|
||||
url := fmt.Sprintf("http://localhost:%d/%s", port, path)
|
||||
|
||||
retry.RunWith(failer(), t, func(r *retry.R) {
|
||||
t.Logf("making call to %s", url)
|
||||
reader := strings.NewReader(phrase)
|
||||
require.Eventually(t, func() bool {
|
||||
reader := strings.NewReader("hello")
|
||||
req, err := http.NewRequest("POST", url, reader)
|
||||
require.NoError(t, err)
|
||||
if err != nil {
|
||||
t.Log("error constructing request", err)
|
||||
return false
|
||||
}
|
||||
headers["content-type"] = "text/plain"
|
||||
|
||||
for k, v := range headers {
|
||||
|
@ -258,40 +232,41 @@ func checkRoute(t *testing.T, port int, path string, headers map[string]string,
|
|||
req.Host = v
|
||||
}
|
||||
}
|
||||
|
||||
res, err := client.Do(req)
|
||||
if err != nil {
|
||||
t.Log(err)
|
||||
r.Fatal("could not make call to service ", url)
|
||||
t.Log("error sending request", err)
|
||||
return false
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
r.Fatal("could not read response body ", url)
|
||||
t.Log("error reading response body", err)
|
||||
return false
|
||||
}
|
||||
|
||||
assert.Equal(t, expected.statusCode, res.StatusCode)
|
||||
if expected.statusCode != res.StatusCode {
|
||||
r.Fatal("unexpected response code returned")
|
||||
t.Logf("bad status code - expected: %d, actual: %d", expected.statusCode, res.StatusCode)
|
||||
return false
|
||||
}
|
||||
if expected.debug {
|
||||
if !strings.Contains(string(body), "debug") {
|
||||
t.Log("body does not contain 'debug'")
|
||||
return false
|
||||
}
|
||||
}
|
||||
if !strings.Contains(string(body), "hello") {
|
||||
t.Log("body does not contain 'hello'")
|
||||
return false
|
||||
}
|
||||
|
||||
//if debug is expected, debug should be in the response body
|
||||
assert.Equal(t, expected.debug, strings.Contains(string(body), "debug"))
|
||||
if expected.statusCode != res.StatusCode {
|
||||
r.Fatal("unexpected response body returned")
|
||||
}
|
||||
|
||||
if !strings.Contains(string(body), phrase) {
|
||||
r.Fatal("received an incorrect response ", string(body))
|
||||
}
|
||||
|
||||
})
|
||||
return true
|
||||
}, time.Second*30, time.Second*1)
|
||||
}
|
||||
|
||||
func checkRouteError(t *testing.T, ip string, port int, path string, headers map[string]string, expected string) {
|
||||
failer := func() *retry.Timer {
|
||||
return &retry.Timer{Timeout: time.Second * 60, Wait: time.Second * 60}
|
||||
}
|
||||
t.Helper()
|
||||
|
||||
client := cleanhttp.DefaultClient()
|
||||
url := fmt.Sprintf("http://%s:%d", ip, port)
|
||||
|
@ -300,11 +275,12 @@ func checkRouteError(t *testing.T, ip string, port int, path string, headers map
|
|||
url += "/" + path
|
||||
}
|
||||
|
||||
retry.RunWith(failer(), t, func(r *retry.R) {
|
||||
t.Logf("making call to %s", url)
|
||||
require.Eventually(t, func() bool {
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
if err != nil {
|
||||
t.Log("error constructing request", err)
|
||||
return false
|
||||
}
|
||||
for k, v := range headers {
|
||||
req.Header.Set(k, v)
|
||||
|
||||
|
@ -313,10 +289,16 @@ func checkRouteError(t *testing.T, ip string, port int, path string, headers map
|
|||
}
|
||||
}
|
||||
_, err = client.Do(req)
|
||||
assert.Error(t, err)
|
||||
|
||||
if expected != "" {
|
||||
assert.ErrorContains(t, err, expected)
|
||||
if err == nil {
|
||||
t.Log("client request should have errored, but didn't")
|
||||
return false
|
||||
}
|
||||
})
|
||||
if expected != "" {
|
||||
if !strings.Contains(err.Error(), expected) {
|
||||
t.Logf("expected %q to contain %q", err.Error(), expected)
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}, time.Second*30, time.Second*1)
|
||||
}
|
||||
|
|
|
@ -5,18 +5,18 @@ import (
|
|||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"github.com/hashicorp/consul/api"
|
||||
libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
|
||||
libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func getNamespace() string {
|
||||
return ""
|
||||
}
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/consul/api"
|
||||
libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
|
||||
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
|
||||
libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
|
||||
libtopology "github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
|
||||
)
|
||||
|
||||
// randomName generates a random name of n length with the provided
|
||||
// prefix. If prefix is omitted, the then entire name is random char.
|
||||
|
@ -36,54 +36,89 @@ func TestHTTPRouteFlattening(t *testing.T) {
|
|||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
|
||||
//infrastructure set up
|
||||
listenerPort := 6000
|
||||
//create cluster
|
||||
cluster := createCluster(t, listenerPort)
|
||||
client := cluster.Agents[0].GetClient()
|
||||
service1ResponseCode := 200
|
||||
service2ResponseCode := 418
|
||||
serviceOne := createService(t, cluster, &libservice.ServiceOpts{
|
||||
Name: "service1",
|
||||
ID: "service1",
|
||||
HTTPPort: 8080,
|
||||
GRPCPort: 8079,
|
||||
}, []string{
|
||||
//customizes response code so we can distinguish between which service is responding
|
||||
"-echo-server-default-params", fmt.Sprintf("status=%d", service1ResponseCode),
|
||||
})
|
||||
serviceTwo := createService(t, cluster, &libservice.ServiceOpts{
|
||||
Name: "service2",
|
||||
ID: "service2",
|
||||
HTTPPort: 8081,
|
||||
GRPCPort: 8082,
|
||||
}, []string{
|
||||
"-echo-server-default-params", fmt.Sprintf("status=%d", service2ResponseCode),
|
||||
},
|
||||
)
|
||||
// infrastructure set up
|
||||
listenerPort := 6004
|
||||
serviceOneHTTPPort := 6005
|
||||
serviceOneGRPCPort := 6006
|
||||
serviceTwoHTTPPort := 6007
|
||||
serviceTwoGRPCPort := 6008
|
||||
|
||||
//TODO this should only matter in consul enterprise I believe?
|
||||
namespace := getNamespace()
|
||||
serviceOneName := randomName("service", 16)
|
||||
serviceTwoName := randomName("service", 16)
|
||||
serviceOneResponseCode := 200
|
||||
serviceTwoResponseCode := 418
|
||||
gatewayName := randomName("gw", 16)
|
||||
routeOneName := randomName("route", 16)
|
||||
routeTwoName := randomName("route", 16)
|
||||
path1 := "/"
|
||||
path2 := "/v2"
|
||||
|
||||
//write config entries
|
||||
proxyDefaults := &api.ProxyConfigEntry{
|
||||
Kind: api.ProxyDefaults,
|
||||
Name: api.ProxyConfigGlobal,
|
||||
clusterConfig := &libtopology.ClusterConfig{
|
||||
NumServers: 1,
|
||||
NumClients: 1,
|
||||
BuildOpts: &libcluster.BuildOptions{
|
||||
Datacenter: "dc1",
|
||||
InjectAutoEncryption: true,
|
||||
InjectGossipEncryption: true,
|
||||
AllowHTTPAnyway: true,
|
||||
},
|
||||
Ports: []int{
|
||||
listenerPort,
|
||||
serviceOneHTTPPort,
|
||||
serviceOneGRPCPort,
|
||||
serviceTwoHTTPPort,
|
||||
serviceTwoGRPCPort,
|
||||
},
|
||||
ApplyDefaultProxySettings: true,
|
||||
}
|
||||
|
||||
cluster, _, _ := libtopology.NewCluster(t, clusterConfig)
|
||||
client := cluster.Agents[0].GetClient()
|
||||
|
||||
namespace := getNamespace()
|
||||
if namespace != "" {
|
||||
ns := &api.Namespace{Name: namespace}
|
||||
_, _, err := client.Namespaces().Create(ns, nil)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
_, _, err := libservice.CreateAndRegisterStaticServerAndSidecar(cluster.Agents[0], &libservice.ServiceOpts{
|
||||
ID: serviceOneName,
|
||||
Name: serviceOneName,
|
||||
Namespace: namespace,
|
||||
HTTPPort: serviceOneHTTPPort,
|
||||
GRPCPort: serviceOneGRPCPort,
|
||||
},
|
||||
// customizes response code so we can distinguish between which service is responding
|
||||
"-echo-server-default-params", fmt.Sprintf("status=%d", serviceOneResponseCode),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, err = libservice.CreateAndRegisterStaticServerAndSidecar(cluster.Agents[0], &libservice.ServiceOpts{
|
||||
ID: serviceTwoName,
|
||||
Name: serviceTwoName,
|
||||
Namespace: namespace,
|
||||
HTTPPort: serviceTwoHTTPPort,
|
||||
GRPCPort: serviceTwoGRPCPort,
|
||||
},
|
||||
// customizes response code so we can distinguish between which service is responding
|
||||
"-echo-server-default-params", fmt.Sprintf("status=%d", serviceTwoResponseCode),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// write config entries
|
||||
proxyDefaults := &api.ProxyConfigEntry{
|
||||
Kind: api.ProxyDefaults,
|
||||
Name: api.ProxyConfigGlobal,
|
||||
Config: map[string]interface{}{
|
||||
"protocol": "http",
|
||||
},
|
||||
}
|
||||
|
||||
_, _, err := client.ConfigEntries().Set(proxyDefaults, nil)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, cluster.ConfigEntryWrite(proxyDefaults))
|
||||
|
||||
apiGateway := &api.APIGatewayConfigEntry{
|
||||
Kind: "api-gateway",
|
||||
|
@ -95,11 +130,13 @@ func TestHTTPRouteFlattening(t *testing.T) {
|
|||
Protocol: "http",
|
||||
},
|
||||
},
|
||||
Namespace: namespace,
|
||||
}
|
||||
|
||||
routeOne := &api.HTTPRouteConfigEntry{
|
||||
Kind: api.HTTPRoute,
|
||||
Name: routeOneName,
|
||||
Kind: api.HTTPRoute,
|
||||
Name: routeOneName,
|
||||
Namespace: namespace,
|
||||
Parents: []api.ResourceReference{
|
||||
{
|
||||
Kind: api.APIGateway,
|
||||
|
@ -111,12 +148,11 @@ func TestHTTPRouteFlattening(t *testing.T) {
|
|||
"test.foo",
|
||||
"test.example",
|
||||
},
|
||||
Namespace: namespace,
|
||||
Rules: []api.HTTPRouteRule{
|
||||
{
|
||||
Services: []api.HTTPService{
|
||||
{
|
||||
Name: serviceOne.GetServiceName(),
|
||||
Name: serviceOneName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
},
|
||||
|
@ -133,8 +169,9 @@ func TestHTTPRouteFlattening(t *testing.T) {
|
|||
}
|
||||
|
||||
routeTwo := &api.HTTPRouteConfigEntry{
|
||||
Kind: api.HTTPRoute,
|
||||
Name: routeTwoName,
|
||||
Kind: api.HTTPRoute,
|
||||
Name: routeTwoName,
|
||||
Namespace: namespace,
|
||||
Parents: []api.ResourceReference{
|
||||
{
|
||||
Kind: api.APIGateway,
|
||||
|
@ -145,12 +182,11 @@ func TestHTTPRouteFlattening(t *testing.T) {
|
|||
Hostnames: []string{
|
||||
"test.foo",
|
||||
},
|
||||
Namespace: namespace,
|
||||
Rules: []api.HTTPRouteRule{
|
||||
{
|
||||
Services: []api.HTTPService{
|
||||
{
|
||||
Name: serviceTwo.GetServiceName(),
|
||||
Name: serviceTwoName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
},
|
||||
|
@ -173,59 +209,60 @@ func TestHTTPRouteFlattening(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
_, _, err = client.ConfigEntries().Set(apiGateway, nil)
|
||||
require.NoError(t, err)
|
||||
_, _, err = client.ConfigEntries().Set(routeOne, nil)
|
||||
require.NoError(t, err)
|
||||
_, _, err = client.ConfigEntries().Set(routeTwo, nil)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, cluster.ConfigEntryWrite(apiGateway))
|
||||
require.NoError(t, cluster.ConfigEntryWrite(routeOne))
|
||||
require.NoError(t, cluster.ConfigEntryWrite(routeTwo))
|
||||
|
||||
//create gateway service
|
||||
gatewayService, err := libservice.NewGatewayService(context.Background(), gatewayName, "api", cluster.Agents[0], listenerPort)
|
||||
// create gateway service
|
||||
gwCfg := libservice.GatewayConfig{
|
||||
Name: gatewayName,
|
||||
Kind: "api",
|
||||
Namespace: namespace,
|
||||
}
|
||||
gatewayService, err := libservice.NewGatewayService(context.Background(), gwCfg, cluster.Agents[0], listenerPort)
|
||||
require.NoError(t, err)
|
||||
libassert.CatalogServiceExists(t, client, gatewayName)
|
||||
libassert.CatalogServiceExists(t, client, gatewayName, &api.QueryOptions{Namespace: namespace})
|
||||
|
||||
//make sure config entries have been properly created
|
||||
// make sure config entries have been properly created
|
||||
checkGatewayConfigEntry(t, client, gatewayName, namespace)
|
||||
t.Log("checking route one")
|
||||
checkHTTPRouteConfigEntry(t, client, routeOneName, namespace)
|
||||
checkHTTPRouteConfigEntry(t, client, routeTwoName, namespace)
|
||||
|
||||
//gateway resolves routes
|
||||
// gateway resolves routes
|
||||
gatewayPort, err := gatewayService.GetPort(listenerPort)
|
||||
require.NoError(t, err)
|
||||
fmt.Println("Gateway Port: ", gatewayPort)
|
||||
|
||||
//route 2 with headers
|
||||
|
||||
//Same v2 path with and without header
|
||||
// Same v2 path with and without header
|
||||
checkRoute(t, gatewayPort, "/v2", map[string]string{
|
||||
"Host": "test.foo",
|
||||
"x-v2": "v2",
|
||||
}, checkOptions{statusCode: service2ResponseCode, testName: "service2 header and path"})
|
||||
}, checkOptions{statusCode: serviceTwoResponseCode, testName: "service2 header and path"})
|
||||
checkRoute(t, gatewayPort, "/v2", map[string]string{
|
||||
"Host": "test.foo",
|
||||
}, checkOptions{statusCode: service2ResponseCode, testName: "service2 just path match"})
|
||||
}, checkOptions{statusCode: serviceTwoResponseCode, testName: "service2 just path match"})
|
||||
|
||||
////v1 path with the header
|
||||
// //v1 path with the header
|
||||
checkRoute(t, gatewayPort, "/check", map[string]string{
|
||||
"Host": "test.foo",
|
||||
"x-v2": "v2",
|
||||
}, checkOptions{statusCode: service2ResponseCode, testName: "service2 just header match"})
|
||||
}, checkOptions{statusCode: serviceTwoResponseCode, testName: "service2 just header match"})
|
||||
|
||||
checkRoute(t, gatewayPort, "/v2/path/value", map[string]string{
|
||||
"Host": "test.foo",
|
||||
"x-v2": "v2",
|
||||
}, checkOptions{statusCode: service2ResponseCode, testName: "service2 v2 with path"})
|
||||
}, checkOptions{statusCode: serviceTwoResponseCode, testName: "service2 v2 with path"})
|
||||
|
||||
//hit service 1 by hitting root path
|
||||
// hit service 1 by hitting root path
|
||||
checkRoute(t, gatewayPort, "", map[string]string{
|
||||
"Host": "test.foo",
|
||||
}, checkOptions{debug: false, statusCode: service1ResponseCode, testName: "service1 root prefix"})
|
||||
}, checkOptions{debug: false, statusCode: serviceOneResponseCode, testName: "service1 root prefix"})
|
||||
|
||||
//hit service 1 by hitting v2 path with v1 hostname
|
||||
// hit service 1 by hitting v2 path with v1 hostname
|
||||
checkRoute(t, gatewayPort, "/v2", map[string]string{
|
||||
"Host": "test.example",
|
||||
}, checkOptions{debug: false, statusCode: service1ResponseCode, testName: "service1, v2 path with v2 hostname"})
|
||||
|
||||
}, checkOptions{debug: false, statusCode: serviceOneResponseCode, testName: "service1, v2 path with v2 hostname"})
|
||||
}
|
||||
|
||||
func TestHTTPRoutePathRewrite(t *testing.T) {
|
||||
|
@ -235,59 +272,107 @@ func TestHTTPRoutePathRewrite(t *testing.T) {
|
|||
|
||||
t.Parallel()
|
||||
|
||||
//infrastructure set up
|
||||
listenerPort := 6001
|
||||
//create cluster
|
||||
cluster := createCluster(t, listenerPort)
|
||||
client := cluster.Agents[0].GetClient()
|
||||
// infrastructure set up
|
||||
listenerPort := 6009
|
||||
fooHTTPPort := 6010
|
||||
fooGRPCPort := 6011
|
||||
barHTTPPort := 6012
|
||||
barGRPCPort := 6013
|
||||
|
||||
fooName := randomName("foo", 16)
|
||||
barName := randomName("bar", 16)
|
||||
gatewayName := randomName("gw", 16)
|
||||
invalidRouteName := randomName("route", 16)
|
||||
validRouteName := randomName("route", 16)
|
||||
|
||||
// create cluster
|
||||
clusterConfig := &libtopology.ClusterConfig{
|
||||
NumServers: 1,
|
||||
NumClients: 1,
|
||||
BuildOpts: &libcluster.BuildOptions{
|
||||
Datacenter: "dc1",
|
||||
InjectAutoEncryption: true,
|
||||
InjectGossipEncryption: true,
|
||||
AllowHTTPAnyway: true,
|
||||
},
|
||||
Ports: []int{
|
||||
listenerPort,
|
||||
fooHTTPPort,
|
||||
fooGRPCPort,
|
||||
barHTTPPort,
|
||||
barGRPCPort,
|
||||
},
|
||||
ApplyDefaultProxySettings: true,
|
||||
}
|
||||
|
||||
cluster, _, _ := libtopology.NewCluster(t, clusterConfig)
|
||||
client := cluster.APIClient(0)
|
||||
|
||||
fooStatusCode := 400
|
||||
barStatusCode := 201
|
||||
fooPath := "/v1/foo"
|
||||
barPath := "/v1/bar"
|
||||
|
||||
fooService := createService(t, cluster, &libservice.ServiceOpts{
|
||||
Name: "foo",
|
||||
ID: "foo",
|
||||
HTTPPort: 8080,
|
||||
GRPCPort: 8081,
|
||||
}, []string{
|
||||
//customizes response code so we can distinguish between which service is responding
|
||||
namespace := getNamespace()
|
||||
if namespace != "" {
|
||||
ns := &api.Namespace{Name: namespace}
|
||||
_, _, err := client.Namespaces().Create(ns, nil)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
_, _, err := libservice.CreateAndRegisterStaticServerAndSidecar(cluster.Agents[0], &libservice.ServiceOpts{
|
||||
ID: fooName,
|
||||
Name: fooName,
|
||||
Namespace: namespace,
|
||||
HTTPPort: fooHTTPPort,
|
||||
GRPCPort: fooGRPCPort,
|
||||
},
|
||||
// customizes response code so we can distinguish between which service is responding
|
||||
"-echo-debug-path", fooPath,
|
||||
"-echo-server-default-params", fmt.Sprintf("status=%d", fooStatusCode),
|
||||
})
|
||||
barService := createService(t, cluster, &libservice.ServiceOpts{
|
||||
Name: "bar",
|
||||
ID: "bar",
|
||||
//TODO we can potentially get conflicts if these ports are the same
|
||||
HTTPPort: 8079,
|
||||
GRPCPort: 8078,
|
||||
}, []string{
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, err = libservice.CreateAndRegisterStaticServerAndSidecar(cluster.Agents[0], &libservice.ServiceOpts{
|
||||
ID: barName,
|
||||
Name: barName,
|
||||
Namespace: namespace,
|
||||
HTTPPort: barHTTPPort,
|
||||
GRPCPort: barGRPCPort,
|
||||
},
|
||||
// customizes response code so we can distinguish between which service is responding
|
||||
"-echo-debug-path", barPath,
|
||||
"-echo-server-default-params", fmt.Sprintf("status=%d", barStatusCode),
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
namespace := getNamespace()
|
||||
gatewayName := randomName("gw", 16)
|
||||
invalidRouteName := randomName("route", 16)
|
||||
validRouteName := randomName("route", 16)
|
||||
fooUnrewritten := "/foo"
|
||||
barUnrewritten := "/bar"
|
||||
|
||||
//write config entries
|
||||
// write config entries
|
||||
proxyDefaults := &api.ProxyConfigEntry{
|
||||
Kind: api.ProxyDefaults,
|
||||
Name: api.ProxyConfigGlobal,
|
||||
Namespace: namespace,
|
||||
Namespace: "", // proxy-defaults can only be set in the default namespace
|
||||
Config: map[string]interface{}{
|
||||
"protocol": "http",
|
||||
},
|
||||
}
|
||||
|
||||
_, _, err := client.ConfigEntries().Set(proxyDefaults, nil)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, cluster.ConfigEntryWrite(proxyDefaults))
|
||||
|
||||
apiGateway := createGateway(gatewayName, "http", listenerPort)
|
||||
apiGateway := &api.APIGatewayConfigEntry{
|
||||
Kind: api.APIGateway,
|
||||
Name: gatewayName,
|
||||
Listeners: []api.APIGatewayListener{
|
||||
{
|
||||
Name: "listener",
|
||||
Port: listenerPort,
|
||||
Protocol: "http",
|
||||
},
|
||||
},
|
||||
Namespace: namespace,
|
||||
}
|
||||
|
||||
fooRoute := &api.HTTPRouteConfigEntry{
|
||||
Kind: api.HTTPRoute,
|
||||
|
@ -312,7 +397,7 @@ func TestHTTPRoutePathRewrite(t *testing.T) {
|
|||
},
|
||||
Services: []api.HTTPService{
|
||||
{
|
||||
Name: fooService.GetServiceName(),
|
||||
Name: fooName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
},
|
||||
|
@ -351,7 +436,7 @@ func TestHTTPRoutePathRewrite(t *testing.T) {
|
|||
},
|
||||
Services: []api.HTTPService{
|
||||
{
|
||||
Name: barService.GetServiceName(),
|
||||
Name: barName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
},
|
||||
|
@ -367,19 +452,21 @@ func TestHTTPRoutePathRewrite(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
_, _, err = client.ConfigEntries().Set(apiGateway, nil)
|
||||
require.NoError(t, err)
|
||||
_, _, err = client.ConfigEntries().Set(fooRoute, nil)
|
||||
require.NoError(t, err)
|
||||
_, _, err = client.ConfigEntries().Set(barRoute, nil)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, cluster.ConfigEntryWrite(apiGateway))
|
||||
require.NoError(t, cluster.ConfigEntryWrite(fooRoute))
|
||||
require.NoError(t, cluster.ConfigEntryWrite(barRoute))
|
||||
|
||||
//create gateway service
|
||||
gatewayService, err := libservice.NewGatewayService(context.Background(), gatewayName, "api", cluster.Agents[0], listenerPort)
|
||||
// create gateway service
|
||||
gwCfg := libservice.GatewayConfig{
|
||||
Name: gatewayName,
|
||||
Kind: "api",
|
||||
Namespace: namespace,
|
||||
}
|
||||
gatewayService, err := libservice.NewGatewayService(context.Background(), gwCfg, cluster.Agents[0], listenerPort)
|
||||
require.NoError(t, err)
|
||||
libassert.CatalogServiceExists(t, client, gatewayName)
|
||||
libassert.CatalogServiceExists(t, client, gatewayName, &api.QueryOptions{Namespace: namespace})
|
||||
|
||||
//make sure config entries have been properly created
|
||||
// make sure config entries have been properly created
|
||||
checkGatewayConfigEntry(t, client, gatewayName, namespace)
|
||||
checkHTTPRouteConfigEntry(t, client, invalidRouteName, namespace)
|
||||
checkHTTPRouteConfigEntry(t, client, validRouteName, namespace)
|
||||
|
@ -387,71 +474,100 @@ func TestHTTPRoutePathRewrite(t *testing.T) {
|
|||
gatewayPort, err := gatewayService.GetPort(listenerPort)
|
||||
require.NoError(t, err)
|
||||
|
||||
//TODO these were the assertions we had in the original test. potentially would want more test cases
|
||||
// TODO these were the assertions we had in the original test. potentially would want more test cases
|
||||
|
||||
//NOTE: Hitting the debug path code overrides default expected value
|
||||
// NOTE: Hitting the debug path code overrides default expected value
|
||||
debugExpectedStatusCode := 200
|
||||
|
||||
//hit foo, making sure path is being rewritten by hitting the debug page
|
||||
// hit foo, making sure path is being rewritten by hitting the debug page
|
||||
checkRoute(t, gatewayPort, fooUnrewritten, map[string]string{
|
||||
"Host": "test.foo",
|
||||
}, checkOptions{debug: true, statusCode: debugExpectedStatusCode, testName: "foo service"})
|
||||
//make sure foo is being sent to proper service
|
||||
// make sure foo is being sent to proper service
|
||||
checkRoute(t, gatewayPort, fooUnrewritten+"/foo", map[string]string{
|
||||
"Host": "test.foo",
|
||||
}, checkOptions{debug: false, statusCode: fooStatusCode, testName: "foo service"})
|
||||
}, checkOptions{debug: false, statusCode: fooStatusCode, testName: "foo service 2"})
|
||||
|
||||
//hit bar, making sure its been rewritten
|
||||
// hit bar, making sure its been rewritten
|
||||
checkRoute(t, gatewayPort, barUnrewritten, map[string]string{
|
||||
"Host": "test.foo",
|
||||
}, checkOptions{debug: true, statusCode: debugExpectedStatusCode, testName: "bar service"})
|
||||
|
||||
//hit bar, making sure its being sent to the proper service
|
||||
// hit bar, making sure its being sent to the proper service
|
||||
checkRoute(t, gatewayPort, barUnrewritten+"/bar", map[string]string{
|
||||
"Host": "test.foo",
|
||||
}, checkOptions{debug: false, statusCode: barStatusCode, testName: "bar service"})
|
||||
|
||||
}
|
||||
|
||||
func TestHTTPRouteParentRefChange(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
|
||||
// infrastructure set up
|
||||
address := "localhost"
|
||||
|
||||
listenerOnePort := 6000
|
||||
listenerTwoPort := 6001
|
||||
listenerOnePort := 6014
|
||||
listenerTwoPort := 6015
|
||||
serviceHTTPPort := 6016
|
||||
serviceGRPCPort := 6017
|
||||
|
||||
// create cluster and service
|
||||
cluster := createCluster(t, listenerOnePort, listenerTwoPort)
|
||||
client := cluster.Agents[0].GetClient()
|
||||
service := createService(t, cluster, &libservice.ServiceOpts{
|
||||
Name: "service",
|
||||
ID: "service",
|
||||
HTTPPort: 8080,
|
||||
GRPCPort: 8079,
|
||||
}, []string{})
|
||||
|
||||
// getNamespace() should always return an empty string in Consul OSS
|
||||
namespace := getNamespace()
|
||||
serviceName := randomName("service", 16)
|
||||
gatewayOneName := randomName("gw1", 16)
|
||||
gatewayTwoName := randomName("gw2", 16)
|
||||
routeName := randomName("route", 16)
|
||||
|
||||
// create cluster
|
||||
clusterConfig := &libtopology.ClusterConfig{
|
||||
NumServers: 1,
|
||||
NumClients: 1,
|
||||
BuildOpts: &libcluster.BuildOptions{
|
||||
Datacenter: "dc1",
|
||||
InjectAutoEncryption: true,
|
||||
InjectGossipEncryption: true,
|
||||
AllowHTTPAnyway: true,
|
||||
},
|
||||
Ports: []int{
|
||||
listenerOnePort,
|
||||
listenerTwoPort,
|
||||
serviceHTTPPort,
|
||||
serviceGRPCPort,
|
||||
},
|
||||
ApplyDefaultProxySettings: true,
|
||||
}
|
||||
|
||||
cluster, _, _ := libtopology.NewCluster(t, clusterConfig)
|
||||
client := cluster.APIClient(0)
|
||||
|
||||
// getNamespace() should always return an empty string in Consul OSS
|
||||
namespace := getNamespace()
|
||||
if namespace != "" {
|
||||
ns := &api.Namespace{Name: namespace}
|
||||
_, _, err := client.Namespaces().Create(ns, nil)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
_, _, err := libservice.CreateAndRegisterStaticServerAndSidecar(cluster.Agents[0], &libservice.ServiceOpts{
|
||||
ID: serviceName,
|
||||
Name: serviceName,
|
||||
Namespace: namespace,
|
||||
HTTPPort: serviceHTTPPort,
|
||||
GRPCPort: serviceGRPCPort,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// write config entries
|
||||
proxyDefaults := &api.ProxyConfigEntry{
|
||||
Kind: api.ProxyDefaults,
|
||||
Name: api.ProxyConfigGlobal,
|
||||
Namespace: namespace,
|
||||
Kind: api.ProxyDefaults,
|
||||
Name: api.ProxyConfigGlobal,
|
||||
Config: map[string]interface{}{
|
||||
"protocol": "http",
|
||||
},
|
||||
}
|
||||
_, _, err := client.ConfigEntries().Set(proxyDefaults, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
require.NoError(t, cluster.ConfigEntryWrite(proxyDefaults))
|
||||
|
||||
// create gateway config entry
|
||||
gatewayOne := &api.APIGatewayConfigEntry{
|
||||
|
@ -465,24 +581,20 @@ func TestHTTPRouteParentRefChange(t *testing.T) {
|
|||
Hostname: "test.foo",
|
||||
},
|
||||
},
|
||||
Namespace: namespace,
|
||||
}
|
||||
_, _, err = client.ConfigEntries().Set(gatewayOne, nil)
|
||||
assert.NoError(t, err)
|
||||
require.Eventually(t, func() bool {
|
||||
entry, _, err := client.ConfigEntries().Get(api.APIGateway, gatewayOneName, &api.QueryOptions{Namespace: namespace})
|
||||
assert.NoError(t, err)
|
||||
if entry == nil {
|
||||
return false
|
||||
}
|
||||
apiEntry := entry.(*api.APIGatewayConfigEntry)
|
||||
t.Log(entry)
|
||||
return isAccepted(apiEntry.Status.Conditions)
|
||||
}, time.Second*10, time.Second*1)
|
||||
require.NoError(t, cluster.ConfigEntryWrite(gatewayOne))
|
||||
checkGatewayConfigEntry(t, client, gatewayOneName, namespace)
|
||||
|
||||
// create gateway service
|
||||
gatewayOneService, err := libservice.NewGatewayService(context.Background(), gatewayOneName, "api", cluster.Agents[0], listenerOnePort)
|
||||
gwOneCfg := libservice.GatewayConfig{
|
||||
Name: gatewayOneName,
|
||||
Kind: "api",
|
||||
Namespace: namespace,
|
||||
}
|
||||
gatewayOneService, err := libservice.NewGatewayService(context.Background(), gwOneCfg, cluster.Agents[0], listenerOnePort)
|
||||
require.NoError(t, err)
|
||||
libassert.CatalogServiceExists(t, client, gatewayOneName)
|
||||
libassert.CatalogServiceExists(t, client, gatewayOneName, &api.QueryOptions{Namespace: namespace})
|
||||
|
||||
// create gateway config entry
|
||||
gatewayTwo := &api.APIGatewayConfigEntry{
|
||||
|
@ -496,24 +608,20 @@ func TestHTTPRouteParentRefChange(t *testing.T) {
|
|||
Hostname: "test.example",
|
||||
},
|
||||
},
|
||||
Namespace: namespace,
|
||||
}
|
||||
_, _, err = client.ConfigEntries().Set(gatewayTwo, nil)
|
||||
assert.NoError(t, err)
|
||||
require.Eventually(t, func() bool {
|
||||
entry, _, err := client.ConfigEntries().Get(api.APIGateway, gatewayTwoName, &api.QueryOptions{Namespace: namespace})
|
||||
assert.NoError(t, err)
|
||||
if entry == nil {
|
||||
return false
|
||||
}
|
||||
apiEntry := entry.(*api.APIGatewayConfigEntry)
|
||||
t.Log(entry)
|
||||
return isAccepted(apiEntry.Status.Conditions)
|
||||
}, time.Second*10, time.Second*1)
|
||||
require.NoError(t, cluster.ConfigEntryWrite(gatewayTwo))
|
||||
checkGatewayConfigEntry(t, client, gatewayTwoName, namespace)
|
||||
|
||||
// create gateway service
|
||||
gatewayTwoService, err := libservice.NewGatewayService(context.Background(), gatewayTwoName, "api", cluster.Agents[0], listenerTwoPort)
|
||||
gwTwoCfg := libservice.GatewayConfig{
|
||||
Name: gatewayTwoName,
|
||||
Kind: "api",
|
||||
Namespace: namespace,
|
||||
}
|
||||
gatewayTwoService, err := libservice.NewGatewayService(context.Background(), gwTwoCfg, cluster.Agents[0], listenerTwoPort)
|
||||
require.NoError(t, err)
|
||||
libassert.CatalogServiceExists(t, client, gatewayTwoName)
|
||||
libassert.CatalogServiceExists(t, client, gatewayTwoName, &api.QueryOptions{Namespace: namespace})
|
||||
|
||||
// create route to service, targeting first gateway
|
||||
route := &api.HTTPRouteConfigEntry{
|
||||
|
@ -535,7 +643,7 @@ func TestHTTPRouteParentRefChange(t *testing.T) {
|
|||
{
|
||||
Services: []api.HTTPService{
|
||||
{
|
||||
Name: service.GetServiceName(),
|
||||
Name: serviceName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
},
|
||||
|
@ -550,8 +658,9 @@ func TestHTTPRouteParentRefChange(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
_, _, err = client.ConfigEntries().Set(route, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
require.NoError(t, cluster.ConfigEntryWrite(route))
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
entry, _, err := client.ConfigEntries().Get(api.HTTPRoute, routeName, &api.QueryOptions{Namespace: namespace})
|
||||
assert.NoError(t, err)
|
||||
|
@ -593,8 +702,8 @@ func TestHTTPRouteParentRefChange(t *testing.T) {
|
|||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
_, _, err = client.ConfigEntries().Set(route, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
require.NoError(t, cluster.ConfigEntryWrite(route))
|
||||
require.Eventually(t, func() bool {
|
||||
entry, _, err := client.ConfigEntries().Get(api.HTTPRoute, routeName, &api.QueryOptions{Namespace: namespace})
|
||||
assert.NoError(t, err)
|
||||
|
|
|
@ -0,0 +1,8 @@
|
|||
//go:build !consulent
|
||||
// +build !consulent
|
||||
|
||||
package gateways
|
||||
|
||||
func getNamespace() string {
|
||||
return ""
|
||||
}
|
|
@ -45,9 +45,14 @@ func TestAccessLogs(t *testing.T) {
|
|||
t.Skip()
|
||||
}
|
||||
|
||||
cluster, _, _ := topology.NewPeeringCluster(t, 1, &libcluster.BuildOptions{
|
||||
Datacenter: "dc1",
|
||||
InjectAutoEncryption: true,
|
||||
cluster, _, _ := topology.NewCluster(t, &topology.ClusterConfig{
|
||||
NumServers: 1,
|
||||
NumClients: 1,
|
||||
ApplyDefaultProxySettings: true,
|
||||
BuildOpts: &libcluster.BuildOptions{
|
||||
Datacenter: "dc1",
|
||||
InjectAutoEncryption: true,
|
||||
},
|
||||
})
|
||||
|
||||
// Turn on access logs. Do this before starting the sidecars so that they inherit the configuration
|
||||
|
@ -70,7 +75,7 @@ func TestAccessLogs(t *testing.T) {
|
|||
// Validate Custom JSON
|
||||
require.Eventually(t, func() bool {
|
||||
libassert.HTTPServiceEchoes(t, "localhost", port, "banana")
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server")
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server", "")
|
||||
client := libassert.ServiceLogContains(t, clientService, "\"banana_path\":\"/banana\"")
|
||||
server := libassert.ServiceLogContains(t, serverService, "\"banana_path\":\"/banana\"")
|
||||
return client && server
|
||||
|
@ -112,7 +117,7 @@ func TestAccessLogs(t *testing.T) {
|
|||
_, port = clientService.GetAddr()
|
||||
require.Eventually(t, func() bool {
|
||||
libassert.HTTPServiceEchoes(t, "localhost", port, "orange")
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server")
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server", "")
|
||||
client := libassert.ServiceLogContains(t, clientService, "Orange you glad I didn't say banana: /orange, -")
|
||||
server := libassert.ServiceLogContains(t, serverService, "Orange you glad I didn't say banana: /orange, -")
|
||||
return client && server
|
||||
|
|
|
@ -94,7 +94,7 @@ func TestPeering_RotateServerAndCAThenFail_(t *testing.T) {
|
|||
|
||||
_, port := clientSidecarService.GetAddr()
|
||||
libassert.HTTPServiceEchoes(t, "localhost", port, "")
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server")
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server", "")
|
||||
}
|
||||
|
||||
testutil.RunStep(t, "rotate exporting cluster's root CA", func(t *testing.T) {
|
||||
|
@ -144,7 +144,7 @@ func TestPeering_RotateServerAndCAThenFail_(t *testing.T) {
|
|||
// Connectivity should still be contained
|
||||
_, port := clientSidecarService.GetAddr()
|
||||
libassert.HTTPServiceEchoes(t, "localhost", port, "")
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server")
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server", "")
|
||||
|
||||
verifySidecarHasTwoRootCAs(t, clientSidecarService)
|
||||
})
|
||||
|
@ -166,7 +166,7 @@ func TestPeering_RotateServerAndCAThenFail_(t *testing.T) {
|
|||
|
||||
_, port := clientSidecarService.GetAddr()
|
||||
libassert.HTTPServiceEchoes(t, "localhost", port, "")
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server")
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server", "")
|
||||
})
|
||||
}
|
||||
|
||||
|
|
|
@ -18,9 +18,14 @@ import (
|
|||
|
||||
func TestTroubleshootProxy(t *testing.T) {
|
||||
t.Parallel()
|
||||
cluster, _, _ := topology.NewPeeringCluster(t, 1, &libcluster.BuildOptions{
|
||||
Datacenter: "dc1",
|
||||
InjectAutoEncryption: true,
|
||||
cluster, _, _ := topology.NewCluster(t, &topology.ClusterConfig{
|
||||
NumServers: 1,
|
||||
NumClients: 1,
|
||||
BuildOpts: &libcluster.BuildOptions{
|
||||
Datacenter: "dc1",
|
||||
InjectAutoEncryption: true,
|
||||
},
|
||||
ApplyDefaultProxySettings: true,
|
||||
})
|
||||
|
||||
serverService, clientService := topology.CreateServices(t, cluster)
|
||||
|
|
|
@ -36,11 +36,16 @@ func TestACL_Upgrade_Node_Token(t *testing.T) {
|
|||
|
||||
run := func(t *testing.T, tc testcase) {
|
||||
// NOTE: Disable auto.encrypt due to its conflict with ACL token during bootstrap
|
||||
cluster, _, _ := libtopology.NewPeeringCluster(t, 1, &libcluster.BuildOptions{
|
||||
Datacenter: "dc1",
|
||||
ConsulVersion: tc.oldversion,
|
||||
InjectAutoEncryption: false,
|
||||
ACLEnabled: true,
|
||||
cluster, _, _ := libtopology.NewCluster(t, &libtopology.ClusterConfig{
|
||||
NumServers: 1,
|
||||
NumClients: 1,
|
||||
BuildOpts: &libcluster.BuildOptions{
|
||||
Datacenter: "dc1",
|
||||
ConsulVersion: tc.oldversion,
|
||||
InjectAutoEncryption: false,
|
||||
ACLEnabled: true,
|
||||
},
|
||||
ApplyDefaultProxySettings: true,
|
||||
})
|
||||
|
||||
agentToken, err := cluster.CreateAgentToken("dc1",
|
||||
|
|
|
@ -67,7 +67,7 @@ func TestTrafficManagement_ServiceResolver(t *testing.T) {
|
|||
}
|
||||
_, serverConnectProxyV2, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOptsV2)
|
||||
require.NoError(t, err)
|
||||
libassert.CatalogServiceExists(t, client, "static-server")
|
||||
libassert.CatalogServiceExists(t, client, "static-server", nil)
|
||||
|
||||
// TODO: verify the number of instance of static-server is 3
|
||||
libassert.AssertServiceHasHealthyInstances(t, node, libservice.StaticServerServiceName, true, 3)
|
||||
|
@ -121,7 +121,7 @@ func TestTrafficManagement_ServiceResolver(t *testing.T) {
|
|||
libassert.AssertUpstreamEndpointStatus(t, adminPort, "v2.static-server.default", "HEALTHY", 1)
|
||||
|
||||
// static-client upstream should connect to static-server-v2 because the default subset value is to v2 set in the service resolver
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server-v2")
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server-v2", "")
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -194,7 +194,7 @@ func TestTrafficManagement_ServiceResolver(t *testing.T) {
|
|||
libassert.AssertUpstreamEndpointStatus(t, adminPort, "test.static-server.default", "UNHEALTHY", 1)
|
||||
|
||||
// static-client upstream should connect to static-server since it is passing
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), libservice.StaticServerServiceName)
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), libservice.StaticServerServiceName, "")
|
||||
|
||||
// ###########################
|
||||
// ## with onlypassing=false
|
||||
|
@ -235,7 +235,7 @@ func TestTrafficManagement_ServiceResolver(t *testing.T) {
|
|||
}
|
||||
_, server2ConnectProxy, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOpts2)
|
||||
require.NoError(t, err)
|
||||
libassert.CatalogServiceExists(t, client, libservice.StaticServer2ServiceName)
|
||||
libassert.CatalogServiceExists(t, client, libservice.StaticServer2ServiceName, nil)
|
||||
|
||||
serviceOptsV1 := &libservice.ServiceOpts{
|
||||
Name: libservice.StaticServer2ServiceName,
|
||||
|
@ -256,7 +256,7 @@ func TestTrafficManagement_ServiceResolver(t *testing.T) {
|
|||
}
|
||||
_, server2ConnectProxyV2, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOptsV2)
|
||||
require.NoError(t, err)
|
||||
libassert.CatalogServiceExists(t, client, libservice.StaticServer2ServiceName)
|
||||
libassert.CatalogServiceExists(t, client, libservice.StaticServer2ServiceName, nil)
|
||||
|
||||
// Register static-server service resolver
|
||||
serviceResolver := &api.ServiceResolverConfigEntry{
|
||||
|
@ -318,7 +318,7 @@ func TestTrafficManagement_ServiceResolver(t *testing.T) {
|
|||
_, appPort := clientConnectProxy.GetAddr()
|
||||
_, adminPort := clientConnectProxy.GetAdminAddr()
|
||||
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", appPort), "static-server-2-v2")
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", appPort), "static-server-2-v2", "")
|
||||
libassert.AssertUpstreamEndpointStatus(t, adminPort, "v2.static-server-2.default", "HEALTHY", 1)
|
||||
},
|
||||
},
|
||||
|
@ -335,14 +335,19 @@ func TestTrafficManagement_ServiceResolver(t *testing.T) {
|
|||
if oldVersionTmp.LessThan(libutils.Version_1_14) {
|
||||
buildOpts.InjectAutoEncryption = false
|
||||
}
|
||||
cluster, _, _ := topology.NewPeeringCluster(t, 1, buildOpts)
|
||||
cluster, _, _ := topology.NewCluster(t, &topology.ClusterConfig{
|
||||
NumServers: 1,
|
||||
NumClients: 1,
|
||||
BuildOpts: buildOpts,
|
||||
ApplyDefaultProxySettings: true,
|
||||
})
|
||||
node := cluster.Agents[0]
|
||||
client := node.GetClient()
|
||||
|
||||
staticClientProxy, staticServerProxy, err := createStaticClientAndServer(cluster)
|
||||
require.NoError(t, err)
|
||||
libassert.CatalogServiceExists(t, client, libservice.StaticServerServiceName)
|
||||
libassert.CatalogServiceExists(t, client, fmt.Sprintf("%s-sidecar-proxy", libservice.StaticClientServiceName))
|
||||
libassert.CatalogServiceExists(t, client, libservice.StaticServerServiceName, nil)
|
||||
libassert.CatalogServiceExists(t, client, fmt.Sprintf("%s-sidecar-proxy", libservice.StaticClientServiceName), nil)
|
||||
|
||||
err = cluster.ConfigEntryWrite(&api.ProxyConfigEntry{
|
||||
Kind: api.ProxyDefaults,
|
||||
|
|
|
@ -102,7 +102,7 @@ func TestPeering_Upgrade_ControlPlane_MGW(t *testing.T) {
|
|||
require.NoError(t, clientSidecarService.Restart())
|
||||
libassert.AssertUpstreamEndpointStatus(t, adminPort, fmt.Sprintf("static-server.default.%s.external", libtopology.DialingPeerName), "HEALTHY", 1)
|
||||
libassert.HTTPServiceEchoes(t, "localhost", port, "")
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server")
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server", "")
|
||||
}
|
||||
|
||||
for _, tc := range tcs {
|
||||
|
|
|
@ -67,7 +67,7 @@ func TestPeering_UpgradeToTarget_fromLatest(t *testing.T) {
|
|||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
libassert.CatalogServiceExists(t, c.Clients()[0].GetClient(), libservice.StaticServer2ServiceName)
|
||||
libassert.CatalogServiceExists(t, c.Clients()[0].GetClient(), libservice.StaticServer2ServiceName, nil)
|
||||
|
||||
err = c.ConfigEntryWrite(&api.ProxyConfigEntry{
|
||||
Kind: api.ProxyDefaults,
|
||||
|
@ -100,7 +100,7 @@ func TestPeering_UpgradeToTarget_fromLatest(t *testing.T) {
|
|||
return serverConnectProxy, nil, func() {}, err
|
||||
},
|
||||
extraAssertion: func(clientUpstreamPort int) {
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d/static-server-2", clientUpstreamPort), "static-server-2")
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d/static-server-2", clientUpstreamPort), "static-server-2", "")
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -201,7 +201,7 @@ func TestPeering_UpgradeToTarget_fromLatest(t *testing.T) {
|
|||
GRPCPort: 8078,
|
||||
}
|
||||
_, serverConnectProxy, err := libservice.CreateAndRegisterStaticServerAndSidecar(dialing.Clients()[0], serviceOpts)
|
||||
libassert.CatalogServiceExists(t, dialing.Clients()[0].GetClient(), libservice.StaticServerServiceName)
|
||||
libassert.CatalogServiceExists(t, dialing.Clients()[0].GetClient(), libservice.StaticServerServiceName, nil)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
@ -293,7 +293,7 @@ func TestPeering_UpgradeToTarget_fromLatest(t *testing.T) {
|
|||
GRPCPort: 8078,
|
||||
}
|
||||
_, serverConnectProxy, err := libservice.CreateAndRegisterStaticServerAndSidecar(dialing.Clients()[0], serviceOpts)
|
||||
libassert.CatalogServiceExists(t, dialing.Clients()[0].GetClient(), libservice.StaticServerServiceName)
|
||||
libassert.CatalogServiceExists(t, dialing.Clients()[0].GetClient(), libservice.StaticServerServiceName, nil)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
@ -301,14 +301,14 @@ func TestPeering_UpgradeToTarget_fromLatest(t *testing.T) {
|
|||
_, appPorts := clientConnectProxy.GetAddrs()
|
||||
assertionFn := func() {
|
||||
// assert traffic can fail-over to static-server in peered cluster and restor to local static-server
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", appPorts[0]), "static-server-dialing")
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", appPorts[0]), "static-server-dialing", "")
|
||||
require.NoError(t, serverConnectProxy.Stop())
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", appPorts[0]), "static-server")
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", appPorts[0]), "static-server", "")
|
||||
require.NoError(t, serverConnectProxy.Start())
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", appPorts[0]), "static-server-dialing")
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", appPorts[0]), "static-server-dialing", "")
|
||||
|
||||
// assert peer-static-server resolves to static-server in peered cluster
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", appPorts[1]), "static-server")
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", appPorts[1]), "static-server", "")
|
||||
}
|
||||
return serverConnectProxy, clientConnectProxy, assertionFn, nil
|
||||
},
|
||||
|
@ -376,7 +376,7 @@ func TestPeering_UpgradeToTarget_fromLatest(t *testing.T) {
|
|||
_, adminPort := clientSidecarService.GetAdminAddr()
|
||||
libassert.AssertUpstreamEndpointStatus(t, adminPort, fmt.Sprintf("static-server.default.%s.external", libtopology.DialingPeerName), "HEALTHY", 1)
|
||||
libassert.HTTPServiceEchoes(t, "localhost", port, "")
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server")
|
||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server", "")
|
||||
|
||||
// TODO: restart static-server-2's sidecar
|
||||
tc.extraAssertion(appPort)
|
||||
|
@ -439,7 +439,11 @@ func createAndRegisterStaticClientSidecarWith2Upstreams(c *cluster.Cluster, dest
|
|||
}
|
||||
|
||||
// Create a service and proxy instance
|
||||
clientConnectProxy, err := libservice.NewConnectService(context.Background(), fmt.Sprintf("%s-sidecar", libservice.StaticClientServiceName), libservice.StaticClientServiceName, []int{cluster.ServiceUpstreamLocalBindPort, cluster.ServiceUpstreamLocalBindPort2}, node)
|
||||
sidecarCfg := libservice.SidecarConfig{
|
||||
Name: fmt.Sprintf("%s-sidecar", libservice.StaticClientServiceName),
|
||||
ServiceID: libservice.StaticClientServiceName,
|
||||
}
|
||||
clientConnectProxy, err := libservice.NewConnectService(context.Background(), sidecarCfg, []int{cluster.ServiceUpstreamLocalBindPort, cluster.ServiceUpstreamLocalBindPort2}, node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -37,7 +37,11 @@ func TestPeering_WanFedSecondaryDC(t *testing.T) {
|
|||
|
||||
t.Run("secondary dc can peer to alpha dc", func(t *testing.T) {
|
||||
// Create the gateway
|
||||
_, err := libservice.NewGatewayService(context.Background(), "mesh", "mesh", c3.Servers()[0])
|
||||
gwCfg := libservice.GatewayConfig{
|
||||
Name: "mesh",
|
||||
Kind: "mesh",
|
||||
}
|
||||
_, err := libservice.NewGatewayService(context.Background(), gwCfg, c3.Servers()[0])
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create the peering connection
|
||||
|
|
Loading…
Reference in New Issue