mirror of https://github.com/hashicorp/consul
This removes any references to v2 integration tests from:
- envoy integration tests (test/integration/connect)
- container tests (test/integration/consul-container)
- deployer tests (test-integ)

pull/21066/head
R.B. Boyer · 7 months ago · committed by GitHub
16 changed files with 10 additions and 2745 deletions
@@ -1,504 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package catalogv2

import (
	"fmt"
	"testing"

	pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
	pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
	pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
	"github.com/hashicorp/consul/proto-public/pbresource"
	libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
	"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest"
	"github.com/hashicorp/consul/testing/deployer/topology"

	"github.com/hashicorp/consul/test-integ/topoutil"
)

func TestSplitterFeaturesL7ExplicitDestinations(t *testing.T) {
	tenancies := []*pbresource.Tenancy{
		{
			Partition: "default",
			Namespace: "default",
		},
	}
	if utils.IsEnterprise() {
		tenancies = append(tenancies, &pbresource.Tenancy{
			Partition: "part1",
			Namespace: "default",
		})
		tenancies = append(tenancies, &pbresource.Tenancy{
			Partition: "part1",
			Namespace: "nsa",
		})
		tenancies = append(tenancies, &pbresource.Tenancy{
			Partition: "default",
			Namespace: "nsa",
		})
	}
	cfg := testSplitterFeaturesL7ExplicitDestinationsCreator{
		tenancies: tenancies,
	}.NewConfig(t)

	sp := sprawltest.Launch(t, cfg)

	var (
		asserter = topoutil.NewAsserter(sp)

		topo    = sp.Topology()
		cluster = topo.Clusters["dc1"]

		ships = topo.ComputeRelationships()
	)

	clientV2 := sp.ResourceServiceClientForCluster(cluster.Name)

	t.Log(topology.RenderRelationships(ships))

	for _, tenancy := range tenancies {
		// Make sure things are in v2.
		libassert.CatalogV2ServiceHasEndpointCount(t, clientV2, "static-client", tenancy, 1)
		libassert.CatalogV2ServiceHasEndpointCount(t, clientV2, "static-server-v1", tenancy, 1)
		libassert.CatalogV2ServiceHasEndpointCount(t, clientV2, "static-server-v2", tenancy, 1)
		libassert.CatalogV2ServiceHasEndpointCount(t, clientV2, "static-server", tenancy, 0)
	}

	// Check relationships
	for _, ship := range ships {
		t.Run("relationship: "+ship.String(), func(t *testing.T) {
			var (
				wrk  = ship.Caller
				dest = ship.Destination
			)

			v1ID := dest.ID
			v1ID.Name = "static-server-v1"
			v1ClusterPrefix := clusterPrefix(dest.PortName, v1ID, dest.Cluster)

			v2ID := dest.ID
			v2ID.Name = "static-server-v2"
			v2ClusterPrefix := clusterPrefix(dest.PortName, v2ID, dest.Cluster)

			// we expect 2 clusters, one for each leg of the split
			asserter.DestinationEndpointStatus(t, wrk, v1ClusterPrefix+".", "HEALTHY", 1)
			asserter.DestinationEndpointStatus(t, wrk, v2ClusterPrefix+".", "HEALTHY", 1)

			// Both should be possible.
			v1Expect := fmt.Sprintf("%s::%s", cluster.Name, v1ID.String())
			v2Expect := fmt.Sprintf("%s::%s", cluster.Name, v2ID.String())

			switch dest.PortName {
			case "tcp":
				asserter.CheckBlankspaceNameTrafficSplitViaTCP(t, wrk, dest,
					map[string]int{v1Expect: 10, v2Expect: 90})
			case "grpc":
				asserter.CheckBlankspaceNameTrafficSplitViaGRPC(t, wrk, dest,
					map[string]int{v1Expect: 10, v2Expect: 90})
			case "http":
				asserter.CheckBlankspaceNameTrafficSplitViaHTTP(t, wrk, dest, false, "/",
					map[string]int{v1Expect: 10, v2Expect: 90})
			case "http2":
				asserter.CheckBlankspaceNameTrafficSplitViaHTTP(t, wrk, dest, true, "/",
					map[string]int{v1Expect: 10, v2Expect: 90})
			default:
				t.Fatalf("unexpected port name: %s", dest.PortName)
			}
		})
	}
}

type testSplitterFeaturesL7ExplicitDestinationsCreator struct {
	tenancies []*pbresource.Tenancy
}

func (c testSplitterFeaturesL7ExplicitDestinationsCreator) NewConfig(t *testing.T) *topology.Config {
	const clusterName = "dc1"

	servers := topoutil.NewTopologyServerSet(clusterName+"-server", 3, []string{clusterName, "wan"}, nil)

	cluster := &topology.Cluster{
		Enterprise: utils.IsEnterprise(),
		Name:       clusterName,
		Nodes:      servers,
		Services:   make(map[topology.ID]*pbcatalog.Service),
	}

	lastNode := 0
	nodeName := func() string {
		lastNode++
		return fmt.Sprintf("%s-box%d", clusterName, lastNode)
	}

	for _, ten := range c.tenancies {
		c.topologyConfigAddNodes(t, cluster, nodeName, ten)
	}

	return &topology.Config{
		Images: utils.TargetImages(),
		Networks: []*topology.Network{
			{Name: clusterName},
			{Name: "wan", Type: "wan"},
		},
		Clusters: []*topology.Cluster{
			cluster,
		},
	}
}

func (c testSplitterFeaturesL7ExplicitDestinationsCreator) topologyConfigAddNodes(
	t *testing.T,
	cluster *topology.Cluster,
	nodeName func() string,
	currentTenancy *pbresource.Tenancy,
) {
	clusterName := cluster.Name

	newID := func(name string, tenancy *pbresource.Tenancy) topology.ID {
		return topology.ID{
			Partition: tenancy.Partition,
			Namespace: tenancy.Namespace,
			Name:      name,
		}
	}

	tenancy := &pbresource.Tenancy{
		Partition: currentTenancy.Partition,
		Namespace: currentTenancy.Namespace,
	}

	v1ServerNode := &topology.Node{
		Kind:      topology.NodeKindDataplane,
		Version:   topology.NodeVersionV2,
		Partition: currentTenancy.Partition,
		Name:      nodeName(),
		Workloads: []*topology.Workload{
			topoutil.NewBlankspaceWorkloadWithDefaults(
				clusterName,
				newID("static-server-v1", tenancy),
				topology.NodeVersionV2,
				func(wrk *topology.Workload) {
					wrk.V2Services = []string{"static-server-v1", "static-server"}
					wrk.Meta = map[string]string{
						"version": "v1",
					}
					wrk.WorkloadIdentity = "static-server-v1"
				},
			),
		},
	}
	v2ServerNode := &topology.Node{
		Kind:      topology.NodeKindDataplane,
		Version:   topology.NodeVersionV2,
		Partition: currentTenancy.Partition,
		Name:      nodeName(),
		Workloads: []*topology.Workload{
			topoutil.NewBlankspaceWorkloadWithDefaults(
				clusterName,
				newID("static-server-v2", tenancy),
				topology.NodeVersionV2,
				func(wrk *topology.Workload) {
					wrk.V2Services = []string{"static-server-v2", "static-server"}
					wrk.Meta = map[string]string{
						"version": "v2",
					}
					wrk.WorkloadIdentity = "static-server-v2"
				},
			),
		},
	}
	clientNode := &topology.Node{
		Kind:      topology.NodeKindDataplane,
		Version:   topology.NodeVersionV2,
		Partition: currentTenancy.Partition,
		Name:      nodeName(),
		Workloads: []*topology.Workload{
			topoutil.NewBlankspaceWorkloadWithDefaults(
				clusterName,
				newID("static-client", tenancy),
				topology.NodeVersionV2,
				func(wrk *topology.Workload) {
					wrk.V2Services = []string{"static-client"}
					for i, tenancy := range c.tenancies {
						wrk.Destinations = append(wrk.Destinations,
							&topology.Destination{
								ID:           newID("static-server", tenancy),
								PortName:     "http",
								LocalAddress: "0.0.0.0", // needed for an assertion
								LocalPort:    5000 + (i * 4),
							},
							&topology.Destination{
								ID:           newID("static-server", tenancy),
								PortName:     "http2",
								LocalAddress: "0.0.0.0", // needed for an assertion
								LocalPort:    5001 + (i * 4),
							},
							&topology.Destination{
								ID:           newID("static-server", tenancy),
								PortName:     "grpc",
								LocalAddress: "0.0.0.0", // needed for an assertion
								LocalPort:    5002 + (i * 4),
							},
							&topology.Destination{
								ID:           newID("static-server", tenancy),
								PortName:     "tcp",
								LocalAddress: "0.0.0.0", // needed for an assertion
								LocalPort:    5003 + (i * 4),
							},
						)
					}
				},
			),
		},
	}

	var sources []*pbauth.Source
	for _, ten := range c.tenancies {
		sources = append(sources, &pbauth.Source{
			IdentityName: "static-client",
			Namespace:    ten.Namespace,
			Partition:    ten.Partition,
		})
	}

	v1TrafficPerms := sprawltest.MustSetResourceData(t, &pbresource.Resource{
		Id: &pbresource.ID{
			Type:    pbauth.TrafficPermissionsType,
			Name:    "static-server-v1-perms",
			Tenancy: tenancy,
		},
	}, &pbauth.TrafficPermissions{
		Destination: &pbauth.Destination{
			IdentityName: "static-server-v1",
		},
		Action: pbauth.Action_ACTION_ALLOW,
		Permissions: []*pbauth.Permission{{
			Sources: sources,
		}},
	})

	v2TrafficPerms := sprawltest.MustSetResourceData(t, &pbresource.Resource{
		Id: &pbresource.ID{
			Type:    pbauth.TrafficPermissionsType,
			Name:    "static-server-v2-perms",
			Tenancy: tenancy,
		},
	}, &pbauth.TrafficPermissions{
		Destination: &pbauth.Destination{
			IdentityName: "static-server-v2",
		},
		Action: pbauth.Action_ACTION_ALLOW,
		Permissions: []*pbauth.Permission{{
			Sources: sources,
		}},
	})

	portsFunc := func(offset uint32) []*pbcatalog.ServicePort {
		return []*pbcatalog.ServicePort{
			{
				TargetPort:  "http",
				VirtualPort: 8005 + offset,
				Protocol:    pbcatalog.Protocol_PROTOCOL_HTTP,
			},
			{
				TargetPort:  "http2",
				VirtualPort: 8006 + offset,
				Protocol:    pbcatalog.Protocol_PROTOCOL_HTTP2,
			},
			{
				TargetPort:  "grpc",
				VirtualPort: 9005 + offset,
				Protocol:    pbcatalog.Protocol_PROTOCOL_GRPC,
			},
			{
				TargetPort:  "tcp",
				VirtualPort: 10005 + offset,
				Protocol:    pbcatalog.Protocol_PROTOCOL_TCP,
			},
			{
				TargetPort: "mesh",
				Protocol:   pbcatalog.Protocol_PROTOCOL_MESH,
			},
		}
	}

	// Differ parent and backend virtual ports to verify we route to each correctly.
	parentServicePorts := portsFunc(0)
	backendServicePorts := portsFunc(100)

	// Explicitly define backend services s.t. they are not inferred from workload,
	// which would assign random virtual ports.
	cluster.Services[newID("static-client", tenancy)] = &pbcatalog.Service{
		Ports: []*pbcatalog.ServicePort{
			{
				TargetPort: "mesh",
				Protocol:   pbcatalog.Protocol_PROTOCOL_MESH,
			},
		},
	}
	cluster.Services[newID("static-server", tenancy)] = &pbcatalog.Service{
		Ports: parentServicePorts,
	}
	cluster.Services[newID("static-server-v1", tenancy)] = &pbcatalog.Service{
		Ports: backendServicePorts,
	}
	cluster.Services[newID("static-server-v2", tenancy)] = &pbcatalog.Service{
		Ports: backendServicePorts,
	}

	httpServerRoute := sprawltest.MustSetResourceData(t, &pbresource.Resource{
		Id: &pbresource.ID{
			Type:    pbmesh.HTTPRouteType,
			Name:    "static-server-http-route",
			Tenancy: tenancy,
		},
	}, &pbmesh.HTTPRoute{
		ParentRefs: []*pbmesh.ParentReference{
			{
				Ref: &pbresource.Reference{
					Type:    pbcatalog.ServiceType,
					Name:    "static-server",
					Tenancy: tenancy,
				},
				Port: "8005", // use mix of target and virtual parent ports
			},
			{
				Ref: &pbresource.Reference{
					Type:    pbcatalog.ServiceType,
					Name:    "static-server",
					Tenancy: tenancy,
				},
				Port: "http2",
			},
		},
		Rules: []*pbmesh.HTTPRouteRule{{
			BackendRefs: []*pbmesh.HTTPBackendRef{
				{
					BackendRef: &pbmesh.BackendReference{
						Ref: &pbresource.Reference{
							Type:    pbcatalog.ServiceType,
							Name:    "static-server-v1",
							Tenancy: tenancy,
						},
					},
					Weight: 10,
				},
				{
					BackendRef: &pbmesh.BackendReference{
						Ref: &pbresource.Reference{
							Type:    pbcatalog.ServiceType,
							Name:    "static-server-v2",
							Tenancy: tenancy,
						},
					},
					Weight: 90,
				},
			},
		}},
	})

	grpcServerRoute := sprawltest.MustSetResourceData(t, &pbresource.Resource{
		Id: &pbresource.ID{
			Type:    pbmesh.GRPCRouteType,
			Name:    "static-server-grpc-route",
			Tenancy: tenancy,
		},
	}, &pbmesh.GRPCRoute{
		ParentRefs: []*pbmesh.ParentReference{{
			Ref: &pbresource.Reference{
				Type:    pbcatalog.ServiceType,
				Name:    "static-server",
				Tenancy: tenancy,
			},
			Port: "grpc",
		}},
		Rules: []*pbmesh.GRPCRouteRule{{
			BackendRefs: []*pbmesh.GRPCBackendRef{
				{
					BackendRef: &pbmesh.BackendReference{
						Ref: &pbresource.Reference{
							Type:    pbcatalog.ServiceType,
							Name:    "static-server-v1",
							Tenancy: tenancy,
						},
						Port: "9105", // use mix of virtual and target (inferred from parent) ports
					},
					Weight: 10,
				},
				{
					BackendRef: &pbmesh.BackendReference{
						Ref: &pbresource.Reference{
							Type:    pbcatalog.ServiceType,
							Name:    "static-server-v2",
							Tenancy: tenancy,
						},
					},
					Weight: 90,
				},
			},
		}},
	})

	tcpServerRoute := sprawltest.MustSetResourceData(t, &pbresource.Resource{
		Id: &pbresource.ID{
			Type:    pbmesh.TCPRouteType,
			Name:    "static-server-tcp-route",
			Tenancy: tenancy,
		},
	}, &pbmesh.TCPRoute{
		ParentRefs: []*pbmesh.ParentReference{{
			Ref: &pbresource.Reference{
				Type:    pbcatalog.ServiceType,
				Name:    "static-server",
				Tenancy: tenancy,
			},
			Port: "10005", // use virtual parent port
		}},
		Rules: []*pbmesh.TCPRouteRule{{
			BackendRefs: []*pbmesh.TCPBackendRef{
				{
					BackendRef: &pbmesh.BackendReference{
						Ref: &pbresource.Reference{
							Type:    pbcatalog.ServiceType,
							Name:    "static-server-v1",
							Tenancy: tenancy,
						},
						Port: "10105", // use explicit virtual port
					},
					Weight: 10,
				},
				{
					BackendRef: &pbmesh.BackendReference{
						Ref: &pbresource.Reference{
							Type:    pbcatalog.ServiceType,
							Name:    "static-server-v2",
							Tenancy: tenancy,
						},
						Port: "tcp", // use explicit target port
					},
					Weight: 90,
				},
			},
		}},
	})

	cluster.Nodes = append(cluster.Nodes,
		clientNode,
		v1ServerNode,
		v2ServerNode,
	)

	cluster.InitialResources = append(cluster.InitialResources,
		v1TrafficPerms,
		v2TrafficPerms,
		httpServerRoute,
		grpcServerRoute,
		tcpServerRoute,
	)
}
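The 10/90 proportions asserted in this test come directly from the Weight fields on the HTTPRoute, GRPCRoute, and TCPRoute backend refs defined above. As a rough sketch of what a CheckBlankspaceNameTrafficSplitVia*-style assertion has to do (an illustrative helper under assumed names, not the actual topoutil implementation): sample many requests, tally which backend answered each one, and compare the observed proportions against the configured weights within a tolerance.

    package splitcheck

    import (
        "math"
        "testing"
    )

    // verifyWeightedSplit is a hypothetical helper showing the shape of a
    // weighted traffic-split assertion. fetch returns the name of the backend
    // that answered a single request.
    func verifyWeightedSplit(t *testing.T, fetch func() string, want map[string]int, samples int) {
        t.Helper()

        got := make(map[string]int, len(want))
        for i := 0; i < samples; i++ {
            got[fetch()]++
        }

        total := 0
        for _, w := range want {
            total += w
        }
        for name, w := range want {
            expected := float64(samples) * float64(w) / float64(total)
            // Allow 20% slack: a weighted split is probabilistic, so exact
            // counts would make the test flaky.
            if diff := math.Abs(float64(got[name]) - expected); diff > 0.2*expected {
                t.Fatalf("backend %s: got %d responses, expected ~%.0f of %d",
                    name, got[name], expected, samples)
            }
        }
    }

The tolerance is the important design point: a 10/90 split only holds in expectation, so any assertion over a finite sample must accept some deviation.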
@@ -1,315 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package catalogv2

import (
	"fmt"
	"testing"

	pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
	"github.com/hashicorp/consul/proto-public/pbresource"
	libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
	"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest"
	"github.com/hashicorp/consul/testing/deployer/topology"

	"github.com/hashicorp/consul/test-integ/topoutil"
)

// TestBasicL4ExplicitDestinations sets up the following:
//
//   - 1 cluster (no peering / no wanfed)
//   - 3 servers in that cluster
//   - v2 arch is activated
//   - for each tenancy, only using v2 constructs:
//     - a client with one explicit destination to a single port service
//     - a client with multiple explicit destinations to multiple ports of the
//       same multiport service
//
// When this test is executed in CE it will only use the default/default
// tenancy.
//
// When this test is executed in Enterprise it will additionally test the same
// things within these tenancies:
//
//   - part1/default
//   - default/nsa
//   - part1/nsa
func TestBasicL4ExplicitDestinations(t *testing.T) {
	tenancies := []*pbresource.Tenancy{
		{
			Partition: "default",
			Namespace: "default",
		},
	}
	if utils.IsEnterprise() {
		tenancies = append(tenancies, &pbresource.Tenancy{
			Partition: "part1",
			Namespace: "default",
		})
		tenancies = append(tenancies, &pbresource.Tenancy{
			Partition: "part1",
			Namespace: "nsa",
		})
		tenancies = append(tenancies, &pbresource.Tenancy{
			Partition: "default",
			Namespace: "nsa",
		})
	}

	cfg := testBasicL4ExplicitDestinationsCreator{
		tenancies: tenancies,
	}.NewConfig(t)

	sp := sprawltest.Launch(t, cfg)

	var (
		asserter = topoutil.NewAsserter(sp)

		topo    = sp.Topology()
		cluster = topo.Clusters["dc1"]

		ships = topo.ComputeRelationships()
	)

	clientV2 := sp.ResourceServiceClientForCluster(cluster.Name)

	t.Log(topology.RenderRelationships(ships))

	// Make sure things are in v2.
	for _, ten := range tenancies {
		for _, name := range []string{
			"single-server",
			"single-client",
			"multi-server",
			"multi-client",
		} {
			libassert.CatalogV2ServiceHasEndpointCount(t, clientV2, name, ten, 1)
		}
	}

	// Check relationships
	for _, ship := range ships {
		t.Run("relationship: "+ship.String(), func(t *testing.T) {
			var (
				wrk  = ship.Caller
				dest = ship.Destination
			)

			clusterPrefix := clusterPrefixForDestination(dest)

			asserter.DestinationEndpointStatus(t, wrk, clusterPrefix+".", "HEALTHY", 1)
			asserter.HTTPServiceEchoes(t, wrk, dest.LocalPort, "")
			asserter.FortioFetch2FortioName(t, wrk, dest, cluster.Name, dest.ID)
		})
	}
}

type testBasicL4ExplicitDestinationsCreator struct {
	tenancies []*pbresource.Tenancy
}

func (c testBasicL4ExplicitDestinationsCreator) NewConfig(t *testing.T) *topology.Config {
	const clusterName = "dc1"

	servers := topoutil.NewTopologyServerSet(clusterName+"-server", 3, []string{clusterName, "wan"}, nil)

	cluster := &topology.Cluster{
		Enterprise: utils.IsEnterprise(),
		Name:       clusterName,
		Nodes:      servers,
	}

	lastNode := 0
	nodeName := func() string {
		lastNode++
		return fmt.Sprintf("%s-box%d", clusterName, lastNode)
	}

	for _, ten := range c.tenancies {
		c.topologyConfigAddNodes(t, cluster, nodeName, ten)
	}

	return &topology.Config{
		Images: utils.TargetImages(),
		Networks: []*topology.Network{
			{Name: clusterName},
			{Name: "wan", Type: "wan"},
		},
		Clusters: []*topology.Cluster{
			cluster,
		},
	}
}

func (c testBasicL4ExplicitDestinationsCreator) topologyConfigAddNodes(
	t *testing.T,
	cluster *topology.Cluster,
	nodeName func() string,
	tenancy *pbresource.Tenancy,
) {
	clusterName := cluster.Name

	newID := func(name string, tenancy *pbresource.Tenancy) topology.ID {
		return topology.ID{
			Partition: tenancy.Partition,
			Namespace: tenancy.Namespace,
			Name:      name,
		}
	}

	singleportServerNode := &topology.Node{
		Kind:      topology.NodeKindDataplane,
		Version:   topology.NodeVersionV2,
		Partition: tenancy.Partition,
		Name:      nodeName(),
		Workloads: []*topology.Workload{
			topoutil.NewFortioWorkloadWithDefaults(
				clusterName,
				newID("single-server", tenancy),
				topology.NodeVersionV2,
				func(wrk *topology.Workload) {
					wrk.WorkloadIdentity = "single-server-identity"
				},
			),
		},
	}
	var singleportDestinations []*topology.Destination
	for i, ten := range c.tenancies {
		singleportDestinations = append(singleportDestinations, &topology.Destination{
			ID:           newID("single-server", ten),
			PortName:     "http",
			LocalAddress: "0.0.0.0", // needed for an assertion
			LocalPort:    5000 + i,
		})
	}
	singleportClientNode := &topology.Node{
		Kind:      topology.NodeKindDataplane,
		Version:   topology.NodeVersionV2,
		Partition: tenancy.Partition,
		Name:      nodeName(),
		Workloads: []*topology.Workload{
			topoutil.NewFortioWorkloadWithDefaults(
				clusterName,
				newID("single-client", tenancy),
				topology.NodeVersionV2,
				func(wrk *topology.Workload) {
					delete(wrk.Ports, "grpc")  // v2 mode turns this on, so turn it off
					delete(wrk.Ports, "http2") // v2 mode turns this on, so turn it off
					wrk.WorkloadIdentity = "single-client-identity"
					wrk.Destinations = singleportDestinations
				},
			),
		},
	}
	var sources []*pbauth.Source
	for _, ten := range c.tenancies {
		sources = append(sources, &pbauth.Source{
			IdentityName: "single-client-identity",
			Namespace:    ten.Namespace,
			Partition:    ten.Partition,
		})
	}
	singleportTrafficPerms := sprawltest.MustSetResourceData(t, &pbresource.Resource{
		Id: &pbresource.ID{
			Type:    pbauth.TrafficPermissionsType,
			Name:    "single-server-perms",
			Tenancy: tenancy,
		},
	}, &pbauth.TrafficPermissions{
		Destination: &pbauth.Destination{
			IdentityName: "single-server-identity",
		},
		Action: pbauth.Action_ACTION_ALLOW,
		Permissions: []*pbauth.Permission{{
			Sources: sources,
		}},
	})

	multiportServerNode := &topology.Node{
		Kind:      topology.NodeKindDataplane,
		Version:   topology.NodeVersionV2,
		Partition: tenancy.Partition,
		Name:      nodeName(),
		Workloads: []*topology.Workload{
			topoutil.NewFortioWorkloadWithDefaults(
				clusterName,
				newID("multi-server", tenancy),
				topology.NodeVersionV2,
				func(wrk *topology.Workload) {
					wrk.WorkloadIdentity = "multi-server-identity"
				},
			),
		},
	}
	var multiportDestinations []*topology.Destination
	for i, ten := range c.tenancies {
		multiportDestinations = append(multiportDestinations, &topology.Destination{
			ID:           newID("multi-server", ten),
			PortName:     "http",
			LocalAddress: "0.0.0.0", // needed for an assertion
			LocalPort:    5000 + 2*i,
		})
		multiportDestinations = append(multiportDestinations, &topology.Destination{
			ID:           newID("multi-server", ten),
			PortName:     "http2",
			LocalAddress: "0.0.0.0", // needed for an assertion
			LocalPort:    5000 + 2*i + 1,
		})
	}
	multiportClientNode := &topology.Node{
		Kind:      topology.NodeKindDataplane,
		Version:   topology.NodeVersionV2,
		Partition: tenancy.Partition,
		Name:      nodeName(),
		Workloads: []*topology.Workload{
			topoutil.NewFortioWorkloadWithDefaults(
				clusterName,
				newID("multi-client", tenancy),
				topology.NodeVersionV2,
				func(wrk *topology.Workload) {
					wrk.WorkloadIdentity = "multi-client-identity"
					wrk.Destinations = multiportDestinations
				},
			),
		},
	}

	var multiportSources []*pbauth.Source
	for _, ten := range c.tenancies {
		multiportSources = append(multiportSources, &pbauth.Source{
			IdentityName: "multi-client-identity",
			Namespace:    ten.Namespace,
			Partition:    ten.Partition,
		})
	}
	multiportTrafficPerms := sprawltest.MustSetResourceData(t, &pbresource.Resource{
		Id: &pbresource.ID{
			Type:    pbauth.TrafficPermissionsType,
			Name:    "multi-server-perms",
			Tenancy: tenancy,
		},
	}, &pbauth.TrafficPermissions{
		Destination: &pbauth.Destination{
			IdentityName: "multi-server-identity",
		},
		Action: pbauth.Action_ACTION_ALLOW,
		Permissions: []*pbauth.Permission{{
			Sources: multiportSources,
		}},
	})

	cluster.Nodes = append(cluster.Nodes,
		singleportClientNode,
		singleportServerNode,
		multiportClientNode,
		multiportServerNode,
	)

	cluster.InitialResources = append(cluster.InitialResources,
		singleportTrafficPerms,
		multiportTrafficPerms,
	)
}
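For reference, the local listener layout produced by the destinations above is a simple function of the tenancy index i: the single-port client listens on 5000+i for http, and the multi-port client listens on 5000+2i and 5000+2i+1 for http and http2. A tiny illustrative program (the tenancy labels are taken from the enterprise list in the test above) prints that layout:

    package main

    import "fmt"

    func main() {
        // Order matches the tenancies slice built in TestBasicL4ExplicitDestinations.
        tenancies := []string{"default/default", "part1/default", "part1/nsa", "default/nsa"}
        for i, ten := range tenancies {
            fmt.Printf("tenancy %-16s single http=%d  multi http=%d http2=%d\n",
                ten, 5000+i, 5000+2*i, 5000+2*i+1)
        }
    }

Each client workload gets its own listeners, so numeric overlap between the single-port and multi-port layouts across tenancy indexes is not a conflict.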
@@ -1,31 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package catalogv2

import (
	"strings"

	"github.com/hashicorp/consul/testing/deployer/topology"
)

// Deprecated: clusterPrefixForDestination
func clusterPrefixForUpstream(dest *topology.Destination) string {
	return clusterPrefixForDestination(dest)
}

func clusterPrefixForDestination(dest *topology.Destination) string {
	if dest.Peer == "" {
		return clusterPrefix(dest.PortName, dest.ID, dest.Cluster)
	} else {
		return strings.Join([]string{dest.ID.Name, dest.ID.Namespace, dest.Peer, "external"}, ".")
	}
}

func clusterPrefix(port string, svcID topology.ID, cluster string) string {
	if svcID.PartitionOrDefault() == "default" {
		return strings.Join([]string{port, svcID.Name, svcID.Namespace, cluster, "internal"}, ".")
	} else {
		return strings.Join([]string{port, svcID.Name, svcID.Namespace, svcID.Partition, cluster, "internal-v1"}, ".")
	}
}
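Worked examples of the Envoy cluster naming scheme these helpers encode (the outputs follow mechanically from the string joins above; the IDs themselves are illustrative):

    // Default partition: port.name.namespace.cluster.internal
    clusterPrefix("http", topology.ID{Partition: "default", Namespace: "default", Name: "static-server"}, "dc1")
    // => "http.static-server.default.dc1.internal"

    // Non-default partition: port.name.namespace.partition.cluster.internal-v1
    clusterPrefix("http", topology.ID{Partition: "part1", Namespace: "nsa", Name: "static-server"}, "dc1")
    // => "http.static-server.nsa.part1.dc1.internal-v1"

    // With a peer set, the port is not part of the prefix:
    clusterPrefixForDestination(&topology.Destination{
        ID:       topology.ID{Partition: "default", Namespace: "default", Name: "static-server"},
        PortName: "http",
        Peer:     "peer1",
    })
    // => "static-server.default.peer1.external"

This is why the tests append "." to the prefix before matching: it anchors the match at a label boundary in the full Envoy cluster name.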
@@ -1,244 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package catalogv2

import (
	"fmt"
	"testing"

	pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
	"github.com/hashicorp/consul/proto-public/pbresource"
	libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
	"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest"
	"github.com/hashicorp/consul/testing/deployer/topology"

	"github.com/hashicorp/consul/test-integ/topoutil"
)

// TestBasicL4ImplicitDestinations sets up the following:
//
//   - 1 cluster (no peering / no wanfed)
//   - 3 servers in that cluster
//   - v2 arch is activated
//   - for each tenancy, only using v2 constructs:
//     - a server exposing 2 tcp ports
//     - a client with transparent proxy enabled and no explicit upstreams
//     - a traffic permission granting the client access to the service on all ports
//
// When this test is executed in CE it will only use the default/default
// tenancy.
//
// When this test is executed in Enterprise it will additionally test the same
// things within these tenancies:
//
//   - part1/default
//   - default/nsa
//   - part1/nsa
func TestBasicL4ImplicitDestinations(t *testing.T) {
	tenancies := []*pbresource.Tenancy{{
		Namespace: "default",
		Partition: "default",
	}}
	if utils.IsEnterprise() {
		// These match the partition/namespace pairs listed in the doc comment
		// above: "part1" is a partition and "nsa" is a namespace.
		tenancies = append(tenancies, &pbresource.Tenancy{
			Namespace: "default",
			Partition: "part1",
		})
		tenancies = append(tenancies, &pbresource.Tenancy{
			Namespace: "nsa",
			Partition: "default",
		})
		tenancies = append(tenancies, &pbresource.Tenancy{
			Namespace: "nsa",
			Partition: "part1",
		})
	}

	cfg := testBasicL4ImplicitDestinationsCreator{
		tenancies: tenancies,
	}.NewConfig(t)

	sp := sprawltest.Launch(t, cfg)

	var (
		asserter = topoutil.NewAsserter(sp)

		topo    = sp.Topology()
		cluster = topo.Clusters["dc1"]

		ships = topo.ComputeRelationships()
	)

	clientV2 := sp.ResourceServiceClientForCluster(cluster.Name)

	t.Log(topology.RenderRelationships(ships))

	// Make sure things are truly in v2, not v1.
	for _, tenancy := range tenancies {
		for _, name := range []string{
			"static-server",
			"static-client",
		} {
			libassert.CatalogV2ServiceHasEndpointCount(t, clientV2, name, tenancy, 1)
		}
	}

	// Check relationships
	for _, ship := range ships {
		t.Run("relationship: "+ship.String(), func(t *testing.T) {
			var (
				wrk  = ship.Caller
				dest = ship.Destination
			)

			clusterPrefix := clusterPrefixForDestination(dest)

			asserter.DestinationEndpointStatus(t, wrk, clusterPrefix+".", "HEALTHY", 1)
			if dest.LocalPort > 0 {
				asserter.HTTPServiceEchoes(t, wrk, dest.LocalPort, "")
			}
			asserter.FortioFetch2FortioName(t, wrk, dest, cluster.Name, dest.ID)
		})
	}
}

type testBasicL4ImplicitDestinationsCreator struct {
	tenancies []*pbresource.Tenancy
}

func (c testBasicL4ImplicitDestinationsCreator) NewConfig(t *testing.T) *topology.Config {
	const clusterName = "dc1"

	servers := topoutil.NewTopologyServerSet(clusterName+"-server", 3, []string{clusterName, "wan"}, nil)

	cluster := &topology.Cluster{
		Enterprise: utils.IsEnterprise(),
		Name:       clusterName,
		Nodes:      servers,
	}

	lastNode := 0
	nodeName := func() string {
		lastNode++
		return fmt.Sprintf("%s-box%d", clusterName, lastNode)
	}

	for i := range c.tenancies {
		c.topologyConfigAddNodes(t, cluster, nodeName, c.tenancies[i])
	}

	return &topology.Config{
		Images: utils.TargetImages(),
		Networks: []*topology.Network{
			{Name: clusterName},
			{Name: "wan", Type: "wan"},
		},
		Clusters: []*topology.Cluster{
			cluster,
		},
	}
}

func (c testBasicL4ImplicitDestinationsCreator) topologyConfigAddNodes(
	t *testing.T,
	cluster *topology.Cluster,
	nodeName func() string,
	tenancy *pbresource.Tenancy,
) {
	clusterName := cluster.Name

	newID := func(name string, tenancy *pbresource.Tenancy) topology.ID {
		return topology.ID{
			Partition: tenancy.Partition,
			Namespace: tenancy.Namespace,
			Name:      name,
		}
	}

	serverNode := &topology.Node{
		Kind:      topology.NodeKindDataplane,
		Version:   topology.NodeVersionV2,
		Partition: tenancy.Partition,
		Name:      nodeName(),
		Workloads: []*topology.Workload{
			topoutil.NewFortioWorkloadWithDefaults(
				clusterName,
				newID("static-server", tenancy),
				topology.NodeVersionV2,
				func(wrk *topology.Workload) {
					wrk.EnableTransparentProxy = true
				},
			),
		},
	}

	var impliedDestinations []*topology.Destination
	for _, ten := range c.tenancies {
		// For now we include all services in the same partition as implicit upstreams.
		if tenancy.Partition != ten.Partition {
			continue
		}
		impliedDestinations = append(impliedDestinations, &topology.Destination{
			ID:       newID("static-server", ten),
			PortName: "http",
		})
		impliedDestinations = append(impliedDestinations, &topology.Destination{
			ID:       newID("static-server", ten),
			PortName: "http2",
		})
	}

	clientNode := &topology.Node{
		Kind:      topology.NodeKindDataplane,
		Version:   topology.NodeVersionV2,
		Partition: tenancy.Partition,
		Name:      nodeName(),
		Workloads: []*topology.Workload{
			topoutil.NewFortioWorkloadWithDefaults(
				clusterName,
				newID("static-client", tenancy),
				topology.NodeVersionV2,
				func(wrk *topology.Workload) {
					wrk.EnableTransparentProxy = true
					wrk.ImpliedDestinations = impliedDestinations
				},
			),
		},
	}

	var sources []*pbauth.Source
	for _, ten := range c.tenancies {
		sources = append(sources, &pbauth.Source{
			IdentityName: "static-client",
			Namespace:    ten.Namespace,
			Partition:    ten.Partition,
		})
	}

	trafficPerms := sprawltest.MustSetResourceData(t, &pbresource.Resource{
		Id: &pbresource.ID{
			Type:    pbauth.TrafficPermissionsType,
			Name:    "static-server-perms",
			Tenancy: tenancy,
		},
	}, &pbauth.TrafficPermissions{
		Destination: &pbauth.Destination{
			IdentityName: "static-server",
		},
		Action: pbauth.Action_ACTION_ALLOW,
		Permissions: []*pbauth.Permission{{
			Sources: sources,
		}},
	})

	cluster.Nodes = append(cluster.Nodes,
		clientNode,
		serverNode,
	)

	cluster.InitialResources = append(cluster.InitialResources,
		trafficPerms,
	)
}
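The key difference from the explicit-destination tests earlier in this directory is how the client reaches the server. A side-by-side sketch of the two Destination shapes (the concrete field values are illustrative, taken from the patterns in these files):

    package sketch

    import "github.com/hashicorp/consul/testing/deployer/topology"

    // destinationStyles contrasts the two ways these tests wire a client to
    // static-server.
    func destinationStyles() (explicit, implied *topology.Destination) {
        explicit = &topology.Destination{
            ID:           topology.ID{Partition: "default", Namespace: "default", Name: "static-server"},
            PortName:     "http",
            LocalAddress: "0.0.0.0", // sidecar binds a fixed local listener
            LocalPort:    5000,      // the application dials localhost:5000
        }
        implied = &topology.Destination{
            ID:       topology.ID{Partition: "default", Namespace: "default", Name: "static-server"},
            PortName: "http",
            // No LocalAddress/LocalPort: with EnableTransparentProxy the app
            // dials the service address directly and traffic is redirected
            // through the sidecar.
        }
        return explicit, implied
    }

This is also why the test above guards HTTPServiceEchoes with dest.LocalPort > 0: an implied destination has no fixed local listener to probe.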
@@ -1,459 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package catalogv2

import (
	"context"
	"fmt"
	"net/http"
	"strconv"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
	"github.com/hashicorp/consul/proto-public/pbresource"
	"github.com/hashicorp/consul/sdk/testutil/retry"
	"github.com/hashicorp/consul/test-integ/topoutil"
	libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
	"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest"
	"github.com/hashicorp/consul/testing/deployer/topology"
)

type testCase struct {
	permissions []*permission
	result      []*testResult
}

type permission struct {
	allow                bool
	excludeSource        bool
	includeSourceTenancy bool
	excludeSourceTenancy bool
	destRules            []*destRules
}

type destRules struct {
	values   *ruleValues
	excludes []*ruleValues
}

type ruleValues struct {
	portNames []string
	path      string
	pathPref  string
	pathReg   string
	headers   []string
	methods   []string
}

type testResult struct {
	fail    bool
	port    string
	path    string
	headers map[string]string
}

func newTrafficPermissions(p *permission, srcTenancy *pbresource.Tenancy) *pbauth.TrafficPermissions {
	sources := []*pbauth.Source{{
		IdentityName: "static-client",
		Namespace:    srcTenancy.Namespace,
		Partition:    srcTenancy.Partition,
	}}
	destinationRules := []*pbauth.DestinationRule{}
	if p != nil {
		srcId := "static-client"
		if p.includeSourceTenancy {
			srcId = ""
		}
		if p.excludeSource {
			sources = []*pbauth.Source{{
				IdentityName: srcId,
				Namespace:    srcTenancy.Namespace,
				Partition:    srcTenancy.Partition,
				Exclude: []*pbauth.ExcludeSource{{
					IdentityName: "static-client",
					Namespace:    srcTenancy.Namespace,
					Partition:    srcTenancy.Partition,
				}},
			}}
		} else {
			sources = []*pbauth.Source{{
				IdentityName: srcId,
				Namespace:    srcTenancy.Namespace,
				Partition:    srcTenancy.Partition,
			}}
		}
		for _, dr := range p.destRules {
			destRule := &pbauth.DestinationRule{}
			if dr.values != nil {
				destRule.PathExact = dr.values.path
				destRule.PathPrefix = dr.values.pathPref
				destRule.PathRegex = dr.values.pathReg
				destRule.Methods = dr.values.methods
				destRule.PortNames = dr.values.portNames
				destRule.Headers = []*pbauth.DestinationRuleHeader{}
				for _, h := range dr.values.headers {
					destRule.Headers = append(destRule.Headers, &pbauth.DestinationRuleHeader{
						Name:    h,
						Present: true,
					})
				}
			}
			var excludePermissions []*pbauth.ExcludePermissionRule
			for _, e := range dr.excludes {
				eRule := &pbauth.ExcludePermissionRule{
					PathExact:  e.path,
					PathPrefix: e.pathPref,
					PathRegex:  e.pathReg,
					Methods:    e.methods,
					PortNames:  e.portNames,
				}
				eRule.Headers = []*pbauth.DestinationRuleHeader{}
				for _, h := range e.headers {
					eRule.Headers = append(eRule.Headers, &pbauth.DestinationRuleHeader{
						Name:    h,
						Present: true,
					})
				}
				excludePermissions = append(excludePermissions, eRule)
			}
			destRule.Exclude = excludePermissions
			destinationRules = append(destinationRules, destRule)
		}
	}
	action := pbauth.Action_ACTION_ALLOW
	if !p.allow {
		action = pbauth.Action_ACTION_DENY
	}
	return &pbauth.TrafficPermissions{
		Destination: &pbauth.Destination{
			IdentityName: "static-server",
		},
		Action: action,
		Permissions: []*pbauth.Permission{{
			Sources:          sources,
			DestinationRules: destinationRules,
		}},
	}
}

// This test runs a gauntlet of traffic permissions updates and validates that
// the request status codes match the intended rules.
func TestL7TrafficPermissions(t *testing.T) {
	testcases := map[string]testCase{
		// L4 permissions
		"basic":                       {permissions: []*permission{{allow: true}}, result: []*testResult{{fail: false}}},
		"client-exclude":              {permissions: []*permission{{allow: true, includeSourceTenancy: true, excludeSource: true}}, result: []*testResult{{fail: true}}},
		"allow-all-client-in-tenancy": {permissions: []*permission{{allow: true, includeSourceTenancy: true}}, result: []*testResult{{fail: false}}},
		"only-one-port":               {permissions: []*permission{{allow: true, destRules: []*destRules{{values: &ruleValues{portNames: []string{"http"}}}}}}, result: []*testResult{{fail: true, port: "http2"}}},
		"exclude-port":                {permissions: []*permission{{allow: true, destRules: []*destRules{{excludes: []*ruleValues{{portNames: []string{"http"}}}}}}}, result: []*testResult{{fail: true, port: "http"}}},
		// L7 permissions
		"methods": {permissions: []*permission{{allow: true, destRules: []*destRules{{values: &ruleValues{methods: []string{"POST", "PUT", "PATCH", "DELETE", "CONNECT", "HEAD", "OPTIONS", "TRACE"}, pathPref: "/"}}}}},
			// fortio fetch2 is configured to GET
			result: []*testResult{{fail: true}}},
		"headers": {permissions: []*permission{{allow: true, destRules: []*destRules{{values: &ruleValues{headers: []string{"a", "b"}, pathPref: "/"}}}}},
			result: []*testResult{{fail: true}, {fail: true, headers: map[string]string{"a": "1"}}, {fail: false, headers: map[string]string{"a": "1", "b": "2"}}}},
		"path-prefix-all": {permissions: []*permission{{allow: true, destRules: []*destRules{{values: &ruleValues{pathPref: "/", methods: []string{"GET"}}}}}}, result: []*testResult{{fail: false}}},
		"method-exclude": {permissions: []*permission{{allow: true, destRules: []*destRules{{values: &ruleValues{pathPref: "/"}, excludes: []*ruleValues{{methods: []string{"GET"}}}}}}},
			// fortio fetch2 is configured to GET
			result: []*testResult{{fail: true}}},
		"exclude-paths-and-headers": {permissions: []*permission{{allow: true, destRules: []*destRules{
			{
				values:   &ruleValues{pathPref: "/f", headers: []string{"a"}},
				excludes: []*ruleValues{{headers: []string{"b"}, path: "/foobar"}},
			}}}},
			result: []*testResult{
				{fail: false, path: "foobar", headers: map[string]string{"a": "1"}},
				{fail: false, path: "foo", headers: map[string]string{"a": "1", "b": "2"}},
				{fail: true, path: "foobar", headers: map[string]string{"a": "1", "b": "2"}},
				{fail: false, path: "foo", headers: map[string]string{"a": "1"}},
				{fail: true, path: "foo", headers: map[string]string{"b": "2"}},
				{fail: true, path: "baz", headers: map[string]string{"a": "1"}},
			}},
		"exclude-paths-or-headers": {permissions: []*permission{{allow: true, destRules: []*destRules{
			{values: &ruleValues{pathPref: "/f", headers: []string{"a"}}, excludes: []*ruleValues{{headers: []string{"b"}}, {path: "/foobar"}}}}}},
			result: []*testResult{
				{fail: true, path: "foobar", headers: map[string]string{"a": "1"}},
				{fail: true, path: "foo", headers: map[string]string{"a": "1", "b": "2"}},
				{fail: true, path: "foobar", headers: map[string]string{"a": "1", "b": "2"}},
				{fail: false, path: "foo", headers: map[string]string{"a": "1"}},
				{fail: false, path: "foo", headers: map[string]string{"a": "1"}},
				{fail: true, path: "baz", port: "http", headers: map[string]string{"a": "1"}},
			}},
		"path-or-header": {permissions: []*permission{{allow: true, destRules: []*destRules{{values: &ruleValues{pathPref: "/bar"}}, {values: &ruleValues{headers: []string{"b"}}}}}},
			result: []*testResult{
				{fail: false, path: "bar"},
				{fail: false, path: "foo", headers: map[string]string{"a": "1", "b": "2"}},
				{fail: false, path: "bar", headers: map[string]string{"b": "2"}},
				{fail: true, path: "foo", headers: map[string]string{"a": "1"}},
			}},
		"path-and-header": {permissions: []*permission{{allow: true, destRules: []*destRules{{values: &ruleValues{pathPref: "/bar", headers: []string{"b"}}}}}},
			result: []*testResult{
				{fail: true, path: "bar"},
				{fail: true, path: "foo", headers: map[string]string{"a": "1", "b": "2"}},
				{fail: false, path: "bar", headers: map[string]string{"b": "2"}},
				{fail: true, path: "foo", headers: map[string]string{"a": "1"}},
			}},
		"path-regex-exclude": {permissions: []*permission{{allow: true, destRules: []*destRules{{values: &ruleValues{pathPref: "/"}, excludes: []*ruleValues{{pathReg: ".*dns.*"}}}}}},
			result: []*testResult{{fail: true, path: "fortio/rest/dns"}, {fail: false, path: "fortio/rest/status"}}},
		"header-include-exclude-by-port": {permissions: []*permission{{allow: true, destRules: []*destRules{{values: &ruleValues{pathPref: "/", headers: []string{"experiment1", "experiment2"}}, excludes: []*ruleValues{{portNames: []string{"http2"}, headers: []string{"experiment1"}}}}}}},
			result: []*testResult{{fail: true, port: "http2", headers: map[string]string{"experiment1": "a", "experiment2": "b"}},
				{fail: false, port: "http", headers: map[string]string{"experiment1": "a", "experiment2": "b"}},
				{fail: true, port: "http2", headers: map[string]string{"experiment2": "b"}},
				{fail: true, port: "http", headers: map[string]string{"experiment3": "c"}},
			}},
		"two-tp-or": {permissions: []*permission{{allow: true, destRules: []*destRules{{values: &ruleValues{pathPref: "/bar"}}}}, {allow: true, destRules: []*destRules{{values: &ruleValues{headers: []string{"b"}}}}}},
			result: []*testResult{
				{fail: false, path: "bar"},
				{fail: false, path: "foo", headers: map[string]string{"a": "1", "b": "2"}},
				{fail: false, path: "bar", headers: map[string]string{"b": "2"}},
				{fail: true, path: "foo", headers: map[string]string{"a": "1"}},
			}},
	}
	if utils.IsEnterprise() {
		// DENY and ALLOW permissions
		testcases["deny-cancel-allow"] = testCase{permissions: []*permission{{allow: true}, {allow: false}}, result: []*testResult{{fail: true}}}
		testcases["l4-deny-l7-allow"] = testCase{permissions: []*permission{{allow: false}, {allow: true, destRules: []*destRules{{values: &ruleValues{pathPref: "/"}}}}}, result: []*testResult{{fail: true}, {fail: true, path: "test"}}}
		testcases["l7-deny-l4-allow"] = testCase{permissions: []*permission{{allow: true}, {allow: true, destRules: []*destRules{{values: &ruleValues{pathPref: "/"}}}}, {allow: false, destRules: []*destRules{{values: &ruleValues{pathPref: "/foo"}}}}},
			result: []*testResult{{fail: false}, {fail: false, path: "test"}, {fail: true, path: "foo-bar"}}}
	}

	tenancies := []*pbresource.Tenancy{
		{
			Partition: "default",
			Namespace: "default",
		},
	}
	if utils.IsEnterprise() {
		tenancies = append(tenancies, &pbresource.Tenancy{
			Partition: "ap1",
			Namespace: "ns1",
		})
	}
	cfg := testL7TrafficPermissionsCreator{tenancies}.NewConfig(t)
	targetImage := utils.TargetImages()
	imageName := targetImage.Consul
	if utils.IsEnterprise() {
		imageName = targetImage.ConsulEnterprise
	}
	t.Log("running with target image: " + imageName)

	sp := sprawltest.Launch(t, cfg)

	asserter := topoutil.NewAsserter(sp)

	topo := sp.Topology()
	cluster := topo.Clusters["dc1"]
	ships := topo.ComputeRelationships()

	clientV2 := sp.ResourceServiceClientForCluster(cluster.Name)

	// Make sure services exist
	for _, tenancy := range tenancies {
		for _, name := range []string{
			"static-server",
			"static-client",
		} {
			libassert.CatalogV2ServiceHasEndpointCount(t, clientV2, name, tenancy, len(tenancies))
		}
	}
	var initialTrafficPerms []*pbresource.Resource
	for testName, tc := range testcases {
		// Delete old TP and write new one for a new test case
		mustDeleteTestResources(t, clientV2, initialTrafficPerms)
		initialTrafficPerms = []*pbresource.Resource{}
		for _, st := range tenancies {
			for _, dt := range tenancies {
				for i, p := range tc.permissions {
					newTrafficPerms := sprawltest.MustSetResourceData(t, &pbresource.Resource{
						Id: &pbresource.ID{
							Type:    pbauth.TrafficPermissionsType,
							Name:    "static-server-perms" + strconv.Itoa(i) + "-" + st.Namespace + "-" + st.Partition,
							Tenancy: dt,
						},
					}, newTrafficPermissions(p, st))
					mustWriteTestResource(t, clientV2, newTrafficPerms)
					initialTrafficPerms = append(initialTrafficPerms, newTrafficPerms)
				}
			}
		}
		t.Log(initialTrafficPerms)
		// Wait for the resource updates to go through and Envoy to be ready
		time.Sleep(1 * time.Second)
		// Check the default server workload envoy config for RBAC filters matching testcase criteria
		serverWorkload := cluster.WorkloadsByID(topology.ID{
			Partition: "default",
			Namespace: "default",
			Name:      "static-server",
		})
		asserter.AssertEnvoyHTTPrbacFiltersContainIntentions(t, serverWorkload[0])
		// Check relationships
		for _, ship := range ships {
			t.Run("case: "+testName+":"+ship.Destination.PortName+":("+ship.Caller.ID.Partition+"/"+ship.Caller.ID.Namespace+
				")("+ship.Destination.ID.Partition+"/"+ship.Destination.ID.Namespace+")", func(t *testing.T) {
				var (
					wrk  = ship.Caller
					dest = ship.Destination
				)
				for _, res := range tc.result {
					if res.port != "" && res.port != ship.Destination.PortName {
						continue
					}
					dest.ID.Name = "static-server"
					destClusterPrefix := clusterPrefix(dest.PortName, dest.ID, dest.Cluster)
					asserter.DestinationEndpointStatus(t, wrk, destClusterPrefix+".", "HEALTHY", len(tenancies))
					status := http.StatusForbidden
					if !res.fail {
						status = http.StatusOK
					}
					t.Log("Test request:"+res.path, res.headers, status)
					asserter.FortioFetch2ServiceStatusCodes(t, wrk, dest, res.path, res.headers, []int{status})
				}
			})
		}
	}
}

func mustWriteTestResource(t *testing.T, client pbresource.ResourceServiceClient, res *pbresource.Resource) {
	retryer := &retry.Timer{Timeout: time.Minute, Wait: time.Second}
	rsp, err := client.Write(context.Background(), &pbresource.WriteRequest{Resource: res})
	require.NoError(t, err)
	retry.RunWith(retryer, t, func(r *retry.R) {
		readRsp, err := client.Read(context.Background(), &pbresource.ReadRequest{Id: rsp.Resource.Id})
		require.NoError(r, err, "error reading %s", rsp.Resource.Id.Name)
		require.NotNil(r, readRsp)
	})
}

func mustDeleteTestResources(t *testing.T, client pbresource.ResourceServiceClient, resources []*pbresource.Resource) {
	if len(resources) == 0 {
		return
	}
	retryer := &retry.Timer{Timeout: time.Minute, Wait: time.Second}
	for _, res := range resources {
		retry.RunWith(retryer, t, func(r *retry.R) {
			_, err := client.Delete(context.Background(), &pbresource.DeleteRequest{Id: res.Id})
			if status.Code(err) == codes.NotFound {
				return
			}
			if err != nil && status.Code(err) != codes.Aborted {
				r.Stop(fmt.Errorf("failed to delete the resource: %w", err))
				return
			}
			require.NoError(r, err)
		})
	}
}

type testL7TrafficPermissionsCreator struct {
	tenancies []*pbresource.Tenancy
}

func (c testL7TrafficPermissionsCreator) NewConfig(t *testing.T) *topology.Config {
	const clusterName = "dc1"

	servers := topoutil.NewTopologyServerSet(clusterName+"-server", 1, []string{clusterName, "wan"}, nil)

	cluster := &topology.Cluster{
		Enterprise: utils.IsEnterprise(),
		Name:       clusterName,
		Nodes:      servers,
	}

	lastNode := 0
	nodeName := func() string {
		lastNode++
		return fmt.Sprintf("%s-box%d", clusterName, lastNode)
	}

	for _, st := range c.tenancies {
		for _, dt := range c.tenancies {
			c.topologyConfigAddNodes(cluster, nodeName, st, dt)
		}
	}

	return &topology.Config{
		Images: utils.TargetImages(),
		Networks: []*topology.Network{
			{Name: clusterName},
			{Name: "wan", Type: "wan"},
		},
		Clusters: []*topology.Cluster{
			cluster,
		},
	}
}

func (c testL7TrafficPermissionsCreator) topologyConfigAddNodes(
	cluster *topology.Cluster,
	nodeName func() string,
	sourceTenancy *pbresource.Tenancy,
	destinationTenancy *pbresource.Tenancy,
) {
	clusterName := cluster.Name

	newID := func(name string, tenancy *pbresource.Tenancy) topology.ID {
		return topology.ID{
			Partition: tenancy.Partition,
			Namespace: tenancy.Namespace,
			Name:      name,
		}
	}

	serverNode := &topology.Node{
		Kind:      topology.NodeKindDataplane,
		Version:   topology.NodeVersionV2,
		Partition: destinationTenancy.Partition,
		Name:      nodeName(),
		Workloads: []*topology.Workload{
			topoutil.NewFortioWorkloadWithDefaults(
				clusterName,
				newID("static-server", destinationTenancy),
				topology.NodeVersionV2,
				nil,
			),
		},
	}

	clientNode := &topology.Node{
		Kind:      topology.NodeKindDataplane,
		Version:   topology.NodeVersionV2,
		Partition: sourceTenancy.Partition,
		Name:      nodeName(),
		Workloads: []*topology.Workload{
			topoutil.NewFortioWorkloadWithDefaults(
				clusterName,
				newID("static-client", sourceTenancy),
				topology.NodeVersionV2,
				func(wrk *topology.Workload) {
					wrk.Destinations = append(wrk.Destinations,
						&topology.Destination{
							ID:           newID("static-server", destinationTenancy),
							PortName:     "http",
							LocalAddress: "0.0.0.0", // needed for an assertion
							LocalPort:    5000,
						},
						&topology.Destination{
							ID:           newID("static-server", destinationTenancy),
							PortName:     "http2",
							LocalAddress: "0.0.0.0", // needed for an assertion
							LocalPort:    5001,
						},
					)
					wrk.WorkloadIdentity = "static-client"
				},
			),
		},
	}

	cluster.Nodes = append(cluster.Nodes,
		clientNode,
		serverNode,
	)
}
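To make the table-driven cases concrete, here is a trace of newTrafficPermissions for the "exclude-port" case (allow, with an exclude rule on port "http") given a default/default source tenancy. Since dr.values is nil, only the exclude rule is populated; requests on "http" then fall into the exclude and are denied, which is exactly the {fail: true, port: "http"} expectation in the table:

    // Expansion of the "exclude-port" permission, following the builder above.
    var excludePortPerms = &pbauth.TrafficPermissions{
        Destination: &pbauth.Destination{IdentityName: "static-server"},
        Action:      pbauth.Action_ACTION_ALLOW,
        Permissions: []*pbauth.Permission{{
            Sources: []*pbauth.Source{{
                IdentityName: "static-client",
                Namespace:    "default",
                Partition:    "default",
            }},
            DestinationRules: []*pbauth.DestinationRule{{
                // dr.values == nil, so no path/method/port match fields are set.
                Exclude: []*pbauth.ExcludePermissionRule{{
                    PortNames: []string{"http"},
                    Headers:   []*pbauth.DestinationRuleHeader{},
                }},
            }},
        }},
    }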
@ -1,154 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package tenancy

import (
	"context"
	"fmt"
	"time"

	"github.com/stretchr/testify/require"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"

	"github.com/hashicorp/consul/proto-public/pbresource"
	"github.com/hashicorp/consul/sdk/testutil"
	"github.com/hashicorp/consul/sdk/testutil/retry"
)

// This duplicates a subset of internal/resource/resourcetest/client.go so
// we're not importing consul internals into integration tests.
//
// TODO: Move to a general package if used more widely.

type ClientOption func(*Client)

func WithACLToken(token string) ClientOption {
	return func(c *Client) {
		c.token = token
	}
}

// Client decorates a resource service client with helper functions to assist
// with integration testing.
type Client struct {
	pbresource.ResourceServiceClient

	timeout time.Duration
	wait    time.Duration
	token   string
}

func NewClient(client pbresource.ResourceServiceClient, opts ...ClientOption) *Client {
	c := &Client{
		ResourceServiceClient: client,
		timeout:               7 * time.Second,
		wait:                  50 * time.Millisecond,
	}

	for _, opt := range opts {
		opt(c)
	}

	return c
}

func NewClientWithACLToken(client pbresource.ResourceServiceClient, token string) *Client {
	return NewClient(client, WithACLToken(token))
}

func (client *Client) SetRetryerConfig(timeout time.Duration, wait time.Duration) {
	client.timeout = timeout
	client.wait = wait
}

func (client *Client) retry(t testutil.TestingTB, fn func(r *retry.R)) {
	t.Helper()
	retryer := &retry.Timer{Timeout: client.timeout, Wait: client.wait}
	retry.RunWith(retryer, t, fn)
}

func (client *Client) Context(t testutil.TestingTB) context.Context {
	ctx := testutil.TestContext(t)

	if client.token != "" {
		md := metadata.New(map[string]string{
			"x-consul-token": client.token,
		})
		ctx = metadata.NewOutgoingContext(ctx, md)
	}

	return ctx
}

func (client *Client) RequireResourceNotFound(t testutil.TestingTB, id *pbresource.ID) {
	t.Helper()

	rsp, err := client.Read(client.Context(t), &pbresource.ReadRequest{Id: id})
	require.Error(t, err)
	require.Equal(t, codes.NotFound, status.Code(err))
	require.Nil(t, rsp)
}

func (client *Client) RequireResourceExists(t testutil.TestingTB, id *pbresource.ID) *pbresource.Resource {
	t.Helper()

	rsp, err := client.Read(client.Context(t), &pbresource.ReadRequest{Id: id})
	require.NoError(t, err, "error reading %s with type %s", id.Name, ToGVK(id.Type))
	require.NotNil(t, rsp)
	return rsp.Resource
}

func ToGVK(resourceType *pbresource.Type) string {
	return fmt.Sprintf("%s.%s.%s", resourceType.Group, resourceType.GroupVersion, resourceType.Kind)
}

func (client *Client) WaitForResourceExists(t testutil.TestingTB, id *pbresource.ID) *pbresource.Resource {
	t.Helper()

	var res *pbresource.Resource
	client.retry(t, func(r *retry.R) {
		res = client.RequireResourceExists(r, id)
	})

	return res
}

func (client *Client) WaitForDeletion(t testutil.TestingTB, id *pbresource.ID) {
	t.Helper()

	client.retry(t, func(r *retry.R) {
		client.RequireResourceNotFound(r, id)
	})
}

// MustDelete will delete a resource by its id, retrying if necessary, and fail
// the test if it cannot delete it within the timeout. The client's request
// delay settings are taken into account for this operation.
func (client *Client) MustDelete(t testutil.TestingTB, id *pbresource.ID) {
	t.Helper()
	client.retryDelete(t, id)
}

func (client *Client) retryDelete(t testutil.TestingTB, id *pbresource.ID) {
	t.Helper()
	ctx := client.Context(t)

	client.retry(t, func(r *retry.R) {
		_, err := client.Delete(ctx, &pbresource.DeleteRequest{Id: id})
		if status.Code(err) == codes.NotFound {
			return
		}

		// codes.Aborted indicates a CAS failure and that the delete request should
		// be retried. Anything else should be considered an unrecoverable error.
		if err != nil && status.Code(err) != codes.Aborted {
			r.Stop(fmt.Errorf("failed to delete the resource: %w", err))
			return
		}

		require.NoError(r, err)
	})
}
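
// Illustrative only: a minimal sketch of how this helper might be used in a
// test, assuming a pbresource.ResourceServiceClient `conn` and a resource id
// `id` are already available (both hypothetical here). It exercises only the
// functions defined above:
//
//	client := NewClientWithACLToken(conn, "root")
//	client.SetRetryerConfig(10*time.Second, 100*time.Millisecond)
//	res := client.WaitForResourceExists(t, id)
//	_ = res
//	client.MustDelete(t, id)
//	client.WaitForDeletion(t, id)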
@ -1,84 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package tenancy

import (
	"context"
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"

	pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
	"github.com/hashicorp/consul/proto-public/pbresource"
	pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1"
	"github.com/hashicorp/consul/test-integ/topoutil"
	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
	"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest"
	"github.com/hashicorp/consul/testing/deployer/topology"
)

const (
	DefaultNamespaceName = "default"
	DefaultPartitionName = "default"
)

func newConfig(t *testing.T) *topology.Config {
	const clusterName = "cluster1"
	servers := topoutil.NewTopologyServerSet(clusterName+"-server", 3, []string{clusterName}, nil)

	cluster := &topology.Cluster{
		Enterprise:      utils.IsEnterprise(),
		Name:            clusterName,
		Nodes:           servers,
		EnableV2:        true,
		EnableV2Tenancy: true,
	}

	return &topology.Config{
		Images:   utils.TargetImages(),
		Networks: []*topology.Network{{Name: clusterName}},
		Clusters: []*topology.Cluster{cluster},
	}
}

func createNamespaces(t *testing.T, resourceServiceClient *Client, numNamespaces int, ap string) []*pbresource.Resource {
	namespaces := []*pbresource.Resource{}
	for i := 0; i < numNamespaces; i++ {
		namespace := &pbresource.Resource{
			Id: &pbresource.ID{
				Name:    fmt.Sprintf("namespace-%d", i),
				Type:    pbtenancy.NamespaceType,
				Tenancy: &pbresource.Tenancy{Partition: ap},
			},
		}
		rsp, err := resourceServiceClient.Write(context.Background(), &pbresource.WriteRequest{Resource: namespace})
		require.NoError(t, err)
		namespace = resourceServiceClient.WaitForResourceExists(t, rsp.Resource.Id)
		namespaces = append(namespaces, namespace)
	}
	return namespaces
}

func createServices(t *testing.T, resourceServiceClient *Client, numServices int, ap string, ns string) []*pbresource.Resource {
	services := []*pbresource.Resource{}
	for i := 0; i < numServices; i++ {
		service := &pbresource.Resource{
			Id: &pbresource.ID{
				Name:    fmt.Sprintf("service-%d", i),
				Type:    pbcatalog.ServiceType,
				Tenancy: &pbresource.Tenancy{Partition: ap, Namespace: ns},
			},
		}
		service = sprawltest.MustSetResourceData(t, service, &pbcatalog.Service{
			Workloads: &pbcatalog.WorkloadSelector{},
			Ports:     []*pbcatalog.ServicePort{},
		})
		rsp, err := resourceServiceClient.Write(context.Background(), &pbresource.WriteRequest{Resource: service})
		require.NoError(t, err)
		service = resourceServiceClient.WaitForResourceExists(t, rsp.Resource.Id)
		services = append(services, service)
	}
	return services
}
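
// Illustrative only: these helpers are meant to be combined roughly the way
// the namespace lifecycle test below does, e.g.
//
//	cfg := newConfig(t)
//	sp := sprawltest.Launch(t, cfg)
//	client := NewClient(sp.ResourceServiceClientForCluster("cluster1"))
//	namespaces := createNamespaces(t, client, 3, DefaultPartitionName)
//	for _, ns := range namespaces {
//		createServices(t, client, 3, DefaultPartitionName, ns.Id.Name)
//	}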
@ -1,84 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

//go:build !consulent

package tenancy

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/hashicorp/consul/proto-public/pbresource"
	pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1"
	"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest"
)

// TestNamespaceLifecycle sets up the following:
//
// - 1 cluster
// - 3 servers in that cluster
// - v2 resources and v2 tenancy are activated
//
// When this test is executed it tests the full lifecycle for a
// small number of namespaces:
// - creation of namespaces in the default partition
// - populating resources under namespaces
// - finally deleting everything
func TestNamespaceLifecycle(t *testing.T) {
	t.Parallel()

	cfg := newConfig(t)
	sp := sprawltest.Launch(t, cfg)
	cluster := sp.Topology().Clusters["cluster1"]
	client := NewClient(sp.ResourceServiceClientForCluster(cluster.Name))

	// 3 namespaces
	// @ 3 services per namespace
	// ==============================
	// 9 resources total
	tenants := []*pbresource.Resource{}
	numNamespaces := 3
	numServices := 3

	// The default namespace is expected to exist
	// when we bootstrap a cluster.
	client.RequireResourceExists(t, &pbresource.ID{
		Name:    DefaultNamespaceName,
		Type:    pbtenancy.NamespaceType,
		Tenancy: &pbresource.Tenancy{Partition: DefaultPartitionName},
	})

	// Namespaces are created in the default partition.
	namespaces := createNamespaces(t, client, numNamespaces, DefaultPartitionName)

	for _, namespace := range namespaces {
		services := createServices(t, client, numServices, DefaultPartitionName, namespace.Id.Name)
		tenants = append(tenants, services...)
	}

	// Verify test setup.
	require.Equal(t, numNamespaces*numServices, len(tenants))

	// List namespaces.
	listRsp, err := client.List(client.Context(t), &pbresource.ListRequest{
		Type:       pbtenancy.NamespaceType,
		Tenancy:    &pbresource.Tenancy{},
		NamePrefix: "namespace-",
	})
	require.NoError(t, err)
	require.Equal(t, len(namespaces), len(listRsp.Resources))

	// Delete all namespaces.
	for _, namespace := range namespaces {
		_, err := client.Delete(client.Context(t), &pbresource.DeleteRequest{Id: namespace.Id})
		require.NoError(t, err)
		client.WaitForDeletion(t, namespace.Id)
	}

	// Make sure no namespace tenants are left behind.
	for _, tenant := range tenants {
		client.RequireResourceNotFound(t, tenant.Id)
	}
}
@ -1,43 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package catalog

import (
	"testing"

	"github.com/stretchr/testify/require"

	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
	libtopology "github.com/hashicorp/consul/test/integration/consul-container/libs/topology"

	"github.com/hashicorp/consul/internal/catalog/catalogtest"
	rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
	"github.com/hashicorp/consul/proto-public/pbresource"
)

var (
	cli = rtest.ConfigureTestCLIFlags()
)

func TestCatalog(t *testing.T) {
	t.Parallel()

	cluster, _, _ := libtopology.NewCluster(t, &libtopology.ClusterConfig{
		NumServers: 3,
		BuildOpts:  &libcluster.BuildOptions{Datacenter: "dc1"},
		Cmd:        `-hcl=experiments=["resource-apis"]`,
	})

	followers, err := cluster.Followers()
	require.NoError(t, err)
	client := pbresource.NewResourceServiceClient(followers[0].GetGRPCConn())

	t.Run("one-shot", func(t *testing.T) {
		catalogtest.RunCatalogV2Beta1IntegrationTest(t, client, cli.ClientOptions(t)...)
	})

	t.Run("lifecycle", func(t *testing.T) {
		catalogtest.RunCatalogV2Beta1LifecycleIntegrationTest(t, client, cli.ClientOptions(t)...)
	})
}
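
// Illustrative note (an observation, not stated in the original source): the
// resource client above is built against a follower's gRPC connection, so
// these suites also exercise server-side forwarding of resource requests to
// the leader rather than talking to the leader directly.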
@ -1,555 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package trafficpermissions

import (
	"context"
	"fmt"
	"strings"
	"testing"

	"github.com/hashicorp/consul/sdk/testutil/retry"

	"github.com/stretchr/testify/require"

	rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
	pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
	pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
	pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
	"github.com/hashicorp/consul/proto-public/pbresource"
	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
	libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
	"github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)

const (
	echoPort                = 9999
	tcpPort                 = 8888
	staticServerVIP         = "240.0.0.1"
	staticServerReturnValue = "static-server"
	staticServerIdentity    = "static-server-identity"
)

type trafficPermissionsCase struct {
	tp1                *pbauth.TrafficPermissions
	tp2                *pbauth.TrafficPermissions
	client1TCPSuccess  bool
	client1EchoSuccess bool
	client2TCPSuccess  bool
	client2EchoSuccess bool
}
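
// Illustrative note (not from the original source): for each named case, tp1
// and tp2 are written before the case runs (or deleted when nil), and the
// four booleans state whether each static client is expected to reach the
// server's HTTP ("tcp") and echo ports through the mesh.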

// We are using tproxy to test traffic permissions because explicitly
// specifying destinations doesn't yet work when multiple downstreams specify
// the same destination. In the future, we will need to update this to use
// explicit destinations once we infer tproxy destinations from traffic
// permissions.
//
// This also explicitly uses virtual IPs and virtual ports because Consul DNS
// doesn't support v2 resources yet. We should update this to use Consul DNS
// when it is working.
func runTrafficPermissionsTests(t *testing.T, aclsEnabled bool, cases map[string]trafficPermissionsCase) {
	t.Parallel()
	cluster, resourceClient := createCluster(t, aclsEnabled)

	serverDataplane := createServerResources(t, resourceClient, cluster, cluster.Agents[1])
	client1Dataplane := createClientResources(t, resourceClient, cluster, cluster.Agents[2], 1)
	client2Dataplane := createClientResources(t, resourceClient, cluster, cluster.Agents[3], 2)

	assertDataplaneContainerState(t, client1Dataplane, "running")
	assertDataplaneContainerState(t, client2Dataplane, "running")
	assertDataplaneContainerState(t, serverDataplane, "running")

	for n, tc := range cases {
		t.Run(n, func(t *testing.T) {
			storeStaticServerTrafficPermissions(t, resourceClient, tc.tp1, 1)
			storeStaticServerTrafficPermissions(t, resourceClient, tc.tp2, 2)

			// We must establish a new TCP connection each time because TCP
			// traffic permissions are enforced at the connection level.
			retry.Run(t, func(r *retry.R) {
				assertPassing(r, httpRequestToVirtualAddress, client1Dataplane, tc.client1TCPSuccess)
				assertPassing(r, echoToVirtualAddress, client1Dataplane, tc.client1EchoSuccess)
				assertPassing(r, httpRequestToVirtualAddress, client2Dataplane, tc.client2TCPSuccess)
				assertPassing(r, echoToVirtualAddress, client2Dataplane, tc.client2EchoSuccess)
			})
		})
	}
}

func TestTrafficPermission_TCP_DefaultDeny(t *testing.T) {
	cases := map[string]trafficPermissionsCase{
		"default deny": {
			tp1:                nil,
			client1TCPSuccess:  false,
			client1EchoSuccess: false,
			client2TCPSuccess:  false,
			client2EchoSuccess: false,
		},
		"allow everything": {
			tp1: &pbauth.TrafficPermissions{
				Destination: &pbauth.Destination{
					IdentityName: staticServerIdentity,
				},
				Action: pbauth.Action_ACTION_ALLOW,
				Permissions: []*pbauth.Permission{
					{
						Sources: []*pbauth.Source{
							{
								// IdentityName: "static-client-1-identity",
								Namespace: "default",
								Partition: "default",
								Peer:      "local",
							},
						},
					},
				},
			},
			client1TCPSuccess:  true,
			client1EchoSuccess: true,
			client2TCPSuccess:  true,
			client2EchoSuccess: true,
		},
		"allow tcp": {
			tp1: &pbauth.TrafficPermissions{
				Destination: &pbauth.Destination{
					IdentityName: staticServerIdentity,
				},
				Action: pbauth.Action_ACTION_ALLOW,
				Permissions: []*pbauth.Permission{
					{
						Sources: []*pbauth.Source{
							{
								// IdentityName: "static-client-1-identity",
								Namespace: "default",
								Partition: "default",
								Peer:      "local",
							},
						},
						DestinationRules: []*pbauth.DestinationRule{
							{
								PortNames: []string{"tcp"},
							},
						},
					},
				},
			},
			client1TCPSuccess:  true,
			client1EchoSuccess: false,
			client2TCPSuccess:  true,
			client2EchoSuccess: false,
		},
		"client 1 only": {
			tp1: &pbauth.TrafficPermissions{
				Destination: &pbauth.Destination{
					IdentityName: staticServerIdentity,
				},
				Action: pbauth.Action_ACTION_ALLOW,
				Permissions: []*pbauth.Permission{
					{
						Sources: []*pbauth.Source{
							{
								IdentityName: "static-client-1-identity",
								Namespace:    "default",
								Partition:    "default",
								Peer:         "local",
							},
						},
					},
				},
			},
			client1TCPSuccess:  true,
			client1EchoSuccess: true,
			client2TCPSuccess:  false,
			client2EchoSuccess: false,
		},
		"allow all exclude client 1": {
			tp1: &pbauth.TrafficPermissions{
				Destination: &pbauth.Destination{
					IdentityName: staticServerIdentity,
				},
				Action: pbauth.Action_ACTION_ALLOW,
				Permissions: []*pbauth.Permission{
					{
						Sources: []*pbauth.Source{
							{
								Namespace: "default",
								Partition: "default",
								Peer:      "local",
								Exclude: []*pbauth.ExcludeSource{
									{
										IdentityName: "static-client-1-identity",
										Namespace:    "default",
										Partition:    "default",
										Peer:         "local",
									},
								},
							},
						},
					},
				},
			},
			client1TCPSuccess:  false,
			client1EchoSuccess: false,
			client2TCPSuccess:  true,
			client2EchoSuccess: true,
		},
	}

	runTrafficPermissionsTests(t, true, cases)
}

func TestTrafficPermission_TCP_DefaultAllow(t *testing.T) {
	cases := map[string]trafficPermissionsCase{
		"default allow": {
			tp1:                nil,
			client1TCPSuccess:  true,
			client1EchoSuccess: true,
			client2TCPSuccess:  true,
			client2EchoSuccess: true,
		},
		"empty allow denies everything": {
			tp1: &pbauth.TrafficPermissions{
				Destination: &pbauth.Destination{
					IdentityName: staticServerIdentity,
				},
				Action: pbauth.Action_ACTION_ALLOW,
			},
			client1TCPSuccess:  false,
			client1EchoSuccess: false,
			client2TCPSuccess:  false,
			client2EchoSuccess: false,
		},
		"allow everything": {
			tp1: &pbauth.TrafficPermissions{
				Destination: &pbauth.Destination{
					IdentityName: staticServerIdentity,
				},
				Action: pbauth.Action_ACTION_ALLOW,
				Permissions: []*pbauth.Permission{
					{
						Sources: []*pbauth.Source{
							{
								Namespace: "default",
								Partition: "default",
								Peer:      "local",
							},
						},
					},
				},
			},
			client1TCPSuccess:  true,
			client1EchoSuccess: true,
			client2TCPSuccess:  true,
			client2EchoSuccess: true,
		},
		"allow one protocol denies the other protocol": {
			tp1: &pbauth.TrafficPermissions{
				Destination: &pbauth.Destination{
					IdentityName: staticServerIdentity,
				},
				Action: pbauth.Action_ACTION_ALLOW,
				Permissions: []*pbauth.Permission{
					{
						Sources: []*pbauth.Source{
							{
								Namespace: "default",
								Partition: "default",
								Peer:      "local",
							},
						},
						DestinationRules: []*pbauth.DestinationRule{
							{
								PortNames: []string{"tcp"},
							},
						},
					},
				},
			},
			client1TCPSuccess:  true,
			client1EchoSuccess: false,
			client2TCPSuccess:  true,
			client2EchoSuccess: false,
		},
		"allow something unrelated": {
			tp1: &pbauth.TrafficPermissions{
				Destination: &pbauth.Destination{
					IdentityName: staticServerIdentity,
				},
				Action: pbauth.Action_ACTION_ALLOW,
				Permissions: []*pbauth.Permission{
					{
						Sources: []*pbauth.Source{
							{
								IdentityName: "something-else",
								Namespace:    "default",
								Partition:    "default",
								Peer:         "local",
							},
						},
					},
				},
			},
			client1TCPSuccess:  false,
			client1EchoSuccess: false,
			client2TCPSuccess:  false,
			client2EchoSuccess: false,
		},
	}

	runTrafficPermissionsTests(t, false, cases)
}
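
// Illustrative note (not from the original source): the default-deny vs.
// default-allow behavior exercised by the two tests above is driven by the
// aclsEnabled flag passed to runTrafficPermissionsTests; with ACLs enabled
// the mesh denies traffic unless a TrafficPermissions resource allows it.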

func createServiceAndDataplane(t *testing.T, node libcluster.Agent, cluster *libcluster.Cluster, proxyID, serviceName string, httpPort, grpcPort int, serviceBindPorts []int) (*libcluster.ConsulDataplaneContainer, error) {
	leader, err := cluster.Leader()
	require.NoError(t, err)
	leaderIP := leader.GetIP()

	token := cluster.TokenBootstrap

	// Do some trickery to ensure that partial completion is correctly torn
	// down, but successful execution is not.
	var deferClean utils.ResettableDefer
	defer deferClean.Execute()

	// Create a service and proxy instance
	svc, err := libservice.NewExampleService(context.Background(), serviceName, httpPort, grpcPort, node)
	if err != nil {
		return nil, err
	}
	deferClean.Add(func() {
		_ = svc.Terminate()
	})

	// Create Consul Dataplane
	dp, err := libcluster.NewConsulDataplane(context.Background(), proxyID, leaderIP, 8502, serviceBindPorts, node, true, token)
	require.NoError(t, err)
	deferClean.Add(func() {
		_ = dp.Terminate()
	})

	// disable cleanup functions now that we have an object with a Terminate() function
	deferClean.Reset()

	return dp, nil
}

func storeStaticServerTrafficPermissions(t *testing.T, resourceClient *rtest.Client, tp *pbauth.TrafficPermissions, i int) {
	id := &pbresource.ID{
		Name: fmt.Sprintf("static-server-tp-%d", i),
		Type: pbauth.TrafficPermissionsType,
	}
	if tp == nil {
		resourceClient.Delete(resourceClient.Context(t), &pbresource.DeleteRequest{
			Id: id,
		})
	} else {
		rtest.ResourceID(id).
			WithData(t, tp).
			Write(t, resourceClient)
	}
}

func createServerResources(t *testing.T, resourceClient *rtest.Client, cluster *libcluster.Cluster, node libcluster.Agent) *libcluster.ConsulDataplaneContainer {
	rtest.ResourceID(&pbresource.ID{
		Name: "static-server-service",
		Type: pbcatalog.ServiceType,
	}).
		WithData(t, &pbcatalog.Service{
			Workloads: &pbcatalog.WorkloadSelector{Prefixes: []string{"static-server"}},
			Ports: []*pbcatalog.ServicePort{
				{
					TargetPort:  "tcp",
					Protocol:    pbcatalog.Protocol_PROTOCOL_TCP,
					VirtualPort: 8888,
				},
				{
					TargetPort:  "echo",
					Protocol:    pbcatalog.Protocol_PROTOCOL_TCP,
					VirtualPort: 9999,
				},
				{TargetPort: "mesh", Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
			},
			VirtualIps: []string{"240.0.0.1"},
		}).Write(t, resourceClient)

	workloadPortMap := map[string]*pbcatalog.WorkloadPort{
		"tcp": {
			Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP,
		},
		"echo": {
			Port: 8078, Protocol: pbcatalog.Protocol_PROTOCOL_TCP,
		},
		"mesh": {
			Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH,
		},
	}

	rtest.ResourceID(&pbresource.ID{
		Name: "static-server-workload",
		Type: pbcatalog.WorkloadType,
	}).
		WithData(t, &pbcatalog.Workload{
			Addresses: []*pbcatalog.WorkloadAddress{
				{Host: node.GetIP()},
			},
			Ports:    workloadPortMap,
			Identity: staticServerIdentity,
		}).
		Write(t, resourceClient)

	rtest.ResourceID(&pbresource.ID{
		Name: staticServerIdentity,
		Type: pbauth.WorkloadIdentityType,
	}).
		Write(t, resourceClient)

	serverDataplane, err := createServiceAndDataplane(t, node, cluster, "static-server-workload", "static-server", 8080, 8079, []int{})
	require.NoError(t, err)

	return serverDataplane
}

func createClientResources(t *testing.T, resourceClient *rtest.Client, cluster *libcluster.Cluster, node libcluster.Agent, idx int) *libcluster.ConsulDataplaneContainer {
	prefix := fmt.Sprintf("static-client-%d", idx)
	rtest.ResourceID(&pbresource.ID{
		Name: prefix + "-service",
		Type: pbcatalog.ServiceType,
	}).
		WithData(t, &pbcatalog.Service{
			Workloads: &pbcatalog.WorkloadSelector{Prefixes: []string{prefix}},
			Ports: []*pbcatalog.ServicePort{
				{TargetPort: "tcp", Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
				{TargetPort: "mesh", Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
			},
		}).Write(t, resourceClient)

	workloadPortMap := map[string]*pbcatalog.WorkloadPort{
		"tcp": {
			Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP,
		},
		"mesh": {
			Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH,
		},
	}

	rtest.ResourceID(&pbresource.ID{
		Name: prefix + "-workload",
		Type: pbcatalog.WorkloadType,
	}).
		WithData(t, &pbcatalog.Workload{
			Addresses: []*pbcatalog.WorkloadAddress{
				{Host: node.GetIP()},
			},
			Ports:    workloadPortMap,
			Identity: prefix + "-identity",
		}).
		Write(t, resourceClient)

	rtest.ResourceID(&pbresource.ID{
		Name: prefix + "-identity",
		Type: pbauth.WorkloadIdentityType,
	}).
		Write(t, resourceClient)

	rtest.ResourceID(&pbresource.ID{
		Name: prefix + "-proxy-configuration",
		Type: pbmesh.ProxyConfigurationType,
	}).
		WithData(t, &pbmesh.ProxyConfiguration{
			Workloads: &pbcatalog.WorkloadSelector{
				Prefixes: []string{"static-client"},
			},
			DynamicConfig: &pbmesh.DynamicConfig{
				Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
			},
		}).
		Write(t, resourceClient)

	dp, err := createServiceAndDataplane(t, node, cluster, fmt.Sprintf("static-client-%d-workload", idx), "static-client", 8080, 8079, []int{})
	require.NoError(t, err)

	return dp
}

func createCluster(t *testing.T, aclsEnabled bool) (*libcluster.Cluster, *rtest.Client) {
	cluster, _, _ := topology.NewCluster(t, &topology.ClusterConfig{
		NumServers: 1,
		NumClients: 3,
		BuildOpts: &libcluster.BuildOptions{
			Datacenter:             "dc1",
			InjectAutoEncryption:   true,
			InjectGossipEncryption: true,
			AllowHTTPAnyway:        true,
			ACLEnabled:             aclsEnabled,
		},
		Cmd: `-hcl=experiments=["resource-apis"] log_level="TRACE"`,
	})

	leader, err := cluster.Leader()
	require.NoError(t, err)
	client := pbresource.NewResourceServiceClient(leader.GetGRPCConn())
	resourceClient := rtest.NewClientWithACLToken(client, cluster.TokenBootstrap)

	return cluster, resourceClient
}

// assertDataplaneContainerState validates the dataplane container's status.
func assertDataplaneContainerState(t *testing.T, dataplane *libcluster.ConsulDataplaneContainer, state string) {
	containerStatus, err := dataplane.GetStatus()
	require.NoError(t, err)
	require.Equal(t, state, containerStatus, fmt.Sprintf("Expected: %s. Got %s", state, containerStatus))
}

func httpRequestToVirtualAddress(dp *libcluster.ConsulDataplaneContainer) (string, error) {
	addr := fmt.Sprintf("%s:%d", staticServerVIP, tcpPort)

	out, err := dp.Exec(
		context.Background(),
		[]string{"sudo", "sh", "-c", fmt.Sprintf(`
			set -e
			curl -s "%s/debug?env=dump"
			`, addr),
		},
	)

	if err != nil {
		return out, fmt.Errorf("curl request to upstream virtual address %q\nerr = %v\nout = %s\nservice=%s", addr, err, out, dp.GetServiceName())
	}

	expected := fmt.Sprintf("FORTIO_NAME=%s", staticServerReturnValue)
	if !strings.Contains(out, expected) {
		return out, fmt.Errorf("expected %q to contain %q", out, expected)
	}

	return out, nil
}
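
// Illustrative note (an assumption about the example service, not stated in
// the original source): the FORTIO_NAME check above works because the example
// service is a fortio server whose /debug?env=dump endpoint echoes its
// environment, so a successful round-trip through the mesh includes
// FORTIO_NAME=static-server in the response body.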

func echoToVirtualAddress(dp *libcluster.ConsulDataplaneContainer) (string, error) {
	out, err := dp.Exec(
		context.Background(),
		[]string{"sudo", "sh", "-c", fmt.Sprintf(`
			set -e
			echo foo | nc %s %d
			`, staticServerVIP, echoPort),
		},
	)

	if err != nil {
		return out, fmt.Errorf("nc request to upstream virtual address %s:%d\nerr = %v\nout = %s\nservice=%s", staticServerVIP, echoPort, err, out, dp.GetServiceName())
	}

	if !strings.Contains(out, "foo") {
		return out, fmt.Errorf("expected %q to contain 'foo'", out)
	}

	return out, err
}

func assertPassing(t *retry.R, fn func(*libcluster.ConsulDataplaneContainer) (string, error), dp *libcluster.ConsulDataplaneContainer, success bool) {
	_, err := fn(dp)
	if success {
		require.NoError(t, err)
	} else {
		require.Error(t, err)
	}
}
@ -1,88 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package catalog

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/hashicorp/go-version"

	"github.com/hashicorp/consul/internal/catalog/catalogtest"
	"github.com/hashicorp/consul/proto-public/pbresource"
	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
	"github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)

var minCatalogResourceVersion = version.Must(version.NewVersion("v1.18.0"))

const (
	versionUndetermined = `
Cannot determine the actual version the starting image represents.
Scrutinize test failures to ensure that the starting version should
actually be able to be used for creating the initial data set.
`
)

func maybeSkipUpgradeTest(t *testing.T, minVersion *version.Version) {
	t.Helper()

	image := utils.DockerImage(utils.GetLatestImageName(), utils.LatestVersion)
	latestVersion, err := utils.DockerImageVersion(image)

	if latestVersion != nil && latestVersion.LessThan(minVersion) {
		t.Skipf("Upgrade test isn't applicable with version %q as the starting version", latestVersion.String())
	}

	if err != nil || latestVersion == nil {
		t.Log(versionUndetermined)
	}
}

// TestCatalogUpgrade upgrades a cluster from the latest released version to
// the target version and ensures that the catalog still functions properly.
func TestCatalogUpgrade(t *testing.T) {
	maybeSkipUpgradeTest(t, minCatalogResourceVersion)
	t.Parallel()

	const numServers = 1
	buildOpts := &libcluster.BuildOptions{
		ConsulImageName:      utils.GetLatestImageName(),
		ConsulVersion:        utils.LatestVersion,
		Datacenter:           "dc1",
		InjectAutoEncryption: true,
	}

	cluster, _, _ := topology.NewCluster(t, &topology.ClusterConfig{
		NumServers:                numServers,
		BuildOpts:                 buildOpts,
		ApplyDefaultProxySettings: false,
		Cmd:                       `-hcl=experiments=["resource-apis"]`,
	})

	client := cluster.APIClient(0)

	libcluster.WaitForLeader(t, cluster, client)
	libcluster.WaitForMembers(t, client, numServers)

	leader, err := cluster.Leader()
	require.NoError(t, err)
	rscClient := pbresource.NewResourceServiceClient(leader.GetGRPCConn())

	// Initialize some data.
	catalogtest.PublishCatalogV2Beta1IntegrationTestData(t, rscClient)

	// Upgrade the cluster to the target version.
	t.Logf("initiating standard upgrade to version=%q", utils.TargetVersion)
	err = cluster.StandardUpgrade(t, context.Background(), utils.GetTargetImageName(), utils.TargetVersion)

	require.NoError(t, err)
	libcluster.WaitForLeader(t, cluster, client)
	libcluster.WaitForMembers(t, client, numServers)

	catalogtest.VerifyCatalogV2Beta1IntegrationTestResults(t, rscClient)
}