mirror of https://github.com/hashicorp/consul
* Add cache resource decoding helpers
* Implement a common package for workload selection facilities. This includes:
  * Controller cache Index
  * ACL hooks
  * Dependency Mapper to go from a workload to the list of resources which select it
  * Dependency Mapper to go from a resource which selects workloads to all the workloads it selects
* Update the endpoints controller to use the cache instead of custom mappers.

Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com>
Branch: pull/20244/head
Authored by Matt Keeler 10 months ago, committed by GitHub.
34 changed files with 1778 additions and 757 deletions
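
Before the diff itself, a brief orientation: the change removes the endpoints controller's hand-rolled resource reads and replaces them with the new internal/catalog/workloadselector package (cache index, ACL hooks, and dependency mappers). The sketch below is hypothetical wiring distilled from the tests in this PR, not code taken from the diff; the helper name newSelectorCache is an invention used only for illustration. It shows how a controller cache would be primed with the new index before the mappers can answer queries.

package example

import (
    "github.com/hashicorp/consul/internal/catalog/workloadselector"
    "github.com/hashicorp/consul/internal/controller/cache"
    pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
)

// newSelectorCache is a hypothetical setup helper (not part of this diff).
// It registers the Workload type (the tests in this PR rely on its default
// "id" index) and attaches the package's "selected-workloads" index to the
// Service type so Services can be looked up by the workloads they select.
func newSelectorCache() (cache.Cache, error) {
    c := cache.New()
    c.AddType(pbcatalog.WorkloadType)

    idx := workloadselector.Index[*pbcatalog.Service](workloadselector.IndexName)
    if err := c.AddIndex(pbcatalog.ServiceType, idx); err != nil {
        return nil, err
    }
    return c, nil
}

The integration test later in this diff performs essentially this setup before exercising the mappers.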
@@ -1,189 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package endpoints

import (
    "context"
    "fmt"
    "sort"

    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"

    "github.com/hashicorp/consul/internal/controller"
    "github.com/hashicorp/consul/internal/resource"
    pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
    "github.com/hashicorp/consul/proto-public/pbresource"
)

type serviceData struct {
    resource *pbresource.Resource
    service  *pbcatalog.Service
}

type endpointsData struct {
    resource  *pbresource.Resource
    endpoints *pbcatalog.ServiceEndpoints
}

type workloadData struct {
    resource *pbresource.Resource
    workload *pbcatalog.Workload
}

// getServiceData will read the service with the given ID and unmarshal the
// Data field. The return value is a struct that contains the retrieved
// resource as well as the unmarshalled form. If the resource doesn't
// exist, nil will be returned. Any other error, either with retrieving
// the resource or unmarshalling it, will be returned to the caller.
func getServiceData(ctx context.Context, rt controller.Runtime, id *pbresource.ID) (*serviceData, error) {
    rsp, err := rt.Client.Read(ctx, &pbresource.ReadRequest{Id: id})
    switch {
    case status.Code(err) == codes.NotFound:
        return nil, nil
    case err != nil:
        return nil, err
    }

    var service pbcatalog.Service
    err = rsp.Resource.Data.UnmarshalTo(&service)
    if err != nil {
        return nil, resource.NewErrDataParse(&service, err)
    }

    return &serviceData{resource: rsp.Resource, service: &service}, nil
}

// getEndpointsData will read the endpoints with the given ID and unmarshal the
// Data field. The return value is a struct that contains the retrieved
// resource as well as the unmarshalled form. If the resource doesn't
// exist, nil will be returned. Any other error, either with retrieving
// the resource or unmarshalling it, will be returned to the caller.
func getEndpointsData(ctx context.Context, rt controller.Runtime, id *pbresource.ID) (*endpointsData, error) {
    rsp, err := rt.Client.Read(ctx, &pbresource.ReadRequest{Id: id})
    switch {
    case status.Code(err) == codes.NotFound:
        return nil, nil
    case err != nil:
        return nil, err
    }

    var endpoints pbcatalog.ServiceEndpoints
    err = rsp.Resource.Data.UnmarshalTo(&endpoints)
    if err != nil {
        return nil, resource.NewErrDataParse(&endpoints, err)
    }

    return &endpointsData{resource: rsp.Resource, endpoints: &endpoints}, nil
}

// getWorkloadData will retrieve all workloads for the given service's selector
// and unmarshal them, returning a slice of objects holding both the resource and
// unmarshalled forms. Unmarshalling errors, or other resource service errors,
// will be returned to the caller.
func getWorkloadData(ctx context.Context, rt controller.Runtime, svc *serviceData) ([]*workloadData, error) {
    workloadResources, err := gatherWorkloadsForService(ctx, rt, svc)
    if err != nil {
        return nil, err
    }

    var results []*workloadData
    for _, res := range workloadResources {
        var workload pbcatalog.Workload
        err = res.Data.UnmarshalTo(&workload)
        if err != nil {
            return nil, resource.NewErrDataParse(&workload, err)
        }

        results = append(results, &workloadData{resource: res, workload: &workload})
    }

    return results, nil
}

// gatherWorkloadsForService will retrieve all the unique workloads for a given selector.
// NotFound errors for workloads selected by name will be ignored. Any other
// resource service errors will be returned to the caller. Prior to returning
// the slice of resources, they will be sorted by name. The consistent ordering
// allows callers to diff two versions of the data to determine if anything
// has changed, and it also makes testing a little easier.
func gatherWorkloadsForService(ctx context.Context, rt controller.Runtime, svc *serviceData) ([]*pbresource.Resource, error) {
    var workloads []*pbresource.Resource

    sel := svc.service.GetWorkloads()

    // This map tracks all the gathered workloads by name. It is mainly used to deduplicate
    // workloads if they are specified multiple times throughout the list of selection criteria.
    workloadNames := make(map[string]struct{})

    // First gather all the prefix-matched workloads. We could do this second, but by doing
    // it first it's possible we can avoid some resource service calls to read individual
    // workloads selected by name if they are also matched by a prefix.
    for _, prefix := range sel.GetPrefixes() {
        rsp, err := rt.Client.List(ctx, &pbresource.ListRequest{
            Type:       pbcatalog.WorkloadType,
            Tenancy:    svc.resource.Id.Tenancy,
            NamePrefix: prefix,
        })
        if err != nil {
            return nil, err
        }

        // Append all workloads in the list response to our list of all selected workloads.
        for _, workload := range rsp.Resources {
            // ignore duplicate workloads
            if _, found := workloadNames[workload.Id.Name]; !found {
                workloads = append(workloads, workload)
                workloadNames[workload.Id.Name] = struct{}{}
            }
        }
    }

    // Now gather the exact-match selections.
    for _, name := range sel.GetNames() {
        // ignore names we have already fetched
        if _, found := workloadNames[name]; found {
            continue
        }

        workloadID := &pbresource.ID{
            Type:    pbcatalog.WorkloadType,
            Tenancy: svc.resource.Id.Tenancy,
            Name:    name,
        }

        rsp, err := rt.Client.Read(ctx, &pbresource.ReadRequest{Id: workloadID})
        switch {
        case status.Code(err) == codes.NotFound:
            // Ignore not found errors as services may select workloads that do not
            // yet exist. This is not considered an error state or misconfiguration
            // as the user could be getting ready to add the workloads.
            continue
        case err != nil:
            return nil, err
        }

        workloads = append(workloads, rsp.Resource)
        workloadNames[rsp.Resource.Id.Name] = struct{}{}
    }

    if sel.GetFilter() != "" && len(workloads) > 0 {
        var err error
        workloads, err = resource.FilterResourcesByMetadata(workloads, sel.GetFilter())
        if err != nil {
            return nil, fmt.Errorf("error filtering results by metadata: %w", err)
        }
    }

    // Sorting ensures deterministic output. This will help for testing, but
    // the real reason to do it is so we can diff the set of workload
    // endpoints to determine whether we need to update them.
    sort.Slice(workloads, func(i, j int) bool {
        return workloads[i].Id.Name < workloads[j].Id.Name
    })

    return workloads, nil
}
@@ -1,358 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package endpoints

import (
    "context"
    "fmt"
    "testing"

    "github.com/stretchr/testify/require"
    "github.com/stretchr/testify/suite"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
    "google.golang.org/protobuf/proto"

    svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
    "github.com/hashicorp/consul/internal/catalog/internal/types"
    "github.com/hashicorp/consul/internal/controller"
    "github.com/hashicorp/consul/internal/resource"
    "github.com/hashicorp/consul/internal/resource/resourcetest"
    rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
    pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
    "github.com/hashicorp/consul/proto-public/pbresource"
    "github.com/hashicorp/consul/proto/private/prototest"
    "github.com/hashicorp/consul/sdk/testutil"
)

type reconciliationDataSuite struct {
    suite.Suite

    ctx    context.Context
    client *resourcetest.Client
    rt     controller.Runtime

    apiServiceData       *pbcatalog.Service
    apiService           *pbresource.Resource
    apiServiceSubsetData *pbcatalog.Service
    apiServiceSubset     *pbresource.Resource
    apiEndpoints         *pbresource.Resource
    api1Workload         *pbresource.Resource
    api2Workload         *pbresource.Resource
    api123Workload       *pbresource.Resource
    web1Workload         *pbresource.Resource
    web2Workload         *pbresource.Resource

    tenancies []*pbresource.Tenancy
}

func (suite *reconciliationDataSuite) SetupTest() {
    suite.ctx = testutil.TestContext(suite.T())
    suite.tenancies = rtest.TestTenancies()
    resourceClient := svctest.NewResourceServiceBuilder().
        WithRegisterFns(types.Register).
        WithTenancies(suite.tenancies...).
        Run(suite.T())
    suite.client = resourcetest.NewClient(resourceClient)
    suite.rt = controller.Runtime{
        Client: suite.client,
        Logger: testutil.Logger(suite.T()),
    }

    suite.apiServiceData = &pbcatalog.Service{
        Workloads: &pbcatalog.WorkloadSelector{
            // This service's selectors are specially crafted to exercise both the
            // deduplication and sorting behaviors of gatherWorkloadsForService.
            Prefixes: []string{"api-"},
            Names:    []string{"api-1", "web-2", "web-1", "api-1", "not-found"},
        },
        Ports: []*pbcatalog.ServicePort{
            {
                TargetPort: "http",
                Protocol:   pbcatalog.Protocol_PROTOCOL_HTTP,
            },
        },
    }
    suite.apiServiceSubsetData = proto.Clone(suite.apiServiceData).(*pbcatalog.Service)
    suite.apiServiceSubsetData.Workloads.Filter = "(zim in metadata) and (metadata.zim matches `^g.`)"
}

func (suite *reconciliationDataSuite) TestGetServiceData_NotFound() {
    // This test's purpose is to ensure that NotFound errors when retrieving
    // the service data are ignored properly.
    suite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) {
        data, err := getServiceData(suite.ctx, suite.rt, rtest.Resource(pbcatalog.ServiceType, "not-found").WithTenancy(tenancy).ID())
        require.NoError(suite.T(), err)
        require.Nil(suite.T(), data)
    })
}

func (suite *reconciliationDataSuite) TestGetServiceData_ReadError() {
    // This test's purpose is to ensure that Read errors other than NotFound
    // are propagated back to the caller. Specifying a resource ID with an
    // unregistered type is the easiest way to force a resource service error.
    badType := &pbresource.Type{
        Group:        "not",
        Kind:         "found",
        GroupVersion: "vfake",
    }
    suite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) {
        data, err := getServiceData(suite.ctx, suite.rt, rtest.Resource(badType, "foo").WithTenancy(tenancy).ID())
        require.Error(suite.T(), err)
        require.Equal(suite.T(), codes.InvalidArgument, status.Code(err))
        require.Nil(suite.T(), data)
    })
}

func (suite *reconciliationDataSuite) TestGetServiceData_UnmarshalError() {
    // This test's purpose is to ensure that unmarshalling errors are returned
    // to the caller. We are using a resource id that points to an endpoints
    // object instead of a service to ensure that the data will not be unmarshallable.
    suite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) {
        data, err := getServiceData(suite.ctx, suite.rt, rtest.Resource(pbcatalog.ServiceEndpointsType, "api").WithTenancy(tenancy).ID())
        require.Error(suite.T(), err)
        var parseErr resource.ErrDataParse
        require.ErrorAs(suite.T(), err, &parseErr)
        require.Nil(suite.T(), data)
    })
}

func (suite *reconciliationDataSuite) TestGetServiceData_Ok() {
    // This test's purpose is to ensure that the happy path for
    // retrieving a service works as expected.
    suite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) {
        data, err := getServiceData(suite.ctx, suite.rt, suite.apiService.Id)
        require.NoError(suite.T(), err)
        require.NotNil(suite.T(), data)
        require.NotNil(suite.T(), data.resource)
        prototest.AssertDeepEqual(suite.T(), suite.apiService.Id, data.resource.Id)
        require.Len(suite.T(), data.service.Ports, 1)
    })
}

func (suite *reconciliationDataSuite) TestGetEndpointsData_NotFound() {
    // This test's purpose is to ensure that NotFound errors when retrieving
    // the endpoints data are ignored properly.
    suite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) {
        data, err := getEndpointsData(suite.ctx, suite.rt, rtest.Resource(pbcatalog.ServiceEndpointsType, "not-found").WithTenancy(tenancy).ID())
        require.NoError(suite.T(), err)
        require.Nil(suite.T(), data)
    })
}

func (suite *reconciliationDataSuite) TestGetEndpointsData_ReadError() {
    // This test's purpose is to ensure that Read errors other than NotFound
    // are propagated back to the caller. Specifying a resource ID with an
    // unregistered type is the easiest way to force a resource service error.
    badType := &pbresource.Type{
        Group:        "not",
        Kind:         "found",
        GroupVersion: "vfake",
    }
    suite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) {
        data, err := getEndpointsData(suite.ctx, suite.rt, rtest.Resource(badType, "foo").WithTenancy(tenancy).ID())
        require.Error(suite.T(), err)
        require.Equal(suite.T(), codes.InvalidArgument, status.Code(err))
        require.Nil(suite.T(), data)
    })
}

func (suite *reconciliationDataSuite) TestGetEndpointsData_UnmarshalError() {
    // This test's purpose is to ensure that unmarshalling errors are returned
    // to the caller. We are using a resource id that points to a service object
    // instead of an endpoints object to ensure that the data will not be unmarshallable.
    suite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) {
        data, err := getEndpointsData(suite.ctx, suite.rt, rtest.Resource(pbcatalog.ServiceType, "api").WithTenancy(tenancy).ID())
        require.Error(suite.T(), err)
        var parseErr resource.ErrDataParse
        require.ErrorAs(suite.T(), err, &parseErr)
        require.Nil(suite.T(), data)
    })
}

func (suite *reconciliationDataSuite) TestGetEndpointsData_Ok() {
    // This test's purpose is to ensure that the happy path for
    // retrieving an endpoints object works as expected.
    suite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) {
        data, err := getEndpointsData(suite.ctx, suite.rt, suite.apiEndpoints.Id)
        require.NoError(suite.T(), err)
        require.NotNil(suite.T(), data)
        require.NotNil(suite.T(), data.resource)
        prototest.AssertDeepEqual(suite.T(), suite.apiEndpoints.Id, data.resource.Id)
        require.Len(suite.T(), data.endpoints.Endpoints, 1)
    })
}

func (suite *reconciliationDataSuite) TestGetWorkloadData() {
    // This test's purpose is to ensure that gathering workloads for
    // a service works as expected. The service's selector was crafted
    // to exercise the deduplication behavior as well as the sorting
    // behavior. The assertions in this test will verify that only
    // unique workloads are returned and that they are ordered.

    suite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) {
        require.NotNil(suite.T(), suite.apiService)

        data, err := getWorkloadData(suite.ctx, suite.rt, &serviceData{
            resource: suite.apiService,
            service:  suite.apiServiceData,
        })

        require.NoError(suite.T(), err)
        require.Len(suite.T(), data, 5)
        prototest.AssertDeepEqual(suite.T(), suite.api1Workload, data[0].resource)
        prototest.AssertDeepEqual(suite.T(), suite.api123Workload, data[1].resource)
        prototest.AssertDeepEqual(suite.T(), suite.api2Workload, data[2].resource)
        prototest.AssertDeepEqual(suite.T(), suite.web1Workload, data[3].resource)
        prototest.AssertDeepEqual(suite.T(), suite.web2Workload, data[4].resource)
    })
}

func (suite *reconciliationDataSuite) TestGetWorkloadDataWithFilter() {
    // This is like TestGetWorkloadData except it exercises the post-read
    // filter on the selector.
    suite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) {
        require.NotNil(suite.T(), suite.apiServiceSubset)

        data, err := getWorkloadData(suite.ctx, suite.rt, &serviceData{
            resource: suite.apiServiceSubset,
            service:  suite.apiServiceSubsetData,
        })

        require.NoError(suite.T(), err)
        require.Len(suite.T(), data, 2)
        prototest.AssertDeepEqual(suite.T(), suite.api123Workload, data[0].resource)
        prototest.AssertDeepEqual(suite.T(), suite.web1Workload, data[1].resource)
    })
}

func TestReconciliationData(t *testing.T) {
    suite.Run(t, new(reconciliationDataSuite))
}

func (suite *reconciliationDataSuite) setupResourcesWithTenancy(tenancy *pbresource.Tenancy) {
    suite.apiService = rtest.Resource(pbcatalog.ServiceType, "api").
        WithTenancy(tenancy).
        WithData(suite.T(), suite.apiServiceData).
        Write(suite.T(), suite.client)

    suite.apiServiceSubset = rtest.Resource(pbcatalog.ServiceType, "api-subset").
        WithTenancy(tenancy).
        WithData(suite.T(), suite.apiServiceSubsetData).
        Write(suite.T(), suite.client)

    suite.api1Workload = rtest.Resource(pbcatalog.WorkloadType, "api-1").
        WithTenancy(tenancy).
        WithMeta("zim", "dib").
        WithData(suite.T(), &pbcatalog.Workload{
            Addresses: []*pbcatalog.WorkloadAddress{
                {Host: "127.0.0.1"},
            },
            Ports: map[string]*pbcatalog.WorkloadPort{
                "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
            },
            Identity: "api",
        }).
        Write(suite.T(), suite.client)

    suite.api2Workload = rtest.Resource(pbcatalog.WorkloadType, "api-2").
        WithTenancy(tenancy).
        WithData(suite.T(), &pbcatalog.Workload{
            Addresses: []*pbcatalog.WorkloadAddress{
                {Host: "127.0.0.1"},
            },
            Ports: map[string]*pbcatalog.WorkloadPort{
                "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
            },
            Identity: "api",
        }).
        Write(suite.T(), suite.client)

    suite.api123Workload = rtest.Resource(pbcatalog.WorkloadType, "api-123").
        WithTenancy(tenancy).
        WithMeta("zim", "gir").
        WithData(suite.T(), &pbcatalog.Workload{
            Addresses: []*pbcatalog.WorkloadAddress{
                {Host: "127.0.0.1"},
            },
            Ports: map[string]*pbcatalog.WorkloadPort{
                "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
            },
            Identity: "api",
        }).
        Write(suite.T(), suite.client)

    suite.web1Workload = rtest.Resource(pbcatalog.WorkloadType, "web-1").
        WithTenancy(tenancy).
        WithMeta("zim", "gaz").
        WithData(suite.T(), &pbcatalog.Workload{
            Addresses: []*pbcatalog.WorkloadAddress{
                {Host: "127.0.0.1"},
            },
            Ports: map[string]*pbcatalog.WorkloadPort{
                "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
            },
            Identity: "web",
        }).
        Write(suite.T(), suite.client)

    suite.web2Workload = rtest.Resource(pbcatalog.WorkloadType, "web-2").
        WithTenancy(tenancy).
        WithData(suite.T(), &pbcatalog.Workload{
            Addresses: []*pbcatalog.WorkloadAddress{
                {Host: "127.0.0.1"},
            },
            Ports: map[string]*pbcatalog.WorkloadPort{
                "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
            },
            Identity: "web",
        }).
        Write(suite.T(), suite.client)

    suite.apiEndpoints = rtest.Resource(pbcatalog.ServiceEndpointsType, "api").
        WithTenancy(tenancy).
        WithData(suite.T(), &pbcatalog.ServiceEndpoints{
            Endpoints: []*pbcatalog.Endpoint{
                {
                    TargetRef: rtest.Resource(pbcatalog.WorkloadType, "api-1").WithTenancy(tenancy).ID(),
                    Addresses: []*pbcatalog.WorkloadAddress{
                        {
                            Host:  "127.0.0.1",
                            Ports: []string{"http"},
                        },
                    },
                    Ports: map[string]*pbcatalog.WorkloadPort{
                        "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
                    },
                    HealthStatus: pbcatalog.Health_HEALTH_PASSING,
                },
            },
        }).
        Write(suite.T(), suite.client)
}

func (suite *reconciliationDataSuite) cleanupResources() {
    suite.client.MustDelete(suite.T(), suite.apiService.Id)
    suite.client.MustDelete(suite.T(), suite.apiServiceSubset.Id)
    suite.client.MustDelete(suite.T(), suite.api1Workload.Id)
    suite.client.MustDelete(suite.T(), suite.api2Workload.Id)
    suite.client.MustDelete(suite.T(), suite.api123Workload.Id)
    suite.client.MustDelete(suite.T(), suite.web1Workload.Id)
    suite.client.MustDelete(suite.T(), suite.web2Workload.Id)
    suite.client.MustDelete(suite.T(), suite.apiEndpoints.Id)
}

func (suite *reconciliationDataSuite) runTestCaseWithTenancies(testFunc func(*pbresource.Tenancy)) {
    for _, tenancy := range suite.tenancies {
        suite.Run(suite.appendTenancyInfo(tenancy), func() {
            suite.setupResourcesWithTenancy(tenancy)
            testFunc(tenancy)
            suite.T().Cleanup(suite.cleanupResources)
        })
    }
}

func (suite *reconciliationDataSuite) appendTenancyInfo(tenancy *pbresource.Tenancy) string {
    return fmt.Sprintf("%s_Namespace_%s_Partition", tenancy.Namespace, tenancy.Partition)
}
@@ -0,0 +1,123 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package workloadselector

import (
    "testing"

    "github.com/stretchr/testify/require"
    "github.com/stretchr/testify/suite"

    "github.com/hashicorp/consul/acl"
    "github.com/hashicorp/consul/internal/resource"
    "github.com/hashicorp/consul/internal/resource/resourcetest"
    pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
    "github.com/hashicorp/consul/proto-public/pbresource"
)

func TestACLHooks(t *testing.T) {
    suite.Run(t, new(aclHookSuite))
}

type aclHookSuite struct {
    suite.Suite

    hooks *resource.ACLHooks
    authz *acl.MockAuthorizer
    ctx   *acl.AuthorizerContext
    res   *pbresource.Resource
}

func (suite *aclHookSuite) SetupTest() {
    suite.authz = new(acl.MockAuthorizer)

    suite.authz.On("ToAllowAuthorizer").Return(acl.AllowAuthorizer{Authorizer: suite.authz, AccessorID: "862270e5-7d7b-4583-98bc-4d14810cc158"})

    suite.ctx = &acl.AuthorizerContext{}
    acl.DefaultEnterpriseMeta().FillAuthzContext(suite.ctx)

    suite.hooks = ACLHooks[*pbcatalog.Service]()

    suite.res = resourcetest.Resource(pbcatalog.ServiceType, "foo").
        WithData(suite.T(), &pbcatalog.Service{
            Workloads: &pbcatalog.WorkloadSelector{
                Prefixes: []string{"api-"},
                Names:    []string{"bar"},
            },
        }).
        WithTenancy(resource.DefaultNamespacedTenancy()).
        Build()
}

// TearDownTest (testify's expected method name) asserts the mock expectations after each test.
func (suite *aclHookSuite) TearDownTest() {
    suite.authz.AssertExpectations(suite.T())
}

func (suite *aclHookSuite) TestReadHook_Allowed() {
    suite.authz.On("ServiceRead", "foo", suite.ctx).
        Return(acl.Allow).
        Once()

    require.NoError(suite.T(), suite.hooks.Read(suite.authz, suite.ctx, suite.res.Id, nil))
}

func (suite *aclHookSuite) TestReadHook_Denied() {
    suite.authz.On("ServiceRead", "foo", suite.ctx).
        Return(acl.Deny).
        Once()

    require.Error(suite.T(), suite.hooks.Read(suite.authz, suite.ctx, suite.res.Id, nil))
}

func (suite *aclHookSuite) TestWriteHook_ServiceWriteDenied() {
    suite.authz.On("ServiceWrite", "foo", suite.ctx).
        Return(acl.Deny).
        Once()

    require.Error(suite.T(), suite.hooks.Write(suite.authz, suite.ctx, suite.res))
}

func (suite *aclHookSuite) TestWriteHook_ServiceReadNameDenied() {
    suite.authz.On("ServiceWrite", "foo", suite.ctx).
        Return(acl.Allow).
        Once()

    suite.authz.On("ServiceRead", "bar", suite.ctx).
        Return(acl.Deny).
        Once()

    require.Error(suite.T(), suite.hooks.Write(suite.authz, suite.ctx, suite.res))
}

func (suite *aclHookSuite) TestWriteHook_ServiceReadPrefixDenied() {
    suite.authz.On("ServiceWrite", "foo", suite.ctx).
        Return(acl.Allow).
        Once()

    suite.authz.On("ServiceRead", "bar", suite.ctx).
        Return(acl.Allow).
        Once()

    suite.authz.On("ServiceReadPrefix", "api-", suite.ctx).
        Return(acl.Deny).
        Once()

    require.Error(suite.T(), suite.hooks.Write(suite.authz, suite.ctx, suite.res))
}

func (suite *aclHookSuite) TestWriteHook_Allowed() {
    suite.authz.On("ServiceWrite", "foo", suite.ctx).
        Return(acl.Allow).
        Once()

    suite.authz.On("ServiceRead", "bar", suite.ctx).
        Return(acl.Allow).
        Once()

    suite.authz.On("ServiceReadPrefix", "api-", suite.ctx).
        Return(acl.Allow).
        Once()

    require.NoError(suite.T(), suite.hooks.Write(suite.authz, suite.ctx, suite.res))
}
@@ -0,0 +1,114 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package workloadselector

import (
    "fmt"
    "sort"

    "github.com/hashicorp/consul/internal/controller/cache"
    "github.com/hashicorp/consul/internal/controller/cache/index"
    "github.com/hashicorp/consul/internal/resource"
    pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
    "github.com/hashicorp/consul/proto-public/pbresource"
)

// GetWorkloadsWithSelector will retrieve all workloads for the given resource's selector
// and unmarshal them, returning a slice of objects holding both the resource and
// unmarshalled forms. Unmarshalling errors, or other cache errors,
// will be returned to the caller.
func GetWorkloadsWithSelector[T WorkloadSelecting](c cache.ReadOnlyCache, res *resource.DecodedResource[T]) ([]*resource.DecodedResource[*pbcatalog.Workload], error) {
    if res == nil {
        return nil, nil
    }

    sel := res.Data.GetWorkloads()

    if sel == nil || (len(sel.GetNames()) < 1 && len(sel.GetPrefixes()) < 1) {
        return nil, nil
    }

    // This map tracks all workloads by name, which is needed to deduplicate workloads if they
    // are specified multiple times throughout the list of selection criteria.
    workloadNames := make(map[string]struct{})

    var workloads []*resource.DecodedResource[*pbcatalog.Workload]

    // First gather all the prefix-matched workloads. We could do this second, but by doing
    // it first it's possible we can avoid some operations to get individual
    // workloads selected by name if they are also matched by a prefix.
    for _, prefix := range sel.GetPrefixes() {
        iter, err := cache.ListIteratorDecoded[*pbcatalog.Workload](
            c,
            pbcatalog.WorkloadType,
            "id",
            &pbresource.ID{
                Type:    pbcatalog.WorkloadType,
                Tenancy: res.Id.Tenancy,
                Name:    prefix,
            },
            index.IndexQueryOptions{Prefix: true})
        if err != nil {
            return nil, err
        }

        // Append all workloads from the iterator to our list of all selected workloads.
        for workload, err := iter.Next(); workload != nil || err != nil; workload, err = iter.Next() {
            if err != nil {
                return nil, err
            }

            // ignore duplicate workloads
            if _, found := workloadNames[workload.Id.Name]; !found {
                workloads = append(workloads, workload)
                workloadNames[workload.Id.Name] = struct{}{}
            }
        }
    }

    // Now gather the exact-match selections.
    for _, name := range sel.GetNames() {
        // ignore names we have already fetched
        if _, found := workloadNames[name]; found {
            continue
        }

        workloadID := &pbresource.ID{
            Type:    pbcatalog.WorkloadType,
            Tenancy: res.Id.Tenancy,
            Name:    name,
        }

        res, err := cache.GetDecoded[*pbcatalog.Workload](c, pbcatalog.WorkloadType, "id", workloadID)
        if err != nil {
            return nil, err
        }

        // Ignore workloads that don't exist, as it is fine for a Service to select them. If they exist in the
        // future then the ServiceEndpoints will be regenerated to include them.
        if res == nil {
            continue
        }

        workloads = append(workloads, res)
        workloadNames[res.Id.Name] = struct{}{}
    }

    if sel.GetFilter() != "" && len(workloads) > 0 {
        var err error
        workloads, err = resource.FilterResourcesByMetadata(workloads, sel.GetFilter())
        if err != nil {
            return nil, fmt.Errorf("error filtering results by metadata: %w", err)
        }
    }

    // Sorting ensures deterministic output. This will help for testing, but
    // the real reason to do it is so we can diff the set of workload
    // endpoints to determine whether we need to update them.
    sort.Slice(workloads, func(i, j int) bool {
        return workloads[i].Id.Name < workloads[j].Id.Name
    })

    return workloads, nil
}
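
As a usage illustration for GetWorkloadsWithSelector, the snippet below is a hypothetical helper (selectedWorkloadAddresses is an invented name, not part of this diff). It assumes a decoded Service and a controller.Runtime whose cache has the Workload type registered, as the tests in this PR set up.

package example

import (
    "github.com/hashicorp/consul/internal/catalog/workloadselector"
    "github.com/hashicorp/consul/internal/controller"
    "github.com/hashicorp/consul/internal/resource"
    pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
)

// selectedWorkloadAddresses gathers the addresses of every workload that the
// given Service's selector currently matches, using the cache-backed helper above.
func selectedWorkloadAddresses(rt controller.Runtime, svc *resource.DecodedResource[*pbcatalog.Service]) ([]string, error) {
    workloads, err := workloadselector.GetWorkloadsWithSelector[*pbcatalog.Service](rt.Cache, svc)
    if err != nil {
        return nil, err
    }

    var hosts []string
    for _, w := range workloads {
        // Each element carries both the raw resource (w.Resource) and the
        // unmarshalled Workload (w.Data).
        for _, addr := range w.Data.Addresses {
            hosts = append(hosts, addr.Host)
        }
    }
    return hosts, nil
}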
@@ -0,0 +1,258 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package workloadselector

import (
    "context"
    "fmt"
    "testing"

    "github.com/stretchr/testify/require"
    "github.com/stretchr/testify/suite"
    "google.golang.org/protobuf/proto"

    "github.com/hashicorp/consul/internal/controller/cache"
    "github.com/hashicorp/consul/internal/resource"
    rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
    pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
    "github.com/hashicorp/consul/proto-public/pbresource"
    "github.com/hashicorp/consul/proto/private/prototest"
    "github.com/hashicorp/consul/sdk/testutil"
)

type gatherWorkloadsDataSuite struct {
    suite.Suite

    ctx   context.Context
    cache cache.Cache

    apiServiceData       *pbcatalog.Service
    apiService           *resource.DecodedResource[*pbcatalog.Service]
    apiServiceSubsetData *pbcatalog.Service
    apiServiceSubset     *resource.DecodedResource[*pbcatalog.Service]
    apiEndpoints         *resource.DecodedResource[*pbcatalog.ServiceEndpoints]
    api1Workload         *resource.DecodedResource[*pbcatalog.Workload]
    api2Workload         *resource.DecodedResource[*pbcatalog.Workload]
    api123Workload       *resource.DecodedResource[*pbcatalog.Workload]
    web1Workload         *resource.DecodedResource[*pbcatalog.Workload]
    web2Workload         *resource.DecodedResource[*pbcatalog.Workload]

    tenancies []*pbresource.Tenancy
}

func (suite *gatherWorkloadsDataSuite) SetupTest() {
    suite.ctx = testutil.TestContext(suite.T())
    suite.tenancies = rtest.TestTenancies()

    suite.cache = cache.New()
    suite.cache.AddType(pbcatalog.WorkloadType)

    suite.apiServiceData = &pbcatalog.Service{
        Workloads: &pbcatalog.WorkloadSelector{
            // This service's selectors are specially crafted to exercise both the
            // deduplication and sorting behaviors of GetWorkloadsWithSelector.
            Prefixes: []string{"api-"},
            Names:    []string{"api-1", "web-2", "web-1", "api-1", "not-found"},
        },
        Ports: []*pbcatalog.ServicePort{
            {
                TargetPort: "http",
                Protocol:   pbcatalog.Protocol_PROTOCOL_HTTP,
            },
        },
    }
    suite.apiServiceSubsetData = proto.Clone(suite.apiServiceData).(*pbcatalog.Service)
    suite.apiServiceSubsetData.Workloads.Filter = "(zim in metadata) and (metadata.zim matches `^g.`)"
}

func (suite *gatherWorkloadsDataSuite) TestGetWorkloadData() {
    // This test's purpose is to ensure that gathering workloads for
    // a service works as expected. The service's selector was crafted
    // to exercise the deduplication behavior as well as the sorting
    // behavior. The assertions in this test will verify that only
    // unique workloads are returned and that they are ordered.

    suite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) {
        require.NotNil(suite.T(), suite.apiService)

        data, err := GetWorkloadsWithSelector(suite.cache, suite.apiService)

        require.NoError(suite.T(), err)
        require.Len(suite.T(), data, 5)
        requireDecodedWorkloadEquals(suite.T(), suite.api1Workload, data[0])
        requireDecodedWorkloadEquals(suite.T(), suite.api123Workload, data[1])
        requireDecodedWorkloadEquals(suite.T(), suite.api2Workload, data[2])
        requireDecodedWorkloadEquals(suite.T(), suite.web1Workload, data[3])
        requireDecodedWorkloadEquals(suite.T(), suite.web2Workload, data[4])
    })
}

func (suite *gatherWorkloadsDataSuite) TestGetWorkloadDataWithFilter() {
    // This is like TestGetWorkloadData except it exercises the post-read
    // filter on the selector.
    suite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) {
        require.NotNil(suite.T(), suite.apiServiceSubset)

        data, err := GetWorkloadsWithSelector(suite.cache, suite.apiServiceSubset)

        require.NoError(suite.T(), err)
        require.Len(suite.T(), data, 2)
        requireDecodedWorkloadEquals(suite.T(), suite.api123Workload, data[0])
        requireDecodedWorkloadEquals(suite.T(), suite.web1Workload, data[1])
    })
}

func TestReconciliationData(t *testing.T) {
    suite.Run(t, new(gatherWorkloadsDataSuite))
}

func (suite *gatherWorkloadsDataSuite) setupResourcesWithTenancy(tenancy *pbresource.Tenancy) {
    suite.apiService = rtest.MustDecode[*pbcatalog.Service](
        suite.T(),
        rtest.Resource(pbcatalog.ServiceType, "api").
            WithTenancy(tenancy).
            WithData(suite.T(), suite.apiServiceData).
            Build())

    suite.apiServiceSubset = rtest.MustDecode[*pbcatalog.Service](
        suite.T(),
        rtest.Resource(pbcatalog.ServiceType, "api-subset").
            WithTenancy(tenancy).
            WithData(suite.T(), suite.apiServiceSubsetData).
            Build())

    suite.api1Workload = rtest.MustDecode[*pbcatalog.Workload](
        suite.T(),
        rtest.Resource(pbcatalog.WorkloadType, "api-1").
            WithTenancy(tenancy).
            WithMeta("zim", "dib").
            WithData(suite.T(), &pbcatalog.Workload{
                Addresses: []*pbcatalog.WorkloadAddress{
                    {Host: "127.0.0.1"},
                },
                Ports: map[string]*pbcatalog.WorkloadPort{
                    "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
                },
                Identity: "api",
            }).
            Build())
    suite.cache.Insert(suite.api1Workload.Resource)

    suite.api2Workload = rtest.MustDecode[*pbcatalog.Workload](
        suite.T(),
        rtest.Resource(pbcatalog.WorkloadType, "api-2").
            WithTenancy(tenancy).
            WithData(suite.T(), &pbcatalog.Workload{
                Addresses: []*pbcatalog.WorkloadAddress{
                    {Host: "127.0.0.1"},
                },
                Ports: map[string]*pbcatalog.WorkloadPort{
                    "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
                },
                Identity: "api",
            }).
            Build())
    suite.cache.Insert(suite.api2Workload.Resource)

    suite.api123Workload = rtest.MustDecode[*pbcatalog.Workload](
        suite.T(),
        rtest.Resource(pbcatalog.WorkloadType, "api-123").
            WithTenancy(tenancy).
            WithMeta("zim", "gir").
            WithData(suite.T(), &pbcatalog.Workload{
                Addresses: []*pbcatalog.WorkloadAddress{
                    {Host: "127.0.0.1"},
                },
                Ports: map[string]*pbcatalog.WorkloadPort{
                    "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
                },
                Identity: "api",
            }).
            Build())
    suite.cache.Insert(suite.api123Workload.Resource)

    suite.web1Workload = rtest.MustDecode[*pbcatalog.Workload](
        suite.T(),
        rtest.Resource(pbcatalog.WorkloadType, "web-1").
            WithTenancy(tenancy).
            WithMeta("zim", "gaz").
            WithData(suite.T(), &pbcatalog.Workload{
                Addresses: []*pbcatalog.WorkloadAddress{
                    {Host: "127.0.0.1"},
                },
                Ports: map[string]*pbcatalog.WorkloadPort{
                    "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
                },
                Identity: "web",
            }).
            Build())
    suite.cache.Insert(suite.web1Workload.Resource)

    suite.web2Workload = rtest.MustDecode[*pbcatalog.Workload](
        suite.T(),
        rtest.Resource(pbcatalog.WorkloadType, "web-2").
            WithTenancy(tenancy).
            WithData(suite.T(), &pbcatalog.Workload{
                Addresses: []*pbcatalog.WorkloadAddress{
                    {Host: "127.0.0.1"},
                },
                Ports: map[string]*pbcatalog.WorkloadPort{
                    "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
                },
                Identity: "web",
            }).
            Build())
    suite.cache.Insert(suite.web2Workload.Resource)

    suite.apiEndpoints = rtest.MustDecode[*pbcatalog.ServiceEndpoints](
        suite.T(),
        rtest.Resource(pbcatalog.ServiceEndpointsType, "api").
            WithTenancy(tenancy).
            WithData(suite.T(), &pbcatalog.ServiceEndpoints{
                Endpoints: []*pbcatalog.Endpoint{
                    {
                        TargetRef: rtest.Resource(pbcatalog.WorkloadType, "api-1").WithTenancy(tenancy).ID(),
                        Addresses: []*pbcatalog.WorkloadAddress{
                            {
                                Host:  "127.0.0.1",
                                Ports: []string{"http"},
                            },
                        },
                        Ports: map[string]*pbcatalog.WorkloadPort{
                            "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
                        },
                        HealthStatus: pbcatalog.Health_HEALTH_PASSING,
                    },
                },
            }).
            Build())
}

func (suite *gatherWorkloadsDataSuite) cleanupResources() {
    require.NoError(suite.T(), suite.cache.Delete(suite.api1Workload.Resource))
    require.NoError(suite.T(), suite.cache.Delete(suite.api2Workload.Resource))
    require.NoError(suite.T(), suite.cache.Delete(suite.api123Workload.Resource))
    require.NoError(suite.T(), suite.cache.Delete(suite.web1Workload.Resource))
    require.NoError(suite.T(), suite.cache.Delete(suite.web2Workload.Resource))
}

func (suite *gatherWorkloadsDataSuite) runTestCaseWithTenancies(testFunc func(*pbresource.Tenancy)) {
    for _, tenancy := range suite.tenancies {
        suite.Run(suite.appendTenancyInfo(tenancy), func() {
            suite.setupResourcesWithTenancy(tenancy)
            testFunc(tenancy)
            suite.T().Cleanup(suite.cleanupResources)
        })
    }
}

func (suite *gatherWorkloadsDataSuite) appendTenancyInfo(tenancy *pbresource.Tenancy) string {
    return fmt.Sprintf("%s_Namespace_%s_Partition", tenancy.Namespace, tenancy.Partition)
}

func requireDecodedWorkloadEquals(t testutil.TestingTB, expected, actual *resource.DecodedResource[*pbcatalog.Workload]) {
    prototest.AssertDeepEqual(t, expected.Resource, actual.Resource)
    require.Equal(t, expected.Data, actual.Data)
}
@@ -0,0 +1,72 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package workloadselector

import (
    "github.com/hashicorp/consul/internal/controller/cache/index"
    "github.com/hashicorp/consul/internal/controller/cache/indexers"
    "github.com/hashicorp/consul/internal/resource"
    pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
    "github.com/hashicorp/consul/proto-public/pbresource"
)

const (
    IndexName = "selected-workloads"
)

func Index[T WorkloadSelecting](name string) *index.Index {
    return indexers.DecodedMultiIndexer[T](
        name,
        index.SingleValueFromOneOrTwoArgs[resource.ReferenceOrID, index.IndexQueryOptions](fromArgs),
        fromResource[T],
    )
}

func fromArgs(r resource.ReferenceOrID, opts index.IndexQueryOptions) ([]byte, error) {
    workloadRef := &pbresource.Reference{
        Type:    pbcatalog.WorkloadType,
        Tenancy: r.GetTenancy(),
        Name:    r.GetName(),
    }

    if opts.Prefix {
        return index.PrefixIndexFromRefOrID(workloadRef), nil
    } else {
        return index.IndexFromRefOrID(workloadRef), nil
    }
}

func fromResource[T WorkloadSelecting](res *resource.DecodedResource[T]) (bool, [][]byte, error) {
    sel := res.Data.GetWorkloads()
    if sel == nil || (len(sel.Prefixes) == 0 && len(sel.Names) == 0) {
        return false, nil, nil
    }

    var indexes [][]byte

    for _, name := range sel.Names {
        ref := &pbresource.Reference{
            Type:    pbcatalog.WorkloadType,
            Tenancy: res.Id.Tenancy,
            Name:    name,
        }

        indexes = append(indexes, index.IndexFromRefOrID(ref))
    }

    for _, name := range sel.Prefixes {
        ref := &pbresource.Reference{
            Type:    pbcatalog.WorkloadType,
            Tenancy: res.Id.Tenancy,
            Name:    name,
        }

        b := index.IndexFromRefOrID(ref)

        // Remove the trailing path separator to be compatible with prefix matching.
        indexes = append(indexes, b[:len(b)-1])
    }

    return true, indexes, nil
}
@@ -0,0 +1,135 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package workloadselector

import (
    "testing"

    "github.com/stretchr/testify/require"

    "github.com/hashicorp/consul/internal/controller/cache"
    "github.com/hashicorp/consul/internal/controller/cache/index"
    "github.com/hashicorp/consul/internal/resource"
    "github.com/hashicorp/consul/internal/resource/resourcetest"
    rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
    pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
    "github.com/hashicorp/consul/proto-public/pbresource"
    "github.com/hashicorp/consul/proto/private/prototest"
)

func TestServiceWorkloadIndexer(t *testing.T) {
    c := cache.New()
    i := Index[*pbcatalog.Service]("selected-workloads")
    require.NoError(t, c.AddIndex(pbcatalog.ServiceType, i))

    foo := rtest.Resource(pbcatalog.ServiceType, "foo").
        WithData(t, &pbcatalog.Service{
            Workloads: &pbcatalog.WorkloadSelector{
                Names: []string{
                    "api-2",
                },
                Prefixes: []string{
                    "api-1",
                },
            },
        }).
        WithTenancy(&pbresource.Tenancy{
            Partition: "default",
            Namespace: "default",
            PeerName:  "local",
        }).
        Build()

    require.NoError(t, c.Insert(foo))

    bar := rtest.Resource(pbcatalog.ServiceType, "bar").
        WithData(t, &pbcatalog.Service{
            Workloads: &pbcatalog.WorkloadSelector{
                Names: []string{
                    "api-3",
                },
                Prefixes: []string{
                    "api-2",
                },
            },
        }).
        WithTenancy(&pbresource.Tenancy{
            Partition: "default",
            Namespace: "default",
            PeerName:  "local",
        }).
        Build()

    require.NoError(t, c.Insert(bar))

    api123 := rtest.Resource(pbcatalog.WorkloadType, "api-123").
        WithTenancy(&pbresource.Tenancy{
            Partition: "default",
            Namespace: "default",
            PeerName:  "local",
        }).
        Reference("")

    api2 := rtest.Resource(pbcatalog.WorkloadType, "api-2").
        WithTenancy(&pbresource.Tenancy{
            Partition: "default",
            Namespace: "default",
            PeerName:  "local",
        }).
        Reference("")

    resources, err := c.Parents(pbcatalog.ServiceType, i.Name(), api123)
    require.NoError(t, err)
    require.Len(t, resources, 1)
    prototest.AssertDeepEqual(t, foo, resources[0])

    resources, err = c.Parents(pbcatalog.ServiceType, i.Name(), api2)
    require.NoError(t, err)
    require.Len(t, resources, 2)
    prototest.AssertElementsMatch(t, []*pbresource.Resource{foo, bar}, resources)

    refPrefix := &pbresource.Reference{
        Type: pbcatalog.WorkloadType,
        Tenancy: &pbresource.Tenancy{
            Partition: "default",
            Namespace: "default",
        },
    }
    resources, err = c.List(pbcatalog.ServiceType, i.Name(), refPrefix, index.IndexQueryOptions{Prefix: true})
    require.NoError(t, err)
    // Because foo and bar both have 2 index values, they will each appear in the output twice.
    require.Len(t, resources, 4)
    prototest.AssertElementsMatch(t, []*pbresource.Resource{foo, bar, foo, bar}, resources)
}

func TestServiceWorkloadIndexer_FromResource_Errors(t *testing.T) {
    t.Run("nil-selector", func(t *testing.T) {
        res := resourcetest.MustDecode[*pbcatalog.Service](
            t,
            resourcetest.Resource(pbcatalog.ServiceType, "foo").
                WithData(t, &pbcatalog.Service{}).
                WithTenancy(resource.DefaultNamespacedTenancy()).
                Build())

        indexed, vals, err := fromResource(res)
        require.False(t, indexed)
        require.Nil(t, vals)
        require.NoError(t, err)
    })

    t.Run("no-selections", func(t *testing.T) {
        res := resourcetest.MustDecode[*pbcatalog.Service](
            t,
            resourcetest.Resource(pbcatalog.ServiceType, "foo").
                WithData(t, &pbcatalog.Service{
                    Workloads: &pbcatalog.WorkloadSelector{},
                }).
                WithTenancy(resource.DefaultNamespacedTenancy()).
                Build())

        indexed, vals, err := fromResource(res)
        require.False(t, indexed)
        require.Nil(t, vals)
        require.NoError(t, err)
    })
}
@@ -0,0 +1,151 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package workloadselector_test

import (
    "context"
    "testing"

    "github.com/stretchr/testify/require"

    "github.com/hashicorp/consul/internal/catalog/workloadselector"
    "github.com/hashicorp/consul/internal/controller"
    "github.com/hashicorp/consul/internal/controller/cache"
    "github.com/hashicorp/consul/internal/resource"
    "github.com/hashicorp/consul/internal/resource/resourcetest"
    pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
    "github.com/hashicorp/consul/proto/private/prototest"
    "github.com/hashicorp/consul/sdk/testutil"
)

func TestWorkloadSelectorCacheIntegration(t *testing.T) {
    c := cache.New()
    i := workloadselector.Index[*pbcatalog.Service]("selected-workloads")
    c.AddType(pbcatalog.WorkloadType)
    c.AddIndex(pbcatalog.ServiceType, i)

    rt := controller.Runtime{
        Cache:  c,
        Logger: testutil.Logger(t),
    }

    svcFoo := resourcetest.Resource(pbcatalog.ServiceType, "foo").
        WithData(t, &pbcatalog.Service{
            Workloads: &pbcatalog.WorkloadSelector{
                Names:    []string{"foo"},
                Prefixes: []string{"api-", "other-"},
            },
        }).
        WithTenancy(resource.DefaultNamespacedTenancy()).
        Build()

    svcBar := resourcetest.Resource(pbcatalog.ServiceType, "bar").
        WithData(t, &pbcatalog.Service{
            Workloads: &pbcatalog.WorkloadSelector{
                Names:    []string{"bar"},
                Prefixes: []string{"api-1", "something-else-"},
            },
        }).
        WithTenancy(resource.DefaultNamespacedTenancy()).
        Build()

    workloadBar := resourcetest.Resource(pbcatalog.WorkloadType, "bar").
        WithTenancy(resource.DefaultNamespacedTenancy()).
        Build()

    workloadAPIFoo := resourcetest.Resource(pbcatalog.WorkloadType, "api-foo").
        WithTenancy(resource.DefaultNamespacedTenancy()).
        Build()

    workloadAPI12 := resourcetest.Resource(pbcatalog.WorkloadType, "api-1").
        WithTenancy(resource.DefaultNamespacedTenancy()).
        Build()

    workloadFoo := resourcetest.Resource(pbcatalog.WorkloadType, "foo").
        WithTenancy(resource.DefaultNamespacedTenancy()).
        Build()

    workloadSomethingElse12 := resourcetest.Resource(pbcatalog.WorkloadType, "something-else-12").
        WithTenancy(resource.DefaultNamespacedTenancy()).
        Build()

    // Prime the cache with all of our services and workloads.
    require.NoError(t, c.Insert(svcFoo))
    require.NoError(t, c.Insert(svcBar))
    require.NoError(t, c.Insert(workloadAPIFoo))
    require.NoError(t, c.Insert(workloadAPI12))
    require.NoError(t, c.Insert(workloadFoo))
    require.NoError(t, c.Insert(workloadSomethingElse12))

    // Check that mapping a selecting resource to the list of currently selected workloads works as expected.
    reqs, err := workloadselector.MapSelectorToWorkloads[*pbcatalog.Service](context.Background(), rt, svcFoo)
    require.NoError(t, err)
    // In particular, workloadSomethingElse12 should not show up here.
    expected := []controller.Request{
        {ID: workloadFoo.Id},
        {ID: workloadAPI12.Id},
        {ID: workloadAPIFoo.Id},
    }
    prototest.AssertElementsMatch(t, expected, reqs)

    reqs, err = workloadselector.MapSelectorToWorkloads[*pbcatalog.Service](context.Background(), rt, svcBar)
    require.NoError(t, err)
    // workloadFoo and workloadAPIFoo should not show up here as they don't meet the selection criteria.
    // workloadBar should not show up here because it hasn't been inserted into the cache yet.
    expected = []controller.Request{
        {ID: workloadSomethingElse12.Id},
        {ID: workloadAPI12.Id},
    }
    prototest.AssertElementsMatch(t, expected, reqs)

    // Insert workloadBar into the cache so that future calls to MapSelectorToWorkloads for svcBar show
    // the workload in the output.
    require.NoError(t, c.Insert(workloadBar))

    // Now validate that workloadBar shows up in the svcBar mapping.
    reqs, err = workloadselector.MapSelectorToWorkloads[*pbcatalog.Service](context.Background(), rt, svcBar)
    require.NoError(t, err)
    expected = []controller.Request{
        {ID: workloadSomethingElse12.Id},
        {ID: workloadAPI12.Id},
        {ID: workloadBar.Id},
    }
    prototest.AssertElementsMatch(t, expected, reqs)

    // Create the mapper to verify that finding services that select workloads functions correctly.
    mapper := workloadselector.MapWorkloadsToSelectors(pbcatalog.ServiceType, i.Name())

    // Check that workloadAPIFoo only returns a request for svcFoo.
    reqs, err = mapper(context.Background(), rt, workloadAPIFoo)
    require.NoError(t, err)
    expected = []controller.Request{
        {ID: svcFoo.Id},
    }
    prototest.AssertElementsMatch(t, expected, reqs)

    // Check that workloadAPI12 returns both services.
    reqs, err = mapper(context.Background(), rt, workloadAPI12)
    require.NoError(t, err)
    expected = []controller.Request{
        {ID: svcFoo.Id},
        {ID: svcBar.Id},
    }
    prototest.AssertElementsMatch(t, expected, reqs)

    // Check that workloadSomethingElse12 returns only svcBar.
    reqs, err = mapper(context.Background(), rt, workloadSomethingElse12)
    require.NoError(t, err)
    expected = []controller.Request{
        {ID: svcBar.Id},
    }
    prototest.AssertElementsMatch(t, expected, reqs)

    // Check that workloadFoo returns only svcFoo.
    reqs, err = mapper(context.Background(), rt, workloadFoo)
    require.NoError(t, err)
    expected = []controller.Request{
        {ID: svcFoo.Id},
    }
    prototest.AssertElementsMatch(t, expected, reqs)
}
@@ -0,0 +1,45 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package workloadselector

import (
    "context"

    "github.com/hashicorp/consul/internal/controller"
    "github.com/hashicorp/consul/internal/controller/dependency"
    "github.com/hashicorp/consul/internal/resource"
    "github.com/hashicorp/consul/proto-public/pbresource"
)

// MapSelectorToWorkloads will use the "id" index on the watched Workload type to find all
// workloads currently selected by the resource.
func MapSelectorToWorkloads[T WorkloadSelecting](_ context.Context, rt controller.Runtime, r *pbresource.Resource) ([]controller.Request, error) {
    res, err := resource.Decode[T](r)
    if err != nil {
        return nil, err
    }

    workloads, err := GetWorkloadsWithSelector[T](rt.Cache, res)
    if err != nil {
        return nil, err
    }

    reqs := make([]controller.Request, len(workloads))
    for i, workload := range workloads {
        reqs[i] = controller.Request{
            ID: workload.Id,
        }
    }

    return reqs, nil
}

// MapWorkloadsToSelectors returns a DependencyMapper that will use the specified index to map a workload
// to the resources that select it.
//
// This mapper can only be used on watches for the Workload type and works in conjunction with the Index
// created by this package.
func MapWorkloadsToSelectors(indexType *pbresource.Type, indexName string) controller.DependencyMapper {
    return dependency.CacheParentsMapper(indexType, indexName)
}
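
For illustration, the hypothetical helper below (mapBothDirections is an invented name, not part of this diff) calls both mappers directly. In a real controller they would instead be handed to the watch registrations for the selecting type and for the Workload type, which is how the integration test earlier in this diff exercises them.

package example

import (
    "context"

    "github.com/hashicorp/consul/internal/catalog/workloadselector"
    "github.com/hashicorp/consul/internal/controller"
    pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
    "github.com/hashicorp/consul/proto-public/pbresource"
)

// mapBothDirections produces reconcile requests in both directions:
// from a changed Service to the Workloads it selects, and from a changed
// Workload to the Services whose selectors match it.
func mapBothDirections(ctx context.Context, rt controller.Runtime, svc, workload *pbresource.Resource) ([]controller.Request, []controller.Request, error) {
    // Service (or any WorkloadSelecting type) -> the Workloads it currently selects.
    workloadReqs, err := workloadselector.MapSelectorToWorkloads[*pbcatalog.Service](ctx, rt, svc)
    if err != nil {
        return nil, nil, err
    }

    // Workload -> the Services whose selectors match it, answered from the
    // "selected-workloads" cache index registered on the Service type.
    toSelectors := workloadselector.MapWorkloadsToSelectors(pbcatalog.ServiceType, workloadselector.IndexName)
    serviceReqs, err := toSelectors(ctx, rt, workload)
    if err != nil {
        return nil, nil, err
    }

    return workloadReqs, serviceReqs, nil
}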
@@ -0,0 +1,180 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package workloadselector

import (
    "context"
    "errors"
    "testing"

    "github.com/hashicorp/go-hclog"
    "github.com/stretchr/testify/require"

    "github.com/hashicorp/consul/internal/controller"
    "github.com/hashicorp/consul/internal/controller/cache/cachemock"
    "github.com/hashicorp/consul/internal/controller/cache/index"
    "github.com/hashicorp/consul/internal/controller/cache/index/indexmock"
    "github.com/hashicorp/consul/internal/resource"
    "github.com/hashicorp/consul/internal/resource/resourcetest"
    pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
    "github.com/hashicorp/consul/proto-public/pbresource"
    "github.com/hashicorp/consul/proto/private/prototest"
)

var injectedError = errors.New("injected error")

func TestMapSelectorToWorkloads(t *testing.T) {
    cache := cachemock.NewReadOnlyCache(t)

    rt := controller.Runtime{
        Cache: cache,
    }

    mres := indexmock.NewResourceIterator(t)

    svc := resourcetest.Resource(pbcatalog.ServiceType, "api").
        WithData(t, &pbcatalog.Service{
            Workloads: &pbcatalog.WorkloadSelector{
                Prefixes: []string{"api-"},
                Names:    []string{"foo"},
            },
        }).
        WithTenancy(resource.DefaultNamespacedTenancy()).
        Build()

    api1 := resourcetest.Resource(pbcatalog.WorkloadType, "api-1").
        WithTenancy(resource.DefaultNamespacedTenancy()).
        Build()

    api2 := resourcetest.Resource(pbcatalog.WorkloadType, "api-2").
        WithTenancy(resource.DefaultNamespacedTenancy()).
        Build()

    fooRes := resourcetest.Resource(pbcatalog.WorkloadType, "foo").
        WithTenancy(resource.DefaultNamespacedTenancy()).
        Build()

    cache.EXPECT().
        ListIterator(pbcatalog.WorkloadType, "id", &pbresource.ID{
            Type:    pbcatalog.WorkloadType,
            Name:    "api-",
            Tenancy: resource.DefaultNamespacedTenancy(),
        }, index.IndexQueryOptions{Prefix: true}).
        Return(mres, nil).
        Once()
    cache.EXPECT().
        Get(pbcatalog.WorkloadType, "id", &pbresource.ID{
            Type:    pbcatalog.WorkloadType,
            Name:    "foo",
            Tenancy: resource.DefaultNamespacedTenancy(),
        }).
        Return(fooRes, nil).
        Once()

    mres.EXPECT().Next().Return(api1).Once()
    mres.EXPECT().Next().Return(api2).Once()
    mres.EXPECT().Next().Return(nil).Once()

    expected := []controller.Request{
        {ID: fooRes.Id},
        {ID: api1.Id},
        {ID: api2.Id},
    }

    reqs, err := MapSelectorToWorkloads[*pbcatalog.Service](context.Background(), rt, svc)
    require.NoError(t, err)
    prototest.AssertElementsMatch(t, expected, reqs)
}

func TestMapSelectorToWorkloads_DecodeError(t *testing.T) {
    res := resourcetest.Resource(pbcatalog.ServiceType, "foo").
        WithData(t, &pbcatalog.DNSPolicy{}).
        Build()

    reqs, err := MapSelectorToWorkloads[*pbcatalog.Service](context.Background(), controller.Runtime{}, res)
    require.Nil(t, reqs)
    require.Error(t, err)
    require.ErrorAs(t, err, &resource.ErrDataParse{})
}

func TestMapSelectorToWorkloads_CacheError(t *testing.T) {
    cache := cachemock.NewReadOnlyCache(t)

    rt := controller.Runtime{
        Cache: cache,
    }

    svc := resourcetest.Resource(pbcatalog.ServiceType, "api").
        WithData(t, &pbcatalog.Service{
            Workloads: &pbcatalog.WorkloadSelector{
                Prefixes: []string{"api-"},
            },
        }).
        WithTenancy(resource.DefaultNamespacedTenancy()).
        Build()

    cache.EXPECT().
        ListIterator(pbcatalog.WorkloadType, "id", &pbresource.ID{
            Type: pbcatalog.WorkloadType,
            Name: "api-",
||||
Tenancy: resource.DefaultNamespacedTenancy(), |
||||
}, index.IndexQueryOptions{Prefix: true}). |
||||
Return(nil, injectedError). |
||||
Once() |
||||
|
||||
reqs, err := MapSelectorToWorkloads[*pbcatalog.Service](context.Background(), rt, svc) |
||||
require.ErrorIs(t, err, injectedError) |
||||
require.Nil(t, reqs) |
||||
} |
||||
|
||||
func TestMapWorkloadsToSelectors(t *testing.T) { |
||||
cache := cachemock.NewReadOnlyCache(t) |
||||
rt := controller.Runtime{ |
||||
Cache: cache, |
||||
Logger: hclog.NewNullLogger(), |
||||
} |
||||
|
||||
dm := MapWorkloadsToSelectors(pbcatalog.ServiceType, "selected-workloads") |
||||
|
||||
workload := resourcetest.Resource(pbcatalog.WorkloadType, "api-123"). |
||||
WithTenancy(resource.DefaultNamespacedTenancy()). |
||||
Build() |
||||
|
||||
svc1 := resourcetest.Resource(pbcatalog.ServiceType, "foo"). |
||||
WithData(t, &pbcatalog.Service{ |
||||
Workloads: &pbcatalog.WorkloadSelector{ |
||||
Prefixes: []string{"api-"}, |
||||
}, |
||||
}). |
||||
WithTenancy(resource.DefaultNamespacedTenancy()). |
||||
Build() |
||||
|
||||
svc2 := resourcetest.Resource(pbcatalog.ServiceType, "bar"). |
||||
WithData(t, &pbcatalog.Service{ |
||||
Workloads: &pbcatalog.WorkloadSelector{ |
||||
Prefixes: []string{"api-"}, |
||||
}, |
||||
}). |
||||
WithTenancy(resource.DefaultNamespacedTenancy()). |
||||
Build() |
||||
|
||||
mres := indexmock.NewResourceIterator(t) |
||||
|
||||
cache.EXPECT(). |
||||
ParentsIterator(pbcatalog.ServiceType, "selected-workloads", workload.Id). |
||||
Return(mres, nil). |
||||
Once() |
||||
|
||||
mres.EXPECT().Next().Return(svc1).Once() |
||||
mres.EXPECT().Next().Return(svc2).Once() |
||||
mres.EXPECT().Next().Return(nil).Once() |
||||
|
||||
reqs, err := dm(context.Background(), rt, workload) |
||||
require.NoError(t, err) |
||||
expected := []controller.Request{ |
||||
{ID: svc1.Id}, |
||||
{ID: svc2.Id}, |
||||
} |
||||
prototest.AssertElementsMatch(t, expected, reqs) |
||||
|
||||
} |
@ -1,7 +1,7 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package types |
||||
package workloadselector |
||||
|
||||
import ( |
||||
"google.golang.org/protobuf/proto" |
@ -0,0 +1,107 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package cache |
||||
|
||||
import ( |
||||
"google.golang.org/protobuf/proto" |
||||
|
||||
"github.com/hashicorp/consul/internal/resource" |
||||
"github.com/hashicorp/consul/proto-public/pbresource" |
||||
) |
||||
|
||||
// GetDecoded retrieves a single resource from the specified index that matches the provided
// args and decodes its data. If more than one match is found, the first is returned.
|
||||
func GetDecoded[T proto.Message](c ReadOnlyCache, it *pbresource.Type, indexName string, args ...any) (*resource.DecodedResource[T], error) { |
||||
res, err := c.Get(it, indexName, args...) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
if res == nil { |
||||
return nil, nil |
||||
} |
||||
|
||||
return resource.Decode[T](res) |
||||
} |
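Illustrative only (the pbcatalog import is assumed): fetching and decoding a single Workload through the "id" index; a nil result with a nil error means the cache holds no match.

func exampleGetWorkload(c ReadOnlyCache, id *pbresource.ID) (*resource.DecodedResource[*pbcatalog.Workload], error) {
	// The returned Data field is the unmarshalled *pbcatalog.Workload.
	return GetDecoded[*pbcatalog.Workload](c, pbcatalog.WorkloadType, "id", id)
}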
||||
|
||||
// ListDecoded retrieves and decodes all the resources from the specified index matching the provided args.
|
||||
func ListDecoded[T proto.Message](c ReadOnlyCache, it *pbresource.Type, indexName string, args ...any) ([]*resource.DecodedResource[T], error) { |
||||
resources, err := c.List(it, indexName, args...) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
return resource.DecodeList[T](resources) |
||||
} |
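A sketch under the same assumptions: listing and decoding every cached Service for an index value; note that a single undecodable resource fails the whole call.

func exampleListServices(c ReadOnlyCache, id *pbresource.ID) ([]*resource.DecodedResource[*pbcatalog.Service], error) {
	return ListDecoded[*pbcatalog.Service](c, pbcatalog.ServiceType, "id", id)
}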
||||
|
||||
// ListIteratorDecoded retrieves an iterator over all resources from the specified index matching the provided args, decoding each resource as it is read.
|
||||
func ListIteratorDecoded[T proto.Message](c ReadOnlyCache, it *pbresource.Type, indexName string, args ...any) (DecodedResourceIterator[T], error) { |
||||
iter, err := c.ListIterator(it, indexName, args...) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
if iter == nil { |
||||
return nil, nil |
||||
} |
||||
|
||||
return decodedResourceIterator[T]{iter}, nil |
||||
} |
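A hedged sketch of consuming the decoded iterator (pbcatalog import assumed); Next yields a nil result once the underlying iterator is exhausted.

func exampleIterateWorkloads(c ReadOnlyCache, id *pbresource.ID) error {
	iter, err := ListIteratorDecoded[*pbcatalog.Workload](c, pbcatalog.WorkloadType, "id", id)
	if err != nil || iter == nil {
		return err
	}
	for dec, err := iter.Next(); dec != nil || err != nil; dec, err = iter.Next() {
		if err != nil {
			return err
		}
		_ = dec.Data // the unmarshalled *pbcatalog.Workload
	}
	return nil
}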
||||
|
||||
// ParentsDecoded retrieves and decodes all resources whose index value is a parent (or prefix)
// of the value calculated from the provided args.
|
||||
func ParentsDecoded[T proto.Message](c ReadOnlyCache, it *pbresource.Type, indexName string, args ...any) ([]*resource.DecodedResource[T], error) { |
||||
resources, err := c.Parents(it, indexName, args...) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
return resource.DecodeList[T](resources) |
||||
} |
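Illustrative only: with a selector-style prefix index (the "selected-workloads" name is hypothetical), this returns every decoded Service whose indexed selector value is a parent or prefix of the given workload ID.

func exampleSelectingServices(c ReadOnlyCache, workloadID *pbresource.ID) ([]*resource.DecodedResource[*pbcatalog.Service], error) {
	return ParentsDecoded[*pbcatalog.Service](c, pbcatalog.ServiceType, "selected-workloads", workloadID)
}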
||||
|
||||
// ParentsIteratorDecoded retrieves an iterator over all resources whose index value is a parent
// (or prefix) of the value calculated from the provided args.
|
||||
func ParentsIteratorDecoded[T proto.Message](c ReadOnlyCache, it *pbresource.Type, indexName string, args ...any) (DecodedResourceIterator[T], error) { |
||||
iter, err := c.ParentsIterator(it, indexName, args...) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
if iter == nil { |
||||
return nil, nil |
||||
} |
||||
|
||||
return decodedResourceIterator[T]{iter}, nil |
||||
} |
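The iterator variant under the same assumptions; decoding happens lazily as the caller advances the iterator, using the same consumption loop sketched for ListIteratorDecoded above.

func exampleSelectingServicesIter(c ReadOnlyCache, workloadID *pbresource.ID) (DecodedResourceIterator[*pbcatalog.Service], error) {
	return ParentsIteratorDecoded[*pbcatalog.Service](c, pbcatalog.ServiceType, "selected-workloads", workloadID)
}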
||||
|
||||
// QueryDecoded executes a named query against the cache and returns an iterator over its decoded results.
|
||||
func QueryDecoded[T proto.Message](c ReadOnlyCache, name string, args ...any) (DecodedResourceIterator[T], error) { |
||||
iter, err := c.Query(name, args...) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
if iter == nil { |
||||
return nil, nil |
||||
} |
||||
|
||||
return decodedResourceIterator[T]{iter}, nil |
||||
} |
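A sketch only: the query name and argument below are hypothetical and would need to correspond to a query already registered with the cache.

func exampleQueryServices(c ReadOnlyCache) (DecodedResourceIterator[*pbcatalog.Service], error) {
	return QueryDecoded[*pbcatalog.Service](c, "example-query", "api")
}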
||||
|
||||
type DecodedResourceIterator[T proto.Message] interface { |
||||
Next() (*resource.DecodedResource[T], error) |
||||
} |
||||
|
||||
type decodedResourceIterator[T proto.Message] struct { |
||||
ResourceIterator |
||||
} |
||||
|
||||
func (iter decodedResourceIterator[T]) Next() (*resource.DecodedResource[T], error) { |
||||
res := iter.ResourceIterator.Next() |
||||
if res == nil { |
||||
return nil, nil |
||||
} |
||||
|
||||
return resource.Decode[T](res) |
||||
} |
@ -0,0 +1,360 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package cache_test |
||||
|
||||
import ( |
||||
"testing" |
||||
|
||||
"github.com/stretchr/testify/require" |
||||
"github.com/stretchr/testify/suite" |
||||
|
||||
"github.com/hashicorp/consul/internal/controller/cache" |
||||
"github.com/hashicorp/consul/internal/controller/cache/cachemock" |
||||
"github.com/hashicorp/consul/internal/resource" |
||||
"github.com/hashicorp/consul/internal/resource/demo" |
||||
"github.com/hashicorp/consul/proto-public/pbresource" |
||||
pbdemo "github.com/hashicorp/consul/proto/private/pbdemo/v2" |
||||
"github.com/hashicorp/consul/proto/private/prototest" |
||||
) |
||||
|
||||
type decodedSuite struct { |
||||
suite.Suite |
||||
rc *cachemock.ReadOnlyCache |
||||
iter *cachemock.ResourceIterator |
||||
|
||||
artistGood *resource.DecodedResource[*pbdemo.Artist] |
||||
artistGood2 *resource.DecodedResource[*pbdemo.Artist] |
||||
artistBad *pbresource.Resource |
||||
} |
||||
|
||||
func (suite *decodedSuite) SetupTest() { |
||||
suite.rc = cachemock.NewReadOnlyCache(suite.T()) |
||||
suite.iter = cachemock.NewResourceIterator(suite.T()) |
||||
artist, err := demo.GenerateV2Artist() |
||||
require.NoError(suite.T(), err) |
||||
suite.artistGood, err = resource.Decode[*pbdemo.Artist](artist) |
||||
require.NoError(suite.T(), err) |
||||
|
||||
artist2, err := demo.GenerateV2Artist() |
||||
require.NoError(suite.T(), err) |
||||
suite.artistGood2, err = resource.Decode[*pbdemo.Artist](artist2) |
||||
require.NoError(suite.T(), err) |
||||
|
||||
suite.artistBad, err = demo.GenerateV2Album(artist.Id) |
||||
require.NoError(suite.T(), err) |
||||
} |
||||
|
||||
func (suite *decodedSuite) TestGetDecoded_Ok() { |
||||
suite.rc.EXPECT().Get(pbdemo.ArtistType, "id", suite.artistGood.Id).Return(suite.artistGood.Resource, nil) |
||||
|
||||
dec, err := cache.GetDecoded[*pbdemo.Artist](suite.rc, pbdemo.ArtistType, "id", suite.artistGood.Id) |
||||
require.NoError(suite.T(), err) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood.Resource, dec.Resource) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood.Data, dec.Data) |
||||
} |
||||
|
||||
func (suite *decodedSuite) TestGetDecoded_DecodeError() { |
||||
suite.rc.EXPECT().Get(pbdemo.ArtistType, "id", suite.artistGood.Id).Return(suite.artistBad, nil) |
||||
|
||||
dec, err := cache.GetDecoded[*pbdemo.Artist](suite.rc, pbdemo.ArtistType, "id", suite.artistGood.Id) |
||||
require.Error(suite.T(), err) |
||||
require.Nil(suite.T(), dec) |
||||
} |
||||
|
||||
func (suite *decodedSuite) TestGetDecoded_CacheError() { |
||||
suite.rc.EXPECT().Get(pbdemo.ArtistType, "id", suite.artistGood.Id).Return(nil, injectedError) |
||||
|
||||
dec, err := cache.GetDecoded[*pbdemo.Artist](suite.rc, pbdemo.ArtistType, "id", suite.artistGood.Id) |
||||
require.ErrorIs(suite.T(), err, injectedError) |
||||
require.Nil(suite.T(), dec) |
||||
} |
||||
|
||||
func (suite *decodedSuite) TestGetDecoded_Nil() { |
||||
suite.rc.EXPECT().Get(pbdemo.ArtistType, "id", suite.artistGood.Id).Return(nil, nil) |
||||
|
||||
dec, err := cache.GetDecoded[*pbdemo.Artist](suite.rc, pbdemo.ArtistType, "id", suite.artistGood.Id) |
||||
require.NoError(suite.T(), err) |
||||
require.Nil(suite.T(), dec) |
||||
} |
||||
|
||||
func (suite *decodedSuite) TestListDecoded_Ok() { |
||||
suite.rc.EXPECT().List(pbdemo.ArtistType, "id", suite.artistGood.Id). |
||||
Return([]*pbresource.Resource{suite.artistGood.Resource, suite.artistGood2.Resource}, nil) |
||||
|
||||
dec, err := cache.ListDecoded[*pbdemo.Artist](suite.rc, pbdemo.ArtistType, "id", suite.artistGood.Id) |
||||
require.NoError(suite.T(), err) |
||||
require.Len(suite.T(), dec, 2) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood.Resource, dec[0].Resource) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood.Data, dec[0].Data) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood2.Resource, dec[1].Resource) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood2.Data, dec[1].Data) |
||||
} |
||||
|
||||
func (suite *decodedSuite) TestListDecoded_DecodeError() { |
||||
suite.rc.EXPECT().List(pbdemo.ArtistType, "id", suite.artistGood.Id). |
||||
Return([]*pbresource.Resource{suite.artistGood.Resource, suite.artistBad}, nil) |
||||
|
||||
dec, err := cache.ListDecoded[*pbdemo.Artist](suite.rc, pbdemo.ArtistType, "id", suite.artistGood.Id) |
||||
require.Error(suite.T(), err) |
||||
require.Nil(suite.T(), dec) |
||||
} |
||||
|
||||
func (suite *decodedSuite) TestListDecoded_CacheError() { |
||||
suite.rc.EXPECT().List(pbdemo.ArtistType, "id", suite.artistGood.Id).Return(nil, injectedError) |
||||
|
||||
dec, err := cache.ListDecoded[*pbdemo.Artist](suite.rc, pbdemo.ArtistType, "id", suite.artistGood.Id) |
||||
require.ErrorIs(suite.T(), err, injectedError) |
||||
require.Nil(suite.T(), dec) |
||||
} |
||||
|
||||
func (suite *decodedSuite) TestListDecoded_Nil() { |
||||
suite.rc.EXPECT().List(pbdemo.ArtistType, "id", suite.artistGood.Id).Return(nil, nil) |
||||
|
||||
dec, err := cache.ListDecoded[*pbdemo.Artist](suite.rc, pbdemo.ArtistType, "id", suite.artistGood.Id) |
||||
require.NoError(suite.T(), err) |
||||
require.Nil(suite.T(), dec) |
||||
} |
||||
|
||||
func (suite *decodedSuite) TestListIteratorDecoded_Ok() { |
||||
suite.iter.EXPECT().Next().Return(suite.artistGood.Resource).Once() |
||||
suite.iter.EXPECT().Next().Return(suite.artistGood2.Resource).Once() |
||||
suite.iter.EXPECT().Next().Return(nil).Times(0) |
||||
suite.rc.EXPECT().ListIterator(pbdemo.ArtistType, "id", suite.artistGood.Id). |
||||
Return(suite.iter, nil) |
||||
|
||||
iter, err := cache.ListIteratorDecoded[*pbdemo.Artist](suite.rc, pbdemo.ArtistType, "id", suite.artistGood.Id) |
||||
require.NoError(suite.T(), err) |
||||
require.NotNil(suite.T(), iter) |
||||
|
||||
dec, err := iter.Next() |
||||
require.NoError(suite.T(), err) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood.Resource, dec.Resource) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood.Data, dec.Data) |
||||
|
||||
dec, err = iter.Next() |
||||
require.NoError(suite.T(), err) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood2.Resource, dec.Resource) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood2.Data, dec.Data) |
||||
|
||||
dec, err = iter.Next() |
||||
require.NoError(suite.T(), err) |
||||
require.Nil(suite.T(), dec) |
||||
} |
||||
|
||||
func (suite *decodedSuite) TestListIteratorDecoded_DecodeError() { |
||||
suite.iter.EXPECT().Next().Return(suite.artistGood.Resource).Once() |
||||
suite.iter.EXPECT().Next().Return(suite.artistBad).Once() |
||||
suite.iter.EXPECT().Next().Return(nil).Times(0) |
||||
suite.rc.EXPECT().ListIterator(pbdemo.ArtistType, "id", suite.artistGood.Id). |
||||
Return(suite.iter, nil) |
||||
|
||||
iter, err := cache.ListIteratorDecoded[*pbdemo.Artist](suite.rc, pbdemo.ArtistType, "id", suite.artistGood.Id) |
||||
require.NoError(suite.T(), err) |
||||
require.NotNil(suite.T(), iter) |
||||
|
||||
dec, err := iter.Next() |
||||
require.NoError(suite.T(), err) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood.Resource, dec.Resource) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood.Data, dec.Data) |
||||
|
||||
dec, err = iter.Next() |
||||
require.Error(suite.T(), err) |
||||
require.Nil(suite.T(), dec) |
||||
|
||||
dec, err = iter.Next() |
||||
require.NoError(suite.T(), err) |
||||
require.Nil(suite.T(), dec) |
||||
} |
||||
|
||||
func (suite *decodedSuite) TestListIteratorDecoded_CacheError() { |
||||
suite.rc.EXPECT().ListIterator(pbdemo.ArtistType, "id", suite.artistGood.Id).Return(nil, injectedError) |
||||
|
||||
iter, err := cache.ListIteratorDecoded[*pbdemo.Artist](suite.rc, pbdemo.ArtistType, "id", suite.artistGood.Id) |
||||
require.ErrorIs(suite.T(), err, injectedError) |
||||
require.Nil(suite.T(), iter) |
||||
} |
||||
|
||||
func (suite *decodedSuite) TestListIteratorDecoded_Nil() { |
||||
suite.rc.EXPECT().ListIterator(pbdemo.ArtistType, "id", suite.artistGood.Id).Return(nil, nil) |
||||
|
||||
dec, err := cache.ListIteratorDecoded[*pbdemo.Artist](suite.rc, pbdemo.ArtistType, "id", suite.artistGood.Id) |
||||
require.NoError(suite.T(), err) |
||||
require.Nil(suite.T(), dec) |
||||
} |
||||
|
||||
func (suite *decodedSuite) TestParentsDecoded_Ok() { |
||||
suite.rc.EXPECT().Parents(pbdemo.ArtistType, "id", suite.artistGood.Id). |
||||
Return([]*pbresource.Resource{suite.artistGood.Resource, suite.artistGood2.Resource}, nil) |
||||
|
||||
dec, err := cache.ParentsDecoded[*pbdemo.Artist](suite.rc, pbdemo.ArtistType, "id", suite.artistGood.Id) |
||||
require.NoError(suite.T(), err) |
||||
require.Len(suite.T(), dec, 2) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood.Resource, dec[0].Resource) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood.Data, dec[0].Data) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood2.Resource, dec[1].Resource) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood2.Data, dec[1].Data) |
||||
} |
||||
|
||||
func (suite *decodedSuite) TestParentsDecoded_DecodeError() { |
||||
suite.rc.EXPECT().Parents(pbdemo.ArtistType, "id", suite.artistGood.Id). |
||||
Return([]*pbresource.Resource{suite.artistGood.Resource, suite.artistBad}, nil) |
||||
|
||||
dec, err := cache.ParentsDecoded[*pbdemo.Artist](suite.rc, pbdemo.ArtistType, "id", suite.artistGood.Id) |
||||
require.Error(suite.T(), err) |
||||
require.Nil(suite.T(), dec) |
||||
} |
||||
|
||||
func (suite *decodedSuite) TestParentsDecoded_CacheError() { |
||||
suite.rc.EXPECT().Parents(pbdemo.ArtistType, "id", suite.artistGood.Id).Return(nil, injectedError) |
||||
|
||||
dec, err := cache.ParentsDecoded[*pbdemo.Artist](suite.rc, pbdemo.ArtistType, "id", suite.artistGood.Id) |
||||
require.ErrorIs(suite.T(), err, injectedError) |
||||
require.Nil(suite.T(), dec) |
||||
} |
||||
|
||||
func (suite *decodedSuite) TestParentsDecoded_Nil() { |
||||
suite.rc.EXPECT().Parents(pbdemo.ArtistType, "id", suite.artistGood.Id).Return(nil, nil) |
||||
|
||||
dec, err := cache.ParentsDecoded[*pbdemo.Artist](suite.rc, pbdemo.ArtistType, "id", suite.artistGood.Id) |
||||
require.NoError(suite.T(), err) |
||||
require.Nil(suite.T(), dec) |
||||
} |
||||
|
||||
func (suite *decodedSuite) TestParentsIteratorDecoded_Ok() { |
||||
suite.iter.EXPECT().Next().Return(suite.artistGood.Resource).Once() |
||||
suite.iter.EXPECT().Next().Return(suite.artistGood2.Resource).Once() |
||||
suite.iter.EXPECT().Next().Return(nil).Times(0) |
||||
suite.rc.EXPECT().ParentsIterator(pbdemo.ArtistType, "id", suite.artistGood.Id). |
||||
Return(suite.iter, nil) |
||||
|
||||
iter, err := cache.ParentsIteratorDecoded[*pbdemo.Artist](suite.rc, pbdemo.ArtistType, "id", suite.artistGood.Id) |
||||
require.NoError(suite.T(), err) |
||||
require.NotNil(suite.T(), iter) |
||||
|
||||
dec, err := iter.Next() |
||||
require.NoError(suite.T(), err) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood.Resource, dec.Resource) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood.Data, dec.Data) |
||||
|
||||
dec, err = iter.Next() |
||||
require.NoError(suite.T(), err) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood2.Resource, dec.Resource) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood2.Data, dec.Data) |
||||
|
||||
dec, err = iter.Next() |
||||
require.NoError(suite.T(), err) |
||||
require.Nil(suite.T(), dec) |
||||
} |
||||
|
||||
func (suite *decodedSuite) TestParentsIteratorDecoded_DecodeError() { |
||||
suite.iter.EXPECT().Next().Return(suite.artistGood.Resource).Once() |
||||
suite.iter.EXPECT().Next().Return(suite.artistBad).Once() |
||||
suite.iter.EXPECT().Next().Return(nil).Times(0) |
||||
suite.rc.EXPECT().ParentsIterator(pbdemo.ArtistType, "id", suite.artistGood.Id). |
||||
Return(suite.iter, nil) |
||||
|
||||
iter, err := cache.ParentsIteratorDecoded[*pbdemo.Artist](suite.rc, pbdemo.ArtistType, "id", suite.artistGood.Id) |
||||
require.NoError(suite.T(), err) |
||||
require.NotNil(suite.T(), iter) |
||||
|
||||
dec, err := iter.Next() |
||||
require.NoError(suite.T(), err) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood.Resource, dec.Resource) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood.Data, dec.Data) |
||||
|
||||
dec, err = iter.Next() |
||||
require.Error(suite.T(), err) |
||||
require.Nil(suite.T(), dec) |
||||
|
||||
dec, err = iter.Next() |
||||
require.NoError(suite.T(), err) |
||||
require.Nil(suite.T(), dec) |
||||
} |
||||
|
||||
func (suite *decodedSuite) TestParentsIteratorDecoded_CacheError() { |
||||
suite.rc.EXPECT().ParentsIterator(pbdemo.ArtistType, "id", suite.artistGood.Id).Return(nil, injectedError) |
||||
|
||||
iter, err := cache.ParentsIteratorDecoded[*pbdemo.Artist](suite.rc, pbdemo.ArtistType, "id", suite.artistGood.Id) |
||||
require.ErrorIs(suite.T(), err, injectedError) |
||||
require.Nil(suite.T(), iter) |
||||
} |
||||
|
||||
func (suite *decodedSuite) TestParentsIteratorDecoded_Nil() { |
||||
suite.rc.EXPECT().ParentsIterator(pbdemo.ArtistType, "id", suite.artistGood.Id).Return(nil, nil) |
||||
|
||||
dec, err := cache.ParentsIteratorDecoded[*pbdemo.Artist](suite.rc, pbdemo.ArtistType, "id", suite.artistGood.Id) |
||||
require.NoError(suite.T(), err) |
||||
require.Nil(suite.T(), dec) |
||||
} |
||||
|
||||
func (suite *decodedSuite) TestQueryDecoded_Ok() { |
||||
suite.iter.EXPECT().Next().Return(suite.artistGood.Resource).Once() |
||||
suite.iter.EXPECT().Next().Return(suite.artistGood2.Resource).Once() |
||||
suite.iter.EXPECT().Next().Return(nil).Times(0) |
||||
suite.rc.EXPECT().Query("query", "blah"). |
||||
Return(suite.iter, nil) |
||||
|
||||
iter, err := cache.QueryDecoded[*pbdemo.Artist](suite.rc, "query", "blah") |
||||
require.NoError(suite.T(), err) |
||||
require.NotNil(suite.T(), iter) |
||||
|
||||
dec, err := iter.Next() |
||||
require.NoError(suite.T(), err) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood.Resource, dec.Resource) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood.Data, dec.Data) |
||||
|
||||
dec, err = iter.Next() |
||||
require.NoError(suite.T(), err) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood2.Resource, dec.Resource) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood2.Data, dec.Data) |
||||
|
||||
dec, err = iter.Next() |
||||
require.NoError(suite.T(), err) |
||||
require.Nil(suite.T(), dec) |
||||
} |
||||
|
||||
func (suite *decodedSuite) TestQueryDecoded_DecodeError() { |
||||
suite.iter.EXPECT().Next().Return(suite.artistGood.Resource).Once() |
||||
suite.iter.EXPECT().Next().Return(suite.artistBad).Once() |
||||
suite.iter.EXPECT().Next().Return(nil).Times(0) |
||||
suite.rc.EXPECT().Query("query", "blah"). |
||||
Return(suite.iter, nil) |
||||
|
||||
iter, err := cache.QueryDecoded[*pbdemo.Artist](suite.rc, "query", "blah") |
||||
require.NoError(suite.T(), err) |
||||
require.NotNil(suite.T(), iter) |
||||
|
||||
dec, err := iter.Next() |
||||
require.NoError(suite.T(), err) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood.Resource, dec.Resource) |
||||
prototest.AssertDeepEqual(suite.T(), suite.artistGood.Data, dec.Data) |
||||
|
||||
dec, err = iter.Next() |
||||
require.Error(suite.T(), err) |
||||
require.Nil(suite.T(), dec) |
||||
|
||||
dec, err = iter.Next() |
||||
require.NoError(suite.T(), err) |
||||
require.Nil(suite.T(), dec) |
||||
} |
||||
|
||||
func (suite *decodedSuite) TestQueryDecoded_CacheError() { |
||||
suite.rc.EXPECT().Query("query", "blah").Return(nil, injectedError) |
||||
|
||||
dec, err := cache.QueryDecoded[*pbdemo.Artist](suite.rc, "query", "blah") |
||||
require.ErrorIs(suite.T(), err, injectedError) |
||||
require.Nil(suite.T(), dec) |
||||
} |
||||
|
||||
func (suite *decodedSuite) TestQueryDecoded_Nil() { |
||||
suite.rc.EXPECT().Query("query", "blah").Return(nil, nil) |
||||
|
||||
dec, err := cache.QueryDecoded[*pbdemo.Artist](suite.rc, "query", "blah") |
||||
require.NoError(suite.T(), err) |
||||
require.Nil(suite.T(), dec) |
||||
} |
||||
|
||||
func TestDecodedCache(t *testing.T) { |
||||
suite.Run(t, new(decodedSuite)) |
||||
} |