
resource: add v2tenancy feature flag to deployer tests (#19774)

Semir Patel authored 12 months ago (committed by GitHub)
commit 2d1f308138
Changed files:
  1. internal/resource/resourcetest/client.go (11 lines changed)
  2. test-integ/Makefile (2 lines changed)
  3. test-integ/README.md (2 lines changed)
  4. test-integ/connect/snapshot_test.go (10 lines changed)
  5. test-integ/tenancy/client.go (166 lines changed)
  6. testing/deployer/sprawl/internal/tfgen/agent.go (11 lines changed)
  7. testing/deployer/sprawl/internal/tfgen/nodes.go (2 lines changed)
  8. testing/deployer/sprawl/sprawl.go (13 lines changed)
  9. testing/deployer/topology/default_versions.go (2 lines changed)
  10. testing/deployer/topology/images.go (2 lines changed)
  11. testing/deployer/topology/topology.go (4 lines changed)
  12. testing/deployer/update-latest-versions.sh (4 lines changed)

internal/resource/resourcetest/client.go (11 lines changed)

@@ -324,6 +324,17 @@ func (client *Client) WaitForResourceState(t T, id *pbresource.ID, verify func(T
return res
}
func (client *Client) WaitForResourceExists(t T, id *pbresource.ID) *pbresource.Resource {
t.Helper()
var res *pbresource.Resource
client.retry(t, func(r *retry.R) {
res = client.RequireResourceExists(r, id)
})
return res
}
func (client *Client) WaitForDeletion(t T, id *pbresource.ID) {
t.Helper()

test-integ/Makefile (2 lines changed)

@@ -1,7 +1,7 @@
SHELL := /bin/bash
.PHONY: noop
noop:
noop: help
##@ Build

test-integ/README.md (2 lines changed)

@@ -6,7 +6,7 @@ These should use the [testing/deployer framework](../testing/deployer) to bring
up some local testing infrastructure and fixtures to run test assertions against.
Where reasonably possible, try to bring up infrastructure interesting enough to
be able to run many related sorts of test against it, rather than waiting for
be able to run many related sorts of tests against it, rather than waiting for
many similar clusters to be provisioned and torn down. This will help ensure
that the integration tests do not consume CPU cycles needlessly.

test-integ/connect/snapshot_test.go (10 lines changed)

@@ -6,17 +6,17 @@ package connect
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/test-integ/topoutil"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/test-integ/topoutil"
)
// Test_Snapshot_Restore_Agentless verifies consul agent can continue
// to push envoy confgi after restoring from a snapshot.
// to push envoy config after restoring from a snapshot.
//
// - This test is to detect server agent frozen after restoring from a snapshot
// (https://github.com/hashicorp/consul/pull/18636)
@@ -164,7 +164,7 @@ func Test_Snapshot_Restore_Agentless(t *testing.T) {
asserter.HTTPStatus(t, staticServer, staticServer.Port, 200)
t.Log("Take a snapshot of the cluster and restore ...")
err := sp.SnapshotSave("dc1")
err := sp.SnapshotSaveAndRestore("dc1")
require.NoError(t, err)
// Shutdown existing static-server

test-integ/tenancy/client.go (166 lines changed)

@@ -0,0 +1,166 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package tenancy
import (
"context"
"fmt"
"time"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry"
)
// This duplicates a subset of internal/resource/resourcetest/client.go so
// we're not importing consul internals into integration tests.
//
// TODO: Move to a general package if used more widely.
// T represents the subset of testing.T methods that will be used
// by the various functionality in this package
type T interface {
Helper()
Log(args ...interface{})
Logf(format string, args ...interface{})
Errorf(format string, args ...interface{})
Fatalf(format string, args ...interface{})
FailNow()
Cleanup(func())
}
type ClientOption func(*Client)
func WithACLToken(token string) ClientOption {
return func(c *Client) {
c.token = token
}
}
// Client decorates a resource service client with helper functions to assist
// with integration testing.
type Client struct {
pbresource.ResourceServiceClient
timeout time.Duration
wait time.Duration
token string
}
func NewClient(client pbresource.ResourceServiceClient, opts ...ClientOption) *Client {
c := &Client{
ResourceServiceClient: client,
timeout: 7 * time.Second,
wait: 50 * time.Millisecond,
}
for _, opt := range opts {
opt(c)
}
return c
}
func NewClientWithACLToken(client pbresource.ResourceServiceClient, token string) *Client {
return NewClient(client, WithACLToken(token))
}
func (client *Client) SetRetryerConfig(timeout time.Duration, wait time.Duration) {
client.timeout = timeout
client.wait = wait
}
func (client *Client) retry(t T, fn func(r *retry.R)) {
t.Helper()
retryer := &retry.Timer{Timeout: client.timeout, Wait: client.wait}
retry.RunWith(retryer, t, fn)
}
func (client *Client) Context(t T) context.Context {
ctx := testutil.TestContext(t)
if client.token != "" {
md := metadata.New(map[string]string{
"x-consul-token": client.token,
})
ctx = metadata.NewOutgoingContext(ctx, md)
}
return ctx
}
func (client *Client) RequireResourceNotFound(t T, id *pbresource.ID) {
t.Helper()
rsp, err := client.Read(client.Context(t), &pbresource.ReadRequest{Id: id})
require.Error(t, err)
require.Equal(t, codes.NotFound, status.Code(err))
require.Nil(t, rsp)
}
func (client *Client) RequireResourceExists(t T, id *pbresource.ID) *pbresource.Resource {
t.Helper()
rsp, err := client.Read(client.Context(t), &pbresource.ReadRequest{Id: id})
require.NoError(t, err, "error reading %s with type %s", id.Name, ToGVK(id.Type))
require.NotNil(t, rsp)
return rsp.Resource
}
func ToGVK(resourceType *pbresource.Type) string {
return fmt.Sprintf("%s.%s.%s", resourceType.Group, resourceType.GroupVersion, resourceType.Kind)
}
func (client *Client) WaitForResourceExists(t T, id *pbresource.ID) *pbresource.Resource {
t.Helper()
var res *pbresource.Resource
client.retry(t, func(r *retry.R) {
res = client.RequireResourceExists(r, id)
})
return res
}
func (client *Client) WaitForDeletion(t T, id *pbresource.ID) {
t.Helper()
client.retry(t, func(r *retry.R) {
client.RequireResourceNotFound(r, id)
})
}
// MustDelete will delete a resource by its id, retrying if necessary, and fail the test
// if it cannot delete it within the timeout. The client's retry timeout and wait settings
// are taken into account for this operation.
func (client *Client) MustDelete(t T, id *pbresource.ID) {
t.Helper()
client.retryDelete(t, id)
}
func (client *Client) retryDelete(t T, id *pbresource.ID) {
t.Helper()
ctx := client.Context(t)
client.retry(t, func(r *retry.R) {
_, err := client.Delete(ctx, &pbresource.DeleteRequest{Id: id})
if status.Code(err) == codes.NotFound {
return
}
// codes.Aborted indicates a CAS failure and that the delete request should
// be retried. Anything else should be considered an unrecoverable error.
if err != nil && status.Code(err) != codes.Aborted {
r.Stop(fmt.Errorf("failed to delete the resource: %w", err))
return
}
require.NoError(r, err)
})
}
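As a usage sketch (not part of this change), a test in this package could drive the helper roughly as follows; the function name, resource type, tenancy values, and token are illustrative assumptions, not taken from this PR:

// Hypothetical usage sketch; the identifiers below are made up for illustration.
package tenancy

import (
	"testing"

	"google.golang.org/grpc"

	"github.com/hashicorp/consul/proto-public/pbresource"
)

func sketchClientUsage(t *testing.T, conn grpc.ClientConnInterface) {
	// Wrap the generated gRPC client and attach an ACL token to outgoing calls.
	client := NewClientWithACLToken(pbresource.NewResourceServiceClient(conn), "test-token")

	id := &pbresource.ID{
		Type:    &pbresource.Type{Group: "demo", GroupVersion: "v2", Kind: "Artist"},
		Name:    "example",
		Tenancy: &pbresource.Tenancy{Partition: "default", Namespace: "default"},
	}

	// Block (with retries) until the resource exists, then clean it up,
	// tolerating NotFound and retrying on CAS conflicts as described above.
	_ = client.WaitForResourceExists(t, id)
	client.MustDelete(t, id)
}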

testing/deployer/sprawl/internal/tfgen/agent.go (11 lines changed)

@@ -13,7 +13,7 @@ import (
"github.com/hashicorp/consul/testing/deployer/topology"
)
func (g *Generator) generateAgentHCL(node *topology.Node, enableV2 bool) string {
func (g *Generator) generateAgentHCL(node *topology.Node, enableV2, enableV2Tenancy bool) string {
if !node.IsAgent() {
panic("generateAgentHCL only applies to agents")
}
@@ -35,8 +35,15 @@ func (g *Generator) generateAgentHCL(node *topology.Node, enableV2 bool) string
b.add("enable_debug", true)
b.add("use_streaming_backend", true)
var experiments []string
if enableV2 {
b.addSlice("experiments", []string{"resource-apis"})
experiments = append(experiments, "resource-apis")
}
if enableV2Tenancy {
experiments = append(experiments, "v2tenancy")
}
if len(experiments) > 0 {
b.addSlice("experiments", experiments)
}
// speed up leaves
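For reference, when both flags are enabled on a server node, the generated agent HCL would be expected to contain an experiments stanza roughly like the following (an illustrative sketch; the exact rendering depends on the HCL builder used above):

experiments = ["resource-apis", "v2tenancy"]

With neither flag set, the stanza is omitted entirely, which is what the len(experiments) > 0 guard ensures.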

testing/deployer/sprawl/internal/tfgen/nodes.go (2 lines changed)

@@ -67,7 +67,7 @@ func (g *Generator) generateNodeContainers(
}{
terraformPod: pod,
ImageResource: DockerImageResourceName(node.Images.Consul),
HCL: g.generateAgentHCL(node, cluster.EnableV2 && node.IsServer()),
HCL: g.generateAgentHCL(node, cluster.EnableV2 && node.IsServer(), cluster.EnableV2Tenancy && node.IsServer()),
EnterpriseLicense: g.license,
}))
}

testing/deployer/sprawl/sprawl.go (13 lines changed)

@@ -17,13 +17,14 @@ import (
"time"
retry "github.com/avast/retry-go"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-multierror"
"github.com/mitchellh/copystructure"
"google.golang.org/grpc"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/testing/deployer/sprawl/internal/runner"
"github.com/hashicorp/consul/testing/deployer/sprawl/internal/secrets"
"github.com/hashicorp/consul/testing/deployer/sprawl/internal/tfgen"
@@ -405,8 +405,8 @@ func (s *Sprawl) RelaunchWithPhase(
return nil
}
// SnapshotSave saves a snapshot of a cluster and restore with the snapshot
func (s *Sprawl) SnapshotSave(clusterName string) error {
// SnapshotSaveAndRestore saves a snapshot of a cluster and then restores the snapshot
func (s *Sprawl) SnapshotSaveAndRestore(clusterName string) error {
cluster, ok := s.topology.Clusters[clusterName]
if !ok {
return fmt.Errorf("no such cluster: %s", clusterName)

testing/deployer/topology/default_versions.go (2 lines changed)

@@ -6,7 +6,7 @@
package topology
const (
DefaultConsulImage = "hashicorp/consul:1.17.0"
DefaultConsulCEImage = "hashicorp/consul:1.17.0"
DefaultConsulEnterpriseImage = "hashicorp/consul-enterprise:1.17.0-ent"
DefaultEnvoyImage = "envoyproxy/envoy:v1.27.2"
DefaultDataplaneImage = "hashicorp/consul-dataplane:1.3.0"

testing/deployer/topology/images.go (2 lines changed)

@@ -122,7 +122,7 @@ func (i Images) OverrideWith(i2 Images) Images {
func DefaultImages() Images {
return Images{
Consul: "",
ConsulCE: DefaultConsulImage,
ConsulCE: DefaultConsulCEImage,
ConsulEnterprise: DefaultConsulEnterpriseImage,
Envoy: DefaultEnvoyImage,
Dataplane: DefaultDataplaneImage,

testing/deployer/topology/topology.go (4 lines changed)

@@ -286,6 +286,10 @@ type Cluster struct {
// EnableV2 activates V2 on the servers. If any node in the cluster needs
// V2 this will be turned on automatically.
EnableV2 bool `json:",omitempty"`
// EnableV2Tenancy activates V2 tenancy on the servers. If not enabled,
// V2 resources are bridged to V1 tenancy counterparts.
EnableV2Tenancy bool `json:",omitempty"`
}
func (c *Cluster) inheritFromExisting(existing *Cluster) {
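For context, a hedged sketch of how a deployer topology might opt a cluster into the new flag (values are illustrative, and other required Cluster fields such as nodes and networks are omitted):

// Hypothetical topology fragment (not from this PR); only the experiment-related
// fields of the Cluster are shown.
package example

import "github.com/hashicorp/consul/testing/deployer/topology"

func exampleCluster() *topology.Cluster {
	return &topology.Cluster{
		Name:            "dc1",
		EnableV2:        true, // servers get "resource-apis" in their experiments list
		EnableV2Tenancy: true, // servers additionally get "v2tenancy"
	}
}

Per the nodes.go change above, both experiments are applied only to server nodes even when these cluster-level flags are set.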

testing/deployer/update-latest-versions.sh (4 lines changed)

@@ -8,7 +8,7 @@ cd "$(dirname "$0")"
###
# This script will update the default image names to the latest released versions of
# Consul, Consul Enterprise, and Consul Dataplane.
# Consul CE, Consul Enterprise, and Consul Dataplane.
#
# For Envoy, it will interrogate the latest version of Consul for its maximum supported
# Envoy version and use that.
@@ -50,7 +50,7 @@ cat > topology/default_versions.go <<EOF
package topology
const (
DefaultConsulImage = "hashicorp/consul:${consul_version}"
DefaultConsulCEImage = "hashicorp/consul:${consul_version}"
DefaultConsulEnterpriseImage = "hashicorp/consul-enterprise:${consul_version}-ent"
DefaultEnvoyImage = "envoyproxy/envoy:v${envoy_version}"
DefaultDataplaneImage = "hashicorp/consul-dataplane:${dataplane_version}"
