
Retry lint fixes (#19151)

* Add a make target to run lint-consul-retry on all the modules
* Cleanup sdk/testutil/retry
* Fix a bunch of retry.Run* usage to not use the outer testing.T (the sketch after this list shows the pattern)
* Fix some more recent retry lint issues and pin to v1.4.0 of lint-consul-retry
* Fix codegen copywrite lint issues
* Don’t perform cleanup after each retry attempt by default.
* Use the common testutil.TestingTB interface in test-integ/tenancy
* Fix retry tests
* Update otel access logging extension test to perform requests within the retry block
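
The recurring fix in this commit is mechanical: inside a retry block, log, fail, and assert through the per-attempt *retry.R instead of the enclosing *testing.T, so a failed attempt is retried rather than failing (or silently passing against) the whole test. A minimal sketch of the pattern, not taken from the diff (fetchHealth is a hypothetical helper):

package example

import (
	"testing"

	"github.com/hashicorp/consul/sdk/testutil/retry"
	"github.com/stretchr/testify/require"
)

// fetchHealth is a hypothetical helper standing in for a real lookup.
func fetchHealth(service string) (string, error) { return "passing", nil }

func TestServiceHealthy(t *testing.T) {
	retry.Run(t, func(r *retry.R) {
		// Use r, not the outer t: r.Fatalf fails only this attempt,
		// and the retryer decides whether to run the function again.
		status, err := fetchHealth("web")
		if err != nil {
			r.Fatalf("fetching health: %v", err)
		}
		require.Equal(r, "passing", status)
	})
}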
Matt Keeler committed 12 months ago (committed by GitHub)
commit efe279f802
  1. .github/workflows/go-tests.yml (2 changes)
  2. Makefile (11 changes)
  3. agent/agent_endpoint_test.go (12 changes)
  4. agent/agent_test.go (18 changes)
  5. agent/catalog_endpoint_test.go (2 changes)
  6. agent/connect/ca/provider_vault_test.go (2 changes)
  7. agent/connect/ca/testing.go (4 changes)
  8. agent/connect/testing_spiffe.go (10 changes)
  9. agent/consul/client_test.go (4 changes)
  10. agent/consul/enterprise_server_ce_test.go (5 changes)
  11. agent/consul/server_test.go (10 changes)
  12. agent/event_endpoint_test.go (2 changes)
  13. agent/grpc-external/services/peerstream/stream_test.go (2 changes)
  14. agent/health_endpoint_test.go (6 changes)
  15. agent/http_test.go (6 changes)
  16. agent/remote_exec_test.go (9 changes)
  17. agent/session_endpoint_test.go (25 changes)
  18. agent/testagent.go (10 changes)
  19. api/api_test.go (2 changes)
  20. api/lock_test.go (5 changes)
  21. connect/proxy/proxy_test.go (2 changes)
  22. connect/service_test.go (2 changes)
  23. internal/mesh/internal/controllers/explicitdestinations/controller_test.go (2 changes)
  24. internal/mesh/internal/controllers/proxyconfiguration/controller_test.go (2 changes)
  25. internal/resource/resourcetest/testing.go (10 changes)
  26. sdk/testutil/io.go (2 changes)
  27. sdk/testutil/retry/doc.go (22 changes)
  28. sdk/testutil/retry/interface.go (35 changes)
  29. sdk/testutil/retry/output.go (42 changes)
  30. sdk/testutil/retry/retry.go (353 changes)
  31. sdk/testutil/retry/retry_test.go (126 changes)
  32. sdk/testutil/retry/retryer.go (36 changes)
  33. sdk/testutil/retry/run.go (48 changes)
  34. sdk/testutil/retry/timer.go (16 changes)
  35. sdk/testutil/types.go (25 changes)
  36. test-integ/tenancy/client.go (28 changes)
  37. test-integ/topoutil/asserter.go (7 changes)
  38. test/integration/consul-container/libs/assert/grpc.go (2 changes)
  39. test/integration/consul-container/libs/assert/service.go (6 changes)
  40. test/integration/consul-container/test/envoy_extensions/ext_authz_test.go (19 changes)
  41. test/integration/consul-container/test/envoy_extensions/otel_access_logging_test.go (12 changes)
  42. test/integration/consul-container/test/gateways/terminating_gateway_test.go (4 changes)
  43. test/integration/consul-container/test/ratelimit/ratelimit_test.go (5 changes)
  44. test/integration/consul-container/test/tproxy/tproxy_test.go (4 changes)
  45. testrpc/wait.go (1 change)

.github/workflows/go-tests.yml (2 changes)

@@ -156,7 +156,7 @@ jobs:
       - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1
         with:
           go-version-file: 'go.mod'
-      - run: go install github.com/hashicorp/lint-consul-retry@v1.3.0 && lint-consul-retry
+      - run: make lint-consul-retry
   lint:
     needs:

Makefile (11 changes)

@@ -20,7 +20,7 @@ PROTOC_GO_INJECT_TAG_VERSION='v1.3.0'
 PROTOC_GEN_GO_BINARY_VERSION='v0.1.0'
 DEEP_COPY_VERSION='bc3f5aa5735d8a54961580a3a24422c308c831c2'
 COPYWRITE_TOOL_VERSION='v0.16.4'
-LINT_CONSUL_RETRY_VERSION='v1.3.0'
+LINT_CONSUL_RETRY_VERSION='v1.4.0'

 # Go imports formatter
 GCI_VERSION='v0.11.2'
@@ -258,6 +258,15 @@ lint/%:
 	@echo "--> Running enumcover ($*)"
 	@cd $* && GOWORK=off enumcover ./...

+.PHONY: lint-consul-retry
+lint-consul-retry: $(foreach mod,$(GO_MODULES),lint-consul-retry/$(mod))
+
+.PHONY: lint-consul-retry/%
+lint-consul-retry/%: lint-tools
+	@echo "--> Running lint-consul-retry ($*)"
+	@cd $* && GOWORK=off lint-consul-retry
+
 # check that the test-container module only imports allowlisted packages
 # from the root consul module. Generally we don't want to allow these imports.
 # In a few specific instances though it is okay to import test definitions and

agent/agent_endpoint_test.go (12 changes)

@@ -1877,7 +1877,7 @@ func TestAgent_ReloadDoesNotTriggerWatch(t *testing.T) {
 	require.NoError(t, a.updateTTLCheck(checkID, api.HealthPassing, "testing-agent-reload-001"))

 	checkStr := func(r *retry.R, evaluator func(string) error) {
-		t.Helper()
+		r.Helper()
 		contentsStr := ""
 		// Wait for watch to be populated
 		for i := 1; i < 7; i++ {
@@ -1890,14 +1890,14 @@ func TestAgent_ReloadDoesNotTriggerWatch(t *testing.T) {
 				break
 			}
 			time.Sleep(time.Duration(i) * time.Second)
-			testutil.Logger(t).Info("Watch not yet populated, retrying")
+			testutil.Logger(r).Info("Watch not yet populated, retrying")
 		}
 		if err := evaluator(contentsStr); err != nil {
 			r.Errorf("ERROR: Test failing: %s", err)
 		}
 	}
 	ensureNothingCritical := func(r *retry.R, mustContain string) {
-		t.Helper()
+		r.Helper()
 		eval := func(contentsStr string) error {
 			if strings.Contains(contentsStr, "critical") {
 				return fmt.Errorf("MUST NOT contain critical:= %s", contentsStr)
@@ -1915,7 +1915,7 @@ func TestAgent_ReloadDoesNotTriggerWatch(t *testing.T) {
 	}

 	retry.RunWith(retriesWithDelay(), t, func(r *retry.R) {
-		testutil.Logger(t).Info("Consul is now ready")
+		testutil.Logger(r).Info("Consul is now ready")
 		// it should contain the output
 		checkStr(r, func(contentStr string) error {
 			if contentStr == "[]" {
@@ -4340,7 +4340,7 @@ func testDefaultSidecar(svc string, port int, fns ...func(*structs.NodeService))
 }

 // testCreateToken creates a Policy for the provided rules and a Token linked to that Policy.
-func testCreateToken(t *testing.T, a *TestAgent, rules string) string {
+func testCreateToken(t testutil.TestingTB, a *TestAgent, rules string) string {
 	policyName, err := uuid.GenerateUUID() // we just need a unique name for the test and UUIDs are definitely unique
 	require.NoError(t, err)
@@ -4369,7 +4369,7 @@ func testCreateToken(t *testing.T, a *TestAgent, rules string) string {
 	return aclResp.SecretID
 }

-func testCreatePolicy(t *testing.T, a *TestAgent, name, rules string) string {
+func testCreatePolicy(t testutil.TestingTB, a *TestAgent, name, rules string) string {
 	args := map[string]interface{}{
 		"Name":  name,
 		"Rules": rules,

agent/agent_test.go (18 changes)

@@ -90,7 +90,7 @@ func requireServiceMissing(t *testing.T, a *TestAgent, id string) {
 	require.Nil(t, getService(a, id), "have service %q (expected missing)", id)
 }

-func requireCheckExists(t *testing.T, a *TestAgent, id types.CheckID) *structs.HealthCheck {
+func requireCheckExists(t testutil.TestingTB, a *TestAgent, id types.CheckID) *structs.HealthCheck {
 	t.Helper()
 	chk := getCheck(a, id)
 	require.NotNil(t, chk, "missing check %q", id)
@@ -853,7 +853,7 @@ func TestAgent_CheckAliasRPC(t *testing.T) {
 	assert.NoError(t, err)

 	retry.Run(t, func(r *retry.R) {
-		t.Helper()
+		r.Helper()
 		var args structs.NodeSpecificRequest
 		args.Datacenter = "dc1"
 		args.Node = "node1"
@@ -1888,7 +1888,7 @@ func TestAgent_RestoreServiceWithAliasCheck(t *testing.T) {
 	// We do this so that the agent logs and the informational messages from
 	// the test itself are interwoven properly.
-	logf := func(t *testing.T, a *TestAgent, format string, args ...interface{}) {
+	logf := func(a *TestAgent, format string, args ...interface{}) {
 		a.logger.Info("testharness: " + fmt.Sprintf(format, args...))
 	}
@@ -1947,12 +1947,12 @@ func TestAgent_RestoreServiceWithAliasCheck(t *testing.T) {
 	retryUntilCheckState := func(t *testing.T, a *TestAgent, checkID string, expectedStatus string) {
 		t.Helper()
 		retry.Run(t, func(r *retry.R) {
-			chk := requireCheckExists(t, a, types.CheckID(checkID))
+			chk := requireCheckExists(r, a, types.CheckID(checkID))
 			if chk.Status != expectedStatus {
-				logf(t, a, "check=%q expected status %q but got %q", checkID, expectedStatus, chk.Status)
+				logf(a, "check=%q expected status %q but got %q", checkID, expectedStatus, chk.Status)
 				r.Fatalf("check=%q expected status %q but got %q", checkID, expectedStatus, chk.Status)
 			}
-			logf(t, a, "check %q has reached desired status %q", checkID, expectedStatus)
+			logf(a, "check %q has reached desired status %q", checkID, expectedStatus)
 		})
 	}
@@ -1963,7 +1963,7 @@ func TestAgent_RestoreServiceWithAliasCheck(t *testing.T) {
 	retryUntilCheckState(t, a, "service:ping", api.HealthPassing)
 	retryUntilCheckState(t, a, "service:ping-sidecar-proxy", api.HealthPassing)

-	logf(t, a, "==== POWERING DOWN ORIGINAL ====")
+	logf(a, "==== POWERING DOWN ORIGINAL ====")

 	require.NoError(t, a.Shutdown())
@@ -1985,7 +1985,7 @@ node_name = "` + a.Config.NodeName + `"
 		// reregister during standup; we use an adjustable timing to try and force a race
 		sleepDur := time.Duration(idx+1) * 500 * time.Millisecond
 		time.Sleep(sleepDur)
-		logf(t, a2, "re-registering checks and services after a delay of %v", sleepDur)
+		logf(a2, "re-registering checks and services after a delay of %v", sleepDur)
 		for i := 0; i < 20; i++ { // RACE RACE RACE!
 			registerServicesAndChecks(t, a2)
 			time.Sleep(50 * time.Millisecond)
@@ -1995,7 +1995,7 @@ node_name = "` + a.Config.NodeName + `"
 	retryUntilCheckState(t, a2, "service:ping", api.HealthPassing)

-	logf(t, a2, "giving the alias check a chance to notice...")
+	logf(a2, "giving the alias check a chance to notice...")
 	time.Sleep(5 * time.Second)

 	retryUntilCheckState(t, a2, "service:ping-sidecar-proxy", api.HealthPassing)

agent/catalog_endpoint_test.go (2 changes)

@@ -1167,7 +1167,7 @@ func TestCatalogServiceNodes_DistanceSort(t *testing.T) {
 			r.Fatalf("err: %v", err)
 		}
-		assertIndex(t, resp)
+		assertIndex(r, resp)
 		nodes = obj.(structs.ServiceNodes)
 		if len(nodes) != 2 {
 			r.Fatalf("bad: %v", obj)

agent/connect/ca/provider_vault_test.go (2 changes)

@@ -295,7 +295,7 @@ func TestVaultCAProvider_ConfigureFailureGoroutineLeakCheck(t *testing.T) {
 		profile := pprof.Lookup("goroutine")
 		sb := strings.Builder{}
 		require.NoError(r, profile.WriteTo(&sb, 2))
-		t.Log(sb.String())
+		r.Log(sb.String())
 		require.Contains(r, sb.String(),
 			"created by github.com/hashicorp/consul/agent/connect/ca.(*VaultProvider).Configure",
 			"expected renewal goroutine, got none")

agent/connect/ca/testing.go (4 changes)

@@ -126,7 +126,7 @@ func SkipIfVaultNotPresent(t testing.T, reqs ...vaultRequirements) {
 	}
 }

-func NewTestVaultServer(t testing.T) *TestVaultServer {
+func NewTestVaultServer(t retry.TestingTB) *TestVaultServer {
 	vaultBinaryName := os.Getenv("VAULT_BINARY_NAME")
 	if vaultBinaryName == "" {
 		vaultBinaryName = "vault"
@@ -204,7 +204,7 @@ func (v *TestVaultServer) Client() *vaultapi.Client {
 	return v.client
 }

-func (v *TestVaultServer) WaitUntilReady(t testing.T) {
+func (v *TestVaultServer) WaitUntilReady(t retry.TestingTB) {
 	var version string
 	retry.Run(t, func(r *retry.R) {
 		resp, err := v.client.Sys().Health()

agent/connect/testing_spiffe.go (10 changes)

@@ -3,24 +3,22 @@

 package connect

-import (
-	"github.com/mitchellh/go-testing-interface"
-)
+import "github.com/hashicorp/consul/sdk/testutil"

 // TestSpiffeIDService returns a SPIFFE ID representing a service.
-func TestSpiffeIDService(t testing.T, service string) *SpiffeIDService {
+func TestSpiffeIDService(t testutil.TestingTB, service string) *SpiffeIDService {
 	return TestSpiffeIDServiceWithHost(t, service, TestClusterID+".consul")
 }

 // TestSpiffeIDServiceWithHost returns a SPIFFE ID representing a service with
 // the specified trust domain.
-func TestSpiffeIDServiceWithHost(t testing.T, service, host string) *SpiffeIDService {
+func TestSpiffeIDServiceWithHost(t testutil.TestingTB, service, host string) *SpiffeIDService {
 	return TestSpiffeIDServiceWithHostDC(t, service, host, "dc1")
 }

 // TestSpiffeIDServiceWithHostDC returns a SPIFFE ID representing a service with
 // the specified trust domain for the given datacenter.
-func TestSpiffeIDServiceWithHostDC(t testing.T, service, host, datacenter string) *SpiffeIDService {
+func TestSpiffeIDServiceWithHostDC(t testutil.TestingTB, service, host, datacenter string) *SpiffeIDService {
 	return &SpiffeIDService{
 		Host:      host,
 		Namespace: "default",

agent/consul/client_test.go (4 changes)

@@ -509,7 +509,7 @@ func newClient(t *testing.T, config *Config) *Client {
 	return client
 }

-func newTestResolverConfig(t *testing.T, suffix string, dc, agentType string) resolver.Config {
+func newTestResolverConfig(t testutil.TestingTB, suffix string, dc, agentType string) resolver.Config {
 	n := t.Name()
 	s := strings.Replace(n, "/", "", -1)
 	s = strings.Replace(s, "_", "", -1)
@@ -520,7 +520,7 @@ func newTestResolverConfig(t *testing.T, suffix string, dc, agentType string) re
 	}
 }

-func newDefaultDeps(t *testing.T, c *Config) Deps {
+func newDefaultDeps(t testutil.TestingTB, c *Config) Deps {
 	t.Helper()

 	logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{

agent/consul/enterprise_server_ce_test.go (5 changes)

@@ -6,12 +6,11 @@
 package consul

 import (
-	"testing"
+	"github.com/hashicorp/consul/sdk/testutil"

 	hclog "github.com/hashicorp/go-hclog"
 )

-func newDefaultDepsEnterprise(t *testing.T, _ hclog.Logger, _ *Config) EnterpriseDeps {
+func newDefaultDepsEnterprise(t testutil.TestingTB, _ hclog.Logger, _ *Config) EnterpriseDeps {
 	t.Helper()
 	return EnterpriseDeps{}
 }

agent/consul/server_test.go (10 changes)

@@ -121,7 +121,7 @@ func waitForLeaderEstablishment(t *testing.T, servers ...*Server) {
 	})
 }

-func testServerConfig(t *testing.T) (string, *Config) {
+func testServerConfig(t testutil.TestingTB) (string, *Config) {
 	dir := testutil.TempDir(t, "consul")
 	config := DefaultConfig()
@@ -237,7 +237,7 @@ func testServerWithConfig(t *testing.T, configOpts ...func(*Config)) (string, *S
 	var deps Deps
 	// Retry added to avoid cases where bind addr is already in use
 	retry.RunWith(retry.ThreeTimes(), t, func(r *retry.R) {
-		dir, config = testServerConfig(t)
+		dir, config = testServerConfig(r)
 		for _, fn := range configOpts {
 			fn(config)
 		}
@@ -250,8 +250,8 @@ func testServerWithConfig(t *testing.T, configOpts ...func(*Config)) (string, *S
 		config.ACLResolverSettings.EnterpriseMeta = *config.AgentEnterpriseMeta()

 		var err error
-		deps = newDefaultDeps(t, config)
-		srv, err = newServerWithDeps(t, config, deps)
+		deps = newDefaultDeps(r, config)
+		srv, err = newServerWithDeps(r, config, deps)
 		if err != nil {
 			r.Fatalf("err: %v", err)
 		}
@@ -331,7 +331,7 @@ func newServer(t *testing.T, c *Config) (*Server, error) {
 	return newServerWithDeps(t, c, newDefaultDeps(t, c))
 }

-func newServerWithDeps(t *testing.T, c *Config, deps Deps) (*Server, error) {
+func newServerWithDeps(t testutil.TestingTB, c *Config, deps Deps) (*Server, error) {
 	// chain server up notification
 	oldNotify := c.NotifyListen
 	up := make(chan struct{})

agent/event_endpoint_test.go (2 changes)

@@ -234,7 +234,7 @@ func TestEventList_ACLFilter(t *testing.T) {
 	t.Run("token with access to one event type", func(t *testing.T) {
 		retry.Run(t, func(r *retry.R) {
-			token := testCreateToken(t, a, `
+			token := testCreateToken(r, a, `
 				event "foo" {
 					policy = "read"
 				}

agent/grpc-external/services/peerstream/stream_test.go (2 changes)

@@ -690,7 +690,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
 		req := msg.GetRequest()
 		require.NotNil(r, req)
 		require.Equal(r, pbpeerstream.TypeURLExportedService, req.ResourceURL)
-		prototest.AssertDeepEqual(t, expectAck, msg)
+		prototest.AssertDeepEqual(r, expectAck, msg)
 	})

 	expect := Status{

agent/health_endpoint_test.go (6 changes)

@@ -258,7 +258,7 @@ func TestHealthChecksInState_DistanceSort(t *testing.T) {
 		if err != nil {
 			r.Fatalf("err: %v", err)
 		}
-		assertIndex(t, resp)
+		assertIndex(r, resp)
 		nodes = obj.(structs.HealthChecks)
 		if len(nodes) != 2 {
 			r.Fatalf("bad: %v", nodes)
@@ -613,7 +613,7 @@ func TestHealthServiceChecks_DistanceSort(t *testing.T) {
 		if err != nil {
 			r.Fatalf("err: %v", err)
 		}
-		assertIndex(t, resp)
+		assertIndex(r, resp)
 		nodes = obj.(structs.HealthChecks)
 		if len(nodes) != 2 {
 			r.Fatalf("bad: %v", obj)
@@ -1371,7 +1371,7 @@ func TestHealthServiceNodes_DistanceSort(t *testing.T) {
 		if err != nil {
 			r.Fatalf("err: %v", err)
 		}
-		assertIndex(t, resp)
+		assertIndex(r, resp)
 		nodes = obj.(structs.CheckServiceNodes)
 		if len(nodes) != 2 {
 			r.Fatalf("bad: %v", obj)

agent/http_test.go (6 changes)

@@ -1628,10 +1628,8 @@ func TestAllowedNets(t *testing.T) {
 }

 // assertIndex tests that X-Consul-Index is set and non-zero
-func assertIndex(t require.TestingT, resp *httptest.ResponseRecorder) {
-	if tt, ok := t.(*testing.T); ok {
-		tt.Helper()
-	}
+func assertIndex(t testutil.TestingTB, resp *httptest.ResponseRecorder) {
+	t.Helper()
 	require.NoError(t, checkIndex(resp))
 }

agent/remote_exec_test.go (9 changes)

@@ -15,6 +15,7 @@ import (
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/api"
+	"github.com/hashicorp/consul/sdk/testutil"
 	"github.com/hashicorp/consul/sdk/testutil/retry"
 	"github.com/hashicorp/consul/testrpc"
 	"github.com/hashicorp/go-uuid"
@@ -358,9 +359,9 @@ func testHandleRemoteExec(t *testing.T, command string, expectedSubstring string
 	retry.Run(t, func(r *retry.R) {
 		event := &remoteExecEvent{
 			Prefix:  "_rexec",
-			Session: makeRexecSession(t, a.Agent, ""),
+			Session: makeRexecSession(r, a.Agent, ""),
 		}
-		defer destroySession(t, a.Agent, event.Session, "")
+		defer destroySession(r, a.Agent, event.Session, "")

 		spec := &remoteExecSpec{
 			Command: command,
@@ -429,7 +430,7 @@ func TestHandleRemoteExecFailed(t *testing.T) {
 	testHandleRemoteExec(t, "echo failing;exit 2", "failing", "2")
 }

-func makeRexecSession(t *testing.T, a *Agent, token string) string {
+func makeRexecSession(t testutil.TestingTB, a *Agent, token string) string {
 	args := structs.SessionRequest{
 		Datacenter: a.config.Datacenter,
 		Op:         structs.SessionCreate,
@@ -448,7 +449,7 @@ func makeRexecSession(t *testing.T, a *Agent, token string) string {
 	return out
 }

-func destroySession(t *testing.T, a *Agent, session string, token string) {
+func destroySession(t testutil.TestingTB, a *Agent, session string, token string) {
 	args := structs.SessionRequest{
 		Datacenter: a.config.Datacenter,
 		Op:         structs.SessionDestroy,

agent/session_endpoint_test.go (25 changes)

@@ -15,13 +15,14 @@ import (
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/api"
+	"github.com/hashicorp/consul/sdk/testutil"
 	"github.com/hashicorp/consul/sdk/testutil/retry"
 	"github.com/hashicorp/consul/testrpc"
 	"github.com/hashicorp/consul/types"
 	"github.com/stretchr/testify/require"
 )

-func verifySession(t *testing.T, r *retry.R, a *TestAgent, want structs.Session) {
+func verifySession(t testutil.TestingTB, a *TestAgent, want structs.Session) {
 	t.Helper()

 	args := &structs.SessionSpecificRequest{
@@ -30,10 +31,10 @@ func verifySession(t *testing.T, r *retry.R, a *TestAgent, want structs.Session)
 	}
 	var out structs.IndexedSessions
 	if err := a.RPC(context.Background(), "Session.Get", args, &out); err != nil {
-		r.Fatalf("err: %v", err)
+		t.Fatalf("err: %v", err)
 	}
 	if len(out.Sessions) != 1 {
-		r.Fatalf("bad: %#v", out.Sessions)
+		t.Fatalf("bad: %#v", out.Sessions)
 	}

 	// Make a copy so we don't modify the state store copy for an in-mem
@@ -123,7 +124,7 @@ func TestSessionCreate(t *testing.T) {
 			LockDelay: 20 * time.Second,
 			Behavior:  structs.SessionKeysRelease,
 		}
-		verifySession(t, r, a, want)
+		verifySession(r, a, want)
 	})
 }
@@ -188,7 +189,7 @@ func TestSessionCreate_NodeChecks(t *testing.T) {
 			LockDelay: 20 * time.Second,
 			Behavior:  structs.SessionKeysRelease,
 		}
-		verifySession(t, r, a, want)
+		verifySession(r, a, want)
 	})
 }
@@ -250,7 +251,7 @@ func TestSessionCreate_Delete(t *testing.T) {
 			LockDelay: 20 * time.Second,
 			Behavior:  structs.SessionKeysDelete,
 		}
-		verifySession(t, r, a, want)
+		verifySession(r, a, want)
 	})
 }
@@ -288,7 +289,7 @@ func TestSessionCreate_DefaultCheck(t *testing.T) {
 			LockDelay: 20 * time.Second,
 			Behavior:  structs.SessionKeysRelease,
 		}
-		verifySession(t, r, a, want)
+		verifySession(r, a, want)
 	})
 }
@@ -329,7 +330,7 @@ func TestSessionCreate_NoCheck(t *testing.T) {
 			LockDelay: 20 * time.Second,
 			Behavior:  structs.SessionKeysRelease,
 		}
-		verifySession(t, r, a, want)
+		verifySession(r, a, want)
 	})
 })
@@ -359,7 +360,7 @@ func TestSessionCreate_NoCheck(t *testing.T) {
 			LockDelay: 20 * time.Second,
 			Behavior:  structs.SessionKeysRelease,
 		}
-		verifySession(t, r, a, want)
+		verifySession(r, a, want)
 	})
 })
@@ -391,7 +392,7 @@ func TestSessionCreate_NoCheck(t *testing.T) {
 			LockDelay: 20 * time.Second,
 			Behavior:  structs.SessionKeysRelease,
 		}
-		verifySession(t, r, a, want)
+		verifySession(r, a, want)
 		})
 	})
 }
@@ -430,7 +431,7 @@ func makeTestSessionDelete(t *testing.T, srv *HTTPHandlers) string {
 	return sessResp.ID
 }

-func makeTestSessionTTL(t *testing.T, srv *HTTPHandlers, ttl string) string {
+func makeTestSessionTTL(t testutil.TestingTB, srv *HTTPHandlers, ttl string) string {
 	t.Helper()
 	// Create Session with TTL
 	body := bytes.NewBuffer(nil)
@@ -488,7 +489,7 @@ func TestSessionCustomTTL(t *testing.T) {
 	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

 	retry.Run(t, func(r *retry.R) {
-		id := makeTestSessionTTL(t, a.srv, ttl.String())
+		id := makeTestSessionTTL(r, a.srv, ttl.String())

 		req, _ := http.NewRequest("GET", "/v1/session/info/"+id, nil)
 		resp := httptest.NewRecorder()

agent/testagent.go (10 changes)

@@ -117,8 +117,8 @@ func NewTestAgentWithConfigFile(t *testing.T, hcl string, configFiles []string)
 func StartTestAgent(t *testing.T, a TestAgent) *TestAgent {
 	t.Helper()
 	retry.RunWith(retry.ThreeTimes(), t, func(r *retry.R) {
-		t.Helper()
-		if err := a.Start(t); err != nil {
+		r.Helper()
+		if err := a.Start(r); err != nil {
 			r.Fatal(err)
 		}
 	})
@@ -152,7 +152,7 @@ func TestConfigHCL(nodeID string) string {

 // Start starts a test agent. It returns an error if the agent could not be started.
 // If no error is returned, the caller must call Shutdown() when finished.
-func (a *TestAgent) Start(t *testing.T) error {
+func (a *TestAgent) Start(t testutil.TestingTB) error {
 	t.Helper()
 	if a.Agent != nil {
 		return fmt.Errorf("TestAgent already started")
@@ -442,10 +442,10 @@ func (r *retryShim) Name() string {
 // chance of port conflicts for concurrently executed test binaries.
 // Instead of relying on one set of ports to be sufficient we retry
 // starting the agent with different ports on port conflict.
-func randomPortsSource(t *testing.T, useHTTPS bool) string {
+func randomPortsSource(t testutil.TestingTB, useHTTPS bool) string {
 	var ports []int
 	retry.RunWith(retry.TwoSeconds(), t, func(r *retry.R) {
-		ports = freeport.GetN(&retryShim{r, t.Name()}, 7)
+		ports = freeport.GetN(r, 7)
 	})

 	var http, https int

api/api_test.go (2 changes)

@@ -119,7 +119,7 @@ func makeClientWithConfig(
 	var server *testutil.TestServer
 	var err error
 	retry.RunWith(retry.ThreeTimes(), t, func(r *retry.R) {
-		server, err = testutil.NewTestServerConfigT(t, cb2)
+		server, err = testutil.NewTestServerConfigT(r, cb2)
 		if err != nil {
 			r.Fatalf("Failed to start server: %v", err.Error())
 		}

api/lock_test.go (5 changes)

@@ -13,10 +13,11 @@ import (
 	"testing"
 	"time"

+	"github.com/hashicorp/consul/sdk/testutil"
 	"github.com/hashicorp/consul/sdk/testutil/retry"
 )

-func createTestLock(t *testing.T, c *Client, key string) (*Lock, *Session) {
+func createTestLock(t testutil.TestingTB, c *Client, key string) (*Lock, *Session) {
 	t.Helper()
 	session := c.Session()
@@ -106,7 +107,7 @@ func TestAPI_LockForceInvalidate(t *testing.T) {
 		defer s.Stop()

 		retry.Run(t, func(r *retry.R) {
-			lock, session := createTestLock(t, c, "test/lock")
+			lock, session := createTestLock(r, c, "test/lock")
 			defer session.Destroy(lock.opts.Session, nil)

 			// Should work

connect/proxy/proxy_test.go (2 changes)

@@ -98,7 +98,7 @@ func TestProxy_public(t *testing.T) {
 	retry.Run(t, func(r *retry.R) {
 		conn, err = svc.Dial(context.Background(), &connect.StaticResolver{
 			Addr:    TestLocalAddr(ports[0]),
-			CertURI: agConnect.TestSpiffeIDService(t, "echo"),
+			CertURI: agConnect.TestSpiffeIDService(r, "echo"),
 		})
 		if err != nil {
 			r.Fatalf("err: %s", err)

connect/service_test.go (2 changes)

@@ -246,7 +246,7 @@ func TestService_HTTPClient(t *testing.T) {
 		//require.Equal(t,"https://backend.service.consul:443", addr)
 		return &StaticResolver{
 			Addr:    testSvr.Addr,
-			CertURI: connect.TestSpiffeIDService(t, "backend"),
+			CertURI: connect.TestSpiffeIDService(r, "backend"),
 		}, nil
 	}

internal/mesh/internal/controllers/explicitdestinations/controller_test.go (2 changes)

@@ -911,7 +911,7 @@ func (suite *controllerTestSuite) TestController() {
 		expDest := &pbmesh.ComputedExplicitDestinations{
 			Destinations: suite.dest1.Destinations,
 		}
-		dec := resourcetest.MustDecode[*pbmesh.ComputedExplicitDestinations](t, res)
+		dec := resourcetest.MustDecode[*pbmesh.ComputedExplicitDestinations](r, res)
 		prototest.AssertDeepEqual(r, expDest.GetDestinations(), dec.GetData().GetDestinations())

 		matchingWorkloadCD := suite.client.RequireResourceExists(r, matchingWorkloadCDID)

internal/mesh/internal/controllers/proxyconfiguration/controller_test.go (2 changes)

@@ -245,7 +245,7 @@ func (suite *controllerTestSuite) TestController() {
 				PrometheusBindAddr: "0.0.0.0:9000",
 			},
 		}
-		dec := resourcetest.MustDecode[*pbmesh.ComputedProxyConfiguration](t, res)
+		dec := resourcetest.MustDecode[*pbmesh.ComputedProxyConfiguration](r, res)
 		prototest.AssertDeepEqual(r, expProxyCfg.GetDynamicConfig(), dec.GetData().GetDynamicConfig())
 		prototest.AssertDeepEqual(r, expProxyCfg.GetBootstrapConfig(), dec.GetData().GetBootstrapConfig())

internal/resource/resourcetest/testing.go (10 changes)

@@ -3,14 +3,10 @@

 package resourcetest

+import "github.com/hashicorp/consul/sdk/testutil"
+
 // T represents the subset of testing.T methods that will be used
 // by the various functionality in this package
 type T interface {
-	Helper()
-	Log(args ...interface{})
-	Logf(format string, args ...interface{})
-	Errorf(format string, args ...interface{})
-	Fatalf(format string, args ...interface{})
-	FailNow()
-	Cleanup(func())
+	testutil.TestingTB
 }

sdk/testutil/io.go (2 changes)

@@ -16,7 +16,7 @@ var saveSnapshot = strings.ToLower(os.Getenv("TEST_SAVE_SNAPSHOT")) == "true"
 // If the directory cannot be created t.Fatal is called.
 // The directory will be removed when the test ends. Set TEST_NOCLEANUP env var
 // to prevent the directory from being removed.
-func TempDir(t testing.TB, name string) string {
+func TempDir(t TestingTB, name string) string {
 	if t == nil {
 		panic("argument t must be non-nil")
 	}

sdk/testutil/retry/doc.go (22 changes)

@@ -0,0 +1,22 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Package retry provides support for repeating operations in tests.
+//
+// A sample retry operation looks like this:
+//
+//	func TestX(t *testing.T) {
+//	    retry.Run(t, func(r *retry.R) {
+//	        if err := foo(); err != nil {
+//	            r.Errorf("foo: %s", err)
+//	            return
+//	        }
+//	    })
+//	}
+//
+// Run uses the DefaultFailer, which is a Timer with a Timeout of 7s,
+// and a Wait of 25ms. To customize, use RunWith.
+//
+// WARNING: unlike *testing.T, *retry.R#Fatal and FailNow *do not*
+// fail the test function entirely, only the current run the retry func
+package retry

sdk/testutil/retry/interface.go (35 changes)

@@ -0,0 +1,35 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package retry
+
+import (
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+var nilInf TestingTB = nil
+
+// Assertion that our TestingTB can be passed to
+var _ require.TestingT = nilInf
+var _ assert.TestingT = nilInf
+
+// TestingTB is an interface that describes the implementation of the testing object.
+// Using an interface that describes testing.TB instead of the actual implementation
+// makes testutil usable in a wider variety of contexts (e.g. use with ginkgo : https://godoc.org/github.com/onsi/ginkgo#GinkgoT)
+type TestingTB interface {
+	Cleanup(func())
+	Error(args ...any)
+	Errorf(format string, args ...any)
+	Fail()
+	FailNow()
+	Failed() bool
+	Fatal(args ...any)
+	Fatalf(format string, args ...any)
+	Helper()
+	Log(args ...any)
+	Logf(format string, args ...any)
+	Name() string
+	Setenv(key, value string)
+	TempDir() string
+}
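
Because *retry.R implements this interface, and the interface carries the Errorf/FailNow methods that testify requires, r can be handed directly to require and assert, which is what many of the call-site fixes above rely on. A small illustrative sketch, not part of the diff:

package example

import (
	"testing"

	"github.com/hashicorp/consul/sdk/testutil/retry"
	"github.com/stretchr/testify/require"
)

func TestTestifyWithRetry(t *testing.T) {
	retry.Run(t, func(r *retry.R) {
		// *retry.R satisfies require.TestingT/assert.TestingT, so a failed
		// assertion is reported through r and retried rather than failing
		// the surrounding test immediately.
		require.Equal(r, 4, 2+2)
	})
}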

sdk/testutil/retry/output.go (42 changes)

@@ -0,0 +1,42 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package retry
+
+import (
+	"bytes"
+	"fmt"
+	"runtime"
+	"strings"
+)
+
+func dedup(a []string) string {
+	if len(a) == 0 {
+		return ""
+	}
+	seen := map[string]struct{}{}
+	var b bytes.Buffer
+	for _, s := range a {
+		if _, ok := seen[s]; ok {
+			continue
+		}
+		seen[s] = struct{}{}
+		b.WriteString(s)
+		b.WriteRune('\n')
+	}
+	return b.String()
+}
+
+func decorate(s string) string {
+	_, file, line, ok := runtime.Caller(3)
+	if ok {
+		n := strings.LastIndex(file, "/")
+		if n >= 0 {
+			file = file[n+1:]
+		}
+	} else {
+		file = "???"
+		line = 1
+	}
+	return fmt.Sprintf("%s:%d: %s", file, line, s)
+}

sdk/testutil/retry/retry.go (353 changes)

@@ -1,263 +1,232 @@
 // Copyright (c) HashiCorp, Inc.
 // SPDX-License-Identifier: MPL-2.0

-// Package retry provides support for repeating operations in tests.
-//
-// A sample retry operation looks like this:
-//
-//	func TestX(t *testing.T) {
-//	    retry.Run(t, func(r *retry.R) {
-//	        if err := foo(); err != nil {
-//	            r.Errorf("foo: %s", err)
-//	            return
-//	        }
-//	    })
-//	}
-//
-// Run uses the DefaultFailer, which is a Timer with a Timeout of 7s,
-// and a Wait of 25ms. To customize, use RunWith.
-//
-// WARNING: unlike *testing.T, *retry.R#Fatal and FailNow *do not*
-// fail the test function entirely, only the current run the retry func
 package retry

 import (
-	"bytes"
 	"fmt"
-	"runtime"
-	"strings"
-	"time"
+	"os"
 )

-// Failer is an interface compatible with testing.T.
-type Failer interface {
-	Helper()
-
-	// Log is called for the final test output
-	Log(args ...interface{})
-
-	// FailNow is called when the retrying is abandoned.
-	FailNow()
-}
-
-// R provides context for the retryer.
-//
-// Logs from Logf, (Error|Fatal)(f) are gathered in an internal buffer
-// and printed only if the retryer fails. Printed logs are deduped and
-// prefixed with source code line numbers
-type R struct {
-	// fail is set by FailNow and (Fatal|Error)(f). It indicates the pass
-	// did not succeed, and should be retried
-	fail bool
-
-	// done is set by Stop. It indicates the entire run was a failure,
-	// and triggers t.FailNow()
-	done bool
-
-	output []string
-
-	cleanups []func()
-}
-
-func (r *R) Logf(format string, args ...interface{}) {
-	r.log(fmt.Sprintf(format, args...))
-}
-
-func (r *R) Log(args ...interface{}) {
-	r.log(fmt.Sprintln(args...))
-}
-
-func (r *R) Helper() {}
-
-// Cleanup register a function to be run to cleanup resources that
-// were allocated during the retry attempt. These functions are executed
-// after a retry attempt. If they panic, it will not stop further retry
-// attempts but will be cause for the overall test failure.
-func (r *R) Cleanup(fn func()) {
-	r.cleanups = append(r.cleanups, fn)
-}
-
-func (r *R) runCleanup() {
-	// Make sure that if a cleanup function panics,
-	// we still run the remaining cleanup functions.
-	defer func() {
-		err := recover()
-		if err != nil {
-			r.Stop(fmt.Errorf("error when performing test cleanup: %v", err))
-		}
-		if len(r.cleanups) > 0 {
-			r.runCleanup()
-		}
-	}()
-
-	for len(r.cleanups) > 0 {
-		var cleanup func()
-		if len(r.cleanups) > 0 {
-			last := len(r.cleanups) - 1
-			cleanup = r.cleanups[last]
-			r.cleanups = r.cleanups[:last]
-		}
-		if cleanup != nil {
-			cleanup()
-		}
-	}
-}
-
-// runFailed is a sentinel value to indicate that the func itself
-// didn't panic, rather that `FailNow` was called.
-type runFailed struct{}
-
-// FailNow stops run execution. It is roughly equivalent to:
-//
-//	r.Error("")
-//	return
-//
-// inside the function being run.
-func (r *R) FailNow() {
-	r.fail = true
-	panic(runFailed{})
-}
-
-// Fatal is equivalent to r.Logf(args) followed by r.FailNow(), i.e. the run
-// function should be exited. Retries on the next run are allowed. Fatal is
-// equivalent to
-//
-//	r.Error(args)
-//	return
-//
-// inside the function being run.
-func (r *R) Fatal(args ...interface{}) {
-	r.log(fmt.Sprint(args...))
-	r.FailNow()
-}
-
-// Fatalf is like Fatal but allows a format string
-func (r *R) Fatalf(format string, args ...interface{}) {
-	r.log(fmt.Sprintf(format, args...))
-	r.FailNow()
-}
-
-// Error indicates the current run encountered an error and should be retried.
-// It *does not* stop execution of the rest of the run function.
-func (r *R) Error(args ...interface{}) {
-	r.log(fmt.Sprint(args...))
-	r.fail = true
-}
-
-// Errorf is like Error but allows a format string
-func (r *R) Errorf(format string, args ...interface{}) {
-	r.log(fmt.Sprintf(format, args...))
-	r.fail = true
-}
-
-// If err is non-nil, equivalent to r.Fatal(err.Error()) followed by
-// r.FailNow(). Otherwise a no-op.
-func (r *R) Check(err error) {
-	if err != nil {
-		r.log(err.Error())
-		r.FailNow()
-	}
-}
-
-func (r *R) log(s string) {
-	r.output = append(r.output, decorate(s))
-}
-
-// Stop retrying, and fail the test, logging the specified error.
-// Does not stop execution, so return should be called after.
-func (r *R) Stop(err error) {
-	r.log(err.Error())
-	r.done = true
-}
-
-func decorate(s string) string {
-	_, file, line, ok := runtime.Caller(3)
-	if ok {
-		n := strings.LastIndex(file, "/")
-		if n >= 0 {
-			file = file[n+1:]
-		}
-	} else {
-		file = "???"
-		line = 1
-	}
-	return fmt.Sprintf("%s:%d: %s", file, line, s)
-}
-
-func Run(t Failer, f func(r *R)) {
-	t.Helper()
-	run(DefaultFailer(), t, f)
-}
-
-func RunWith(r Retryer, t Failer, f func(r *R)) {
-	t.Helper()
-	run(r, t, f)
-}
-
-func dedup(a []string) string {
-	if len(a) == 0 {
-		return ""
-	}
-	seen := map[string]struct{}{}
-	var b bytes.Buffer
-	for _, s := range a {
-		if _, ok := seen[s]; ok {
-			continue
-		}
-		seen[s] = struct{}{}
-		b.WriteString(s)
-		b.WriteRune('\n')
-	}
-	return b.String()
-}
-
-func run(r Retryer, t Failer, f func(r *R)) {
-	t.Helper()
-	rr := &R{}
-
-	fail := func() {
-		t.Helper()
-		out := dedup(rr.output)
-		if out != "" {
-			t.Log(out)
-		}
-		t.FailNow()
-	}
-
-	for r.Continue() {
-		// run f(rr), but if recover yields a runFailed value, we know
-		// FailNow was called.
-		func() {
-			defer rr.runCleanup()
-			defer func() {
-				if p := recover(); p != nil && p != (runFailed{}) {
-					panic(p)
-				}
-			}()
-			f(rr)
-		}()
-
-		switch {
-		case rr.done:
-			fail()
-			return
-		case !rr.fail:
-			return
-		}
-		rr.fail = false
-	}
-	fail()
-}
-
-// DefaultFailer provides default retry.Run() behavior for unit tests, namely
-// 7s timeout with a wait of 25ms
-func DefaultFailer() *Timer {
-	return &Timer{Timeout: 7 * time.Second, Wait: 25 * time.Millisecond}
-}
-
-// Retryer provides an interface for repeating operations
-// until they succeed or an exit condition is met.
-type Retryer interface {
-	// Continue returns true if the operation should be repeated, otherwise it
-	// returns false to indicate retrying should stop.
-	Continue() bool
-}
+var _ TestingTB = &R{}
+
+type R struct {
+	wrapped TestingTB
+	retryer Retryer
+
+	done             bool
+	fullOutput       bool
+	immediateCleanup bool
+
+	attempts []*attempt
+}
+
+func (r *R) Cleanup(clean func()) {
+	if r.immediateCleanup {
+		a := r.getCurrentAttempt()
+		a.cleanups = append(a.cleanups, clean)
+	} else {
+		r.wrapped.Cleanup(clean)
+	}
+}
+
+func (r *R) Error(args ...any) {
+	r.Log(args...)
+	r.Fail()
+}
+
+func (r *R) Errorf(format string, args ...any) {
+	r.Logf(format, args...)
+	r.Fail()
+}
+
+func (r *R) Fail() {
+	r.getCurrentAttempt().failed = true
+}
+
+func (r *R) FailNow() {
+	r.Fail()
+	panic(attemptFailed{})
+}
+
+func (r *R) Failed() bool {
+	return r.getCurrentAttempt().failed
+}
+
+func (r *R) Fatal(args ...any) {
+	r.Log(args...)
+	r.FailNow()
+}
+
+func (r *R) Fatalf(format string, args ...any) {
+	r.Logf(format, args...)
+	r.FailNow()
+}
+
+func (r *R) Helper() {
+	// *testing.T will just record which functions are helpers by their addresses and
+	// it doesn't much matter where where we record that they are helpers
+	r.wrapped.Helper()
+}
+
+func (r *R) Log(args ...any) {
+	r.log(fmt.Sprintln(args...))
+}
+
+func (r *R) Logf(format string, args ...any) {
+	r.log(fmt.Sprintf(format, args...))
+}
+
+// Name will return the name of the underlying TestingT.
+func (r *R) Name() string {
+	return r.wrapped.Name()
+}
+
+// Setenv will save the current value of the specified env var, set it to the
+// specified value and then restore it to the original value in a cleanup function
+// once the retry attempt has finished.
+func (r *R) Setenv(key, value string) {
+	prevValue, ok := os.LookupEnv(key)
+
+	if err := os.Setenv(key, value); err != nil {
+		r.wrapped.Fatalf("cannot set environment variable: %v", err)
+	}
+
+	if ok {
+		r.Cleanup(func() {
+			os.Setenv(key, prevValue)
+		})
+	} else {
+		r.Cleanup(func() {
+			os.Unsetenv(key)
+		})
+	}
+}
+
+// TempDir will use the wrapped TestingT to create a temporary directory
+// that will be cleaned up when ALL RETRYING has finished.
+func (r *R) TempDir() string {
+	return r.wrapped.TempDir()
+}
+
+// Check will call r.Fatal(err) if err is not nil
+func (r *R) Check(err error) {
+	if err != nil {
+		r.Fatal(err)
+	}
+}
+
+func (r *R) Stop(err error) {
+	r.log(err.Error())
+	r.done = true
+}
+
+func (r *R) failCurrentAttempt() {
+	r.getCurrentAttempt().failed = true
+}
+
+func (r *R) log(s string) {
+	a := r.getCurrentAttempt()
+	a.output = append(a.output, decorate(s))
+}
+
+func (r *R) getCurrentAttempt() *attempt {
+	if len(r.attempts) == 0 {
+		panic("no retry attempts have been started yet")
+	}
+	return r.attempts[len(r.attempts)-1]
+}
+
+// cleanupAttempt will perform all the register cleanup operations recorded
+// during execution of the single round of the test function.
+func (r *R) cleanupAttempt(a *attempt) {
+	// Make sure that if a cleanup function panics,
+	// we still run the remaining cleanup functions.
+	defer func() {
+		err := recover()
+		if err != nil {
+			r.Stop(fmt.Errorf("error when performing test cleanup: %v", err))
+		}
+		if len(a.cleanups) > 0 {
+			r.cleanupAttempt(a)
+		}
+	}()
+
+	for len(a.cleanups) > 0 {
+		var cleanup func()
+		if len(a.cleanups) > 0 {
+			last := len(a.cleanups) - 1
+			cleanup = a.cleanups[last]
+			a.cleanups = a.cleanups[:last]
+		}
+		if cleanup != nil {
+			cleanup()
+		}
+	}
+}
+
+// runAttempt will execute one round of the test function and handle cleanups and panic recovery
+// of a failed attempt that should not stop retrying.
+func (r *R) runAttempt(f func(r *R)) {
+	r.Helper()
+
+	a := &attempt{}
+	r.attempts = append(r.attempts, a)
+
+	defer r.cleanupAttempt(a)
+	defer func() {
+		if p := recover(); p != nil && p != (attemptFailed{}) {
+			panic(p)
+		}
+	}()
+	f(r)
+}
+
+func (r *R) run(f func(r *R)) {
+	r.Helper()
+
+	for r.retryer.Continue() {
+		r.runAttempt(f)
+
+		switch {
+		case r.done:
+			r.recordRetryFailure()
+			return
+		case !r.Failed():
+			// the current attempt did not fail so we can go ahead and return
+			return
+		}
+	}
+
+	// We cannot retry any more and no attempt has succeeded yet.
+	r.recordRetryFailure()
+}
+
+func (r *R) recordRetryFailure() {
+	r.Helper()
+	output := r.getCurrentAttempt().output
+	if r.fullOutput {
+		var combined []string
+		for _, attempt := range r.attempts {
+			combined = append(combined, attempt.output...)
+		}
+		output = combined
+	}
+
+	out := dedup(output)
+	if out != "" {
+		r.wrapped.Log(out)
+	}
+	r.wrapped.FailNow()
+}
+
+type attempt struct {
+	failed   bool
+	output   []string
+	cleanups []func()
+}
+
+// attemptFailed is a sentinel value to indicate that the func itself
+// didn't panic, rather that `FailNow` was called.
+type attemptFailed struct{}
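
The attempt bookkeeping above is what makes r.Fatal safe inside a retry block: FailNow panics with the attemptFailed sentinel, runAttempt recovers it, and only the current attempt is marked failed. A hypothetical sketch of the resulting behavior, not part of the diff:

package example

import (
	"fmt"
	"testing"

	"github.com/hashicorp/consul/sdk/testutil/retry"
)

func TestFatalOnlyFailsAttempt(t *testing.T) {
	attempts := 0
	retry.Run(t, func(r *retry.R) {
		attempts++
		if attempts < 3 {
			// Panics with the attemptFailed sentinel; runAttempt recovers
			// it and the retryer schedules another attempt.
			r.Fatal(fmt.Errorf("attempt %d not ready", attempts))
		}
	})
	// Reached only after an attempt succeeds; the test as a whole passed.
	if attempts != 3 {
		t.Fatalf("expected 3 attempts, got %d", attempts)
	}
}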

sdk/testutil/retry/retry_test.go (126 changes)

@@ -62,7 +62,7 @@ func TestBasics(t *testing.T) {
 	t.Run("Fatal returns from func, but does not fail test", func(t *testing.T) {
 		i := 0
 		gotHere := false
-		ft := &fakeT{}
+		ft := &fakeT{T: t}
 		Run(ft, func(r *R) {
 			i++
 			t.Logf("i: %d; r: %#v", i, r)
@@ -97,7 +97,7 @@ func TestBasics(t *testing.T) {

 func TestRunWith(t *testing.T) {
 	t.Run("calls FailNow after exceeding retries", func(t *testing.T) {
-		ft := &fakeT{}
+		ft := &fakeT{T: t}
 		iter := 0
 		RunWith(&Counter{Count: 3, Wait: time.Millisecond}, ft, func(r *R) {
 			iter++
@@ -109,7 +109,7 @@ func TestRunWith(t *testing.T) {
 	})

 	t.Run("Stop ends the retrying", func(t *testing.T) {
-		ft := &fakeT{}
+		ft := &fakeT{T: t}
 		iter := 0
 		RunWith(&Counter{Count: 5, Wait: time.Millisecond}, ft, func(r *R) {
 			iter++
@@ -128,38 +128,54 @@ func TestRunWith(t *testing.T) {
 	})
 }

+func TestCleanup_Passthrough(t *testing.T) {
+}
+
 func TestCleanup(t *testing.T) {
 	t.Run("basic", func(t *testing.T) {
-		ft := &fakeT{}
+		ft := &fakeT{T: t}
 		cleanupsExecuted := 0
-		RunWith(&Counter{Count: 2, Wait: time.Millisecond}, ft, func(r *R) {
-			r.Cleanup(func() {
-				cleanupsExecuted += 1
-			})
-		})
+		Run(
+			ft,
+			func(r *R) {
+				r.Cleanup(func() {
+					cleanupsExecuted += 1
+				})
+			},
+			WithImmediateCleanup(),
+			WithRetryer(&Counter{Count: 2, Wait: time.Millisecond}),
+		)

 		require.Equal(t, 0, ft.fails)
 		require.Equal(t, 1, cleanupsExecuted)
 	})

 	t.Run("cleanup-panic-recovery", func(t *testing.T) {
-		ft := &fakeT{}
+		ft := &fakeT{T: t}
 		cleanupsExecuted := 0
-		RunWith(&Counter{Count: 2, Wait: time.Millisecond}, ft, func(r *R) {
-			r.Cleanup(func() {
-				cleanupsExecuted += 1
-			})
+		Run(
+			ft,
+			func(r *R) {
+				r.Cleanup(func() {
+					cleanupsExecuted += 1
+				})

 			r.Cleanup(func() {
 				cleanupsExecuted += 1
 				panic(fmt.Errorf("fake test error"))
 			})

 			r.Cleanup(func() {
 				cleanupsExecuted += 1
 			})

 			// test is successful but should fail due to the cleanup panicing
-		})
+			},
+			WithRetryer(&Counter{Count: 2, Wait: time.Millisecond}),
+			WithImmediateCleanup(),
+		)

 		require.Equal(t, 3, cleanupsExecuted)
 		require.Equal(t, 1, ft.fails)
@@ -167,33 +183,71 @@ func TestCleanup(t *testing.T) {
 	})

 	t.Run("cleanup-per-retry", func(t *testing.T) {
-		ft := &fakeT{}
+		ft := &fakeT{T: t}
 		iter := 0
 		cleanupsExecuted := 0
-		RunWith(&Counter{Count: 3, Wait: time.Millisecond}, ft, func(r *R) {
-			if cleanupsExecuted != iter {
-				r.Stop(fmt.Errorf("cleanups not executed between retries"))
-				return
-			}
-			iter += 1
+		Run(
+			ft,
+			func(r *R) {
+				if cleanupsExecuted != iter {
+					r.Stop(fmt.Errorf("cleanups not executed between retries"))
+					return
+				}
+				iter += 1

 			r.Cleanup(func() {
 				cleanupsExecuted += 1
 			})

 			r.FailNow()
-		})
+			},
+			WithRetryer(&Counter{Count: 3, Wait: time.Millisecond}),
+			WithImmediateCleanup(),
+		)

 		require.Equal(t, 3, cleanupsExecuted)

 		// ensure that r.Stop hadn't been called. If it was then we would
 		// have log output
 		require.Len(t, ft.out, 0)
 	})
+
+	t.Run("passthrough-to-t", func(t *testing.T) {
+		cleanupsExecuted := 0
+		require.True(t, t.Run("internal", func(t *testing.T) {
+			iter := 0
+			Run(
+				t,
+				func(r *R) {
+					iter++
+					r.Cleanup(func() {
+						cleanupsExecuted += 1
+					})
+
+					// fail all but the last one to ensure the right number of cleanups
+					// are eventually executed
+					if iter < 3 {
+						r.FailNow()
+					}
+				},
+				WithRetryer(&Counter{Count: 3, Wait: time.Millisecond}),
+			)
+
+			// at this point nothing should be cleaned up
+			require.Equal(t, 0, cleanupsExecuted)
+		}))
+
+		// now since the subtest finished the test cleanup funcs
+		// should have been executed.
+		require.Equal(t, 3, cleanupsExecuted)
+	})
 }

 type fakeT struct {
 	fails int
 	out   []string
+	*testing.T
 }

 func (f *fakeT) Helper() {}

@@ -206,4 +260,4 @@ func (f *fakeT) FailNow() {
 	f.fails++
 }

-var _ Failer = &fakeT{}
+var _ TestingTB = &fakeT{}

sdk/testutil/retry/retryer.go (36 changes)

@@ -0,0 +1,36 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package retry
+
+import "time"
+
+// Retryer provides an interface for repeating operations
+// until they succeed or an exit condition is met.
+type Retryer interface {
+	// Continue returns true if the operation should be repeated, otherwise it
+	// returns false to indicate retrying should stop.
+	Continue() bool
+}
+
+// DefaultRetryer provides default retry.Run() behavior for unit tests, namely
+// 7s timeout with a wait of 25ms
+func DefaultRetryer() Retryer {
+	return &Timer{Timeout: 7 * time.Second, Wait: 25 * time.Millisecond}
+}
+
+// ThirtySeconds repeats an operation for thirty seconds and waits 500ms in between.
+// Best for known slower operations like waiting on eventually consistent state.
+func ThirtySeconds() *Timer {
+	return &Timer{Timeout: 30 * time.Second, Wait: 500 * time.Millisecond}
+}
+
+// TwoSeconds repeats an operation for two seconds and waits 25ms in between.
+func TwoSeconds() *Timer {
+	return &Timer{Timeout: 2 * time.Second, Wait: 25 * time.Millisecond}
+}
+
+// ThreeTimes repeats an operation three times and waits 25ms in between.
+func ThreeTimes() *Counter {
+	return &Counter{Count: 3, Wait: 25 * time.Millisecond}
+}
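
For callers, these canned retryers plug into RunWith, or into the new WithRetryer option shown in run.go below. An illustrative sketch, not part of the diff (replicaCaughtUp is a hypothetical predicate):

package example

import (
	"testing"
	"time"

	"github.com/hashicorp/consul/sdk/testutil/retry"
)

// replicaCaughtUp is a hypothetical predicate for eventually consistent state.
func replicaCaughtUp() bool { return true }

func TestEventuallyConsistentRead(t *testing.T) {
	// ThirtySeconds() suits slow convergence; a hand-built Counter caps the
	// number of attempts instead of the elapsed time.
	retry.RunWith(&retry.Counter{Count: 5, Wait: 100 * time.Millisecond}, t, func(r *retry.R) {
		if !replicaCaughtUp() {
			r.Fatal("replica still behind")
		}
	})
}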

sdk/testutil/retry/run.go (48 changes)

@@ -0,0 +1,48 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package retry
+
+type Option func(r *R)
+
+func WithRetryer(retryer Retryer) Option {
+	return func(r *R) {
+		r.retryer = retryer
+	}
+}
+
+func WithFullOutput() Option {
+	return func(r *R) {
+		r.fullOutput = true
+	}
+}
+
+// WithImmediateCleanup will cause all cleanup operations added
+// by calling the Cleanup method on *R to be performed after
+// the retry attempt completes (regardless of pass/fail status)
+// Use this only if all resources created during the retry loop should
+// not persist after the retry has finished.
+func WithImmediateCleanup() Option {
+	return func(r *R) {
+		r.immediateCleanup = true
+	}
+}
+
+func Run(t TestingTB, f func(r *R), opts ...Option) {
+	t.Helper()
+	r := &R{
+		wrapped: t,
+		retryer: DefaultRetryer(),
+	}
+
+	for _, opt := range opts {
+		opt(r)
+	}
+
+	r.run(f)
+}
+
+func RunWith(r Retryer, t TestingTB, f func(r *R)) {
+	t.Helper()
+	Run(t, f, WithRetryer(r))
+}
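
Run now accepts functional options, which is how the test changes above opt into per-attempt cleanup. A hypothetical usage sketch, not part of the diff (openResource stands in for any per-attempt resource):

package example

import (
	"testing"
	"time"

	"github.com/hashicorp/consul/sdk/testutil/retry"
)

// resource is a hypothetical per-attempt resource.
type resource struct{}

func openResource() *resource     { return &resource{} }
func (r *resource) Close()        {}
func (r *resource) Ready() bool   { return true }

func TestWithOptions(t *testing.T) {
	retry.Run(t,
		func(r *retry.R) {
			res := openResource()
			// With WithImmediateCleanup, this runs at the end of each
			// attempt; without it, the cleanup is delegated to the wrapped
			// t and runs once, after the whole test finishes.
			r.Cleanup(res.Close)
			if !res.Ready() {
				r.FailNow()
			}
		},
		retry.WithRetryer(&retry.Counter{Count: 3, Wait: time.Millisecond}),
		retry.WithImmediateCleanup(),
		retry.WithFullOutput(), // log output from every attempt, not just the last
	)
}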

sdk/testutil/retry/timer.go (16 changes)

@@ -5,22 +5,6 @@ package retry

 import "time"

-// ThirtySeconds repeats an operation for thirty seconds and waits 500ms in between.
-// Best for known slower operations like waiting on eventually consistent state.
-func ThirtySeconds() *Timer {
-	return &Timer{Timeout: 30 * time.Second, Wait: 500 * time.Millisecond}
-}
-
-// TwoSeconds repeats an operation for two seconds and waits 25ms in between.
-func TwoSeconds() *Timer {
-	return &Timer{Timeout: 2 * time.Second, Wait: 25 * time.Millisecond}
-}
-
-// ThreeTimes repeats an operation three times and waits 25ms in between.
-func ThreeTimes() *Counter {
-	return &Counter{Count: 3, Wait: 25 * time.Millisecond}
-}
-
 // Timer repeats an operation for a given amount
 // of time and waits between subsequent operations.
 type Timer struct {

25
sdk/testutil/types.go

@@ -3,14 +3,33 @@
package testutil

+import (
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+)
+
+var nilInf TestingTB = nil
+
+// Assertion that our TestingTB can be passed to
+var _ require.TestingT = nilInf
+var _ assert.TestingT = nilInf
+
// TestingTB is an interface that describes the implementation of the testing object.
// Using an interface that describes testing.TB instead of the actual implementation
// makes testutil usable in a wider variety of contexts (e.g. use with ginkgo : https://godoc.org/github.com/onsi/ginkgo#GinkgoT)
type TestingTB interface {
    Cleanup(func())
+   Error(args ...any)
+   Errorf(format string, args ...any)
+   Fail()
+   FailNow()
    Failed() bool
-   Logf(format string, args ...interface{})
-   Name() string
-   Fatalf(fmt string, args ...interface{})
+   Fatal(args ...any)
+   Fatalf(format string, args ...any)
    Helper()
+   Log(args ...any)
+   Logf(format string, args ...any)
+   Name() string
+   Setenv(key, value string)
+   TempDir() string
}
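The nilInf assignments above give a compile-time guarantee that a TestingTB can be handed to testify. The same idiom verifies that *testing.T satisfies the widened interface, which is what keeps helpers usable from plain tests and alternative runners alike; a sketch:

package testutil_test

import (
    "testing"

    "github.com/hashicorp/consul/sdk/testutil"
)

// Compile-time proof that *testing.T implements the widened TestingTB,
// so helpers written against the interface accept real tests directly.
var _ testutil.TestingTB = (*testing.T)(nil)

// logIn demonstrates a helper written against the interface.
func logIn(tb testutil.TestingTB) {
    tb.Helper()
    tb.Logf("test %s using scratch dir %s", tb.Name(), tb.TempDir())
}

func TestInterfaceAccepts(t *testing.T) {
    logIn(t)
}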

28
test-integ/tenancy/client.go

@@ -23,18 +23,6 @@ import (
//
// TODO: Move to a general package if used more widely.

-// T represents the subset of testing.T methods that will be used
-// by the various functionality in this package
-type T interface {
-    Helper()
-    Log(args ...interface{})
-    Logf(format string, args ...interface{})
-    Errorf(format string, args ...interface{})
-    Fatalf(format string, args ...interface{})
-    FailNow()
-    Cleanup(func())
-}
-
type ClientOption func(*Client)

func WithACLToken(token string) ClientOption {
@@ -76,13 +64,13 @@ func (client *Client) SetRetryerConfig(timeout time.Duration, wait time.Duration
    client.wait = wait
}

-func (client *Client) retry(t T, fn func(r *retry.R)) {
+func (client *Client) retry(t testutil.TestingTB, fn func(r *retry.R)) {
    t.Helper()
    retryer := &retry.Timer{Timeout: client.timeout, Wait: client.wait}
    retry.RunWith(retryer, t, fn)
}

-func (client *Client) Context(t T) context.Context {
+func (client *Client) Context(t testutil.TestingTB) context.Context {
    ctx := testutil.TestContext(t)

    if client.token != "" {
@@ -95,7 +83,7 @@ func (client *Client) Context(t T) context.Context {
    return ctx
}

-func (client *Client) RequireResourceNotFound(t T, id *pbresource.ID) {
+func (client *Client) RequireResourceNotFound(t testutil.TestingTB, id *pbresource.ID) {
    t.Helper()

    rsp, err := client.Read(client.Context(t), &pbresource.ReadRequest{Id: id})
@@ -104,7 +92,7 @@ func (client *Client) RequireResourceNotFound(t T, id *pbresource.ID) {
    require.Nil(t, rsp)
}

-func (client *Client) RequireResourceExists(t T, id *pbresource.ID) *pbresource.Resource {
+func (client *Client) RequireResourceExists(t testutil.TestingTB, id *pbresource.ID) *pbresource.Resource {
    t.Helper()

    rsp, err := client.Read(client.Context(t), &pbresource.ReadRequest{Id: id})
@@ -117,7 +105,7 @@ func ToGVK(resourceType *pbresource.Type) string {
    return fmt.Sprintf("%s.%s.%s", resourceType.Group, resourceType.GroupVersion, resourceType.Kind)
}

-func (client *Client) WaitForResourceExists(t T, id *pbresource.ID) *pbresource.Resource {
+func (client *Client) WaitForResourceExists(t testutil.TestingTB, id *pbresource.ID) *pbresource.Resource {
    t.Helper()

    var res *pbresource.Resource
@@ -128,7 +116,7 @@ func (client *Client) WaitForResourceExists(t T, id *pbresource.ID) *pbresource.
    return res
}

-func (client *Client) WaitForDeletion(t T, id *pbresource.ID) {
+func (client *Client) WaitForDeletion(t testutil.TestingTB, id *pbresource.ID) {
    t.Helper()

    client.retry(t, func(r *retry.R) {
@@ -139,12 +127,12 @@ func (client *Client) WaitForDeletion(t T, id *pbresource.ID) {
// MustDelete will delete a resource by its id, retrying if necessary and fail the test
// if it cannot delete it within the timeout. The clients request delay settings are
// taken into account with this operation.
-func (client *Client) MustDelete(t T, id *pbresource.ID) {
+func (client *Client) MustDelete(t testutil.TestingTB, id *pbresource.ID) {
    t.Helper()
    client.retryDelete(t, id)
}

-func (client *Client) retryDelete(t T, id *pbresource.ID) {
+func (client *Client) retryDelete(t testutil.TestingTB, id *pbresource.ID) {
    t.Helper()
    ctx := client.Context(t)
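With the package-local T interface removed, these helpers accept the shared testutil.TestingTB, so a plain *testing.T (or a *retry.R) can drive the full lifecycle. A sketch of typical usage; the newTestClient constructor and the resource id are illustrative:

package tenancy

import (
    "testing"
    "time"

    "github.com/hashicorp/consul/proto-public/pbresource"
)

// A sketch of the lifecycle helpers above driven by a plain *testing.T.
func TestResourceLifecycle(t *testing.T) {
    client := newTestClient(t) // hypothetical constructor for this sketch
    client.SetRetryerConfig(15*time.Second, 250*time.Millisecond)

    id := &pbresource.ID{Name: "example"} // illustrative id

    // Retries per the config above until the resource is readable.
    res := client.WaitForResourceExists(t, id)
    _ = res

    // Deletes with retries, then polls until reads report not-found.
    client.MustDelete(t, id)
    client.WaitForDeletion(t, id)
}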

7
test-integ/topoutil/asserter.go

@@ -14,6 +14,7 @@ import (
    "github.com/hashicorp/consul/api"
    "github.com/hashicorp/consul/proto-public/pbresource"
+   "github.com/hashicorp/consul/sdk/testutil"
    "github.com/hashicorp/consul/sdk/testutil/retry"
    libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
    "github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
@@ -52,13 +53,13 @@ func NewAsserter(sp SprawlLite) *Asserter {
    }
}

-func (a *Asserter) mustGetHTTPClient(t *testing.T, cluster string) *http.Client {
+func (a *Asserter) mustGetHTTPClient(t testutil.TestingTB, cluster string) *http.Client {
    client, err := a.httpClientFor(cluster)
    require.NoError(t, err)
    return client
}

-func (a *Asserter) mustGetAPIClient(t *testing.T, cluster string) *api.Client {
+func (a *Asserter) mustGetAPIClient(t testutil.TestingTB, cluster string) *api.Client {
    clu := a.sp.Topology().Clusters[cluster]
    cl, err := a.sp.APIClientForCluster(clu.Name, "")
    require.NoError(t, err)
@@ -208,7 +209,7 @@ type testingT interface {
//
// We treat 400, 503, and 504s as retryable errors
func (a *Asserter) fortioFetch2Destination(
-   t testingT,
+   t testutil.TestingTB,
    client *http.Client,
    addr string,
    dest *topology.Destination,

2
test/integration/consul-container/libs/assert/grpc.go

@@ -26,7 +26,7 @@ func GRPCPing(t *testing.T, addr string) {
    var msg *fgrpc.PingMessage
    retries := 0
    retry.RunWith(&retry.Timer{Timeout: time.Minute, Wait: 25 * time.Millisecond}, t, func(r *retry.R) {
-       t.Logf("making grpc call to %s", addr)
+       r.Logf("making grpc call to %s", addr)
        retries += 1
        msg, err = pingCl.Ping(context.Background(), &fgrpc.PingMessage{
            // use addr as payload so we have something variable to check against
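This t.Logf to r.Logf swap is the shape of most fixes in this PR: inside a retry block, logs and assertions must go through the *retry.R so that a failed attempt is retried rather than failing or logging against the outer test. A minimal sketch of the pattern lint-consul-retry enforces; the address is illustrative:

package retryexample

import (
    "net"
    "testing"

    "github.com/hashicorp/consul/sdk/testutil/retry"
    "github.com/stretchr/testify/require"
)

// A sketch of the enforced pattern: only r, never the outer t,
// is touched inside the retry body.
func TestDialEventually(t *testing.T) {
    addr := "127.0.0.1:8500" // illustrative address
    retry.Run(t, func(r *retry.R) {
        r.Logf("dialing %s", addr) // r.Logf, not t.Logf
        conn, err := net.Dial("tcp", addr)
        require.NoError(r, err) // r satisfies require.TestingT
        _ = conn.Close()
    })
}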

6
test/integration/consul-container/libs/assert/service.go

@@ -221,7 +221,7 @@ func doHTTPServiceEchoesWithClient(
    }

    retry.RunWith(failer(), t, func(r *retry.R) {
-       t.Logf("making call to %s", url)
+       r.Logf("making call to %s", url)

        reader := strings.NewReader(phrase)
        req, err := http.NewRequest("POST", url, reader)
@@ -242,7 +242,7 @@ func doHTTPServiceEchoesWithClient(
        defer res.Body.Close()

        statusCode := res.StatusCode
-       t.Logf("...got response code %d", statusCode)
+       r.Logf("...got response code %d", statusCode)
        require.Equal(r, 200, statusCode)

        body, err := io.ReadAll(res.Body)
@@ -342,7 +342,7 @@ func WaitForFortioNameWithClient(t *testing.T, r retry.Retryer, urlbase string,
// It retries with timeout defaultHTTPTimeout and wait defaultHTTPWait.
//
// client must be a custom http.Client
-func FortioNameWithClient(t retry.Failer, urlbase string, name string, reqHost string, client *http.Client) (string, error) {
+func FortioNameWithClient(t retry.TestingTB, urlbase string, name string, reqHost string, client *http.Client) (string, error) {
    t.Helper()
    var fortioNameRE = regexp.MustCompile("\nFORTIO_NAME=(.+)\n")
    var body []byte

19
test/integration/consul-container/test/envoy_extensions/ext_authz_test.go

@@ -15,6 +15,7 @@ import (
    "github.com/testcontainers/testcontainers-go"

    "github.com/hashicorp/consul/api"
+   "github.com/hashicorp/consul/sdk/testutil"
    "github.com/hashicorp/consul/sdk/testutil/retry"
    libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
    libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
@@ -87,8 +88,8 @@ func TestExtAuthzLocal(t *testing.T) {
    // Make requests to the static-server. We expect that all requests are rejected with 403 Forbidden
    // unless they are to the /allow path.
    baseURL := fmt.Sprintf("http://localhost:%d", port)
-   doRequest(t, baseURL, http.StatusForbidden)
-   doRequest(t, baseURL+"/allow", http.StatusOK)
+   retryRequest(t, baseURL, http.StatusForbidden)
+   retryRequest(t, baseURL+"/allow", http.StatusOK)
}

func createServices(t *testing.T, cluster *libcluster.Cluster) libservice.Service {
@@ -161,10 +162,16 @@ func createLocalAuthzService(t *testing.T, cluster *libcluster.Cluster) {
    }
}

-func doRequest(t *testing.T, url string, expStatus int) {
-   t.Helper()
+func retryRequest(t *testing.T, url string, expStatus int) {
    retry.RunWith(&retry.Timer{Timeout: 5 * time.Second, Wait: time.Second}, t, func(r *retry.R) {
-       resp, err := cleanhttp.DefaultClient().Get(url)
-       require.NoError(r, err)
-       require.Equal(r, expStatus, resp.StatusCode)
+       doRequest(r, url, expStatus)
    })
}
+
+func doRequest(t testutil.TestingTB, url string, expStatus int) {
+   t.Helper()
+   resp, err := cleanhttp.DefaultClient().Get(url)
+   require.NoError(t, err)
+   require.Equal(t, expStatus, resp.StatusCode)
+}
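Splitting retryRequest from doRequest lets one helper serve both one-shot and retried calls, since *testing.T and *retry.R both satisfy testutil.TestingTB. A sketch of reusing it; the package name, wrapper function, and URLs are illustrative:

package envoyextensions // assumed to be the test package above

import (
    "net/http"
    "testing"

    "github.com/hashicorp/consul/sdk/testutil/retry"
)

// A sketch reusing doRequest from the diff above (same package assumed).
func checkAuthzEndpoints(t *testing.T, baseURL string) {
    // One-shot: any unexpected status fails immediately.
    doRequest(t, baseURL+"/allow", http.StatusOK)

    // Retried: each attempt re-issues the request until it matches
    // the expected status or the retryer's budget runs out.
    retry.Run(t, func(r *retry.R) {
        doRequest(r, baseURL, http.StatusForbidden)
    })
}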

12
test/integration/consul-container/test/envoy_extensions/otel_access_logging_test.go

@@ -40,8 +40,6 @@ import (
// - Make sure a call to the client sidecar local bind port results in Envoy access logs being sent to the
//   otel-collector.
func TestOTELAccessLogging(t *testing.T) {
-   t.Parallel()
-
    cluster, _, _ := topology.NewCluster(t, &topology.ClusterConfig{
        NumServers: 1,
        NumClients: 1,
@@ -87,8 +85,9 @@ func TestOTELAccessLogging(t *testing.T) {
    // Make requests from the static-client to the static-server and look for the access logs
    // to show up in the `otel-collector` container logs.
-   retry.RunWith(&retry.Timer{Timeout: 60 * time.Second, Wait: time.Second}, t, func(r *retry.R) {
-       doRequest(t, fmt.Sprintf("http://localhost:%d", port), http.StatusOK)
+   retry.Run(t, func(r *retry.R) {
+       doRequest(r, fmt.Sprintf("http://localhost:%d", port), http.StatusOK)
        reader, err := launchInfo.Container.Logs(context.Background())
        require.NoError(r, err)
        log, err := io.ReadAll(reader)
@@ -96,7 +95,10 @@ func TestOTELAccessLogging(t *testing.T) {
        require.Contains(r, string(log), `log_name: Str(otel-integration-test)`)
        require.Contains(r, string(log), `cluster_name: Str(static-server)`)
        require.Contains(r, string(log), `node_name: Str(static-server-sidecar-proxy)`)
-   })
+   },
+       retry.WithFullOutput(),
+       retry.WithRetryer(&retry.Timer{Timeout: 60 * time.Second, Wait: time.Second}),
+   )
}

func createLocalOTELService(t *testing.T, cluster *libcluster.Cluster) *libcluster.LaunchInfo {

4
test/integration/consul-container/test/gateways/terminating_gateway_test.go

@@ -173,12 +173,12 @@ func assertHTTPRequestToServiceAddress(t *testing.T, client *libservice.ConnectC
    upstreamURL := fmt.Sprintf("http://localhost:%d/debug?env=dump", port)
    retry.RunWith(requestRetryTimer, t, func(r *retry.R) {
        out, err := client.Exec(context.Background(), []string{"curl", "-s", upstreamURL})
-       t.Logf("curl request to upstream service address: url=%s\nerr = %v\nout = %s", upstreamURL, err, out)
+       r.Logf("curl request to upstream service address: url=%s\nerr = %v\nout = %s", upstreamURL, err, out)

        if expSuccess {
            require.NoError(r, err)
            require.Contains(r, out, fmt.Sprintf("FORTIO_NAME=%s", serviceName))
-           t.Logf("successfuly messaged %s", serviceName)
+           r.Logf("successfuly messaged %s", serviceName)
        } else {
            require.Error(r, err)
            require.Contains(r, err.Error(), "exit code 52")

5
test/integration/consul-container/test/ratelimit/ratelimit_test.go

@@ -11,6 +11,7 @@ import (
    "time"

    "github.com/hashicorp/consul/api"
+   "github.com/hashicorp/consul/sdk/testutil"
    "github.com/hashicorp/consul/sdk/testutil/retry"

    "github.com/stretchr/testify/require"
@@ -282,7 +283,7 @@ func setupClusterAndClient(t *testing.T, config *libtopology.ClusterConfig, isSe
    return cluster, client
}

-func checkForMetric(t require.TestingT, cluster *libcluster.Cluster, operationName string, expectedLimitType string, expectedMode string, expectMetric bool) {
+func checkForMetric(t testutil.TestingTB, cluster *libcluster.Cluster, operationName string, expectedLimitType string, expectedMode string, expectMetric bool) {
    // validate metrics
    server, err := cluster.GetClient(nil, true)
    require.NoError(t, err)
@@ -320,7 +321,7 @@ func checkForMetric(t require.TestingT, cluster *libcluster.Cluster, operationNa
    }
}

-func checkLogsForMessage(t require.TestingT, logs []string, msg string, operationName string, logType string, logShouldExist bool) {
+func checkLogsForMessage(t testutil.TestingTB, logs []string, msg string, operationName string, logType string, logShouldExist bool) {
    if logShouldExist {
        found := false
        for _, log := range logs {

4
test/integration/consul-container/test/tproxy/tproxy_test.go

@@ -136,7 +136,7 @@ func assertHTTPRequestToVirtualAddress(t *testing.T, clientService libservice.Se
            `, virtualHostname),
        },
        )
-       t.Logf("curl request to upstream virtual address\nerr = %v\nout = %s", err, out)
+       r.Logf("curl request to upstream virtual address\nerr = %v\nout = %s", err, out)
        require.NoError(r, err)
        require.Regexp(r, `Virtual IP: 240.0.0.\d+`, out)
        require.Contains(r, out, fmt.Sprintf("FORTIO_NAME=%s", serverName))
@@ -155,7 +155,7 @@ func assertHTTPRequestToServiceAddress(t *testing.T, client, server libcluster.A
    upstreamURL := fmt.Sprintf("http://%s:8080/debug?env=dump", server.GetIP())
    retry.RunWith(requestRetryTimer, t, func(r *retry.R) {
        out, err := client.Exec(context.Background(), []string{"curl", "-s", upstreamURL})
-       t.Logf("curl request to upstream service address: url=%s\nerr = %v\nout = %s", upstreamURL, err, out)
+       r.Logf("curl request to upstream service address: url=%s\nerr = %v\nout = %s", upstreamURL, err, out)

        if expSuccess {
            require.NoError(r, err)

1
testrpc/wait.go

@@ -108,6 +108,7 @@ func WaitForTestAgent(t *testing.T, rpc rpcFn, dc string, options ...waitOption)
    var checks structs.IndexedHealthChecks

    retry.Run(t, func(r *retry.R) {
+       r.Helper()
        dcReq := &structs.DCSpecificRequest{
            Datacenter: dc,
            QueryOptions: structs.QueryOptions{Token: flat.Token},
