local state: drop retry loops from tests

Since the tests now use synchronous calls for state syncing,
we no longer need retry loops to wait for the changes to
propagate.
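For illustration, here is a minimal before/after sketch of the pattern being removed. This is not code from this change: nodeServiceCount is a hypothetical stand-in for the Catalog.NodeServices RPC the real tests issue against a TestAgent, and the import path assumes the testutil/retry package as vendored in the repository at the time.

package local_test

import (
	"testing"

	"github.com/hashicorp/consul/testutil/retry"
)

// nodeServiceCount is a hypothetical stand-in for querying the catalog via
// a.RPC("Catalog.NodeServices", ...); it exists only to keep this sketch
// self-contained.
func nodeServiceCount() (int, error) { return 6, nil }

// Before: state syncing was asynchronous, so assertions were wrapped in a
// retry loop and reported failures through the *retry.R value.
func TestServicesSynced_retry(t *testing.T) {
	retry.Run(t, func(r *retry.R) {
		n, err := nodeServiceCount()
		if err != nil {
			r.Fatalf("err: %v", err)
		}
		if n != 6 {
			r.Fatalf("bad: %d", n)
		}
	})
}

// After: the tests sync state synchronously before querying, so a single
// direct check against *testing.T is enough.
func TestServicesSynced_direct(t *testing.T) {
	n, err := nodeServiceCount()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if n != 6 {
		t.Fatalf("bad: %d", n)
	}
}

The direct form fails at the exact assertion instead of hiding errors behind a timed retry loop, which is what makes dropping the loops safe once syncing is synchronous.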
pull/3609/head
Frank Schroeder 7 years ago
parent 7e3adc4549
commit 1602ac56b5

@@ -123,60 +123,58 @@ func TestAgentAntiEntropy_Services(t *testing.T) {
         Node: a.Config.NodeName,
     }
-    retry.Run(t, func(r *retry.R) {
-        if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
-            r.Fatalf("err: %v", err)
-        }
+    if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
+        t.Fatalf("err: %v", err)
+    }

-        // Make sure we sent along our node info when we synced.
-        id := services.NodeServices.Node.ID
-        addrs := services.NodeServices.Node.TaggedAddresses
-        meta := services.NodeServices.Node.Meta
-        delete(meta, structs.MetaSegmentKey) // Added later, not in config.
-        verify.Values(r, "node id", id, a.config.NodeID)
-        verify.Values(r, "tagged addrs", addrs, a.config.TaggedAddresses)
-        verify.Values(r, "node meta", meta, a.config.NodeMeta)
+    // Make sure we sent along our node info when we synced.
+    id := services.NodeServices.Node.ID
+    addrs := services.NodeServices.Node.TaggedAddresses
+    meta := services.NodeServices.Node.Meta
+    delete(meta, structs.MetaSegmentKey) // Added later, not in config.
+    verify.Values(t, "node id", id, a.Config.NodeID)
+    verify.Values(t, "tagged addrs", addrs, a.Config.TaggedAddresses)
+    verify.Values(t, "node meta", meta, a.Config.NodeMeta)

-        // We should have 6 services (consul included)
-        if len(services.NodeServices.Services) != 6 {
-            r.Fatalf("bad: %v", services.NodeServices.Services)
-        }
+    // We should have 6 services (consul included)
+    if len(services.NodeServices.Services) != 6 {
+        t.Fatalf("bad: %v", services.NodeServices.Services)
+    }

-        // All the services should match
-        for id, serv := range services.NodeServices.Services {
-            serv.CreateIndex, serv.ModifyIndex = 0, 0
-            switch id {
-            case "mysql":
-                if !reflect.DeepEqual(serv, srv1) {
-                    r.Fatalf("bad: %v %v", serv, srv1)
-                }
-            case "redis":
-                if !reflect.DeepEqual(serv, srv2) {
-                    r.Fatalf("bad: %#v %#v", serv, srv2)
-                }
-            case "web":
-                if !reflect.DeepEqual(serv, srv3) {
-                    r.Fatalf("bad: %v %v", serv, srv3)
-                }
-            case "api":
-                if !reflect.DeepEqual(serv, srv5) {
-                    r.Fatalf("bad: %v %v", serv, srv5)
-                }
-            case "cache":
-                if !reflect.DeepEqual(serv, srv6) {
-                    r.Fatalf("bad: %v %v", serv, srv6)
-                }
-            case structs.ConsulServiceID:
-                // ignore
-            default:
-                r.Fatalf("unexpected service: %v", id)
-            }
-        }
+    // All the services should match
+    for id, serv := range services.NodeServices.Services {
+        serv.CreateIndex, serv.ModifyIndex = 0, 0
+        switch id {
+        case "mysql":
+            if !reflect.DeepEqual(serv, srv1) {
+                t.Fatalf("bad: %v %v", serv, srv1)
+            }
+        case "redis":
+            if !reflect.DeepEqual(serv, srv2) {
+                t.Fatalf("bad: %#v %#v", serv, srv2)
+            }
+        case "web":
+            if !reflect.DeepEqual(serv, srv3) {
+                t.Fatalf("bad: %v %v", serv, srv3)
+            }
+        case "api":
+            if !reflect.DeepEqual(serv, srv5) {
+                t.Fatalf("bad: %v %v", serv, srv5)
+            }
+        case "cache":
+            if !reflect.DeepEqual(serv, srv6) {
+                t.Fatalf("bad: %v %v", serv, srv6)
+            }
+        case structs.ConsulServiceID:
+            // ignore
+        default:
+            t.Fatalf("unexpected service: %v", id)
+        }
+    }

-        if err := servicesInSync(a.State, 5); err != nil {
-            r.Fatal(err)
-        }
-    })
+    if err := servicesInSync(a.State, 5); err != nil {
+        t.Fatal(err)
+    }

     // Remove one of the services
     a.State.RemoveService("api")
@@ -185,47 +183,45 @@ func TestAgentAntiEntropy_Services(t *testing.T) {
         t.Fatalf("err: %v", err)
     }
-    retry.Run(t, func(r *retry.R) {
-        if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
-            r.Fatalf("err: %v", err)
-        }
+    if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
+        t.Fatalf("err: %v", err)
+    }

-        // We should have 5 services (consul included)
-        if len(services.NodeServices.Services) != 5 {
-            r.Fatalf("bad: %v", services.NodeServices.Services)
-        }
+    // We should have 5 services (consul included)
+    if len(services.NodeServices.Services) != 5 {
+        t.Fatalf("bad: %v", services.NodeServices.Services)
+    }

-        // All the services should match
-        for id, serv := range services.NodeServices.Services {
-            serv.CreateIndex, serv.ModifyIndex = 0, 0
-            switch id {
-            case "mysql":
-                if !reflect.DeepEqual(serv, srv1) {
-                    r.Fatalf("bad: %v %v", serv, srv1)
-                }
-            case "redis":
-                if !reflect.DeepEqual(serv, srv2) {
-                    r.Fatalf("bad: %#v %#v", serv, srv2)
-                }
-            case "web":
-                if !reflect.DeepEqual(serv, srv3) {
-                    r.Fatalf("bad: %v %v", serv, srv3)
-                }
-            case "cache":
-                if !reflect.DeepEqual(serv, srv6) {
-                    r.Fatalf("bad: %v %v", serv, srv6)
-                }
-            case structs.ConsulServiceID:
-                // ignore
-            default:
-                r.Fatalf("unexpected service: %v", id)
-            }
-        }
+    // All the services should match
+    for id, serv := range services.NodeServices.Services {
+        serv.CreateIndex, serv.ModifyIndex = 0, 0
+        switch id {
+        case "mysql":
+            if !reflect.DeepEqual(serv, srv1) {
+                t.Fatalf("bad: %v %v", serv, srv1)
+            }
+        case "redis":
+            if !reflect.DeepEqual(serv, srv2) {
+                t.Fatalf("bad: %#v %#v", serv, srv2)
+            }
+        case "web":
+            if !reflect.DeepEqual(serv, srv3) {
+                t.Fatalf("bad: %v %v", serv, srv3)
+            }
+        case "cache":
+            if !reflect.DeepEqual(serv, srv6) {
+                t.Fatalf("bad: %v %v", serv, srv6)
+            }
+        case structs.ConsulServiceID:
+            // ignore
+        default:
+            t.Fatalf("unexpected service: %v", id)
+        }
+    }

-        if err := servicesInSync(a.State, 4); err != nil {
-            r.Fatal(err)
-        }
-    })
+    if err := servicesInSync(a.State, 4); err != nil {
+        t.Fatal(err)
+    }
 }

 func TestAgentAntiEntropy_EnableTagOverride(t *testing.T) {
@@ -462,8 +458,7 @@ func TestAgentAntiEntropy_Services_ACLDeny(t *testing.T) {
         acl_datacenter = "dc1"
         acl_master_token = "root"
         acl_default_policy = "deny"
-        acl_enforce_version_8 = true
-    `, NoInitialSync: true}
+        acl_enforce_version_8 = true`}
     a.Start()
     defer a.Shutdown()
@@ -687,43 +682,41 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
     var checks structs.IndexedHealthChecks

     // Verify that we are in sync
-    retry.Run(t, func(r *retry.R) {
-        if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
-            r.Fatalf("err: %v", err)
-        }
+    if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
+        t.Fatalf("err: %v", err)
+    }

-        // We should have 5 checks (serf included)
-        if len(checks.HealthChecks) != 5 {
-            r.Fatalf("bad: %v", checks)
-        }
+    // We should have 5 checks (serf included)
+    if len(checks.HealthChecks) != 5 {
+        t.Fatalf("bad: %v", checks)
+    }

-        // All the checks should match
-        for _, chk := range checks.HealthChecks {
-            chk.CreateIndex, chk.ModifyIndex = 0, 0
-            switch chk.CheckID {
-            case "mysql":
-                if !reflect.DeepEqual(chk, chk1) {
-                    r.Fatalf("bad: %v %v", chk, chk1)
-                }
-            case "redis":
-                if !reflect.DeepEqual(chk, chk2) {
-                    r.Fatalf("bad: %v %v", chk, chk2)
-                }
-            case "web":
-                if !reflect.DeepEqual(chk, chk3) {
-                    r.Fatalf("bad: %v %v", chk, chk3)
-                }
-            case "cache":
-                if !reflect.DeepEqual(chk, chk5) {
-                    r.Fatalf("bad: %v %v", chk, chk5)
-                }
-            case "serfHealth":
-                // ignore
-            default:
-                r.Fatalf("unexpected check: %v", chk)
-            }
-        }
-    })
+    // All the checks should match
+    for _, chk := range checks.HealthChecks {
+        chk.CreateIndex, chk.ModifyIndex = 0, 0
+        switch chk.CheckID {
+        case "mysql":
+            if !reflect.DeepEqual(chk, chk1) {
+                t.Fatalf("bad: %v %v", chk, chk1)
+            }
+        case "redis":
+            if !reflect.DeepEqual(chk, chk2) {
+                t.Fatalf("bad: %v %v", chk, chk2)
+            }
+        case "web":
+            if !reflect.DeepEqual(chk, chk3) {
+                t.Fatalf("bad: %v %v", chk, chk3)
+            }
+        case "cache":
+            if !reflect.DeepEqual(chk, chk5) {
+                t.Fatalf("bad: %v %v", chk, chk5)
+            }
+        case "serfHealth":
+            // ignore
+        default:
+            t.Fatalf("unexpected check: %v", chk)
+        }
+    }

     if err := checksInSync(a.State, 4); err != nil {
         t.Fatal(err)
@@ -744,9 +737,9 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
         addrs := services.NodeServices.Node.TaggedAddresses
         meta := services.NodeServices.Node.Meta
         delete(meta, structs.MetaSegmentKey) // Added later, not in config.
-        verify.Values(t, "node id", id, a.config.NodeID)
-        verify.Values(t, "tagged addrs", addrs, a.config.TaggedAddresses)
-        verify.Values(t, "node meta", meta, a.config.NodeMeta)
+        verify.Values(t, "node id", id, a.Config.NodeID)
+        verify.Values(t, "tagged addrs", addrs, a.Config.TaggedAddresses)
+        verify.Values(t, "node meta", meta, a.Config.NodeMeta)
     }

     // Remove one of the checks
@@ -757,39 +750,37 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
     }

     // Verify that we are in sync
-    retry.Run(t, func(r *retry.R) {
-        if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
-            r.Fatalf("err: %v", err)
-        }
+    if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
+        t.Fatalf("err: %v", err)
+    }

-        // We should have 5 checks (serf included)
-        if len(checks.HealthChecks) != 4 {
-            r.Fatalf("bad: %v", checks)
-        }
+    // We should have 5 checks (serf included)
+    if len(checks.HealthChecks) != 4 {
+        t.Fatalf("bad: %v", checks)
+    }

-        // All the checks should match
-        for _, chk := range checks.HealthChecks {
-            chk.CreateIndex, chk.ModifyIndex = 0, 0
-            switch chk.CheckID {
-            case "mysql":
-                if !reflect.DeepEqual(chk, chk1) {
-                    r.Fatalf("bad: %v %v", chk, chk1)
-                }
-            case "web":
-                if !reflect.DeepEqual(chk, chk3) {
-                    r.Fatalf("bad: %v %v", chk, chk3)
-                }
-            case "cache":
-                if !reflect.DeepEqual(chk, chk5) {
-                    r.Fatalf("bad: %v %v", chk, chk5)
-                }
-            case "serfHealth":
-                // ignore
-            default:
-                r.Fatalf("unexpected check: %v", chk)
-            }
-        }
-    })
+    // All the checks should match
+    for _, chk := range checks.HealthChecks {
+        chk.CreateIndex, chk.ModifyIndex = 0, 0
+        switch chk.CheckID {
+        case "mysql":
+            if !reflect.DeepEqual(chk, chk1) {
+                t.Fatalf("bad: %v %v", chk, chk1)
+            }
+        case "web":
+            if !reflect.DeepEqual(chk, chk3) {
+                t.Fatalf("bad: %v %v", chk, chk3)
+            }
+        case "cache":
+            if !reflect.DeepEqual(chk, chk5) {
+                t.Fatalf("bad: %v %v", chk, chk5)
+            }
+        case "serfHealth":
+            // ignore
+        default:
+            t.Fatalf("unexpected check: %v", chk)
+        }
+    }

     if err := checksInSync(a.State, 3); err != nil {
         t.Fatal(err)
@@ -802,8 +793,7 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
         acl_datacenter = "dc1"
         acl_master_token = "root"
         acl_default_policy = "deny"
-        acl_enforce_version_8 = true
-    `, NoInitialSync: true}
+        acl_enforce_version_8 = true`}
     a.Start()
     defer a.Shutdown()
@@ -917,41 +907,39 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
     }

     // Verify that we are in sync
-    retry.Run(t, func(r *retry.R) {
-        req := structs.NodeSpecificRequest{
-            Datacenter: "dc1",
-            Node: a.Config.NodeName,
-            QueryOptions: structs.QueryOptions{
-                Token: "root",
-            },
-        }
-        var checks structs.IndexedHealthChecks
-        if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
-            r.Fatalf("err: %v", err)
-        }
+    req := structs.NodeSpecificRequest{
+        Datacenter: "dc1",
+        Node: a.Config.NodeName,
+        QueryOptions: structs.QueryOptions{
+            Token: "root",
+        },
+    }
+    var checks structs.IndexedHealthChecks
+    if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
+        t.Fatalf("err: %v", err)
+    }

-        // We should have 2 checks (serf included)
-        if len(checks.HealthChecks) != 2 {
-            r.Fatalf("bad: %v", checks)
-        }
+    // We should have 2 checks (serf included)
+    if len(checks.HealthChecks) != 2 {
+        t.Fatalf("bad: %v", checks)
+    }

-        // All the checks should match
-        for _, chk := range checks.HealthChecks {
-            chk.CreateIndex, chk.ModifyIndex = 0, 0
-            switch chk.CheckID {
-            case "mysql-check":
-                t.Fatalf("should not be permitted")
-            case "api-check":
-                if !reflect.DeepEqual(chk, chk2) {
-                    r.Fatalf("bad: %v %v", chk, chk2)
-                }
-            case "serfHealth":
-                // ignore
-            default:
-                r.Fatalf("unexpected check: %v", chk)
-            }
-        }
-    })
+    // All the checks should match
+    for _, chk := range checks.HealthChecks {
+        chk.CreateIndex, chk.ModifyIndex = 0, 0
+        switch chk.CheckID {
+        case "mysql-check":
+            t.Fatalf("should not be permitted")
+        case "api-check":
+            if !reflect.DeepEqual(chk, chk2) {
+                t.Fatalf("bad: %v %v", chk, chk2)
+            }
+        case "serfHealth":
+            // ignore
+        default:
+            t.Fatalf("unexpected check: %v", chk)
+        }
+    }

     if err := checksInSync(a.State, 2); err != nil {
         t.Fatal(err)
@@ -964,7 +952,7 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
     }

     // Verify that we are in sync
-    retry.Run(t, func(r *retry.R) {
+    {
         req := structs.NodeSpecificRequest{
             Datacenter: "dc1",
             Node: a.Config.NodeName,
@@ -974,12 +962,12 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
         }
         var checks structs.IndexedHealthChecks
         if err := a.RPC("Health.NodeChecks", &req, &checks); err != nil {
-            r.Fatalf("err: %v", err)
+            t.Fatalf("err: %v", err)
         }

         // We should have 1 check (just serf)
         if len(checks.HealthChecks) != 1 {
-            r.Fatalf("bad: %v", checks)
+            t.Fatalf("bad: %v", checks)
         }

         // All the checks should match
@@ -987,16 +975,16 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
             chk.CreateIndex, chk.ModifyIndex = 0, 0
             switch chk.CheckID {
             case "mysql-check":
-                r.Fatalf("should not be permitted")
+                t.Fatalf("should not be permitted")
             case "api-check":
-                r.Fatalf("should be deleted")
+                t.Fatalf("should be deleted")
             case "serfHealth":
                 // ignore
             default:
-                r.Fatalf("unexpected check: %v", chk)
+                t.Fatalf("unexpected check: %v", chk)
             }
         }
-    })
+    }

     if err := checksInSync(a.State, 1); err != nil {
         t.Fatal(err)
@@ -1062,7 +1050,7 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) {
     t.Parallel()
     a := &agent.TestAgent{Name: t.Name(), HCL: `
         check_update_interval = "500ms"
-    `, NoInitialSync: true}
+    `}
     a.Start()
     defer a.Shutdown()
@@ -1243,8 +1231,7 @@ func TestAgentAntiEntropy_NodeInfo(t *testing.T) {
         node_id = "40e4a748-2192-161a-0510-9bf59fe950b5"
         node_meta {
             somekey = "somevalue"
-        }
-    `, NoInitialSync: true}
+        }`}
     a.Start()
     defer a.Shutdown()
@@ -1268,24 +1255,21 @@ func TestAgentAntiEntropy_NodeInfo(t *testing.T) {
         Node: a.Config.NodeName,
     }
     var services structs.IndexedNodeServices
-    // Wait for the sync
-    retry.Run(t, func(r *retry.R) {
-        if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
-            r.Fatalf("err: %v", err)
-        }
+    if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
+        t.Fatalf("err: %v", err)
+    }

-        // Make sure we synced our node info - this should have ridden on the
-        // "consul" service sync
-        id := services.NodeServices.Node.ID
-        addrs := services.NodeServices.Node.TaggedAddresses
-        meta := services.NodeServices.Node.Meta
-        delete(meta, structs.MetaSegmentKey) // Added later, not in config.
-        if id != nodeID ||
-            !reflect.DeepEqual(addrs, a.config.TaggedAddresses) ||
-            !reflect.DeepEqual(meta, nodeMeta) {
-            r.Fatalf("bad: %v", services.NodeServices.Node)
-        }
-    })
+    // Make sure we synced our node info - this should have ridden on the
+    // "consul" service sync
+    id := services.NodeServices.Node.ID
+    addrs := services.NodeServices.Node.TaggedAddresses
+    meta := services.NodeServices.Node.Meta
+    delete(meta, structs.MetaSegmentKey) // Added later, not in config.
+    if id != a.Config.NodeID ||
+        !reflect.DeepEqual(addrs, a.Config.TaggedAddresses) ||
+        !reflect.DeepEqual(meta, a.Config.NodeMeta) {
+        t.Fatalf("bad: %v", services.NodeServices.Node)
+    }

     // Blow away the catalog version of the node info
     if err := a.RPC("Catalog.Register", args, &out); err != nil {
@@ -1297,30 +1281,30 @@ func TestAgentAntiEntropy_NodeInfo(t *testing.T) {
     }

     // Wait for the sync - this should have been a sync of just the node info
-    retry.Run(t, func(r *retry.R) {
-        if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
-            r.Fatalf("err: %v", err)
-        }
+    if err := a.RPC("Catalog.NodeServices", &req, &services); err != nil {
+        t.Fatalf("err: %v", err)
+    }

+    {
         id := services.NodeServices.Node.ID
         addrs := services.NodeServices.Node.TaggedAddresses
         meta := services.NodeServices.Node.Meta
         delete(meta, structs.MetaSegmentKey) // Added later, not in config.
         if id != nodeID ||
-            !reflect.DeepEqual(addrs, a.config.TaggedAddresses) ||
+            !reflect.DeepEqual(addrs, a.Config.TaggedAddresses) ||
             !reflect.DeepEqual(meta, nodeMeta) {
-            r.Fatalf("bad: %v", services.NodeServices.Node)
+            t.Fatalf("bad: %v", services.NodeServices.Node)
         }
-    })
+    }
 }

 func TestAgent_serviceTokens(t *testing.T) {
     t.Parallel()
-    cfg := agent.TestConfig()
     tokens := new(token.Store)
     tokens.UpdateUserToken("default")
-    l := local.NewState(config.DefaultRuntimeConfig(`bind_addr = "127.0.0.1" data_dir = "dummy"`), nil, tokens, make(chan struct{}, 1))
+    lcfg := agent.LocalConfig(config.DefaultRuntimeConfig(`bind_addr = "127.0.0.1" data_dir = "dummy"`))
+    l := local.NewState(lcfg, nil, tokens, make(chan struct{}, 1))

     l.AddService(&structs.NodeService{ID: "redis"}, "")
@@ -1345,10 +1329,10 @@ func TestAgent_serviceTokens(t *testing.T) {

 func TestAgent_checkTokens(t *testing.T) {
     t.Parallel()
-    cfg := agent.TestConfig()
     tokens := new(token.Store)
     tokens.UpdateUserToken("default")
-    l := local.NewState(config.DefaultRuntimeConfig(`bind_addr = "127.0.0.1" data_dir = "dummy"`), nil, tokens, make(chan struct{}, 1))
+    lcfg := agent.LocalConfig(config.DefaultRuntimeConfig(`bind_addr = "127.0.0.1" data_dir = "dummy"`))
+    l := local.NewState(lcfg, nil, tokens, make(chan struct{}, 1))

     // Returns default when no token is set
     l.AddCheck(&structs.HealthCheck{CheckID: types.CheckID("mem")}, "")
@@ -1371,7 +1355,8 @@ func TestAgent_checkTokens(t *testing.T) {

 func TestAgent_checkCriticalTime(t *testing.T) {
     t.Parallel()
-    l := local.NewState(config.DefaultRuntimeConfig(`bind_addr = "127.0.0.1" data_dir = "dummy"`), nil, new(token.Store), make(chan struct{}, 1))
+    lcfg := agent.LocalConfig(config.DefaultRuntimeConfig(`bind_addr = "127.0.0.1" data_dir = "dummy"`))
+    l := local.NewState(lcfg, nil, new(token.Store), make(chan struct{}, 1))
     svc := &structs.NodeService{ID: "redis", Service: "redis", Port: 8000}
     l.AddService(svc, "")
@@ -1433,7 +1418,8 @@ func TestAgent_checkCriticalTime(t *testing.T) {

 func TestAgent_AddCheckFailure(t *testing.T) {
     t.Parallel()
-    l := local.NewState(config.DefaultRuntimeConfig(`bind_addr = "127.0.0.1" data_dir = "dummy"`), nil, new(token.Store), make(chan struct{}, 1))
+    lcfg := agent.LocalConfig(config.DefaultRuntimeConfig(`bind_addr = "127.0.0.1" data_dir = "dummy"`))
+    l := local.NewState(lcfg, nil, new(token.Store), make(chan struct{}, 1))

     // Add a check for a service that does not exist and verify that it fails
     checkID := types.CheckID("redis:1")
@@ -1465,8 +1451,10 @@ func TestAgent_sendCoordinate(t *testing.T) {
     `)
     defer a.Shutdown()

-    t.Logf("%d %d %s", a.consulConfig().CoordinateUpdateBatchSize, a.consulConfig().CoordinateUpdateMaxBatches,
-        a.consulConfig().CoordinateUpdatePeriod.String())
+    t.Logf("%d %d %s",
+        a.Config.ConsulCoordinateUpdateBatchSize,
+        a.Config.ConsulCoordinateUpdateMaxBatches,
+        a.Config.ConsulCoordinateUpdatePeriod.String())

     // Make sure the coordinate is present.
     req := structs.DCSpecificRequest{
