mirror of https://github.com/hashicorp/consul
More flaky unit test fixes (#2449)
* More flaky unit test fixes
* Raise some test timeouts that were too low

pull/2453/head
parent bfa0c3bc4c
commit dce6702268
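Every hunk below applies the same fix: a fixed time.Sleep followed by one-shot assertions is replaced by a poll that re-evaluates the condition until it holds. The repo's testutil.WaitForResult helper takes a condition callback returning (bool, error) plus a failure callback invoked if the condition never becomes true. The fragment below restates the shape of the converted code using the names from the expectHTTPStatus hunk; the surrounding test setup (mock, check, t) is omitted here, so it is a sketch of the pattern rather than compilable code on its own.

	testutil.WaitForResult(func() (bool, error) {
		// The condition is re-checked on every attempt instead of being
		// asserted once after a fixed sleep.
		if mock.updates["foo"] < 2 {
			return false, fmt.Errorf("should have 2 updates %v", mock.updates)
		}
		return true, nil
	}, func(err error) {
		// Reached only if the condition never held before the helper gave up.
		t.Fatalf("err: %s", err)
	})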
@@ -31,7 +31,6 @@ func TestCatalog_Datacenters(t *testing.T) {
 }
 
 func TestCatalog_Nodes(t *testing.T) {
-	t.Parallel()
 	c, s := makeClient(t)
 	defer s.Stop()
 
@@ -52,7 +51,7 @@ func TestCatalog_Nodes(t *testing.T) {
 		}
 
 		if _, ok := nodes[0].TaggedAddresses["wan"]; !ok {
-			return false, fmt.Errorf("Bad: %v", nodes)
+			return false, fmt.Errorf("Bad: %v", nodes[0])
 		}
 
 		return true, nil
@@ -76,7 +76,6 @@ func TestHealth_Checks(t *testing.T) {
 }
 
 func TestHealth_Service(t *testing.T) {
-	t.Parallel()
 	c, s := makeClient(t)
 	defer s.Stop()
 
@@ -95,7 +94,7 @@ func TestHealth_Service(t *testing.T) {
 			return false, fmt.Errorf("Bad: %v", checks)
 		}
 		if _, ok := checks[0].Node.TaggedAddresses["wan"]; !ok {
-			return false, fmt.Errorf("Bad: %v", checks)
+			return false, fmt.Errorf("Bad: %v", checks[0].Node)
 		}
 		return true, nil
 	}, func(err error) {
@@ -139,7 +139,7 @@ func TestLock_DeleteKey(t *testing.T) {
 		// Should loose leadership
 		select {
 		case <-leaderCh:
-		case <-time.After(time.Second):
+		case <-time.After(10 * time.Second):
			t.Fatalf("should not be leader")
 		}
 	}()
@@ -244,21 +244,24 @@ func expectHTTPStatus(t *testing.T, url string, status string) {
 	check.Start()
 	defer check.Stop()
 
-	time.Sleep(50 * time.Millisecond)
-
-	// Should have at least 2 updates
-	if mock.updates["foo"] < 2 {
-		t.Fatalf("should have 2 updates %v", mock.updates)
-	}
-
-	if mock.state["foo"] != status {
-		t.Fatalf("should be %v %v", status, mock.state)
-	}
-
-	// Allow slightly more data than CheckBufSize, for the header
-	if n := len(mock.output["foo"]); n > (CheckBufSize + 256) {
-		t.Fatalf("output too long: %d (%d-byte limit)", n, CheckBufSize)
-	}
+	testutil.WaitForResult(func() (bool, error) {
+		// Should have at least 2 updates
+		if mock.updates["foo"] < 2 {
+			return false, fmt.Errorf("should have 2 updates %v", mock.updates)
+		}
+
+		if mock.state["foo"] != status {
+			return false, fmt.Errorf("should be %v %v", status, mock.state)
+		}
+
+		// Allow slightly more data than CheckBufSize, for the header
+		if n := len(mock.output["foo"]); n > (CheckBufSize + 256) {
+			return false, fmt.Errorf("output too long: %d (%d-byte limit)", n, CheckBufSize)
+		}
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
 }
 
 func TestCheckHTTPCritical(t *testing.T) {
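For readers unfamiliar with the helper, the mechanics assumed by these hunks are small: retry the condition with short sleeps under an attempt budget, then report the last error. The sketch below is a hypothetical re-implementation for illustration only, not a copy of Consul's actual testutil code.

package sketch

import "time"

// waitForResult mimics the polling helper the tests switch to: keep retrying
// the condition, and hand the last error to the failure callback once the
// attempt budget runs out.
func waitForResult(test func() (bool, error), onFail func(error)) {
	var err error
	for i := 0; i < 500; i++ { // roughly 5s at 10ms per attempt
		var ok bool
		if ok, err = test(); ok {
			return
		}
		time.Sleep(10 * time.Millisecond)
	}
	onFail(err)
}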
@@ -347,16 +350,19 @@ func TestCheckHTTPTimeout(t *testing.T) {
 	check.Start()
 	defer check.Stop()
 
-	time.Sleep(50 * time.Millisecond)
-
-	// Should have at least 2 updates
-	if mock.updates["bar"] < 2 {
-		t.Fatalf("should have at least 2 updates %v", mock.updates)
-	}
-
-	if mock.state["bar"] != structs.HealthCritical {
-		t.Fatalf("should be critical %v", mock.state)
-	}
+	testutil.WaitForResult(func() (bool, error) {
+		// Should have at least 2 updates
+		if mock.updates["bar"] < 2 {
+			return false, fmt.Errorf("should have at least 2 updates %v", mock.updates)
+		}
+
+		if mock.state["bar"] != structs.HealthCritical {
+			return false, fmt.Errorf("should be critical %v", mock.state)
+		}
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
 }
 
 func TestCheckHTTP_disablesKeepAlives(t *testing.T) {
@@ -410,16 +416,19 @@ func expectTCPStatus(t *testing.T, tcp string, status string) {
 	check.Start()
 	defer check.Stop()
 
-	time.Sleep(50 * time.Millisecond)
-
-	// Should have at least 2 updates
-	if mock.updates["foo"] < 2 {
-		t.Fatalf("should have 2 updates %v", mock.updates)
-	}
-
-	if mock.state["foo"] != status {
-		t.Fatalf("should be %v %v", status, mock.state)
-	}
+	testutil.WaitForResult(func() (bool, error) {
+		// Should have at least 2 updates
+		if mock.updates["foo"] < 2 {
+			return false, fmt.Errorf("should have 2 updates %v", mock.updates)
+		}
+
+		if mock.state["foo"] != status {
+			return false, fmt.Errorf("should be %v %v", status, mock.state)
+		}
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
 }
 
 func TestCheckTCPCritical(t *testing.T) {
@@ -655,49 +655,54 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
 
 	// Trigger anti-entropy run and wait
 	agent.StartSync()
-	time.Sleep(200 * time.Millisecond)
 
-	// Verify that we are in sync
 	req := structs.NodeSpecificRequest{
 		Datacenter: "dc1",
 		Node:       agent.config.NodeName,
 	}
 	var checks structs.IndexedHealthChecks
-	if err := agent.RPC("Health.NodeChecks", &req, &checks); err != nil {
-		t.Fatalf("err: %v", err)
-	}
 
-	// We should have 5 checks (serf included)
-	if len(checks.HealthChecks) != 5 {
-		t.Fatalf("bad: %v", checks)
-	}
-
-	// All the checks should match
-	for _, chk := range checks.HealthChecks {
-		chk.CreateIndex, chk.ModifyIndex = 0, 0
-		switch chk.CheckID {
-		case "mysql":
-			if !reflect.DeepEqual(chk, chk1) {
-				t.Fatalf("bad: %v %v", chk, chk1)
-			}
-		case "redis":
-			if !reflect.DeepEqual(chk, chk2) {
-				t.Fatalf("bad: %v %v", chk, chk2)
-			}
-		case "web":
-			if !reflect.DeepEqual(chk, chk3) {
-				t.Fatalf("bad: %v %v", chk, chk3)
-			}
-		case "cache":
-			if !reflect.DeepEqual(chk, chk5) {
-				t.Fatalf("bad: %v %v", chk, chk5)
-			}
-		case "serfHealth":
-			// ignore
-		default:
-			t.Fatalf("unexpected check: %v", chk)
-		}
-	}
+	// Verify that we are in sync
+	testutil.WaitForResult(func() (bool, error) {
+		if err := agent.RPC("Health.NodeChecks", &req, &checks); err != nil {
+			return false, fmt.Errorf("err: %v", err)
+		}
+
+		// We should have 5 checks (serf included)
+		if len(checks.HealthChecks) != 5 {
+			return false, fmt.Errorf("bad: %v", checks)
+		}
+
+		// All the checks should match
+		for _, chk := range checks.HealthChecks {
+			chk.CreateIndex, chk.ModifyIndex = 0, 0
+			switch chk.CheckID {
+			case "mysql":
+				if !reflect.DeepEqual(chk, chk1) {
+					return false, fmt.Errorf("bad: %v %v", chk, chk1)
+				}
+			case "redis":
+				if !reflect.DeepEqual(chk, chk2) {
+					return false, fmt.Errorf("bad: %v %v", chk, chk2)
+				}
+			case "web":
+				if !reflect.DeepEqual(chk, chk3) {
+					return false, fmt.Errorf("bad: %v %v", chk, chk3)
+				}
+			case "cache":
+				if !reflect.DeepEqual(chk, chk5) {
+					return false, fmt.Errorf("bad: %v %v", chk, chk5)
+				}
+			case "serfHealth":
+				// ignore
+			default:
+				return false, fmt.Errorf("unexpected check: %v", chk)
+			}
+		}
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
 
 	// Check the local state
 	if len(agent.state.checks) != 4 {
@@ -734,40 +739,44 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
 
 	// Trigger anti-entropy run and wait
 	agent.StartSync()
-	time.Sleep(200 * time.Millisecond)
 
 	// Verify that we are in sync
-	if err := agent.RPC("Health.NodeChecks", &req, &checks); err != nil {
-		t.Fatalf("err: %v", err)
-	}
-
-	// We should have 5 checks (serf included)
-	if len(checks.HealthChecks) != 4 {
-		t.Fatalf("bad: %v", checks)
-	}
-
-	// All the checks should match
-	for _, chk := range checks.HealthChecks {
-		chk.CreateIndex, chk.ModifyIndex = 0, 0
-		switch chk.CheckID {
-		case "mysql":
-			if !reflect.DeepEqual(chk, chk1) {
-				t.Fatalf("bad: %v %v", chk, chk1)
-			}
-		case "web":
-			if !reflect.DeepEqual(chk, chk3) {
-				t.Fatalf("bad: %v %v", chk, chk3)
-			}
-		case "cache":
-			if !reflect.DeepEqual(chk, chk5) {
-				t.Fatalf("bad: %v %v", chk, chk5)
-			}
-		case "serfHealth":
-			// ignore
-		default:
-			t.Fatalf("unexpected check: %v", chk)
-		}
-	}
+	testutil.WaitForResult(func() (bool, error) {
+		if err := agent.RPC("Health.NodeChecks", &req, &checks); err != nil {
+			return false, fmt.Errorf("err: %v", err)
+		}
+
+		// We should have 5 checks (serf included)
+		if len(checks.HealthChecks) != 4 {
+			return false, fmt.Errorf("bad: %v", checks)
+		}
+
+		// All the checks should match
+		for _, chk := range checks.HealthChecks {
+			chk.CreateIndex, chk.ModifyIndex = 0, 0
+			switch chk.CheckID {
+			case "mysql":
+				if !reflect.DeepEqual(chk, chk1) {
+					return false, fmt.Errorf("bad: %v %v", chk, chk1)
+				}
+			case "web":
+				if !reflect.DeepEqual(chk, chk3) {
+					return false, fmt.Errorf("bad: %v %v", chk, chk3)
+				}
+			case "cache":
+				if !reflect.DeepEqual(chk, chk5) {
+					return false, fmt.Errorf("bad: %v %v", chk, chk5)
+				}
+			case "serfHealth":
+				// ignore
+			default:
+				return false, fmt.Errorf("unexpected check: %v", chk)
+			}
+		}
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
 
 	// Check the local state
 	if len(agent.state.checks) != 3 {
@@ -1212,22 +1221,24 @@ func TestAgent_sendCoordinate(t *testing.T) {
 
 	testutil.WaitForLeader(t, agent.RPC, "dc1")
 
-	// Wait a little while for an update.
-	time.Sleep(3 * conf.ConsulConfig.CoordinateUpdatePeriod)
-
 	// Make sure the coordinate is present.
 	req := structs.DCSpecificRequest{
 		Datacenter: agent.config.Datacenter,
 	}
 	var reply structs.IndexedCoordinates
-	if err := agent.RPC("Coordinate.ListNodes", &req, &reply); err != nil {
-		t.Fatalf("err: %s", err)
-	}
-	if len(reply.Coordinates) != 1 {
-		t.Fatalf("expected a coordinate: %v", reply)
-	}
-	coord := reply.Coordinates[0]
-	if coord.Node != agent.config.NodeName || coord.Coord == nil {
-		t.Fatalf("bad: %v", coord)
-	}
+	testutil.WaitForResult(func() (bool, error) {
+		if err := agent.RPC("Coordinate.ListNodes", &req, &reply); err != nil {
+			return false, fmt.Errorf("err: %s", err)
+		}
+		if len(reply.Coordinates) != 1 {
+			return false, fmt.Errorf("expected a coordinate: %v", reply)
+		}
+		coord := reply.Coordinates[0]
+		if coord.Node != agent.config.NodeName || coord.Coord == nil {
+			return false, fmt.Errorf("bad: %v", coord)
+		}
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
 }
@@ -23,7 +23,7 @@ func TestExecCommandRun(t *testing.T) {
 
 	ui := new(cli.MockUi)
 	c := &ExecCommand{Ui: ui}
-	args := []string{"-http-addr=" + a1.httpAddr, "-wait=400ms", "uptime"}
+	args := []string{"-http-addr=" + a1.httpAddr, "-wait=10s", "uptime"}
 
 	code := c.Run(args)
 	if code != 0 {