From 8697cc2b4591ce5b760ac1e9470de1e825e5b5df Mon Sep 17 00:00:00 2001 From: Daniel Nephin Date: Thu, 15 Oct 2020 11:42:40 -0400 Subject: [PATCH 1/3] router: make refreshServerRebalanceTimer test a lot more strict --- agent/router/manager_internal_test.go | 100 ++++++++++++++------------ 1 file changed, 55 insertions(+), 45 deletions(-) diff --git a/agent/router/manager_internal_test.go b/agent/router/manager_internal_test.go index 05807e2070..c23c3ac648 100644 --- a/agent/router/manager_internal_test.go +++ b/agent/router/manager_internal_test.go @@ -8,9 +8,11 @@ import ( "testing" "time" - "github.com/hashicorp/consul/agent/metadata" "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/agent/metadata" ) var ( @@ -262,55 +264,63 @@ func test_reconcileServerList(maxServers int) (bool, error) { return true, nil } -// func (l *serverList) refreshServerRebalanceTimer() { -func TestManagerInternal_refreshServerRebalanceTimer(t *testing.T) { - type clusterSizes struct { - numNodes int - numServers int - minRebalance time.Duration - } - clusters := []clusterSizes{ - {0, 3, 2 * time.Minute}, - {1, 0, 2 * time.Minute}, // partitioned cluster - {1, 3, 2 * time.Minute}, - {2, 3, 2 * time.Minute}, - {100, 0, 2 * time.Minute}, // partitioned - {100, 1, 2 * time.Minute}, // partitioned - {100, 3, 2 * time.Minute}, - {1024, 1, 2 * time.Minute}, // partitioned - {1024, 3, 2 * time.Minute}, // partitioned - {1024, 5, 2 * time.Minute}, - {16384, 1, 4 * time.Minute}, // partitioned - {16384, 2, 2 * time.Minute}, // partitioned - {16384, 3, 2 * time.Minute}, // partitioned - {16384, 5, 2 * time.Minute}, - {65535, 0, 2 * time.Minute}, // partitioned - {65535, 1, 8 * time.Minute}, // partitioned - {65535, 2, 3 * time.Minute}, // partitioned - {65535, 3, 5 * time.Minute}, // partitioned - {65535, 5, 3 * time.Minute}, // partitioned - {65535, 7, 2 * time.Minute}, - {1000000, 1, 4 * time.Hour}, // 
partitioned - {1000000, 2, 2 * time.Hour}, // partitioned - {1000000, 3, 80 * time.Minute}, // partitioned - {1000000, 5, 50 * time.Minute}, // partitioned - {1000000, 11, 20 * time.Minute}, // partitioned - {1000000, 19, 10 * time.Minute}, +func TestManager_refreshServerRebalanceTimer(t *testing.T) { + type testCase struct { + clients int + servers int + min time.Duration + max time.Duration + expected time.Duration } - logger := GetBufferedLogger() - shutdownCh := make(chan struct{}) + testCases := []testCase{ + {servers: 3, clients: 0}, + {servers: 0, clients: 1}, // partitioned cluster + {servers: 3, clients: 1}, + {servers: 3, clients: 2}, + {servers: 0, clients: 100}, // partitioned + {servers: 1, clients: 100}, // partitioned + {servers: 3, clients: 100}, + {servers: 1, clients: 1024}, // partitioned + {servers: 3, clients: 1024}, // partitioned + {servers: 5, clients: 1024}, + {servers: 1, clients: 16384, expected: 4*time.Minute + 16*time.Second}, // partitioned + {servers: 2, clients: 16384}, // partitioned + {servers: 3, clients: 16384}, // partitioned + {servers: 5, clients: 16384}, + {servers: 0, clients: 65535}, // partitioned + {servers: 1, clients: 65535, expected: 17*time.Minute + 3984375000}, + {servers: 2, clients: 65535, expected: 8*time.Minute + 31992187500}, // partitioned + {servers: 3, clients: 65535, expected: 5*time.Minute + 41328125000}, // partitioned + {servers: 5, clients: 65535, expected: 3*time.Minute + 24796875000}, // partitioned + {servers: 7, clients: 65535}, + {servers: 1, clients: 1000000, expected: 4*time.Hour + 20*time.Minute + 25*time.Second}, // partitioned + {servers: 2, clients: 1000000, expected: 2*time.Hour + 10*time.Minute + 12500*time.Millisecond}, // partitioned + {servers: 3, clients: 1000000, expected: 86*time.Minute + 48333333333}, // partitioned + {servers: 5, clients: 1000000, expected: 52*time.Minute + 5*time.Second}, // partitioned + {servers: 11, clients: 1000000, expected: 23*time.Minute + 40454545454}, // 
partitioned + {servers: 19, clients: 1000000, expected: 13*time.Minute + 42368421052}, + } - for _, s := range clusters { - m := New(logger, shutdownCh, &fauxSerf{numNodes: s.numNodes}, &fauxConnPool{}, "", noopRebalancer) - for i := 0; i < s.numServers; i++ { - nodeName := fmt.Sprintf("s%02d", i) - m.AddServer(&metadata.Server{Name: nodeName}) + for _, tc := range testCases { + m := &Manager{clusterInfo: &fauxSerf{numNodes: tc.clients}, rebalanceTimer: time.NewTimer(0)} + m.saveServerList(serverList{servers: make([]*metadata.Server, tc.servers)}) + delay := m.refreshServerRebalanceTimer() + + if tc.expected != 0 { + assert.Equal(t, tc.expected, delay, "nodes=%d, servers=%d", tc.clients, tc.servers) + continue } - d := m.refreshServerRebalanceTimer() - if d < s.minRebalance { - t.Errorf("duration too short for cluster of size %d and %d servers (%s < %s)", s.numNodes, s.numServers, d, s.minRebalance) + if tc.min == 0 { + tc.min = 2 * time.Minute + tc.max = 3 * time.Minute + } + if delay < tc.min { + t.Errorf("nodes=%d, servers=%d, expected >%v, actual %v", tc.clients, tc.servers, tc.min, delay) + } + if delay > tc.max { + t.Errorf("nodes=%d, servers=%d, expected <%v, actual %v", tc.clients, tc.servers, tc.max, delay) } } } From 12e174900b7e3a55675e9fa0f7da660938673c3a Mon Sep 17 00:00:00 2001 From: Daniel Nephin Date: Thu, 15 Oct 2020 12:04:11 -0400 Subject: [PATCH 2/3] router: organize the test by number of servers And add some additional cases to show where the minimum value stops being used --- agent/router/manager_internal_test.go | 88 +++++++++++++------------ 1 file changed, 49 insertions(+), 39 deletions(-) diff --git a/agent/router/manager_internal_test.go b/agent/router/manager_internal_test.go index c23c3ac648..1ef649d633 100644 --- a/agent/router/manager_internal_test.go +++ b/agent/router/manager_internal_test.go @@ -266,61 +266,71 @@ func test_reconcileServerList(maxServers int) (bool, error) { func TestManager_refreshServerRebalanceTimer(t 
*testing.T) { type testCase struct { - clients int servers int - min time.Duration - max time.Duration + nodes int expected time.Duration } testCases := []testCase{ - {servers: 3, clients: 0}, - {servers: 0, clients: 1}, // partitioned cluster - {servers: 3, clients: 1}, - {servers: 3, clients: 2}, - {servers: 0, clients: 100}, // partitioned - {servers: 1, clients: 100}, // partitioned - {servers: 3, clients: 100}, - {servers: 1, clients: 1024}, // partitioned - {servers: 3, clients: 1024}, // partitioned - {servers: 5, clients: 1024}, - {servers: 1, clients: 16384, expected: 4*time.Minute + 16*time.Second}, // partitioned - {servers: 2, clients: 16384}, // partitioned - {servers: 3, clients: 16384}, // partitioned - {servers: 5, clients: 16384}, - {servers: 0, clients: 65535}, // partitioned - {servers: 1, clients: 65535, expected: 17*time.Minute + 3984375000}, - {servers: 2, clients: 65535, expected: 8*time.Minute + 31992187500}, // partitioned - {servers: 3, clients: 65535, expected: 5*time.Minute + 41328125000}, // partitioned - {servers: 5, clients: 65535, expected: 3*time.Minute + 24796875000}, // partitioned - {servers: 7, clients: 65535}, - {servers: 1, clients: 1000000, expected: 4*time.Hour + 20*time.Minute + 25*time.Second}, // partitioned - {servers: 2, clients: 1000000, expected: 2*time.Hour + 10*time.Minute + 12500*time.Millisecond}, // partitioned - {servers: 3, clients: 1000000, expected: 86*time.Minute + 48333333333}, // partitioned - {servers: 5, clients: 1000000, expected: 52*time.Minute + 5*time.Second}, // partitioned - {servers: 11, clients: 1000000, expected: 23*time.Minute + 40454545454}, // partitioned - {servers: 19, clients: 1000000, expected: 13*time.Minute + 42368421052}, + {servers: 0, nodes: 1}, + {servers: 0, nodes: 100}, + {servers: 0, nodes: 65535}, + {servers: 0, nodes: 1000000}, + + {servers: 1, nodes: 100}, + {servers: 1, nodes: 1024}, + {servers: 1, nodes: 8192}, + {servers: 1, nodes: 11520}, + {servers: 1, nodes: 16384, 
expected: 4*time.Minute + 16*time.Second}, + {servers: 1, nodes: 65535, expected: 17*time.Minute + 3984375000}, + {servers: 1, nodes: 1000000, expected: 4*time.Hour + 20*time.Minute + 25*time.Second}, + + {servers: 2, nodes: 100}, + {servers: 2, nodes: 16384}, + {servers: 2, nodes: 65535, expected: 8*time.Minute + 31992187500}, + {servers: 2, nodes: 1000000, expected: 2*time.Hour + 10*time.Minute + 12500*time.Millisecond}, + + {servers: 3, nodes: 0}, + {servers: 3, nodes: 100}, + {servers: 3, nodes: 1024}, + {servers: 3, nodes: 16384}, + {servers: 3, nodes: 32768}, + {servers: 3, nodes: 65535, expected: 5*time.Minute + 41328125000}, + {servers: 3, nodes: 1000000, expected: 86*time.Minute + 48333333333}, + + {servers: 5, nodes: 0}, + {servers: 5, nodes: 1024}, + {servers: 5, nodes: 16384}, + {servers: 5, nodes: 32768}, + {servers: 5, nodes: 56000}, + {servers: 5, nodes: 65535, expected: 3*time.Minute + 24796875000}, + {servers: 5, nodes: 1000000, expected: 52*time.Minute + 5*time.Second}, + + {servers: 7, nodes: 65535}, + {servers: 7, nodes: 80500}, + {servers: 7, nodes: 131070, expected: 4*time.Minute + 52566964285}, + + {servers: 11, nodes: 1000000, expected: 23*time.Minute + 40454545454}, + {servers: 19, nodes: 1000000, expected: 13*time.Minute + 42368421052}, } for _, tc := range testCases { - m := &Manager{clusterInfo: &fauxSerf{numNodes: tc.clients}, rebalanceTimer: time.NewTimer(0)} + m := &Manager{clusterInfo: &fauxSerf{numNodes: tc.nodes}, rebalanceTimer: time.NewTimer(0)} m.saveServerList(serverList{servers: make([]*metadata.Server, tc.servers)}) delay := m.refreshServerRebalanceTimer() if tc.expected != 0 { - assert.Equal(t, tc.expected, delay, "nodes=%d, servers=%d", tc.clients, tc.servers) + assert.Equal(t, tc.expected, delay, "nodes=%d, servers=%d", tc.nodes, tc.servers) continue } - if tc.min == 0 { - tc.min = 2 * time.Minute - tc.max = 3 * time.Minute + min := 2 * time.Minute + max := 3 * time.Minute + if delay < min { + t.Errorf("nodes=%d, 
servers=%d, expected >%v, actual %v", tc.nodes, tc.servers, min, delay) } - if delay < tc.min { - t.Errorf("nodes=%d, servers=%d, expected >%v, actual %v", tc.clients, tc.servers, tc.min, delay) - } - if delay > tc.max { - t.Errorf("nodes=%d, servers=%d, expected <%v, actual %v", tc.clients, tc.servers, tc.max, delay) + if delay > max { + t.Errorf("nodes=%d, servers=%d, expected <%v, actual %v", tc.nodes, tc.servers, max, delay) } } } From 119c446cf2d8a6507265309ee3e2fa5823b8f1b9 Mon Sep 17 00:00:00 2001 From: Daniel Nephin Date: Thu, 15 Oct 2020 14:43:29 -0400 Subject: [PATCH 3/3] agent/router: Add bounds test cases --- agent/router/manager_internal_test.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/agent/router/manager_internal_test.go b/agent/router/manager_internal_test.go index 1ef649d633..e172345fac 100644 --- a/agent/router/manager_internal_test.go +++ b/agent/router/manager_internal_test.go @@ -281,12 +281,15 @@ func TestManager_refreshServerRebalanceTimer(t *testing.T) { {servers: 1, nodes: 1024}, {servers: 1, nodes: 8192}, {servers: 1, nodes: 11520}, + {servers: 1, nodes: 11521, expected: 3*time.Minute + 15625*time.Microsecond}, {servers: 1, nodes: 16384, expected: 4*time.Minute + 16*time.Second}, {servers: 1, nodes: 65535, expected: 17*time.Minute + 3984375000}, {servers: 1, nodes: 1000000, expected: 4*time.Hour + 20*time.Minute + 25*time.Second}, {servers: 2, nodes: 100}, {servers: 2, nodes: 16384}, + {servers: 2, nodes: 23040}, + {servers: 2, nodes: 23041, expected: 3*time.Minute + 7812500}, {servers: 2, nodes: 65535, expected: 8*time.Minute + 31992187500}, {servers: 2, nodes: 1000000, expected: 2*time.Hour + 10*time.Minute + 12500*time.Millisecond}, @@ -294,7 +297,8 @@ func TestManager_refreshServerRebalanceTimer(t *testing.T) { {servers: 3, nodes: 100}, {servers: 3, nodes: 1024}, {servers: 3, nodes: 16384}, - {servers: 3, nodes: 32768}, + {servers: 3, nodes: 34560}, + {servers: 3, nodes: 34561, expected: 3*time.Minute + 
5208333}, {servers: 3, nodes: 65535, expected: 5*time.Minute + 41328125000}, {servers: 3, nodes: 1000000, expected: 86*time.Minute + 48333333333}, @@ -302,7 +306,7 @@ func TestManager_refreshServerRebalanceTimer(t *testing.T) { {servers: 5, nodes: 1024}, {servers: 5, nodes: 16384}, {servers: 5, nodes: 32768}, - {servers: 5, nodes: 56000}, + {servers: 5, nodes: 57600}, {servers: 5, nodes: 65535, expected: 3*time.Minute + 24796875000}, {servers: 5, nodes: 1000000, expected: 52*time.Minute + 5*time.Second},