Merge pull request #8970 from hashicorp/dnephin/resolver-balance-test

agent/router: improve the test for refreshServerRebalanceTimer
Daniel Nephin 2020-10-21 18:40:32 -04:00 committed by GitHub
commit 733ce9ad4a
1 changed file with 69 additions and 45 deletions


@@ -8,9 +8,11 @@ import (
 	"testing"
 	"time"
 
-	"github.com/hashicorp/consul/agent/metadata"
 	"github.com/hashicorp/go-hclog"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+
+	"github.com/hashicorp/consul/agent/metadata"
 )
 
 var (
@@ -262,55 +264,77 @@ func test_reconcileServerList(maxServers int) (bool, error) {
 	return true, nil
 }
 
-// func (l *serverList) refreshServerRebalanceTimer() {
-func TestManagerInternal_refreshServerRebalanceTimer(t *testing.T) {
-	type clusterSizes struct {
-		numNodes     int
-		numServers   int
-		minRebalance time.Duration
-	}
-	clusters := []clusterSizes{
-		{0, 3, 2 * time.Minute},
-		{1, 0, 2 * time.Minute}, // partitioned cluster
-		{1, 3, 2 * time.Minute},
-		{2, 3, 2 * time.Minute},
-		{100, 0, 2 * time.Minute}, // partitioned
-		{100, 1, 2 * time.Minute}, // partitioned
-		{100, 3, 2 * time.Minute},
-		{1024, 1, 2 * time.Minute}, // partitioned
-		{1024, 3, 2 * time.Minute}, // partitioned
-		{1024, 5, 2 * time.Minute},
-		{16384, 1, 4 * time.Minute}, // partitioned
-		{16384, 2, 2 * time.Minute}, // partitioned
-		{16384, 3, 2 * time.Minute}, // partitioned
-		{16384, 5, 2 * time.Minute},
-		{65535, 0, 2 * time.Minute}, // partitioned
-		{65535, 1, 8 * time.Minute}, // partitioned
-		{65535, 2, 3 * time.Minute}, // partitioned
-		{65535, 3, 5 * time.Minute}, // partitioned
-		{65535, 5, 3 * time.Minute}, // partitioned
-		{65535, 7, 2 * time.Minute},
-		{1000000, 1, 4 * time.Hour},     // partitioned
-		{1000000, 2, 2 * time.Hour},     // partitioned
-		{1000000, 3, 80 * time.Minute},  // partitioned
-		{1000000, 5, 50 * time.Minute},  // partitioned
-		{1000000, 11, 20 * time.Minute}, // partitioned
-		{1000000, 19, 10 * time.Minute},
-	}
-
-	logger := GetBufferedLogger()
-	shutdownCh := make(chan struct{})
-
-	for _, s := range clusters {
-		m := New(logger, shutdownCh, &fauxSerf{numNodes: s.numNodes}, &fauxConnPool{}, "", noopRebalancer)
-		for i := 0; i < s.numServers; i++ {
-			nodeName := fmt.Sprintf("s%02d", i)
-			m.AddServer(&metadata.Server{Name: nodeName})
-		}
-
-		d := m.refreshServerRebalanceTimer()
-		if d < s.minRebalance {
-			t.Errorf("duration too short for cluster of size %d and %d servers (%s < %s)", s.numNodes, s.numServers, d, s.minRebalance)
-		}
+func TestManager_refreshServerRebalanceTimer(t *testing.T) {
+	type testCase struct {
+		servers  int
+		nodes    int
+		expected time.Duration
+	}
+
+	testCases := []testCase{
+		{servers: 0, nodes: 1},
+		{servers: 0, nodes: 100},
+		{servers: 0, nodes: 65535},
+		{servers: 0, nodes: 1000000},
+
+		{servers: 1, nodes: 100},
+		{servers: 1, nodes: 1024},
+		{servers: 1, nodes: 8192},
+		{servers: 1, nodes: 11520},
+		{servers: 1, nodes: 11521, expected: 3*time.Minute + 15625*time.Microsecond},
+		{servers: 1, nodes: 16384, expected: 4*time.Minute + 16*time.Second},
+		{servers: 1, nodes: 65535, expected: 17*time.Minute + 3984375000},
+		{servers: 1, nodes: 1000000, expected: 4*time.Hour + 20*time.Minute + 25*time.Second},
+
+		{servers: 2, nodes: 100},
+		{servers: 2, nodes: 16384},
+		{servers: 2, nodes: 23040},
+		{servers: 2, nodes: 23041, expected: 3*time.Minute + 7812500},
+		{servers: 2, nodes: 65535, expected: 8*time.Minute + 31992187500},
+		{servers: 2, nodes: 1000000, expected: 2*time.Hour + 10*time.Minute + 12500*time.Millisecond},
+
+		{servers: 3, nodes: 0},
+		{servers: 3, nodes: 100},
+		{servers: 3, nodes: 1024},
+		{servers: 3, nodes: 16384},
+		{servers: 3, nodes: 34560},
+		{servers: 3, nodes: 34561, expected: 3*time.Minute + 5208333},
+		{servers: 3, nodes: 65535, expected: 5*time.Minute + 41328125000},
+		{servers: 3, nodes: 1000000, expected: 86*time.Minute + 48333333333},
+
+		{servers: 5, nodes: 0},
+		{servers: 5, nodes: 1024},
+		{servers: 5, nodes: 16384},
+		{servers: 5, nodes: 32768},
+		{servers: 5, nodes: 57600},
+		{servers: 5, nodes: 65535, expected: 3*time.Minute + 24796875000},
+		{servers: 5, nodes: 1000000, expected: 52*time.Minute + 5*time.Second},
+
+		{servers: 7, nodes: 65535},
+		{servers: 7, nodes: 80500},
+		{servers: 7, nodes: 131070, expected: 4*time.Minute + 52566964285},
+
+		{servers: 11, nodes: 1000000, expected: 23*time.Minute + 40454545454},
+		{servers: 19, nodes: 1000000, expected: 13*time.Minute + 42368421052},
+	}
+
+	for _, tc := range testCases {
+		m := &Manager{clusterInfo: &fauxSerf{numNodes: tc.nodes}, rebalanceTimer: time.NewTimer(0)}
+		m.saveServerList(serverList{servers: make([]*metadata.Server, tc.servers)})
+
+		delay := m.refreshServerRebalanceTimer()
+		if tc.expected != 0 {
+			assert.Equal(t, tc.expected, delay, "nodes=%d, servers=%d", tc.nodes, tc.servers)
+			continue
+		}
+
+		min := 2 * time.Minute
+		max := 3 * time.Minute
+		if delay < min {
+			t.Errorf("nodes=%d, servers=%d, expected >%v, actual %v", tc.nodes, tc.servers, min, delay)
+		}
+		if delay > max {
+			t.Errorf("nodes=%d, servers=%d, expected <%v, actual %v", tc.nodes, tc.servers, max, delay)
+		}
 	}
 }
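
A note on where the expected values in the new table come from: once the rate-scaled interval exceeds the jittered rebalance floor, the delay works out to nodes / (64 × servers) seconds, so the test can assert it exactly; below that threshold the delay is the floor itself, which is why the remaining cases only check the two-to-three-minute band. The following is a minimal, self-contained sketch of that arithmetic, assuming the implementation under test still computes the delay via lib.RateScaledInterval over a floor of 2 minutes plus up to 1 minute of random stagger; rateScaledInterval below mirrors the lib helper rather than importing it, and the 64 connections-per-second-per-server budget is inferred from the table (e.g. 11521 / 64 = 180.015625s).

package main

import (
	"fmt"
	"time"
)

// rateScaledInterval mirrors lib.RateScaledInterval: the time needed for
// n nodes to reconnect without exceeding rate connections per second,
// clamped below by min.
func rateScaledInterval(rate float64, min time.Duration, n int) time.Duration {
	interval := time.Duration(float64(time.Second) * float64(n) / rate)
	if interval < min {
		return min
	}
	return interval
}

func main() {
	// Inferred from the expected values above; stands in for
	// newRebalanceConnsPerSecPerServer in agent/router.
	const connsPerSecPerServer = 64

	// {servers: 1, nodes: 16384}: 16384 / 64 = 256s. That exceeds even the
	// largest possible jittered floor (3 minutes), so the delay is
	// deterministic and the test asserts it exactly.
	fmt.Println(rateScaledInterval(1*connsPerSecPerServer, 2*time.Minute, 16384)) // 4m16s

	// {servers: 5, nodes: 1024}: 1024 / 320 = 3.2s, under the floor, so the
	// result is the (jittered) floor itself; the test checks only the
	// [2m, 3m] band for such cases.
	fmt.Println(rateScaledInterval(5*connsPerSecPerServer, 2*time.Minute, 1024)) // 2m0s with an unjittered floor
}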