// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package cluster

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/sdk/testutil/retry"
	"github.com/hashicorp/serf/serf"

	goretry "github.com/avast/retry-go"
	"github.com/stretchr/testify/require"
	"github.com/teris-io/shortid"
	"github.com/testcontainers/testcontainers-go"
)

// Cluster provides an interface for creating and controlling a Consul cluster
// in integration tests, with agents running in containers.
// These fields are public in the event someone might want to surgically
// craft a test case.
type Cluster struct {
	Agents []Agent
	// BuildContext *BuildContext // TODO
	CACert      string
	CAKey       string
	ID          string
	Index       int
	Network     testcontainers.Network
	NetworkName string
	ScratchDir  string

	ACLEnabled     bool
	TokenBootstrap string
}

type TestingT interface {
	Logf(format string, args ...any)
	Cleanup(f func())
}

func NewN(t TestingT, conf Config, count int) (*Cluster, error) {
	var configs []Config
	for i := 0; i < count; i++ {
		configs = append(configs, conf)
	}

	return New(t, configs)
}

// New creates a Consul cluster. An agent will be started for each of the given
// configs and joined to the cluster.
//
// A cluster has its own docker network for DNS connectivity between the
// agents; every agent container is attached to that network.
//
// The provided TestingT is used to register a cleanup function to terminate
// the cluster.
func New(t TestingT, configs []Config, ports ...int) (*Cluster, error) {
	id, err := shortid.Generate()
	if err != nil {
		return nil, fmt.Errorf("could not generate cluster id: %w", err)
	}

	name := fmt.Sprintf("consul-int-cluster-%s", id)
	network, err := createNetwork(t, name)
	if err != nil {
		return nil, fmt.Errorf("could not create cluster container network: %w", err)
	}

	// Rig up one scratch dir for the cluster with auto-cleanup on test exit.
	scratchDir, err := os.MkdirTemp("", name)
	if err != nil {
		return nil, err
	}
	t.Cleanup(func() {
		_ = os.RemoveAll(scratchDir)
	})
	err = os.Chmod(scratchDir, 0777)
	if err != nil {
		return nil, err
	}

	cluster := &Cluster{
		ID:          id,
		Network:     network,
		NetworkName: name,
		ScratchDir:  scratchDir,
		ACLEnabled:  configs[0].ACLEnabled,
	}
	t.Cleanup(func() {
		_ = cluster.Terminate()
	})

	if err := cluster.Add(configs, true, ports...); err != nil {
		return nil, fmt.Errorf("could not start or join all agents: %w", err)
	}

	return cluster, nil
}
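
// The sketch below is illustrative only and is not used elsewhere in this
// package: it shows the call pattern a test would typically follow, assuming a
// Config value built elsewhere. New and NewN register t.Cleanup handlers, so
// the caller does not have to terminate the cluster explicitly.
func exampleStartCluster(t *testing.T, conf Config) *Cluster {
	// Start three identical agents and join them into a single cluster.
	cluster, err := NewN(t, conf, 3)
	require.NoError(t, err)

	// Wait for raft to elect a leader before the test proceeds.
	WaitForLeader(t, cluster, cluster.APIClient(0))
	return cluster
}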

func (c *Cluster) AddN(conf Config, count int, join bool) error {
	var configs []Config
	for i := 0; i < count; i++ {
		configs = append(configs, conf)
	}
	return c.Add(configs, join)
}

// Add starts agents with the given configurations and joins them to the
// existing cluster.
func (c *Cluster) Add(configs []Config, serfJoin bool, ports ...int) error {
	if c.Index == 0 && !serfJoin {
		return fmt.Errorf("the first call to Cluster.Add must have serfJoin=true")
	}

	var agents []Agent
	for idx, conf := range configs {
		// Each agent gets its own area in the cluster scratch.
		conf.ScratchDir = filepath.Join(c.ScratchDir, strconv.Itoa(c.Index))
		if err := os.MkdirAll(conf.ScratchDir, 0777); err != nil {
			return fmt.Errorf("container %d making scratchDir: %w", idx, err)
		}
		if err := os.Chmod(conf.ScratchDir, 0777); err != nil {
			return fmt.Errorf("container %d perms on scratchDir: %w", idx, err)
		}

		var n Agent

		// Retry creating the container every ten seconds. With local development,
		// we've found that this "port not found" error occurs when runs happen
		// too close together.
		if err := goretry.Do(
			func() (err error) {
				n, err = NewConsulContainer(
					context.Background(),
					conf,
					c,
					ports...,
				)
				return err
			},
			goretry.Delay(10*time.Second),
			goretry.RetryIf(func(err error) bool {
				return strings.Contains(err.Error(), "port not found")
			}),
		); err != nil {
			return fmt.Errorf("container %d creating: %s", idx, err)
		}

		agents = append(agents, n)
		c.Index++
	}

	if serfJoin {
		if err := c.Join(agents); err != nil {
			return fmt.Errorf("could not join agents to cluster: %w", err)
		}
	} else {
		if err := c.JoinExternally(agents); err != nil {
			return fmt.Errorf("could not join agents to cluster: %w", err)
		}
	}

	return nil
}

// Join joins the given agents to the cluster.
func (c *Cluster) Join(agents []Agent) error {
	return c.join(agents, false)
}

func (c *Cluster) JoinExternally(agents []Agent) error {
	return c.join(agents, true)
}

func (c *Cluster) join(agents []Agent, skipSerfJoin bool) error {
	if len(agents) == 0 {
		return nil // no change
	}

	if len(c.Agents) == 0 {
		// If ACLs are enabled, generate the bootstrap token at the first agent.
		if c.ACLEnabled {
			var (
				output string
				err    error
			)
			// Retry since the agent needs to start the ACL system first.
			err = goretry.Do(
				func() error {
					output, err = agents[0].Exec(context.Background(), []string{"consul", "acl", "bootstrap"})
					if err != nil {
						return err
					}
					return nil
				},
				goretry.Delay(time.Second*1),
			)
			if err != nil {
				return fmt.Errorf("error generating the bootstrap token, %s", err)
			}
			c.TokenBootstrap, err = extractSecretIDFrom(output)
			if err != nil {
				return err
			}
			fmt.Println("Cluster bootstrap token:", c.TokenBootstrap)

			// The first node's default client needs to be updated after the
			// bootstrap token is created.
			_, err = agents[0].NewClient(c.TokenBootstrap, true)
			if err != nil {
				return fmt.Errorf("error updating the first node's client, %s", err)
			}
		}

		// Join the rest to the first.
		c.Agents = append(c.Agents, agents[0])
		return c.join(agents[1:], skipSerfJoin)
	}

	// Always join to the original server.
	joinAddr := c.Agents[0].GetIP()

	for _, n := range agents {
		if !skipSerfJoin {
			// Retry in case the agent token is being updated at the agent.
			err := goretry.Do(
				func() error {
					err := n.GetClient().Agent().Join(joinAddr, false)
					if err != nil {
						return fmt.Errorf("could not join agent %s to %s: %w", n.GetName(), joinAddr, err)
					}
					return nil
				},
			)
			if err != nil {
				return err
			}
		}
		c.Agents = append(c.Agents, n)
	}
	return nil
}

func (c *Cluster) CreateAgentToken(datacenter string, agentName string) (string, error) {
	output, err := c.Agents[0].Exec(context.Background(), []string{"consul", "acl", "token", "create", "-description", "\"agent token\"",
		"-token", c.TokenBootstrap,
		"-node-identity", fmt.Sprintf("%s:%s", agentName, datacenter)})
	if err != nil {
		return "", fmt.Errorf("error generating agent token, %s", err)
	}
	secretID, err := extractSecretIDFrom(output)
	return secretID, err
}

// Remove instructs the agent to leave the cluster then removes it
// from the cluster Agent list.
func (c *Cluster) Remove(n Agent) error {
	err := n.GetClient().Agent().Leave()
	if err != nil {
		return fmt.Errorf("could not remove agent %s: %w", n.GetName(), err)
	}

	foundIdx := -1
	for idx, this := range c.Agents {
		if this == n {
			foundIdx = idx
			break
		}
	}

	if foundIdx == -1 {
		return errors.New("could not find agent in cluster")
	}

	c.Agents = append(c.Agents[:foundIdx], c.Agents[foundIdx+1:]...)
	return nil
}

// StandardUpgrade upgrades a running consul cluster following the steps from
//
// https://developer.hashicorp.com/consul/docs/upgrading#standard-upgrades
//
// - takes a snapshot (which is discarded)
// - terminates and restarts each agent on the new version of consul, rejoining it to the cluster
//
// NOTE: we pass in a *testing.T but this method also returns an error. Within
// this method, when in doubt, return an error. A testing assertion should be
// saved only for use in t.Cleanup or in a few of the retry-until-working
// helpers below.
//
// This lets us have tests that assert that an upgrade will fail.
func (c *Cluster) StandardUpgrade(t *testing.T, ctx context.Context, targetVersion string) error {
	var err error
	// We take a snapshot, but note that we currently do nothing with it.
	if c.ACLEnabled {
		_, err = c.Agents[0].Exec(context.Background(), []string{"consul", "snapshot", "save",
			"-token", c.TokenBootstrap, "backup.snap"})
	} else {
		_, err = c.Agents[0].Exec(context.Background(), []string{"consul", "snapshot", "save", "backup.snap"})
	}
	if err != nil {
		return fmt.Errorf("error taking the snapshot: %s", err)
	}

	// Upgrade individual agents to the target version in the following order:
	// 1. followers
	// 2. leader
	// 3. clients (TODO)

	// Grab a client connected to the leader, which we will upgrade last so our
	// connection remains ok.
	leader, err := c.Leader()
	if err != nil {
		return err
	}
	t.Logf("Leader name: %s", leader.GetName())

	followers, err := c.Followers()
	if err != nil {
		return err
	}
	t.Logf("The number of followers = %d", len(followers))

	// NOTE: we only assert the number of agents in the default partition.
	// TODO: add partition to the cluster struct to assert partition size
	clusterSize := 0
	for _, agent := range c.Agents {
		if agent.GetPartition() == "" || agent.GetPartition() == "default" {
			clusterSize++
		}
	}
	t.Logf("The number of agents in default partition = %d", clusterSize)

	upgradeFn := func(agent Agent, clientFactory func() (*api.Client, error)) error {
		config := agent.GetConfig()
		config.Version = targetVersion

		if agent.IsServer() {
			// You only ever need bootstrap settings the FIRST time, so we do not need
			// them again.
			config.ConfigBuilder.Unset("bootstrap")
		} else {
			// If we upgrade the clients fast enough, membership might not be
			// gossiped to all of the clients to persist into their serf
			// snapshot, so force them to rejoin the normal way on restart.
			config.ConfigBuilder.Set("retry_join", []string{"agent-0"})
		}

		newJSON, err := json.MarshalIndent(config.ConfigBuilder, "", " ")
		if err != nil {
			return fmt.Errorf("could not re-generate json config: %w", err)
		}
		config.JSON = string(newJSON)
		t.Logf("Upgraded cluster config for %q:\n%s", agent.GetName(), config.JSON)

		err = agent.Upgrade(context.Background(), config)
		if err != nil {
			return err
		}

		client, err := clientFactory()
		if err != nil {
			return err
		}

		// Wait until the agent rejoins and a leader is elected; skip agents
		// outside the default partition.
		if agent.GetPartition() == "" || agent.GetPartition() == "default" {
			WaitForMembers(t, client, clusterSize)
		}
		WaitForLeader(t, c, client)

		return nil
	}

	for _, agent := range followers {
		t.Logf("Upgrade follower: %s", agent.GetName())

		err := upgradeFn(agent, func() (*api.Client, error) {
			return leader.GetClient(), nil
		})
		if err != nil {
			return fmt.Errorf("error upgrading follower %q: %w", agent.GetName(), err)
		}
	}

	t.Logf("Upgrade leader: %s", leader.GetAgentName())
	err = upgradeFn(leader, func() (*api.Client, error) {
		if len(followers) > 0 {
			return followers[0].GetClient(), nil
		}
		return leader.GetClient(), nil
	})
	if err != nil {
		return fmt.Errorf("error upgrading leader %q: %w", leader.GetName(), err)
	}

	clientAgents := c.Clients()
	for _, agent := range clientAgents {
		t.Logf("Upgrade client agent: %s", agent.GetName())

		err = upgradeFn(agent, func() (*api.Client, error) {
			leader, err = c.Leader()
			if err != nil {
				return nil, err
			}
			return leader.GetClient(), nil
		})
		if err != nil {
			return fmt.Errorf("error upgrading client agent %q: %w", agent.GetName(), err)
		}
	}

	t.Log("Update completed\n")
	return nil
}
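
// Illustrative sketch only (not used by the package): an upgrade test would
// typically start a cluster on an initial version and then roll it to a target
// version with StandardUpgrade. The version string here is a placeholder.
func exampleUpgradeFlow(t *testing.T, cluster *Cluster) {
	err := cluster.StandardUpgrade(t, context.Background(), "1.15.0")
	require.NoError(t, err)

	// Membership and leadership should be intact after the rolling upgrade.
	WaitForLeader(t, cluster, cluster.APIClient(0))
}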

// Terminate will attempt to terminate all agents in the cluster and its network. If any agent
// termination fails, Terminate will abort and return an error.
func (c *Cluster) Terminate() error {
	for _, n := range c.Agents {
		err := n.Terminate()
		if err != nil {
			return err
		}
	}

	// Testcontainers seems to clean up the network on its own; triggering the
	// removal now would throw an error while the containers are still shutting down.
	// if err := c.Network.Remove(context.Background()); err != nil {
	// 	return fmt.Errorf("could not terminate cluster network %s: %w", c.ID, err)
	// }
	return nil
}

// Leader returns the cluster leader agent, or an error if no leader is
// available.
func (c *Cluster) Leader() (Agent, error) {
	if len(c.Agents) < 1 {
		return nil, fmt.Errorf("no agent available")
	}
	n0 := c.Agents[0]

	leaderAdd, err := getLeader(n0.GetClient())
	if err != nil {
		return nil, err
	}

	for _, n := range c.Agents {
		addr := n.GetIP()
		if strings.Contains(leaderAdd, addr) {
			return n, nil
		}
	}
	return nil, fmt.Errorf("leader not found")
}

func getLeader(client *api.Client) (string, error) {
	leaderAdd, err := client.Status().Leader()
	if err != nil {
		return "", fmt.Errorf("could not query leader: %w", err)
	}
	if leaderAdd == "" {
		return "", errors.New("no leader available")
	}
	return leaderAdd, nil
}

// Followers returns the cluster's follower servers.
func (c *Cluster) Followers() ([]Agent, error) {
	var followers []Agent

	leader, err := c.Leader()
	if err != nil {
		return nil, fmt.Errorf("could not determine leader: %w", err)
	}

	for _, n := range c.Agents {
		if n != leader && n.IsServer() {
			followers = append(followers, n)
		}
	}
	return followers, nil
}

// Servers returns the handles to the server agents.
func (c *Cluster) Servers() []Agent {
	var servers []Agent

	for _, n := range c.Agents {
		if n.IsServer() {
			servers = append(servers, n)
		}
	}
	return servers
}

// ClientsInPartition returns the handles to the client agents in the provided partition.
func (c *Cluster) ClientsInPartition(partition string) []Agent {
	var clients []Agent

	for _, n := range c.Agents {
		if n.IsServer() {
			continue
		}

		if n.GetPartition() == partition {
			clients = append(clients, n)
		}
	}
	return clients
}

// Clients returns the handles to the client agents in all partitions.
func (c *Cluster) Clients() []Agent {
	var clients []Agent

	for _, n := range c.Agents {
		if !n.IsServer() {
			clients = append(clients, n)
		}
	}
	return clients
}

func (c *Cluster) APIClient(index int) *api.Client {
	nodes := c.Clients()
	if len(nodes) == 0 {
		nodes = c.Servers()
		if len(nodes) == 0 {
			return nil
		}
	}
	return nodes[0].GetClient()
}

// GetClient returns a consul API client to the node if node is provided.
// Otherwise, GetClient returns the API client to the first node of either
// server or client agent.
//
// TODO: see about switching to just APIClient() calls instead?
func (c *Cluster) GetClient(node Agent, isServer bool) (*api.Client, error) {
	if node != nil {
		return node.GetClient(), nil
	}

	nodes := c.Clients()
	if isServer {
		nodes = c.Servers()
	}

	if len(nodes) <= 0 {
		return nil, errors.New("no nodes")
	}

	return nodes[0].GetClient(), nil
}

// PeerWithCluster establishes peering with the acceptor cluster.
func (c *Cluster) PeerWithCluster(acceptingClient *api.Client, acceptingPeerName string, dialingPeerName string) error {
	dialingClient := c.APIClient(0)

	generateReq := api.PeeringGenerateTokenRequest{
		PeerName: acceptingPeerName,
	}
	generateRes, _, err := acceptingClient.Peerings().GenerateToken(context.Background(), generateReq, &api.WriteOptions{})
	if err != nil {
		return fmt.Errorf("error generating peering token: %w", err)
	}

	establishReq := api.PeeringEstablishRequest{
		PeerName:     dialingPeerName,
		PeeringToken: generateRes.PeeringToken,
	}
	_, _, err = dialingClient.Peerings().Establish(context.Background(), establishReq, &api.WriteOptions{})
	if err != nil {
		return fmt.Errorf("error establishing peering: %w", err)
	}

	return nil
}
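
// Illustrative sketch only: peering two clusters created by this package. The
// peer names are arbitrary labels chosen by the test, not values required by
// the Consul API.
func examplePeerClusters(t *testing.T, accepting, dialing *Cluster) {
	err := dialing.PeerWithCluster(accepting.APIClient(0), "accepting-to-dialing", "dialing-to-accepting")
	require.NoError(t, err)
}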

const retryTimeout = 90 * time.Second
const retryFrequency = 500 * time.Millisecond

func LongFailer() *retry.Timer {
	return &retry.Timer{Timeout: retryTimeout, Wait: retryFrequency}
}

func WaitForLeader(t *testing.T, cluster *Cluster, client *api.Client) {
	retry.RunWith(LongFailer(), t, func(r *retry.R) {
		leader, err := cluster.Leader()
		require.NoError(r, err)
		require.NotEmpty(r, leader)
	})

	if client != nil {
		waitForLeaderFromClient(t, client)
	}
}

func waitForLeaderFromClient(t *testing.T, client *api.Client) {
	retry.RunWith(LongFailer(), t, func(r *retry.R) {
		leader, err := getLeader(client)
		require.NoError(r, err)
		require.NotEmpty(r, leader)
	})
}

func WaitForMembers(t *testing.T, client *api.Client, expectN int) {
	retry.RunWith(LongFailer(), t, func(r *retry.R) {
		members, err := client.Agent().Members(false)
		require.NoError(r, err)

		var activeMembers int
		for _, member := range members {
			if serf.MemberStatus(member.Status) == serf.StatusAlive {
				activeMembers++
			}
		}
		require.Equal(r, expectN, activeMembers)
	})
}
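
// Illustrative sketch only: growing an existing cluster and waiting for the
// new members to show up as alive in the serf member list.
func exampleGrowCluster(t *testing.T, cluster *Cluster, conf Config) {
	require.NoError(t, cluster.AddN(conf, 2, true))

	client := cluster.APIClient(0)
	WaitForMembers(t, client, len(cluster.Agents))
	WaitForLeader(t, cluster, client)
}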

func (c *Cluster) ConfigEntryWrite(entry api.ConfigEntry) error {
	client, _ := c.GetClient(nil, true)

	entries := client.ConfigEntries()
	written, _, err := entries.Set(entry, nil)
	if err != nil {
		return fmt.Errorf("error setting config entry: %v", err)
	}
	if !written {
		return fmt.Errorf("config entry not updated: %s/%s", entry.GetKind(), entry.GetName())
	}
	return err
}
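
// Illustrative sketch only: writing a centralized config entry through the
// cluster. api.ProxyConfigEntry and the proxy-defaults constants come from the
// Consul API package; the Config content is just an example value.
func exampleWriteProxyDefaults(c *Cluster) error {
	return c.ConfigEntryWrite(&api.ProxyConfigEntry{
		Kind: api.ProxyDefaults,
		Name: api.ProxyConfigGlobal,
		Config: map[string]interface{}{
			"protocol": "http",
		},
	})
}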

func (c *Cluster) ConfigEntryDelete(entry api.ConfigEntry) error {
	client, err := c.GetClient(nil, true)
	if err != nil {
		return err
	}

	entries := client.ConfigEntries()
	_, err = entries.Delete(entry.GetKind(), entry.GetName(), nil)
	if err != nil {
		return fmt.Errorf("error deleting config entry: %v", err)
	}
	return err
}
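
// Note: extractSecretIDFrom below assumes CLI output in the form printed by
// commands such as `consul acl bootstrap`, which includes a line like
// "SecretID:     <token>" (this sample line is illustrative).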

func extractSecretIDFrom(tokenOutput string) (string, error) {
	lines := strings.Split(tokenOutput, "\n")
	for _, line := range lines {
		if strings.Contains(line, "SecretID") {
			secretIDtoken := strings.Split(line, ":")
			return strings.TrimSpace(secretIDtoken[1]), nil
		}
	}
	return "", fmt.Errorf("could not find SecretID in token output")
}