// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package agent

import (
	"bytes"
	"context"
	"crypto/md5"
	"crypto/rand"
	"crypto/tls"
	"crypto/x509"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"github.com/hashicorp/consul/agent/grpc-external/limiter"
	"github.com/hashicorp/consul/agent/proxycfg"
	"github.com/hashicorp/consul/agent/proxycfg-sources/local"
	"github.com/hashicorp/consul/agent/xds"
	proxytracker "github.com/hashicorp/consul/internal/mesh/proxy-tracker"
	mathrand "math/rand"
	"net"
	"net/http"
	"net/http/httptest"
	"net/url"
	"os"
	"path"
	"path/filepath"
	"reflect"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"github.com/google/tcpproxy"
	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/hcp-scada-provider/capability"
	"github.com/hashicorp/serf/coordinate"
	"github.com/hashicorp/serf/serf"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"golang.org/x/sync/errgroup"
	"golang.org/x/time/rate"
	"google.golang.org/grpc"
	"google.golang.org/protobuf/encoding/protojson"
	"gopkg.in/square/go-jose.v2/jwt"

	"github.com/hashicorp/consul/agent/cache"
	cachetype "github.com/hashicorp/consul/agent/cache-types"
	"github.com/hashicorp/consul/agent/checks"
	"github.com/hashicorp/consul/agent/config"
	"github.com/hashicorp/consul/agent/connect"
	"github.com/hashicorp/consul/agent/consul"
	"github.com/hashicorp/consul/agent/hcp"
	"github.com/hashicorp/consul/agent/hcp/scada"
	"github.com/hashicorp/consul/agent/leafcert"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/agent/token"
	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/internal/go-sso/oidcauth/oidcauthtest"
	"github.com/hashicorp/consul/internal/resource"
	"github.com/hashicorp/consul/ipaddr"
	"github.com/hashicorp/consul/lib"
	"github.com/hashicorp/consul/proto/private/pbautoconf"
	"github.com/hashicorp/consul/sdk/freeport"
	"github.com/hashicorp/consul/sdk/testutil"
	"github.com/hashicorp/consul/sdk/testutil/retry"
	"github.com/hashicorp/consul/testrpc"
	"github.com/hashicorp/consul/tlsutil"
	"github.com/hashicorp/consul/types"
)

func getService(a *TestAgent, id string) *structs.NodeService {
	return a.State.Service(structs.NewServiceID(id, nil))
}

func getCheck(a *TestAgent, id types.CheckID) *structs.HealthCheck {
	return a.State.Check(structs.NewCheckID(id, nil))
}

func requireServiceExists(t *testing.T, a *TestAgent, id string) *structs.NodeService {
	t.Helper()
	svc := getService(a, id)
	require.NotNil(t, svc, "missing service %q", id)
	return svc
}

func requireServiceMissing(t *testing.T, a *TestAgent, id string) {
	t.Helper()
	require.Nil(t, getService(a, id), "have service %q (expected missing)", id)
}

func requireCheckExists(t *testing.T, a *TestAgent, id types.CheckID) *structs.HealthCheck {
	t.Helper()
	chk := getCheck(a, id)
	require.NotNil(t, chk, "missing check %q", id)
	return chk
}

func requireCheckMissing(t *testing.T, a *TestAgent, id types.CheckID) {
	t.Helper()
	require.Nil(t, getCheck(a, id), "have check %q (expected missing)", id)
}

func requireCheckExistsMap(t *testing.T, m interface{}, id types.CheckID) {
	t.Helper()
	require.Contains(t, m, structs.NewCheckID(id, nil), "missing check %q", id)
}

func requireCheckMissingMap(t *testing.T, m interface{}, id types.CheckID) {
	t.Helper()
	require.NotContains(t, m, structs.NewCheckID(id, nil), "have check %q (expected missing)", id)
}

func TestAgent_MultiStartStop(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	for i := 0; i < 10; i++ {
		t.Run("", func(t *testing.T) {
			t.Parallel()
			a := NewTestAgent(t, "")
			time.Sleep(250 * time.Millisecond)
			a.Shutdown()
		})
	}
}

func TestAgent_ConnectClusterIDConfig(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	tests := []struct {
		name          string
		hcl           string
		wantClusterID string
		wantErr       bool
	}{
		{
			name:          "default TestAgent has fixed cluster id",
			hcl:           "",
			wantClusterID: connect.TestClusterID,
		},
		{
			name:          "no cluster ID specified sets to test ID",
			hcl:           "connect { enabled = true }",
			wantClusterID: connect.TestClusterID,
		},
		{
			name: "non-UUID cluster_id is fatal",
			hcl: `connect {
				enabled = true
				ca_config {
					cluster_id = "fake-id"
				}
			}`,
			wantClusterID: "",
			wantErr:       true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			a := TestAgent{HCL: tt.hcl}
			err := a.Start(t)
			if tt.wantErr {
				if err == nil {
					t.Fatal("expected error, got nil")
				}
				return // don't run the rest of the test
			}
			if !tt.wantErr && err != nil {
				t.Fatal(err)
			}
			defer a.Shutdown()

			cfg := a.consulConfig()
			assert.Equal(t, tt.wantClusterID, cfg.CAConfig.ClusterID)
		})
	}
}

func TestAgent_StartStop(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	a := NewTestAgent(t, "")
	defer a.Shutdown()

	if err := a.Leave(); err != nil {
		t.Fatalf("err: %v", err)
	}
	if err := a.Shutdown(); err != nil {
		t.Fatalf("err: %v", err)
	}

	select {
	case <-a.ShutdownCh():
	default:
		t.Fatalf("should be closed")
	}
}

func TestAgent_RPCPing(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	a := NewTestAgent(t, "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	var out struct{}
	if err := a.RPC(context.Background(), "Status.Ping", struct{}{}, &out); err != nil {
		t.Fatalf("err: %v", err)
	}
}

func TestAgent_TokenStore(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	a := NewTestAgent(t, `
		acl {
			tokens {
				default = "user"
				agent = "agent"
				agent_recovery = "recovery"
			}
		}
	`)
	defer a.Shutdown()

	if got, want := a.tokens.UserToken(), "user"; got != want {
		t.Fatalf("got %q want %q", got, want)
	}
	if got, want := a.tokens.AgentToken(), "agent"; got != want {
		t.Fatalf("got %q want %q", got, want)
	}
	if got, want := a.tokens.IsAgentRecoveryToken("recovery"), true; got != want {
		t.Fatalf("got %v want %v", got, want)
	}
}

func TestAgent_ReconnectConfigSettings(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	func() {
		a := NewTestAgent(t, "")
		defer a.Shutdown()

		lan := a.consulConfig().SerfLANConfig.ReconnectTimeout
		if lan != 3*24*time.Hour {
			t.Fatalf("bad: %s", lan.String())
		}

		wan := a.consulConfig().SerfWANConfig.ReconnectTimeout
		if wan != 3*24*time.Hour {
			t.Fatalf("bad: %s", wan.String())
		}
	}()

	func() {
		a := NewTestAgent(t, `
			reconnect_timeout = "24h"
			reconnect_timeout_wan = "36h"
		`)
		defer a.Shutdown()

		lan := a.consulConfig().SerfLANConfig.ReconnectTimeout
		if lan != 24*time.Hour {
			t.Fatalf("bad: %s", lan.String())
		}

		wan := a.consulConfig().SerfWANConfig.ReconnectTimeout
		if wan != 36*time.Hour {
			t.Fatalf("bad: %s", wan.String())
		}
	}()
}

func TestAgent_HTTPMaxHeaderBytes(t *testing.T) {
	tests := []struct {
		name                 string
		maxHeaderBytes       int
		expectedHTTPResponse int
	}{
		{
			"max header bytes 1 returns 431 http response when too large headers are sent",
			1,
			431,
		},
		{
			"max header bytes 0 returns 200 http response, as the http.DefaultMaxHeaderBytes size of 1MB is used",
			0,
			200,
		},
		{
			"negative maxHeaderBytes returns 200 http response, as the http.DefaultMaxHeaderBytes size of 1MB is used",
			-10,
			200,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			caConfig := tlsutil.Config{}
			tlsConf, err := tlsutil.NewConfigurator(caConfig, hclog.New(nil))
			require.NoError(t, err)

			bd := BaseDeps{
				Deps: consul.Deps{
					Logger:          hclog.NewInterceptLogger(nil),
					Tokens:          new(token.Store),
					TLSConfigurator: tlsConf,
					GRPCConnPool:    &fakeGRPCConnPool{},
					Registry:        resource.NewRegistry(),
				},
				RuntimeConfig: &config.RuntimeConfig{
					HTTPAddrs: []net.Addr{
						&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: freeport.GetOne(t)},
					},
					HTTPMaxHeaderBytes: tt.maxHeaderBytes,
				},
				Cache:  cache.New(cache.Options{}),
				NetRPC: &LazyNetRPC{},
			}

			bd.LeafCertManager = leafcert.NewManager(leafcert.Deps{
				CertSigner:  leafcert.NewNetRPCCertSigner(bd.NetRPC),
				RootsReader: leafcert.NewCachedRootsReader(bd.Cache, "dc1"),
				Config:      leafcert.Config{},
			})

			cfg := config.RuntimeConfig{BuildDate: time.Date(2000, 1, 1, 0, 0, 1, 0, time.UTC)}
			bd, err = initEnterpriseBaseDeps(bd, &cfg)
			require.NoError(t, err)

			a, err := New(bd)
			mockDelegate := delegateMock{}
			mockDelegate.On("LicenseCheck").Return()
			a.delegate = &mockDelegate
			require.NoError(t, err)

			a.startLicenseManager(testutil.TestContext(t))

			srvs, err := a.listenHTTP()
			require.NoError(t, err)

			require.Equal(t, tt.maxHeaderBytes, a.config.HTTPMaxHeaderBytes)

			ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
			t.Cleanup(cancel)

			g := new(errgroup.Group)
			for _, s := range srvs {
				g.Go(s.Run)
			}

			require.Len(t, srvs, 1)

			client := &http.Client{}
			for _, s := range srvs {
				u := url.URL{Scheme: s.Protocol, Host: s.Addr.String()}
				req, err := http.NewRequest(http.MethodGet, u.String(), nil)
				require.NoError(t, err)

				// This is directly pulled from the testing of request limits in the net/http source
				// https://github.com/golang/go/blob/go1.15.3/src/net/http/serve_test.go#L2897-L2900
				var bytesPerHeader = len("header12345: val12345\r\n")
				for i := 0; i < ((tt.maxHeaderBytes+4096)/bytesPerHeader)+1; i++ {
					req.Header.Set(fmt.Sprintf("header%05d", i), fmt.Sprintf("val%05d", i))
				}

				resp, err := client.Do(req.WithContext(ctx))
				require.NoError(t, err)
				require.Equal(t, tt.expectedHTTPResponse, resp.StatusCode, "expected a '%d' http response, got '%d'", tt.expectedHTTPResponse, resp.StatusCode)
			}
		})
	}
}

// fakeGRPCConnPool is a no-op stand-in for the agent's gRPC connection pool,
// used when building BaseDeps by hand in tests.
type fakeGRPCConnPool struct{}

func (f fakeGRPCConnPool) ClientConn(_ string) (*grpc.ClientConn, error) {
	return nil, nil
}

func (f fakeGRPCConnPool) ClientConnLeader() (*grpc.ClientConn, error) {
	return nil, nil
}

func (f fakeGRPCConnPool) SetGatewayResolver(_ func(string) string) {
}

func TestAgent_ReconnectConfigWanDisabled(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	a := NewTestAgent(t, `
		ports { serf_wan = -1 }
		reconnect_timeout_wan = "36h"
	`)
	defer a.Shutdown()

	// This is also testing that we don't panic like before #4515
	require.Nil(t, a.consulConfig().SerfWANConfig)
}

func TestAgent_AddService(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Run("normal", func(t *testing.T) {
		t.Parallel()
		testAgent_AddService(t, "enable_central_service_config = false")
	})
	t.Run("service manager", func(t *testing.T) {
		t.Parallel()
		testAgent_AddService(t, "enable_central_service_config = true")
	})
}

func testAgent_AddService(t *testing.T, extraHCL string) {
	t.Helper()

	a := NewTestAgent(t, `
		node_name = "node1"
	`+extraHCL)
	defer a.Shutdown()

	duration3s, _ := time.ParseDuration("3s")
	duration10s, _ := time.ParseDuration("10s")

	tests := []struct {
		desc       string
		srv        *structs.NodeService
		wantSrv    func(ns *structs.NodeService)
		chkTypes   []*structs.CheckType
		healthChks map[string]*structs.HealthCheck
	}{
		{
			"one check",
			&structs.NodeService{
				ID:             "svcid1",
				Service:        "svcname1",
				Tags:           []string{"tag1"},
				Weights:        nil, // nil weights...
				Port:           8100,
				Locality:       &structs.Locality{Region: "us-west-1", Zone: "us-west-1a"},
				EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
			},
			// ... should be populated to avoid "IsSame" returning true during AE.
			func(ns *structs.NodeService) {
				ns.Weights = &structs.Weights{
					Passing: 1,
					Warning: 1,
				}
			},
			[]*structs.CheckType{
				{
					CheckID: "check1",
					Name:    "name1",
					TTL:     time.Minute,
					Notes:   "note1",
				},
			},
			map[string]*structs.HealthCheck{
				"check1": {
					Node:           "node1",
					CheckID:        "check1",
					Name:           "name1",
					Interval:       "",
					Timeout:        "", // these are empty because a TTL was provided
					Status:         "critical",
					Notes:          "note1",
					ServiceID:      "svcid1",
					ServiceName:    "svcname1",
					ServiceTags:    []string{"tag1"},
					Type:           "ttl",
					EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
				},
			},
		},
		{
			"one http check with interval and duration",
			&structs.NodeService{
				ID:             "svcid1",
				Service:        "svcname1",
				Tags:           []string{"tag1"},
				Weights:        nil, // nil weights...
				Port:           8100,
				EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
			},
			// ... should be populated to avoid "IsSame" returning true during AE.
			func(ns *structs.NodeService) {
				ns.Weights = &structs.Weights{
					Passing: 1,
					Warning: 1,
				}
			},
			[]*structs.CheckType{
				{
					CheckID:  "check1",
					Name:     "name1",
					HTTP:     "http://localhost:8100/",
					Interval: duration10s,
					Timeout:  duration3s,
					Notes:    "note1",
				},
			},
			map[string]*structs.HealthCheck{
				"check1": {
					Node:           "node1",
					CheckID:        "check1",
					Name:           "name1",
					Interval:       "10s",
					Timeout:        "3s",
					Status:         "critical",
					Notes:          "note1",
					ServiceID:      "svcid1",
					ServiceName:    "svcname1",
					ServiceTags:    []string{"tag1"},
					Type:           "http",
					EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
				},
			},
		},
		{
			"multiple checks",
			&structs.NodeService{
				ID:      "svcid2",
				Service: "svcname2",
				Weights: &structs.Weights{
					Passing: 2,
					Warning: 1,
				},
				Tags:           []string{"tag2"},
				Port:           8200,
				EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
			},
			nil, // No change expected
			[]*structs.CheckType{
				{
					CheckID: "check1",
					Name:    "name1",
					TTL:     time.Minute,
					Notes:   "note1",
				},
				{
					CheckID: "check-noname",
					TTL:     time.Minute,
				},
				{
					Name: "check-noid",
					TTL:  time.Minute,
				},
				{
					TTL: time.Minute,
				},
			},
			map[string]*structs.HealthCheck{
				"check1": {
					Node:           "node1",
					CheckID:        "check1",
					Name:           "name1",
					Interval:       "",
					Timeout:        "", // these are empty because a TTL was provided
					Status:         "critical",
					Notes:          "note1",
					ServiceID:      "svcid2",
					ServiceName:    "svcname2",
					ServiceTags:    []string{"tag2"},
					Type:           "ttl",
					EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
				},
				"check-noname": {
					Node:           "node1",
					CheckID:        "check-noname",
					Name:           "Service 'svcname2' check",
					Interval:       "",
					Timeout:        "", // these are empty because a TTL was provided
					Status:         "critical",
					ServiceID:      "svcid2",
					ServiceName:    "svcname2",
					ServiceTags:    []string{"tag2"},
					Type:           "ttl",
					EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
				},
				"service:svcid2:3": {
					Node:           "node1",
					CheckID:        "service:svcid2:3",
					Name:           "check-noid",
					Interval:       "",
					Timeout:        "", // these are empty because a TTL was provided
					Status:         "critical",
					ServiceID:      "svcid2",
					ServiceName:    "svcname2",
					ServiceTags:    []string{"tag2"},
					Type:           "ttl",
					EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
				},
				"service:svcid2:4": {
					Node:           "node1",
					CheckID:        "service:svcid2:4",
					Name:           "Service 'svcname2' check",
					Interval:       "",
					Timeout:        "", // these are empty because a TTL was provided
					Status:         "critical",
					ServiceID:      "svcid2",
					ServiceName:    "svcname2",
					ServiceTags:    []string{"tag2"},
					Type:           "ttl",
					EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.desc, func(t *testing.T) {
			// check the service registration
			t.Run(tt.srv.ID, func(t *testing.T) {
				err := a.addServiceFromSource(tt.srv, tt.chkTypes, false, "", ConfigSourceLocal)
				if err != nil {
					t.Fatalf("err: %v", err)
				}

				got := getService(a, tt.srv.ID)
				// Make a copy since tt.srv still points to the one in memory in the
				// local state, so changing it is a tautology!
				want := *tt.srv
				if tt.wantSrv != nil {
					tt.wantSrv(&want)
				}
				require.Equal(t, &want, got)
				require.True(t, got.IsSame(&want))
			})

			// check the health checks
			for k, v := range tt.healthChks {
				t.Run(k, func(t *testing.T) {
					got := getCheck(a, types.CheckID(k))
					require.Equal(t, v, got)
				})
			}

			// check the ttl checks
			for k := range tt.healthChks {
				t.Run(k+" ttl", func(t *testing.T) {
					chk := a.checkTTLs[structs.NewCheckID(types.CheckID(k), nil)]
					if chk == nil {
						t.Fatal("got nil want TTL check")
					}
					if got, want := string(chk.CheckID.ID), k; got != want {
						t.Fatalf("got CheckID %v want %v", got, want)
					}
					if got, want := chk.TTL, time.Minute; got != want {
						t.Fatalf("got TTL %v want %v", got, want)
					}
				})
			}
		})
	}
}

// addServiceFromSource is a test helper that exists to maintain an old function
// signature that was used in many tests.
// Deprecated: use AddService
func (a *Agent) addServiceFromSource(service *structs.NodeService, chkTypes []*structs.CheckType, persist bool, token string, source configSource) error {
	return a.AddService(AddServiceRequest{
		Service:               service,
		chkTypes:              chkTypes,
		persist:               persist,
		token:                 token,
		replaceExistingChecks: false,
		Source:                source,
	})
}

func TestAgent_AddServices_AliasUpdateCheckNotReverted(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Run("normal", func(t *testing.T) {
		t.Parallel()
		testAgent_AddServices_AliasUpdateCheckNotReverted(t, "enable_central_service_config = false")
	})
	t.Run("service manager", func(t *testing.T) {
		t.Parallel()
		testAgent_AddServices_AliasUpdateCheckNotReverted(t, "enable_central_service_config = true")
	})
}

func testAgent_AddServices_AliasUpdateCheckNotReverted(t *testing.T, extraHCL string) {
	t.Helper()

	a := NewTestAgent(t, `
		node_name = "node1"
	`+extraHCL)
	defer a.Shutdown()

	// It's tricky to get an UpdateCheck call to be timed properly so it lands
	// right in the middle of an addServiceInternal call so we cheat a bit and
	// rely upon alias checks to do that work for us. We add enough services
	// that probabilistically one of them is going to end up properly in the
	// critical section.
	//
	// The first number I picked here (10) surprisingly failed every time prior
	// to PR #6144 solving the underlying problem.
	const numServices = 10

	services := make([]*structs.ServiceDefinition, numServices)
	checkIDs := make([]types.CheckID, numServices)
	services[0] = &structs.ServiceDefinition{
		ID:     "fake",
		Name:   "fake",
		Port:   8080,
		Checks: []*structs.CheckType{},
	}
	for i := 1; i < numServices; i++ {
		name := fmt.Sprintf("web-%d", i)

		services[i] = &structs.ServiceDefinition{
			ID:   name,
			Name: name,
			Port: 8080 + i,
			Checks: []*structs.CheckType{
				{
					Name:         "alias-for-fake-service",
					AliasService: "fake",
				},
			},
		}

		checkIDs[i] = types.CheckID("service:" + name)
	}

	// Add all of the services quickly as you might do from config file snippets.
	for _, service := range services {
		ns := service.NodeService()

		chkTypes, err := service.CheckTypes()
		require.NoError(t, err)

		require.NoError(t, a.addServiceFromSource(ns, chkTypes, false, service.Token, ConfigSourceLocal))
	}

	retry.Run(t, func(r *retry.R) {
		gotChecks := a.State.Checks(nil)
		for id, check := range gotChecks {
			require.Equal(r, "passing", check.Status, "check %q is wrong", id)
			require.Equal(r, "No checks found.", check.Output, "check %q is wrong", id)
		}
	})
}

// test_createAlias registers a service with the given alias check and returns
// a retry func that verifies the check reaches expectedResult.
func test_createAlias(t *testing.T, agent *TestAgent, chk *structs.CheckType, expectedResult string) func(r *retry.R) {
	t.Helper()
	serviceNum := mathrand.Int()
	srv := &structs.NodeService{
		Service: fmt.Sprintf("serviceAlias-%d", serviceNum),
		Tags:    []string{"tag1"},
		Port:    8900 + serviceNum,
	}
	if srv.ID == "" {
		srv.ID = fmt.Sprintf("serviceAlias-%d", serviceNum)
	}
	chk.Status = api.HealthWarning
	if chk.CheckID == "" {
		chk.CheckID = types.CheckID(fmt.Sprintf("check-%d", serviceNum))
	}
	err := agent.addServiceFromSource(srv, []*structs.CheckType{chk}, false, "", ConfigSourceLocal)
	assert.NoError(t, err)
	return func(r *retry.R) {
		t.Helper()
		found := false
		for _, c := range agent.State.CheckStates(structs.WildcardEnterpriseMetaInDefaultPartition()) {
			if c.Check.CheckID == chk.CheckID {
				found = true
				assert.Equal(t, expectedResult, c.Check.Status, "Check state should be %s, was %s in %#v", expectedResult, c.Check.Status, c.Check)
				srvID := structs.NewServiceID(srv.ID, structs.WildcardEnterpriseMetaInDefaultPartition())
				if err := agent.Agent.State.RemoveService(srvID); err != nil {
					fmt.Println("[DEBUG] Failed to remove service", srvID, ", err:=", err)
				}
				fmt.Println("[DEBUG] Service Removed", srvID, ", err:=", err)
				break
			}
		}
		assert.True(t, found)
	}
}

// TestAgent_CheckAliasRPC tests that an alias check is properly synced both
// remotely and locally.
// It contains a few hacks such as unlockIndexOnNode because the watch performed
// in CheckAlias.runQuery() waits for 1 min, so shutting down the agent might take
// time. So, we ensure the agent updates the index regularly.
func TestAgent_CheckAliasRPC(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Helper()

	a := NewTestAgent(t, `
		node_name = "node1"
	`)

	srv := &structs.NodeService{
		ID:      "svcid1",
		Service: "svcname1",
		Tags:    []string{"tag1"},
		Port:    8100,
	}
	unlockIndexOnNode := func() {
		// Make sure we don't block and keep bumping the agent's index
		srv.Tags = []string{fmt.Sprintf("tag-%s", time.Now())}
		assert.NoError(t, a.waitForUp())
		err := a.addServiceFromSource(srv, []*structs.CheckType{}, false, "", ConfigSourceLocal)
		assert.NoError(t, err)
	}
	shutdownAgent := func() {
		// This makes sure remote alias checks won't stay blocked for the full 1 minute
		unlockIndexOnNode()
		fmt.Println("[DEBUG] STARTING shutdown for TestAgent_CheckAliasRPC", time.Now())
		go a.Shutdown()
		unlockIndexOnNode()
		fmt.Println("[DEBUG] DONE shutdown for TestAgent_CheckAliasRPC", time.Now())
	}
	defer shutdownAgent()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	assert.NoError(t, a.waitForUp())
	err := a.addServiceFromSource(srv, []*structs.CheckType{}, false, "", ConfigSourceLocal)
	assert.NoError(t, err)

	retry.Run(t, func(r *retry.R) {
		t.Helper()
		var args structs.NodeSpecificRequest
		args.Datacenter = "dc1"
		args.Node = "node1"
		args.AllowStale = true
		var out structs.IndexedNodeServices
		err := a.RPC(context.Background(), "Catalog.NodeServices", &args, &out)
		assert.NoError(r, err)
		foundService := false
		lookup := structs.NewServiceID("svcid1", structs.WildcardEnterpriseMetaInDefaultPartition())
		for _, srv := range out.NodeServices.Services {
			if lookup.Matches(srv.CompoundServiceID()) {
				foundService = true
			}
		}
		assert.True(r, foundService, "could not find svcid1 in %#v", out.NodeServices.Services)
	})

	checks := make([](func(*retry.R)), 0)

	checks = append(checks, test_createAlias(t, a, &structs.CheckType{
		Name:         "Check_Local_Ok",
		AliasService: "svcid1",
	}, api.HealthPassing))

	checks = append(checks, test_createAlias(t, a, &structs.CheckType{
		Name:         "Check_Local_Fail",
		AliasService: "svcidNoExistingID",
	}, api.HealthCritical))

	checks = append(checks, test_createAlias(t, a, &structs.CheckType{
		Name:         "Check_Remote_Host_Ok",
		AliasNode:    "node1",
		AliasService: "svcid1",
	}, api.HealthPassing))

	checks = append(checks, test_createAlias(t, a, &structs.CheckType{
		Name:         "Check_Remote_Host_Non_Existing_Service",
		AliasNode:    "node1",
		AliasService: "svcidNoExistingID",
	}, api.HealthCritical))

	// We wait for max 5s for all checks to be in sync
	{
		for i := 0; i < 50; i++ {
			unlockIndexOnNode()
			allNonWarning := true
			for _, chk := range a.State.Checks(structs.WildcardEnterpriseMetaInDefaultPartition()) {
				if chk.Status == api.HealthWarning {
					allNonWarning = false
				}
			}
			if allNonWarning {
				break
			} else {
				time.Sleep(100 * time.Millisecond)
			}
		}
	}

	for _, toRun := range checks {
		unlockIndexOnNode()
		retry.Run(t, toRun)
	}
}

func TestAgent_AddServiceWithH2PINGCheck(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, "")
	defer a.Shutdown()
	check := []*structs.CheckType{
		{
			CheckID:       "test-h2ping-check",
			Name:          "test-h2ping-check",
			H2PING:        "localhost:12345",
			TLSSkipVerify: true,
			Interval:      10 * time.Second,
		},
	}

	nodeService := &structs.NodeService{
		ID:      "test-h2ping-check-service",
		Service: "test-h2ping-check-service",
	}
	err := a.addServiceFromSource(nodeService, check, false, "", ConfigSourceLocal)
	if err != nil {
		t.Fatalf("Error registering service: %v", err)
	}
	requireCheckExists(t, a, "test-h2ping-check")
}

func TestAgent_AddServiceWithH2CPINGCheck(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, "")
	defer a.Shutdown()
	check := []*structs.CheckType{
		{
			CheckID:       "test-h2cping-check",
			Name:          "test-h2cping-check",
			H2PING:        "localhost:12345",
			TLSSkipVerify: true,
			Interval:      10 * time.Second,
			H2PingUseTLS:  false,
		},
	}

	nodeService := &structs.NodeService{
		ID:      "test-h2cping-check-service",
		Service: "test-h2cping-check-service",
	}
	err := a.addServiceFromSource(nodeService, check, false, "", ConfigSourceLocal)
	if err != nil {
		t.Fatalf("Error registering service: %v", err)
	}
	requireCheckExists(t, a, "test-h2cping-check")
}

func TestAgent_AddServiceNoExec(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Run("normal", func(t *testing.T) {
		t.Parallel()
		testAgent_AddServiceNoExec(t, "enable_central_service_config = false")
	})
	t.Run("service manager", func(t *testing.T) {
		t.Parallel()
		testAgent_AddServiceNoExec(t, "enable_central_service_config = true")
	})
}

func testAgent_AddServiceNoExec(t *testing.T, extraHCL string) {
	t.Helper()

	a := NewTestAgent(t, `
		node_name = "node1"
	`+extraHCL)
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	srv := &structs.NodeService{
		ID:      "svcid1",
		Service: "svcname1",
		Tags:    []string{"tag1"},
		Port:    8100,
	}
	chk := &structs.CheckType{
		ScriptArgs: []string{"exit", "0"},
		Interval:   15 * time.Second,
	}

	err := a.addServiceFromSource(srv, []*structs.CheckType{chk}, false, "", ConfigSourceLocal)
	if err == nil || !strings.Contains(err.Error(), "Scripts are disabled on this agent") {
		t.Fatalf("err: %v", err)
	}

	err = a.addServiceFromSource(srv, []*structs.CheckType{chk}, false, "", ConfigSourceRemote)
	if err == nil || !strings.Contains(err.Error(), "Scripts are disabled on this agent") {
		t.Fatalf("err: %v", err)
	}
}

func TestAgent_AddServiceNoRemoteExec(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Run("normal", func(t *testing.T) {
		t.Parallel()
		testAgent_AddServiceNoRemoteExec(t, "enable_central_service_config = false")
	})
	t.Run("service manager", func(t *testing.T) {
		t.Parallel()
		testAgent_AddServiceNoRemoteExec(t, "enable_central_service_config = true")
	})
}

func testAgent_AddServiceNoRemoteExec(t *testing.T, extraHCL string) {
	t.Helper()

	a := NewTestAgent(t, `
		node_name = "node1"
		enable_local_script_checks = true
	`+extraHCL)
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	srv := &structs.NodeService{
		ID:      "svcid1",
		Service: "svcname1",
		Tags:    []string{"tag1"},
		Port:    8100,
	}
	chk := &structs.CheckType{
		ScriptArgs: []string{"exit", "0"},
		Interval:   15 * time.Second,
	}

	err := a.addServiceFromSource(srv, []*structs.CheckType{chk}, false, "", ConfigSourceRemote)
	if err == nil || !strings.Contains(err.Error(), "Scripts are disabled on this agent") {
		t.Fatalf("err: %v", err)
	}
}

func TestAddServiceIPv4TaggedDefault(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Helper()

	a := NewTestAgent(t, "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	srv := &structs.NodeService{
		Service: "my_service",
		ID:      "my_service_id",
		Port:    8100,
		Address: "10.0.1.2",
	}

	err := a.addServiceFromSource(srv, []*structs.CheckType{}, false, "", ConfigSourceRemote)
	require.Nil(t, err)

	ns := a.State.Service(structs.NewServiceID("my_service_id", nil))
	require.NotNil(t, ns)

	svcAddr := structs.ServiceAddress{Address: srv.Address, Port: srv.Port}
	require.Equal(t, svcAddr, ns.TaggedAddresses[structs.TaggedAddressLANIPv4])
	require.Equal(t, svcAddr, ns.TaggedAddresses[structs.TaggedAddressWANIPv4])
	_, ok := ns.TaggedAddresses[structs.TaggedAddressLANIPv6]
	require.False(t, ok)
	_, ok = ns.TaggedAddresses[structs.TaggedAddressWANIPv6]
	require.False(t, ok)
}

func TestAddServiceIPv6TaggedDefault(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Helper()

	a := NewTestAgent(t, "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	srv := &structs.NodeService{
		Service: "my_service",
		ID:      "my_service_id",
		Port:    8100,
		Address: "::5",
	}

	err := a.addServiceFromSource(srv, []*structs.CheckType{}, false, "", ConfigSourceRemote)
	require.Nil(t, err)

	ns := a.State.Service(structs.NewServiceID("my_service_id", nil))
	require.NotNil(t, ns)

	svcAddr := structs.ServiceAddress{Address: srv.Address, Port: srv.Port}
	require.Equal(t, svcAddr, ns.TaggedAddresses[structs.TaggedAddressLANIPv6])
	require.Equal(t, svcAddr, ns.TaggedAddresses[structs.TaggedAddressWANIPv6])
	_, ok := ns.TaggedAddresses[structs.TaggedAddressLANIPv4]
	require.False(t, ok)
	_, ok = ns.TaggedAddresses[structs.TaggedAddressWANIPv4]
	require.False(t, ok)
}

func TestAddServiceIPv4TaggedSet(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Helper()

	a := NewTestAgent(t, "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	srv := &structs.NodeService{
		Service: "my_service",
		ID:      "my_service_id",
		Port:    8100,
		Address: "10.0.1.2",
		TaggedAddresses: map[string]structs.ServiceAddress{
			structs.TaggedAddressWANIPv4: {
				Address: "10.100.200.5",
				Port:    8100,
			},
		},
	}

	err := a.addServiceFromSource(srv, []*structs.CheckType{}, false, "", ConfigSourceRemote)
	require.Nil(t, err)

	ns := a.State.Service(structs.NewServiceID("my_service_id", nil))
	require.NotNil(t, ns)

	svcAddr := structs.ServiceAddress{Address: srv.Address, Port: srv.Port}
	require.Equal(t, svcAddr, ns.TaggedAddresses[structs.TaggedAddressLANIPv4])
	require.Equal(t, structs.ServiceAddress{Address: "10.100.200.5", Port: 8100}, ns.TaggedAddresses[structs.TaggedAddressWANIPv4])
	_, ok := ns.TaggedAddresses[structs.TaggedAddressLANIPv6]
	require.False(t, ok)
	_, ok = ns.TaggedAddresses[structs.TaggedAddressWANIPv6]
	require.False(t, ok)
}

func TestAddServiceIPv6TaggedSet(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Helper()

	a := NewTestAgent(t, "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	srv := &structs.NodeService{
		Service: "my_service",
		ID:      "my_service_id",
		Port:    8100,
		Address: "::5",
		TaggedAddresses: map[string]structs.ServiceAddress{
			structs.TaggedAddressWANIPv6: {
				Address: "::6",
				Port:    8100,
			},
		},
	}

	err := a.addServiceFromSource(srv, []*structs.CheckType{}, false, "", ConfigSourceRemote)
	require.Nil(t, err)

	ns := a.State.Service(structs.NewServiceID("my_service_id", nil))
	require.NotNil(t, ns)

	svcAddr := structs.ServiceAddress{Address: srv.Address, Port: srv.Port}
	require.Equal(t, svcAddr, ns.TaggedAddresses[structs.TaggedAddressLANIPv6])
	require.Equal(t, structs.ServiceAddress{Address: "::6", Port: 8100}, ns.TaggedAddresses[structs.TaggedAddressWANIPv6])
	_, ok := ns.TaggedAddresses[structs.TaggedAddressLANIPv4]
	require.False(t, ok)
	_, ok = ns.TaggedAddresses[structs.TaggedAddressWANIPv4]
	require.False(t, ok)
}

func TestAgent_RemoveService(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Run("normal", func(t *testing.T) {
		t.Parallel()
		testAgent_RemoveService(t, "enable_central_service_config = false")
	})
	t.Run("service manager", func(t *testing.T) {
		t.Parallel()
		testAgent_RemoveService(t, "enable_central_service_config = true")
	})
}

func testAgent_RemoveService(t *testing.T, extraHCL string) {
	t.Helper()

	a := NewTestAgent(t, extraHCL)
	defer a.Shutdown()

	// Remove a service that doesn't exist
	if err := a.RemoveService(structs.NewServiceID("redis", nil)); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Remove without an ID
	if err := a.RemoveService(structs.NewServiceID("", nil)); err == nil {
		t.Fatalf("should have errored")
	}

	// Removing a service with a single check works
	{
		srv := &structs.NodeService{
			ID:      "memcache",
			Service: "memcache",
			Port:    8000,
		}
		chkTypes := []*structs.CheckType{{TTL: time.Minute}}

		if err := a.addServiceFromSource(srv, chkTypes, false, "", ConfigSourceLocal); err != nil {
			t.Fatalf("err: %v", err)
		}

		// Add a check after the fact with a specific check ID
		check := &structs.CheckDefinition{
			ID:        "check2",
			Name:      "check2",
			ServiceID: "memcache",
			TTL:       time.Minute,
		}
		hc := check.HealthCheck("node1")
		if err := a.AddCheck(hc, check.CheckType(), false, "", ConfigSourceLocal); err != nil {
			t.Fatalf("err: %s", err)
		}

		if err := a.RemoveService(structs.NewServiceID("memcache", nil)); err != nil {
			t.Fatalf("err: %s", err)
		}
		require.Nil(t, a.State.Check(structs.NewCheckID("service:memcache", nil)), "have memcache check")
		require.Nil(t, a.State.Check(structs.NewCheckID("check2", nil)), "have check2 check")
	}

	// Removing a service with multiple checks works
	{
		// add a service to remove
		srv := &structs.NodeService{
			ID:      "redis",
			Service: "redis",
			Port:    8000,
		}
		chkTypes := []*structs.CheckType{
			{TTL: time.Minute},
			{TTL: 30 * time.Second},
		}
		if err := a.addServiceFromSource(srv, chkTypes, false, "", ConfigSourceLocal); err != nil {
			t.Fatalf("err: %v", err)
		}

		// add another service that won't be affected
		srv = &structs.NodeService{
			ID:      "mysql",
			Service: "mysql",
			Port:    3306,
		}
		chkTypes = []*structs.CheckType{
			{TTL: time.Minute},
			{TTL: 30 * time.Second},
		}
		if err := a.addServiceFromSource(srv, chkTypes, false, "", ConfigSourceLocal); err != nil {
			t.Fatalf("err: %v", err)
		}

		// Remove the service
		if err := a.RemoveService(structs.NewServiceID("redis", nil)); err != nil {
			t.Fatalf("err: %v", err)
		}

		// Ensure the service is gone from the local state
		requireServiceMissing(t, a, "redis")

		// Ensure checks were removed
		requireCheckMissing(t, a, "service:redis:1")
		requireCheckMissing(t, a, "service:redis:2")
		requireCheckMissingMap(t, a.checkTTLs, "service:redis:1")
		requireCheckMissingMap(t, a.checkTTLs, "service:redis:2")

		// check the mysql service is unaffected
		requireCheckExistsMap(t, a.checkTTLs, "service:mysql:1")
		requireCheckExists(t, a, "service:mysql:1")
		requireCheckExistsMap(t, a.checkTTLs, "service:mysql:2")
		requireCheckExists(t, a, "service:mysql:2")
	}
}

func TestAgent_RemoveServiceRemovesAllChecks(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Run("normal", func(t *testing.T) {
		t.Parallel()
		testAgent_RemoveServiceRemovesAllChecks(t, "enable_central_service_config = false")
	})
	t.Run("service manager", func(t *testing.T) {
		t.Parallel()
		testAgent_RemoveServiceRemovesAllChecks(t, "enable_central_service_config = true")
	})
}

func testAgent_RemoveServiceRemovesAllChecks(t *testing.T, extraHCL string) {
	t.Helper()

	a := NewTestAgent(t, `
		node_name = "node1"
	`+extraHCL)
	defer a.Shutdown()
	svc := &structs.NodeService{ID: "redis", Service: "redis", Port: 8000, EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition()}
	chk1 := &structs.CheckType{CheckID: "chk1", Name: "chk1", TTL: time.Minute}
	chk2 := &structs.CheckType{CheckID: "chk2", Name: "chk2", TTL: 2 * time.Minute}
	hchk1 := &structs.HealthCheck{
		Node:           "node1",
		CheckID:        "chk1",
		Name:           "chk1",
		Status:         "critical",
		ServiceID:      "redis",
		ServiceName:    "redis",
		Type:           "ttl",
		EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
	}
	hchk2 := &structs.HealthCheck{
		Node:           "node1",
		CheckID:        "chk2",
		Name:           "chk2",
		Status:         "critical",
		ServiceID:      "redis",
		ServiceName:    "redis",
		Type:           "ttl",
		EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
	}

	// register service with chk1
	if err := a.addServiceFromSource(svc, []*structs.CheckType{chk1}, false, "", ConfigSourceLocal); err != nil {
		t.Fatal("Failed to register service", err)
	}

	// verify chk1 exists
	requireCheckExists(t, a, "chk1")

	// update the service with chk2
	if err := a.addServiceFromSource(svc, []*structs.CheckType{chk2}, false, "", ConfigSourceLocal); err != nil {
		t.Fatal("Failed to update service", err)
	}

	// check that both checks are there
	require.Equal(t, hchk1, getCheck(a, "chk1"))
	require.Equal(t, hchk2, getCheck(a, "chk2"))

	// Remove service
	if err := a.RemoveService(structs.NewServiceID("redis", nil)); err != nil {
		t.Fatal("Failed to remove service", err)
	}

	// Check that both checks are gone
	requireCheckMissing(t, a, "chk1")
	requireCheckMissing(t, a, "chk2")
}
|
|
|
|
|
2018-02-07 04:35:38 +00:00
|
|
|
// TestAgent_IndexChurn is designed to detect a class of issues where
|
|
|
|
// we would have unnecessary catalog churn from anti-entropy. See issues
|
|
|
|
// #3259, #3642, #3845, and #3866.
|
|
|
|
func TestAgent_IndexChurn(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2018-02-06 00:18:29 +00:00
|
|
|
t.Parallel()
|
2018-02-07 04:35:38 +00:00
|
|
|
|
|
|
|
t.Run("no tags", func(t *testing.T) {
|
|
|
|
verifyIndexChurn(t, nil)
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("with tags", func(t *testing.T) {
|
|
|
|
verifyIndexChurn(t, []string{"foo", "bar"})
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// verifyIndexChurn registers some things and runs anti-entropy a bunch of times
|
|
|
|
// in a row to make sure there are no index bumps.
|
|
|
|
func verifyIndexChurn(t *testing.T, tags []string) {
|
2021-05-12 20:51:39 +00:00
|
|
|
t.Helper()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2018-02-06 00:18:29 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
|
2018-09-07 14:30:47 +00:00
|
|
|
weights := &structs.Weights{
|
|
|
|
Passing: 1,
|
|
|
|
Warning: 1,
|
|
|
|
}
|
2018-08-09 16:40:07 +00:00
|
|
|
// Ensure we have a leader before we start adding the services
|
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
|
|
|
|
2018-02-06 00:18:29 +00:00
|
|
|
svc := &structs.NodeService{
|
|
|
|
ID: "redis",
|
|
|
|
Service: "redis",
|
|
|
|
Port: 8000,
|
2018-02-07 04:35:38 +00:00
|
|
|
Tags: tags,
|
2018-09-07 14:30:47 +00:00
|
|
|
Weights: weights,
|
2018-02-06 00:18:29 +00:00
|
|
|
}
|
2020-11-30 18:26:58 +00:00
|
|
|
if err := a.addServiceFromSource(svc, nil, true, "", ConfigSourceLocal); err != nil {
|
2018-02-06 00:18:29 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
chk := &structs.HealthCheck{
|
|
|
|
CheckID: "redis-check",
|
2018-02-07 04:35:38 +00:00
|
|
|
Name: "Service-level check",
|
2018-02-06 00:18:29 +00:00
|
|
|
ServiceID: "redis",
|
|
|
|
Status: api.HealthCritical,
|
|
|
|
}
|
|
|
|
chkt := &structs.CheckType{
|
|
|
|
TTL: time.Hour,
|
|
|
|
}
|
2018-10-11 12:22:11 +00:00
|
|
|
if err := a.AddCheck(chk, chkt, true, "", ConfigSourceLocal); err != nil {
|
2018-02-06 00:18:29 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2018-02-07 04:35:38 +00:00
|
|
|
chk = &structs.HealthCheck{
|
|
|
|
CheckID: "node-check",
|
|
|
|
Name: "Node-level check",
|
|
|
|
Status: api.HealthCritical,
|
|
|
|
}
|
|
|
|
chkt = &structs.CheckType{
|
|
|
|
TTL: time.Hour,
|
|
|
|
}
|
2018-10-11 12:22:11 +00:00
|
|
|
if err := a.AddCheck(chk, chkt, true, "", ConfigSourceLocal); err != nil {
|
2018-02-07 04:35:38 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2018-02-06 00:18:29 +00:00
|
|
|
if err := a.sync.State.SyncFull(); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2018-02-07 04:35:38 +00:00
|
|
|
|
2018-02-06 00:18:29 +00:00
|
|
|
args := &structs.ServiceSpecificRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
ServiceName: "redis",
|
|
|
|
}
|
2018-02-07 04:35:38 +00:00
|
|
|
var before structs.IndexedCheckServiceNodes
|
2018-08-09 16:40:07 +00:00
|
|
|
|
|
|
|
// This sleep gives the serfHealth check time to be added to the agent.
|
|
|
|
// A value of 375ms is normally enough for the serfHealth check to be
|
|
|
|
// added; we use 500ms so that we don't see flakiness.
|
|
|
|
time.Sleep(500 * time.Millisecond)
|
|
|
|
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := a.RPC(context.Background(), "Health.ServiceNodes", args, &before); err != nil {
|
2018-02-06 00:18:29 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2018-08-09 16:40:07 +00:00
|
|
|
for _, name := range before.Nodes[0].Checks {
|
2020-01-28 23:50:41 +00:00
|
|
|
a.logger.Debug("Registered node", "node", name.Name)
|
2018-08-09 16:40:07 +00:00
|
|
|
}
|
2018-02-07 04:35:38 +00:00
|
|
|
if got, want := len(before.Nodes), 1; got != want {
|
|
|
|
t.Fatalf("got %d want %d", got, want)
|
|
|
|
}
|
|
|
|
if got, want := len(before.Nodes[0].Checks), 3; /* incl. serfHealth */ got != want {
|
2018-02-06 00:18:29 +00:00
|
|
|
t.Fatalf("got %d want %d", got, want)
|
|
|
|
}
|
|
|
|
|
|
|
|
for i := 0; i < 10; i++ {
|
2020-01-28 23:50:41 +00:00
|
|
|
a.logger.Info("Sync in progress", "iteration", i+1)
|
2018-02-06 00:18:29 +00:00
|
|
|
if err := a.sync.State.SyncFull(); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
}
|
2018-08-09 16:40:07 +00:00
|
|
|
// If this test fails here, it means that the X-Consul-Index
|
|
|
|
// has changed for the RPC, which means that idempotent ops
|
|
|
|
// are not working as intended.
|
2018-02-07 04:35:38 +00:00
|
|
|
var after structs.IndexedCheckServiceNodes
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := a.RPC(context.Background(), "Health.ServiceNodes", args, &after); err != nil {
|
2018-02-06 00:18:29 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2020-06-02 16:41:25 +00:00
|
|
|
require.Equal(t, before, after)
|
2018-02-06 00:18:29 +00:00
|
|
|
}
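// TestAgent_AddCheck registers a script check against the local agent and
// verifies that the check appears in local state with its initial critical
// status and that a corresponding entry is created in a.checkMonitors.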
|
|
|
|
|
2014-01-30 21:39:02 +00:00
|
|
|
func TestAgent_AddCheck(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, `
|
2017-09-25 18:40:42 +00:00
|
|
|
enable_script_checks = true
|
|
|
|
`)
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2014-01-30 21:39:02 +00:00
|
|
|
|
|
|
|
health := &structs.HealthCheck{
|
|
|
|
Node: "foo",
|
|
|
|
CheckID: "mem",
|
|
|
|
Name: "memory util",
|
2017-04-19 23:00:11 +00:00
|
|
|
Status: api.HealthCritical,
|
2014-01-30 21:39:02 +00:00
|
|
|
}
|
2017-06-15 16:46:06 +00:00
|
|
|
chk := &structs.CheckType{
|
2018-05-08 22:31:53 +00:00
|
|
|
ScriptArgs: []string{"exit", "0"},
|
|
|
|
Interval: 15 * time.Second,
|
2014-01-30 21:39:02 +00:00
|
|
|
}
|
2018-10-11 12:22:11 +00:00
|
|
|
err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
|
2014-01-30 21:39:02 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure we have a check mapping
|
2019-12-10 02:26:41 +00:00
|
|
|
sChk := requireCheckExists(t, a, "mem")
|
2014-01-30 21:39:02 +00:00
|
|
|
|
2015-04-12 00:53:48 +00:00
|
|
|
// Ensure our check is in the right state
|
2017-04-19 23:00:11 +00:00
|
|
|
if sChk.Status != api.HealthCritical {
|
2015-04-12 00:53:48 +00:00
|
|
|
t.Fatalf("check not critical")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a check monitor is set up
|
2019-12-10 02:26:41 +00:00
|
|
|
requireCheckExistsMap(t, a.checkMonitors, "mem")
|
2015-04-12 00:53:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestAgent_AddCheck_StartPassing(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, `
|
2017-09-25 18:40:42 +00:00
|
|
|
enable_script_checks = true
|
|
|
|
`)
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2015-04-12 00:53:48 +00:00
|
|
|
|
|
|
|
health := &structs.HealthCheck{
|
|
|
|
Node: "foo",
|
|
|
|
CheckID: "mem",
|
|
|
|
Name: "memory util",
|
2017-04-19 23:00:11 +00:00
|
|
|
Status: api.HealthPassing,
|
2015-04-12 00:53:48 +00:00
|
|
|
}
|
2017-06-15 16:46:06 +00:00
|
|
|
chk := &structs.CheckType{
|
2018-05-08 22:31:53 +00:00
|
|
|
ScriptArgs: []string{"exit", "0"},
|
|
|
|
Interval: 15 * time.Second,
|
2015-04-12 00:53:48 +00:00
|
|
|
}
|
2018-10-11 12:22:11 +00:00
|
|
|
err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
|
2015-04-12 00:53:48 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure we have a check mapping
|
2019-12-10 02:26:41 +00:00
|
|
|
sChk := requireCheckExists(t, a, "mem")
|
2015-04-12 00:53:48 +00:00
|
|
|
|
|
|
|
// Ensure our check is in the right state
|
2017-04-19 23:00:11 +00:00
|
|
|
if sChk.Status != api.HealthPassing {
|
2015-04-12 00:53:48 +00:00
|
|
|
t.Fatalf("check not passing")
|
|
|
|
}
|
|
|
|
|
2014-01-30 21:39:02 +00:00
|
|
|
// Ensure a check monitor is set up
|
2019-12-10 02:26:41 +00:00
|
|
|
requireCheckExistsMap(t, a.checkMonitors, "mem")
|
2014-01-30 21:39:02 +00:00
|
|
|
}
|
|
|
|
|
2014-04-21 21:42:42 +00:00
|
|
|
func TestAgent_AddCheck_MinInterval(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, `
|
2017-09-25 18:40:42 +00:00
|
|
|
enable_script_checks = true
|
|
|
|
`)
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2014-04-21 21:42:42 +00:00
|
|
|
|
|
|
|
health := &structs.HealthCheck{
|
|
|
|
Node: "foo",
|
|
|
|
CheckID: "mem",
|
|
|
|
Name: "memory util",
|
2017-04-19 23:00:11 +00:00
|
|
|
Status: api.HealthCritical,
|
2014-04-21 21:42:42 +00:00
|
|
|
}
|
2017-06-15 16:46:06 +00:00
|
|
|
chk := &structs.CheckType{
|
2018-05-08 22:31:53 +00:00
|
|
|
ScriptArgs: []string{"exit", "0"},
|
|
|
|
Interval: time.Microsecond,
|
2014-04-21 21:42:42 +00:00
|
|
|
}
|
2018-10-11 12:22:11 +00:00
|
|
|
err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
|
2014-04-21 21:42:42 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure we have a check mapping
|
2019-12-10 02:26:41 +00:00
|
|
|
requireCheckExists(t, a, "mem")
|
2014-04-21 21:42:42 +00:00
|
|
|
|
|
|
|
// Ensure a check monitor is set up with the minimum interval
|
2019-12-10 02:26:41 +00:00
|
|
|
if mon, ok := a.checkMonitors[structs.NewCheckID("mem", nil)]; !ok {
|
2014-04-21 21:42:42 +00:00
|
|
|
t.Fatalf("missing mem monitor")
|
2017-10-25 09:18:07 +00:00
|
|
|
} else if mon.Interval != checks.MinInterval {
|
2014-04-21 21:42:42 +00:00
|
|
|
t.Fatalf("bad mem monitor interval")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-01-14 01:52:17 +00:00
|
|
|
func TestAgent_AddCheck_MissingService(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, `
|
2017-09-25 18:40:42 +00:00
|
|
|
enable_script_checks = true
|
|
|
|
`)
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2015-01-14 01:52:17 +00:00
|
|
|
|
|
|
|
health := &structs.HealthCheck{
|
|
|
|
Node: "foo",
|
|
|
|
CheckID: "baz",
|
|
|
|
Name: "baz check 1",
|
|
|
|
ServiceID: "baz",
|
|
|
|
}
|
2017-06-15 16:46:06 +00:00
|
|
|
chk := &structs.CheckType{
|
2018-05-08 22:31:53 +00:00
|
|
|
ScriptArgs: []string{"exit", "0"},
|
|
|
|
Interval: time.Microsecond,
|
2015-01-14 01:52:17 +00:00
|
|
|
}
|
2018-10-11 12:22:11 +00:00
|
|
|
err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
|
2019-12-10 02:26:41 +00:00
|
|
|
if err == nil || err.Error() != fmt.Sprintf("ServiceID %q does not exist", structs.ServiceIDString("baz", nil)) {
|
2015-01-14 01:52:17 +00:00
|
|
|
t.Fatalf("expected service id error, got: %v", err)
|
|
|
|
}
|
|
|
|
}
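// TestAgent_AddCheck_RestoreState verifies that check state previously
// persisted with persistCheckState (status and output) is restored when a
// check with the same ID is registered again.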
|
|
|
|
|
2015-06-06 00:33:34 +00:00
|
|
|
func TestAgent_AddCheck_RestoreState(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2015-06-06 00:33:34 +00:00
|
|
|
|
|
|
|
// Create some state and persist it
|
2017-10-25 09:18:07 +00:00
|
|
|
ttl := &checks.CheckTTL{
|
2019-12-10 02:26:41 +00:00
|
|
|
CheckID: structs.NewCheckID("baz", nil),
|
2015-06-06 00:33:34 +00:00
|
|
|
TTL: time.Minute,
|
|
|
|
}
|
2017-05-21 07:11:09 +00:00
|
|
|
err := a.persistCheckState(ttl, api.HealthPassing, "yup")
|
2015-06-06 00:33:34 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Build and register the check definition and initial state
|
|
|
|
health := &structs.HealthCheck{
|
|
|
|
Node: "foo",
|
|
|
|
CheckID: "baz",
|
|
|
|
Name: "baz check 1",
|
|
|
|
}
|
2017-06-15 16:46:06 +00:00
|
|
|
chk := &structs.CheckType{
|
2015-06-06 00:33:34 +00:00
|
|
|
TTL: time.Minute,
|
|
|
|
}
|
2018-10-11 12:22:11 +00:00
|
|
|
err = a.AddCheck(health, chk, false, "", ConfigSourceLocal)
|
2015-06-06 00:33:34 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the check status was restored during registration
|
2019-12-10 02:26:41 +00:00
|
|
|
check := requireCheckExists(t, a, "baz")
|
2017-04-19 23:00:11 +00:00
|
|
|
if check.Status != api.HealthPassing {
|
2015-06-06 00:33:34 +00:00
|
|
|
t.Fatalf("bad: %#v", check)
|
|
|
|
}
|
|
|
|
if check.Output != "yup" {
|
|
|
|
t.Fatalf("bad: %#v", check)
|
|
|
|
}
|
|
|
|
}
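// TestAgent_AddCheck_ExecDisable verifies that script checks are rejected
// when script checks are not enabled, regardless of whether the registration
// comes from a local or a remote source.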
|
|
|
|
|
2017-07-17 18:20:35 +00:00
|
|
|
func TestAgent_AddCheck_ExecDisable(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2017-07-17 18:20:35 +00:00
|
|
|
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2014-01-30 21:39:02 +00:00
|
|
|
|
2017-07-17 18:20:35 +00:00
|
|
|
health := &structs.HealthCheck{
|
|
|
|
Node: "foo",
|
|
|
|
CheckID: "mem",
|
|
|
|
Name: "memory util",
|
|
|
|
Status: api.HealthCritical,
|
|
|
|
}
|
|
|
|
chk := &structs.CheckType{
|
2018-05-08 22:31:53 +00:00
|
|
|
ScriptArgs: []string{"exit", "0"},
|
|
|
|
Interval: 15 * time.Second,
|
2017-07-17 18:20:35 +00:00
|
|
|
}
|
2018-10-11 12:22:11 +00:00
|
|
|
err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
|
2017-07-20 05:15:04 +00:00
|
|
|
if err == nil || !strings.Contains(err.Error(), "Scripts are disabled on this agent") {
|
2017-07-17 18:20:35 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure we don't have a check mapping
|
2019-12-10 02:26:41 +00:00
|
|
|
requireCheckMissing(t, a, "mem")
|
2018-10-11 12:22:11 +00:00
|
|
|
|
|
|
|
err = a.AddCheck(health, chk, false, "", ConfigSourceRemote)
|
|
|
|
if err == nil || !strings.Contains(err.Error(), "Scripts are disabled on this agent") {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure we don't have a check mapping
|
2019-12-10 02:26:41 +00:00
|
|
|
requireCheckMissing(t, a, "mem")
|
2018-10-11 12:22:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestAgent_AddCheck_ExecRemoteDisable(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2018-10-11 12:22:11 +00:00
|
|
|
t.Parallel()
|
|
|
|
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, `
|
2018-10-11 12:22:11 +00:00
|
|
|
enable_local_script_checks = true
|
|
|
|
`)
|
|
|
|
defer a.Shutdown()
|
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
|
|
|
|
|
|
|
health := &structs.HealthCheck{
|
|
|
|
Node: "foo",
|
|
|
|
CheckID: "mem",
|
|
|
|
Name: "memory util",
|
|
|
|
Status: api.HealthCritical,
|
|
|
|
}
|
|
|
|
chk := &structs.CheckType{
|
|
|
|
ScriptArgs: []string{"exit", "0"},
|
|
|
|
Interval: 15 * time.Second,
|
|
|
|
}
|
|
|
|
err := a.AddCheck(health, chk, false, "", ConfigSourceRemote)
|
|
|
|
if err == nil || !strings.Contains(err.Error(), "Scripts are disabled on this agent from remote calls") {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure we don't have a check mapping
|
2019-12-10 02:26:41 +00:00
|
|
|
requireCheckMissing(t, a, "mem")
|
2017-07-17 18:20:35 +00:00
|
|
|
}
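// TestAgent_AddCheck_GRPC registers a gRPC health check and verifies that it
// is tracked in a.checkGRPCs with its initial critical status.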
|
|
|
|
|
2017-12-27 04:35:22 +00:00
|
|
|
func TestAgent_AddCheck_GRPC(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-12-27 04:35:22 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2017-12-27 04:35:22 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
|
|
|
|
health := &structs.HealthCheck{
|
|
|
|
Node: "foo",
|
|
|
|
CheckID: "grpchealth",
|
|
|
|
Name: "grpc health checking protocol",
|
|
|
|
Status: api.HealthCritical,
|
|
|
|
}
|
|
|
|
chk := &structs.CheckType{
|
|
|
|
GRPC: "localhost:12345/package.Service",
|
|
|
|
Interval: 15 * time.Second,
|
|
|
|
}
|
2018-10-11 12:22:11 +00:00
|
|
|
err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
|
2017-12-27 04:35:22 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure we have a check mapping
|
2019-12-10 02:26:41 +00:00
|
|
|
sChk := requireCheckExists(t, a, "grpchealth")
|
2017-12-27 04:35:22 +00:00
|
|
|
|
|
|
|
// Ensure our check is in the right state
|
|
|
|
if sChk.Status != api.HealthCritical {
|
|
|
|
t.Fatalf("check not critical")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a GRPC check is set up
|
2019-12-10 02:26:41 +00:00
|
|
|
requireCheckExistsMap(t, a.checkGRPCs, "grpchealth")
|
2017-12-27 04:35:22 +00:00
|
|
|
}
|
|
|
|
|
2019-05-24 18:36:56 +00:00
|
|
|
func TestAgent_RestoreServiceWithAliasCheck(t *testing.T) {
|
|
|
|
// t.Parallel() don't even think about making this parallel
|
|
|
|
|
|
|
|
// This test is very contrived and tests for the absence of race conditions
|
|
|
|
// related to the implementation of alias checks. As such it is slow,
|
|
|
|
// serial, full of sleeps and retries, and not generally a great test to
|
|
|
|
// run all of the time.
|
|
|
|
//
|
|
|
|
// That said, it made it incredibly easy to root out various race conditions
|
|
|
|
// quite successfully.
|
|
|
|
//
|
|
|
|
// The original set of races was between:
|
|
|
|
//
|
|
|
|
// - agent startup reloading Services and Checks from disk
|
|
|
|
// - API requests to also re-register those same Services and Checks
|
|
|
|
// - the as-yet-to-be-stopped CheckAlias goroutines themselves
|
|
|
|
|
|
|
|
if os.Getenv("SLOWTEST") != "1" {
|
|
|
|
t.Skip("skipping slow test; set SLOWTEST=1 to run")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// We do this so that the agent logs and the informational messages from
|
|
|
|
// the test itself are interwoven properly.
|
|
|
|
logf := func(t *testing.T, a *TestAgent, format string, args ...interface{}) {
|
2020-01-28 23:50:41 +00:00
|
|
|
a.logger.Info("testharness: " + fmt.Sprintf(format, args...))
|
2019-05-24 18:36:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
cfg := `
|
|
|
|
server = false
|
|
|
|
bootstrap = false
|
|
|
|
enable_central_service_config = false
|
|
|
|
`
|
2020-08-13 21:17:21 +00:00
|
|
|
a := StartTestAgent(t, TestAgent{HCL: cfg})
|
2019-05-24 18:36:56 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
|
2021-11-29 17:19:43 +00:00
|
|
|
handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
|
|
|
w.WriteHeader(http.StatusOK)
|
|
|
|
_, _ = w.Write([]byte("OK\n"))
|
|
|
|
})
|
|
|
|
testHTTPServer := httptest.NewServer(handler)
|
|
|
|
t.Cleanup(testHTTPServer.Close)
|
2019-05-24 18:36:56 +00:00
|
|
|
|
|
|
|
registerServicesAndChecks := func(t *testing.T, a *TestAgent) {
|
|
|
|
// add one persistent service with a simple check
|
2020-11-30 18:26:58 +00:00
|
|
|
require.NoError(t, a.addServiceFromSource(
|
2019-05-24 18:36:56 +00:00
|
|
|
&structs.NodeService{
|
|
|
|
ID: "ping",
|
|
|
|
Service: "ping",
|
|
|
|
Port: 8000,
|
|
|
|
},
|
|
|
|
[]*structs.CheckType{
|
2020-06-16 17:19:31 +00:00
|
|
|
{
|
2019-05-24 18:36:56 +00:00
|
|
|
HTTP: testHTTPServer.URL,
|
|
|
|
Method: "GET",
|
|
|
|
Interval: 5 * time.Second,
|
|
|
|
Timeout: 1 * time.Second,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
true, "", ConfigSourceLocal,
|
|
|
|
))
|
|
|
|
|
|
|
|
// add one persistent sidecar service with an alias check in the manner
|
|
|
|
// of how sidecar_service would add it
|
2020-11-30 18:26:58 +00:00
|
|
|
require.NoError(t, a.addServiceFromSource(
|
2019-05-24 18:36:56 +00:00
|
|
|
&structs.NodeService{
|
|
|
|
ID: "ping-sidecar-proxy",
|
|
|
|
Service: "ping-sidecar-proxy",
|
|
|
|
Port: 9000,
|
|
|
|
},
|
|
|
|
[]*structs.CheckType{
|
2020-06-16 17:19:31 +00:00
|
|
|
{
|
2019-05-24 18:36:56 +00:00
|
|
|
Name: "Connect Sidecar Aliasing ping",
|
|
|
|
AliasService: "ping",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
true, "", ConfigSourceLocal,
|
|
|
|
))
|
|
|
|
}
|
|
|
|
|
|
|
|
retryUntilCheckState := func(t *testing.T, a *TestAgent, checkID string, expectedStatus string) {
|
|
|
|
t.Helper()
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
2019-12-10 02:26:41 +00:00
|
|
|
chk := requireCheckExists(t, a, types.CheckID(checkID))
|
|
|
|
if chk.Status != expectedStatus {
|
|
|
|
logf(t, a, "check=%q expected status %q but got %q", checkID, expectedStatus, chk.Status)
|
|
|
|
r.Fatalf("check=%q expected status %q but got %q", checkID, expectedStatus, chk.Status)
|
2019-05-24 18:36:56 +00:00
|
|
|
}
|
|
|
|
logf(t, a, "check %q has reached desired status %q", checkID, expectedStatus)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
registerServicesAndChecks(t, a)
|
|
|
|
|
|
|
|
time.Sleep(1 * time.Second)
|
|
|
|
|
|
|
|
retryUntilCheckState(t, a, "service:ping", api.HealthPassing)
|
|
|
|
retryUntilCheckState(t, a, "service:ping-sidecar-proxy", api.HealthPassing)
|
|
|
|
|
|
|
|
logf(t, a, "==== POWERING DOWN ORIGINAL ====")
|
|
|
|
|
|
|
|
require.NoError(t, a.Shutdown())
|
|
|
|
|
|
|
|
time.Sleep(1 * time.Second)
|
|
|
|
|
|
|
|
futureHCL := cfg + `
|
|
|
|
node_id = "` + string(a.Config.NodeID) + `"
|
|
|
|
node_name = "` + a.Config.NodeName + `"
|
|
|
|
`
|
|
|
|
|
|
|
|
restartOnce := func(idx int, t *testing.T) {
|
|
|
|
t.Helper()
|
|
|
|
|
|
|
|
// Reload and retain former NodeID and data directory.
|
2020-08-13 21:17:21 +00:00
|
|
|
a2 := StartTestAgent(t, TestAgent{HCL: futureHCL, DataDir: a.DataDir})
|
2019-05-24 18:36:56 +00:00
|
|
|
defer a2.Shutdown()
|
|
|
|
a = nil
|
|
|
|
|
|
|
|
// re-register during standup; we use adjustable timing to try to force a race
|
|
|
|
sleepDur := time.Duration(idx+1) * 500 * time.Millisecond
|
|
|
|
time.Sleep(sleepDur)
|
|
|
|
logf(t, a2, "re-registering checks and services after a delay of %v", sleepDur)
|
|
|
|
for i := 0; i < 20; i++ { // RACE RACE RACE!
|
|
|
|
registerServicesAndChecks(t, a2)
|
|
|
|
time.Sleep(50 * time.Millisecond)
|
|
|
|
}
|
|
|
|
|
|
|
|
time.Sleep(1 * time.Second)
|
|
|
|
|
|
|
|
retryUntilCheckState(t, a2, "service:ping", api.HealthPassing)
|
|
|
|
|
|
|
|
logf(t, a2, "giving the alias check a chance to notice...")
|
|
|
|
time.Sleep(5 * time.Second)
|
|
|
|
|
|
|
|
retryUntilCheckState(t, a2, "service:ping-sidecar-proxy", api.HealthPassing)
|
|
|
|
}
|
|
|
|
|
|
|
|
for i := 0; i < 20; i++ {
|
|
|
|
name := "restart-" + strconv.Itoa(i)
|
|
|
|
ok := t.Run(name, func(t *testing.T) {
|
|
|
|
restartOnce(i, t)
|
|
|
|
})
|
|
|
|
require.True(t, ok, name+" failed")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-10-10 22:42:29 +00:00
|
|
|
func TestAgent_Alias_AddRemove(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2018-06-30 13:38:56 +00:00
|
|
|
t.Parallel()
|
2018-07-12 17:17:53 +00:00
|
|
|
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2018-06-30 13:38:56 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
|
2022-10-10 22:42:29 +00:00
|
|
|
cid := structs.NewCheckID("aliashealth", nil)
|
2018-06-30 13:38:56 +00:00
|
|
|
|
2022-10-10 22:42:29 +00:00
|
|
|
testutil.RunStep(t, "add check", func(t *testing.T) {
|
|
|
|
health := &structs.HealthCheck{
|
|
|
|
Node: "foo",
|
|
|
|
CheckID: cid.ID,
|
|
|
|
Name: "Alias health check",
|
|
|
|
Status: api.HealthCritical,
|
|
|
|
}
|
|
|
|
chk := &structs.CheckType{
|
|
|
|
AliasService: "foo",
|
|
|
|
}
|
|
|
|
err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
|
|
|
|
require.NoError(t, err)
|
2018-07-12 17:17:53 +00:00
|
|
|
|
2022-10-10 22:42:29 +00:00
|
|
|
sChk := requireCheckExists(t, a, cid.ID)
|
|
|
|
require.Equal(t, api.HealthCritical, sChk.Status)
|
2018-07-12 17:17:53 +00:00
|
|
|
|
2022-10-10 22:42:29 +00:00
|
|
|
chkImpl, ok := a.checkAliases[cid]
|
|
|
|
require.True(t, ok, "missing aliashealth check")
|
|
|
|
require.Equal(t, "", chkImpl.RPCReq.Token)
|
|
|
|
|
|
|
|
cs := a.State.CheckState(cid)
|
|
|
|
require.NotNil(t, cs)
|
|
|
|
require.Equal(t, "", cs.Token)
|
|
|
|
})
|
|
|
|
|
|
|
|
testutil.RunStep(t, "remove check", func(t *testing.T) {
|
|
|
|
require.NoError(t, a.RemoveCheck(cid, false))
|
|
|
|
|
|
|
|
requireCheckMissing(t, a, cid.ID)
|
|
|
|
requireCheckMissingMap(t, a.checkAliases, cid.ID)
|
|
|
|
})
|
2018-07-12 17:17:53 +00:00
|
|
|
}
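// The next three tests cover token selection for alias checks: a token
// supplied at registration time is stored in the check state and used for the
// alias check's RPCs; when no registration token is given, the agent's
// acl_token is used for the RPCs while the state token stays empty; and a
// registration token takes precedence over the agent token when both are set.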
|
|
|
|
|
|
|
|
func TestAgent_AddCheck_Alias_setToken(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2018-07-12 17:17:53 +00:00
|
|
|
t.Parallel()
|
|
|
|
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2018-07-12 17:17:53 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
|
|
|
|
health := &structs.HealthCheck{
|
|
|
|
Node: "foo",
|
|
|
|
CheckID: "aliashealth",
|
|
|
|
Name: "Alias health check",
|
|
|
|
Status: api.HealthCritical,
|
|
|
|
}
|
|
|
|
chk := &structs.CheckType{
|
|
|
|
AliasService: "foo",
|
2018-06-30 13:38:56 +00:00
|
|
|
}
|
2018-10-11 12:22:11 +00:00
|
|
|
err := a.AddCheck(health, chk, false, "foo", ConfigSourceLocal)
|
2022-01-20 16:46:23 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-30 13:38:56 +00:00
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
cs := a.State.CheckState(structs.NewCheckID("aliashealth", nil))
|
2022-01-20 16:46:23 +00:00
|
|
|
require.NotNil(t, cs)
|
|
|
|
require.Equal(t, "foo", cs.Token)
|
2018-07-12 17:17:53 +00:00
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
chkImpl, ok := a.checkAliases[structs.NewCheckID("aliashealth", nil)]
|
2022-01-20 16:46:23 +00:00
|
|
|
require.True(t, ok, "missing aliashealth check")
|
|
|
|
require.Equal(t, "foo", chkImpl.RPCReq.Token)
|
2018-07-12 17:17:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestAgent_AddCheck_Alias_userToken(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2018-07-12 17:17:53 +00:00
|
|
|
t.Parallel()
|
|
|
|
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, `
|
2018-07-12 17:17:53 +00:00
|
|
|
acl_token = "hello"
|
|
|
|
`)
|
|
|
|
defer a.Shutdown()
|
|
|
|
|
|
|
|
health := &structs.HealthCheck{
|
|
|
|
Node: "foo",
|
|
|
|
CheckID: "aliashealth",
|
|
|
|
Name: "Alias health check",
|
|
|
|
Status: api.HealthCritical,
|
|
|
|
}
|
|
|
|
chk := &structs.CheckType{
|
|
|
|
AliasService: "foo",
|
2018-06-30 13:38:56 +00:00
|
|
|
}
|
2018-10-11 12:22:11 +00:00
|
|
|
err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
|
2022-01-20 16:46:23 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-30 13:38:56 +00:00
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
cs := a.State.CheckState(structs.NewCheckID("aliashealth", nil))
|
2022-01-20 16:46:23 +00:00
|
|
|
require.NotNil(t, cs)
|
|
|
|
require.Equal(t, "", cs.Token) // State token should still be empty
|
2018-07-12 17:17:53 +00:00
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
chkImpl, ok := a.checkAliases[structs.NewCheckID("aliashealth", nil)]
|
2022-01-20 16:46:23 +00:00
|
|
|
require.True(t, ok, "missing aliashealth check")
|
|
|
|
require.Equal(t, "hello", chkImpl.RPCReq.Token) // Check should use the token
|
2018-07-12 17:17:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestAgent_AddCheck_Alias_userAndSetToken(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2018-07-12 17:17:53 +00:00
|
|
|
t.Parallel()
|
|
|
|
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, `
|
2018-07-12 17:17:53 +00:00
|
|
|
acl_token = "hello"
|
|
|
|
`)
|
|
|
|
defer a.Shutdown()
|
|
|
|
|
|
|
|
health := &structs.HealthCheck{
|
|
|
|
Node: "foo",
|
|
|
|
CheckID: "aliashealth",
|
|
|
|
Name: "Alias health check",
|
|
|
|
Status: api.HealthCritical,
|
2018-06-30 13:38:56 +00:00
|
|
|
}
|
2018-07-12 17:17:53 +00:00
|
|
|
chk := &structs.CheckType{
|
|
|
|
AliasService: "foo",
|
|
|
|
}
|
2018-10-11 12:22:11 +00:00
|
|
|
err := a.AddCheck(health, chk, false, "goodbye", ConfigSourceLocal)
|
2022-01-20 16:46:23 +00:00
|
|
|
require.NoError(t, err)
|
2018-07-12 17:17:53 +00:00
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
cs := a.State.CheckState(structs.NewCheckID("aliashealth", nil))
|
2022-01-20 16:46:23 +00:00
|
|
|
require.NotNil(t, cs)
|
|
|
|
require.Equal(t, "goodbye", cs.Token)
|
2018-07-12 17:17:53 +00:00
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
chkImpl, ok := a.checkAliases[structs.NewCheckID("aliashealth", nil)]
|
2022-01-20 16:46:23 +00:00
|
|
|
require.True(t, ok, "missing aliashealth check")
|
|
|
|
require.Equal(t, "goodbye", chkImpl.RPCReq.Token)
|
2018-06-30 13:38:56 +00:00
|
|
|
}
|
|
|
|
|
2017-07-17 18:20:35 +00:00
|
|
|
func TestAgent_RemoveCheck(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-07-17 18:20:35 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, `
|
2017-09-25 18:40:42 +00:00
|
|
|
enable_script_checks = true
|
|
|
|
`)
|
2017-07-17 18:20:35 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
|
2014-01-30 21:39:02 +00:00
|
|
|
// Remove check that doesn't exist
|
2019-12-10 02:26:41 +00:00
|
|
|
if err := a.RemoveCheck(structs.NewCheckID("mem", nil), false); err != nil {
|
2014-01-30 21:39:02 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2015-01-27 09:10:56 +00:00
|
|
|
// Remove without an ID
|
2019-12-10 02:26:41 +00:00
|
|
|
if err := a.RemoveCheck(structs.NewCheckID("", nil), false); err == nil {
|
2015-01-27 09:10:56 +00:00
|
|
|
t.Fatalf("should have errored")
|
|
|
|
}
|
|
|
|
|
2014-01-30 21:39:02 +00:00
|
|
|
health := &structs.HealthCheck{
|
|
|
|
Node: "foo",
|
|
|
|
CheckID: "mem",
|
|
|
|
Name: "memory util",
|
2017-04-19 23:00:11 +00:00
|
|
|
Status: api.HealthCritical,
|
2014-01-30 21:39:02 +00:00
|
|
|
}
|
2017-06-15 16:46:06 +00:00
|
|
|
chk := &structs.CheckType{
|
2018-05-08 22:31:53 +00:00
|
|
|
ScriptArgs: []string{"exit", "0"},
|
|
|
|
Interval: 15 * time.Second,
|
2014-01-30 21:39:02 +00:00
|
|
|
}
|
2018-10-11 12:22:11 +00:00
|
|
|
err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
|
2014-01-30 21:39:02 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remove check
|
2019-12-10 02:26:41 +00:00
|
|
|
if err := a.RemoveCheck(structs.NewCheckID("mem", nil), false); err != nil {
|
2014-01-30 21:39:02 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the check mapping is removed
|
2019-12-10 02:26:41 +00:00
|
|
|
requireCheckMissing(t, a, "mem")
|
2014-01-30 21:39:02 +00:00
|
|
|
|
|
|
|
// Ensure the check monitor is removed
|
2019-12-10 02:26:41 +00:00
|
|
|
requireCheckMissingMap(t, a.checkMonitors, "mem")
|
2014-01-30 21:39:02 +00:00
|
|
|
}
|
|
|
|
|
2017-11-08 02:22:09 +00:00
|
|
|
func TestAgent_HTTPCheck_TLSSkipVerify(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-11-08 02:22:09 +00:00
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
fmt.Fprintln(w, "GOOD")
|
|
|
|
})
|
|
|
|
server := httptest.NewTLSServer(handler)
|
|
|
|
defer server.Close()
|
|
|
|
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2017-11-08 02:22:09 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
|
|
|
|
health := &structs.HealthCheck{
|
|
|
|
Node: "foo",
|
|
|
|
CheckID: "tls",
|
|
|
|
Name: "tls check",
|
|
|
|
Status: api.HealthCritical,
|
|
|
|
}
|
|
|
|
chk := &structs.CheckType{
|
|
|
|
HTTP: server.URL,
|
|
|
|
Interval: 20 * time.Millisecond,
|
|
|
|
TLSSkipVerify: true,
|
|
|
|
}
|
|
|
|
|
2018-10-11 12:22:11 +00:00
|
|
|
err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
|
2017-11-08 02:22:09 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
2019-12-10 02:26:41 +00:00
|
|
|
status := getCheck(a, "tls")
|
2017-11-08 02:22:09 +00:00
|
|
|
if status.Status != api.HealthPassing {
|
|
|
|
r.Fatalf("bad: %v", status.Status)
|
|
|
|
}
|
|
|
|
if !strings.Contains(status.Output, "GOOD") {
|
|
|
|
r.Fatalf("bad: %v", status.Output)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestAgent_HTTPCheck_EnableAgentTLSForChecks(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-11-08 02:22:09 +00:00
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
run := func(t *testing.T, ca string) {
|
2020-03-31 20:24:39 +00:00
|
|
|
a := StartTestAgent(t, TestAgent{
|
2022-08-19 17:07:22 +00:00
|
|
|
UseHTTPS: true,
|
2017-11-08 02:22:09 +00:00
|
|
|
HCL: `
|
|
|
|
enable_agent_tls_for_checks = true
|
|
|
|
|
|
|
|
verify_incoming = true
|
|
|
|
server_name = "consul.test"
|
|
|
|
key_file = "../test/client_certs/server.key"
|
|
|
|
cert_file = "../test/client_certs/server.crt"
|
|
|
|
` + ca,
|
2019-09-05 17:24:36 +00:00
|
|
|
})
|
2017-11-08 02:22:09 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
|
|
|
|
health := &structs.HealthCheck{
|
|
|
|
Node: "foo",
|
|
|
|
CheckID: "tls",
|
|
|
|
Name: "tls check",
|
|
|
|
Status: api.HealthCritical,
|
|
|
|
}
|
|
|
|
|
2020-09-04 18:53:02 +00:00
|
|
|
addr, err := firstAddr(a.Agent.apiServers, "https")
|
|
|
|
require.NoError(t, err)
|
|
|
|
url := fmt.Sprintf("https://%s/v1/agent/self", addr.String())
|
2017-11-08 02:22:09 +00:00
|
|
|
chk := &structs.CheckType{
|
|
|
|
HTTP: url,
|
|
|
|
Interval: 20 * time.Millisecond,
|
|
|
|
}
|
|
|
|
|
2020-09-04 18:53:02 +00:00
|
|
|
err = a.AddCheck(health, chk, false, "", ConfigSourceLocal)
|
2017-11-08 02:22:09 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
2019-12-10 02:26:41 +00:00
|
|
|
status := getCheck(a, "tls")
|
2017-11-08 02:22:09 +00:00
|
|
|
if status.Status != api.HealthPassing {
|
|
|
|
r.Fatalf("bad: %v", status.Status)
|
|
|
|
}
|
|
|
|
if !strings.Contains(status.Output, "200 OK") {
|
|
|
|
r.Fatalf("bad: %v", status.Output)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// We need to test both methods of passing the CA info to ensure that
|
|
|
|
// we propagate all the fields correctly. All the other fields are
|
|
|
|
// covered by the HCL in the test run function.
|
|
|
|
tests := []struct {
|
|
|
|
desc string
|
|
|
|
config string
|
|
|
|
}{
|
|
|
|
{"ca_file", `ca_file = "../test/client_certs/rootca.crt"`},
|
|
|
|
{"ca_path", `ca_path = "../test/client_certs/path"`},
|
|
|
|
}
|
|
|
|
for _, tt := range tests {
|
|
|
|
t.Run(tt.desc, func(t *testing.T) {
|
|
|
|
run(t, tt.config)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-08-16 07:05:55 +00:00
|
|
|
func TestAgent_updateTTLCheck(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2019-06-26 15:43:25 +00:00
|
|
|
checkBufSize := 100
|
2014-01-30 21:39:02 +00:00
|
|
|
health := &structs.HealthCheck{
|
|
|
|
Node: "foo",
|
|
|
|
CheckID: "mem",
|
|
|
|
Name: "memory util",
|
2017-04-19 23:00:11 +00:00
|
|
|
Status: api.HealthCritical,
|
2014-01-30 21:39:02 +00:00
|
|
|
}
|
2017-06-15 16:46:06 +00:00
|
|
|
chk := &structs.CheckType{
|
2019-06-26 15:43:25 +00:00
|
|
|
TTL: 15 * time.Second,
|
|
|
|
OutputMaxSize: checkBufSize,
|
2014-01-30 21:39:02 +00:00
|
|
|
}
|
2016-08-16 07:05:55 +00:00
|
|
|
|
|
|
|
// Add check and update it.
|
2018-10-11 12:22:11 +00:00
|
|
|
err := a.AddCheck(health, chk, false, "", ConfigSourceLocal)
|
2014-01-30 21:39:02 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2019-12-10 02:26:41 +00:00
|
|
|
if err := a.updateTTLCheck(structs.NewCheckID("mem", nil), api.HealthPassing, "foo"); err != nil {
|
2014-01-30 21:39:02 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2016-08-16 07:05:55 +00:00
|
|
|
// Ensure we have a check mapping.
|
2019-12-10 02:26:41 +00:00
|
|
|
status := getCheck(a, "mem")
|
2017-04-19 23:00:11 +00:00
|
|
|
if status.Status != api.HealthPassing {
|
2014-01-30 21:39:02 +00:00
|
|
|
t.Fatalf("bad: %v", status)
|
|
|
|
}
|
2014-04-21 23:20:22 +00:00
|
|
|
if status.Output != "foo" {
|
2014-01-30 21:39:02 +00:00
|
|
|
t.Fatalf("bad: %v", status)
|
|
|
|
}
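// Update the check again with output far larger than OutputMaxSize; the
// stored output is expected to be truncated rather than kept in full.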
|
2019-06-26 15:43:25 +00:00
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
if err := a.updateTTLCheck(structs.NewCheckID("mem", nil), api.HealthCritical, strings.Repeat("--bad-- ", 5*checkBufSize)); err != nil {
|
2019-06-26 15:43:25 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure we have a check mapping.
|
2019-12-10 02:26:41 +00:00
|
|
|
status = getCheck(a, "mem")
|
2019-06-26 15:43:25 +00:00
|
|
|
if status.Status != api.HealthCritical {
|
|
|
|
t.Fatalf("bad: %v", status)
|
|
|
|
}
|
|
|
|
if len(status.Output) > checkBufSize*2 {
|
|
|
|
t.Fatalf("bad: %v", len(status.Output))
|
|
|
|
}
|
2014-01-30 21:39:02 +00:00
|
|
|
}
|
2014-10-14 22:05:41 +00:00
|
|
|
|
2014-11-24 08:36:03 +00:00
|
|
|
func TestAgent_PersistService(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2019-09-24 15:04:48 +00:00
|
|
|
t.Run("normal", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
2020-10-01 14:19:14 +00:00
|
|
|
testAgent_PersistService(t, "enable_central_service_config = false")
|
2019-09-24 15:04:48 +00:00
|
|
|
})
|
|
|
|
t.Run("service manager", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_PersistService(t, "enable_central_service_config = true")
|
|
|
|
})
|
|
|
|
}
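// testAgent_PersistService exercises on-disk service persistence: when
// persistence is requested, the service is written under the agent's data
// directory in servicesDir, keyed by the SHA-256 hash of its service ID, as a
// JSON-encoded persistedService wrapper carrying the token, the service
// definition, and the registration source.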
|
|
|
|
|
|
|
|
func testAgent_PersistService(t *testing.T, extraHCL string) {
|
|
|
|
t.Helper()
|
|
|
|
|
2017-09-25 18:40:42 +00:00
|
|
|
cfg := `
|
|
|
|
server = false
|
|
|
|
bootstrap = false
|
2019-09-24 15:04:48 +00:00
|
|
|
` + extraHCL
|
2020-08-13 21:17:21 +00:00
|
|
|
a := StartTestAgent(t, TestAgent{HCL: cfg})
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2014-11-24 08:36:03 +00:00
|
|
|
|
|
|
|
svc := &structs.NodeService{
|
|
|
|
ID: "redis",
|
|
|
|
Service: "redis",
|
|
|
|
Tags: []string{"foo"},
|
|
|
|
Port: 8000,
|
|
|
|
}
|
|
|
|
|
2021-11-04 20:07:54 +00:00
|
|
|
file := filepath.Join(a.Config.DataDir, servicesDir, structs.NewServiceID(svc.ID, nil).StringHashSHA256())
|
2014-11-25 03:24:32 +00:00
|
|
|
|
|
|
|
// Service is not persisted unless requested
|
2020-11-30 18:26:58 +00:00
|
|
|
if err := a.addServiceFromSource(svc, nil, false, "", ConfigSourceLocal); err != nil {
|
2014-11-25 03:24:32 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if _, err := os.Stat(file); err == nil {
|
|
|
|
t.Fatalf("should not persist")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Persists to file if requested
|
2020-11-30 18:26:58 +00:00
|
|
|
if err := a.addServiceFromSource(svc, nil, true, "mytoken", ConfigSourceLocal); err != nil {
|
2014-11-24 08:36:03 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if _, err := os.Stat(file); err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
2015-04-28 19:44:46 +00:00
|
|
|
expected, err := json.Marshal(persistedService{
|
2015-05-05 00:36:17 +00:00
|
|
|
Token: "mytoken",
|
2015-04-28 05:46:01 +00:00
|
|
|
Service: svc,
|
2019-09-24 15:04:48 +00:00
|
|
|
Source: "local",
|
2015-04-28 05:46:01 +00:00
|
|
|
})
|
2014-11-24 08:36:03 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
2022-11-10 16:26:01 +00:00
|
|
|
content, err := os.ReadFile(file)
|
2014-11-24 08:36:03 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
if !bytes.Equal(expected, content) {
|
|
|
|
t.Fatalf("bad: %s", string(content))
|
|
|
|
}
|
2015-05-06 05:08:03 +00:00
|
|
|
|
|
|
|
// Updates service definition on disk
|
|
|
|
svc.Port = 8001
|
2020-11-30 18:26:58 +00:00
|
|
|
if err := a.addServiceFromSource(svc, nil, true, "mytoken", ConfigSourceLocal); err != nil {
|
2015-05-06 05:08:03 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
expected, err = json.Marshal(persistedService{
|
|
|
|
Token: "mytoken",
|
|
|
|
Service: svc,
|
2019-09-24 15:04:48 +00:00
|
|
|
Source: "local",
|
2015-05-06 05:08:03 +00:00
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
2022-11-10 16:26:01 +00:00
|
|
|
content, err = os.ReadFile(file)
|
2015-05-06 05:08:03 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
if !bytes.Equal(expected, content) {
|
|
|
|
t.Fatalf("bad: %s", string(content))
|
|
|
|
}
|
2017-05-21 07:11:09 +00:00
|
|
|
a.Shutdown()
|
2014-11-24 08:36:03 +00:00
|
|
|
|
|
|
|
// Should load it back during later start
|
2020-08-13 21:17:21 +00:00
|
|
|
a2 := StartTestAgent(t, TestAgent{HCL: cfg, DataDir: a.DataDir})
|
2017-05-31 08:56:19 +00:00
|
|
|
defer a2.Shutdown()
|
2014-11-24 08:36:03 +00:00
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
restored := a2.State.ServiceState(structs.NewServiceID(svc.ID, nil))
|
2017-08-28 12:17:13 +00:00
|
|
|
if restored == nil {
|
|
|
|
t.Fatalf("service %q missing", svc.ID)
|
2014-11-24 08:36:03 +00:00
|
|
|
}
|
2017-08-28 12:17:13 +00:00
|
|
|
if got, want := restored.Token, "mytoken"; got != want {
|
|
|
|
t.Fatalf("got token %q want %q", got, want)
|
2015-04-28 05:46:01 +00:00
|
|
|
}
|
2017-08-28 12:17:14 +00:00
|
|
|
if got, want := restored.Service.Port, 8001; got != want {
|
2017-08-28 12:17:13 +00:00
|
|
|
t.Fatalf("got port %d want %d", got, want)
|
2015-05-06 05:08:03 +00:00
|
|
|
}
|
2014-11-26 07:58:02 +00:00
|
|
|
}
|
2014-11-24 08:36:03 +00:00
|
|
|
|
2015-04-28 19:44:46 +00:00
|
|
|
func TestAgent_persistedService_compat(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2019-09-24 15:04:48 +00:00
|
|
|
t.Run("normal", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
2020-10-01 14:19:14 +00:00
|
|
|
testAgent_persistedService_compat(t, "enable_central_service_config = false")
|
2019-09-24 15:04:48 +00:00
|
|
|
})
|
|
|
|
t.Run("service manager", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
testAgent_persistedService_compat(t, "enable_central_service_config = true")
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func testAgent_persistedService_compat(t *testing.T, extraHCL string) {
|
|
|
|
t.Helper()
|
|
|
|
|
2015-04-28 19:18:41 +00:00
|
|
|
// Tests backwards compatibility of persisted services from pre-0.5.1
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, extraHCL)
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2015-04-28 19:18:41 +00:00
|
|
|
|
|
|
|
svc := &structs.NodeService{
|
2020-01-17 14:54:17 +00:00
|
|
|
ID: "redis",
|
|
|
|
Service: "redis",
|
|
|
|
Tags: []string{"foo"},
|
|
|
|
Port: 8000,
|
|
|
|
TaggedAddresses: map[string]structs.ServiceAddress{},
|
|
|
|
Weights: &structs.Weights{Passing: 1, Warning: 1},
|
2021-07-22 18:20:45 +00:00
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
2015-04-28 19:18:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Encode the NodeService directly. This is what previous versions
|
|
|
|
// would serialize to the file (without the wrapper)
|
|
|
|
encoded, err := json.Marshal(svc)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Write the content to the file
|
2021-11-04 20:07:54 +00:00
|
|
|
file := filepath.Join(a.Config.DataDir, servicesDir, structs.NewServiceID(svc.ID, nil).StringHashSHA256())
|
2015-04-28 19:18:41 +00:00
|
|
|
if err := os.MkdirAll(filepath.Dir(file), 0700); err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
2022-11-10 16:26:01 +00:00
|
|
|
if err := os.WriteFile(file, encoded, 0600); err != nil {
|
2015-04-28 19:18:41 +00:00
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Load the services
|
2020-03-09 11:59:41 +00:00
|
|
|
if err := a.loadServices(a.Config, nil); err != nil {
|
2015-04-28 19:18:41 +00:00
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the service was restored
|
2019-12-10 02:26:41 +00:00
|
|
|
result := requireServiceExists(t, a, "redis")
|
2019-01-08 10:13:49 +00:00
|
|
|
require.Equal(t, svc, result)
|
2015-04-28 19:18:41 +00:00
|
|
|
}
|
|
|
|
|

func TestAgent_persistedService_compat_hash(t *testing.T) {
	t.Run("normal", func(t *testing.T) {
		t.Parallel()
		testAgent_persistedService_compat_hash(t, "enable_central_service_config = false")
	})
	t.Run("service manager", func(t *testing.T) {
		t.Parallel()
		testAgent_persistedService_compat_hash(t, "enable_central_service_config = true")
	})
}

func testAgent_persistedService_compat_hash(t *testing.T, extraHCL string) {
	t.Helper()

	// Tests backwards compatibility of persisted services from pre-0.5.1
	a := NewTestAgent(t, extraHCL)
	defer a.Shutdown()

	svc := &structs.NodeService{
		ID:              "redis",
		Service:         "redis",
		Tags:            []string{"foo"},
		Port:            8000,
		TaggedAddresses: map[string]structs.ServiceAddress{},
		Weights:         &structs.Weights{Passing: 1, Warning: 1},
		EnterpriseMeta:  *structs.DefaultEnterpriseMetaInDefaultPartition(),
	}

	// Encode the NodeService directly. This is what previous versions
	// would serialize to the file (without the wrapper)
	encoded, err := json.Marshal(svc)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Write the content to the file using the old md5 based path
	file := filepath.Join(a.Config.DataDir, servicesDir, stringHashMD5(svc.ID))
	if err := os.MkdirAll(filepath.Dir(file), 0700); err != nil {
		t.Fatalf("err: %s", err)
	}
	if err := os.WriteFile(file, encoded, 0600); err != nil {
		t.Fatalf("err: %s", err)
	}

	wrapped := persistedServiceConfig{
		ServiceID:      "redis",
		Defaults:       &structs.ServiceConfigResponse{},
		EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
	}

	encodedConfig, err := json.Marshal(wrapped)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	configFile := filepath.Join(a.Config.DataDir, serviceConfigDir, stringHashMD5(svc.ID))
	if err := os.MkdirAll(filepath.Dir(configFile), 0700); err != nil {
		t.Fatalf("err: %s", err)
	}
	if err := os.WriteFile(configFile, encodedConfig, 0600); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Load the services
	if err := a.loadServices(a.Config, nil); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Ensure the service was restored
	result := requireServiceExists(t, a, "redis")
	require.Equal(t, svc, result)
}

// Exists for backwards compatibility testing
func stringHashMD5(s string) string {
	return fmt.Sprintf("%x", md5.Sum([]byte(s)))
}
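
// The purge tests below check the interaction between a service's in-memory
// registration and its persisted file: removing a service with purge=false
// leaves the file on disk, while purge=true deletes it. The same pattern is
// repeated for checks further down.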

func TestAgent_PurgeService(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Run("normal", func(t *testing.T) {
		t.Parallel()
		testAgent_PurgeService(t, "enable_central_service_config = false")
	})
	t.Run("service manager", func(t *testing.T) {
		t.Parallel()
		testAgent_PurgeService(t, "enable_central_service_config = true")
	})
}

func testAgent_PurgeService(t *testing.T, extraHCL string) {
	t.Helper()

	a := NewTestAgent(t, extraHCL)
	defer a.Shutdown()

	svc := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{"foo"},
		Port:    8000,
	}

	file := filepath.Join(a.Config.DataDir, servicesDir, structs.NewServiceID(svc.ID, nil).StringHashSHA256())
	if err := a.addServiceFromSource(svc, nil, true, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Exists
	if _, err := os.Stat(file); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Not removed
	if err := a.removeService(structs.NewServiceID(svc.ID, nil), false); err != nil {
		t.Fatalf("err: %s", err)
	}
	if _, err := os.Stat(file); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Re-add the service
	if err := a.addServiceFromSource(svc, nil, true, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Removed
	if err := a.removeService(structs.NewServiceID(svc.ID, nil), true); err != nil {
		t.Fatalf("err: %s", err)
	}
	if _, err := os.Stat(file); !os.IsNotExist(err) {
		t.Fatalf("bad: %#v", err)
	}
}

func TestAgent_PurgeServiceOnDuplicate(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Run("normal", func(t *testing.T) {
		t.Parallel()
		testAgent_PurgeServiceOnDuplicate(t, "enable_central_service_config = false")
	})
	t.Run("service manager", func(t *testing.T) {
		t.Parallel()
		testAgent_PurgeServiceOnDuplicate(t, "enable_central_service_config = true")
	})
}

func testAgent_PurgeServiceOnDuplicate(t *testing.T, extraHCL string) {
	t.Helper()

	cfg := `
		server = false
		bootstrap = false
	` + extraHCL
	a := StartTestAgent(t, TestAgent{HCL: cfg})
	defer a.Shutdown()

	svc1 := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{"foo"},
		Port:    8000,
	}

	// First persist the service
	require.NoError(t, a.addServiceFromSource(svc1, nil, true, "", ConfigSourceLocal))
	a.Shutdown()

	// Try bringing the agent back up with the service already
	// existing in the config
	a2 := StartTestAgent(t, TestAgent{Name: "Agent2", HCL: cfg + `
		service = {
			id = "redis"
			name = "redis"
			tags = ["bar"]
			port = 9000
		}
	`, DataDir: a.DataDir})
	defer a2.Shutdown()

	sid := svc1.CompoundServiceID()
	file := filepath.Join(a.Config.DataDir, servicesDir, sid.StringHashSHA256())
	_, err := os.Stat(file)
	require.Error(t, err, "should have removed persisted service")
	result := requireServiceExists(t, a, "redis")
	require.NotEqual(t, []string{"bar"}, result.Tags)
	require.NotEqual(t, 9000, result.Port)
}
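
// TestAgent_PersistCheck exercises the on-disk format for persisted checks,
// which (as asserted below) is the JSON encoding of persistedCheck. Roughly:
//
//	{"Check":{...},"ChkType":{...},"Token":"mytoken","Source":"local"}
//
// The exact field set comes from the persistedCheck struct; the line above is
// only an illustration of the shape the test compares against.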

func TestAgent_PersistCheck(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	cfg := `
		server = false
		bootstrap = false
		enable_script_checks = true
	`
	a := StartTestAgent(t, TestAgent{HCL: cfg})
	defer a.Shutdown()

	check := &structs.HealthCheck{
		Node:    a.config.NodeName,
		CheckID: "mem",
		Name:    "memory check",
		Status:  api.HealthPassing,
	}
	chkType := &structs.CheckType{
		ScriptArgs: []string{"/bin/true"},
		Interval:   10 * time.Second,
	}

	cid := check.CompoundCheckID()
	file := filepath.Join(a.Config.DataDir, checksDir, cid.StringHashSHA256())

	// Not persisted if not requested
	require.NoError(t, a.AddCheck(check, chkType, false, "", ConfigSourceLocal))
	_, err := os.Stat(file)
	require.Error(t, err, "should not persist")

	// Should persist if requested
	require.NoError(t, a.AddCheck(check, chkType, true, "mytoken", ConfigSourceLocal))
	_, err = os.Stat(file)
	require.NoError(t, err)

	expected, err := json.Marshal(persistedCheck{
		Check:   check,
		ChkType: chkType,
		Token:   "mytoken",
		Source:  "local",
	})
	require.NoError(t, err)

	content, err := os.ReadFile(file)
	require.NoError(t, err)

	require.Equal(t, expected, content)

	// Updates the check definition on disk
	check.Name = "mem1"
	require.NoError(t, a.AddCheck(check, chkType, true, "mytoken", ConfigSourceLocal))
	expected, err = json.Marshal(persistedCheck{
		Check:   check,
		ChkType: chkType,
		Token:   "mytoken",
		Source:  "local",
	})
	require.NoError(t, err)
	content, err = os.ReadFile(file)
	require.NoError(t, err)
	require.Equal(t, expected, content)
	a.Shutdown()

	// Should load it back during later start
	a2 := StartTestAgent(t, TestAgent{Name: "Agent2", HCL: cfg, DataDir: a.DataDir})
	defer a2.Shutdown()

	result := requireCheckExists(t, a2, check.CheckID)
	require.Equal(t, api.HealthCritical, result.Status)
	require.Equal(t, "mem1", result.Name)

	// Should have restored the monitor
	requireCheckExistsMap(t, a2.checkMonitors, check.CheckID)
	chkState := a2.State.CheckState(structs.NewCheckID(check.CheckID, nil))
	require.NotNil(t, chkState)
	require.Equal(t, "mytoken", chkState.Token)
}

func TestAgent_PurgeCheck(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	a := NewTestAgent(t, "")
	defer a.Shutdown()

	check := &structs.HealthCheck{
		Node:    a.Config.NodeName,
		CheckID: "mem",
		Name:    "memory check",
		Status:  api.HealthPassing,
	}

	file := filepath.Join(a.Config.DataDir, checksDir, checkIDHash(check.CheckID))
	if err := a.AddCheck(check, nil, true, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Not removed
	if err := a.RemoveCheck(structs.NewCheckID(check.CheckID, nil), false); err != nil {
		t.Fatalf("err: %s", err)
	}
	if _, err := os.Stat(file); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Removed
	if err := a.RemoveCheck(structs.NewCheckID(check.CheckID, nil), true); err != nil {
		t.Fatalf("err: %s", err)
	}
	if _, err := os.Stat(file); !os.IsNotExist(err) {
		t.Fatalf("bad: %#v", err)
	}
}

func TestAgent_PurgeCheckOnDuplicate(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	nodeID := NodeID()
	a := StartTestAgent(t, TestAgent{
		HCL: `
		node_id = "` + nodeID + `"
		node_name = "Node ` + nodeID + `"
		server = false
		bootstrap = false
		enable_script_checks = true
	`})
	defer a.Shutdown()

	check1 := &structs.HealthCheck{
		Node:           a.Config.NodeName,
		CheckID:        "mem",
		Name:           "memory check",
		Status:         api.HealthPassing,
		EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
	}

	// First persist the check
	if err := a.AddCheck(check1, nil, true, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}
	a.Shutdown()

	// Start again with the check registered in config
	a2 := StartTestAgent(t, TestAgent{
		Name:    "Agent2",
		DataDir: a.DataDir,
		HCL: `
		node_id = "` + nodeID + `"
		node_name = "Node ` + nodeID + `"
		server = false
		bootstrap = false
		enable_script_checks = true
		check = {
			id = "mem"
			name = "memory check"
			notes = "my cool notes"
			args = ["/bin/check-redis.py"]
			interval = "30s"
			timeout = "5s"
		}
	`})
	defer a2.Shutdown()

	cid := check1.CompoundCheckID()
	file := filepath.Join(a.DataDir, checksDir, cid.StringHashSHA256())
	if _, err := os.Stat(file); err == nil {
		t.Fatalf("should have removed persisted check")
	}
	result := requireCheckExists(t, a2, "mem")
	expected := &structs.HealthCheck{
		Node:           a2.Config.NodeName,
		CheckID:        "mem",
		Name:           "memory check",
		Status:         api.HealthCritical,
		Notes:          "my cool notes",
		Interval:       "30s",
		Timeout:        "5s",
		EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
	}
	require.Equal(t, expected, result)
}
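
// TestAgent_DeregisterPersistedSidecarAfterRestart covers the case where a
// service with a connect sidecar is persisted, the agent restarts with the
// same data dir, and the parent service is then removed: deregistering the
// parent is expected to also deregister the restored "-sidecar-proxy" instance.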

func TestAgent_DeregisterPersistedSidecarAfterRestart(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	nodeID := NodeID()
	a := StartTestAgent(t, TestAgent{
		HCL: `
		node_id = "` + nodeID + `"
		node_name = "Node ` + nodeID + `"
		server = false
		bootstrap = false
		enable_central_service_config = false
	`})
	defer a.Shutdown()

	srv := &structs.NodeService{
		ID:      "svc",
		Service: "svc",
		Weights: &structs.Weights{
			Passing: 2,
			Warning: 1,
		},
		Tags:           []string{"tag2"},
		Port:           8200,
		EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),

		Connect: structs.ServiceConnect{
			SidecarService: &structs.ServiceDefinition{},
		},
	}

	connectSrv, _, _, err := sidecarServiceFromNodeService(srv, "")
	require.NoError(t, err)

	// First persist the service and its sidecar
	err = a.addServiceFromSource(srv, nil, true, "", ConfigSourceLocal)
	require.NoError(t, err)
	err = a.addServiceFromSource(connectSrv, nil, true, "", ConfigSourceLocal)
	require.NoError(t, err)

	// check both services were registered
	require.NotNil(t, a.State.Service(srv.CompoundServiceID()))
	require.NotNil(t, a.State.Service(connectSrv.CompoundServiceID()))

	a.Shutdown()

	// Start again with the same data dir so the persisted services are restored
	a2 := StartTestAgent(t, TestAgent{
		Name:    "Agent2",
		DataDir: a.DataDir,
		HCL: `
		node_id = "` + nodeID + `"
		node_name = "Node ` + nodeID + `"
		server = false
		bootstrap = false
		enable_central_service_config = false
	`})
	defer a2.Shutdown()

	// check both services were restored
	require.NotNil(t, a2.State.Service(srv.CompoundServiceID()))
	require.NotNil(t, a2.State.Service(connectSrv.CompoundServiceID()))

	err = a2.RemoveService(srv.CompoundServiceID())
	require.NoError(t, err)

	// check both services were deregistered
	require.Nil(t, a2.State.Service(srv.CompoundServiceID()))
	require.Nil(t, a2.State.Service(connectSrv.CompoundServiceID()))
}
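
// The two *_token tests below verify that a `token` set on a check or service
// block in the agent configuration is captured in local state
// (State.CheckToken / State.ServiceToken). That per-registration token is what
// the agent can later present when syncing the entry to the servers, though
// these tests only assert that it is stored.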

func TestAgent_loadChecks_token(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	a := NewTestAgent(t, `
		check = {
			id = "rabbitmq"
			name = "rabbitmq"
			token = "abc123"
			ttl = "10s"
		}
	`)
	defer a.Shutdown()

	requireCheckExists(t, a, "rabbitmq")
	require.Equal(t, "abc123", a.State.CheckToken(structs.NewCheckID("rabbitmq", nil)))
}

func TestAgent_unloadChecks(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	a := NewTestAgent(t, "")
	defer a.Shutdown()

	// First register a service
	svc := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{"foo"},
		Port:    8000,
	}
	if err := a.addServiceFromSource(svc, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Register a check
	check1 := &structs.HealthCheck{
		Node:        a.Config.NodeName,
		CheckID:     "service:redis",
		Name:        "redischeck",
		Status:      api.HealthPassing,
		ServiceID:   "redis",
		ServiceName: "redis",
	}
	if err := a.AddCheck(check1, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %s", err)
	}

	requireCheckExists(t, a, check1.CheckID)

	// Unload all of the checks
	if err := a.unloadChecks(); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Make sure it was unloaded
	requireCheckMissing(t, a, check1.CheckID)
}

func TestAgent_loadServices_token(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Run("normal", func(t *testing.T) {
		t.Parallel()
		testAgent_loadServices_token(t, "enable_central_service_config = false")
	})
	t.Run("service manager", func(t *testing.T) {
		t.Parallel()
		testAgent_loadServices_token(t, "enable_central_service_config = true")
	})
}

func testAgent_loadServices_token(t *testing.T, extraHCL string) {
	t.Helper()

	a := NewTestAgent(t, `
		service = {
			id = "rabbitmq"
			name = "rabbitmq"
			port = 5672
			token = "abc123"
		}
	`+extraHCL)
	defer a.Shutdown()

	requireServiceExists(t, a, "rabbitmq")
	if token := a.State.ServiceToken(structs.NewServiceID("rabbitmq", nil)); token != "abc123" {
		t.Fatalf("bad: %s", token)
	}
}
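
// The sidecar tests below exercise the `connect { sidecar_service {} }`
// registration sugar: the agent expands it into a second service named
// "<parent>-sidecar-proxy", which inherits the parent's ACL token unless the
// sidecar block sets its own, and gets the default sidecar health checks.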

func TestAgent_loadServices_sidecar(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Run("normal", func(t *testing.T) {
		t.Parallel()
		testAgent_loadServices_sidecar(t, "enable_central_service_config = false")
	})
	t.Run("service manager", func(t *testing.T) {
		t.Parallel()
		testAgent_loadServices_sidecar(t, "enable_central_service_config = true")
	})
}

func testAgent_loadServices_sidecar(t *testing.T, extraHCL string) {
	t.Helper()

	a := NewTestAgent(t, `
		service = {
			id = "rabbitmq"
			name = "rabbitmq"
			port = 5672
			token = "abc123"
			connect = {
				sidecar_service {}
			}
		}
	`+extraHCL)
	defer a.Shutdown()

	svc := requireServiceExists(t, a, "rabbitmq")
	if token := a.State.ServiceToken(structs.NewServiceID("rabbitmq", nil)); token != "abc123" {
		t.Fatalf("bad: %s", token)
	}
	sidecarSvc := requireServiceExists(t, a, "rabbitmq-sidecar-proxy")
	if token := a.State.ServiceToken(structs.NewServiceID("rabbitmq-sidecar-proxy", nil)); token != "abc123" {
		t.Fatalf("bad: %s", token)
	}

	// Verify default checks have been added
	wantChecks := sidecarDefaultChecks(sidecarSvc.ID, sidecarSvc.Address, sidecarSvc.Proxy.LocalServiceAddress, sidecarSvc.Port)
	gotChecks := a.State.ChecksForService(sidecarSvc.CompoundServiceID(), true)
	gotChkNames := make(map[string]types.CheckID)
	for _, check := range gotChecks {
		requireCheckExists(t, a, check.CheckID)
		gotChkNames[check.Name] = check.CheckID
	}
	for _, check := range wantChecks {
		chkName := check.Name
		require.NotNil(t, gotChkNames[chkName])
	}

	// Sanity check: the rabbitmq service should NOT have sidecar info in state
	// since it has done its job; sidecar_service is registration syntax sugar only.
	assert.Nil(t, svc.Connect.SidecarService)
}

func TestAgent_loadServices_sidecarSeparateToken(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Run("normal", func(t *testing.T) {
		t.Parallel()
		testAgent_loadServices_sidecarSeparateToken(t, "enable_central_service_config = false")
	})
	t.Run("service manager", func(t *testing.T) {
		t.Parallel()
		testAgent_loadServices_sidecarSeparateToken(t, "enable_central_service_config = true")
	})
}

func testAgent_loadServices_sidecarSeparateToken(t *testing.T, extraHCL string) {
	t.Helper()

	a := NewTestAgent(t, `
		service = {
			id = "rabbitmq"
			name = "rabbitmq"
			port = 5672
			token = "abc123"
			connect = {
				sidecar_service {
					token = "789xyz"
				}
			}
		}
	`+extraHCL)
	defer a.Shutdown()

	requireServiceExists(t, a, "rabbitmq")
	if token := a.State.ServiceToken(structs.NewServiceID("rabbitmq", nil)); token != "abc123" {
		t.Fatalf("bad: %s", token)
	}
	requireServiceExists(t, a, "rabbitmq-sidecar-proxy")
	if token := a.State.ServiceToken(structs.NewServiceID("rabbitmq-sidecar-proxy", nil)); token != "789xyz" {
		t.Fatalf("bad: %s", token)
	}
}
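
// The next two tests cover how sidecar registrations inherit tags and service
// meta from their parent: when the sidecar_service block sets neither, the
// parent's tags and meta are copied onto the sidecar; when it sets its own,
// the parent's values are not copied over.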

func TestAgent_loadServices_sidecarInheritMeta(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Run("normal", func(t *testing.T) {
		t.Parallel()
		testAgent_loadServices_sidecarInheritMeta(t, "enable_central_service_config = false")
	})
	t.Run("service manager", func(t *testing.T) {
		t.Parallel()
		testAgent_loadServices_sidecarInheritMeta(t, "enable_central_service_config = true")
	})
}

func testAgent_loadServices_sidecarInheritMeta(t *testing.T, extraHCL string) {
	t.Helper()

	a := NewTestAgent(t, `
		service = {
			id = "rabbitmq"
			name = "rabbitmq"
			port = 5672
			tags = ["a", "b"],
			meta = {
				environment = "prod"
			}
			connect = {
				sidecar_service {
				}
			}
		}
	`+extraHCL)
	defer a.Shutdown()

	svc := requireServiceExists(t, a, "rabbitmq")
	require.Len(t, svc.Tags, 2)
	require.Len(t, svc.Meta, 1)

	sidecar := requireServiceExists(t, a, "rabbitmq-sidecar-proxy")
	require.ElementsMatch(t, svc.Tags, sidecar.Tags)
	require.Len(t, sidecar.Meta, 1)
	meta, ok := sidecar.Meta["environment"]
	require.True(t, ok, "missing sidecar service meta")
	require.Equal(t, "prod", meta)
}

func TestAgent_loadServices_sidecarOverrideMeta(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Run("normal", func(t *testing.T) {
		t.Parallel()
		testAgent_loadServices_sidecarOverrideMeta(t, "enable_central_service_config = false")
	})
	t.Run("service manager", func(t *testing.T) {
		t.Parallel()
		testAgent_loadServices_sidecarOverrideMeta(t, "enable_central_service_config = true")
	})
}

func testAgent_loadServices_sidecarOverrideMeta(t *testing.T, extraHCL string) {
	t.Helper()

	a := NewTestAgent(t, `
		service = {
			id = "rabbitmq"
			name = "rabbitmq"
			port = 5672
			tags = ["a", "b"],
			meta = {
				environment = "prod"
			}
			connect = {
				sidecar_service {
					tags = ["foo"],
					meta = {
						environment = "qa"
					}
				}
			}
		}
	`+extraHCL)
	defer a.Shutdown()

	svc := requireServiceExists(t, a, "rabbitmq")
	require.Len(t, svc.Tags, 2)
	require.Len(t, svc.Meta, 1)

	sidecar := requireServiceExists(t, a, "rabbitmq-sidecar-proxy")
	require.Len(t, sidecar.Tags, 1)
	require.Equal(t, "foo", sidecar.Tags[0])
	require.Len(t, sidecar.Meta, 1)
	meta, ok := sidecar.Meta["environment"]
	require.True(t, ok, "missing sidecar service meta")
	require.Equal(t, "qa", meta)
}

func TestAgent_unloadServices(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Run("normal", func(t *testing.T) {
		t.Parallel()
		testAgent_unloadServices(t, "enable_central_service_config = false")
	})
	t.Run("service manager", func(t *testing.T) {
		t.Parallel()
		testAgent_unloadServices(t, "enable_central_service_config = true")
	})
}

func testAgent_unloadServices(t *testing.T, extraHCL string) {
	t.Helper()

	a := NewTestAgent(t, extraHCL)
	defer a.Shutdown()

	svc := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{"foo"},
		Port:    8000,
	}

	// Register the service
	if err := a.addServiceFromSource(svc, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	requireServiceExists(t, a, svc.ID)

	// Unload all services
	if err := a.unloadServices(); err != nil {
		t.Fatalf("err: %s", err)
	}
	if len(a.State.Services(structs.WildcardEnterpriseMetaInDefaultPartition())) != 0 {
		t.Fatalf("should have unloaded services")
	}
}
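
// Maintenance mode is surfaced as a synthetic critical health check: the
// service variant registers a check under serviceMaintCheckID(sid) and the
// node variant uses structs.NodeMaint, with the reason stored in the check's
// Notes (falling back to a default reason when none is given). The tests
// below assert that lifecycle.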

func TestAgent_Service_MaintenanceMode(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	a := NewTestAgent(t, "")
	defer a.Shutdown()

	svc := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{"foo"},
		Port:    8000,
	}

	// Register the service
	if err := a.addServiceFromSource(svc, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	sid := structs.NewServiceID("redis", nil)
	// Enter maintenance mode for the service
	if err := a.EnableServiceMaintenance(sid, "broken", "mytoken"); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Make sure the critical health check was added
	checkID := serviceMaintCheckID(sid)
	check := a.State.Check(checkID)
	if check == nil {
		t.Fatalf("should have registered critical maintenance check")
	}

	// Check that the token was used to register the check
	if token := a.State.CheckToken(checkID); token != "mytoken" {
		t.Fatalf("expected 'mytoken', got: '%s'", token)
	}

	// Ensure the reason was set in notes
	if check.Notes != "broken" {
		t.Fatalf("bad: %#v", check)
	}

	// Leave maintenance mode
	if err := a.DisableServiceMaintenance(sid); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Ensure the check was deregistered
	if found := a.State.Check(checkID); found != nil {
		t.Fatalf("should have deregistered maintenance check")
	}

	// Enter service maintenance mode without providing a reason
	if err := a.EnableServiceMaintenance(sid, "", ""); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Ensure the check was registered with the default notes
	check = a.State.Check(checkID)
	if check == nil {
		t.Fatalf("should have registered critical check")
	}
	if check.Notes != defaultServiceMaintReason {
		t.Fatalf("bad: %#v", check)
	}
}
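
// The reap tests rely on aggressive timing overrides: the TTL check goes
// critical after 25ms, the reaper runs every 50ms, and (in the reap case)
// DeregisterCriticalServiceAfter is 200ms. The service should therefore
// survive a short critical period, recover when the TTL is passed, and only
// be reaped once it has stayed critical past the deregister window.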

func TestAgent_Service_Reap(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	// t.Parallel() // timing test. no parallel
	a := StartTestAgent(t, TestAgent{Overrides: `
		check_reap_interval = "50ms"
		check_deregister_interval_min = "0s"
	`})
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	svc := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{"foo"},
		Port:    8000,
	}
	chkTypes := []*structs.CheckType{
		{
			Status:                         api.HealthPassing,
			TTL:                            25 * time.Millisecond,
			DeregisterCriticalServiceAfter: 200 * time.Millisecond,
		},
	}

	// Register the service.
	if err := a.addServiceFromSource(svc, chkTypes, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Make sure it's there and there's no critical check yet.
	requireServiceExists(t, a, "redis")
	require.Len(t, a.State.CriticalCheckStates(structs.WildcardEnterpriseMetaInDefaultPartition()), 0, "should not have critical checks")

	// Wait for the check TTL to fail but before the check is reaped.
	time.Sleep(100 * time.Millisecond)
	requireServiceExists(t, a, "redis")
	require.Len(t, a.State.CriticalCheckStates(nil), 1, "should have 1 critical check")

	// Pass the TTL.
	if err := a.updateTTLCheck(structs.NewCheckID("service:redis", nil), api.HealthPassing, "foo"); err != nil {
		t.Fatalf("err: %v", err)
	}
	requireServiceExists(t, a, "redis")
	require.Len(t, a.State.CriticalCheckStates(structs.WildcardEnterpriseMetaInDefaultPartition()), 0, "should not have critical checks")

	// Wait for the check TTL to fail again.
	time.Sleep(100 * time.Millisecond)
	requireServiceExists(t, a, "redis")
	require.Len(t, a.State.CriticalCheckStates(structs.WildcardEnterpriseMetaInDefaultPartition()), 1, "should have 1 critical check")

	// Wait for the reap.
	time.Sleep(400 * time.Millisecond)
	requireServiceMissing(t, a, "redis")
	require.Len(t, a.State.CriticalCheckStates(structs.WildcardEnterpriseMetaInDefaultPartition()), 0, "should not have critical checks")
}

func TestAgent_Service_NoReap(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	// t.Parallel() // timing test. no parallel
	a := StartTestAgent(t, TestAgent{Overrides: `
		check_reap_interval = "50ms"
		check_deregister_interval_min = "0s"
	`})
	defer a.Shutdown()

	svc := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{"foo"},
		Port:    8000,
	}
	chkTypes := []*structs.CheckType{
		{
			Status: api.HealthPassing,
			TTL:    25 * time.Millisecond,
		},
	}

	// Register the service.
	if err := a.addServiceFromSource(svc, chkTypes, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Make sure it's there and there's no critical check yet.
	requireServiceExists(t, a, "redis")
	require.Len(t, a.State.CriticalCheckStates(structs.WildcardEnterpriseMetaInDefaultPartition()), 0)

	// Wait for the check TTL to fail.
	time.Sleep(200 * time.Millisecond)
	requireServiceExists(t, a, "redis")
	require.Len(t, a.State.CriticalCheckStates(structs.WildcardEnterpriseMetaInDefaultPartition()), 1)

	// Wait a while and make sure it doesn't reap.
	time.Sleep(200 * time.Millisecond)
	requireServiceExists(t, a, "redis")
	require.Len(t, a.State.CriticalCheckStates(structs.WildcardEnterpriseMetaInDefaultPartition()), 1)
}
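
// The *restoresSnapshot tests verify that re-registering an existing service
// or check does not reset the health status already held in local state: the
// agent snapshots current check states around the re-registration and restores
// them afterwards, so "service:redis" stays passing below.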

func TestAgent_AddService_restoresSnapshot(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Run("normal", func(t *testing.T) {
		t.Parallel()
		testAgent_AddService_restoresSnapshot(t, "enable_central_service_config = false")
	})
	t.Run("service manager", func(t *testing.T) {
		t.Parallel()
		testAgent_AddService_restoresSnapshot(t, "enable_central_service_config = true")
	})
}

func testAgent_AddService_restoresSnapshot(t *testing.T, extraHCL string) {
	a := NewTestAgent(t, extraHCL)
	defer a.Shutdown()

	// First register a service
	svc := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{"foo"},
		Port:    8000,
	}
	require.NoError(t, a.addServiceFromSource(svc, nil, false, "", ConfigSourceLocal))

	// Register a check
	check1 := &structs.HealthCheck{
		Node:        a.Config.NodeName,
		CheckID:     "service:redis",
		Name:        "redischeck",
		Status:      api.HealthPassing,
		ServiceID:   "redis",
		ServiceName: "redis",
	}
	require.NoError(t, a.AddCheck(check1, nil, false, "", ConfigSourceLocal))

	// Re-registering the service preserves the state of the check
	chkTypes := []*structs.CheckType{{TTL: 30 * time.Second}}
	require.NoError(t, a.addServiceFromSource(svc, chkTypes, false, "", ConfigSourceLocal))
	check := requireCheckExists(t, a, "service:redis")
	require.Equal(t, api.HealthPassing, check.Status)
}
2019-01-07 18:53:03 +00:00
|
|
|
func TestAgent_AddCheck_restoresSnapshot(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2019-01-07 18:53:03 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2019-01-07 18:53:03 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
|
|
|
|
// First register a service
|
|
|
|
svc := &structs.NodeService{
|
|
|
|
ID: "redis",
|
|
|
|
Service: "redis",
|
|
|
|
Tags: []string{"foo"},
|
|
|
|
Port: 8000,
|
|
|
|
}
|
2020-11-30 18:26:58 +00:00
|
|
|
if err := a.addServiceFromSource(svc, nil, false, "", ConfigSourceLocal); err != nil {
|
2019-01-07 18:53:03 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Register a check
|
|
|
|
check1 := &structs.HealthCheck{
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
CheckID: "service:redis",
|
|
|
|
Name: "redischeck",
|
|
|
|
Status: api.HealthPassing,
|
|
|
|
ServiceID: "redis",
|
|
|
|
ServiceName: "redis",
|
|
|
|
}
|
|
|
|
if err := a.AddCheck(check1, nil, false, "", ConfigSourceLocal); err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Re-registering the check preserves its state
|
|
|
|
check1.Status = ""
|
|
|
|
if err := a.AddCheck(check1, &structs.CheckType{TTL: 30 * time.Second}, false, "", ConfigSourceLocal); err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
2019-12-10 02:26:41 +00:00
|
|
|
check := requireCheckExists(t, a, "service:redis")
|
2019-01-07 18:53:03 +00:00
|
|
|
if check.Status != api.HealthPassing {
|
|
|
|
t.Fatalf("bad: %s", check.Status)
|
|
|
|
}
|
|
|
|
}

func TestAgent_NodeMaintenanceMode(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	a := NewTestAgent(t, "")
	defer a.Shutdown()

	// Enter maintenance mode for the node
	a.EnableNodeMaintenance("broken", "mytoken")

	// Make sure the critical health check was added
	check := requireCheckExists(t, a, structs.NodeMaint)

	// Check that the token was used to register the check
	if token := a.State.CheckToken(structs.NodeMaintCheckID); token != "mytoken" {
		t.Fatalf("expected 'mytoken', got: '%s'", token)
	}

	// Ensure the reason was set in notes
	if check.Notes != "broken" {
		t.Fatalf("bad: %#v", check)
	}

	// Leave maintenance mode
	a.DisableNodeMaintenance()

	// Ensure the check was deregistered
	requireCheckMissing(t, a, structs.NodeMaint)

	// Enter maintenance mode without passing a reason
	a.EnableNodeMaintenance("", "")

	// Make sure the check was registered with the default note
	check = requireCheckExists(t, a, structs.NodeMaint)
	if check.Notes != defaultNodeMaintReason {
		t.Fatalf("bad: %#v", check)
	}
}

func TestAgent_checkStateSnapshot(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	a := NewTestAgent(t, "")
	defer a.Shutdown()

	// First register a service
	svc := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{"foo"},
		Port:    8000,
	}
	if err := a.addServiceFromSource(svc, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Register a check
	check1 := &structs.HealthCheck{
		Node:        a.Config.NodeName,
		CheckID:     "service:redis",
		Name:        "redischeck",
		Status:      api.HealthPassing,
		ServiceID:   "redis",
		ServiceName: "redis",
	}
	if err := a.AddCheck(check1, nil, true, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Snapshot the state
	snap := a.snapshotCheckState()

	// Unload all of the checks
	if err := a.unloadChecks(); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Reload the checks and restore the snapshot.
	if err := a.loadChecks(a.Config, snap); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Search for the check
	out := requireCheckExists(t, a, check1.CheckID)

	// Make sure state was restored
	if out.Status != api.HealthPassing {
		t.Fatalf("should have restored check state")
	}
}
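
// The back-compat variant below simulates a data dir written by an older agent:
// persisted check files used to be named by the MD5 hash of the compound check ID,
// so the test renames the SHA-256-named file back to the legacy MD5 name and then
// verifies loadChecks can still restore the snapshotted state from it.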

func TestAgent_checkStateSnapshot_backcompat(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	a := NewTestAgent(t, "")
	defer a.Shutdown()

	// First register a service
	svc := &structs.NodeService{
		ID:      "redis",
		Service: "redis",
		Tags:    []string{"foo"},
		Port:    8000,
	}
	if err := a.addServiceFromSource(svc, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Register a check
	check1 := &structs.HealthCheck{
		Node:        a.Config.NodeName,
		CheckID:     "service:redis",
		Name:        "redischeck",
		Status:      api.HealthPassing,
		ServiceID:   "redis",
		ServiceName: "redis",
	}
	if err := a.AddCheck(check1, nil, true, "", ConfigSourceLocal); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Snapshot the state
	snap := a.snapshotCheckState()

	// Unload all of the checks
	if err := a.unloadChecks(); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Mutate the path to look like the old md5 checksum
	dir := filepath.Join(a.config.DataDir, checksDir)
	new_path := filepath.Join(dir, check1.CompoundCheckID().StringHashSHA256())
	old_path := filepath.Join(dir, check1.CompoundCheckID().StringHashMD5())
	if err := os.Rename(new_path, old_path); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Reload the checks and restore the snapshot.
	if err := a.loadChecks(a.Config, snap); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Search for the check
	out := requireCheckExists(t, a, check1.CheckID)

	// Make sure state was restored
	if out.Status != api.HealthPassing {
		t.Fatalf("should have restored check state")
	}
}

func TestAgent_loadChecks_checkFails(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	a := NewTestAgent(t, "")
	defer a.Shutdown()

	// Persist a health check with an invalid service ID
	check := &structs.HealthCheck{
		Node:      a.Config.NodeName,
		CheckID:   "service:redis",
		Name:      "redischeck",
		Status:    api.HealthPassing,
		ServiceID: "nope",
	}
	if err := a.persistCheck(check, nil, ConfigSourceLocal); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Check to make sure the check was persisted
	checkHash := checkIDHash(check.CheckID)
	checkPath := filepath.Join(a.Config.DataDir, checksDir, checkHash)
	if _, err := os.Stat(checkPath); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Try loading the checks from the persisted files
	if err := a.loadChecks(a.Config, nil); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Ensure the erroneous check was purged
	if _, err := os.Stat(checkPath); err == nil {
		t.Fatalf("should have purged check")
	}
}
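
// The check-state tests below rely on the on-disk layout used by persistCheckState:
// one JSON file per check under checkStateDir, named by the SHA-256 hash of the
// compound check ID (cid.StringHashSHA256()).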

func TestAgent_persistCheckState(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	a := NewTestAgent(t, "")
	defer a.Shutdown()

	cid := structs.NewCheckID("check1", nil)
	// Create the TTL check to persist
	check := &checks.CheckTTL{
		CheckID: cid,
		TTL:     10 * time.Minute,
	}

	// Persist some check state for the check
	err := a.persistCheckState(check, api.HealthCritical, "nope")
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Check the persisted file exists and has the content
	file := filepath.Join(a.Config.DataDir, checkStateDir, cid.StringHashSHA256())
	buf, err := os.ReadFile(file)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Decode the state
	var p persistedCheckState
	if err := json.Unmarshal(buf, &p); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Check the fields
	if p.CheckID != cid.ID {
		t.Fatalf("bad: %#v", p)
	}
	if p.Output != "nope" {
		t.Fatalf("bad: %#v", p)
	}
	if p.Status != api.HealthCritical {
		t.Fatalf("bad: %#v", p)
	}

	// Check the expiration time was set
	if p.Expires < time.Now().Unix() {
		t.Fatalf("bad: %#v", p)
	}
}
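
// persistedCheckState stores an absolute Expires timestamp; loadCheckState only
// restores Status/Output while that deadline is still in the future and purges the
// file otherwise, which is what the two halves of the next test assert.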

func TestAgent_loadCheckState(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	a := NewTestAgent(t, "")
	defer a.Shutdown()

	// Create a check whose state will expire immediately
	check := &checks.CheckTTL{
		CheckID: structs.NewCheckID("check1", nil),
		TTL:     0,
	}

	// Persist the check state
	err := a.persistCheckState(check, api.HealthPassing, "yup")
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Try to load the state
	health := &structs.HealthCheck{
		CheckID: "check1",
		Status:  api.HealthCritical,
	}
	if err := a.loadCheckState(health); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Should not have restored the status due to expiration
	if health.Status != api.HealthCritical {
		t.Fatalf("bad: %#v", health)
	}
	if health.Output != "" {
		t.Fatalf("bad: %#v", health)
	}

	// Should have purged the state
	file := filepath.Join(a.Config.DataDir, checksDir, structs.NewCheckID("check1", nil).StringHashSHA256())
	if _, err := os.Stat(file); !os.IsNotExist(err) {
		t.Fatalf("should have purged state")
	}

	// Set a TTL which will not expire before we check it
	check.TTL = time.Minute
	err = a.persistCheckState(check, api.HealthPassing, "yup")
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Try to load
	if err := a.loadCheckState(health); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Should have restored
	if health.Status != api.HealthPassing {
		t.Fatalf("bad: %#v", health)
	}
	if health.Output != "yup" {
		t.Fatalf("bad: %#v", health)
	}
}

func TestAgent_purgeCheckState(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	a := NewTestAgent(t, "")
	defer a.Shutdown()

	cid := structs.NewCheckID("check1", nil)
	// No error if the state does not exist
	if err := a.purgeCheckState(cid); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Persist some state to the data dir
	check := &checks.CheckTTL{
		CheckID: cid,
		TTL:     time.Minute,
	}
	err := a.persistCheckState(check, api.HealthPassing, "yup")
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Purge the check state
	if err := a.purgeCheckState(cid); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Removed the file
	file := filepath.Join(a.Config.DataDir, checkStateDir, cid.StringHashSHA256())
	if _, err := os.Stat(file); !os.IsNotExist(err) {
		t.Fatalf("should have removed file")
	}
}

func TestAgent_GetCoordinate(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	a := NewTestAgent(t, ``)
	defer a.Shutdown()

	coords, err := a.GetLANCoordinate()
	require.NoError(t, err)
	expected := lib.CoordinateSet{
		"": &coordinate.Coordinate{
			Error:  1.5,
			Height: 1e-05,
			Vec:    []float64{0, 0, 0, 0, 0, 0, 0, 0},
		},
	}
	require.Equal(t, expected, coords)
}

func TestAgent_reloadWatches(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	a := NewTestAgent(t, "")
	defer a.Shutdown()

	// Normal watch with http addr set, should succeed
	newConf := *a.config
	newConf.Watches = []map[string]interface{}{
		{
			"type": "key",
			"key":  "asdf",
			"args": []interface{}{"ls"},
		},
	}
	if err := a.reloadWatches(&newConf); err != nil {
		t.Fatalf("bad: %s", err)
	}

	// Should fail to reload with connect watches
	newConf.Watches = []map[string]interface{}{
		{
			"type": "connect_roots",
			"key":  "asdf",
			"args": []interface{}{"ls"},
		},
	}
	if err := a.reloadWatches(&newConf); err == nil || !strings.Contains(err.Error(), "not allowed in agent config") {
		t.Fatalf("bad: %s", err)
	}

	// Should still succeed with only HTTPS addresses
	newConf.HTTPSAddrs = newConf.HTTPAddrs
	newConf.HTTPAddrs = make([]net.Addr, 0)
	newConf.Watches = []map[string]interface{}{
		{
			"type": "key",
			"key":  "asdf",
			"args": []interface{}{"ls"},
		},
	}
	if err := a.reloadWatches(&newConf); err != nil {
		t.Fatalf("bad: %s", err)
	}

	// Should fail to reload with no http or https addrs
	newConf.HTTPSAddrs = make([]net.Addr, 0)
	newConf.Watches = []map[string]interface{}{
		{
			"type": "key",
			"key":  "asdf",
			"args": []interface{}{"ls"},
		},
	}
	if err := a.reloadWatches(&newConf); err == nil || !strings.Contains(err.Error(), "watch plans require an HTTP or HTTPS endpoint") {
		t.Fatalf("bad: %s", err)
	}
}

func TestAgent_reloadWatchesHTTPS(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	a := TestAgent{UseHTTPS: true}
	if err := a.Start(t); err != nil {
		t.Fatal(err)
	}
	defer a.Shutdown()

	// Normal watch with http addr set, should succeed
	newConf := *a.config
	newConf.Watches = []map[string]interface{}{
		{
			"type": "key",
			"key":  "asdf",
			"args": []interface{}{"ls"},
		},
	}
	if err := a.reloadWatches(&newConf); err != nil {
		t.Fatalf("bad: %s", err)
	}
}
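
// The security-check test below captures the agent's log output. The agent writes
// logs from its own goroutines while the test reads the buffer, so a plain
// bytes.Buffer would race; syncBuffer (defined after the test) wraps it with a
// mutex so the read and writes are safe under the race detector.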

func TestAgent_SecurityChecks(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	hcl := `
	enable_script_checks = true
	`
	a := &TestAgent{Name: t.Name(), HCL: hcl}
	defer a.Shutdown()

	data := make([]byte, 0, 8192)
	buf := &syncBuffer{b: bytes.NewBuffer(data)}
	a.LogOutput = buf
	assert.NoError(t, a.Start(t))
	assert.Contains(t, buf.String(), "using enable-script-checks without ACLs and without allow_write_http_from is DANGEROUS")
}

type syncBuffer struct {
	lock sync.RWMutex
	b    *bytes.Buffer
}

func (b *syncBuffer) Write(data []byte) (int, error) {
	b.lock.Lock()
	defer b.lock.Unlock()
	return b.b.Write(data)
}

func (b *syncBuffer) String() string {
	b.lock.Lock()
	defer b.lock.Unlock()
	return b.b.String()
}
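
// The reload tests that follow drive reloadConfigInternal directly with a second
// HCL source (swapping ca_file for ca_path and flipping verify_server_hostname)
// and then assert that the shared tlsConfigurator hands out the updated CA pools
// and verification settings without restarting the agent.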

func TestAgent_ReloadConfigOutgoingRPCConfig(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dataDir := testutil.TempDir(t, "agent") // we manage the data dir
	hcl := `
		data_dir = "` + dataDir + `"
		verify_outgoing = true
		ca_file = "../test/ca/root.cer"
		cert_file = "../test/key/ourdomain.cer"
		key_file = "../test/key/ourdomain.key"
		verify_server_hostname = false
	`
	a := NewTestAgent(t, hcl)
	defer a.Shutdown()
	tlsConf := a.tlsConfigurator.OutgoingRPCConfig()

	require.True(t, tlsConf.InsecureSkipVerify)
	expectedCaPoolByFile := getExpectedCaPoolByFile(t)
	assertDeepEqual(t, expectedCaPoolByFile, tlsConf.RootCAs, cmpCertPool)
	assertDeepEqual(t, expectedCaPoolByFile, tlsConf.ClientCAs, cmpCertPool)

	hcl = `
		data_dir = "` + dataDir + `"
		verify_outgoing = true
		ca_path = "../test/ca_path"
		cert_file = "../test/key/ourdomain.cer"
		key_file = "../test/key/ourdomain.key"
		verify_server_hostname = true
	`
	c := TestConfig(testutil.Logger(t), config.FileSource{Name: t.Name(), Format: "hcl", Data: hcl})
	require.NoError(t, a.reloadConfigInternal(c))
	tlsConf = a.tlsConfigurator.OutgoingRPCConfig()

	require.False(t, tlsConf.InsecureSkipVerify)
	expectedCaPoolByDir := getExpectedCaPoolByDir(t)
	assertDeepEqual(t, expectedCaPoolByDir, tlsConf.RootCAs, cmpCertPool)
	assertDeepEqual(t, expectedCaPoolByDir, tlsConf.ClientCAs, cmpCertPool)
}

func TestAgent_ReloadConfigAndKeepChecksStatus(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Run("normal", func(t *testing.T) {
		t.Parallel()
		testAgent_ReloadConfigAndKeepChecksStatus(t, "enable_central_service_config = false")
	})
	t.Run("service manager", func(t *testing.T) {
		t.Parallel()
		testAgent_ReloadConfigAndKeepChecksStatus(t, "enable_central_service_config = true")
	})
}

func testAgent_ReloadConfigAndKeepChecksStatus(t *testing.T, extraHCL string) {
	dataDir := testutil.TempDir(t, "agent") // we manage the data dir
	hcl := `data_dir = "` + dataDir + `"
		enable_local_script_checks=true
		services=[{
			name="webserver1",
			check{id="check1", ttl="30s"}
		}] ` + extraHCL
	a := NewTestAgent(t, hcl)
	defer a.Shutdown()

	require.NoError(t, a.updateTTLCheck(structs.NewCheckID("check1", nil), api.HealthPassing, "testing agent reload"))

	// Make sure check is passing before we reload.
	gotChecks := a.State.Checks(nil)
	require.Equal(t, 1, len(gotChecks), "Should have a check registered, but had %#v", gotChecks)
	for id, check := range gotChecks {
		require.Equal(t, "passing", check.Status, "check %q is wrong", id)
	}

	c := TestConfig(testutil.Logger(t), config.FileSource{Name: t.Name(), Format: "hcl", Data: hcl})
	require.NoError(t, a.reloadConfigInternal(c))

	// After reload, should be passing directly (no critical state)
	for id, check := range a.State.Checks(nil) {
		require.Equal(t, "passing", check.Status, "check %q is wrong", id)
	}
}

func TestAgent_ReloadConfigIncomingRPCConfig(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dataDir := testutil.TempDir(t, "agent") // we manage the data dir
	hcl := `
		data_dir = "` + dataDir + `"
		verify_outgoing = true
		ca_file = "../test/ca/root.cer"
		cert_file = "../test/key/ourdomain.cer"
		key_file = "../test/key/ourdomain.key"
		verify_server_hostname = false
	`
	a := NewTestAgent(t, hcl)
	defer a.Shutdown()
	tlsConf := a.tlsConfigurator.IncomingRPCConfig()
	require.NotNil(t, tlsConf.GetConfigForClient)
	tlsConf, err := tlsConf.GetConfigForClient(nil)
	require.NoError(t, err)
	require.NotNil(t, tlsConf)
	require.True(t, tlsConf.InsecureSkipVerify)
	expectedCaPoolByFile := getExpectedCaPoolByFile(t)
	assertDeepEqual(t, expectedCaPoolByFile, tlsConf.RootCAs, cmpCertPool)
	assertDeepEqual(t, expectedCaPoolByFile, tlsConf.ClientCAs, cmpCertPool)

	hcl = `
		data_dir = "` + dataDir + `"
		verify_outgoing = true
		ca_path = "../test/ca_path"
		cert_file = "../test/key/ourdomain.cer"
		key_file = "../test/key/ourdomain.key"
		verify_server_hostname = true
	`
	c := TestConfig(testutil.Logger(t), config.FileSource{Name: t.Name(), Format: "hcl", Data: hcl})
	require.NoError(t, a.reloadConfigInternal(c))
	tlsConf, err = tlsConf.GetConfigForClient(nil)
	require.NoError(t, err)
	require.False(t, tlsConf.InsecureSkipVerify)
	expectedCaPoolByDir := getExpectedCaPoolByDir(t)
	assertDeepEqual(t, expectedCaPoolByDir, tlsConf.RootCAs, cmpCertPool)
	assertDeepEqual(t, expectedCaPoolByDir, tlsConf.ClientCAs, cmpCertPool)
}

func TestAgent_ReloadConfigTLSConfigFailure(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dataDir := testutil.TempDir(t, "agent") // we manage the data dir
	hcl := `
		data_dir = "` + dataDir + `"
		verify_outgoing = true
		ca_file = "../test/ca/root.cer"
		cert_file = "../test/key/ourdomain.cer"
		key_file = "../test/key/ourdomain.key"
		verify_server_hostname = false
	`
	a := NewTestAgent(t, hcl)
	defer a.Shutdown()
	tlsConf := a.tlsConfigurator.IncomingRPCConfig()

	hcl = `
		data_dir = "` + dataDir + `"
		verify_incoming = true
	`
	c := TestConfig(testutil.Logger(t), config.FileSource{Name: t.Name(), Format: "hcl", Data: hcl})
	require.Error(t, a.reloadConfigInternal(c))
	tlsConf, err := tlsConf.GetConfigForClient(nil)
	require.NoError(t, err)
	require.Equal(t, tls.NoClientCert, tlsConf.ClientAuth)

	expectedCaPoolByFile := getExpectedCaPoolByFile(t)
	assertDeepEqual(t, expectedCaPoolByFile, tlsConf.RootCAs, cmpCertPool)
	assertDeepEqual(t, expectedCaPoolByFile, tlsConf.ClientCAs, cmpCertPool)
}
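
// The remaining reload tests check individual hot-reloadable settings: the xds
// update rate limit, enable_debug, and limits.rpc_client_timeout are all expected
// to take effect via reloadConfigInternal without recreating the agent.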

func TestAgent_ReloadConfig_XDSUpdateRateLimit(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	cfg := fmt.Sprintf(`data_dir = %q`, testutil.TempDir(t, "agent"))

	a := NewTestAgent(t, cfg)
	defer a.Shutdown()

	c := TestConfig(
		testutil.Logger(t),
		config.FileSource{
			Name:   t.Name(),
			Format: "hcl",
			Data:   cfg + ` xds { update_max_per_second = 1000 }`,
		},
	)
	require.NoError(t, a.reloadConfigInternal(c))
	require.Equal(t, rate.Limit(1000), a.proxyConfig.UpdateRateLimit())
}

func TestAgent_ReloadConfig_EnableDebug(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	cfg := fmt.Sprintf(`data_dir = %q`, testutil.TempDir(t, "agent"))

	a := NewTestAgent(t, cfg)
	defer a.Shutdown()

	c := TestConfig(
		testutil.Logger(t),
		config.FileSource{
			Name:   t.Name(),
			Format: "hcl",
			Data:   cfg + ` enable_debug = true`,
		},
	)
	require.NoError(t, a.reloadConfigInternal(c))
	require.Equal(t, true, a.enableDebug.Load())

	c = TestConfig(
		testutil.Logger(t),
		config.FileSource{
			Name:   t.Name(),
			Format: "hcl",
			Data:   cfg + ` enable_debug = false`,
		},
	)
	require.NoError(t, a.reloadConfigInternal(c))
	require.Equal(t, false, a.enableDebug.Load())
}

func TestAgent_consulConfig_AutoEncryptAllowTLS(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dataDir := testutil.TempDir(t, "agent") // we manage the data dir
	hcl := `
		data_dir = "` + dataDir + `"
		verify_incoming = true
		ca_file = "../test/ca/root.cer"
		cert_file = "../test/key/ourdomain.cer"
		key_file = "../test/key/ourdomain.key"
		auto_encrypt { allow_tls = true }
	`
	a := NewTestAgent(t, hcl)
	defer a.Shutdown()
	require.True(t, a.consulConfig().AutoEncryptAllowTLS)
}

func TestAgent_ReloadConfigRPCClientConfig(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	dataDir := testutil.TempDir(t, "agent") // we manage the data dir
	hcl := `
		data_dir = "` + dataDir + `"
		server = false
		bootstrap = false
	`
	a := NewTestAgent(t, hcl)

	defaultRPCTimeout := 60 * time.Second
	require.Equal(t, defaultRPCTimeout, a.baseDeps.ConnPool.RPCClientTimeout())

	hcl = `
		data_dir = "` + dataDir + `"
		server = false
		bootstrap = false
		limits {
			rpc_client_timeout = "2m"
		}
	`
	c := TestConfig(testutil.Logger(t), config.FileSource{Name: t.Name(), Format: "hcl", Data: hcl})
	require.NoError(t, a.reloadConfigInternal(c))

	require.Equal(t, 2*time.Minute, a.baseDeps.ConnPool.RPCClientTimeout())
}

func TestAgent_consulConfig_RaftTrailingLogs(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	hcl := `
		raft_trailing_logs = 812345
	`
	a := NewTestAgent(t, hcl)
	defer a.Shutdown()
	require.Equal(t, uint64(812345), a.consulConfig().RaftConfig.TrailingLogs)
}

func TestAgent_consulConfig_RequestLimits(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	hcl := `
		limits {
			request_limits {
				mode = "enforcing"
				read_rate = 8888
				write_rate = 9999
			}
		}
	`
	a := NewTestAgent(t, hcl)
	defer a.Shutdown()
	require.Equal(t, "enforcing", a.consulConfig().RequestLimitsMode)
	require.Equal(t, rate.Limit(8888), a.consulConfig().RequestLimitsReadRate)
	require.Equal(t, rate.Limit(9999), a.consulConfig().RequestLimitsWriteRate)
}
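
// grpcInjectAddr and httpInjectAddr rewrite the host:port portion of a check
// target while preserving any trailing service path or query string; the table
// tests below cover IPv4, IPv6 (with zones, literals, and brackets), and hostname
// forms for both kinds of target.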

func TestAgent_grpcInjectAddr(t *testing.T) {
	tt := []struct {
		name string
		grpc string
		ip   string
		port int
		want string
	}{
		{
			name: "localhost web svc",
			grpc: "localhost:8080/web",
			ip:   "192.168.0.0",
			port: 9090,
			want: "192.168.0.0:9090/web",
		},
		{
			name: "localhost no svc",
			grpc: "localhost:8080",
			ip:   "192.168.0.0",
			port: 9090,
			want: "192.168.0.0:9090",
		},
		{
			name: "ipv4 web svc",
			grpc: "127.0.0.1:8080/web",
			ip:   "192.168.0.0",
			port: 9090,
			want: "192.168.0.0:9090/web",
		},
		{
			name: "ipv4 no svc",
			grpc: "127.0.0.1:8080",
			ip:   "192.168.0.0",
			port: 9090,
			want: "192.168.0.0:9090",
		},
		{
			name: "ipv6 no svc",
			grpc: "2001:db8:1f70::999:de8:7648:6e8:5000",
			ip:   "192.168.0.0",
			port: 9090,
			want: "192.168.0.0:9090",
		},
		{
			name: "ipv6 web svc",
			grpc: "2001:db8:1f70::999:de8:7648:6e8:5000/web",
			ip:   "192.168.0.0",
			port: 9090,
			want: "192.168.0.0:9090/web",
		},
		{
			name: "zone ipv6 web svc",
			grpc: "::FFFF:C0A8:1%1:5000/web",
			ip:   "192.168.0.0",
			port: 9090,
			want: "192.168.0.0:9090/web",
		},
		{
			name: "ipv6 literal web svc",
			grpc: "::FFFF:192.168.0.1:5000/web",
			ip:   "192.168.0.0",
			port: 9090,
			want: "192.168.0.0:9090/web",
		},
		{
			name: "ipv6 injected into ipv6 url",
			grpc: "2001:db8:1f70::999:de8:7648:6e8:5000",
			ip:   "::FFFF:C0A8:1",
			port: 9090,
			want: "::FFFF:C0A8:1:9090",
		},
		{
			name: "ipv6 injected into ipv6 url with svc",
			grpc: "2001:db8:1f70::999:de8:7648:6e8:5000/web",
			ip:   "::FFFF:C0A8:1",
			port: 9090,
			want: "::FFFF:C0A8:1:9090/web",
		},
		{
			name: "ipv6 injected into ipv6 url with special",
			grpc: "2001:db8:1f70::999:de8:7648:6e8:5000/service-$name:with@special:Chars",
			ip:   "::FFFF:C0A8:1",
			port: 9090,
			want: "::FFFF:C0A8:1:9090/service-$name:with@special:Chars",
		},
	}
	for _, tt := range tt {
		t.Run(tt.name, func(t *testing.T) {
			got := grpcInjectAddr(tt.grpc, tt.ip, tt.port)
			if got != tt.want {
				t.Errorf("grpcInjectAddr() got = %v, want %v", got, tt.want)
			}
		})
	}
}

func TestAgent_httpInjectAddr(t *testing.T) {
	tt := []struct {
		name string
		url  string
		ip   string
		port int
		want string
	}{
		{
			name: "localhost health",
			url:  "http://localhost:8080/health",
			ip:   "192.168.0.0",
			port: 9090,
			want: "http://192.168.0.0:9090/health",
		},
		{
			name: "https localhost health",
			url:  "https://localhost:8080/health",
			ip:   "192.168.0.0",
			port: 9090,
			want: "https://192.168.0.0:9090/health",
		},
		{
			name: "https ipv4 health",
			url:  "https://127.0.0.1:8080/health",
			ip:   "192.168.0.0",
			port: 9090,
			want: "https://192.168.0.0:9090/health",
		},
		{
			name: "https ipv4 without path",
			url:  "https://127.0.0.1:8080",
			ip:   "192.168.0.0",
			port: 9090,
			want: "https://192.168.0.0:9090",
		},
		{
			name: "https ipv6 health",
			url:  "https://[2001:db8:1f70::999:de8:7648:6e8]:5000/health",
			ip:   "192.168.0.0",
			port: 9090,
			want: "https://192.168.0.0:9090/health",
		},
		{
			name: "https ipv6 with zone",
			url:  "https://[::FFFF:C0A8:1%1]:5000/health",
			ip:   "192.168.0.0",
			port: 9090,
			want: "https://192.168.0.0:9090/health",
		},
		{
			name: "https ipv6 literal",
			url:  "https://[::FFFF:192.168.0.1]:5000/health",
			ip:   "192.168.0.0",
			port: 9090,
			want: "https://192.168.0.0:9090/health",
		},
		{
			name: "https ipv6 without path",
			url:  "https://[2001:db8:1f70::999:de8:7648:6e8]:5000",
			ip:   "192.168.0.0",
			port: 9090,
			want: "https://192.168.0.0:9090",
		},
		{
			name: "ipv6 injected into ipv6 url",
			url:  "https://[2001:db8:1f70::999:de8:7648:6e8]:5000",
			ip:   "::FFFF:C0A8:1",
			port: 9090,
			want: "https://[::FFFF:C0A8:1]:9090",
		},
		{
			name: "ipv6 with brackets injected into ipv6 url",
			url:  "https://[2001:db8:1f70::999:de8:7648:6e8]:5000",
			ip:   "[::FFFF:C0A8:1]",
			port: 9090,
			want: "https://[::FFFF:C0A8:1]:9090",
		},
		{
			name: "short domain health",
			url:  "http://i.co:8080/health",
			ip:   "192.168.0.0",
			port: 9090,
			want: "http://192.168.0.0:9090/health",
		},
		{
			name: "nested url in query",
			url:  "http://my.corp.com:8080/health?from=http://google.com:8080",
			ip:   "192.168.0.0",
			port: 9090,
			want: "http://192.168.0.0:9090/health?from=http://google.com:8080",
		},
	}
	for _, tt := range tt {
		t.Run(tt.name, func(t *testing.T) {
			got := httpInjectAddr(tt.url, tt.ip, tt.port)
			if got != tt.want {
				t.Errorf("httpInjectAddr() got = %v, want %v", got, tt.want)
			}
		})
	}
}

func TestDefaultIfEmpty(t *testing.T) {
	require.Equal(t, "", defaultIfEmpty("", ""))
	require.Equal(t, "foo", defaultIfEmpty("", "foo"))
	require.Equal(t, "bar", defaultIfEmpty("bar", "foo"))
	require.Equal(t, "bar", defaultIfEmpty("bar", ""))
}

func TestConfigSourceFromName(t *testing.T) {
	cases := []struct {
		in     string
		expect configSource
		bad    bool
	}{
		{in: "local", expect: ConfigSourceLocal},
		{in: "remote", expect: ConfigSourceRemote},
		{in: "", expect: ConfigSourceLocal},
		{in: "LOCAL", bad: true},
		{in: "REMOTE", bad: true},
		{in: "garbage", bad: true},
		{in: " ", bad: true},
	}

	for _, tc := range cases {
		tc := tc
		t.Run(tc.in, func(t *testing.T) {
			got, ok := ConfigSourceFromName(tc.in)
			if tc.bad {
				require.False(t, ok)
				require.Empty(t, got)
			} else {
				require.True(t, ok)
				require.Equal(t, tc.expect, got)
			}
		})
	}
}
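
// The two reroute tests below verify check "exposure" through a connect proxy:
// with proxy.expose.checks enabled, HTTP/GRPC check targets are rewritten to the
// proxy's exposed listeners (21500 for the proxy port, 21501 for the next
// auto-incremented listener) and ExposedPort is recorded in local state; disabling
// exposure reverts both.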

func TestAgent_RerouteExistingHTTPChecks(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	a := NewTestAgent(t, "")
	defer a.Shutdown()

	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Register a service without a ProxyAddr
	svc := &structs.NodeService{
		ID:      "web",
		Service: "web",
		Address: "localhost",
		Port:    8080,
	}
	chks := []*structs.CheckType{
		{
			CheckID:       "http",
			HTTP:          "http://localhost:8080/mypath?query",
			Interval:      20 * time.Millisecond,
			TLSSkipVerify: true,
		},
		{
			CheckID:       "grpc",
			GRPC:          "localhost:8080/myservice",
			Interval:      20 * time.Millisecond,
			TLSSkipVerify: true,
		},
	}
	if err := a.addServiceFromSource(svc, chks, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("failed to add svc: %v", err)
	}

	// Register a proxy and expose HTTP checks.
	// This should trigger setting ProxyHTTP and ProxyGRPC in the checks.
	proxy := &structs.NodeService{
		Kind:    "connect-proxy",
		ID:      "web-proxy",
		Service: "web-proxy",
		Address: "localhost",
		Port:    21500,
		Proxy: structs.ConnectProxyConfig{
			DestinationServiceName: "web",
			DestinationServiceID:   "web",
			LocalServiceAddress:    "localhost",
			LocalServicePort:       8080,
			MeshGateway:            structs.MeshGatewayConfig{},
			Expose: structs.ExposeConfig{
				Checks: true,
			},
		},
	}
	if err := a.addServiceFromSource(proxy, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("failed to add svc: %v", err)
	}

	retry.Run(t, func(r *retry.R) {
		chks := a.ServiceHTTPBasedChecks(structs.NewServiceID("web", nil))
		require.Equal(r, chks[0].ProxyHTTP, "http://localhost:21500/mypath?query")
	})

	retry.Run(t, func(r *retry.R) {
		hc := a.State.Check(structs.NewCheckID("http", nil))
		require.Equal(r, hc.ExposedPort, 21500)
	})

	retry.Run(t, func(r *retry.R) {
		chks := a.ServiceHTTPBasedChecks(structs.NewServiceID("web", nil))

		// GRPC check will be at a later index than HTTP check because of the fetching order in ServiceHTTPBasedChecks.
		// Note that this relies on listener ports auto-incrementing in a.listenerPortLocked.
		require.Equal(r, chks[1].ProxyGRPC, "localhost:21501/myservice")
	})

	retry.Run(t, func(r *retry.R) {
		hc := a.State.Check(structs.NewCheckID("grpc", nil))
		require.Equal(r, hc.ExposedPort, 21501)
	})

	// Re-register a proxy and disable exposing HTTP checks.
	// This should trigger resetting ProxyHTTP and ProxyGRPC to empty strings
	// and reset saved exposed ports in the agent's state.
	proxy = &structs.NodeService{
		Kind:    "connect-proxy",
		ID:      "web-proxy",
		Service: "web-proxy",
		Address: "localhost",
		Port:    21500,
		Proxy: structs.ConnectProxyConfig{
			DestinationServiceName: "web",
			DestinationServiceID:   "web",
			LocalServiceAddress:    "localhost",
			LocalServicePort:       8080,
			MeshGateway:            structs.MeshGatewayConfig{},
			Expose: structs.ExposeConfig{
				Checks: false,
			},
		},
	}
	if err := a.addServiceFromSource(proxy, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("failed to add svc: %v", err)
	}

	retry.Run(t, func(r *retry.R) {
		chks := a.ServiceHTTPBasedChecks(structs.NewServiceID("web", nil))
		require.Empty(r, chks[0].ProxyHTTP, "ProxyHTTP addr was not reset")
	})

	retry.Run(t, func(r *retry.R) {
		hc := a.State.Check(structs.NewCheckID("http", nil))
		require.Equal(r, hc.ExposedPort, 0)
	})

	retry.Run(t, func(r *retry.R) {
		chks := a.ServiceHTTPBasedChecks(structs.NewServiceID("web", nil))

		// Will be at a later index than HTTP check because of the fetching order in ServiceHTTPBasedChecks.
		require.Empty(r, chks[1].ProxyGRPC, "ProxyGRPC addr was not reset")
	})

	retry.Run(t, func(r *retry.R) {
		hc := a.State.Check(structs.NewCheckID("grpc", nil))
		require.Equal(r, hc.ExposedPort, 0)
	})
}

func TestAgent_RerouteNewHTTPChecks(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	a := NewTestAgent(t, "")
	defer a.Shutdown()

	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	// Register a service without a ProxyAddr
	svc := &structs.NodeService{
		ID:      "web",
		Service: "web",
		Address: "localhost",
		Port:    8080,
	}
	if err := a.addServiceFromSource(svc, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("failed to add svc: %v", err)
	}

	// Register a proxy and expose HTTP checks
	proxy := &structs.NodeService{
		Kind:    "connect-proxy",
		ID:      "web-proxy",
		Service: "web-proxy",
		Address: "localhost",
		Port:    21500,
		Proxy: structs.ConnectProxyConfig{
			DestinationServiceName: "web",
			DestinationServiceID:   "web",
			LocalServiceAddress:    "localhost",
			LocalServicePort:       8080,
			MeshGateway:            structs.MeshGatewayConfig{},
			Expose: structs.ExposeConfig{
				Checks: true,
			},
		},
	}
	if err := a.addServiceFromSource(proxy, nil, false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("failed to add svc: %v", err)
	}

	checks := []*structs.HealthCheck{
		{
			CheckID:   "http",
			Name:      "http",
			ServiceID: "web",
			Status:    api.HealthCritical,
		},
		{
			CheckID:   "grpc",
			Name:      "grpc",
			ServiceID: "web",
			Status:    api.HealthCritical,
		},
	}
	chkTypes := []*structs.CheckType{
		{
			CheckID:       "http",
			HTTP:          "http://localhost:8080/mypath?query",
			Interval:      20 * time.Millisecond,
			TLSSkipVerify: true,
		},
		{
			CheckID:       "grpc",
			GRPC:          "localhost:8080/myservice",
			Interval:      20 * time.Millisecond,
			TLSSkipVerify: true,
		},
	}

	// ProxyGRPC and ProxyHTTP should be set when creating check
	// since proxy.expose.checks is enabled on the proxy
	if err := a.AddCheck(checks[0], chkTypes[0], false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("failed to add check: %v", err)
	}
	if err := a.AddCheck(checks[1], chkTypes[1], false, "", ConfigSourceLocal); err != nil {
		t.Fatalf("failed to add check: %v", err)
	}

	retry.Run(t, func(r *retry.R) {
		chks := a.ServiceHTTPBasedChecks(structs.NewServiceID("web", nil))
		require.Equal(r, chks[0].ProxyHTTP, "http://localhost:21500/mypath?query")
	})

	retry.Run(t, func(r *retry.R) {
		hc := a.State.Check(structs.NewCheckID("http", nil))
		require.Equal(r, hc.ExposedPort, 21500)
	})

	retry.Run(t, func(r *retry.R) {
		chks := a.ServiceHTTPBasedChecks(structs.NewServiceID("web", nil))

		// GRPC check will be at a later index than HTTP check because of the fetching order in ServiceHTTPBasedChecks.
		require.Equal(r, chks[1].ProxyGRPC, "localhost:21501/myservice")
	})

	retry.Run(t, func(r *retry.R) {
		hc := a.State.Check(structs.NewCheckID("grpc", nil))
		require.Equal(r, hc.ExposedPort, 21501)
	})
}

func TestAgentCache_serviceInConfigFile_initialFetchErrors_Issue6521(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	// Ensure that initial failures to fetch the discovery chain via the agent
	// cache using the notify API for a service with no config entries
	// correctly recover when those RPCs resume working. The key here is that
	// the lack of config entries guarantees that the RPC will come back with a
	// synthetic index of 1.
	//
	// The bug in the Cache.notifyBlockingQuery used to incorrectly "fix" the
	// index for the next query from 0 to 1 for all queries, when it should
	// have not done so for queries that errored.

	a1 := StartTestAgent(t, TestAgent{Name: "Agent1"})
	defer a1.Shutdown()
	testrpc.WaitForLeader(t, a1.RPC, "dc1")

	a2 := StartTestAgent(t, TestAgent{Name: "Agent2", HCL: `
		server = false
		bootstrap = false
		services {
			name = "echo-client"
			port = 8080
			connect {
				sidecar_service {
					proxy {
						upstreams {
							destination_name = "echo"
							local_bind_port = 9191
						}
					}
				}
			}
		}

		services {
			name = "echo"
			port = 9090
			connect {
				sidecar_service {}
			}
		}
	`})
	defer a2.Shutdown()

	// Starting a client agent disconnected from a server with services.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	ch := make(chan cache.UpdateEvent, 1)
	require.NoError(t, a2.cache.Notify(ctx, cachetype.CompiledDiscoveryChainName, &structs.DiscoveryChainRequest{
		Datacenter:           "dc1",
		Name:                 "echo",
		EvaluateInDatacenter: "dc1",
		EvaluateInNamespace:  "default",
	}, "foo", ch))

	{ // The first event is an error because we are not joined yet.
		evt := <-ch
		require.Equal(t, "foo", evt.CorrelationID)
		require.Nil(t, evt.Result)
		require.Error(t, evt.Err)
		require.Equal(t, evt.Err, structs.ErrNoServers)
	}

	t.Logf("joining client to server")

	// Now connect to server
	_, err := a1.JoinLAN([]string{
		fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortLAN),
	}, nil)
	require.NoError(t, err)

	t.Logf("joined client to server")

	deadlineCh := time.After(10 * time.Second)
	start := time.Now()
LOOP:
	for {
		select {
		case evt := <-ch:
			// We may receive several notifications of an error until we get the
			// first successful reply.
			require.Equal(t, "foo", evt.CorrelationID)
			if evt.Err != nil {
				break LOOP
			}
			require.NoError(t, evt.Err)
			require.NotNil(t, evt.Result)
			t.Logf("took %s to get first success", time.Since(start))
		case <-deadlineCh:
			t.Fatal("did not get notified successfully")
		}
	}
}
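
// In the WAN-federation test below, a single tcpproxy instance stands in for the
// mesh gateways in every datacenter: it routes purely on the SNI names the agents
// dial (e.g. "bob.server.dc1.consul", "server.dc2.consul") to each datacenter's
// RPC port, which is enough for the servers to federate through one gateway
// address.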
|
2020-03-09 20:59:02 +00:00
|
|
|
|
|
|
|
// This is a mirror of a similar test in agent/consul/server_test.go
|
2021-08-24 21:28:44 +00:00
|
|
|
//
|
|
|
|
// TODO(rb): implement something similar to this as a full containerized test suite with proper
|
|
|
|
// isolation so requests can't "cheat" and bypass the mesh gateways
|
2020-03-09 20:59:02 +00:00
|
|
|
func TestAgent_JoinWAN_viaMeshGateway(t *testing.T) {
|
2021-11-01 15:40:16 +00:00
|
|
|
// if this test is failing because of expired certificates
|
|
|
|
// use the procedure in test/CA-GENERATION.md
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2020-03-09 20:59:02 +00:00
|
|
|
t.Parallel()
|
|
|
|
|
2021-11-30 22:32:41 +00:00
|
|
|
port := freeport.GetOne(t)
|
2021-11-27 20:27:59 +00:00
|
|
|
gwAddr := ipaddr.FormatAddressPort("127.0.0.1", port)
|
2020-03-09 20:59:02 +00:00
|
|
|
|
|
|
|
// Due to some ordering, we'll have to manually configure these ports in
|
|
|
|
// advance.
|
2021-11-27 20:27:59 +00:00
|
|
|
secondaryRPCPorts := freeport.GetN(t, 2)
|
2020-03-09 20:59:02 +00:00
|
|
|
|
2020-03-31 20:24:39 +00:00
|
|
|
a1 := StartTestAgent(t, TestAgent{Name: "bob", HCL: `
|
2020-03-09 20:59:02 +00:00
|
|
|
domain = "consul"
|
|
|
|
node_name = "bob"
|
|
|
|
datacenter = "dc1"
|
|
|
|
primary_datacenter = "dc1"
|
|
|
|
# tls
|
|
|
|
ca_file = "../test/hostname/CertAuth.crt"
|
|
|
|
cert_file = "../test/hostname/Bob.crt"
|
|
|
|
key_file = "../test/hostname/Bob.key"
|
|
|
|
verify_incoming = true
|
|
|
|
verify_outgoing = true
|
|
|
|
verify_server_hostname = true
|
|
|
|
# wanfed
|
|
|
|
connect {
|
|
|
|
enabled = true
|
|
|
|
enable_mesh_gateway_wan_federation = true
|
|
|
|
}
|
2020-03-31 20:12:33 +00:00
|
|
|
`})
|
2020-03-09 20:59:02 +00:00
|
|
|
defer a1.Shutdown()
|
|
|
|
testrpc.WaitForTestAgent(t, a1.RPC, "dc1")
|
|
|
|
|
|
|
|
// We'll use the same gateway for all datacenters since it doesn't care.
|
|
|
|
var (
|
|
|
|
rpcAddr1 = ipaddr.FormatAddressPort("127.0.0.1", a1.Config.ServerPort)
|
|
|
|
rpcAddr2 = ipaddr.FormatAddressPort("127.0.0.1", secondaryRPCPorts[0])
|
|
|
|
rpcAddr3 = ipaddr.FormatAddressPort("127.0.0.1", secondaryRPCPorts[1])
|
|
|
|
)
|
|
|
|
var p tcpproxy.Proxy
|
|
|
|
p.AddSNIRoute(gwAddr, "bob.server.dc1.consul", tcpproxy.To(rpcAddr1))
|
|
|
|
p.AddSNIRoute(gwAddr, "server.dc1.consul", tcpproxy.To(rpcAddr1))
|
|
|
|
p.AddSNIRoute(gwAddr, "betty.server.dc2.consul", tcpproxy.To(rpcAddr2))
|
|
|
|
p.AddSNIRoute(gwAddr, "server.dc2.consul", tcpproxy.To(rpcAddr2))
|
|
|
|
p.AddSNIRoute(gwAddr, "bonnie.server.dc3.consul", tcpproxy.To(rpcAddr3))
|
|
|
|
p.AddSNIRoute(gwAddr, "server.dc3.consul", tcpproxy.To(rpcAddr3))
|
|
|
|
p.AddStopACMESearch(gwAddr)
|
|
|
|
require.NoError(t, p.Start())
|
|
|
|
defer func() {
|
|
|
|
p.Close()
|
|
|
|
p.Wait()
|
|
|
|
}()

	t.Logf("routing %s => %s", "{bob.,}server.dc1.consul", rpcAddr1)
	t.Logf("routing %s => %s", "{betty.,}server.dc2.consul", rpcAddr2)
	t.Logf("routing %s => %s", "{bonnie.,}server.dc3.consul", rpcAddr3)

	// Register the mesh gateway service with the agent in dc1.
	{
		args := &structs.ServiceDefinition{
			Kind: structs.ServiceKindMeshGateway,
			ID:   "mesh-gateway",
			Name: "mesh-gateway",
			Meta: map[string]string{structs.MetaWANFederationKey: "1"},
			Port: port,
		}
		req, err := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
		require.NoError(t, err)

		obj, err := a1.srv.AgentRegisterService(nil, req)
		require.NoError(t, err)
		require.Nil(t, obj)
	}

	waitForFederationState := func(t *testing.T, a *TestAgent, dc string) {
		retry.Run(t, func(r *retry.R) {
			req, err := http.NewRequest("GET", "/v1/internal/federation-state/"+dc, nil)
			require.NoError(r, err)

			resp := httptest.NewRecorder()
			obj, err := a.srv.FederationStateGet(resp, req)
			require.NoError(r, err)
			require.NotNil(r, obj)

			out, ok := obj.(structs.FederationStateResponse)
			require.True(r, ok)
			require.NotNil(r, out.State)
			require.Len(r, out.State.MeshGateways, 1)
		})
	}

	// Wait until at least catalog anti-entropy (AE) and federation state AE fire.
	waitForFederationState(t, a1, "dc1")
	retry.Run(t, func(r *retry.R) {
		require.NotEmpty(r, a1.PickRandomMeshGatewaySuitableForDialing("dc1"))
	})

	a2 := StartTestAgent(t, TestAgent{Name: "betty", HCL: `
		domain = "consul"
		node_name = "betty"
		datacenter = "dc2"
		primary_datacenter = "dc1"
		# tls
		ca_file = "../test/hostname/CertAuth.crt"
		cert_file = "../test/hostname/Betty.crt"
		key_file = "../test/hostname/Betty.key"
		verify_incoming = true
		verify_outgoing = true
		verify_server_hostname = true
		ports {
			server = ` + strconv.Itoa(secondaryRPCPorts[0]) + `
		}
		# wanfed
		primary_gateways = ["` + gwAddr + `"]
		retry_interval_wan = "250ms"
		connect {
			enabled = true
			enable_mesh_gateway_wan_federation = true
		}
	`})
	defer a2.Shutdown()
	testrpc.WaitForTestAgent(t, a2.RPC, "dc2")

	a3 := StartTestAgent(t, TestAgent{Name: "bonnie", HCL: `
		domain = "consul"
		node_name = "bonnie"
		datacenter = "dc3"
		primary_datacenter = "dc1"
		# tls
		ca_file = "../test/hostname/CertAuth.crt"
		cert_file = "../test/hostname/Bonnie.crt"
		key_file = "../test/hostname/Bonnie.key"
		verify_incoming = true
		verify_outgoing = true
		verify_server_hostname = true
		ports {
			server = ` + strconv.Itoa(secondaryRPCPorts[1]) + `
		}
		# wanfed
		primary_gateways = ["` + gwAddr + `"]
		retry_interval_wan = "250ms"
		connect {
			enabled = true
			enable_mesh_gateway_wan_federation = true
		}
	`})
	defer a3.Shutdown()
	testrpc.WaitForTestAgent(t, a3.RPC, "dc3")

	// The primary_gateways config setting should cause an automatic mesh join.
	// Assert that the secondaries have joined the primary.
	findPrimary := func(r *retry.R, a *TestAgent) *serf.Member {
		var primary *serf.Member
		for _, m := range a.WANMembers() {
			if m.Tags["dc"] == "dc1" {
				require.Nil(r, primary, "already found one node in dc1")
				primary = &m
			}
		}
		require.NotNil(r, primary)
		return primary
	}
	retry.Run(t, func(r *retry.R) {
		p2, p3 := findPrimary(r, a2), findPrimary(r, a3)
		require.Equal(r, "bob.dc1", p2.Name)
		require.Equal(r, "bob.dc1", p3.Name)
	})

	testrpc.WaitForLeader(t, a2.RPC, "dc2")
	testrpc.WaitForLeader(t, a3.RPC, "dc3")

	// Now we can register the mesh gateway services into the catalogs in dc2 and dc3.
	{
		args := &structs.ServiceDefinition{
			Kind: structs.ServiceKindMeshGateway,
			ID:   "mesh-gateway",
			Name: "mesh-gateway",
			Meta: map[string]string{structs.MetaWANFederationKey: "1"},
			Port: port,
		}
		req, err := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
		require.NoError(t, err)

		obj, err := a2.srv.AgentRegisterService(nil, req)
		require.NoError(t, err)
		require.Nil(t, obj)
	}
	{
		args := &structs.ServiceDefinition{
			Kind: structs.ServiceKindMeshGateway,
			ID:   "mesh-gateway",
			Name: "mesh-gateway",
			Meta: map[string]string{structs.MetaWANFederationKey: "1"},
			Port: port,
		}
		req, err := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
		require.NoError(t, err)

		obj, err := a3.srv.AgentRegisterService(nil, req)
		require.NoError(t, err)
		require.Nil(t, obj)
	}

	// Wait until federation state replication is functioning in every datacenter.
	waitForFederationState(t, a1, "dc1")
	waitForFederationState(t, a1, "dc2")
	waitForFederationState(t, a1, "dc3")

	waitForFederationState(t, a2, "dc1")
	waitForFederationState(t, a2, "dc2")
	waitForFederationState(t, a2, "dc3")

	waitForFederationState(t, a3, "dc1")
	waitForFederationState(t, a3, "dc2")
	waitForFederationState(t, a3, "dc3")

	retry.Run(t, func(r *retry.R) {
		require.NotEmpty(r, a1.PickRandomMeshGatewaySuitableForDialing("dc1"))
		require.NotEmpty(r, a1.PickRandomMeshGatewaySuitableForDialing("dc2"))
		require.NotEmpty(r, a1.PickRandomMeshGatewaySuitableForDialing("dc3"))

		require.NotEmpty(r, a2.PickRandomMeshGatewaySuitableForDialing("dc1"))
		require.NotEmpty(r, a2.PickRandomMeshGatewaySuitableForDialing("dc2"))
		require.NotEmpty(r, a2.PickRandomMeshGatewaySuitableForDialing("dc3"))

		require.NotEmpty(r, a3.PickRandomMeshGatewaySuitableForDialing("dc1"))
		require.NotEmpty(r, a3.PickRandomMeshGatewaySuitableForDialing("dc2"))
		require.NotEmpty(r, a3.PickRandomMeshGatewaySuitableForDialing("dc3"))
	})

	retry.Run(t, func(r *retry.R) {
		if got, want := len(a1.WANMembers()), 3; got != want {
			r.Fatalf("got %d WAN members, want %d", got, want)
		}
		if got, want := len(a2.WANMembers()), 3; got != want {
			r.Fatalf("got %d WAN members, want %d", got, want)
		}
		if got, want := len(a3.WANMembers()), 3; got != want {
			r.Fatalf("got %d WAN members, want %d", got, want)
		}
	})

	// Ensure we can do some trivial RPC in all directions.
	//
	// NOTE: we explicitly make streaming and non-streaming assertions here to
	// verify both rpc and grpc codepaths.
	agents := map[string]*TestAgent{"dc1": a1, "dc2": a2, "dc3": a3}
	names := map[string]string{"dc1": "bob", "dc2": "betty", "dc3": "bonnie"}
	for _, srcDC := range []string{"dc1", "dc2", "dc3"} {
		a := agents[srcDC]
		for _, dstDC := range []string{"dc1", "dc2", "dc3"} {
			if srcDC == dstDC {
				continue
			}
			t.Run(srcDC+" to "+dstDC, func(t *testing.T) {
				t.Run("normal-rpc", func(t *testing.T) {
					req, err := http.NewRequest("GET", "/v1/catalog/nodes?dc="+dstDC, nil)
					require.NoError(t, err)

					resp := httptest.NewRecorder()
					obj, err := a.srv.CatalogNodes(resp, req)
					require.NoError(t, err)
					require.NotNil(t, obj)

					nodes, ok := obj.(structs.Nodes)
					require.True(t, ok)
					require.Len(t, nodes, 1)
					node := nodes[0]
					require.Equal(t, dstDC, node.Datacenter)
					require.Equal(t, names[dstDC], node.Node)
				})
				t.Run("streaming-grpc", func(t *testing.T) {
					req, err := http.NewRequest("GET", "/v1/health/service/consul?cached&dc="+dstDC, nil)
					require.NoError(t, err)

					resp := httptest.NewRecorder()
					obj, err := a.srv.HealthServiceNodes(resp, req)
					require.NoError(t, err)
					require.NotNil(t, obj)

					csns, ok := obj.(structs.CheckServiceNodes)
					require.True(t, ok)
					require.Len(t, csns, 1)

					csn := csns[0]
					require.Equal(t, dstDC, csn.Node.Datacenter)
					require.Equal(t, names[dstDC], csn.Node.Node)
				})
			})
		}
	}
}

func TestAutoConfig_Integration(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	// Eventually this test should really live with the integration tests.
	// The goal here is to have one test server and one test client: spin up
	// both agents, allow the server to authorize the auto_config request,
	// and then see the client join. Finally we force a CA roots update and
	// wait to see that the agent's TLS certificate gets updated.

	cfgDir := testutil.TempDir(t, "auto-config")

	// write some test TLS certificates out to the cfg dir
	cert, key, cacert, err := testTLSCertificates("server.dc1.consul")
	require.NoError(t, err)

	certFile := filepath.Join(cfgDir, "cert.pem")
	caFile := filepath.Join(cfgDir, "cacert.pem")
	keyFile := filepath.Join(cfgDir, "key.pem")

	require.NoError(t, os.WriteFile(certFile, []byte(cert), 0600))
	require.NoError(t, os.WriteFile(caFile, []byte(cacert), 0600))
	require.NoError(t, os.WriteFile(keyFile, []byte(key), 0600))

	// generate a gossip key
	gossipKey := make([]byte, 32)
	n, err := rand.Read(gossipKey)
	require.NoError(t, err)
	require.Equal(t, 32, n)
	gossipKeyEncoded := base64.StdEncoding.EncodeToString(gossipKey)

	// generate the JWT signing keys
	pub, priv, err := oidcauthtest.GenerateKey()
	require.NoError(t, err)

	hclConfig := TestACLConfigWithParams(nil) + `
		encrypt = "` + gossipKeyEncoded + `"
		encrypt_verify_incoming = true
		encrypt_verify_outgoing = true
		verify_incoming = true
		verify_outgoing = true
		verify_server_hostname = true
		ca_file = "` + caFile + `"
		cert_file = "` + certFile + `"
		key_file = "` + keyFile + `"
		connect { enabled = true }
		auto_config {
			authorization {
				enabled = true
				static {
					claim_mappings = {
						consul_node_name = "node"
					}
					claim_assertions = [
						"value.node == \"${node}\""
					]
					bound_issuer = "consul"
					bound_audiences = [
						"consul"
					]
					jwt_validation_pub_keys = ["` + strings.ReplaceAll(pub, "\n", "\\n") + `"]
				}
			}
		}
	`
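	// Note how the pieces of the authorization block fit together: the
	// claim_mappings entry exposes the JWT's "consul_node_name" claim under
	// the "node" variable, and the claim_assertions entry then requires that
	// variable to equal the node name in the auto-config request. The intro
	// token signed below therefore carries consul_node_name = "test-client",
	// matching the client agent's node_name.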

	srv := StartTestAgent(t, TestAgent{Name: "TestAgent-Server", HCL: hclConfig})
	defer srv.Shutdown()

	testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken))

	// sign a JWT token
	now := time.Now()
	token, err := oidcauthtest.SignJWT(priv, jwt.Claims{
		Subject:   "consul",
		Issuer:    "consul",
		Audience:  jwt.Audience{"consul"},
		NotBefore: jwt.NewNumericDate(now.Add(-1 * time.Second)),
		Expiry:    jwt.NewNumericDate(now.Add(5 * time.Minute)),
	}, map[string]interface{}{
		"consul_node_name": "test-client",
	})
	require.NoError(t, err)

	client := StartTestAgent(t, TestAgent{Name: "test-client",
		Overrides: `
		connect {
			test_ca_leaf_root_change_spread = "1ns"
		}
	`,
		HCL: `
		bootstrap = false
		server = false
		ca_file = "` + caFile + `"
		verify_outgoing = true
		verify_server_hostname = true
		node_name = "test-client"
		ports {
			server = ` + strconv.Itoa(srv.Config.RPCBindAddr.Port) + `
		}
		auto_config {
			enabled = true
			intro_token = "` + token + `"
			server_addresses = ["` + srv.Config.RPCBindAddr.String() + `"]
		}`,
	})

	defer client.Shutdown()

	retry.Run(t, func(r *retry.R) {
		require.NotNil(r, client.Agent.tlsConfigurator.Cert())
	})

	// If this succeeds, we managed to get the gossip key and serf addresses
	// to bind to and then connect. Additionally we must have received
	// certificates, or else the verify_incoming config on the server would
	// not let it work.
	testrpc.WaitForTestAgent(t, client.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken))

	// spot check that we now have an ACL token
	require.NotEmpty(t, client.tokens.AgentToken())

	// grab the existing cert
	cert1 := client.Agent.tlsConfigurator.Cert()
	require.NotNil(t, cert1)

	// force a roots rotation by updating the CA config
	t.Logf("Forcing roots rotation on the server")
	ca := connect.TestCA(t, nil)
	req := &structs.CARequest{
		Datacenter:   "dc1",
		WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken},
		Config: &structs.CAConfiguration{
			Provider: "consul",
			Config: map[string]interface{}{
				"LeafCertTTL":         "1h",
				"PrivateKey":          ca.SigningKey,
				"RootCert":            ca.RootCert,
				"IntermediateCertTTL": "3h",
			},
		},
	}
	var reply interface{}
	require.NoError(t, srv.RPC(context.Background(), "ConnectCA.ConfigurationSet", &req, &reply))

	// ensure that a new cert gets generated and pushed into the TLS configurator
	retry.Run(t, func(r *retry.R) {
		require.NotEqual(r, cert1, client.Agent.tlsConfigurator.Cert())

		// check that the on-disk certs match expectations
		data, err := os.ReadFile(filepath.Join(client.DataDir, "auto-config.json"))
		require.NoError(r, err)

		var resp pbautoconf.AutoConfigResponse
		pbUnmarshaler := &protojson.UnmarshalOptions{
			DiscardUnknown: false,
		}
		require.NoError(r, pbUnmarshaler.Unmarshal(data, &resp), "data: %s", data)

		actual, err := tls.X509KeyPair([]byte(resp.Certificate.CertPEM), []byte(resp.Certificate.PrivateKeyPEM))
		require.NoError(r, err)
		require.Equal(r, client.Agent.tlsConfigurator.Cert(), &actual)
	})
}

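// Unlike auto_config above, auto_encrypt only hands the client a TLS
// certificate for agent communication; it does not distribute gossip keys,
// ACL tokens, or other configuration, which is why this test only asserts on
// the certificate the client ends up with.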
func TestAgent_AutoEncrypt(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	// Eventually this test should really live with the integration tests.
	// The goal here is to have one test server and one test client: spin up
	// both agents, allow the server to authorize the auto_encrypt request,
	// and then see the client get a TLS certificate.
	cfgDir := testutil.TempDir(t, "auto-encrypt")

	// write some test TLS certificates out to the cfg dir
	cert, key, cacert, err := testTLSCertificates("server.dc1.consul")
	require.NoError(t, err)

	certFile := filepath.Join(cfgDir, "cert.pem")
	caFile := filepath.Join(cfgDir, "cacert.pem")
	keyFile := filepath.Join(cfgDir, "key.pem")

	require.NoError(t, os.WriteFile(certFile, []byte(cert), 0600))
	require.NoError(t, os.WriteFile(caFile, []byte(cacert), 0600))
	require.NoError(t, os.WriteFile(keyFile, []byte(key), 0600))

	hclConfig := TestACLConfigWithParams(nil) + `
		verify_incoming = true
		verify_outgoing = true
		verify_server_hostname = true
		ca_file = "` + caFile + `"
		cert_file = "` + certFile + `"
		key_file = "` + keyFile + `"
		connect { enabled = true }
		auto_encrypt { allow_tls = true }
	`

	srv := StartTestAgent(t, TestAgent{Name: "test-server", HCL: hclConfig})
	defer srv.Shutdown()

	testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken))

	client := StartTestAgent(t, TestAgent{Name: "test-client", HCL: TestACLConfigWithParams(nil) + `
		bootstrap = false
		server = false
		ca_file = "` + caFile + `"
		verify_outgoing = true
		verify_server_hostname = true
		node_name = "test-client"
		auto_encrypt {
			tls = true
		}
		ports {
			server = ` + strconv.Itoa(srv.Config.RPCBindAddr.Port) + `
		}
		retry_join = ["` + srv.Config.SerfBindAddrLAN.String() + `"]`,
		UseHTTPS: true,
	})

	defer client.Shutdown()

	// If this succeeds, we managed to get a TLS certificate and are using it
	// for encrypted RPC connections.
	testrpc.WaitForTestAgent(t, client.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken))

	// Now validate that the certificate carries the expected agent SPIFFE
	// URI (and no CommonName).
	aeCert := client.tlsConfigurator.Cert()
	require.NotNil(t, aeCert)

	id := connect.SpiffeIDAgent{
		Host:       connect.TestClusterID + ".consul",
		Datacenter: "dc1",
		Agent:      "test-client",
	}
	x509Cert, err := x509.ParseCertificate(aeCert.Certificate[0])
	require.NoError(t, err)
	require.Empty(t, x509Cert.Subject.CommonName)
	require.Len(t, x509Cert.URIs, 1)
	require.Equal(t, id.URI(), x509Cert.URIs[0])
}

func TestSharedRPCRouter(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	// This test runs both a server and a client and ensures that the shared
	// router is being used. It would be possible for the Client and Server
	// types to create and use their own routers and for RPCs such as the
	// ones used in WaitForTestAgent to succeed. However, accessing the
	// router stored on the agent ensures that Serf information from the
	// Client/Server types is being set in the same shared RPC router.

	srv := NewTestAgent(t, "")
	defer srv.Shutdown()

	testrpc.WaitForTestAgent(t, srv.RPC, "dc1")

	mgr, server := srv.Agent.baseDeps.Router.FindLANRoute()
	require.NotNil(t, mgr)
	require.NotNil(t, server)

	client := NewTestAgent(t, `
		server = false
		bootstrap = false
		retry_join = ["`+srv.Config.SerfBindAddrLAN.String()+`"]
	`)

	testrpc.WaitForTestAgent(t, client.RPC, "dc1")

	mgr, server = client.Agent.baseDeps.Router.FindLANRoute()
	require.NotNil(t, mgr)
	require.NotNil(t, server)
}

func TestAgent_ListenHTTP_MultipleAddresses(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	ports := freeport.GetN(t, 2)
	caConfig := tlsutil.Config{}
	tlsConf, err := tlsutil.NewConfigurator(caConfig, hclog.New(nil))
	require.NoError(t, err)
	bd := BaseDeps{
		Deps: consul.Deps{
			Logger:          hclog.NewInterceptLogger(nil),
			Tokens:          new(token.Store),
			TLSConfigurator: tlsConf,
			GRPCConnPool:    &fakeGRPCConnPool{},
			Registry:        resource.NewRegistry(),
		},
		RuntimeConfig: &config.RuntimeConfig{
			HTTPAddrs: []net.Addr{
				&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: ports[0]},
				&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: ports[1]},
			},
		},
		Cache:  cache.New(cache.Options{}),
		NetRPC: &LazyNetRPC{},
	}

	bd.LeafCertManager = leafcert.NewManager(leafcert.Deps{
		CertSigner:  leafcert.NewNetRPCCertSigner(bd.NetRPC),
		RootsReader: leafcert.NewCachedRootsReader(bd.Cache, "dc1"),
		Config:      leafcert.Config{},
	})

	cfg := config.RuntimeConfig{BuildDate: time.Date(2000, 1, 1, 0, 0, 1, 0, time.UTC)}
	bd, err = initEnterpriseBaseDeps(bd, &cfg)
	require.NoError(t, err)

	agent, err := New(bd)
	mockDelegate := delegateMock{}
	mockDelegate.On("LicenseCheck").Return()
	agent.delegate = &mockDelegate
	require.NoError(t, err)

	agent.startLicenseManager(testutil.TestContext(t))

	srvs, err := agent.listenHTTP()
	require.NoError(t, err)
	defer func() {
		ctx := context.Background()
		for _, srv := range srvs {
			srv.Shutdown(ctx)
		}
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	t.Cleanup(cancel)

	g := new(errgroup.Group)
	for _, s := range srvs {
		g.Go(s.Run)
	}

	require.Len(t, srvs, 2)
	require.Len(t, uniqueAddrs(srvs), 2)

	client := &http.Client{}
	for _, s := range srvs {
		u := url.URL{Scheme: s.Protocol, Host: s.Addr.String()}
		req, err := http.NewRequest(http.MethodGet, u.String(), nil)
		require.NoError(t, err)

		resp, err := client.Do(req.WithContext(ctx))
		require.NoError(t, err)
		require.Equal(t, 200, resp.StatusCode)
	}
}

func uniqueAddrs(srvs []apiServer) map[string]struct{} {
	result := make(map[string]struct{}, len(srvs))
	for _, s := range srvs {
		result[s.Addr.String()] = struct{}{}
	}
	return result
}

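// The auto-reload tests below exercise auto_reload_config = true: the agent
// watches the configured TLS files and swaps in a new server certificate
// only when the cert and key on disk form a matching pair. Updating just one
// of the cert/key files should leave the active certificate untouched, and
// updating only the CA file is not picked up at all, which is what the
// "not change" assertions below verify.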
func TestAgent_AutoReloadDoReload_WhenCertAndKeyUpdated(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	certsDir := testutil.TempDir(t, "auto-config")

	// write some test TLS certificates out to the cfg dir
	serverName := "server.dc1.consul"
	signer, _, err := tlsutil.GeneratePrivateKey()
	require.NoError(t, err)

	ca, _, err := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer})
	require.NoError(t, err)

	cert, privateKey, err := tlsutil.GenerateCert(tlsutil.CertOpts{
		Signer:      signer,
		CA:          ca,
		Name:        "Test Cert Name",
		Days:        365,
		DNSNames:    []string{serverName},
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
	})
	require.NoError(t, err)

	certFile := filepath.Join(certsDir, "cert.pem")
	caFile := filepath.Join(certsDir, "cacert.pem")
	keyFile := filepath.Join(certsDir, "key.pem")

	require.NoError(t, os.WriteFile(certFile, []byte(cert), 0600))
	require.NoError(t, os.WriteFile(caFile, []byte(ca), 0600))
	require.NoError(t, os.WriteFile(keyFile, []byte(privateKey), 0600))

	// generate a gossip key
	gossipKey := make([]byte, 32)
	n, err := rand.Read(gossipKey)
	require.NoError(t, err)
	require.Equal(t, 32, n)
	gossipKeyEncoded := base64.StdEncoding.EncodeToString(gossipKey)

	hclConfig := TestACLConfigWithParams(nil) + `
		encrypt = "` + gossipKeyEncoded + `"
		encrypt_verify_incoming = true
		encrypt_verify_outgoing = true
		verify_incoming = true
		verify_outgoing = true
		verify_server_hostname = true
		ca_file = "` + caFile + `"
		cert_file = "` + certFile + `"
		key_file = "` + keyFile + `"
		connect { enabled = true }
		auto_reload_config = true
	`

	srv := StartTestAgent(t, TestAgent{Name: "TestAgent-Server", HCL: hclConfig})
	defer srv.Shutdown()

	testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken))

	aeCert := srv.tlsConfigurator.Cert()
	require.NotNil(t, aeCert)

	cert2, privateKey2, err := tlsutil.GenerateCert(tlsutil.CertOpts{
		Signer:      signer,
		CA:          ca,
		Name:        "Test Cert Name",
		Days:        365,
		DNSNames:    []string{serverName},
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
	})
	require.NoError(t, err)
	require.NoError(t, os.WriteFile(certFile, []byte(cert2), 0600))
	require.NoError(t, os.WriteFile(keyFile, []byte(privateKey2), 0600))

	retry.Run(t, func(r *retry.R) {
		aeCert2 := srv.tlsConfigurator.Cert()
		require.NotEqual(r, aeCert.Certificate, aeCert2.Certificate)
	})
}

func TestAgent_AutoReloadDoNotReload_WhenCaUpdated(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	certsDir := testutil.TempDir(t, "auto-config")

	// write some test TLS certificates out to the cfg dir
	serverName := "server.dc1.consul"
	signer, _, err := tlsutil.GeneratePrivateKey()
	require.NoError(t, err)

	ca, _, err := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer})
	require.NoError(t, err)

	cert, privateKey, err := tlsutil.GenerateCert(tlsutil.CertOpts{
		Signer:      signer,
		CA:          ca,
		Name:        "Test Cert Name",
		Days:        365,
		DNSNames:    []string{serverName},
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
	})
	require.NoError(t, err)

	certFile := filepath.Join(certsDir, "cert.pem")
	caFile := filepath.Join(certsDir, "cacert.pem")
	keyFile := filepath.Join(certsDir, "key.pem")

	require.NoError(t, os.WriteFile(certFile, []byte(cert), 0600))
	require.NoError(t, os.WriteFile(caFile, []byte(ca), 0600))
	require.NoError(t, os.WriteFile(keyFile, []byte(privateKey), 0600))

	// generate a gossip key
	gossipKey := make([]byte, 32)
	n, err := rand.Read(gossipKey)
	require.NoError(t, err)
	require.Equal(t, 32, n)
	gossipKeyEncoded := base64.StdEncoding.EncodeToString(gossipKey)

	hclConfig := TestACLConfigWithParams(nil) + `
		encrypt = "` + gossipKeyEncoded + `"
		encrypt_verify_incoming = true
		encrypt_verify_outgoing = true
		verify_incoming = true
		verify_outgoing = true
		verify_server_hostname = true
		ca_file = "` + caFile + `"
		cert_file = "` + certFile + `"
		key_file = "` + keyFile + `"
		connect { enabled = true }
		auto_reload_config = true
	`

	srv := StartTestAgent(t, TestAgent{Name: "TestAgent-Server", HCL: hclConfig})
	defer srv.Shutdown()

	testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken))

	aeCA := srv.tlsConfigurator.ManualCAPems()
	require.NotNil(t, aeCA)

	ca2, _, err := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer})
	require.NoError(t, err)
	require.NoError(t, os.WriteFile(caFile, []byte(ca2), 0600))

	// wait a bit to see if it gets updated.
	time.Sleep(time.Second)

	aeCA2 := srv.tlsConfigurator.ManualCAPems()
	require.NotNil(t, aeCA2)
	require.Equal(t, aeCA, aeCA2)
}

func TestAgent_AutoReloadDoReload_WhenCertThenKeyUpdated(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	certsDir := testutil.TempDir(t, "auto-config")

	// write some test TLS certificates out to the cfg dir
	serverName := "server.dc1.consul"
	signer, _, err := tlsutil.GeneratePrivateKey()
	require.NoError(t, err)

	ca, _, err := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer})
	require.NoError(t, err)

	cert, privateKey, err := tlsutil.GenerateCert(tlsutil.CertOpts{
		Signer:      signer,
		CA:          ca,
		Name:        "Test Cert Name",
		Days:        365,
		DNSNames:    []string{serverName},
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
	})
	require.NoError(t, err)

	certFile := filepath.Join(certsDir, "cert.pem")
	caFile := filepath.Join(certsDir, "cacert.pem")
	keyFile := filepath.Join(certsDir, "key.pem")

	require.NoError(t, os.WriteFile(certFile, []byte(cert), 0600))
	require.NoError(t, os.WriteFile(caFile, []byte(ca), 0600))
	require.NoError(t, os.WriteFile(keyFile, []byte(privateKey), 0600))

	// generate a gossip key
	gossipKey := make([]byte, 32)
	n, err := rand.Read(gossipKey)
	require.NoError(t, err)
	require.Equal(t, 32, n)
	gossipKeyEncoded := base64.StdEncoding.EncodeToString(gossipKey)

	hclConfig := TestACLConfigWithParams(nil)

	configFile := testutil.TempDir(t, "config") + "/config.hcl"
	require.NoError(t, os.WriteFile(configFile, []byte(`
		encrypt = "`+gossipKeyEncoded+`"
		encrypt_verify_incoming = true
		encrypt_verify_outgoing = true
		verify_incoming = true
		verify_outgoing = true
		verify_server_hostname = true
		ca_file = "`+caFile+`"
		cert_file = "`+certFile+`"
		key_file = "`+keyFile+`"
		connect { enabled = true }
		auto_reload_config = true
	`), 0600))

	srv := StartTestAgent(t, TestAgent{Name: "TestAgent-Server", HCL: hclConfig, configFiles: []string{configFile}})
	defer srv.Shutdown()

	testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken))

	cert1Pub := srv.tlsConfigurator.Cert().Certificate
	cert1Key := srv.tlsConfigurator.Cert().PrivateKey

	certNew, privateKeyNew, err := tlsutil.GenerateCert(tlsutil.CertOpts{
		Signer:      signer,
		CA:          ca,
		Name:        "Test Cert Name",
		Days:        365,
		DNSNames:    []string{serverName},
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
	})
	require.NoError(t, err)
	certFileNew := filepath.Join(certsDir, "cert_new.pem")
	require.NoError(t, os.WriteFile(certFileNew, []byte(certNew), 0600))
	require.NoError(t, os.WriteFile(configFile, []byte(`
		encrypt = "`+gossipKeyEncoded+`"
		encrypt_verify_incoming = true
		encrypt_verify_outgoing = true
		verify_incoming = true
		verify_outgoing = true
		verify_server_hostname = true
		ca_file = "`+caFile+`"
		cert_file = "`+certFileNew+`"
		key_file = "`+keyFile+`"
		connect { enabled = true }
		auto_reload_config = true
	`), 0600))

	// cert should not change as we did not update the associated key
	time.Sleep(1 * time.Second)
	retry.Run(t, func(r *retry.R) {
		cert := srv.tlsConfigurator.Cert()
		require.NotNil(r, cert)
		require.Equal(r, cert1Pub, cert.Certificate)
		require.Equal(r, cert1Key, cert.PrivateKey)
	})

	require.NoError(t, os.WriteFile(keyFile, []byte(privateKeyNew), 0600))

	// cert should change now that the associated key has been updated as well
	time.Sleep(1 * time.Second)
	retry.Run(t, func(r *retry.R) {
		require.NotEqual(r, cert1Pub, srv.tlsConfigurator.Cert().Certificate)
		require.NotEqual(r, cert1Key, srv.tlsConfigurator.Cert().PrivateKey)
	})
}

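// Same scenario as above, but with the key and the certificate updated in
// the opposite order: the reload should again only take effect once both
// halves of the pair are consistent on disk.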
func TestAgent_AutoReloadDoReload_WhenKeyThenCertUpdated(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	certsDir := testutil.TempDir(t, "auto-config")

	// write some test TLS certificates out to the cfg dir
	serverName := "server.dc1.consul"
	signer, _, err := tlsutil.GeneratePrivateKey()
	require.NoError(t, err)

	ca, _, err := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer})
	require.NoError(t, err)

	cert, privateKey, err := tlsutil.GenerateCert(tlsutil.CertOpts{
		Signer:      signer,
		CA:          ca,
		Name:        "Test Cert Name",
		Days:        365,
		DNSNames:    []string{serverName},
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
	})
	require.NoError(t, err)

	certFile := filepath.Join(certsDir, "cert.pem")
	caFile := filepath.Join(certsDir, "cacert.pem")
	keyFile := filepath.Join(certsDir, "key.pem")

	require.NoError(t, os.WriteFile(certFile, []byte(cert), 0600))
	require.NoError(t, os.WriteFile(caFile, []byte(ca), 0600))
	require.NoError(t, os.WriteFile(keyFile, []byte(privateKey), 0600))

	// generate a gossip key
	gossipKey := make([]byte, 32)
	n, err := rand.Read(gossipKey)
	require.NoError(t, err)
	require.Equal(t, 32, n)
	gossipKeyEncoded := base64.StdEncoding.EncodeToString(gossipKey)

	hclConfig := TestACLConfigWithParams(nil)

	configFile := testutil.TempDir(t, "config") + "/config.hcl"
	require.NoError(t, os.WriteFile(configFile, []byte(`
		encrypt = "`+gossipKeyEncoded+`"
		encrypt_verify_incoming = true
		encrypt_verify_outgoing = true
		verify_incoming = true
		verify_outgoing = true
		verify_server_hostname = true
		ca_file = "`+caFile+`"
		cert_file = "`+certFile+`"
		key_file = "`+keyFile+`"
		connect { enabled = true }
		auto_reload_config = true
	`), 0600))

	srv := StartTestAgent(t, TestAgent{Name: "TestAgent-Server", HCL: hclConfig, configFiles: []string{configFile}})

	defer srv.Shutdown()

	testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken))

	cert1Pub := srv.tlsConfigurator.Cert().Certificate
	cert1Key := srv.tlsConfigurator.Cert().PrivateKey

	certNew, privateKeyNew, err := tlsutil.GenerateCert(tlsutil.CertOpts{
		Signer:      signer,
		CA:          ca,
		Name:        "Test Cert Name",
		Days:        365,
		DNSNames:    []string{serverName},
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
	})
	require.NoError(t, err)
	certFileNew := filepath.Join(certsDir, "cert_new.pem")
	require.NoError(t, os.WriteFile(keyFile, []byte(privateKeyNew), 0600))
	// cert should not change as we did not update the associated cert
	time.Sleep(1 * time.Second)
	retry.Run(t, func(r *retry.R) {
		cert := srv.tlsConfigurator.Cert()
		require.NotNil(r, cert)
		require.Equal(r, cert1Pub, cert.Certificate)
		require.Equal(r, cert1Key, cert.PrivateKey)
	})

	require.NoError(t, os.WriteFile(certFileNew, []byte(certNew), 0600))
	require.NoError(t, os.WriteFile(configFile, []byte(`
		encrypt = "`+gossipKeyEncoded+`"
		encrypt_verify_incoming = true
		encrypt_verify_outgoing = true
		verify_incoming = true
		verify_outgoing = true
		verify_server_hostname = true
		ca_file = "`+caFile+`"
		cert_file = "`+certFileNew+`"
		key_file = "`+keyFile+`"
		connect { enabled = true }
		auto_reload_config = true
	`), 0600))

	// cert should change now that the cert has been updated to match the new key
	time.Sleep(1 * time.Second)
	retry.Run(t, func(r *retry.R) {
		cert := srv.tlsConfigurator.Cert()
		require.NotNil(r, cert)
		require.NotEqual(r, cert1Pub, cert.Certificate)
		require.NotEqual(r, cert1Key, cert.PrivateKey)
	})
	cert2Pub := srv.tlsConfigurator.Cert().Certificate
	cert2Key := srv.tlsConfigurator.Cert().PrivateKey

	certNew2, privateKeyNew2, err := tlsutil.GenerateCert(tlsutil.CertOpts{
		Signer:      signer,
		CA:          ca,
		Name:        "Test Cert Name",
		Days:        365,
		DNSNames:    []string{serverName},
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
	})
	require.NoError(t, err)
	require.NoError(t, os.WriteFile(keyFile, []byte(privateKeyNew2), 0600))
	// cert should not change as we did not update the associated cert
	time.Sleep(1 * time.Second)
	retry.Run(t, func(r *retry.R) {
		cert := srv.tlsConfigurator.Cert()
		require.NotNil(r, cert)
		require.Equal(r, cert2Pub, cert.Certificate)
		require.Equal(r, cert2Key, cert.PrivateKey)
	})

	require.NoError(t, os.WriteFile(certFileNew, []byte(certNew2), 0600))

	// cert should change now that the cert has been updated as well
	time.Sleep(1 * time.Second)
	retry.Run(t, func(r *retry.R) {
		cert := srv.tlsConfigurator.Cert()
		require.NotNil(r, cert)
		require.NotEqual(r, cert2Pub, cert.Certificate)
		require.NotEqual(r, cert2Key, cert.PrivateKey)
	})
}

func Test_coalesceTimerTwoPeriods(t *testing.T) {

	certsDir := testutil.TempDir(t, "auto-config")

	// write some test TLS certificates out to the cfg dir
	serverName := "server.dc1.consul"
	signer, _, err := tlsutil.GeneratePrivateKey()
	require.NoError(t, err)

	ca, _, err := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer})
	require.NoError(t, err)

	cert, privateKey, err := tlsutil.GenerateCert(tlsutil.CertOpts{
		Signer:      signer,
		CA:          ca,
		Name:        "Test Cert Name",
		Days:        365,
		DNSNames:    []string{serverName},
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
	})
	require.NoError(t, err)

	certFile := filepath.Join(certsDir, "cert.pem")
	caFile := filepath.Join(certsDir, "cacert.pem")
	keyFile := filepath.Join(certsDir, "key.pem")

	require.NoError(t, os.WriteFile(certFile, []byte(cert), 0600))
	require.NoError(t, os.WriteFile(caFile, []byte(ca), 0600))
	require.NoError(t, os.WriteFile(keyFile, []byte(privateKey), 0600))

	// generate a gossip key
	gossipKey := make([]byte, 32)
	n, err := rand.Read(gossipKey)
	require.NoError(t, err)
	require.Equal(t, 32, n)
	gossipKeyEncoded := base64.StdEncoding.EncodeToString(gossipKey)

	hclConfig := TestACLConfigWithParams(nil)

	configFile := testutil.TempDir(t, "config") + "/config.hcl"
	require.NoError(t, os.WriteFile(configFile, []byte(`
		encrypt = "`+gossipKeyEncoded+`"
		encrypt_verify_incoming = true
		encrypt_verify_outgoing = true
		verify_incoming = true
		verify_outgoing = true
		verify_server_hostname = true
		ca_file = "`+caFile+`"
		cert_file = "`+certFile+`"
		key_file = "`+keyFile+`"
		connect { enabled = true }
		auto_reload_config = true
	`), 0600))

	coalesceInterval := 100 * time.Millisecond
	testAgent := TestAgent{Name: "TestAgent-Server", HCL: hclConfig, configFiles: []string{configFile}, Config: &config.RuntimeConfig{
		AutoReloadConfigCoalesceInterval: coalesceInterval,
	}}
	srv := StartTestAgent(t, testAgent)
	defer srv.Shutdown()

	testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken))

	cert1Pub := srv.tlsConfigurator.Cert().Certificate
	cert1Key := srv.tlsConfigurator.Cert().PrivateKey

	certNew, privateKeyNew, err := tlsutil.GenerateCert(tlsutil.CertOpts{
		Signer:      signer,
		CA:          ca,
		Name:        "Test Cert Name",
		Days:        365,
		DNSNames:    []string{serverName},
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
	})
	require.NoError(t, err)
	certFileNew := filepath.Join(certsDir, "cert_new.pem")
	require.NoError(t, os.WriteFile(certFileNew, []byte(certNew), 0600))
	require.NoError(t, os.WriteFile(configFile, []byte(`
		encrypt = "`+gossipKeyEncoded+`"
		encrypt_verify_incoming = true
		encrypt_verify_outgoing = true
		verify_incoming = true
		verify_outgoing = true
		verify_server_hostname = true
		ca_file = "`+caFile+`"
		cert_file = "`+certFileNew+`"
		key_file = "`+keyFile+`"
		connect { enabled = true }
		auto_reload_config = true
	`), 0600))

	// cert should not change as we did not update the associated key
	time.Sleep(coalesceInterval * 2)
	retry.Run(t, func(r *retry.R) {
		cert := srv.tlsConfigurator.Cert()
		require.NotNil(r, cert)
		require.Equal(r, cert1Pub, cert.Certificate)
		require.Equal(r, cert1Key, cert.PrivateKey)
	})

	require.NoError(t, os.WriteFile(keyFile, []byte(privateKeyNew), 0600))

	// cert should change now that the associated key has been updated as well
	time.Sleep(coalesceInterval * 2)
	retry.Run(t, func(r *retry.R) {
		require.NotEqual(r, cert1Pub, srv.tlsConfigurator.Cert().Certificate)
		require.NotEqual(r, cert1Key, srv.tlsConfigurator.Cert().PrivateKey)
	})
}

func TestAgent_startListeners(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}
	t.Parallel()

	ports := freeport.GetN(t, 3)
	bd := BaseDeps{
		Deps: consul.Deps{
			Logger:       hclog.NewInterceptLogger(nil),
			Tokens:       new(token.Store),
			GRPCConnPool: &fakeGRPCConnPool{},
			Registry:     resource.NewRegistry(),
		},
		RuntimeConfig: &config.RuntimeConfig{
			HTTPAddrs: []net.Addr{},
		},
		Cache:  cache.New(cache.Options{}),
		NetRPC: &LazyNetRPC{},
	}

	bd.LeafCertManager = leafcert.NewManager(leafcert.Deps{
		CertSigner:  leafcert.NewNetRPCCertSigner(bd.NetRPC),
		RootsReader: leafcert.NewCachedRootsReader(bd.Cache, "dc1"),
		Config:      leafcert.Config{},
	})

	bd, err := initEnterpriseBaseDeps(bd, &config.RuntimeConfig{})
	require.NoError(t, err)

	agent, err := New(bd)
	mockDelegate := delegateMock{}
	mockDelegate.On("LicenseCheck").Return()
	agent.delegate = &mockDelegate
	require.NoError(t, err)

	// use up an address
	used := net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: ports[2]}
	l, err := net.Listen("tcp", used.String())
	require.NoError(t, err)
	t.Cleanup(func() { l.Close() })

	var lns []net.Listener
	t.Cleanup(func() {
		for _, ln := range lns {
			ln.Close()
		}
	})

	// first two addresses open listeners but third address should fail
	lns, err = agent.startListeners([]net.Addr{
		&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: ports[0]},
		&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: ports[1]},
		&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: ports[2]},
	})
	require.Contains(t, err.Error(), "address already in use")

	// first two ports should be freed up
	retry.Run(t, func(r *retry.R) {
		lns, err = agent.startListeners([]net.Addr{
			&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: ports[0]},
			&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: ports[1]},
		})
		require.NoError(r, err)
		require.Len(r, lns, 2)
	})

	// first two ports should be in use
	retry.Run(t, func(r *retry.R) {
		_, err = agent.startListeners([]net.Addr{
			&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: ports[0]},
			&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: ports[1]},
		})
		require.Contains(r, err.Error(), "address already in use")
	})
}

func TestAgent_ServerCertificate(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	const expectURI = "spiffe://11111111-2222-3333-4444-555555555555.consul/agent/server/dc/dc1"

	// Leader should acquire a server cert after bootstrapping.
	a1 := NewTestAgent(t, `
		node_name = "a1"
		acl {
			enabled = true
			tokens {
				initial_management = "root"
				default = "root"
			}
		}
		connect {
			enabled = true
		}
		peering {
			enabled = true
		}`)
	defer a1.Shutdown()
	testrpc.WaitForTestAgent(t, a1.RPC, "dc1")

	retry.Run(t, func(r *retry.R) {
		cert := a1.tlsConfigurator.AutoEncryptCert()
		require.NotNil(r, cert)
		require.Len(r, cert.URIs, 1)
		require.Equal(r, expectURI, cert.URIs[0].String())
	})

	// Join a follower, and it should be able to acquire a server cert as well.
	a2 := NewTestAgent(t, `
		node_name = "a2"
		bootstrap = false
		acl {
			enabled = true
			tokens {
				initial_management = "root"
				default = "root"
			}
		}
		connect {
			enabled = true
		}
		peering {
			enabled = true
		}`)
	defer a2.Shutdown()

	_, err := a2.JoinLAN([]string{fmt.Sprintf("127.0.0.1:%d", a1.Config.SerfPortLAN)}, nil)
	require.NoError(t, err)

	testrpc.WaitForTestAgent(t, a2.RPC, "dc1")

	retry.Run(t, func(r *retry.R) {
		cert := a2.tlsConfigurator.AutoEncryptCert()
		require.NotNil(r, cert)
		require.Len(r, cert.URIs, 1)
		require.Equal(r, expectURI, cert.URIs[0].String())
	})
}

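// The two tests below use a mocked SCADA provider (the session used for the
// HCP / HashiCorp Cloud Platform connection via hcp-scada-provider).
// startListeners treats a SCADA capability address like any other listener
// address, and the listener the provider hands back is what ultimately
// serves the agent's HTTP API over that session.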
func TestAgent_startListeners_scada(t *testing.T) {
	t.Parallel()
	pvd := scada.NewMockProvider(t)
	c := capability.NewAddr("testcap")
	pvd.EXPECT().Listen(c.Capability()).Return(nil, nil).Once()
	bd := BaseDeps{
		Deps: consul.Deps{
			Logger:       hclog.NewInterceptLogger(nil),
			Tokens:       new(token.Store),
			GRPCConnPool: &fakeGRPCConnPool{},
			HCP: hcp.Deps{
				Provider: pvd,
			},
			Registry: resource.NewRegistry(),
		},
		RuntimeConfig: &config.RuntimeConfig{},
		Cache:         cache.New(cache.Options{}),
		NetRPC:        &LazyNetRPC{},
	}

	bd.LeafCertManager = leafcert.NewManager(leafcert.Deps{
		CertSigner:  leafcert.NewNetRPCCertSigner(bd.NetRPC),
		RootsReader: leafcert.NewCachedRootsReader(bd.Cache, "dc1"),
		Config:      leafcert.Config{},
	})

	cfg := config.RuntimeConfig{BuildDate: time.Date(2000, 1, 1, 0, 0, 1, 0, time.UTC)}
	bd, err := initEnterpriseBaseDeps(bd, &cfg)
	require.NoError(t, err)

	agent, err := New(bd)
	mockDelegate := delegateMock{}
	mockDelegate.On("LicenseCheck").Return()
	agent.delegate = &mockDelegate
	require.NoError(t, err)

	_, err = agent.startListeners([]net.Addr{c})
	require.NoError(t, err)
}

func TestAgent_scadaProvider(t *testing.T) {
	pvd := scada.NewMockProvider(t)

	// this listener is used when mocking out the scada provider
	l, err := net.Listen("tcp4", fmt.Sprintf("127.0.0.1:%d", freeport.GetOne(t)))
	require.NoError(t, err)
	defer require.NoError(t, l.Close())

	pvd.EXPECT().UpdateMeta(mock.Anything).Once()
	pvd.EXPECT().Start().Return(nil).Once()
	pvd.EXPECT().Listen(scada.CAPCoreAPI.Capability()).Return(l, nil).Once()
	pvd.EXPECT().Stop().Return(nil).Once()
	pvd.EXPECT().SessionStatus().Return("test")
	a := TestAgent{
		OverrideDeps: func(deps *BaseDeps) {
			deps.HCP.Provider = pvd
		},
		Overrides: `
		cloud {
			resource_id = "organization/0b9de9a3-8403-4ca6-aba8-fca752f42100/project/0b9de9a3-8403-4ca6-aba8-fca752f42100/consul.cluster/0b9de9a3-8403-4ca6-aba8-fca752f42100"
			client_id = "test"
			client_secret = "test"
		}`,
	}
	defer a.Shutdown()
	require.NoError(t, a.Start(t))

	_, err = api.NewClient(&api.Config{Address: l.Addr().String()})
	require.NoError(t, err)
}

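// checkServerLastSeen guards against a server rejoining a cluster after
// being offline for longer than server_rejoin_age_max. The subtests below
// feed it a fake metadata reader to cover the missing-file, read-error,
// stale-timestamp, and fresh-timestamp cases.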
func TestAgent_checkServerLastSeen(t *testing.T) {
	bd := BaseDeps{
		Deps: consul.Deps{
			Logger:       hclog.NewInterceptLogger(nil),
			Tokens:       new(token.Store),
			GRPCConnPool: &fakeGRPCConnPool{},
			Registry:     resource.NewRegistry(),
		},
		RuntimeConfig: &config.RuntimeConfig{},
		Cache:         cache.New(cache.Options{}),
		NetRPC:        &LazyNetRPC{},
	}
	bd.LeafCertManager = leafcert.NewManager(leafcert.Deps{
		CertSigner:  leafcert.NewNetRPCCertSigner(bd.NetRPC),
		RootsReader: leafcert.NewCachedRootsReader(bd.Cache, "dc1"),
		Config:      leafcert.Config{},
	})
	agent, err := New(bd)
	mockDelegate := delegateMock{}
	mockDelegate.On("LicenseCheck").Return()
	agent.delegate = &mockDelegate
	require.NoError(t, err)

	// Test that an ErrNotExist OS error is treated as ok.
	t.Run("TestReadErrNotExist", func(t *testing.T) {
		readFn := func(filename string) (*consul.ServerMetadata, error) {
			return nil, os.ErrNotExist
		}

		err := agent.checkServerLastSeen(readFn)
		require.NoError(t, err)
	})

	// Test that an error reading server metadata is treated as an error.
	t.Run("TestReadErr", func(t *testing.T) {
		expected := errors.New("read error")
		readFn := func(filename string) (*consul.ServerMetadata, error) {
			return nil, expected
		}

		err := agent.checkServerLastSeen(readFn)
		require.ErrorIs(t, err, expected)
	})

	// Test that a server with a 7d old last seen timestamp is treated as an error.
	t.Run("TestIsLastSeenStaleErr", func(t *testing.T) {
		agent.config.ServerRejoinAgeMax = time.Hour

		readFn := func(filename string) (*consul.ServerMetadata, error) {
			return &consul.ServerMetadata{
				LastSeenUnix: time.Now().Add(-24 * 7 * time.Hour).Unix(),
			}, nil
		}

		err := agent.checkServerLastSeen(readFn)
		require.Error(t, err)
		require.ErrorContains(t, err, "refusing to rejoin cluster because server has been offline for more than the configured server_rejoin_age_max")
	})

	// Test that a server with a 6h old last seen timestamp is not treated as an error.
	t.Run("TestNoErr", func(t *testing.T) {
		agent.config.ServerRejoinAgeMax = 24 * 7 * time.Hour

		readFn := func(filename string) (*consul.ServerMetadata, error) {
			return &consul.ServerMetadata{
				LastSeenUnix: time.Now().Add(-6 * time.Hour).Unix(),
			}, nil
		}

		err := agent.checkServerLastSeen(readFn)
		require.NoError(t, err)
	})
}

func TestAgent_getProxyWatcher(t *testing.T) {
	type testcase struct {
		description    string
		getExperiments func() []string
		expectedType   xds.ProxyWatcher
	}
	testcases := []testcase{
		{
			description:  "config source is returned when api-resources experiment is not configured",
			expectedType: &local.ConfigSource{},
			getExperiments: func() []string {
				return []string{}
			},
		},
		{
			description:  "proxy tracker is returned when api-resources experiment is configured",
			expectedType: &proxytracker.ProxyTracker{},
			getExperiments: func() []string {
				return []string{consul.CatalogResourceExperimentName}
			},
		},
	}
	for _, tc := range testcases {
		t.Run(tc.description, func(t *testing.T) {
			caConfig := tlsutil.Config{}
			tlsConf, err := tlsutil.NewConfigurator(caConfig, hclog.New(nil))
			require.NoError(t, err)

			bd := BaseDeps{
				Deps: consul.Deps{
					Logger:          hclog.NewInterceptLogger(nil),
					Tokens:          new(token.Store),
					TLSConfigurator: tlsConf,
					GRPCConnPool:    &fakeGRPCConnPool{},
					Registry:        resource.NewRegistry(),
				},
				RuntimeConfig: &config.RuntimeConfig{
					HTTPAddrs: []net.Addr{
						&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: freeport.GetOne(t)},
					},
				},
				Cache:  cache.New(cache.Options{}),
				NetRPC: &LazyNetRPC{},
			}

			bd.XDSStreamLimiter = limiter.NewSessionLimiter()
			bd.LeafCertManager = leafcert.NewManager(leafcert.Deps{
				CertSigner:  leafcert.NewNetRPCCertSigner(bd.NetRPC),
				RootsReader: leafcert.NewCachedRootsReader(bd.Cache, "dc1"),
				Config:      leafcert.Config{},
			})

			cfg := config.RuntimeConfig{
				BuildDate: time.Date(2000, 1, 1, 0, 0, 1, 0, time.UTC),
			}
			bd, err = initEnterpriseBaseDeps(bd, &cfg)
			require.NoError(t, err)

			bd.Experiments = tc.getExperiments()

			agent, err := New(bd)
			require.NoError(t, err)

			agent.proxyConfig, err = proxycfg.NewManager(proxycfg.ManagerConfig{Logger: bd.Logger, Source: &structs.QuerySource{}})
			require.NoError(t, err)

			require.IsTypef(t, tc.expectedType, agent.getProxyWatcher(), "expected proxyWatcher to be of type %s", reflect.TypeOf(tc.expectedType))
		})
	}
}
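
// Illustrative sketch only (not the agent's actual getProxyWatcher code): the
// experiment-gated selection this test asserts on could look roughly like the
// hypothetical helper below, returning the resource-backed ProxyTracker when
// the catalog/api-resources experiment is enabled and the local ConfigSource
// otherwise. Both concrete types satisfy xds.ProxyWatcher, which is what the
// assertions above rely on.
func selectProxyWatcherSketch(experiments []string, tracker *proxytracker.ProxyTracker, cfgSource *local.ConfigSource) xds.ProxyWatcher {
	for _, exp := range experiments {
		if exp == consul.CatalogResourceExperimentName {
			return tracker
		}
	}
	return cfgSource
}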
func getExpectedCaPoolByFile(t *testing.T) *x509.CertPool {
	pool := x509.NewCertPool()
	data, err := os.ReadFile("../test/ca/root.cer")
	require.NoError(t, err)
	if !pool.AppendCertsFromPEM(data) {
		t.Fatal("could not add test ca ../test/ca/root.cer to pool")
	}
	return pool
}

func getExpectedCaPoolByDir(t *testing.T) *x509.CertPool {
	pool := x509.NewCertPool()
	entries, err := os.ReadDir("../test/ca_path")
	require.NoError(t, err)

	for _, entry := range entries {
		filename := path.Join("../test/ca_path", entry.Name())

		data, err := os.ReadFile(filename)
		require.NoError(t, err)

		if !pool.AppendCertsFromPEM(data) {
			t.Fatalf("could not add test ca %s to pool", filename)
		}
	}

	return pool
}

// lazyCerts has a func field which can't be compared.
var cmpCertPool = cmp.Options{
	cmpopts.IgnoreFields(x509.CertPool{}, "lazyCerts"),
	cmp.AllowUnexported(x509.CertPool{}),
}

func assertDeepEqual(t *testing.T, x, y interface{}, opts ...cmp.Option) {
	t.Helper()
	if diff := cmp.Diff(x, y, opts...); diff != "" {
		t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff)
	}
}
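
// Minimal usage sketch (hypothetical, for illustration only): assertDeepEqual
// pairs with cmpCertPool so two x509.CertPool values can be diffed even though
// the pool's unexported lazyCerts field holds funcs that cmp cannot compare.
func exampleAssertCertPoolEquality(t *testing.T, got *x509.CertPool) {
	want := getExpectedCaPoolByFile(t)
	assertDeepEqual(t, want, got, cmpCertPool)
}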