mirror of https://github.com/hashicorp/consul
upgrade test: consolidate resolver test cases (#16443)
parent 1606472304
commit c7713462ca
@@ -218,6 +218,23 @@ func AssertEnvoyPresentsCertURI(t *testing.T, port int, serviceName string) {
 	}
 }
 
+// AssertEnvoyRunning assert the envoy is running by querying its stats page
+func AssertEnvoyRunning(t *testing.T, port int) {
+	var (
+		err error
+	)
+	failer := func() *retry.Timer {
+		return &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond}
+	}
+
+	retry.RunWith(failer(), t, func(r *retry.R) {
+		_, _, err = GetEnvoyOutput(port, "stats", nil)
+		if err != nil {
+			r.Fatal("could not fetch envoy stats")
+		}
+	})
+}
+
 func GetEnvoyOutput(port int, path string, query map[string]string) (string, int, error) {
 	client := cleanhttp.DefaultClient()
 	var u url.URL
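For context, a minimal sketch of how a test could call the new helper; sidecarProxy here is a hypothetical libservice.Service created elsewhere with CreateAndRegisterStaticServerAndSidecar:

	// sidecarProxy is assumed to exist; only its Envoy admin port is needed.
	_, adminPort := sidecarProxy.GetAdminAddr()
	// Polls the Envoy admin stats endpoint until it answers or the helper's
	// 10s retry timer gives up.
	libassert.AssertEnvoyRunning(t, adminPort)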
@@ -260,10 +277,16 @@ func sanitizeResult(s string) []string {
 // AssertServiceHasHealthyInstances asserts the number of instances of service equals count for a given service.
 // https://developer.hashicorp.com/consul/docs/connect/config-entries/service-resolver#onlypassing
 func AssertServiceHasHealthyInstances(t *testing.T, node libcluster.Agent, service string, onlypassing bool, count int) {
+	failer := func() *retry.Timer {
+		return &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond}
+	}
+
+	retry.RunWith(failer(), t, func(r *retry.R) {
 		services, _, err := node.GetClient().Health().Service(service, "", onlypassing, nil)
-		require.NoError(t, err)
+		require.NoError(r, err)
 		for _, v := range services {
 			fmt.Printf("%s service status: %s\n", v.Service.ID, v.Checks.AggregatedStatus())
 		}
-		require.Equal(t, count, len(services))
+		require.Equal(r, count, len(services))
+	})
 }
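The switch from require.NoError(t, err) to require.NoError(r, err) is the important part of this hunk: inside retry.RunWith, failing against the *retry.R only fails the current attempt and lets the block retry, whereas failing against *testing.T would abort the whole test on the first transient error. A minimal sketch of the pattern, where checkCatalog is a hypothetical probe and the timer values simply mirror the ones above:

	timer := &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond}
	retry.RunWith(timer, t, func(r *retry.R) {
		n, err := checkCatalog() // hypothetical probe, e.g. count healthy instances
		require.NoError(r, err)  // fail this attempt only; RunWith retries
		require.Equal(r, 3, n)   // same: assert against r, not t
	})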
@@ -3,8 +3,8 @@ package upgrade

 import (
 	"context"
 	"fmt"
-	"net/http"
 	"testing"
+	"time"

 	"github.com/hashicorp/consul/api"
 	libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
@@ -12,52 +12,65 @@ import (
 	libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
 	"github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
 	libutils "github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
+	upgrade "github.com/hashicorp/consul/test/integration/consul-container/test/upgrade"
 	"github.com/hashicorp/go-version"
 	"github.com/stretchr/testify/require"
-	"gotest.tools/assert"
 )

-// TestTrafficManagement_ServiceResolverDefaultSubset Summary
-// This test starts up 3 servers and 1 client in the same datacenter.
+// TestTrafficManagement_ServiceResolver tests that upgraded cluster inherits and interpret
+// the resolver config entry correctly.
 //
-// Steps:
-// - Create a single agent cluster.
-// - Create one static-server and 2 subsets and 1 client and sidecar, then register them with Consul
-// - Validate static-server and 2 subsets are and proxy admin endpoint is healthy - 3 instances
-// - Validate static servers proxy listeners should be up and have right certs
-func TestTrafficManagement_ServiceResolverDefaultSubset(t *testing.T) {
+// The basic topology is a cluster with one static-client and one static-server. Addtional
+// services and resolver can be added to the create func() for each test cases.
+func TestTrafficManagement_ServiceResolver(t *testing.T) {
 	t.Parallel()

-	var responseFormat = map[string]string{"format": "json"}
-
 	type testcase struct {
-		oldversion    string
-		targetVersion string
+		name string
+		// create creates addtional resources in the cluster depending on cases, e.g., static-client,
+		// static server, and config-entries. It returns the proxy services of the client, an assertation
+		// function to be called to verify the resources, and a restartFn to be called after upgrade.
+		create func(*libcluster.Cluster, libservice.Service) (libservice.Service, func(), func(), error)
+		// extraAssertion adds additional assertion function to the common resources across cases.
+		// common resources includes static-client in dialing cluster, and static-server in accepting cluster.
+		//
+		// extraAssertion needs to be run before and after upgrade
+		extraAssertion func(libservice.Service)
 	}
 	tcs := []testcase{
 		{
-			oldversion:    "1.13",
-			targetVersion: libutils.TargetVersion,
-		},
-		{
-			oldversion:    "1.14",
-			targetVersion: libutils.TargetVersion,
-		},
-	}
-
-	run := func(t *testing.T, tc testcase) {
-		buildOpts := &libcluster.BuildOptions{
-			ConsulVersion:        tc.oldversion,
-			Datacenter:           "dc1",
-			InjectAutoEncryption: true,
-		}
-		// If version < 1.14 disable AutoEncryption
-		oldVersion, _ := version.NewVersion(tc.oldversion)
-		if oldVersion.LessThan(libutils.Version_1_14) {
-			buildOpts.InjectAutoEncryption = false
-		}
-		cluster, _, _ := topology.NewPeeringCluster(t, 1, buildOpts)
+			// Test resolver directs traffic to default subset
+			// - Create 2 additional static-server instances: one in V1 subset and the other in V2 subset
+			// - resolver directs traffic to the default subset, which is V2.
+			name: "resolver default subset",
+			create: func(cluster *libcluster.Cluster, clientConnectProxy libservice.Service) (libservice.Service, func(), func(), error) {
 				node := cluster.Agents[0]
+				client := node.GetClient()
+
+				// Create static-server-v1 and static-server-v2
+				serviceOptsV1 := &libservice.ServiceOpts{
+					Name:     libservice.StaticServerServiceName,
+					ID:       "static-server-v1",
+					Meta:     map[string]string{"version": "v1"},
+					HTTPPort: 8081,
+					GRPCPort: 8078,
+				}
+				_, serverConnectProxyV1, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOptsV1)
+				require.NoError(t, err)
+
+				serviceOptsV2 := &libservice.ServiceOpts{
+					Name:     libservice.StaticServerServiceName,
+					ID:       "static-server-v2",
+					Meta:     map[string]string{"version": "v2"},
+					HTTPPort: 8082,
+					GRPCPort: 8077,
+				}
+				_, serverConnectProxyV2, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOptsV2)
+				require.NoError(t, err)
+				libassert.CatalogServiceExists(t, client, "static-server")
+
+				// TODO: verify the number of instance of static-server is 3
+				libassert.AssertServiceHasHealthyInstances(t, node, libservice.StaticServerServiceName, true, 3)
+
 				// Register service resolver
 				serviceResolver := &api.ServiceResolverConfigEntry{
@@ -73,89 +86,51 @@ func TestTrafficManagement_ServiceResolverDefaultSubset(t *testing.T) {
 					},
 				},
 			}
-		err := cluster.ConfigEntryWrite(serviceResolver)
-		require.NoError(t, err)
+				err = cluster.ConfigEntryWrite(serviceResolver)
+				if err != nil {
+					return nil, nil, nil, fmt.Errorf("error writing config entry %s", err)
+				}

-		serverConnectProxy, serverConnectProxyV1, serverConnectProxyV2, clientConnectProxy := createService(t, cluster)
-
-		_, port := clientConnectProxy.GetAddr()
-		_, adminPort := clientConnectProxy.GetAdminAddr()
-		_, serverAdminPort := serverConnectProxy.GetAdminAddr()
 				_, serverAdminPortV1 := serverConnectProxyV1.GetAdminAddr()
 				_, serverAdminPortV2 := serverConnectProxyV2.GetAdminAddr()

-		// validate client and proxy is up and running
-		libassert.AssertContainerState(t, clientConnectProxy, "running")
-		libassert.HTTPServiceEchoes(t, "localhost", port, "")
-		libassert.AssertUpstreamEndpointStatus(t, adminPort, "v2.static-server.default", "HEALTHY", 1)
-
-		// Upgrade cluster, restart sidecars then begin service traffic validation
-		require.NoError(t, cluster.StandardUpgrade(t, context.Background(), tc.targetVersion))
-		require.NoError(t, clientConnectProxy.Restart())
-		require.NoError(t, serverConnectProxy.Restart())
+				restartFn := func() {
 					require.NoError(t, serverConnectProxyV1.Restart())
 					require.NoError(t, serverConnectProxyV2.Restart())
+				}

-		// POST upgrade validation; repeat client & proxy validation
-		libassert.HTTPServiceEchoes(t, "localhost", port, "")
-		libassert.AssertUpstreamEndpointStatus(t, adminPort, "v2.static-server.default", "HEALTHY", 1)
-
-		// validate static-client proxy admin is up
-		_, statusCode, err := libassert.GetEnvoyOutput(adminPort, "stats", responseFormat)
-		require.NoError(t, err)
-		assert.Equal(t, http.StatusOK, statusCode, fmt.Sprintf("service cannot be reached %v", statusCode))
-
-		// validate static-server proxy admin is up
-		_, statusCode1, err := libassert.GetEnvoyOutput(serverAdminPort, "stats", responseFormat)
-		require.NoError(t, err)
-		assert.Equal(t, http.StatusOK, statusCode1, fmt.Sprintf("service cannot be reached %v", statusCode1))
-
-		// validate static-server-v1 proxy admin is up
-		_, statusCode2, err := libassert.GetEnvoyOutput(serverAdminPortV1, "stats", responseFormat)
-		require.NoError(t, err)
-		assert.Equal(t, http.StatusOK, statusCode2, fmt.Sprintf("service cannot be reached %v", statusCode2))
-
-		// validate static-server-v2 proxy admin is up
-		_, statusCode3, err := libassert.GetEnvoyOutput(serverAdminPortV2, "stats", responseFormat)
-		require.NoError(t, err)
-		assert.Equal(t, http.StatusOK, statusCode3, fmt.Sprintf("service cannot be reached %v", statusCode3))
-
-		// certs are valid
-		libassert.AssertEnvoyPresentsCertURI(t, adminPort, "static-client")
-		libassert.AssertEnvoyPresentsCertURI(t, serverAdminPort, "static-server")
+				_, adminPort := clientConnectProxy.GetAdminAddr()
+				assertionFn := func() {
+					libassert.AssertEnvoyRunning(t, serverAdminPortV1)
+					libassert.AssertEnvoyRunning(t, serverAdminPortV2)
+
 					libassert.AssertEnvoyPresentsCertURI(t, serverAdminPortV1, "static-server")
 					libassert.AssertEnvoyPresentsCertURI(t, serverAdminPortV2, "static-server")
+
+					libassert.AssertUpstreamEndpointStatus(t, adminPort, "v2.static-server.default", "HEALTHY", 1)

 					// assert static-server proxies should be healthy
 					libassert.AssertServiceHasHealthyInstances(t, node, libservice.StaticServerServiceName, true, 3)
+				}
+				return nil, assertionFn, restartFn, nil
+			},
+			extraAssertion: func(clientConnectProxy libservice.Service) {
+				_, port := clientConnectProxy.GetAddr()
+				_, adminPort := clientConnectProxy.GetAdminAddr()
+
+				libassert.AssertUpstreamEndpointStatus(t, adminPort, "v2.static-server.default", "HEALTHY", 1)
+
 				// static-client upstream should connect to static-server-v2 because the default subset value is to v2 set in the service resolver
 				libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server-v2")
-	}
+			},
+		},

-	for _, tc := range tcs {
-		t.Run(fmt.Sprintf("upgrade from %s to %s", tc.oldversion, tc.targetVersion),
-			func(t *testing.T) {
-				run(t, tc)
-			})
-	}
-}
-
-// create 3 servers and 1 client
-func createService(t *testing.T, cluster *libcluster.Cluster) (libservice.Service, libservice.Service, libservice.Service, libservice.Service) {
+		{
+			// Test resolver resolves service instance based on their check status
+			// - Create one addtional static-server with checks and V1 subset
+			// - resolver directs traffic to "test" service
+			name: "resolver default onlypassing",
+			create: func(cluster *libcluster.Cluster, clientConnectProxy libservice.Service) (libservice.Service, func(), func(), error) {
 				node := cluster.Agents[0]
-	client := node.GetClient()
-
-	serviceOpts := &libservice.ServiceOpts{
-		Name:     libservice.StaticServerServiceName,
-		ID:       "static-server",
-		HTTPPort: 8080,
-		GRPCPort: 8079,
-	}
-	_, serverConnectProxy, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOpts)
-	require.NoError(t, err)
-	libassert.CatalogServiceExists(t, client, "static-server")
-
 				serviceOptsV1 := &libservice.ServiceOpts{
 					Name:     libservice.StaticServerServiceName,
@@ -163,26 +138,284 @@ func createService(t *testing.T, cluster *libcluster.Cluster) (libservice.Servic
 					Meta:     map[string]string{"version": "v1"},
 					HTTPPort: 8081,
 					GRPCPort: 8078,
+					Checks: libservice.Checks{
+						Name: "main",
+						TTL:  "30m",
+					},
+					Connect: libservice.SidecarService{
+						Port: 21011,
+					},
 				}
-	_, serverConnectProxyV1, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOptsV1)
+				_, serverConnectProxyV1, err := libservice.CreateAndRegisterStaticServerAndSidecarWithChecks(node, serviceOptsV1)
 				require.NoError(t, err)
-	libassert.CatalogServiceExists(t, client, "static-server")

-	serviceOptsV2 := &libservice.ServiceOpts{
+				// Register service resolver
+				serviceResolver := &api.ServiceResolverConfigEntry{
+					Kind:          api.ServiceResolver,
 					Name:          libservice.StaticServerServiceName,
-		ID:       "static-server-v2",
-		Meta:     map[string]string{"version": "v2"},
+					DefaultSubset: "test",
+					Subsets: map[string]api.ServiceResolverSubset{
+						"test": {
+							OnlyPassing: true,
+						},
+					},
+					ConnectTimeout: 120 * time.Second,
+				}
+				_, serverAdminPortV1 := serverConnectProxyV1.GetAdminAddr()
+
+				restartFn := func() {
+					require.NoError(t, serverConnectProxyV1.Restart())
+				}
+
+				_, port := clientConnectProxy.GetAddr()
+				_, adminPort := clientConnectProxy.GetAdminAddr()
+				assertionFn := func() {
+					// force static-server-v1 into a warning state
+					err = node.GetClient().Agent().UpdateTTL("service:static-server-v1", "", "warn")
+					require.NoError(t, err)
+
+					// ###########################
+					// ## with onlypassing=true
+					// assert only one static-server proxy is healthy
+					err = cluster.ConfigEntryWrite(serviceResolver)
+					require.NoError(t, err)
+					libassert.AssertServiceHasHealthyInstances(t, node, libservice.StaticServerServiceName, true, 1)
+
+					libassert.AssertEnvoyRunning(t, serverAdminPortV1)
+					libassert.AssertEnvoyPresentsCertURI(t, serverAdminPortV1, "static-server")
+
+					// assert static-server proxies should be healthy
+					libassert.AssertServiceHasHealthyInstances(t, node, libservice.StaticServerServiceName, true, 1)
+
+					// static-client upstream should have 1 healthy endpoint for test.static-server
+					libassert.AssertUpstreamEndpointStatus(t, adminPort, "test.static-server.default", "HEALTHY", 1)
+
+					// static-client upstream should have 1 unhealthy endpoint for test.static-server
+					libassert.AssertUpstreamEndpointStatus(t, adminPort, "test.static-server.default", "UNHEALTHY", 1)
+
+					// static-client upstream should connect to static-server since it is passing
+					libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), libservice.StaticServerServiceName)
+
+					// ###########################
+					// ## with onlypassing=false
+					// revert to OnlyPassing=false by deleting the config
+					err = cluster.ConfigEntryDelete(serviceResolver)
+					require.NoError(t, err)
+
+					// Consul health check assert only one static-server proxy is healthy when onlyPassing is false
+					libassert.AssertServiceHasHealthyInstances(t, node, libservice.StaticServerServiceName, false, 2)
+
+					// Although the service status is in warning state, when onlypassing is set to false Envoy
+					// health check returns all service instances with "warning" or "passing" state as Healthy enpoints
+					libassert.AssertUpstreamEndpointStatus(t, adminPort, "static-server.default", "HEALTHY", 2)
+
+					// static-client upstream should have 0 unhealthy endpoint for static-server
+					libassert.AssertUpstreamEndpointStatus(t, adminPort, "static-server.default", "UNHEALTHY", 0)
+				}
+				return nil, assertionFn, restartFn, nil
+			},
+			extraAssertion: func(clientConnectProxy libservice.Service) {
+			},
+		},
+		{
+			// Test resolver directs traffic to default subset
+			// - Create 3 static-server-2 server instances: one in V1, one in V2, one without any version
+			// - service2Resolver directs traffic to static-server-2-v2
+			name: "resolver subset redirect",
+			create: func(cluster *libcluster.Cluster, clientConnectProxy libservice.Service) (libservice.Service, func(), func(), error) {
+				node := cluster.Agents[0]
+				client := node.GetClient()
+
+				serviceOpts2 := &libservice.ServiceOpts{
+					Name:     libservice.StaticServer2ServiceName,
+					ID:       "static-server-2",
+					HTTPPort: 8081,
+					GRPCPort: 8078,
+				}
+				_, server2ConnectProxy, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOpts2)
+				require.NoError(t, err)
+				libassert.CatalogServiceExists(t, client, libservice.StaticServer2ServiceName)
+
+				serviceOptsV1 := &libservice.ServiceOpts{
+					Name:     libservice.StaticServer2ServiceName,
+					ID:       "static-server-2-v1",
+					Meta:     map[string]string{"version": "v1"},
 					HTTPPort: 8082,
 					GRPCPort: 8077,
 				}
-	_, serverConnectProxyV2, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOptsV2)
+				_, server2ConnectProxyV1, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOptsV1)
 				require.NoError(t, err)
-	libassert.CatalogServiceExists(t, client, "static-server")
+
+				serviceOptsV2 := &libservice.ServiceOpts{
+					Name:     libservice.StaticServer2ServiceName,
+					ID:       "static-server-2-v2",
+					Meta:     map[string]string{"version": "v2"},
+					HTTPPort: 8083,
+					GRPCPort: 8076,
+				}
+				_, server2ConnectProxyV2, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOptsV2)
+				require.NoError(t, err)
+				libassert.CatalogServiceExists(t, client, libservice.StaticServer2ServiceName)
+
+				// Register static-server service resolver
+				serviceResolver := &api.ServiceResolverConfigEntry{
+					Kind: api.ServiceResolver,
+					Name: libservice.StaticServer2ServiceName,
+					Subsets: map[string]api.ServiceResolverSubset{
+						"v1": {
+							Filter: "Service.Meta.version == v1",
+						},
+						"v2": {
+							Filter: "Service.Meta.version == v2",
+						},
+					},
+				}
+				err = cluster.ConfigEntryWrite(serviceResolver)
+				require.NoError(t, err)
+
+				// Register static-server-2 service resolver to redirect traffic
+				// from static-server to static-server-2-v2
+				service2Resolver := &api.ServiceResolverConfigEntry{
+					Kind: api.ServiceResolver,
+					Name: libservice.StaticServerServiceName,
+					Redirect: &api.ServiceResolverRedirect{
+						Service:       libservice.StaticServer2ServiceName,
+						ServiceSubset: "v2",
+					},
+				}
+				err = cluster.ConfigEntryWrite(service2Resolver)
+				require.NoError(t, err)
+
+				_, server2AdminPort := server2ConnectProxy.GetAdminAddr()
+				_, server2AdminPortV1 := server2ConnectProxyV1.GetAdminAddr()
+				_, server2AdminPortV2 := server2ConnectProxyV2.GetAdminAddr()
+
+				restartFn := func() {
+					require.NoErrorf(t, server2ConnectProxy.Restart(), "%s", server2ConnectProxy.GetName())
+					require.NoErrorf(t, server2ConnectProxyV1.Restart(), "%s", server2ConnectProxyV1.GetName())
+					require.NoErrorf(t, server2ConnectProxyV2.Restart(), "%s", server2ConnectProxyV2.GetName())
+				}
+
+				assertionFn := func() {
+					// assert 3 static-server-2 instances are healthy
+					libassert.AssertServiceHasHealthyInstances(t, node, libservice.StaticServer2ServiceName, false, 3)
+
+					libassert.AssertEnvoyRunning(t, server2AdminPort)
+					libassert.AssertEnvoyRunning(t, server2AdminPortV1)
+					libassert.AssertEnvoyRunning(t, server2AdminPortV2)
+
+					libassert.AssertEnvoyPresentsCertURI(t, server2AdminPort, libservice.StaticServer2ServiceName)
+					libassert.AssertEnvoyPresentsCertURI(t, server2AdminPortV1, libservice.StaticServer2ServiceName)
+					libassert.AssertEnvoyPresentsCertURI(t, server2AdminPortV2, libservice.StaticServer2ServiceName)
+
+					// assert static-server proxies should be healthy
+					libassert.AssertServiceHasHealthyInstances(t, node, libservice.StaticServer2ServiceName, true, 3)
+				}
+				return nil, assertionFn, restartFn, nil
+			},
+			extraAssertion: func(clientConnectProxy libservice.Service) {
+				_, appPort := clientConnectProxy.GetAddr()
+				_, adminPort := clientConnectProxy.GetAdminAddr()
+
+				libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", appPort), "static-server-2-v2")
+				libassert.AssertUpstreamEndpointStatus(t, adminPort, "v2.static-server-2.default", "HEALTHY", 1)
+			},
+		},
+	}
+
+	run := func(t *testing.T, tc testcase, oldVersion, targetVersion string) {
+		buildOpts := &libcluster.BuildOptions{
+			ConsulVersion:        oldVersion,
+			Datacenter:           "dc1",
+			InjectAutoEncryption: true,
+		}
+		// If version < 1.14 disable AutoEncryption
+		oldVersionTmp, _ := version.NewVersion(oldVersion)
+		if oldVersionTmp.LessThan(libutils.Version_1_14) {
+			buildOpts.InjectAutoEncryption = false
+		}
+		cluster, _, _ := topology.NewPeeringCluster(t, 1, buildOpts)
+		node := cluster.Agents[0]
+		client := node.GetClient()
+
+		staticClientProxy, staticServerProxy, err := createStaticClientAndServer(cluster)
+		require.NoError(t, err)
+		libassert.CatalogServiceExists(t, client, libservice.StaticServerServiceName)
+		libassert.CatalogServiceExists(t, client, fmt.Sprintf("%s-sidecar-proxy", libservice.StaticClientServiceName))
+
+		err = cluster.ConfigEntryWrite(&api.ProxyConfigEntry{
+			Kind: api.ProxyDefaults,
+			Name: "global",
+			Config: map[string]interface{}{
+				"protocol": "http",
+			},
+		})
+		require.NoError(t, err)
+
+		_, port := staticClientProxy.GetAddr()
+		_, adminPort := staticClientProxy.GetAdminAddr()
+		_, serverAdminPort := staticServerProxy.GetAdminAddr()
+		libassert.HTTPServiceEchoes(t, "localhost", port, "")
+		libassert.AssertEnvoyPresentsCertURI(t, adminPort, libservice.StaticClientServiceName)
+		libassert.AssertEnvoyPresentsCertURI(t, serverAdminPort, libservice.StaticServerServiceName)
+
+		_, assertionAdditionalResources, restartFn, err := tc.create(cluster, staticClientProxy)
+		require.NoError(t, err)
+		// validate client and proxy is up and running
+		libassert.AssertContainerState(t, staticClientProxy, "running")
+		assertionAdditionalResources()
+		tc.extraAssertion(staticClientProxy)
+
+		// Upgrade cluster, restart sidecars then begin service traffic validation
+		require.NoError(t, cluster.StandardUpgrade(t, context.Background(), targetVersion))
+		require.NoError(t, staticClientProxy.Restart())
+		require.NoError(t, staticServerProxy.Restart())
+		restartFn()
+
+		// POST upgrade validation; repeat client & proxy validation
+		libassert.HTTPServiceEchoes(t, "localhost", port, "")
+		libassert.AssertEnvoyRunning(t, adminPort)
+		libassert.AssertEnvoyRunning(t, serverAdminPort)
+
+		// certs are valid
+		libassert.AssertEnvoyPresentsCertURI(t, adminPort, libservice.StaticClientServiceName)
+		libassert.AssertEnvoyPresentsCertURI(t, serverAdminPort, libservice.StaticServerServiceName)
+
+		assertionAdditionalResources()
+		tc.extraAssertion(staticClientProxy)
+	}
+
+	targetVersion := libutils.TargetVersion
+	for _, oldVersion := range upgrade.UpgradeFromVersions {
+		for _, tc := range tcs {
+			t.Run(fmt.Sprintf("%s upgrade from %s to %s", tc.name, oldVersion, targetVersion),
+				func(t *testing.T) {
+					run(t, tc, oldVersion, targetVersion)
+				})
+		}
+	}
+}
+
+// createStaticClientAndServer creates a static-client and a static-server in the cluster
+func createStaticClientAndServer(cluster *libcluster.Cluster) (libservice.Service, libservice.Service, error) {
+	node := cluster.Agents[0]
+	serviceOpts := &libservice.ServiceOpts{
+		Name:     libservice.StaticServerServiceName,
+		ID:       "static-server",
+		HTTPPort: 8080,
+		GRPCPort: 8079,
+	}
+	_, serverConnectProxy, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOpts)
+	if err != nil {
+		return nil, nil, err
+	}
+
 	// Create a client proxy instance with the server as an upstream
 	clientConnectProxy, err := libservice.CreateAndRegisterStaticClientSidecar(node, "", false)
-	require.NoError(t, err)
-	libassert.CatalogServiceExists(t, client, fmt.Sprintf("%s-sidecar-proxy", libservice.StaticClientServiceName))
-
-	return serverConnectProxy, serverConnectProxyV1, serverConnectProxyV2, clientConnectProxy
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return clientConnectProxy, serverConnectProxy, nil
 }
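With the consolidation, adding another resolver scenario only means appending an entry to tcs; the shared run harness already builds the cluster, writes the proxy-defaults, performs the upgrade, restarts the common sidecars, and re-runs the assertions. A hedged sketch of what a minimal, purely hypothetical extra case could look like:

	{
		// "resolver noop" is illustrative only, not part of this change.
		name: "resolver noop",
		create: func(cluster *libcluster.Cluster, clientConnectProxy libservice.Service) (libservice.Service, func(), func(), error) {
			// No extra services or config entries; nothing extra to restart or assert.
			return nil, func() {}, func() {}, nil
		},
		extraAssertion: func(clientConnectProxy libservice.Service) {},
	},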
@@ -1,196 +0,0 @@
-package upgrade
-
-import (
-	"context"
-	"fmt"
-	"net/http"
-	"testing"
-	"time"
-
-	"github.com/hashicorp/consul/api"
-	libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
-	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
-	libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
-	"github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
-	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
-	libutils "github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
-	"github.com/hashicorp/go-version"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-// TestTrafficManagement_ServiceResolverSubsetOnlyPassing Summary
-// This test starts up 2 servers and 1 client in the same datacenter.
-//
-// Steps:
-// - Create a single agent cluster.
-// - Create one static-server, 1 subset server 1 client and sidecars for all services, then register them with Consul
-func TestTrafficManagement_ServiceResolverSubsetOnlyPassing(t *testing.T) {
-	t.Parallel()
-
-	responseFormat := map[string]string{"format": "json"}
-
-	type testcase struct {
-		oldversion    string
-		targetVersion string
-	}
-	tcs := []testcase{
-		{
-			oldversion:    "1.13",
-			targetVersion: utils.TargetVersion,
-		},
-		{
-			oldversion:    "1.14",
-			targetVersion: utils.TargetVersion,
-		},
-	}
-
-	run := func(t *testing.T, tc testcase) {
-		buildOpts := &libcluster.BuildOptions{
-			ConsulVersion:        tc.oldversion,
-			Datacenter:           "dc1",
-			InjectAutoEncryption: true,
-		}
-		// If version < 1.14 disable AutoEncryption
-		oldVersion, _ := version.NewVersion(tc.oldversion)
-		if oldVersion.LessThan(libutils.Version_1_14) {
-			buildOpts.InjectAutoEncryption = false
-		}
-		cluster, _, _ := topology.NewPeeringCluster(t, 1, buildOpts)
-		node := cluster.Agents[0]
-
-		// Register service resolver
-		serviceResolver := &api.ServiceResolverConfigEntry{
-			Kind:          api.ServiceResolver,
-			Name:          libservice.StaticServerServiceName,
-			DefaultSubset: "test",
-			Subsets: map[string]api.ServiceResolverSubset{
-				"test": {
-					OnlyPassing: true,
-				},
-			},
-			ConnectTimeout: 120 * time.Second,
-		}
-		err := cluster.ConfigEntryWrite(serviceResolver)
-		require.NoError(t, err)
-
-		serverConnectProxy, serverConnectProxyV1, clientConnectProxy := createServiceAndSubset(t, cluster)
-
-		_, port := clientConnectProxy.GetAddr()
-		_, adminPort := clientConnectProxy.GetAdminAddr()
-		_, serverAdminPort := serverConnectProxy.GetAdminAddr()
-		_, serverAdminPortV1 := serverConnectProxyV1.GetAdminAddr()
-
-		// Upgrade cluster, restart sidecars then begin service traffic validation
-		require.NoError(t, cluster.StandardUpgrade(t, context.Background(), tc.targetVersion))
-		require.NoError(t, clientConnectProxy.Restart())
-		require.NoError(t, serverConnectProxy.Restart())
-		require.NoError(t, serverConnectProxyV1.Restart())
-
-		// force static-server-v1 into a warning state
-		err = node.GetClient().Agent().UpdateTTL("service:static-server-v1", "", "warn")
-		assert.NoError(t, err)
-
-		// validate static-client is up and running
-		libassert.AssertContainerState(t, clientConnectProxy, "running")
-		libassert.HTTPServiceEchoes(t, "localhost", port, "")
-
-		// validate static-client proxy admin is up
-		_, clientStatusCode, err := libassert.GetEnvoyOutput(adminPort, "stats", responseFormat)
-		require.NoError(t, err)
-		assert.Equal(t, http.StatusOK, clientStatusCode, fmt.Sprintf("service cannot be reached %v", clientStatusCode))
-
-		// validate static-server proxy admin is up
-		_, serverStatusCode, err := libassert.GetEnvoyOutput(serverAdminPort, "stats", responseFormat)
-		require.NoError(t, err)
-		assert.Equal(t, http.StatusOK, serverStatusCode, fmt.Sprintf("service cannot be reached %v", serverStatusCode))
-
-		// validate static-server-v1 proxy admin is up
-		_, serverStatusCodeV1, err := libassert.GetEnvoyOutput(serverAdminPortV1, "stats", responseFormat)
-		require.NoError(t, err)
-		assert.Equal(t, http.StatusOK, serverStatusCodeV1, fmt.Sprintf("service cannot be reached %v", serverStatusCodeV1))
-
-		// certs are valid
-		libassert.AssertEnvoyPresentsCertURI(t, adminPort, libservice.StaticClientServiceName)
-		libassert.AssertEnvoyPresentsCertURI(t, serverAdminPort, libservice.StaticServerServiceName)
-		libassert.AssertEnvoyPresentsCertURI(t, serverAdminPortV1, libservice.StaticServerServiceName)
-
-		// ###########################
-		// ## with onlypassing=true
-		// assert only one static-server proxy is healthy
-		libassert.AssertServiceHasHealthyInstances(t, node, libservice.StaticServerServiceName, true, 1)
-
-		// static-client upstream should have 1 healthy endpoint for test.static-server
-		libassert.AssertUpstreamEndpointStatus(t, adminPort, "test.static-server.default", "HEALTHY", 1)
-
-		// static-client upstream should have 1 unhealthy endpoint for test.static-server
-		libassert.AssertUpstreamEndpointStatus(t, adminPort, "test.static-server.default", "UNHEALTHY", 1)
-
-		// static-client upstream should connect to static-server-v2 because the default subset value is to v2 set in the service resolver
-		libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), libservice.StaticServerServiceName)
-
-		// ###########################
-		// ## with onlypassing=false
-		// revert to OnlyPassing=false by deleting the config
-		err = cluster.ConfigEntryDelete(serviceResolver)
-		require.NoError(t, err)
-
-		// Consul health check assert only one static-server proxy is healthy when onlyPassing is false
-		libassert.AssertServiceHasHealthyInstances(t, node, libservice.StaticServerServiceName, false, 2)
-
-		// Although the service status is in warning state, when onlypassing is set to false Envoy
-		// health check returns all service instances with "warning" or "passing" state as Healthy enpoints
-		libassert.AssertUpstreamEndpointStatus(t, adminPort, "static-server.default", "HEALTHY", 2)
-
-		// static-client upstream should have 0 unhealthy endpoint for static-server
-		libassert.AssertUpstreamEndpointStatus(t, adminPort, "static-server.default", "UNHEALTHY", 0)
-	}
-
-	for _, tc := range tcs {
-		t.Run(fmt.Sprintf("upgrade from %s to %s", tc.oldversion, tc.targetVersion),
-			func(t *testing.T) {
-				run(t, tc)
-			})
-	}
-}
-
-// create 2 servers and 1 client
-func createServiceAndSubset(t *testing.T, cluster *libcluster.Cluster) (libservice.Service, libservice.Service, libservice.Service) {
-	node := cluster.Agents[0]
-	client := node.GetClient()
-
-	serviceOpts := &libservice.ServiceOpts{
-		Name:     libservice.StaticServerServiceName,
-		ID:       libservice.StaticServerServiceName,
-		HTTPPort: 8080,
-		GRPCPort: 8079,
-	}
-	_, serverConnectProxy, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOpts)
-	require.NoError(t, err)
-	libassert.CatalogServiceExists(t, client, libservice.StaticServerServiceName)
-
-	serviceOptsV1 := &libservice.ServiceOpts{
-		Name:     libservice.StaticServerServiceName,
-		ID:       "static-server-v1",
-		Meta:     map[string]string{"version": "v1"},
-		HTTPPort: 8081,
-		GRPCPort: 8078,
-		Checks: libservice.Checks{
-			Name: "main",
-			TTL:  "30m",
-		},
-		Connect: libservice.SidecarService{
-			Port: 21011,
-		},
-	}
-	_, serverConnectProxyV1, err := libservice.CreateAndRegisterStaticServerAndSidecarWithChecks(node, serviceOptsV1)
-	require.NoError(t, err)
-	libassert.CatalogServiceExists(t, client, libservice.StaticServerServiceName)
-
-	// Create a client proxy instance with the server as an upstream
-	clientConnectProxy, err := libservice.CreateAndRegisterStaticClientSidecar(node, "", false)
-	require.NoError(t, err)
-	libassert.CatalogServiceExists(t, client, fmt.Sprintf("%s-sidecar-proxy", libservice.StaticClientServiceName))
-
-	return serverConnectProxy, serverConnectProxyV1, clientConnectProxy
-}
|
@ -1,224 +0,0 @@
|
||||||
package upgrade
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/hashicorp/consul/api"
|
|
||||||
libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
|
|
||||||
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
|
|
||||||
libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
|
|
||||||
"github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
|
|
||||||
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
|
|
||||||
"github.com/hashicorp/go-version"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"gotest.tools/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestTrafficManagement_ServiceResolverSubsetRedirect Summary
|
|
||||||
// This test starts up 4 servers and 1 client in the same datacenter.
|
|
||||||
//
|
|
||||||
// Steps:
|
|
||||||
// - Create a single agent cluster.
|
|
||||||
// - Create 2 static-servers, 2 subset servers and 1 client and sidecars for all services, then register them with Consul
|
|
||||||
// - Validate traffic is successfully redirected from server 1 to sever2-v2 as defined in the service resolver
|
|
||||||
func TestTrafficManagement_ServiceResolverSubsetRedirect(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
type testcase struct {
|
|
||||||
oldversion string
|
|
||||||
targetVersion string
|
|
||||||
}
|
|
||||||
tcs := []testcase{
|
|
||||||
{
|
|
||||||
oldversion: "1.13",
|
|
||||||
targetVersion: utils.TargetVersion,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
oldversion: "1.14",
|
|
||||||
targetVersion: utils.TargetVersion,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
run := func(t *testing.T, tc testcase) {
|
|
||||||
buildOpts := &libcluster.BuildOptions{
|
|
||||||
ConsulVersion: tc.oldversion,
|
|
||||||
Datacenter: "dc1",
|
|
||||||
InjectAutoEncryption: true,
|
|
||||||
}
|
|
||||||
// If version < 1.14 disable AutoEncryption
|
|
||||||
oldVersion, _ := version.NewVersion(tc.oldversion)
|
|
||||||
if oldVersion.LessThan(utils.Version_1_14) {
|
|
||||||
buildOpts.InjectAutoEncryption = false
|
|
||||||
}
|
|
||||||
cluster, _, _ := topology.NewPeeringCluster(t, 1, buildOpts)
|
|
||||||
node := cluster.Agents[0]
|
|
||||||
|
|
||||||
// Register static-server service resolver
|
|
||||||
serviceResolver := &api.ServiceResolverConfigEntry{
|
|
||||||
Kind: api.ServiceResolver,
|
|
||||||
Name: libservice.StaticServer2ServiceName,
|
|
||||||
Subsets: map[string]api.ServiceResolverSubset{
|
|
||||||
"v1": {
|
|
||||||
Filter: "Service.Meta.version == v1",
|
|
||||||
},
|
|
||||||
"v2": {
|
|
||||||
Filter: "Service.Meta.version == v2",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
err := cluster.ConfigEntryWrite(serviceResolver)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Register static-server-2 service resolver to redirect traffic
|
|
||||||
// from static-server to static-server-2-v2
|
|
||||||
service2Resolver := &api.ServiceResolverConfigEntry{
|
|
||||||
Kind: api.ServiceResolver,
|
|
||||||
Name: libservice.StaticServerServiceName,
|
|
||||||
Redirect: &api.ServiceResolverRedirect{
|
|
||||||
Service: libservice.StaticServer2ServiceName,
|
|
||||||
ServiceSubset: "v2",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
err = cluster.ConfigEntryWrite(service2Resolver)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// register agent services
|
|
||||||
agentServices := setupServiceAndSubsets(t, cluster)
|
|
||||||
assertionFn, proxyRestartFn := agentServices.validateAgentServices(t)
|
|
||||||
_, port := agentServices.client.GetAddr()
|
|
||||||
_, adminPort := agentServices.client.GetAdminAddr()
|
|
||||||
|
|
||||||
// validate static-client is up and running
|
|
||||||
libassert.AssertContainerState(t, agentServices.client, "running")
|
|
||||||
libassert.HTTPServiceEchoes(t, "localhost", port, "")
|
|
||||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server-2-v2")
|
|
||||||
assertionFn()
|
|
||||||
|
|
||||||
// Upgrade cluster, restart sidecars then begin service traffic validation
|
|
||||||
require.NoError(t, cluster.StandardUpgrade(t, context.Background(), tc.targetVersion))
|
|
||||||
proxyRestartFn()
|
|
||||||
|
|
||||||
libassert.AssertContainerState(t, agentServices.client, "running")
|
|
||||||
libassert.HTTPServiceEchoes(t, "localhost", port, "")
|
|
||||||
libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server-2-v2")
|
|
||||||
assertionFn()
|
|
||||||
|
|
||||||
// assert 3 static-server instances are healthy
|
|
||||||
libassert.AssertServiceHasHealthyInstances(t, node, libservice.StaticServer2ServiceName, false, 3)
|
|
||||||
libassert.AssertUpstreamEndpointStatus(t, adminPort, "v2.static-server-2.default", "HEALTHY", 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range tcs {
|
|
||||||
t.Run(fmt.Sprintf("upgrade from %s to %s", tc.oldversion, tc.targetVersion),
|
|
||||||
func(t *testing.T) {
|
|
||||||
run(t, tc)
|
|
||||||
})
|
|
||||||
// test sometimes fails with error: could not start or join all agents: could not add container index 0: port not found
|
|
||||||
time.Sleep(1 * time.Second)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *registeredServices) validateAgentServices(t *testing.T) (func(), func()) {
|
|
||||||
var (
|
|
||||||
responseFormat = map[string]string{"format": "json"}
|
|
||||||
proxyRestartFn func()
|
|
||||||
assertionFn func()
|
|
||||||
)
|
|
||||||
// validate services proxy admin is up
|
|
||||||
assertionFn = func() {
|
|
||||||
for serviceName, proxies := range s.services {
|
|
||||||
for _, proxy := range proxies {
|
|
||||||
_, adminPort := proxy.GetAdminAddr()
|
|
||||||
_, statusCode, err := libassert.GetEnvoyOutput(adminPort, "stats", responseFormat)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, http.StatusOK, statusCode, fmt.Sprintf("%s cannot be reached %v", serviceName, statusCode))
|
|
||||||
|
|
||||||
// certs are valid
|
|
||||||
libassert.AssertEnvoyPresentsCertURI(t, adminPort, serviceName)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, serviceConnectProxy := range s.services {
|
|
||||||
for _, proxy := range serviceConnectProxy {
|
|
||||||
proxyRestartFn = func() { require.NoErrorf(t, proxy.Restart(), "%s", proxy.GetName()) }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return assertionFn, proxyRestartFn
|
|
||||||
}
|
|
||||||
|
|
||||||
type registeredServices struct {
|
|
||||||
client libservice.Service
|
|
||||||
services map[string][]libservice.Service
|
|
||||||
}
|
|
||||||
|
|
||||||
// create 3 servers and 1 client
|
|
||||||
func setupServiceAndSubsets(t *testing.T, cluster *libcluster.Cluster) *registeredServices {
|
|
||||||
node := cluster.Agents[0]
|
|
||||||
client := node.GetClient()
|
|
||||||
|
|
||||||
// create static-servers and subsets
|
|
||||||
serviceOpts := &libservice.ServiceOpts{
|
|
||||||
Name: libservice.StaticServerServiceName,
|
|
||||||
ID: "static-server",
|
|
||||||
HTTPPort: 8080,
|
|
||||||
GRPCPort: 8079,
|
|
||||||
}
|
|
||||||
_, serverConnectProxy, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOpts)
|
|
||||||
require.NoError(t, err)
|
|
||||||
libassert.CatalogServiceExists(t, client, libservice.StaticServerServiceName)
|
|
||||||
|
|
||||||
serviceOpts2 := &libservice.ServiceOpts{
|
|
||||||
Name: libservice.StaticServer2ServiceName,
|
|
||||||
ID: "static-server-2",
|
|
||||||
HTTPPort: 8081,
|
|
||||||
GRPCPort: 8078,
|
|
||||||
}
|
|
||||||
_, server2ConnectProxy, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOpts2)
|
|
||||||
require.NoError(t, err)
|
|
||||||
libassert.CatalogServiceExists(t, client, libservice.StaticServer2ServiceName)
|
|
||||||
|
|
||||||
serviceOptsV1 := &libservice.ServiceOpts{
|
|
||||||
Name: libservice.StaticServer2ServiceName,
|
|
||||||
ID: "static-server-2-v1",
|
|
||||||
Meta: map[string]string{"version": "v1"},
|
|
||||||
HTTPPort: 8082,
|
|
||||||
GRPCPort: 8077,
|
|
||||||
}
|
|
||||||
_, server2ConnectProxyV1, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOptsV1)
|
|
||||||
require.NoError(t, err)
|
|
||||||
libassert.CatalogServiceExists(t, client, libservice.StaticServer2ServiceName)
|
|
||||||
|
|
||||||
serviceOptsV2 := &libservice.ServiceOpts{
|
|
||||||
Name: libservice.StaticServer2ServiceName,
|
|
||||||
ID: "static-server-2-v2",
|
|
||||||
Meta: map[string]string{"version": "v2"},
|
|
||||||
HTTPPort: 8083,
|
|
||||||
GRPCPort: 8076,
|
|
||||||
}
|
|
||||||
_, server2ConnectProxyV2, err := libservice.CreateAndRegisterStaticServerAndSidecar(node, serviceOptsV2)
|
|
||||||
require.NoError(t, err)
|
|
||||||
libassert.CatalogServiceExists(t, client, libservice.StaticServer2ServiceName)
|
|
||||||
|
|
||||||
// Create a client proxy instance with the server as an upstream
|
|
||||||
clientConnectProxy, err := libservice.CreateAndRegisterStaticClientSidecar(node, "", false)
|
|
||||||
require.NoError(t, err)
|
|
||||||
libassert.CatalogServiceExists(t, client, fmt.Sprintf("%s-sidecar-proxy", libservice.StaticClientServiceName))
|
|
||||||
|
|
||||||
// return a map of all services created
|
|
||||||
tmpServices := map[string][]libservice.Service{}
|
|
||||||
tmpServices[libservice.StaticClientServiceName] = append(tmpServices[libservice.StaticClientServiceName], clientConnectProxy)
|
|
||||||
tmpServices[libservice.StaticServerServiceName] = append(tmpServices[libservice.StaticServerServiceName], serverConnectProxy)
|
|
||||||
tmpServices[libservice.StaticServer2ServiceName] = append(tmpServices[libservice.StaticServer2ServiceName], server2ConnectProxy)
|
|
||||||
tmpServices[libservice.StaticServer2ServiceName] = append(tmpServices[libservice.StaticServer2ServiceName], server2ConnectProxyV1)
|
|
||||||
tmpServices[libservice.StaticServer2ServiceName] = append(tmpServices[libservice.StaticServer2ServiceName], server2ConnectProxyV2)
|
|
||||||
|
|
||||||
return ®isteredServices{
|
|
||||||
client: clientConnectProxy,
|
|
||||||
services: tmpServices,
|
|
||||||
}
|
|
||||||
}
|
|
|
@@ -0,0 +1,3 @@
+package upgrade
+
+var UpgradeFromVersions = []string{"1.13", "1.14"}
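The new shared slice is what the consolidated test ranges over (see the targetVersion/oldVersion loops above), so the upgrade-from matrix is now defined in one place. Covering a hypothetical additional source version would be a one-line change; "1.15" below is illustrative only:

	var UpgradeFromVersions = []string{"1.13", "1.14", "1.15"}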