manually import the change (#17239)

pull/17242/head
hc-github-team-consul-core 2023-05-08 17:54:23 -04:00 committed by GitHub
parent 05e9af3fa5
commit 2ab3184c61
9 changed files with 27 additions and 13 deletions


@@ -283,7 +283,7 @@ func (c *Cluster) Remove(n Agent) error {
 // helpers below.
 //
 // This lets us have tests that assert that an upgrade will fail.
-func (c *Cluster) StandardUpgrade(t *testing.T, ctx context.Context, targetVersion string) error {
+func (c *Cluster) StandardUpgrade(t *testing.T, ctx context.Context, targetImage string, targetVersion string) error {
 	var err error
 	// We take a snapshot, but note that we currently do nothing with it.
 	if c.ACLEnabled {
@@ -327,6 +327,7 @@ func (c *Cluster) StandardUpgrade(t *testing.T, ctx context.Context, targetVersi
 	upgradeFn := func(agent Agent, clientFactory func() (*api.Client, error)) error {
 		config := agent.GetConfig()
+		config.Image = targetImage
 		config.Version = targetVersion
 		if agent.IsServer() {
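For reference, a minimal sketch of how a caller looks after this change. The wrapper function below is hypothetical; StandardUpgrade, GetTargetImageName, and TargetVersion are identifiers that appear in this change or in the utils package it extends:

package upgradetest // hypothetical package name for the sketch

import (
	"context"
	"testing"

	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"

	"github.com/stretchr/testify/require"
)

// upgradeToTarget is a hypothetical helper showing the new call shape:
// the target image is now passed explicitly alongside the target version.
func upgradeToTarget(t *testing.T, cluster *libcluster.Cluster) {
	err := cluster.StandardUpgrade(t, context.Background(), utils.GetTargetImageName(), utils.TargetVersion)
	require.NoError(t, err)
}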


@@ -41,6 +41,7 @@ type BuiltCluster struct {
 // It returns objects of the accepting cluster, dialing cluster, staticServerSvc, and staticClientSvcSidecar
 func BasicPeeringTwoClustersSetup(
 	t *testing.T,
+	consulImage string,
 	consulVersion string,
 	peeringThroughMeshgateway bool,
 ) (*BuiltCluster, *BuiltCluster) {
@@ -49,6 +50,7 @@ func BasicPeeringTwoClustersSetup(
 		NumClients: 1,
 		BuildOpts: &libcluster.BuildOptions{
 			Datacenter: "dc1",
+			ConsulImageName: consulImage,
 			ConsulVersion: consulVersion,
 			InjectAutoEncryption: true,
 		},
@@ -60,6 +62,7 @@ func BasicPeeringTwoClustersSetup(
 		NumClients: 1,
 		BuildOpts: &libcluster.BuildOptions{
 			Datacenter: "dc2",
+			ConsulImageName: consulImage,
 			ConsulVersion: consulVersion,
 			InjectAutoEncryption: true,
 		},
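A comparable sketch of a call site under the new BasicPeeringTwoClustersSetup signature, mirroring the updated callers later in this diff; the test name is hypothetical and the libs/topology import path is assumed from the imports shown elsewhere in this change:

package upgradetest // hypothetical package name for the sketch

import (
	"testing"

	libtopology "github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)

// TestPeeringSetupSketch is illustrative only: the image name now travels
// with the version when the two peered clusters are stood up.
func TestPeeringSetupSketch(t *testing.T) {
	accepting, dialing := libtopology.BasicPeeringTwoClustersSetup(t, utils.GetTargetImageName(), utils.TargetVersion, false)
	_, _ = accepting, dialing // the returned *BuiltCluster values would drive the rest of the test
}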


@@ -33,6 +33,14 @@ func init() {
 	flag.BoolVar(&FollowLog, "follow-log", true, "follow container log in output (Default: true)")
 }
+
+func GetTargetImageName() string {
+	return TargetImageName
+}
+
+func GetLatestImageName() string {
+	return LatestImageName
+}
 
 func DockerImage(image, version string) string {
 	v := image + ":" + version
 	if image == DefaultImageNameENT && isSemVer(version) {
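A short sketch of how the new accessors feed DockerImage to build the "image:version" reference the test harness pulls; printTargetImage is hypothetical, and the enterprise special case handled inside DockerImage is ignored here:

package utilsketch // hypothetical package name for the sketch

import (
	"fmt"

	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)

// printTargetImage is illustrative only: DockerImage joins an image name and
// a version into the tag the containers are started from.
func printTargetImage() {
	fmt.Println(utils.DockerImage(utils.GetTargetImageName(), utils.TargetVersion))
	// GetLatestImageName() would be paired with the previous-release version in the same way.
}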


@@ -50,7 +50,7 @@ import (
 func TestPeering_RotateServerAndCAThenFail_(t *testing.T) {
 	t.Parallel()
-	accepting, dialing := libtopology.BasicPeeringTwoClustersSetup(t, utils.TargetVersion, false)
+	accepting, dialing := libtopology.BasicPeeringTwoClustersSetup(t, utils.GetTargetImageName(), utils.TargetVersion, false)
 	var (
 		acceptingCluster = accepting.Cluster
 		dialingCluster = dialing.Cluster


@@ -41,6 +41,7 @@ func TestACL_Upgrade_Node_Token(t *testing.T) {
 		NumClients: 1,
 		BuildOpts: &libcluster.BuildOptions{
 			Datacenter: "dc1",
+			ConsulImageName: utils.GetLatestImageName(),
 			ConsulVersion: tc.oldversion,
 			InjectAutoEncryption: false,
 			ACLEnabled: true,
@@ -52,7 +53,7 @@ func TestACL_Upgrade_Node_Token(t *testing.T) {
 			cluster.Agents[1].GetAgentName())
 		require.NoError(t, err)
-		err = cluster.StandardUpgrade(t, context.Background(), tc.targetVersion)
+		err = cluster.StandardUpgrade(t, context.Background(), utils.GetTargetImageName(), tc.targetVersion)
 		require.NoError(t, err)
 		// Post upgrade validation: agent token can be used to query the node


@@ -43,7 +43,7 @@ func TestStandardUpgradeToTarget_fromLatest(t *testing.T) {
 	run := func(t *testing.T, tc testcase) {
 		configCtx := libcluster.NewBuildContext(t, libcluster.BuildOptions{
-			ConsulImageName: utils.TargetImageName,
+			ConsulImageName: utils.GetLatestImageName(),
 			ConsulVersion: tc.oldversion,
 		})
@@ -88,7 +88,7 @@ func TestStandardUpgradeToTarget_fromLatest(t *testing.T) {
 		// upgrade the cluster to the Target version
 		t.Logf("initiating standard upgrade to version=%q", tc.targetVersion)
-		err = cluster.StandardUpgrade(t, context.Background(), tc.targetVersion)
+		err = cluster.StandardUpgrade(t, context.Background(), utils.GetTargetImageName(), tc.targetVersion)
 		if !tc.expectErr {
 			require.NoError(t, err)
 			libcluster.WaitForLeader(t, cluster, client)
@@ -102,7 +102,7 @@ func TestStandardUpgradeToTarget_fromLatest(t *testing.T) {
 				require.Equal(r, serviceName, service[0].ServiceName)
 			})
 		} else {
-			require.Error(t, fmt.Errorf("context deadline exceeded"))
+			require.ErrorContains(t, err, "context deadline exceeded")
 		}
 	}


@@ -11,6 +11,7 @@ import (
 	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
 	libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
 	"github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
+	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
 	libutils "github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
 	upgrade "github.com/hashicorp/consul/test/integration/consul-container/test/upgrade"
 	"github.com/hashicorp/go-version"
@@ -373,7 +374,7 @@ func TestTrafficManagement_ServiceResolver(t *testing.T) {
 			tc.extraAssertion(staticClientProxy)
 			// Upgrade cluster, restart sidecars then begin service traffic validation
-			require.NoError(t, cluster.StandardUpgrade(t, context.Background(), targetVersion))
+			require.NoError(t, cluster.StandardUpgrade(t, context.Background(), utils.GetTargetImageName(), targetVersion))
 			require.NoError(t, staticClientProxy.Restart())
 			require.NoError(t, staticServerProxy.Restart())
 			restartFn()


@@ -42,7 +42,7 @@ func TestPeering_Upgrade_ControlPlane_MGW(t *testing.T) {
 	}
 	run := func(t *testing.T, tc testcase) {
-		accepting, dialing := libtopology.BasicPeeringTwoClustersSetup(t, tc.oldversion, true)
+		accepting, dialing := libtopology.BasicPeeringTwoClustersSetup(t, utils.GetLatestImageName(), tc.oldversion, true)
 		var (
 			acceptingCluster = accepting.Cluster
 			dialingCluster = dialing.Cluster
@@ -66,11 +66,11 @@ func TestPeering_Upgrade_ControlPlane_MGW(t *testing.T) {
 			"upstream_cx_total", 1)
 		// Upgrade the accepting cluster and assert peering is still ACTIVE
-		require.NoError(t, acceptingCluster.StandardUpgrade(t, context.Background(), tc.targetVersion))
+		require.NoError(t, acceptingCluster.StandardUpgrade(t, context.Background(), utils.GetTargetImageName(), tc.targetVersion))
 		libassert.PeeringStatus(t, acceptingClient, libtopology.AcceptingPeerName, api.PeeringStateActive)
 		libassert.PeeringStatus(t, dialingClient, libtopology.DialingPeerName, api.PeeringStateActive)
-		require.NoError(t, dialingCluster.StandardUpgrade(t, context.Background(), tc.targetVersion))
+		require.NoError(t, dialingCluster.StandardUpgrade(t, context.Background(), utils.GetTargetImageName(), tc.targetVersion))
 		libassert.PeeringStatus(t, acceptingClient, libtopology.AcceptingPeerName, api.PeeringStateActive)
 		libassert.PeeringStatus(t, dialingClient, libtopology.DialingPeerName, api.PeeringStateActive)


@@ -317,7 +317,7 @@ func TestPeering_UpgradeToTarget_fromLatest(t *testing.T) {
 	}
 	run := func(t *testing.T, tc testcase) {
-		accepting, dialing := libtopology.BasicPeeringTwoClustersSetup(t, tc.oldversion, false)
+		accepting, dialing := libtopology.BasicPeeringTwoClustersSetup(t, utils.GetLatestImageName(), tc.oldversion, false)
 		var (
 			acceptingCluster = accepting.Cluster
 			dialingCluster = dialing.Cluster
@@ -339,11 +339,11 @@ func TestPeering_UpgradeToTarget_fromLatest(t *testing.T) {
 		tc.extraAssertion(appPort)
 		// Upgrade the accepting cluster and assert peering is still ACTIVE
-		require.NoError(t, acceptingCluster.StandardUpgrade(t, context.Background(), tc.targetVersion))
+		require.NoError(t, acceptingCluster.StandardUpgrade(t, context.Background(), utils.GetTargetImageName(), tc.targetVersion))
 		libassert.PeeringStatus(t, acceptingClient, libtopology.AcceptingPeerName, api.PeeringStateActive)
 		libassert.PeeringStatus(t, dialingClient, libtopology.DialingPeerName, api.PeeringStateActive)
-		require.NoError(t, dialingCluster.StandardUpgrade(t, context.Background(), tc.targetVersion))
+		require.NoError(t, dialingCluster.StandardUpgrade(t, context.Background(), utils.GetTargetImageName(), tc.targetVersion))
 		libassert.PeeringStatus(t, acceptingClient, libtopology.AcceptingPeerName, api.PeeringStateActive)
 		libassert.PeeringStatus(t, dialingClient, libtopology.DialingPeerName, api.PeeringStateActive)