e2e/lifecycle: decentralized settings

Test settings should be defined in the test source code itself,
because conceptually the framework is a separate entity that not all
test authors can modify.

For the sake of backwards compatibility, the names of the command-line
flags are not changed.
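
In code, the pattern is roughly the following sketch (flag names and
defaults are taken from the diff below; the help texts are shortened and
the surrounding file is abbreviated):

    package lifecycle

    import "flag"

    // The settings live next to the tests that consume them and are
    // registered with the standard flag package, so the e2e binary
    // still accepts them under the old names.
    var (
        upgradeTarget = flag.String("upgrade-target", "ci/latest",
            "Version to upgrade to if doing an upgrade test.")
        upgradeImage = flag.String("upgrade-image", "",
            "Image to upgrade to if doing an upgrade test.")
    )

    // Tests read the values by dereferencing the pointers, e.g.
    // getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget),
    // instead of going through framework.TestContext.

Because these are ordinary flags on the default flag set, they are parsed
together with the framework's own flags when the test binary parses its
command line, so existing invocations keep working.
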
pull/58/head
Patrick Ohly 2018-07-27 17:29:00 +02:00
parent 752203d3fa
commit 7305a3e395
2 changed files with 26 additions and 24 deletions

View File

@@ -91,11 +91,9 @@ type TestContextType struct {
     MinStartupPods int
     // Timeout for waiting for system pods to be running
     SystemPodsStartupTimeout time.Duration
-    UpgradeTarget string
     EtcdUpgradeStorage string
     EtcdUpgradeVersion string
     IngressUpgradeImage string
-    UpgradeImage string
     GCEUpgradeScript string
     ContainerRuntime string
     ContainerRuntimeEndpoint string
@@ -278,10 +276,8 @@ func RegisterClusterFlags() {
     flag.DurationVar(&TestContext.SystemPodsStartupTimeout, "system-pods-startup-timeout", 10*time.Minute, "Timeout for waiting for all system pods to be running before starting tests.")
     flag.DurationVar(&TestContext.NodeSchedulableTimeout, "node-schedulable-timeout", 30*time.Minute, "Timeout for waiting for all nodes to be schedulable.")
     flag.DurationVar(&TestContext.SystemDaemonsetStartupTimeout, "system-daemonsets-startup-timeout", 5*time.Minute, "Timeout for waiting for all system daemonsets to be ready.")
-    flag.StringVar(&TestContext.UpgradeTarget, "upgrade-target", "ci/latest", "Version to upgrade to (e.g. 'release/stable', 'release/latest', 'ci/latest', '0.19.1', '0.19.1-669-gabac8c8') if doing an upgrade test.")
     flag.StringVar(&TestContext.EtcdUpgradeStorage, "etcd-upgrade-storage", "", "The storage version to upgrade to (either 'etcdv2' or 'etcdv3') if doing an etcd upgrade test.")
     flag.StringVar(&TestContext.EtcdUpgradeVersion, "etcd-upgrade-version", "", "The etcd binary version to upgrade to (e.g., '3.0.14', '2.3.7') if doing an etcd upgrade test.")
-    flag.StringVar(&TestContext.UpgradeImage, "upgrade-image", "", "Image to upgrade to (e.g. 'container_vm' or 'gci') if doing an upgrade test.")
     flag.StringVar(&TestContext.IngressUpgradeImage, "ingress-upgrade-image", "", "Image to upgrade to if doing an upgrade test for ingress.")
     flag.StringVar(&TestContext.GCEUpgradeScript, "gce-upgrade-script", "", "Script to use to upgrade a GCE cluster.")
     flag.BoolVar(&TestContext.CleanStart, "clean-start", false, "If true, purge all namespaces except default and system before running tests. This serves to Cleanup test namespaces from failed/interrupted e2e runs in a long-lived cluster.")

View File

@@ -18,6 +18,7 @@ package lifecycle
 import (
     "encoding/xml"
+    "flag"
     "fmt"
     "os"
     "path/filepath"
@@ -39,6 +40,11 @@ import (
     . "github.com/onsi/ginkgo"
 )
+var (
+    upgradeTarget = flag.String("upgrade-target", "ci/latest", "Version to upgrade to (e.g. 'release/stable', 'release/latest', 'ci/latest', '0.19.1', '0.19.1-669-gabac8c8') if doing an upgrade test.")
+    upgradeImage = flag.String("upgrade-image", "", "Image to upgrade to (e.g. 'container_vm' or 'gci') if doing an upgrade test.")
+)
 var upgradeTests = []upgrades.Test{
     &upgrades.ServiceUpgradeTest{},
     &upgrades.SecretUpgradeTest{},
@@ -89,7 +95,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
     testFrameworks := createUpgradeFrameworks(upgradeTests)
     Describe("master upgrade", func() {
         It("should maintain a functioning cluster [Feature:MasterUpgrade]", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)
             testSuite := &junit.TestSuite{Name: "Master upgrade"}
@@ -112,7 +118,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
     Describe("node upgrade", func() {
         It("should maintain a functioning cluster [Feature:NodeUpgrade]", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)
             testSuite := &junit.TestSuite{Name: "Node upgrade"}
@@ -125,7 +131,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
                 start := time.Now()
                 defer finalizeUpgradeTest(start, nodeUpgradeTest)
                 target := upgCtx.Versions[1].Version.String()
-                framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
+                framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
                 framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
             }
             runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgCtx, upgrades.NodeUpgrade, upgradeFunc)
@@ -134,7 +140,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
     Describe("cluster upgrade", func() {
         It("should maintain a functioning cluster [Feature:ClusterUpgrade]", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)
             testSuite := &junit.TestSuite{Name: "Cluster upgrade"}
@@ -146,7 +152,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
                 target := upgCtx.Versions[1].Version.String()
                 framework.ExpectNoError(framework.MasterUpgrade(target))
                 framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
-                framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
+                framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
                 framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
             }
             runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
@@ -163,7 +169,7 @@ var _ = SIGDescribe("Downgrade [Feature:Downgrade]", func() {
     Describe("cluster downgrade", func() {
         It("should maintain a functioning cluster [Feature:ClusterDowngrade]", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)
             testSuite := &junit.TestSuite{Name: "Cluster downgrade"}
@@ -175,7 +181,7 @@ var _ = SIGDescribe("Downgrade [Feature:Downgrade]", func() {
                 defer finalizeUpgradeTest(start, clusterDowngradeTest)
                 // Yes this really is a downgrade. And nodes must downgrade first.
                 target := upgCtx.Versions[1].Version.String()
-                framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
+                framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
                 framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
                 framework.ExpectNoError(framework.MasterUpgrade(target))
                 framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
@@ -268,7 +274,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
     testFrameworks := createUpgradeFrameworks(gpuUpgradeTests)
     Describe("master upgrade", func() {
         It("should NOT disrupt gpu pod [Feature:GPUMasterUpgrade]", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)
             testSuite := &junit.TestSuite{Name: "GPU master upgrade"}
@@ -286,7 +292,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
     })
     Describe("cluster upgrade", func() {
         It("should be able to run gpu pod after upgrade [Feature:GPUClusterUpgrade]", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)
             testSuite := &junit.TestSuite{Name: "GPU cluster upgrade"}
@@ -298,7 +304,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
                 target := upgCtx.Versions[1].Version.String()
                 framework.ExpectNoError(framework.MasterUpgrade(target))
                 framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
-                framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
+                framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
                 framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
             }
             runUpgradeSuite(f, gpuUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
@@ -306,7 +312,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
     })
     Describe("cluster downgrade", func() {
         It("should be able to run gpu pod after downgrade [Feature:GPUClusterDowngrade]", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)
             testSuite := &junit.TestSuite{Name: "GPU cluster downgrade"}
@@ -316,7 +322,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
                 start := time.Now()
                 defer finalizeUpgradeTest(start, gpuDowngradeTest)
                 target := upgCtx.Versions[1].Version.String()
-                framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
+                framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
                 framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
                 framework.ExpectNoError(framework.MasterUpgrade(target))
                 framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
@@ -334,7 +340,7 @@ var _ = Describe("[sig-apps] stateful Upgrade [Feature:StatefulUpgrade]", func()
     testFrameworks := createUpgradeFrameworks(statefulsetUpgradeTests)
     framework.KubeDescribe("stateful upgrade", func() {
         It("should maintain a functioning cluster", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)
             testSuite := &junit.TestSuite{Name: "Stateful upgrade"}
@@ -346,7 +352,7 @@ var _ = Describe("[sig-apps] stateful Upgrade [Feature:StatefulUpgrade]", func()
                 target := upgCtx.Versions[1].Version.String()
                 framework.ExpectNoError(framework.MasterUpgrade(target))
                 framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
-                framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
+                framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
                 framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
             }
             runUpgradeSuite(f, statefulsetUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
@@ -365,7 +371,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
         testFrameworks := createUpgradeFrameworks(kubeProxyUpgradeTests)
         It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetUpgrade]", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)
             testSuite := &junit.TestSuite{Name: "kube-proxy upgrade"}
@@ -381,7 +387,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
                 target := upgCtx.Versions[1].Version.String()
                 framework.ExpectNoError(framework.MasterUpgradeGCEWithKubeProxyDaemonSet(target, true))
                 framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
-                framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, framework.TestContext.UpgradeImage, true))
+                framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, *upgradeImage, true))
                 framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
             }
             runUpgradeSuite(f, kubeProxyUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
@@ -392,7 +398,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
         testFrameworks := createUpgradeFrameworks(kubeProxyDowngradeTests)
         It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetDowngrade]", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)
             testSuite := &junit.TestSuite{Name: "kube-proxy downgrade"}
@@ -407,7 +413,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
                 defer finalizeUpgradeTest(start, kubeProxyDowngradeTest)
                 // Yes this really is a downgrade. And nodes must downgrade first.
                 target := upgCtx.Versions[1].Version.String()
-                framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, framework.TestContext.UpgradeImage, false))
+                framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, *upgradeImage, false))
                 framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
                 framework.ExpectNoError(framework.MasterUpgradeGCEWithKubeProxyDaemonSet(target, false))
                 framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
@@ -496,7 +502,7 @@ func runUpgradeSuite(
     upgradeType upgrades.UpgradeType,
     upgradeFunc func(),
 ) {
-    upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+    upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
     framework.ExpectNoError(err)
     cm := chaosmonkey.New(upgradeFunc)
@@ -569,7 +575,7 @@ func getUpgradeContext(c discovery.DiscoveryInterface, upgradeTarget string) (*u
     upgCtx.Versions = append(upgCtx.Versions, upgrades.VersionContext{
         Version: *nextVer,
-        NodeImage: framework.TestContext.UpgradeImage,
+        NodeImage: *upgradeImage,
     })
     return upgCtx, nil