All e2e tests should use NewFramework

Allows someone to replace createTestingNS consistently
pull/6/head
Clayton Coleman 2015-10-21 22:28:39 -04:00
parent 507bc3de4f
commit a50fbd6636
11 changed files with 101 additions and 131 deletions
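To make the hunks below easier to follow: each suite stops building a Framework literal and calling f.beforeEach()/f.afterEach() by hand, and instead calls NewFramework, which hands back a *Framework and registers that setup and teardown with Ginkgo itself. A minimal sketch of such a constructor is shown here; the Framework type, its BaseName field, and its beforeEach/afterEach methods all appear in the diff, but the body below is an illustrative assumption, not the commit's actual implementation.

package e2e

import (
	. "github.com/onsi/ginkgo"
)

// NewFramework constructs a *Framework for one suite and wires its per-spec
// namespace setup and teardown into Ginkgo, so tests no longer invoke
// f.beforeEach() / f.afterEach() explicitly. (Sketch under the assumptions
// stated above.)
func NewFramework(baseName string) *Framework {
	f := &Framework{BaseName: baseName}
	// beforeEach creates the per-test namespace and client; afterEach deletes
	// the namespace. Registering them here keeps every suite consistent.
	BeforeEach(f.beforeEach)
	AfterEach(f.afterEach)
	return f
}

One consequence visible throughout the diff: cleanup that must run before the namespace is deleted is now declared ahead of the NewFramework call (see the "Gathers data prior to framework namespace teardown" comment in the Density hunk), instead of wrapping the old defer framework.afterEach() inside the test's own AfterEach.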

View File

@@ -92,7 +92,7 @@ var masterPush = func(_ string) error {
return err
}
var nodeUpgrade = func(f Framework, replicas int, v string) error {
var nodeUpgrade = func(f *Framework, replicas int, v string) error {
// Perform the upgrade.
var err error
switch testContext.Provider {
@@ -150,8 +150,6 @@ var _ = Describe("Skipped", func() {
svcName, replicas := "baz", 2
var rcName, ip, v string
var ingress api.LoadBalancerIngress
f := Framework{BaseName: "cluster-upgrade"}
var w *WebserverTest
BeforeEach(func() {
// The version is determined once at the beginning of the test so that
@@ -162,9 +160,12 @@ var _ = Describe("Skipped", func() {
v, err = realVersion(testContext.UpgradeTarget)
expectNoError(err)
Logf("Version for %q is %q", testContext.UpgradeTarget, v)
})
f := NewFramework("cluster-upgrade")
var w *WebserverTest
BeforeEach(func() {
By("Setting up the service, RC, and pods")
f.beforeEach()
w = NewWebserverTest(f.Client, f.Namespace.Name, svcName)
rc := w.CreateWebserverRC(replicas)
rcName = rc.ObjectMeta.Name
@@ -192,9 +193,7 @@ var _ = Describe("Skipped", func() {
// - volumes
// - persistent volumes
})
AfterEach(func() {
f.afterEach()
w.Cleanup()
})
@@ -345,7 +344,7 @@ func checkMasterVersion(c *client.Client, want string) error {
return nil
}
func testNodeUpgrade(f Framework, nUp func(f Framework, n int, v string) error, replicas int, v string) {
func testNodeUpgrade(f *Framework, nUp func(f *Framework, n int, v string) error, replicas int, v string) {
Logf("Starting node upgrade")
expectNoError(nUp(f, replicas, v))
Logf("Node upgrade complete")
@@ -412,7 +411,7 @@ func runCmd(command string, args ...string) (string, string, error) {
return stdout, stderr, nil
}
func validate(f Framework, svcNameWant, rcNameWant string, ingress api.LoadBalancerIngress, podsWant int) error {
func validate(f *Framework, svcNameWant, rcNameWant string, ingress api.LoadBalancerIngress, podsWant int) error {
Logf("Beginning cluster validation")
// Verify RC.
rcs, err := f.Client.ReplicationControllers(f.Namespace.Name).List(labels.Everything(), fields.Everything())

View File

@@ -29,17 +29,14 @@ import (
)
var _ = Describe("Probing container", func() {
framework := Framework{BaseName: "container-probe"}
framework := NewFramework("container-probe")
var podClient client.PodInterface
probe := webserverProbeBuilder{}
BeforeEach(func() {
framework.beforeEach()
podClient = framework.Client.Pods(framework.Namespace.Name)
})
AfterEach(framework.afterEach)
It("with readiness probe should not be ready before initial delay and never restart [Conformance]", func() {
p, err := podClient.Create(makePodSpec(probe.withInitialDelay().build(), nil))
expectNoError(err)

View File

@@ -185,7 +185,7 @@ func getContainerRestarts(c *client.Client, ns string, labelSelector labels.Sele
var _ = Describe("DaemonRestart", func() {
framework := Framework{BaseName: "daemonrestart"}
framework := NewFramework("daemonrestart")
rcName := "daemonrestart" + strconv.Itoa(numPods) + "-" + string(util.NewUUID())
labelSelector := labels.Set(map[string]string{"name": rcName}).AsSelector()
existingPods := cache.NewStore(cache.MetaNamespaceKeyFunc)
@@ -197,11 +197,9 @@ var _ = Describe("DaemonRestart", func() {
var tracker *podTracker
BeforeEach(func() {
// These tests require SSH
// TODO(11834): Enable this test in GKE once experimental API there is switched on
SkipUnlessProviderIs("gce", "aws")
framework.beforeEach()
ns = framework.Namespace.Name
// All the restart tests need an rc and a watch on pods of the rc.
@@ -247,7 +245,6 @@ var _ = Describe("DaemonRestart", func() {
})
AfterEach(func() {
defer framework.afterEach()
close(stopCh)
expectNoError(DeleteRC(framework.Client, ns, rcName))
})

View File

@@ -48,7 +48,14 @@ const (
)
var _ = Describe("Daemon set", func() {
f := &Framework{BaseName: "daemonsets"}
var f *Framework
AfterEach(func() {
err := clearDaemonSetNodeLabels(f.Client)
Expect(err).NotTo(HaveOccurred())
})
f = NewFramework("daemonsets")
image := "gcr.io/google_containers/serve_hostname:1.1"
dsName := "daemon-set"
@@ -57,19 +64,12 @@ var _ = Describe("Daemon set", func() {
var c *client.Client
BeforeEach(func() {
f.beforeEach()
ns = f.Namespace.Name
c = f.Client
err := clearDaemonSetNodeLabels(c)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
defer f.afterEach()
err := clearDaemonSetNodeLabels(f.Client)
Expect(err).NotTo(HaveOccurred())
})
It("should run and stop simple daemon", func() {
label := map[string]string{daemonsetNameLabel: dsName}

View File

@@ -80,10 +80,39 @@ var _ = Describe("Density", func() {
var additionalPodsPrefix string
var ns string
var uuid string
framework := Framework{BaseName: "density", NamespaceDeletionTimeout: time.Hour}
// Gathers data prior to framework namespace teardown
AfterEach(func() {
// Remove any remaining pods from this test if the
// replication controller still exists and the replica count
// isn't 0. This means the controller wasn't cleaned up
// during the test so clean it up here. We want to do it separately
// to not cause a timeout on Namespace removal.
rc, err := c.ReplicationControllers(ns).Get(RCName)
if err == nil && rc.Spec.Replicas != 0 {
By("Cleaning up the replication controller")
err := DeleteRC(c, ns, RCName)
expectNoError(err)
}
By("Removing additional pods if any")
for i := 1; i <= nodeCount; i++ {
name := additionalPodsPrefix + "-" + strconv.Itoa(i)
c.Pods(ns).Delete(name, nil)
}
expectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+"/%s", uuid), "after"))
// Verify latency metrics
highLatencyRequests, err := HighLatencyRequests(c)
expectNoError(err)
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
})
framework := NewFramework("density")
framework.NamespaceDeletionTimeout = time.Hour
BeforeEach(func() {
framework.beforeEach()
c = framework.Client
ns = framework.Namespace.Name
var err error
@@ -115,37 +144,6 @@ var _ = Describe("Density", func() {
}
})
AfterEach(func() {
// We can't call it explicitly at the end, because it will not be called
// if Expect() fails.
defer framework.afterEach()
// Remove any remaining pods from this test if the
// replication controller still exists and the replica count
// isn't 0. This means the controller wasn't cleaned up
// during the test so clean it up here. We want to do it separately
// to not cause a timeout on Namespace removal.
rc, err := c.ReplicationControllers(ns).Get(RCName)
if err == nil && rc.Spec.Replicas != 0 {
By("Cleaning up the replication controller")
err := DeleteRC(c, ns, RCName)
expectNoError(err)
}
By("Removing additional pods if any")
for i := 1; i <= nodeCount; i++ {
name := additionalPodsPrefix + "-" + strconv.Itoa(i)
c.Pods(ns).Delete(name, nil)
}
expectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+"/%s", uuid), "after"))
// Verify latency metrics
highLatencyRequests, err := HighLatencyRequests(c)
expectNoError(err)
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
})
// Tests with "Skipped" substring in their name will be skipped when running
// e2e test suite without --ginkgo.focus & --ginkgo.skip flags.
type Density struct {

View File

@@ -31,7 +31,7 @@ import (
var _ = Describe("Etcd failure", func() {
var skipped bool
framework := Framework{BaseName: "etcd-failure"}
framework := NewFramework("etcd-failure")
BeforeEach(func() {
// This test requires:
@@ -43,8 +43,6 @@ var _ = Describe("Etcd failure", func() {
SkipUnlessProviderIs("gce")
skipped = false
framework.beforeEach()
Expect(RunRC(RCConfig{
Client: framework.Client,
Name: "baz",
@@ -54,14 +52,6 @@ var _ = Describe("Etcd failure", func() {
})).NotTo(HaveOccurred())
})
AfterEach(func() {
if skipped {
return
}
framework.afterEach()
})
It("should recover from network partition with master", func() {
etcdFailTest(
framework,
@@ -79,12 +69,12 @@ var _ = Describe("Etcd failure", func() {
})
})
func etcdFailTest(framework Framework, failCommand, fixCommand string) {
func etcdFailTest(framework *Framework, failCommand, fixCommand string) {
doEtcdFailure(failCommand, fixCommand)
checkExistingRCRecovers(framework)
ServeImageOrFail(&framework, "basic", "gcr.io/google_containers/serve_hostname:1.1")
ServeImageOrFail(framework, "basic", "gcr.io/google_containers/serve_hostname:1.1")
}
// For this duration, etcd will be failed by executing a failCommand on the master.
@@ -110,7 +100,7 @@ func masterExec(cmd string) {
}
}
func checkExistingRCRecovers(f Framework) {
func checkExistingRCRecovers(f *Framework) {
By("assert that the pre-existing replication controller recovers")
podClient := f.Client.Pods(f.Namespace.Name)
rcSelector := labels.Set{"name": "baz"}.AsSelector()

View File

@@ -45,10 +45,31 @@ var _ = Describe("[Performance Suite] Latency", func() {
var additionalPodsPrefix string
var ns string
var uuid string
framework := Framework{BaseName: "latency", NamespaceDeletionTimeout: time.Hour}
AfterEach(func() {
By("Removing additional pods if any")
for i := 1; i <= nodeCount; i++ {
name := additionalPodsPrefix + "-" + strconv.Itoa(i)
c.Pods(ns).Delete(name, nil)
}
By(fmt.Sprintf("Destroying namespace for this suite %v", ns))
if err := c.Namespaces().Delete(ns); err != nil {
Failf("Couldn't delete ns %s", err)
}
expectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+"/%s", uuid), "after"))
// Verify latency metrics
highLatencyRequests, err := HighLatencyRequests(c)
expectNoError(err)
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
})
framework := NewFramework("latency")
framework.NamespaceDeletionTimeout = time.Hour
BeforeEach(func() {
framework.beforeEach()
c = framework.Client
ns = framework.Namespace.Name
var err error
@@ -79,27 +100,6 @@ var _ = Describe("[Performance Suite] Latency", func() {
}
})
AfterEach(func() {
defer framework.afterEach()
By("Removing additional pods if any")
for i := 1; i <= nodeCount; i++ {
name := additionalPodsPrefix + "-" + strconv.Itoa(i)
c.Pods(ns).Delete(name, nil)
}
By(fmt.Sprintf("Destroying namespace for this suite %v", ns))
if err := c.Namespaces().Delete(ns); err != nil {
Failf("Couldn't delete ns %s", err)
}
expectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+"/%s", uuid), "after"))
// Verify latency metrics
highLatencyRequests, err := HighLatencyRequests(c)
expectNoError(err)
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
})
// Skipped to avoid running in e2e
It("[Skipped] pod start latency should be acceptable", func() {
runLatencyTest(nodeCount, c, ns)

View File

@@ -52,10 +52,22 @@ var _ = Describe("Load capacity", func() {
var nodeCount int
var ns string
var configs []*RCConfig
framework := Framework{BaseName: "load", NamespaceDeletionTimeout: time.Hour}
// Gathers metrics before teardown
// TODO add flag that allows to skip cleanup on failure
AfterEach(func() {
deleteAllRC(configs)
// Verify latency metrics
highLatencyRequests, err := HighLatencyRequests(c)
expectNoError(err, "Too many instances metrics above the threshold")
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0))
})
framework := NewFramework("load")
framework.NamespaceDeletionTimeout = time.Hour
BeforeEach(func() {
framework.beforeEach()
c = framework.Client
ns = framework.Namespace.Name
nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
@@ -72,20 +84,6 @@ var _ = Describe("Load capacity", func() {
expectNoError(resetMetrics(c))
})
// TODO add flag that allows to skip cleanup on failure
AfterEach(func() {
// We can't call it explicitly at the end, because it will not be called
// if Expect() fails.
defer framework.afterEach()
deleteAllRC(configs)
// Verify latency metrics
highLatencyRequests, err := HighLatencyRequests(c)
expectNoError(err, "Too many instances metrics above the threshold")
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0))
})
type Load struct {
podsPerNode int
image string

View File

@@ -384,18 +384,15 @@ func performTemporaryNetworkFailure(c *client.Client, ns, rcName string, replica
}
var _ = Describe("Nodes", func() {
framework := Framework{BaseName: "resize-nodes"}
framework := NewFramework("resize-nodes")
var c *client.Client
var ns string
BeforeEach(func() {
framework.beforeEach()
c = framework.Client
ns = framework.Namespace.Name
})
AfterEach(framework.afterEach)
Describe("Resize", func() {
var skipped bool

View File

@@ -175,24 +175,13 @@ func waitForStableCluster(c *client.Client) int {
}
var _ = Describe("SchedulerPredicates", func() {
framework := Framework{BaseName: "sched-pred"}
var c *client.Client
var nodeList *api.NodeList
var totalPodCapacity int64
var RCName string
var ns string
BeforeEach(func() {
framework.beforeEach()
c = framework.Client
ns = framework.Namespace.Name
var err error
nodeList, err = c.Nodes().List(labels.Everything(), fields.Everything())
expectNoError(err)
})
AfterEach(func() {
defer framework.afterEach()
rc, err := c.ReplicationControllers(ns).Get(RCName)
if err == nil && rc.Spec.Replicas != 0 {
By("Cleaning up the replication controller")
@@ -201,6 +190,16 @@ var _ = Describe("SchedulerPredicates", func() {
}
})
framework := NewFramework("sched-pred")
BeforeEach(func() {
c = framework.Client
ns = framework.Namespace.Name
var err error
nodeList, err = c.Nodes().List(labels.Everything(), fields.Everything())
expectNoError(err)
})
// This test verifies that max-pods flag works as advertised. It assumes that cluster add-on pods stay stable
// and cannot be run in parallel with any other test that touches Nodes or Pods. It is so because to check
// if max-pods is working we need to fully saturate the cluster and keep it in this state for few seconds.

View File

@@ -210,19 +210,14 @@ var _ = Describe("ServiceLoadBalancer", func() {
var repoRoot string
var client *client.Client
framework := Framework{BaseName: "servicelb"}
framework := NewFramework("servicelb")
BeforeEach(func() {
framework.beforeEach()
client = framework.Client
ns = framework.Namespace.Name
repoRoot = testContext.RepoRoot
})
AfterEach(func() {
framework.afterEach()
})
It("should support simple GET on Ingress ips", func() {
for _, t := range getLoadBalancerControllers(repoRoot, client) {
By(fmt.Sprintf("Starting loadbalancer controller %v in namespace %v", t.getName(), ns))