Merge pull request #9148 from lavalamp/e2eTestFix

E2e test fix
Brian Grant 2015-06-03 12:44:55 -07:00
commit e3b80db02c
1 changed file with 14 additions and 21 deletions


@@ -24,7 +24,6 @@ import (
 	"time"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
@@ -33,16 +32,10 @@ import (
 )
 var _ = Describe("Cluster level logging using Elasticsearch", func() {
-	var c *client.Client
-
-	BeforeEach(func() {
-		var err error
-		c, err = loadClient()
-		Expect(err).NotTo(HaveOccurred())
-	})
+	f := NewFramework("es-logging")
 	It("should check that logs from pods on all nodes are ingested into Elasticsearch", func() {
-		ClusterLevelLoggingWithElasticsearch(c)
+		ClusterLevelLoggingWithElasticsearch(f)
 	})
 })
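The Framework type itself is outside this diff; only its use is visible (f.Client and f.Namespace.Name below). A minimal sketch of the shape those usages imply, assuming the file's existing Ginkgo and client imports; createTestingNS is a hypothetical helper, and the real e2e Framework's fields and hooks may differ:

// Sketch only — fields inferred from the usages in this diff.
type Framework struct {
	BaseName  string
	Client    *client.Client
	Namespace *api.Namespace
}

func NewFramework(baseName string) *Framework {
	f := &Framework{BaseName: baseName}
	BeforeEach(func() {
		var err error
		f.Client, err = loadClient() // same helper the removed BeforeEach called
		Expect(err).NotTo(HaveOccurred())
		f.Namespace, err = createTestingNS(baseName, f.Client) // hypothetical namespace helper
		Expect(err).NotTo(HaveOccurred())
	})
	AfterEach(func() {
		// Tear down the per-test namespace so parallel runs stay isolated.
		err := f.Client.Namespaces().Delete(f.Namespace.Name)
		Expect(err).NotTo(HaveOccurred())
	})
	return f
}

The payoff, visible in the hunk above, is that each Describe block shrinks to a single NewFramework call and the tests no longer manage their own client or namespace lifecycle.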
@@ -56,7 +49,7 @@ func bodyToJSON(body []byte) (map[string]interface{}, error) {
 }
 // ClusterLevelLoggingWithElasticsearch is an end-to-end test for cluster level logging.
-func ClusterLevelLoggingWithElasticsearch(c *client.Client) {
+func ClusterLevelLoggingWithElasticsearch(f *Framework) {
 	// TODO: For now assume we are only testing cluster logging with Elasticsearch
 	// on GCE. Once we are sure that Elasticsearch cluster level logging
 	// works for other providers we should widen the scope of this test.
@@ -67,7 +60,7 @@ func ClusterLevelLoggingWithElasticsearch(c *client.Client) {
 	// Check for the existence of the Elasticsearch service.
 	By("Checking the Elasticsearch service exists.")
-	s := c.Services(api.NamespaceDefault)
+	s := f.Client.Services(api.NamespaceDefault)
 	// Make a few attempts to connect. This makes the test robust against
 	// being run as the first e2e test just after the e2e cluster has been created.
 	var err error
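The retry loop itself falls outside this hunk. A sketch of the loop the comment describes, mirroring the polling idiom visible later in this diff (graceTime and Logf exist elsewhere in the file; the exact poll interval and error handling here are assumptions):

// Poll the service until it resolves or graceTime expires.
for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
	if _, err = s.Get("elasticsearch-logging"); err == nil {
		break // service exists; stop retrying
	}
	Logf("Elasticsearch service not yet reachable after %v", time.Since(start))
}
Expect(err).NotTo(HaveOccurred())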
@@ -83,10 +76,10 @@ func ClusterLevelLoggingWithElasticsearch(c *client.Client) {
 	// Wait for the Elasticsearch pods to enter the running state.
 	By("Checking to make sure the Elasticsearch pods are running")
 	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": "elasticsearch-logging"}))
-	pods, err := c.Pods(api.NamespaceDefault).List(label, fields.Everything())
+	pods, err := f.Client.Pods(api.NamespaceDefault).List(label, fields.Everything())
 	Expect(err).NotTo(HaveOccurred())
 	for _, pod := range pods.Items {
-		err = waitForPodRunning(c, pod.Name)
+		err = waitForPodRunning(f.Client, pod.Name)
 		Expect(err).NotTo(HaveOccurred())
 	}
@@ -97,7 +90,7 @@ func ClusterLevelLoggingWithElasticsearch(c *client.Client) {
 	err = nil
 	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
 		// Query against the root URL for Elasticsearch.
-		body, err := c.Get().
+		body, err := f.Client.Get().
 			Namespace(api.NamespaceDefault).
 			Prefix("proxy").
 			Resource("services").
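The request chain is cut off at the hunk boundary. It builds a GET against the apiserver's service proxy path, so the test can reach the in-cluster Elasticsearch endpoint from outside the cluster. A hedged completion, using builder methods from this era's client; the exact tail may differ from the real file:

		// Assumed continuation of the chain above: name the service and
		// execute, letting the apiserver proxy the request through.
		body, err := f.Client.Get().
			Namespace(api.NamespaceDefault).
			Prefix("proxy").
			Resource("services").
			Name("elasticsearch-logging").
			DoRaw()
		if err != nil {
			continue // Elasticsearch not up yet; retry until graceTime expires
		}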
@@ -141,7 +134,7 @@ func ClusterLevelLoggingWithElasticsearch(c *client.Client) {
 	// Now assume we really are talking to an Elasticsearch instance.
 	// Check the cluster health.
 	By("Checking health of Elasticsearch service.")
-	body, err := c.Get().
+	body, err := f.Client.Get().
 		Namespace(api.NamespaceDefault).
 		Prefix("proxy").
 		Resource("services").
@@ -163,7 +156,7 @@ func ClusterLevelLoggingWithElasticsearch(c *client.Client) {
 	}
 	// Obtain a list of nodes so we can place one synthetic logger on each node.
-	nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
+	nodes, err := f.Client.Nodes().List(labels.Everything(), fields.Everything())
 	if err != nil {
 		Failf("Failed to list nodes: %v", err)
 	}
@@ -175,7 +168,7 @@ func ClusterLevelLoggingWithElasticsearch(c *client.Client) {
 	// Create a unique root name for the resources in this test to permit
 	// parallel executions of this test.
 	// Use a unique namespace for the resources created in this test.
-	ns := "es-logging-" + randomSuffix()
+	ns := f.Namespace.Name
 	name := "synthlogger"
 	// Form a unique name to taint log lines to be collected.
 	// Replace '-' characters with '_' to prevent the analyzer from breaking apart names.
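The tainting expression the two comments describe sits just past the hunk. A one-line sketch of what it plausibly looks like, assuming the file imports "strings" (the variable name taintName is reused by the queries later in this diff):

	// Assumed form: join namespace and pod root name, underscore-separated,
	// so the log analyzer treats the taint as a single token.
	taintName := strings.Replace(ns+name, "-", "_", -1)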
@@ -189,7 +182,7 @@ func ClusterLevelLoggingWithElasticsearch(c *client.Client) {
 	// Instantiate a synthetic logger pod on each node.
 	for i, node := range nodes.Items {
 		podName := fmt.Sprintf("%s-%d", name, i)
-		_, err := c.Pods(ns).Create(&api.Pod{
+		_, err := f.Client.Pods(ns).Create(&api.Pod{
 			ObjectMeta: api.ObjectMeta{
 				Name:   podName,
 				Labels: map[string]string{"name": name},
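The pod literal is truncated after ObjectMeta. A hedged sketch of how such a synthetic-logger pod might be completed under this era's API types: the image, shell command, countTo constant, and the Host field for node placement are all assumptions, not lines from this diff.

			// Assumed continuation: a short-lived container that emits
			// countTo log lines tagged with taintName, then exits.
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  "synth-logger",
						Image: "ubuntu:14.04",
						Command: []string{"bash", "-c",
							fmt.Sprintf("i=0; while [ $i -lt %d ]; do echo \"%s $i\"; i=$((i+1)); done", countTo, taintName)},
					},
				},
				Host:          node.Name,          // pin one logger to each node
				RestartPolicy: api.RestartPolicyNever, // run to completion, no restarts
			},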
@@ -214,7 +207,7 @@ func ClusterLevelLoggingWithElasticsearch(c *client.Client) {
 	// Cleanup the pods when we are done.
 	defer func() {
 		for _, pod := range podNames {
-			if err = c.Pods(ns).Delete(pod, nil); err != nil {
+			if err = f.Client.Pods(ns).Delete(pod, nil); err != nil {
 				Logf("Failed to delete pod %s: %v", pod, err)
 			}
 		}
@@ -223,7 +216,7 @@ func ClusterLevelLoggingWithElasticsearch(c *client.Client) {
 	// Wait for the synthetic logging pods to finish.
 	By("Waiting for the pods to succeed.")
 	for _, pod := range podNames {
-		err = waitForPodSuccessInNamespace(c, pod, "synth-logger", ns)
+		err = waitForPodSuccessInNamespace(f.Client, pod, "synth-logger", ns)
 		Expect(err).NotTo(HaveOccurred())
 	}
@@ -238,7 +231,7 @@ func ClusterLevelLoggingWithElasticsearch(c *client.Client) {
 	// Ask Elasticsearch to return all the log lines that were tagged with the underscore
 	// version of the name. Ask for twice as many log lines as we expect to check for
 	// duplication bugs.
-	body, err = c.Get().
+	body, err = f.Client.Get().
 		Namespace(api.NamespaceDefault).
 		Prefix("proxy").
 		Resource("services").
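This final chain is also truncated by the hunk boundary. A hedged completion of the query the comments describe: search the proxied Elasticsearch service for taintName-tagged lines, asking for 2x the expected count so duplicates would show up. The Suffix/Param tail, the query syntax, and the expected variable are assumptions (strconv import assumed):

	// Assumed continuation: hit Elasticsearch's _search endpoint through
	// the apiserver proxy, over-fetching to surface duplicated log lines.
	body, err = f.Client.Get().
		Namespace(api.NamespaceDefault).
		Prefix("proxy").
		Resource("services").
		Name("elasticsearch-logging").
		Suffix("_search").
		Param("q", fmt.Sprintf("log:%s", taintName)).
		Param("size", strconv.Itoa(2*expected)).
		DoRaw()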