mirror of https://github.com/k3s-io/k3s
Merge pull request #5720 from satnam6502/loge2e
Run cluster level logging e2e test in its own namespace
commit d11ec2933f
@@ -26,7 +26,6 @@ import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"

 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -68,8 +67,7 @@ func ClusterLevelLoggingWithElasticsearch(c *client.Client) {

 	// Check for the existence of the Elasticsearch service.
 	By("Checking the Elasticsearch service exists.")
-	const ns = api.NamespaceDefault
-	s := c.Services(ns)
+	s := c.Services(api.NamespaceDefault)
 	// Make a few attempts to connect. This makes the test robust against
 	// being run as the first e2e test just after the e2e cluster has been created.
 	var err error
@@ -85,7 +83,7 @@ func ClusterLevelLoggingWithElasticsearch(c *client.Client) {
 	// Wait for the Elasticsearch pods to enter the running state.
 	By("Checking to make sure the Elasticsearch pods are running")
 	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": "elasticsearch-logging"}))
-	pods, err := c.Pods(ns).List(label)
+	pods, err := c.Pods(api.NamespaceDefault).List(label)
 	Expect(err).NotTo(HaveOccurred())
 	for _, pod := range pods.Items {
 		err = waitForPodRunning(c, pod.Name)
@@ -100,6 +98,7 @@ func ClusterLevelLoggingWithElasticsearch(c *client.Client) {
 	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
 		// Query against the root URL for Elasticsearch.
 		body, err := c.Get().
+			Namespace(api.NamespaceDefault).
 			Prefix("proxy").
 			Resource("services").
 			Name("elasticsearch-logging").
@@ -143,6 +142,7 @@ func ClusterLevelLoggingWithElasticsearch(c *client.Client) {
 	// Check the cluster health.
 	By("Checking health of Elasticsearch service.")
 	body, err := c.Get().
+		Namespace(api.NamespaceDefault).
 		Prefix("proxy").
 		Resource("services").
 		Name("elasticsearch-logging").
@@ -174,9 +174,12 @@ func ClusterLevelLoggingWithElasticsearch(c *client.Client) {

 	// Create a unique root name for the resources in this test to permit
 	// parallel executions of this test.
-	name := "synthlogger-" + string(util.NewUUID())
+	// Use a unique namespace for the resources created in this test.
+	ns := "es-logging-" + randomSuffix()
+	name := "synthlogger"
+	// Form a unique name to taint log lines to be collected.
 	// Replace '-' characters with '_' to prevent the analyzer from breaking apart names.
-	underscoreName := strings.Replace(name, "-", "_", -1)
+	taintName := strings.Replace(ns+name, "-", "_", -1)

 	// podNames records the names of the synthetic logging pods that are created in the
 	// loop below.
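The randomSuffix helper used above is not part of this diff, so the sketch below is only an illustration of what such a helper could look like if it lived next to the other e2e utilities; the seeding and output format are assumptions, not the committed implementation.

package e2e

import (
	"math/rand"
	"strconv"
	"time"
)

// randomSuffix returns a short pseudo-random string so that names such as
// "es-logging-<suffix>" differ between test runs (sketch only).
func randomSuffix() string {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	return strconv.Itoa(r.Int() % 10000)
}

With a suffix of, say, 1234, the namespace becomes es-logging-1234 and the taint name computed above becomes es_logging_1234synthlogger, since the '-' characters are replaced by '_'.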
@@ -196,7 +199,7 @@ func ClusterLevelLoggingWithElasticsearch(c *client.Client) {
 				{
 					Name: "synth-logger",
 					Image: "ubuntu:14.04",
-					Command: []string{"bash", "-c", fmt.Sprintf("i=0; while ((i < %d)); do echo \"%d %s $i %s\"; i=$(($i+1)); done", countTo, i, underscoreName, podName)},
+					Command: []string{"bash", "-c", fmt.Sprintf("i=0; while ((i < %d)); do echo \"%d %s $i %s\"; i=$(($i+1)); done", countTo, i, taintName, podName)},
 				},
 			},
 			Host: node.Name,
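To make the Sprintf in this hunk concrete, the following stand-alone sketch prints the shell command the test would hand to the synth-logger container; the values of countTo, i, taintName and podName are placeholders chosen for illustration, not values taken from the test.

package main

import "fmt"

func main() {
	// Placeholder values; in the test they come from the pod-creation loop.
	countTo := 100
	i := 0
	taintName := "es_logging_1234synthlogger"
	podName := "synth-logger-node-1"

	cmd := fmt.Sprintf("i=0; while ((i < %d)); do echo \"%d %s $i %s\"; i=$(($i+1)); done",
		countTo, i, taintName, podName)
	fmt.Println(cmd)
	// Each iteration of the resulting bash loop echoes a line of the form
	//   0 es_logging_1234synthlogger 42 synth-logger-node-1
	// i.e. the node index, the taint name, the bash counter, and the pod name,
	// which is what the Elasticsearch query later searches for.
}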
@@ -219,7 +222,7 @@ func ClusterLevelLoggingWithElasticsearch(c *client.Client) {
 	// Wait for the synthetic logging pods to finish.
 	By("Waiting for the pods to succeed.")
 	for _, pod := range podNames {
-		err = waitForPodSuccess(c, pod, "synth-logger")
+		err = waitForPodSuccessInNamespace(c, pod, "synth-logger", ns)
 		Expect(err).NotTo(HaveOccurred())
 	}

@@ -235,11 +238,12 @@ func ClusterLevelLoggingWithElasticsearch(c *client.Client) {
 	// version of the name. Ask for twice as many log lines as we expect to check for
 	// duplication bugs.
 	body, err = c.Get().
+		Namespace(api.NamespaceDefault).
 		Prefix("proxy").
 		Resource("services").
 		Name("elasticsearch-logging").
 		Suffix("_search").
-		Param("q", fmt.Sprintf("log:%s", underscoreName)).
+		Param("q", fmt.Sprintf("log:%s", taintName)).
 		Param("size", strconv.Itoa(2*expected)).
 		DoRaw()
 	if err != nil {
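The body returned by DoRaw() is then checked for the expected number of tainted log lines; that verification code is outside this hunk. As a rough sketch of how such a body could be inspected, assuming the standard Elasticsearch search-response layout (a top-level "hits" object wrapping a "hits" array) and that encoding/json and fmt are imported:

// countLogHits is a hypothetical helper, not part of the commit; it only
// illustrates decoding the raw search response into a hit count.
func countLogHits(body []byte) (int, error) {
	var response map[string]interface{}
	if err := json.Unmarshal(body, &response); err != nil {
		return 0, fmt.Errorf("failed to parse search response: %v", err)
	}
	hits, ok := response["hits"].(map[string]interface{})
	if !ok {
		return 0, fmt.Errorf("no hits object in search response")
	}
	entries, ok := hits["hits"].([]interface{})
	if !ok {
		return 0, fmt.Errorf("malformed hits array in search response")
	}
	return len(entries), nil
}

The remaining two hunks are in the shared e2e helpers, where waitForPodSuccess is defined, so that waiting for pod success can be scoped to the test's own namespace.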
@@ -97,9 +97,9 @@ func waitForPodNotPending(c *client.Client, ns, podName string) error {
 	})
 }

-// waitForPodSuccess returns nil if the pod reached state success, or an error if it reached failure or ran too long.
-func waitForPodSuccess(c *client.Client, podName string, contName string) error {
-	return waitForPodCondition(c, api.NamespaceDefault, podName, "success or failure", func(pod *api.Pod) (bool, error) {
+// waitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or ran too long.
+func waitForPodSuccessInNamespace(c *client.Client, podName string, contName string, namespace string) error {
+	return waitForPodCondition(c, namespace, podName, "success or failure", func(pod *api.Pod) (bool, error) {
 		// Cannot use pod.Status.Phase == api.PodSucceeded/api.PodFailed due to #2632
 		ci, ok := pod.Status.Info[contName]
 		if !ok {
@@ -112,15 +112,21 @@ func waitForPodSuccess(c *client.Client, podName string, contName string) error
 				} else {
 					return true, fmt.Errorf("pod %s terminated with failure: %+v", podName, ci.State.Termination)
 				}
-				Logf("Waiting for pod %q status to be success or failure", podName)
+				Logf("Waiting for pod %q in namespace %s status to be success or failure", podName, namespace)
 			} else {
-				Logf("Nil State.Termination for container %s in pod %s so far", contName, podName)
+				Logf("Nil State.Termination for container %s in pod %s in namespace %s so far", contName, podName, namespace)
 			}
 		}
 		return false, nil
 	})
 }

+// waitForPodSuccess returns nil if the pod reached state success, or an error if it reached failure or ran too long.
+// The default namespace is used to identify pods.
+func waitForPodSuccess(c *client.Client, podName string, contName string) error {
+	return waitForPodSuccessInNamespace(c, podName, contName, api.NamespaceDefault)
+}
+
 func loadConfig() (*client.Config, error) {
 	switch {
 	case testContext.kubeConfig != "":
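The wrapper added here keeps every existing caller of waitForPodSuccess pinned to the default namespace, while the logging test switches to the namespace-aware variant. A small sketch contrasting the two entry points; the pod names are illustrative only and this function is not part of the commit:

// illustrativeCallers is a sketch contrasting the two entry points.
func illustrativeCallers(c *client.Client, podName, ns string) error {
	// Existing tests are unchanged and keep watching the default namespace.
	if err := waitForPodSuccess(c, "some-pod", "some-container"); err != nil {
		return err
	}
	// The cluster-level logging test now passes its per-run namespace explicitly.
	return waitForPodSuccessInNamespace(c, podName, "synth-logger", ns)
}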