k3s/test/e2e_node/log_path_test.go

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e_node

import (
	"k8s.io/api/core/v1"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/kubelet"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
)

const (
	logString     = "This is the expected log content of this node e2e test"
	logPodName    = "logger-pod"
	logContName   = "logger-container"
	checkPodName  = "checker-pod"
	checkContName = "checker-container"
)
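
// ContainerLogPath verifies that a container's stdout ends up in the
// kubelet's container log directory (kubelet.ContainerLogsDir) under the
// name <podName>_<namespace>_<containerName>-<containerID>.log, which is
// the path the checker pod below reconstructs and inspects.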
var _ = framework.KubeDescribe("ContainerLogPath [NodeConformance]", func() {
	f := framework.NewDefaultFramework("kubelet-container-log-path")
	Describe("Pod with a container", func() {
		Context("printed log to stdout", func() {
			BeforeEach(func() {
				if framework.TestContext.ContainerRuntime == "docker" {
					// Container log path support requires the JSON logging driver.
					// It does not work when the Docker daemon is logging to journald.
					d, err := getDockerLoggingDriver()
					framework.ExpectNoError(err)
					if d != "json-file" {
						framework.Skipf("Skipping because Docker daemon is using a logging driver other than \"json-file\": %s", d)
					}
					// Even if JSON logging is in use, this test fails if SELinux support
					// is enabled, since the isolation provided by the SELinux policy
					// prevents processes running inside Docker containers (under the
					// SELinux type svirt_lxc_net_t) from accessing the log files owned
					// by Docker (and labeled with the container_var_lib_t type).
					//
					// Therefore, also skip this test when running with SELinux support
					// enabled.
					e, err := isDockerSELinuxSupportEnabled()
					framework.ExpectNoError(err)
					if e {
						framework.Skipf("Skipping because Docker daemon is running with SELinux support enabled")
					}
				}
			})
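
			// The test runs two pods: a logger pod that writes logString to
			// stdout and exits, and a checker pod that mounts the expected log
			// file from the host and polls it for logString.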
It("should print log to correct log path", func() {
podClient := f.PodClient()
ns := f.Namespace.Name
logDirVolumeName := "log-dir-vol"
logDir := kubelet.ContainerLogsDir
2016-11-18 20:55:46 +00:00
logPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: logPodName,
},
2016-11-18 20:55:46 +00:00
Spec: v1.PodSpec{
// this pod is expected to exit successfully
2016-11-18 20:55:46 +00:00
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
2017-08-29 08:32:08 +00:00
Image: busyboxImage,
Name: logContName,
Command: []string{"sh", "-c", "echo " + logString},
},
},
},
}
podClient.Create(logPod)
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, logPodName, ns)
framework.ExpectNoError(err, "Failed waiting for pod: %s to enter success state", logPodName)
// get containerID from created Pod
2016-12-07 14:40:26 +00:00
createdLogPod, err := podClient.Get(logPodName, metav1.GetOptions{})
logConID := kubecontainer.ParseContainerID(createdLogPod.Status.ContainerStatuses[0].ContainerID)
framework.ExpectNoError(err, "Failed to get pod: %s", logPodName)
expectedlogFile := logDir + "/" + logPodName + "_" + ns + "_" + logContName + "-" + logConID.ID + ".log"
2017-06-18 09:51:10 +00:00
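
				// HostPathFileOrCreate lets the checker pod start even if the
				// kubelet has not written the log file yet: an empty file is
				// created on mount, and the container polls until the expected
				// content appears.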
				hostPathType := new(v1.HostPathType)
				*hostPathType = v1.HostPathFileOrCreate

				checkPod := &v1.Pod{
					ObjectMeta: metav1.ObjectMeta{
						Name: checkPodName,
					},
					Spec: v1.PodSpec{
						// this pod is expected to exit successfully
						RestartPolicy: v1.RestartPolicyNever,
						Containers: []v1.Container{
							{
								Image: busyboxImage,
								Name:  checkContName,
								// If the expected log file exists and contains the right
								// content, exit 0; otherwise keep checking until the test
								// times out.
								Command: []string{"sh", "-c", "while true; do if [ -e " + expectedLogFile + " ] && grep -q \"" + logString + "\" " + expectedLogFile + "; then exit 0; fi; sleep 1; done"},
								VolumeMounts: []v1.VolumeMount{
									{
										Name: logDirVolumeName,
										// mount the expected log file at the same path
										// inside the checker container
										MountPath: expectedLogFile,
										ReadOnly:  true,
									},
								},
							},
						},
						Volumes: []v1.Volume{
							{
								Name: logDirVolumeName,
								VolumeSource: v1.VolumeSource{
									HostPath: &v1.HostPathVolumeSource{
										Path: expectedLogFile,
										Type: hostPathType,
									},
								},
							},
						},
					},
				}
				podClient.Create(checkPod)
				err = framework.WaitForPodSuccessInNamespace(f.ClientSet, checkPodName, ns)
				framework.ExpectNoError(err, "Failed waiting for pod %s to enter success state", checkPodName)
			})
		})
	})
})