Merge pull request #64125 from yujuhong/add-node-e2e-tags

Automatic merge from submit-queue (batch tested with PRs 61963, 64279, 64130, 64125, 64049). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Add node-exclusive tags to tests in test/e2e_node

Original issue: #59001
Depends on: #64128

Add node-exclusive tags based on the [proposal](https://docs.google.com/document/d/1BdNVUGtYO6NDx10x_fueRh_DLT-SVdlPC_SsXjYCHOE/edit#)

Follow-up PRs will:
 - Tag the tests in `test/e2e/common`
 - Change the test job configurations to use the new tests
 - Remove the unused, non-node-exclusive tags in `test/e2e_node`
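As a rough illustration of how these tags take effect (a sketch, not code from this PR or from the job configs): Ginkgo concatenates the nested `Describe`/`It` strings into one test description, and the runner's `--ginkgo.focus` / `--ginkgo.skip` values are matched against that string as regular expressions. The `desc` value and selector regexes below are made-up examples.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Full description as Ginkgo assembles it from the nested Describe/It strings.
	desc := "AppArmor [Feature:AppArmor][NodeFeature:AppArmor] should enforce a profile"

	// Hypothetical selectors a node e2e job might pass once the job
	// configurations are updated by the follow-up PRs above.
	focus := regexp.MustCompile(`\[NodeFeature:AppArmor\]`)
	skip := regexp.MustCompile(`\[Serial\]|\[Disruptive\]`)

	if focus.MatchString(desc) && !skip.MatchString(desc) {
		fmt.Println("run:", desc)
	} else {
		fmt.Println("skip:", desc)
	}
}
```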

**Release note**:

```release-note
NONE
```
Kubernetes Submit Queue 2018-05-25 01:09:24 -07:00 committed by GitHub
commit 690e42b734
27 changed files with 66 additions and 66 deletions


@@ -40,7 +40,7 @@ import (
. "github.com/onsi/gomega"
)
-var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor]", func() {
+var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor]", func() {
if isAppArmorEnabled() {
BeforeEach(func() {
By("Loading AppArmor profiles for testing")


@@ -75,7 +75,7 @@ func validateOOMScoreAdjSettingIsInRange(pid int, expectedMinOOMScoreAdj, expect
var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
f := framework.NewDefaultFramework("kubelet-container-manager")
-Describe("Validate OOM score adjustments", func() {
+Describe("Validate OOM score adjustments [NodeFeature:OOMScoreAdj]", func() {
Context("once the node is setup", func() {
It("container runtime's oom-score-adj should be -999", func() {
runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
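For context: `validateOOMScoreAdjSettingIsInRange` ultimately reads `/proc/<pid>/oom_score_adj` on Linux. A minimal standalone sketch of that check (the `oomScoreAdj` helper below is illustrative, not the test's actual code):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// oomScoreAdj reads a process's OOM score adjustment from procfs.
func oomScoreAdj(pid int) (int, error) {
	b, err := os.ReadFile(fmt.Sprintf("/proc/%d/oom_score_adj", pid))
	if err != nil {
		return 0, err
	}
	return strconv.Atoi(strings.TrimSpace(string(b)))
}

func main() {
	score, err := oomScoreAdj(os.Getpid())
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	// The test above expects -999 for the container runtime's processes.
	fmt.Println("oom_score_adj:", score)
}
```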


@@ -438,7 +438,7 @@ func runCPUManagerTests(f *framework.Framework) {
}
// Serial because the test updates kubelet configuration.
-var _ = SIGDescribe("CPU Manager [Serial] [Feature:CPUManager]", func() {
+var _ = SIGDescribe("CPU Manager [Serial] [Feature:CPUManager][NodeAlphaFeature:CPUManager]", func() {
f := framework.NewDefaultFramework("cpu-manager-test")
Context("With kubeconfig updated with static CPU Manager policy run the CPU Manager tests", func() {


@@ -40,7 +40,7 @@ const (
bestEffortPodName = "best-effort"
)
-var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive]", func() {
+var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod]", func() {
f := framework.NewDefaultFramework("critical-pod-test")
Context("when we need to admit a critical pod", func() {


@ -154,7 +154,7 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
for _, testArg := range dTests {
itArg := testArg
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval [Benchmark]", itArg.podsNr, itArg.interval)
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval [Benchmark][NodeSpecialFeature:Benchmark]", itArg.podsNr, itArg.interval)
It(desc, func() {
itArg.createMethod = "batch"
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
@ -192,7 +192,7 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
for _, testArg := range dTests {
itArg := testArg
Context("", func() {
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval (QPS %d) [Benchmark]", itArg.podsNr, itArg.interval, itArg.APIQPSLimit)
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval (QPS %d) [Benchmark][NodeSpecialFeature:Benchmark]", itArg.podsNr, itArg.interval, itArg.APIQPSLimit)
// The latency caused by API QPS limit takes a large portion (up to ~33%) of e2e latency.
// It makes the pod startup latency of Kubelet (creation throughput as well) under-estimated.
// Here we set API QPS limit from default 5 to 60 in order to test real Kubelet performance.
@ -273,7 +273,7 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
for _, testArg := range dTests {
itArg := testArg
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %d background pods [Benchmark]", itArg.podsNr, itArg.bgPodsNr)
desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %d background pods [Benchmark][NodeSpeicalFeature:Benchmark]", itArg.podsNr, itArg.bgPodsNr)
It(desc, func() {
itArg.createMethod = "sequence"
testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)


@@ -44,7 +44,7 @@ const (
)
// Serial because the test restarts Kubelet
-var _ = framework.KubeDescribe("Device Plugin [Feature:DevicePlugin] [Serial]", func() {
+var _ = framework.KubeDescribe("Device Plugin [Feature:DevicePlugin][NodeFeature:DevicePlugin][Serial]", func() {
f := framework.NewDefaultFramework("device-plugin-errors")
Context("DevicePlugin", func() {


@@ -30,7 +30,7 @@ import (
. "github.com/onsi/gomega"
)
-var _ = framework.KubeDescribe("Docker features [Feature:Docker]", func() {
+var _ = framework.KubeDescribe("Docker features [Feature:Docker][Legacy:Docker]", func() {
f := framework.NewDefaultFramework("docker-feature-test")
BeforeEach(func() {


@@ -43,7 +43,7 @@ const (
testCheckpointContent = `{"version":"v1","name":"fluentd-gcp-v2.0-vmnqx","namespace":"kube-system","data":{},"checksum":1799154314}`
)
-var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker]", func() {
+var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker][Legacy:Docker]", func() {
f := framework.NewDefaultFramework("dockerhism-checkpoint-test")
BeforeEach(func() {


@@ -69,7 +69,7 @@ type nodeConfigTestCase struct {
}
// This test is marked [Disruptive] because the Kubelet restarts several times during this test.
-var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig] [Serial] [Disruptive]", func() {
+var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeAlphaFeature:DynamicKubeletConfig][Serial] [Disruptive]", func() {
f := framework.NewDefaultFramework("dynamic-kubelet-configuration-test")
var beforeNode *apiv1.Node
var beforeConfigMap *apiv1.ConfigMap


@ -94,7 +94,7 @@ var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive]", fun
// ImageGCNoEviction tests that the node does not evict pods when inodes are consumed by images
// Disk pressure is induced by pulling large images
var _ = framework.KubeDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive]", func() {
var _ = framework.KubeDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
f := framework.NewDefaultFramework("image-gc-eviction-test")
pressureTimeout := 10 * time.Minute
expectedNodeCondition := v1.NodeDiskPressure
@ -123,7 +123,7 @@ var _ = framework.KubeDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive]",
// MemoryAllocatableEviction tests that the node responds to node memory pressure by evicting only responsible pods.
// Node memory pressure is only encountered because we reserve the majority of the node's capacity via kube-reserved.
var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive]", func() {
var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction", func() {
f := framework.NewDefaultFramework("memory-allocatable-eviction-test")
expectedNodeCondition := v1.NodeMemoryPressure
pressureTimeout := 10 * time.Minute
@ -155,7 +155,7 @@ var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disru
// LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods
// Disk pressure is induced by running pods which consume disk space.
var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive]", func() {
var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction", func() {
f := framework.NewDefaultFramework("localstorage-eviction-test")
pressureTimeout := 10 * time.Minute
expectedNodeCondition := v1.NodeDiskPressure
@ -183,7 +183,7 @@ var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive
// LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods
// Disk pressure is induced by running pods which consume disk space, which exceed the soft eviction threshold.
// Note: This test's purpose is to test Soft Evictions. Local storage was chosen since it is the least costly to run.
var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disruptive]", func() {
var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction", func() {
f := framework.NewDefaultFramework("localstorage-eviction-test")
pressureTimeout := 10 * time.Minute
expectedNodeCondition := v1.NodeDiskPressure
@ -218,7 +218,7 @@ var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disrup
})
// LocalStorageCapacityIsolationEviction tests that container and volume local storage limits are enforced through evictions
var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Serial] [Disruptive] [Feature:LocalStorageCapacityIsolation]", func() {
var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Serial] [Disruptive] [Feature:LocalStorageCapacityIsolation][NodeFeature:Eviction]", func() {
f := framework.NewDefaultFramework("localstorage-eviction-test")
evictionTestTimeout := 10 * time.Minute
Context(fmt.Sprintf(testContextFmt, "evictions due to pod local storage violations"), func() {
@ -271,7 +271,7 @@ var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Se
// PriorityMemoryEvictionOrdering tests that the node responds to node memory pressure by evicting pods.
// This test tests that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before
// the higher priority pod.
var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive]", func() {
var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive][NodeFeature:Eviction", func() {
f := framework.NewDefaultFramework("priority-memory-eviction-ordering-test")
expectedNodeCondition := v1.NodeMemoryPressure
pressureTimeout := 10 * time.Minute
@ -317,7 +317,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
// PriorityLocalStorageEvictionOrdering tests that the node responds to node disk pressure by evicting pods.
// This test tests that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before
// the higher priority pod.
var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disruptive]", func() {
var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disruptive][NodeFeature:Eviction", func() {
f := framework.NewDefaultFramework("priority-disk-eviction-ordering-test")
expectedNodeCondition := v1.NodeDiskPressure
pressureTimeout := 10 * time.Minute
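For context: all of these eviction tests wait for the node to report a pressure condition before checking which pods were evicted. A minimal sketch of that signal, using current client-go signatures (the `hasNodeCondition` helper is illustrative, not the framework's actual code):

```go
package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// hasNodeCondition reports whether the named node currently has the given
// condition (e.g. v1.NodeDiskPressure or v1.NodeMemoryPressure) set to True.
func hasNodeCondition(cs kubernetes.Interface, nodeName string, cond v1.NodeConditionType) (bool, error) {
	node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	for _, c := range node.Status.Conditions {
		if c.Type == cond && c.Status == v1.ConditionTrue {
			return true, nil
		}
	}
	return false, nil
}

func main() { fmt.Println("poll hasNodeCondition until the expected pressure appears") }
```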


@@ -71,7 +71,7 @@ type testRun struct {
// GarbageCollect tests that the Kubelet conforms to the Kubelet Garbage Collection Policy, found here:
// http://kubernetes.io/docs/admin/garbage-collection/
-var _ = framework.KubeDescribe("GarbageCollect [Serial]", func() {
+var _ = framework.KubeDescribe("GarbageCollect [Serial][NodeFeature:GarbageCollect]", func() {
f := framework.NewDefaultFramework("garbage-collect-test")
containerNamePrefix := "gc-test-container-"
podNamePrefix := "gc-test-pod-"


@@ -310,7 +310,7 @@ func checkDockerStorageDriver() error {
return fmt.Errorf("failed to find storage driver")
}
-var _ = framework.KubeDescribe("GKE system requirements [Conformance] [Feature:GKEEnv]", func() {
+var _ = framework.KubeDescribe("GKE system requirements [Conformance][NodeConformance][Feature:GKEEnv][NodeFeature:GKEEnv]", func() {
BeforeEach(func() {
framework.RunIfSystemSpecNameIs("gke")
})


@@ -37,7 +37,7 @@ const (
)
// Serial because the test restarts Kubelet
-var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugin] [Serial] [Disruptive]", func() {
+var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugin][NodeFeature:GPUDevicePlugin][Serial] [Disruptive]", func() {
f := framework.NewDefaultFramework("device-plugin-gpus-errors")
Context("DevicePlugin", func() {


@@ -191,7 +191,7 @@ func runHugePagesTests(f *framework.Framework) {
}
// Serial because the test updates kubelet configuration.
-var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages]", func() {
+var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeFeature:HugePages]", func() {
f := framework.NewDefaultFramework("hugepages-test")
Context("With config updated with hugepages feature enabled", func() {


@@ -26,7 +26,7 @@ import (
. "github.com/onsi/gomega"
)
-var _ = framework.KubeDescribe("ImageID", func() {
+var _ = framework.KubeDescribe("ImageID [NodeFeature:ImageID]", func() {
busyBoxImage := "k8s.gcr.io/busybox@sha256:4bdd623e848417d96127e16037743f0cd8b528c026e9175e22a84f639eca58ff"


@ -39,7 +39,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
})
Context("when scheduling a busybox command in a pod", func() {
podName := "busybox-scheduling-" + string(uuid.NewUUID())
framework.ConformanceIt("it should print the output to logs", func() {
framework.ConformanceIt("it should print the output to logs [NodeConformance]", func() {
podClient.CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
@ -92,7 +92,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
})
})
It("should have an error terminated reason", func() {
It("should have an error terminated reason [NodeConformance]", func() {
Eventually(func() error {
podData, err := podClient.Get(podName, metav1.GetOptions{})
if err != nil {
@ -112,7 +112,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
}, time.Minute, time.Second*4).Should(BeNil())
})
It("should be possible to delete", func() {
It("should be possible to delete [NodeConformance]", func() {
err := podClient.Delete(podName, &metav1.DeleteOptions{})
Expect(err).To(BeNil(), fmt.Sprintf("Error deleting Pod %v", err))
})
@ -120,7 +120,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
Context("when scheduling a busybox Pod with hostAliases", func() {
podName := "busybox-host-aliases" + string(uuid.NewUUID())
It("it should write entries to /etc/hosts", func() {
It("it should write entries to /etc/hosts [NodeConformance]", func() {
podClient.CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
@ -164,7 +164,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
})
Context("when scheduling a read only busybox container", func() {
podName := "busybox-readonly-fs" + string(uuid.NewUUID())
framework.ConformanceIt("it should not write to root filesystem", func() {
framework.ConformanceIt("it should not write to root filesystem [NodeConformance]", func() {
isReadOnly := true
podClient.CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{


@ -84,7 +84,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
}, preStopWaitTimeout, podCheckInterval).Should(BeNil())
}
}
framework.ConformanceIt("should execute poststart exec hook properly", func() {
framework.ConformanceIt("should execute poststart exec hook properly [NodeConformance]", func() {
lifecycle := &v1.Lifecycle{
PostStart: &v1.Handler{
Exec: &v1.ExecAction{
@ -95,7 +95,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
podWithHook := getPodWithHook("pod-with-poststart-exec-hook", imageutils.GetE2EImage(imageutils.Hostexec), lifecycle)
testPodWithHook(podWithHook)
})
framework.ConformanceIt("should execute prestop exec hook properly", func() {
framework.ConformanceIt("should execute prestop exec hook properly [NodeConformance]", func() {
lifecycle := &v1.Lifecycle{
PreStop: &v1.Handler{
Exec: &v1.ExecAction{
@ -106,7 +106,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
podWithHook := getPodWithHook("pod-with-prestop-exec-hook", imageutils.GetE2EImage(imageutils.Hostexec), lifecycle)
testPodWithHook(podWithHook)
})
framework.ConformanceIt("should execute poststart http hook properly", func() {
framework.ConformanceIt("should execute poststart http hook properly [NodeConformance]", func() {
lifecycle := &v1.Lifecycle{
PostStart: &v1.Handler{
HTTPGet: &v1.HTTPGetAction{
@ -119,7 +119,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
podWithHook := getPodWithHook("pod-with-poststart-http-hook", imageutils.GetPauseImageName(), lifecycle)
testPodWithHook(podWithHook)
})
framework.ConformanceIt("should execute prestop http hook properly", func() {
framework.ConformanceIt("should execute prestop http hook properly [NodeConformance]", func() {
lifecycle := &v1.Lifecycle{
PreStop: &v1.Handler{
HTTPGet: &v1.HTTPGetAction{


@@ -35,7 +35,7 @@ const (
checkContName = "checker-container"
)
-var _ = framework.KubeDescribe("ContainerLogPath", func() {
+var _ = framework.KubeDescribe("ContainerLogPath [NodeConformance]", func() {
f := framework.NewDefaultFramework("kubelet-container-log-path")
Describe("Pod with a container", func() {
Context("printed log to stdout", func() {


@ -57,7 +57,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
return checkMirrorPodRunning(f.ClientSet, mirrorPodName, ns)
}, 2*time.Minute, time.Second*4).Should(BeNil())
})
framework.ConformanceIt("should be updated when static pod updated", func() {
framework.ConformanceIt("should be updated when static pod updated [NodeConformance]", func() {
By("get mirror pod uid")
pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
Expect(err).ShouldNot(HaveOccurred())
@ -79,7 +79,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
Expect(len(pod.Spec.Containers)).Should(Equal(1))
Expect(pod.Spec.Containers[0].Image).Should(Equal(image))
})
framework.ConformanceIt("should be recreated when mirror pod gracefully deleted", func() {
framework.ConformanceIt("should be recreated when mirror pod gracefully deleted [NodeConformance]", func() {
By("get mirror pod uid")
pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
Expect(err).ShouldNot(HaveOccurred())
@ -94,7 +94,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
return checkMirrorPodRecreatedAndRunnig(f.ClientSet, mirrorPodName, ns, uid)
}, 2*time.Minute, time.Second*4).Should(BeNil())
})
framework.ConformanceIt("should be recreated when mirror pod forcibly deleted", func() {
framework.ConformanceIt("should be recreated when mirror pod forcibly deleted [NodeConformance]", func() {
By("get mirror pod uid")
pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
Expect(err).ShouldNot(HaveOccurred())


@@ -56,7 +56,7 @@ func setDesiredConfiguration(initialConfig *kubeletconfig.KubeletConfiguration)
var _ = framework.KubeDescribe("Node Container Manager [Serial]", func() {
f := framework.NewDefaultFramework("node-container-manager")
-Describe("Validate Node Allocatable", func() {
+Describe("Validate Node Allocatable [NodeFeature:NodeAllocatable]", func() {
It("set's up the node and runs the test", func() {
framework.ExpectNoError(runTest(f))
})


@@ -40,7 +40,7 @@ import (
. "github.com/onsi/gomega"
)
-var _ = framework.KubeDescribe("NodeProblemDetector", func() {
+var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDetector]", func() {
const (
pollInterval = 1 * time.Second
pollConsistent = 5 * time.Second


@ -165,7 +165,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
})
})
Describe("Pod containers", func() {
Describe("Pod containers [NodeConformance]", func() {
Context("On scheduling a Guaranteed Pod", func() {
It("Pod containers should have been created under the cgroup-root", func() {
if !framework.TestContext.KubeletConfig.CgroupsPerQOS {


@@ -59,7 +59,7 @@ func waitForPods(f *framework.Framework, pod_count int, timeout time.Duration) (
return runningPods
}
-var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
+var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeature:ContainerRuntimeRestart]", func() {
const (
// Saturate the node. It's not necessary that all these pods enter
// Running/Ready, because we don't know the number of cores in the


@ -46,7 +46,7 @@ var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() {
Describe("container runtime conformance blackbox test", func() {
Context("when starting a container that exits", func() {
framework.ConformanceIt("it should run with the expected status", func() {
framework.ConformanceIt("it should run with the expected status [NodeConformance]", func() {
restartCountVolumeName := "restart-count"
restartCountVolumePath := "/restart-count"
testContainer := v1.Container{
@@ -127,7 +127,7 @@ while true; do sleep 1; done
By("it should get the expected 'State'")
Expect(GetContainerState(status.State)).To(Equal(testCase.State))
-By("it should be possible to delete [Conformance]")
+By("it should be possible to delete [Conformance][NodeConformance]")
Expect(terminateContainer.Delete()).To(Succeed())
Eventually(terminateContainer.Present, retryTimeout, pollInterval).Should(BeFalse())
}
@@ -142,7 +142,7 @@ while true; do sleep 1; done
message gomegatypes.GomegaMatcher
}{
{
-name: "if TerminationMessagePath is set [Conformance]",
+name: "if TerminationMessagePath is set [Conformance][NodeConformance]",
container: v1.Container{
Image: busyboxImage,
Command: []string{"/bin/sh", "-c"},
@@ -157,7 +157,7 @@ while true; do sleep 1; done
},
{
-name: "if TerminationMessagePath is set as non-root user and at a non-default path [Conformance]",
+name: "if TerminationMessagePath is set as non-root user and at a non-default path [Conformance][NodeConformance]",
container: v1.Container{
Image: busyboxImage,
Command: []string{"/bin/sh", "-c"},
@@ -172,7 +172,7 @@ while true; do sleep 1; done
},
{
-name: "from log output if TerminationMessagePolicy FallbackToLogOnError is set [Conformance]",
+name: "from log output if TerminationMessagePolicy FallbackToLogOnError is set [Conformance][NodeConformance]",
container: v1.Container{
Image: busyboxImage,
Command: []string{"/bin/sh", "-c"},
@@ -185,7 +185,7 @@ while true; do sleep 1; done
},
{
-name: "as empty when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set",
+name: "as empty when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set [NodeConformance]",
container: v1.Container{
Image: busyboxImage,
Command: []string{"/bin/sh", "-c"},
@@ -198,7 +198,7 @@ while true; do sleep 1; done
},
{
-name: "from file when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set [Conformance]",
+name: "from file when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set [Conformance][NodeConformance]",
container: v1.Container{
Image: busyboxImage,
Command: []string{"/bin/sh", "-c"},
@@ -313,7 +313,7 @@ while true; do sleep 1; done
},
} {
testCase := testCase
-It(testCase.description+" [Conformance]", func() {
+It(testCase.description+" [Conformance][NodeConformance]", func() {
name := "image-pull-test"
command := []string{"/bin/sh", "-c", "while true; do sleep 1; done"}
container := ConformanceContainer{


@ -41,7 +41,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
podClient = f.PodClient()
})
Context("when pod PID namespace is configurable [Feature:ShareProcessNamespace]", func() {
Context("when pod PID namespace is configurable [Feature:ShareProcessNamespace][NodeAlphaFeature:ShareProcessNamespace]", func() {
It("containers in pods using isolated PID namespaces should all receive PID 1", func() {
By("Create a pod with isolated PID namespaces.")
f.PodClient().CreateSync(&v1.Pod{
@ -156,7 +156,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
nginxPid = strings.TrimSpace(output)
})
It("should show its pid in the host PID namespace", func() {
It("should show its pid in the host PID namespace [NodeFeature:HostAccess]", func() {
busyboxPodName := "busybox-hostpid-" + string(uuid.NewUUID())
createAndWaitHostPidPod(busyboxPodName, true)
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@ -176,7 +176,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
}
})
It("should not show its pid in the non-hostpid containers", func() {
It("should not show its pid in the non-hostpid containers [NodeFeature:HostAccess]", func() {
busyboxPodName := "busybox-non-hostpid-" + string(uuid.NewUUID())
createAndWaitHostPidPod(busyboxPodName, false)
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@ -232,7 +232,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
framework.Logf("Got host shared memory ID %q", hostSharedMemoryID)
})
It("should show the shared memory ID in the host IPC containers", func() {
It("should show the shared memory ID in the host IPC containers [NodeFeature:HostAccess]", func() {
ipcutilsPodName := "ipcutils-hostipc-" + string(uuid.NewUUID())
createAndWaitHostIPCPod(ipcutilsPodName, true)
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
@ -247,7 +247,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
}
})
It("should not show the shared memory ID in the non-hostIPC containers", func() {
It("should not show the shared memory ID in the non-hostIPC containers [NodeFeature:HostAccess]", func() {
ipcutilsPodName := "ipcutils-non-hostipc-" + string(uuid.NewUUID())
createAndWaitHostIPCPod(ipcutilsPodName, false)
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
@ -315,7 +315,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
framework.Logf("Opened a new tcp port %q", listeningPort)
})
It("should listen on same port in the host network containers", func() {
It("should listen on same port in the host network containers [NodeFeature:HostAccess]", func() {
busyboxPodName := "busybox-hostnetwork-" + string(uuid.NewUUID())
createAndWaitHostNetworkPod(busyboxPodName, true)
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@ -329,7 +329,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
}
})
It("shouldn't show the same port in the non-hostnetwork containers", func() {
It("shouldn't show the same port in the non-hostnetwork containers [NodeFeature:HostAccess]", func() {
busyboxPodName := "busybox-non-hostnetwork-" + string(uuid.NewUUID())
createAndWaitHostNetworkPod(busyboxPodName, false)
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@ -382,11 +382,11 @@ var _ = framework.KubeDescribe("Security Context", func() {
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
}
It("should run the container with uid 65534", func() {
It("should run the container with uid 65534 [NodeConformance]", func() {
createAndWaitUserPod(65534)
})
It("should run the container with uid 0", func() {
It("should run the container with uid 0 [NodeConformance]", func() {
createAndWaitUserPod(0)
})
})
@ -429,11 +429,11 @@ var _ = framework.KubeDescribe("Security Context", func() {
return podName
}
It("should run the container with readonly rootfs when readOnlyRootFilesystem=true", func() {
It("should run the container with readonly rootfs when readOnlyRootFilesystem=true [NodeConformance]", func() {
createAndWaitUserPod(true)
})
It("should run the container with writable rootfs when readOnlyRootFilesystem=false", func() {
It("should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance]", func() {
createAndWaitUserPod(false)
})
})
@ -497,14 +497,14 @@ var _ = framework.KubeDescribe("Security Context", func() {
return nil
}
It("should allow privilege escalation when not explicitly set and uid != 0", func() {
It("should allow privilege escalation when not explicitly set and uid != 0 [NodeConformance]", func() {
podName := "alpine-nnp-nil-" + string(uuid.NewUUID())
if err := createAndMatchOutput(podName, "Effective uid: 0", nil, 1000); err != nil {
framework.Failf("Match output for pod %q failed: %v", podName, err)
}
})
It("should not allow privilege escalation when false", func() {
It("should not allow privilege escalation when false [NodeConformance]", func() {
podName := "alpine-nnp-false-" + string(uuid.NewUUID())
apeFalse := false
if err := createAndMatchOutput(podName, "Effective uid: 1000", &apeFalse, 1000); err != nil {
@ -512,7 +512,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
}
})
It("should allow privilege escalation when true", func() {
It("should allow privilege escalation when true [NodeConformance]", func() {
podName := "alpine-nnp-true-" + string(uuid.NewUUID())
apeTrue := true
if err := createAndMatchOutput(podName, "Effective uid: 0", &apeTrue, 1000); err != nil {
@ -555,7 +555,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
return podName
}
It("should run the container as privileged when true", func() {
It("should run the container as privileged when true [NodeFeature:HostAccess]", func() {
podName := createAndWaitUserPod(true)
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
if err != nil {
@ -568,7 +568,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
}
})
It("should run the container as unprivileged when false", func() {
It("should run the container as unprivileged when false [NodeConformance]", func() {
podName := createAndWaitUserPod(false)
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
if err != nil {


@@ -36,7 +36,7 @@ import (
"github.com/onsi/gomega/types"
)
-var _ = framework.KubeDescribe("Summary API", func() {
+var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
f := framework.NewDefaultFramework("summary-test")
Context("when querying /stats/summary", func() {
AfterEach(func() {


@ -34,7 +34,7 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
f := framework.NewDefaultFramework("kubelet-volume-manager")
Describe("Volume Manager", func() {
Context("On terminatation of pod with memory backed volume", func() {
It("should remove the volume from the node", func() {
It("should remove the volume from the node [NodeConformance]", func() {
var (
memoryBackedPod *v1.Pod
volumeName string