mirror of https://github.com/k3s-io/k3s
shorten eviction tests and lengthen flaky suite timeout
parent 695d438508
commit 3365cca78a

@@ -20,7 +20,7 @@ import (
 	"fmt"
 	"time"
 
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/v1"
 	nodeutil "k8s.io/kubernetes/pkg/api/v1/node"
 	"k8s.io/kubernetes/pkg/apis/componentconfig"

@@ -34,44 +34,30 @@ import (
 // Eviction Policy is described here:
 // https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/kubelet-eviction.md
 
-var _ = framework.KubeDescribe("AllocatableEviction [Slow] [Serial] [Disruptive] [Flaky]", func() {
-	f := framework.NewDefaultFramework("allocatable-eviction-test")
+var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive] [Flaky]", func() {
+	f := framework.NewDefaultFramework("memory-allocatable-eviction-test")
 
 	podTestSpecs := []podTestSpec{
 		{
 			evictionPriority: 1, // This pod should be evicted before the innocent pod
-			pod: *getMemhogPod("memory-hog-pod", "memory-hog", v1.ResourceRequirements{}),
+			pod: getMemhogPod("memory-hog-pod", "memory-hog", v1.ResourceRequirements{}),
 		},
 		{
 			evictionPriority: 0, // This pod should never be evicted
-			pod: v1.Pod{
-				ObjectMeta: metav1.ObjectMeta{Name: "innocent-pod"},
-				Spec: v1.PodSpec{
-					RestartPolicy: v1.RestartPolicyNever,
-					Containers: []v1.Container{
-						{
-							Image: "gcr.io/google_containers/busybox:1.24",
-							Name: "normal-memory-usage-container",
-							Command: []string{
-								"sh",
-								"-c", //make one big (5 Gb) file
-								"dd if=/dev/urandom of=largefile bs=5000000000 count=1; while true; do sleep 5; done",
-							},
-						},
-					},
-				},
-			},
+			pod: getInnocentPod(),
 		},
 	}
-	evictionTestTimeout := 40 * time.Minute
+	evictionTestTimeout := 10 * time.Minute
 	testCondition := "Memory Pressure"
 
 	Context(fmt.Sprintf("when we run containers that should cause %s", testCondition), func() {
 		tempSetCurrentKubeletConfig(f, func(initialConfig *componentconfig.KubeletConfiguration) {
-			initialConfig.EvictionHard = "memory.available<10%"
 			// Set large system and kube reserved values to trigger allocatable thresholds far before hard eviction thresholds.
-			initialConfig.SystemReserved = componentconfig.ConfigurationMap(map[string]string{"memory": "1Gi"})
-			initialConfig.KubeReserved = componentconfig.ConfigurationMap(map[string]string{"memory": "1Gi"})
+			kubeReserved := getNodeCPUAndMemoryCapacity(f)[v1.ResourceMemory]
+			// The default hard eviction threshold is 250Mb, so Allocatable = Capacity - Reserved - 250Mb
+			// We want Allocatable = 50Mb, so set Reserved = Capacity - Allocatable - 250Mb = Capacity - 300Mb
+			kubeReserved.Sub(resource.MustParse("300Mi"))
+			initialConfig.KubeReserved = componentconfig.ConfigurationMap(map[string]string{"memory": kubeReserved.String()})
 			initialConfig.EnforceNodeAllocatable = []string{cm.NodeAllocatableEnforcementKey}
 			initialConfig.ExperimentalNodeAllocatableIgnoreEvictionThreshold = false
 			initialConfig.CgroupsPerQOS = true

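The reserved-memory arithmetic above is what shortens this suite: rather than reserving a flat 1Gi, the test now derives KubeReserved from the node's measured capacity so that only about 50Mb stays allocatable, and the memory hog crosses the allocatable threshold within minutes. A standalone sketch of the same arithmetic, assuming a made-up 10Gi node (the real test gets capacity from getNodeCPUAndMemoryCapacity):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Assumed example capacity; the test reads the real node's memory capacity.
	kubeReserved := resource.MustParse("10Gi")

	// Allocatable = Capacity - Reserved - HardEvictionThreshold (250Mi default),
	// so reserving Capacity - 300Mi leaves roughly 50Mi allocatable.
	kubeReserved.Sub(resource.MustParse("300Mi"))

	fmt.Println(kubeReserved.String()) // 9940Mi
}
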
@@ -18,6 +18,7 @@ package e2e_node
 
 import (
 	"fmt"
+	"path/filepath"
 	"time"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -45,22 +46,19 @@ const (
 var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive] [Flaky]", func() {
 	f := framework.NewDefaultFramework("inode-eviction-test")
 
+	volumeMountPath := "/test-empty-dir-mnt"
 	podTestSpecs := []podTestSpec{
 		{
 			evictionPriority: 1, // This pod should be evicted before the normal memory usage pod
-			pod: v1.Pod{
+			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{Name: "container-inode-hog-pod"},
 				Spec: v1.PodSpec{
 					RestartPolicy: v1.RestartPolicyNever,
 					Containers: []v1.Container{
 						{
 							Image: "gcr.io/google_containers/busybox:1.24",
-							Name: "container-inode-hog-pod",
-							Command: []string{
-								"sh",
-								"-c", // Make 100 billion small files (more than we have inodes)
-								"i=0; while [[ $i -lt 100000000000 ]]; do touch smallfile$i.txt; sleep 0.001; i=$((i+=1)); done;",
-							},
+							Name: "container-inode-hog-container",
+							Command: getInodeConsumingCommand(""),
 						},
 					},
 				},

@@ -68,21 +66,17 @@ var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive] [Flak
 		},
 		{
 			evictionPriority: 1, // This pod should be evicted before the normal memory usage pod
-			pod: v1.Pod{
+			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{Name: "volume-inode-hog-pod"},
 				Spec: v1.PodSpec{
 					RestartPolicy: v1.RestartPolicyNever,
 					Containers: []v1.Container{
 						{
 							Image: "gcr.io/google_containers/busybox:1.24",
-							Name: "volume-inode-hog-pod",
-							Command: []string{
-								"sh",
-								"-c", // Make 100 billion small files (more than we have inodes)
-								"i=0; while [[ $i -lt 100000000000 ]]; do touch /test-empty-dir-mnt/smallfile$i.txt; sleep 0.001; i=$((i+=1)); done;",
-							},
+							Name: "volume-inode-hog-container",
+							Command: getInodeConsumingCommand(volumeMountPath),
 							VolumeMounts: []v1.VolumeMount{
-								{MountPath: "/test-empty-dir-mnt", Name: "test-empty-dir"},
+								{MountPath: volumeMountPath, Name: "test-empty-dir"},
 							},
 						},
 					},

@@ -94,23 +88,7 @@ var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive] [Flak
 		},
 		{
 			evictionPriority: 0, // This pod should never be evicted
-			pod: v1.Pod{
-				ObjectMeta: metav1.ObjectMeta{Name: "normal-memory-usage-pod"},
-				Spec: v1.PodSpec{
-					RestartPolicy: v1.RestartPolicyNever,
-					Containers: []v1.Container{
-						{
-							Image: "gcr.io/google_containers/busybox:1.24",
-							Name: "normal-memory-usage-pod",
-							Command: []string{
-								"sh",
-								"-c", //make one big (5 Gb) file
-								"dd if=/dev/urandom of=largefile bs=5000000000 count=1; while true; do sleep 5; done",
-							},
-						},
-					},
-				},
-			},
+			pod: getInnocentPod(),
 		},
 	}
 	evictionTestTimeout := 30 * time.Minute

@@ -118,7 +96,7 @@ var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive] [Flak
 
 	Context(fmt.Sprintf("when we run containers that should cause %s", testCondition), func() {
 		tempSetCurrentKubeletConfig(f, func(initialConfig *componentconfig.KubeletConfiguration) {
-			initialConfig.EvictionHard = "nodefs.inodesFree<50%"
+			initialConfig.EvictionHard = "nodefs.inodesFree<70%"
 		})
 		// Place the remainder of the test within a context so that the kubelet config is set before and after the test.
 		Context("With kubeconfig updated", func() {

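Raising the threshold from nodefs.inodesFree<50% to <70% is the other time saver: eviction now fires after the hogs consume just over 30% of the node's inodes instead of 50%. A back-of-the-envelope sketch, with an assumed inode count:

package main

import "fmt"

func main() {
	const totalInodes = 3200000 // assumed nodefs inode capacity, for illustration only

	for _, threshold := range []float64{0.50, 0.70} {
		// Eviction fires once free inodes fall below the threshold, i.e. once
		// the hog pods have created roughly (1 - threshold) * total files.
		mustCreate := int((1 - threshold) * totalInodes)
		fmt.Printf("nodefs.inodesFree<%.0f%%: hogs create ~%d files first\n", threshold*100, mustCreate)
	}
}
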
@@ -133,7 +111,7 @@ type podTestSpec struct {
 	// If two are ranked at 1, either is permitted to fail before the other.
 	// The test ends when all other than the 0 have been evicted
 	evictionPriority int
-	pod              v1.Pod
+	pod              *v1.Pod
 }
 
 // runEvictionTest sets up a testing environment given the provided nodes, and checks a few things:

@@ -148,7 +126,7 @@ func runEvictionTest(f *framework.Framework, testCondition string, podTestSpecs
 		By("seting up pods to be used by tests")
 		for _, spec := range podTestSpecs {
 			By(fmt.Sprintf("creating pod with container: %s", spec.pod.Name))
-			f.PodClient().CreateSync(&spec.pod)
+			f.PodClient().CreateSync(spec.pod)
 		}
 	})
 

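The podTestSpec change from pod v1.Pod to pod *v1.Pod (with CreateSync(spec.pod) replacing CreateSync(&spec.pod)) avoids copying the full Pod on every loop iteration and sidesteps a classic value-copy pitfall: ranging over a []podTestSpec yields a fresh copy of each element, so mutations made through &spec.pod never reach the slice that later assertions read. A minimal sketch with made-up types, not the real podTestSpec:

package main

import "fmt"

type pod struct{ name string }

type specByValue struct{ pod pod }
type specByPointer struct{ pod *pod }

// rename stands in for any callee that mutates the pod it receives.
func rename(p *pod) { p.name = "updated-" + p.name }

func main() {
	vals := []specByValue{{pod{"a"}}}
	for _, s := range vals {
		rename(&s.pod) // mutates the loop-variable copy only
	}
	fmt.Println(vals[0].pod.name) // "a": the update was lost

	ptrs := []specByPointer{{&pod{"a"}}}
	for _, s := range ptrs {
		rename(s.pod) // mutates the one shared pod
	}
	fmt.Println(ptrs[0].pod.name) // "updated-a"
}
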
@@ -342,3 +320,32 @@ func hasInodePressure(f *framework.Framework, testCondition string) (bool, error
 	}
 	return hasPressure, nil
 }
+
+// returns a pod that does not use any resources
+func getInnocentPod() *v1.Pod {
+	return &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{Name: "innocent-pod"},
+		Spec: v1.PodSpec{
+			RestartPolicy: v1.RestartPolicyNever,
+			Containers: []v1.Container{
+				{
+					Image: "gcr.io/google_containers/busybox:1.24",
+					Name: "innocent-container",
+					Command: []string{
+						"sh",
+						"-c", //make one large file
+						"dd if=/dev/urandom of=largefile bs=5000000000 count=1; while true; do sleep 5; done",
+					},
+				},
+			},
+		},
+	}
+}
+
+func getInodeConsumingCommand(path string) []string {
+	return []string{
+		"sh",
+		"-c",
+		fmt.Sprintf("i=0; while true; do touch %s${i}.txt; sleep 0.001; i=$((i+=1)); done;", filepath.Join(path, "smallfile")),
+	}
+}

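Both inode hogs now share getInodeConsumingCommand, with filepath.Join folding the empty-path case into a relative file name, and the loop runs until eviction rather than counting to an arbitrary bound. A quick standalone check of what the helper emits for the two call sites (the helper body is copied verbatim so the snippet runs on its own):

package main

import (
	"fmt"
	"path/filepath"
)

func getInodeConsumingCommand(path string) []string {
	return []string{
		"sh",
		"-c",
		fmt.Sprintf("i=0; while true; do touch %s${i}.txt; sleep 0.001; i=$((i+=1)); done;", filepath.Join(path, "smallfile")),
	}
}

func main() {
	// Container hog: filepath.Join("", "smallfile") is just "smallfile", so the
	// files land in the container's writable layer.
	fmt.Println(getInodeConsumingCommand("")[2])
	// Volume hog: the files land on the emptyDir volume instead.
	fmt.Println(getInodeConsumingCommand("/test-empty-dir-mnt")[2])
}
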
@@ -7,3 +7,5 @@ GINKGO_FLAGS='--focus="\[Flaky\]"'
 TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'
 KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
 PARALLELISM=1
+TIMEOUT=2h
+

@@ -57,7 +57,7 @@ var _ = framework.KubeDescribe("LocalStorageAllocatableEviction [Slow] [Serial]
 	podTestSpecs = []podTestSpec{
 		{
 			evictionPriority: 1, // This pod should be evicted before the innocent pod
-			pod: v1.Pod{
+			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{Name: "container-disk-hog-pod"},
 				Spec: v1.PodSpec{
 					RestartPolicy: v1.RestartPolicyNever,

|
@ -74,7 +74,7 @@ var _ = framework.KubeDescribe("LocalStorageAllocatableEviction [Slow] [Serial]
|
||||||
|
|
||||||
{
|
{
|
||||||
evictionPriority: 0, // This pod should never be evicted
|
evictionPriority: 0, // This pod should never be evicted
|
||||||
pod: v1.Pod{
|
pod: &v1.Pod{
|
||||||
ObjectMeta: metav1.ObjectMeta{Name: "idle-pod"},
|
ObjectMeta: metav1.ObjectMeta{Name: "idle-pod"},
|
||||||
Spec: v1.PodSpec{
|
Spec: v1.PodSpec{
|
||||||
RestartPolicy: v1.RestartPolicyNever,
|
RestartPolicy: v1.RestartPolicyNever,
|
||||||
|
@ -151,7 +151,7 @@ func runLocalStorageEvictionTest(f *framework.Framework, conditionType v1.NodeCo
|
||||||
By("seting up pods to be used by tests")
|
By("seting up pods to be used by tests")
|
||||||
for _, spec := range *podTestSpecsP {
|
for _, spec := range *podTestSpecsP {
|
||||||
By(fmt.Sprintf("creating pod with container: %s", spec.pod.Name))
|
By(fmt.Sprintf("creating pod with container: %s", spec.pod.Name))
|
||||||
f.PodClient().CreateSync(&spec.pod)
|
f.PodClient().CreateSync(spec.pod)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
|
|