fix scheduler and kubelet unit tests leaking feature flag changes

pull/58/head
Jordan Liggitt 2018-11-15 21:58:34 -05:00
parent 248d661327
commit de8bf9b63d
21 changed files with 117 additions and 161 deletions
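
The pattern applied throughout the diff: each test previously flipped the shared DefaultFeatureGate with Set(...) and either restored it by hand or not at all, so a gate toggled in one test leaked into later tests. SetFeatureGateDuringTest (from k8s.io/apiserver/pkg/util/feature/testing) sets the gate and returns a function that restores the previous value, so one deferred call scopes the change to the calling test. A minimal sketch of the idiom, with the package and test names invented for illustration:

package example_test

import (
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
	"k8s.io/kubernetes/pkg/features"
)

func TestWithScopedFeatureGate(t *testing.T) {
	// Enable the gate for this test only; the returned func restores the
	// previous value, so the change cannot leak into later tests.
	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, true)()

	// ... assertions that rely on ScheduleDaemonSetPods being enabled ...
}

Deferring the returned function restores the original value when the test function returns, even if the test fails partway through.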


@@ -84,6 +84,7 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",


@@ -35,6 +35,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apiserver/pkg/storage/names"
utilfeature "k8s.io/apiserver/pkg/util/feature"
+utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
@@ -411,7 +412,7 @@ func clearExpectations(t *testing.T, manager *daemonSetsController, ds *apps.Dae
func TestDeleteFinalStateUnknown(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
manager, _, _, err := newTestController()
if err != nil {
@@ -443,16 +444,10 @@ func markPodReady(pod *v1.Pod) {
podutil.UpdatePodCondition(&pod.Status, &condition)
}
-func setFeatureGate(t *testing.T, feature utilfeature.Feature, enabled bool) {
-if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", feature, enabled)); err != nil {
-t.Fatalf("Failed to set FeatureGate %v to %t: %v", feature, enabled, err)
-}
-}
// DaemonSets without node selectors should launch pods on every node.
func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -470,12 +465,7 @@ func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
// When ScheduleDaemonSetPods is enabled, DaemonSets without node selectors should
// launch pods on every node by NodeAffinity.
func TestSimpleDaemonSetScheduleDaemonSetPodsLaunchesPods(t *testing.T) {
-enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
-// Rollback feature gate.
-defer func() {
-setFeatureGate(t, features.ScheduleDaemonSetPods, enabled)
-}()
-setFeatureGate(t, features.ScheduleDaemonSetPods, true)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, true)()
nodeNum := 5
for _, strategy := range updateStrategies() {
@@ -552,7 +542,7 @@ func TestSimpleDaemonSetScheduleDaemonSetPodsLaunchesPods(t *testing.T) {
// of 10 pods, and verify that the ds doesn't make 100 create calls per sync pass
func TestSimpleDaemonSetPodCreateErrors(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -578,7 +568,7 @@ func TestSimpleDaemonSetPodCreateErrors(t *testing.T) {
func TestSimpleDaemonSetUpdatesStatusAfterLaunchingPods(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -614,7 +604,7 @@ func TestSimpleDaemonSetUpdatesStatusAfterLaunchingPods(t *testing.T) {
// DaemonSets should do nothing if there aren't any nodes
func TestNoNodesDoesNothing(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
manager, podControl, _, err := newTestController()
if err != nil {
@@ -632,7 +622,7 @@ func TestNoNodesDoesNothing(t *testing.T) {
// single node cluster.
func TestOneNodeDaemonLaunchesPod(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -650,7 +640,7 @@ func TestOneNodeDaemonLaunchesPod(t *testing.T) {
// DaemonSets should place onto NotReady nodes
func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -706,21 +696,7 @@ func allocatableResources(memory, cpu string) v1.ResourceList {
// When ScheduleDaemonSetPods is disabled, DaemonSets should not place onto nodes with insufficient free resource
func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
-enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
-// Rollback feature gate.
-defer func() {
-if enabled {
-err := utilfeature.DefaultFeatureGate.Set("ScheduleDaemonSetPods=true")
-if err != nil {
-t.Fatalf("Failed to enable feature gate for ScheduleDaemonSetPods: %v", err)
-}
-}
-}()
-err := utilfeature.DefaultFeatureGate.Set("ScheduleDaemonSetPods=false")
-if err != nil {
-t.Fatalf("Failed to disable feature gate for ScheduleDaemonSetPods: %v", err)
-}
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, false)()
for _, strategy := range updateStrategies() {
podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
ds := newDaemonSet("foo")
@@ -751,7 +727,7 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
// DaemonSets should not unschedule a daemonset pod from a node with insufficient free resource
func TestInsufficientCapacityNodeDaemonDoesNotUnscheduleRunningPod(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
podSpec.NodeName = "too-much-mem"
@@ -792,7 +768,7 @@ func TestInsufficientCapacityNodeDaemonDoesNotUnscheduleRunningPod(t *testing.T)
// DaemonSets should only place onto nodes with sufficient free resource and matched node selector
func TestInsufficientCapacityNodeSufficientCapacityWithNodeLabelDaemonLaunchPod(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
podSpec := resourcePodSpecWithoutNodeName("50M", "75m")
ds := newDaemonSet("foo")
ds.Spec.Template.Spec = podSpec
@@ -819,15 +795,7 @@ func TestInsufficientCapacityNodeSufficientCapacityWithNodeLabelDaemonLaunchPod(
// When ScheduleDaemonSetPods is disabled, DaemonSetPods should launch onto node with terminated pods if there
// are sufficient resources.
func TestSufficientCapacityWithTerminatedPodsDaemonLaunchesPod(t *testing.T) {
-enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
-// Rollback feature gate.
-defer func() {
-if enabled {
-setFeatureGate(t, features.ScheduleDaemonSetPods, true)
-}
-}()
-setFeatureGate(t, features.ScheduleDaemonSetPods, false)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, false)()
for _, strategy := range updateStrategies() {
podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
ds := newDaemonSet("foo")
@@ -851,15 +819,7 @@ func TestSufficientCapacityWithTerminatedPodsDaemonLaunchesPod(t *testing.T) {
// When ScheduleDaemonSetPods is disabled, DaemonSets should place onto nodes with sufficient free resources.
func TestSufficientCapacityNodeDaemonLaunchesPod(t *testing.T) {
-enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
-// Rollback feature gate.
-defer func() {
-if enabled {
-setFeatureGate(t, features.ScheduleDaemonSetPods, true)
-}
-}()
-setFeatureGate(t, features.ScheduleDaemonSetPods, false)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, false)()
for _, strategy := range updateStrategies() {
podSpec := resourcePodSpec("not-too-much-mem", "75M", "75m")
@@ -884,7 +844,7 @@ func TestSufficientCapacityNodeDaemonLaunchesPod(t *testing.T) {
// DaemonSet should launch a pod on a node with taint NetworkUnavailable condition.
func TestNetworkUnavailableNodeDaemonLaunchesPod(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("simple")
ds.Spec.UpdateStrategy = *strategy
@@ -908,7 +868,7 @@ func TestNetworkUnavailableNodeDaemonLaunchesPod(t *testing.T) {
// DaemonSets not take any actions when being deleted
func TestDontDoAnythingIfBeingDeleted(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
podSpec := resourcePodSpec("not-too-much-mem", "75M", "75m")
ds := newDaemonSet("foo")
@@ -934,7 +894,7 @@ func TestDontDoAnythingIfBeingDeleted(t *testing.T) {
func TestDontDoAnythingIfBeingDeletedRace(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
// Bare client says it IS deleted.
ds := newDaemonSet("foo")
@@ -963,15 +923,7 @@ func TestDontDoAnythingIfBeingDeletedRace(t *testing.T) {
// When ScheduleDaemonSetPods is disabled, DaemonSets should not place onto nodes that would cause port conflicts.
func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) {
-enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
-// Rollback feature gate.
-defer func() {
-if enabled {
-setFeatureGate(t, features.ScheduleDaemonSetPods, true)
-}
-}()
-setFeatureGate(t, features.ScheduleDaemonSetPods, false)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, false)()
for _, strategy := range updateStrategies() {
podSpec := v1.PodSpec{
NodeName: "port-conflict",
@@ -1005,7 +957,7 @@ func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) {
// Issue: https://github.com/kubernetes/kubernetes/issues/22309
func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
podSpec := v1.PodSpec{
NodeName: "port-conflict",
@@ -1035,7 +987,7 @@ func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
// DaemonSets should place onto nodes that would not cause port conflicts
func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
podSpec1 := v1.PodSpec{
NodeName: "no-port-conflict",
@@ -1085,7 +1037,7 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
// should detect this misconfiguration and choose not to sync the DaemonSet. We should
// not observe a deletion of the pod on node1.
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -1118,7 +1070,7 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
// Controller should not create pods on nodes which have daemon pods, and should remove excess pods from nodes that have extra pods.
func TestDealsWithExistingPods(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -1140,7 +1092,7 @@ func TestDealsWithExistingPods(t *testing.T) {
// Daemon with node selector should launch pods on nodes matching selector.
func TestSelectorDaemonLaunchesPods(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
daemon := newDaemonSet("foo")
daemon.Spec.UpdateStrategy = *strategy
@@ -1160,7 +1112,7 @@ func TestSelectorDaemonLaunchesPods(t *testing.T) {
// Daemon with node selector should delete pods from nodes that do not satisfy selector.
func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -1184,7 +1136,7 @@ func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) {
// DaemonSet with node selector should launch pods on nodes matching selector, but also deal with existing pods on nodes.
func TestSelectorDaemonDealsWithExistingPods(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -1212,7 +1164,7 @@ func TestSelectorDaemonDealsWithExistingPods(t *testing.T) {
// DaemonSet with node selector which does not match any node labels should not launch pods.
func TestBadSelectorDaemonDoesNothing(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
manager, podControl, _, err := newTestController()
if err != nil {
@@ -1232,7 +1184,7 @@ func TestBadSelectorDaemonDoesNothing(t *testing.T) {
// DaemonSet with node name should launch pod on node with corresponding name.
func TestNameDaemonSetLaunchesPods(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -1251,7 +1203,7 @@ func TestNameDaemonSetLaunchesPods(t *testing.T) {
// DaemonSet with node name that does not exist should not launch pods.
func TestBadNameDaemonSetDoesNothing(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -1270,7 +1222,7 @@ func TestBadNameDaemonSetDoesNothing(t *testing.T) {
// DaemonSet with node selector, and node name, matching a node, should launch a pod on the node.
func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -1291,7 +1243,7 @@ func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) {
// DaemonSet with node selector that matches some nodes, and node name that matches a different node, should do nothing.
func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -1312,7 +1264,7 @@ func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) {
// DaemonSet with node selector, matching some nodes, should launch pods on all the nodes.
func TestSelectorDaemonSetLaunchesPods(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
ds := newDaemonSet("foo")
ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
manager, podControl, _, err := newTestController(ds)
@@ -1329,7 +1281,7 @@ func TestSelectorDaemonSetLaunchesPods(t *testing.T) {
// Daemon with node affinity should launch pods on nodes matching affinity.
func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
daemon := newDaemonSet("foo")
daemon.Spec.UpdateStrategy = *strategy
@@ -1365,7 +1317,7 @@ func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
func TestNumberReadyStatus(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -1410,7 +1362,7 @@ func TestNumberReadyStatus(t *testing.T) {
func TestObservedGeneration(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -1457,7 +1409,7 @@ func TestDaemonKillFailedPods(t *testing.T) {
for _, test := range tests {
t.Run(test.test, func(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -1479,7 +1431,7 @@ func TestDaemonKillFailedPods(t *testing.T) {
// DaemonSet controller needs to backoff when killing failed pods to avoid hot looping and fighting with kubelet.
func TestDaemonKillFailedPodsBackoff(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
t.Run(string(strategy.Type), func(t *testing.T) {
ds := newDaemonSet("foo")
@@ -1549,7 +1501,7 @@ func TestDaemonKillFailedPodsBackoff(t *testing.T) {
// tolerate the nodes NoSchedule taint
func TestNoScheduleTaintedDoesntEvicitRunningIntolerantPod(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("intolerant")
ds.Spec.UpdateStrategy = *strategy
@@ -1573,7 +1525,7 @@ func TestNoScheduleTaintedDoesntEvicitRunningIntolerantPod(t *testing.T) {
// tolerate the nodes NoExecute taint
func TestNoExecuteTaintedDoesEvicitRunningIntolerantPod(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("intolerant")
ds.Spec.UpdateStrategy = *strategy
@@ -1596,7 +1548,7 @@ func TestNoExecuteTaintedDoesEvicitRunningIntolerantPod(t *testing.T) {
// DaemonSet should not launch a pod on a tainted node when the pod doesn't tolerate that taint.
func TestTaintedNodeDaemonDoesNotLaunchIntolerantPod(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("intolerant")
ds.Spec.UpdateStrategy = *strategy
@@ -1618,7 +1570,7 @@ func TestTaintedNodeDaemonDoesNotLaunchIntolerantPod(t *testing.T) {
// DaemonSet should launch a pod on a tainted node when the pod can tolerate that taint.
func TestTaintedNodeDaemonLaunchesToleratePod(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("tolerate")
ds.Spec.UpdateStrategy = *strategy
@@ -1641,7 +1593,7 @@ func TestTaintedNodeDaemonLaunchesToleratePod(t *testing.T) {
// DaemonSet should launch a pod on a not ready node with taint notReady:NoExecute.
func TestNotReadyNodeDaemonLaunchesPod(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("simple")
ds.Spec.UpdateStrategy = *strategy
@@ -1666,7 +1618,7 @@ func TestNotReadyNodeDaemonLaunchesPod(t *testing.T) {
// DaemonSet should launch a pod on an unreachable node with taint unreachable:NoExecute.
func TestUnreachableNodeDaemonLaunchesPod(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("simple")
ds.Spec.UpdateStrategy = *strategy
@@ -1691,7 +1643,7 @@ func TestUnreachableNodeDaemonLaunchesPod(t *testing.T) {
// DaemonSet should launch a pod on an untainted node when the pod has tolerations.
func TestNodeDaemonLaunchesToleratePod(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("tolerate")
ds.Spec.UpdateStrategy = *strategy
@@ -1711,7 +1663,7 @@ func TestNodeDaemonLaunchesToleratePod(t *testing.T) {
// DaemonSet should launch a pod on a not ready node with taint notReady:NoExecute.
func TestDaemonSetRespectsTermination(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -1742,12 +1694,8 @@ func setDaemonSetToleration(ds *apps.DaemonSet, tolerations []v1.Toleration) {
// DaemonSet should launch a critical pod even when the node with OutOfDisk taints.
// TODO(#48843) OutOfDisk taints will be removed in 1.10
func TestTaintOutOfDiskNodeDaemonLaunchesCriticalPod(t *testing.T) {
-enabled := utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation)
-defer func() {
-setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, enabled)
-}()
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("critical")
ds.Spec.UpdateStrategy = *strategy
@@ -1765,12 +1713,12 @@ func TestTaintOutOfDiskNodeDaemonLaunchesCriticalPod(t *testing.T) {
// NOTE: Whether or not TaintNodesByCondition is enabled, it'll add toleration to DaemonSet pods.
// Without enabling critical pod annotation feature gate, we shouldn't create critical pod
-setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, false)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, false)()
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
// With enabling critical pod annotation feature gate, we will create critical pod
-setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, true)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, true)()
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
}
@@ -1779,13 +1727,8 @@ func TestTaintOutOfDiskNodeDaemonLaunchesCriticalPod(t *testing.T) {
// DaemonSet should launch a pod even when the node with MemoryPressure/DiskPressure taints.
func TestTaintPressureNodeDaemonLaunchesPod(t *testing.T) {
-enabled := utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition)
-defer func() {
-setFeatureGate(t, features.TaintNodesByCondition, enabled)
-}()
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("critical")
ds.Spec.UpdateStrategy = *strategy
@@ -1807,7 +1750,7 @@ func TestTaintPressureNodeDaemonLaunchesPod(t *testing.T) {
manager.nodeStore.Add(node)
// Enabling critical pod and taint nodes by condition feature gate should create critical pod
-setFeatureGate(t, features.TaintNodesByCondition, true)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.TaintNodesByCondition, true)()
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
}
@@ -1816,10 +1759,7 @@ func TestTaintPressureNodeDaemonLaunchesPod(t *testing.T) {
// When ScheduleDaemonSetPods is disabled, DaemonSet should launch a critical pod even when the node has insufficient free resource.
func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {
-enabled := utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation)
-defer func() {
-setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, enabled)
-}()
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, false)()
for _, strategy := range updateStrategies() {
podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
ds := newDaemonSet("critical")
@@ -1839,7 +1779,7 @@ func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {
})
// Without enabling critical pod annotation feature gate, we shouldn't create critical pod
-setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, false)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, false)()
manager.dsStore.Add(ds)
switch strategy.Type {
case apps.OnDeleteDaemonSetStrategyType:
@@ -1851,7 +1791,7 @@ func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {
}
// Enabling critical pod annotation feature gate should create critical pod
-setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, true)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, true)()
switch strategy.Type {
case apps.OnDeleteDaemonSetStrategyType:
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 2)
@@ -1865,11 +1805,7 @@ func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {
// When ScheduleDaemonSetPods is disabled, DaemonSets should NOT launch a critical pod when there are port conflicts.
func TestPortConflictNodeDaemonDoesNotLaunchCriticalPod(t *testing.T) {
-enabled := utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation)
-defer func() {
-setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, enabled)
-}()
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, false)()
for _, strategy := range updateStrategies() {
podSpec := v1.PodSpec{
NodeName: "port-conflict",
@@ -1889,7 +1825,7 @@ func TestPortConflictNodeDaemonDoesNotLaunchCriticalPod(t *testing.T) {
Spec: podSpec,
})
-setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, true)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, true)()
ds := newDaemonSet("critical")
ds.Spec.UpdateStrategy = *strategy
ds.Spec.Template.Spec = podSpec
@@ -1909,7 +1845,7 @@ func setDaemonSetCritical(ds *apps.DaemonSet) {
func TestNodeShouldRunDaemonPod(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
var shouldCreate, wantToRun, shouldContinueRunning bool
if utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) {
shouldCreate = true
@@ -2236,7 +2172,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
func TestUpdateNode(t *testing.T) {
var enqueued bool
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
cases := []struct {
test string
newNode *v1.Node
@@ -2545,7 +2481,7 @@ func TestDeleteNoDaemonPod(t *testing.T) {
func TestGetNodesToDaemonPods(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
@@ -2611,7 +2547,7 @@ func TestGetNodesToDaemonPods(t *testing.T) {
func TestAddNode(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
manager, _, _, err := newTestController()
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
@@ -2640,7 +2576,7 @@ func TestAddNode(t *testing.T) {
func TestAddPod(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
manager, _, _, err := newTestController()
if err != nil {
@@ -2686,7 +2622,7 @@ func TestAddPod(t *testing.T) {
func TestAddPodOrphan(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
manager, _, _, err := newTestController()
if err != nil {
@@ -2718,7 +2654,7 @@ func TestAddPodOrphan(t *testing.T) {
func TestUpdatePod(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
manager, _, _, err := newTestController()
if err != nil {
@@ -2768,7 +2704,7 @@ func TestUpdatePod(t *testing.T) {
func TestUpdatePodOrphanSameLabels(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
manager, _, _, err := newTestController()
@@ -2795,7 +2731,7 @@ func TestUpdatePodOrphanSameLabels(t *testing.T) {
func TestUpdatePodOrphanWithNewLabels(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
manager, _, _, err := newTestController()
@@ -2826,7 +2762,7 @@ func TestUpdatePodOrphanWithNewLabels(t *testing.T) {
func TestUpdatePodChangeControllerRef(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
@@ -2854,7 +2790,7 @@ func TestUpdatePodChangeControllerRef(t *testing.T) {
func TestUpdatePodControllerRefRemoved(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
manager, _, _, err := newTestController()
@@ -2882,7 +2818,7 @@ func TestUpdatePodControllerRefRemoved(t *testing.T) {
func TestDeletePod(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
manager, _, _, err := newTestController()
@@ -2929,7 +2865,7 @@ func TestDeletePod(t *testing.T) {
func TestDeletePodOrphan(t *testing.T) {
for _, f := range []bool{true, false} {
-setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
for _, strategy := range updateStrategies() {
manager, _, _, err := newTestController()


@@ -60,6 +60,7 @@ go_test(
"//pkg/kubelet/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
"//vendor/github.com/google/cadvisor/metrics:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",


@@ -19,14 +19,15 @@ limitations under the License.
package cadvisor
import (
-"fmt"
+"testing"
info "github.com/google/cadvisor/info/v1"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/api/resource"
utilfeature "k8s.io/apiserver/pkg/util/feature"
+utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
-"testing"
)
func TestCapacityFromMachineInfo(t *testing.T) {
@@ -42,7 +43,7 @@ func TestCapacityFromMachineInfo(t *testing.T) {
}
// enable the features.HugePages
-utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.HugePages))
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HugePages, true)()
resourceList := CapacityFromMachineInfo(machineInfo)


@ -34,6 +34,7 @@ import (
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
core "k8s.io/client-go/testing" core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/record" "k8s.io/client-go/tools/record"
@ -42,6 +43,7 @@ import (
// to "v1"? // to "v1"?
_ "k8s.io/kubernetes/pkg/apis/core/install" _ "k8s.io/kubernetes/pkg/apis/core/install"
"k8s.io/kubernetes/pkg/features"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/server/portforward" "k8s.io/kubernetes/pkg/kubelet/server/portforward"
@ -91,9 +93,7 @@ func TestDisabledSubpath(t *testing.T) {
}, },
} }
utilfeature.DefaultFeatureGate.Set("VolumeSubpath=false") defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeSubpath, false)()
defer utilfeature.DefaultFeatureGate.Set("VolumeSubpath=true")
for name, test := range cases { for name, test := range cases {
_, _, err := makeMounts(&pod, "/pod", &test.container, "fakepodname", "", "", podVolumes, fm, nil) _, _, err := makeMounts(&pod, "/pod", &test.container, "fakepodname", "", "", podVolumes, fm, nil)
if err != nil && !test.expectError { if err != nil && !test.expectError {
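Before this change the subpath test disabled the gate and then unconditionally re-enabled it, which dropped any Set error and assumed the gate had been true to begin with. A sketch of the difference (illustrative test name; imports as in the hunk above):

    func TestSubpathDisabledForThisTestOnly(t *testing.T) {
        // Old pattern: hard-coded restore value, errors ignored.
        //   utilfeature.DefaultFeatureGate.Set("VolumeSubpath=false")
        //   defer utilfeature.DefaultFeatureGate.Set("VolumeSubpath=true")
        //
        // New pattern: the helper snapshots the current value and the returned
        // func restores that snapshot, whatever it was.
        defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeSubpath, false)()
        // ... run the makeMounts cases that expect subpath volumes to be rejected ...
    }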


@ -46,11 +46,13 @@ go_test(
deps = [ deps = [
"//pkg/apis/core:go_default_library", "//pkg/apis/core:go_default_library",
"//pkg/apis/scheduling:go_default_library", "//pkg/apis/scheduling:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/types:go_default_library", "//pkg/kubelet/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library",
], ],
) )


@ -24,9 +24,11 @@ import (
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/client-go/tools/record" "k8s.io/client-go/tools/record"
kubeapi "k8s.io/kubernetes/pkg/apis/core" kubeapi "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/scheduling" "k8s.io/kubernetes/pkg/apis/scheduling"
"k8s.io/kubernetes/pkg/features"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types" kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
) )
@ -89,9 +91,7 @@ func getTestCriticalPodAdmissionHandler(podProvider *fakePodProvider, podKiller
} }
func TestEvictPodsToFreeRequests(t *testing.T) { func TestEvictPodsToFreeRequests(t *testing.T) {
if err := utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=true"); err != nil { defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, true)()
t.Errorf("failed to set ExperimentalCriticalPodAnnotation to true: %v", err)
}
type testRun struct { type testRun struct {
testName string testName string
inputPods []*v1.Pod inputPods []*v1.Pod
@ -159,6 +159,7 @@ func BenchmarkGetPodsToPreempt(t *testing.B) {
} }
func TestGetPodsToPreempt(t *testing.T) { func TestGetPodsToPreempt(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, true)()
type testRun struct { type testRun struct {
testName string testName string
preemptor *v1.Pod preemptor *v1.Pod


@ -38,9 +38,11 @@ go_test(
], ],
embed = [":go_default_library"], embed = [":go_default_library"],
deps = [ deps = [
"//pkg/features:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/github.com/stretchr/testify/require:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library",
], ],


@ -24,6 +24,8 @@ import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/kubernetes/pkg/features"
) )
func TestGetValidatedSources(t *testing.T) { func TestGetValidatedSources(t *testing.T) {
@ -116,9 +118,7 @@ func TestString(t *testing.T) {
} }
func TestIsCriticalPod(t *testing.T) { func TestIsCriticalPod(t *testing.T) {
if err := utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=true"); err != nil { defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, true)()
t.Errorf("failed to set ExperimentalCriticalPodAnnotation to true: %v", err)
}
cases := []struct { cases := []struct {
pod v1.Pod pod v1.Pod
expected bool expected bool


@ -66,6 +66,7 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",


@ -79,6 +79,7 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library",
], ],
) )


@ -17,7 +17,6 @@ limitations under the License.
package priorities package priorities
import ( import (
"fmt"
"reflect" "reflect"
"testing" "testing"
@ -25,6 +24,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
@ -44,7 +44,7 @@ func getExistingVolumeCountForNode(pods []*v1.Pod, maxVolumes int) int {
func TestBalancedResourceAllocation(t *testing.T) { func TestBalancedResourceAllocation(t *testing.T) {
// Enable volumesOnNodeForBalancing to do balanced resource allocation // Enable volumesOnNodeForBalancing to do balanced resource allocation
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes)) defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
podwithVol1 := v1.PodSpec{ podwithVol1 := v1.PodSpec{
Containers: []v1.Container{ Containers: []v1.Container{
{ {


@ -18,8 +18,10 @@ go_test(
srcs = ["plugins_test.go"], srcs = ["plugins_test.go"],
embed = [":go_default_library"], embed = [":go_default_library"],
deps = [ deps = [
"//pkg/features:go_default_library",
"//pkg/scheduler/factory:go_default_library", "//pkg/scheduler/factory:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
], ],
) )


@ -21,6 +21,8 @@ import (
"testing" "testing"
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/factory" "k8s.io/kubernetes/pkg/scheduler/factory"
) )
@ -90,7 +92,7 @@ func TestApplyFeatureGates(t *testing.T) {
} }
// Apply features for algorithm providers. // Apply features for algorithm providers.
utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=True") defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.TaintNodesByCondition, true)()
ApplyFeatureGates() ApplyFeatureGates()
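ApplyFeatureGates reads the gate when it runs, so the test only needs the gate held across that call and the assertions that follow; the deferred restore then keeps later tests from observing a mutated DefaultFeatureGate. A sketch (illustrative name; imports as in the hunk above):

    func TestApplyFeatureGatesWithTaintNodesByCondition(t *testing.T) {
        // Hold the gate for the duration of this test and restore it on return.
        defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.TaintNodesByCondition, true)()
        ApplyFeatureGates()
        // ... assert that the registered providers and predicates reflect the enabled gate ...
    }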


@ -40,6 +40,7 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
], ],
) )


@ -29,6 +29,7 @@ import (
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
@ -92,7 +93,7 @@ func newNodeInfo(requestedResource *schedulercache.Resource,
// on node level. // on node level.
func TestAssumePodScheduled(t *testing.T) { func TestAssumePodScheduled(t *testing.T) {
// Enable volumesOnNodeForBalancing to do balanced resource allocation // Enable volumesOnNodeForBalancing to do balanced resource allocation
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes)) defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
nodeName := "node" nodeName := "node"
testPods := []*v1.Pod{ testPods := []*v1.Pod{
makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}), makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
@ -240,7 +241,7 @@ func assumeAndFinishBinding(cache *schedulerCache, pod *v1.Pod, assumedTime time
// The removal will be reflected in node info. // The removal will be reflected in node info.
func TestExpirePod(t *testing.T) { func TestExpirePod(t *testing.T) {
// Enable volumesOnNodeForBalancing to do balanced resource allocation // Enable volumesOnNodeForBalancing to do balanced resource allocation
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes)) defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
nodeName := "node" nodeName := "node"
testPods := []*v1.Pod{ testPods := []*v1.Pod{
makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}), makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
@ -299,7 +300,7 @@ func TestExpirePod(t *testing.T) {
// The pod info should still exist after manually expiring unconfirmed pods. // The pod info should still exist after manually expiring unconfirmed pods.
func TestAddPodWillConfirm(t *testing.T) { func TestAddPodWillConfirm(t *testing.T) {
// Enable volumesOnNodeForBalancing to do balanced resource allocation // Enable volumesOnNodeForBalancing to do balanced resource allocation
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes)) defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
nodeName := "node" nodeName := "node"
now := time.Now() now := time.Now()
ttl := 10 * time.Second ttl := 10 * time.Second
@ -455,7 +456,7 @@ func TestAddPodWillReplaceAssumed(t *testing.T) {
// TestAddPodAfterExpiration tests that a pod being Add()ed will be added back if expired. // TestAddPodAfterExpiration tests that a pod being Add()ed will be added back if expired.
func TestAddPodAfterExpiration(t *testing.T) { func TestAddPodAfterExpiration(t *testing.T) {
// Enable volumesOnNodeForBalancing to do balanced resource allocation // Enable volumesOnNodeForBalancing to do balanced resource allocation
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes)) defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
nodeName := "node" nodeName := "node"
ttl := 10 * time.Second ttl := 10 * time.Second
basePod := makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}) basePod := makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}})
@ -504,7 +505,7 @@ func TestAddPodAfterExpiration(t *testing.T) {
// TestUpdatePod tests that a pod will be updated if added before. // TestUpdatePod tests that a pod will be updated if added before.
func TestUpdatePod(t *testing.T) { func TestUpdatePod(t *testing.T) {
// Enable volumesOnNodeForBalancing to do balanced resource allocation // Enable volumesOnNodeForBalancing to do balanced resource allocation
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes)) defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
nodeName := "node" nodeName := "node"
ttl := 10 * time.Second ttl := 10 * time.Second
testPods := []*v1.Pod{ testPods := []*v1.Pod{
@ -630,7 +631,7 @@ func TestUpdatePodAndGet(t *testing.T) {
// TestExpireAddUpdatePod test the sequence that a pod is expired, added, then updated // TestExpireAddUpdatePod test the sequence that a pod is expired, added, then updated
func TestExpireAddUpdatePod(t *testing.T) { func TestExpireAddUpdatePod(t *testing.T) {
// Enable volumesOnNodeForBalancing to do balanced resource allocation // Enable volumesOnNodeForBalancing to do balanced resource allocation
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes)) defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
nodeName := "node" nodeName := "node"
ttl := 10 * time.Second ttl := 10 * time.Second
testPods := []*v1.Pod{ testPods := []*v1.Pod{
@ -727,7 +728,7 @@ func makePodWithEphemeralStorage(nodeName, ephemeralStorage string) *v1.Pod {
func TestEphemeralStorageResource(t *testing.T) { func TestEphemeralStorageResource(t *testing.T) {
// Enable volumesOnNodeForBalancing to do balanced resource allocation // Enable volumesOnNodeForBalancing to do balanced resource allocation
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes)) defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
nodeName := "node" nodeName := "node"
podE := makePodWithEphemeralStorage(nodeName, "500") podE := makePodWithEphemeralStorage(nodeName, "500")
tests := []struct { tests := []struct {
@ -772,7 +773,7 @@ func TestEphemeralStorageResource(t *testing.T) {
// TestRemovePod tests after added pod is removed, its information should also be subtracted. // TestRemovePod tests after added pod is removed, its information should also be subtracted.
func TestRemovePod(t *testing.T) { func TestRemovePod(t *testing.T) {
// Enable volumesOnNodeForBalancing to do balanced resource allocation // Enable volumesOnNodeForBalancing to do balanced resource allocation
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes)) defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
nodeName := "node" nodeName := "node"
basePod := makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}) basePod := makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}})
tests := []struct { tests := []struct {
@ -1126,7 +1127,7 @@ func BenchmarkList1kNodes30kPods(b *testing.B) {
func BenchmarkUpdate1kNodes30kPods(b *testing.B) { func BenchmarkUpdate1kNodes30kPods(b *testing.B) {
// Enable volumesOnNodeForBalancing to do balanced resource allocation // Enable volumesOnNodeForBalancing to do balanced resource allocation
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes)) defer utilfeaturetesting.SetFeatureGateDuringTest(nil, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
cache := setupCacheOf1kNodes30kPods(b) cache := setupCacheOf1kNodes30kPods(b)
b.ResetTimer() b.ResetTimer()
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
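BenchmarkUpdate1kNodes30kPods passes nil for the testing handle, presumably because the helper takes a *testing.T that a *testing.B cannot satisfy, so a failed Set has nowhere to be reported. If that ever matters, a manual snapshot-and-restore works in benchmarks too; a hypothetical sketch (the helper name is invented, and it assumes the fmt and utilfeature imports plus DefaultFeatureGate's Set and Enabled methods):

    // setGateForBenchmark is hypothetical, not part of this change: it snapshots
    // the gate, applies the requested value, and panics on error instead of
    // reporting through the testing package.
    func setGateForBenchmark(f utilfeature.Feature, enabled bool) func() {
        prev := utilfeature.DefaultFeatureGate.Enabled(f)
        if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", f, enabled)); err != nil {
            panic(err)
        }
        return func() {
            if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", f, prev)); err != nil {
                panic(err)
            }
        }
    }

    func BenchmarkWithBalancedVolumes(b *testing.B) {
        defer setGateForBenchmark(features.BalanceAttachedNodeVolumes, true)()
        // ... benchmark body ...
    }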


@ -33,6 +33,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/client-go/informers" "k8s.io/client-go/informers"
clientsetfake "k8s.io/client-go/kubernetes/fake" clientsetfake "k8s.io/client-go/kubernetes/fake"
corelister "k8s.io/client-go/listers/core/v1" corelister "k8s.io/client-go/listers/core/v1"


@ -12,6 +12,7 @@ go_test(
embed = [":go_default_library"], embed = [":go_default_library"],
deps = [ deps = [
"//pkg/apis/core:go_default_library", "//pkg/apis/core:go_default_library",
"//pkg/features:go_default_library",
"//pkg/scheduler/api:go_default_library", "//pkg/scheduler/api:go_default_library",
"//pkg/util/tolerations:go_default_library", "//pkg/util/tolerations:go_default_library",
"//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library", "//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library",
@ -21,6 +22,7 @@ go_test(
"//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/admission/initializer:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission/initializer:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",


@ -27,10 +27,12 @@ import (
"k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/admission"
genericadmissioninitializer "k8s.io/apiserver/pkg/admission/initializer" genericadmissioninitializer "k8s.io/apiserver/pkg/admission/initializer"
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/client-go/informers" "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
api "k8s.io/kubernetes/pkg/apis/core" api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/features"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/pkg/util/tolerations" "k8s.io/kubernetes/pkg/util/tolerations"
pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction" pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction"
@ -83,9 +85,7 @@ func TestPodAdmission(t *testing.T) {
}, },
} }
if err := utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=true"); err != nil { defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.TaintNodesByCondition, true)()
t.Errorf("Failed to enable TaintByCondition feature: %v.", err)
}
tests := []struct { tests := []struct {
pod *api.Pod pod *api.Pod


@ -21,6 +21,7 @@ go_test(
"//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/klog:go_default_library",
], ],


@ -17,7 +17,6 @@ limitations under the License.
package priority package priority
import ( import (
"fmt"
"testing" "testing"
"k8s.io/klog" "k8s.io/klog"
@ -27,6 +26,7 @@ import (
"k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authentication/user"
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/client-go/informers" "k8s.io/client-go/informers"
api "k8s.io/kubernetes/pkg/apis/core" api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/scheduling" "k8s.io/kubernetes/pkg/apis/scheduling"
@ -468,9 +468,9 @@ func TestPodAdmission(t *testing.T) {
}, },
} }
// Enable PodPriority feature gate. // Enable PodPriority feature gate.
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority)) defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
// Enable ExperimentalCriticalPodAnnotation feature gate. // Enable ExperimentalCriticalPodAnnotation feature gate.
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.ExperimentalCriticalPodAnnotation)) defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, true)()
tests := []struct { tests := []struct {
name string name string
existingClasses []*scheduling.PriorityClass existingClasses []*scheduling.PriorityClass