diff --git a/pkg/controller/daemon/BUILD b/pkg/controller/daemon/BUILD
index 1d8a3541dd..1b5e1f7841 100644
--- a/pkg/controller/daemon/BUILD
+++ b/pkg/controller/daemon/BUILD
@@ -84,6 +84,7 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
         "//staging/src/k8s.io/client-go/informers:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
         "//staging/src/k8s.io/client-go/testing:go_default_library",
diff --git a/pkg/controller/daemon/daemon_controller_test.go b/pkg/controller/daemon/daemon_controller_test.go
index cad5e61e47..8352531fdb 100644
--- a/pkg/controller/daemon/daemon_controller_test.go
+++ b/pkg/controller/daemon/daemon_controller_test.go
@@ -35,6 +35,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apiserver/pkg/storage/names"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
@@ -411,7 +412,7 @@ func clearExpectations(t *testing.T, manager *daemonSetsController, ds *apps.Dae
 
 func TestDeleteFinalStateUnknown(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			manager, _, _, err := newTestController()
 			if err != nil {
@@ -443,16 +444,10 @@ func markPodReady(pod *v1.Pod) {
 	podutil.UpdatePodCondition(&pod.Status, &condition)
 }
 
-func setFeatureGate(t *testing.T, feature utilfeature.Feature, enabled bool) {
-	if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", feature, enabled)); err != nil {
-		t.Fatalf("Failed to set FeatureGate %v to %t: %v", feature, enabled, err)
-	}
-}
-
 // DaemonSets without node selectors should launch pods on every node.
 func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("foo")
 			ds.Spec.UpdateStrategy = *strategy
@@ -470,12 +465,7 @@ func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
 // When ScheduleDaemonSetPods is enabled, DaemonSets without node selectors should
 // launch pods on every node by NodeAffinity.
 func TestSimpleDaemonSetScheduleDaemonSetPodsLaunchesPods(t *testing.T) {
-	enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
-	// Rollback feature gate.
-	defer func() {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, enabled)
-	}()
-	setFeatureGate(t, features.ScheduleDaemonSetPods, true)
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, true)()
 
 	nodeNum := 5
 	for _, strategy := range updateStrategies() {
@@ -552,7 +542,7 @@ func TestSimpleDaemonSetScheduleDaemonSetPodsLaunchesPods(t *testing.T) {
 // of 10 pods, and verify that the ds doesn't make 100 create calls per sync pass
 func TestSimpleDaemonSetPodCreateErrors(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("foo")
 			ds.Spec.UpdateStrategy = *strategy
@@ -578,7 +568,7 @@ func TestSimpleDaemonSetPodCreateErrors(t *testing.T) {
 
 func TestSimpleDaemonSetUpdatesStatusAfterLaunchingPods(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("foo")
 			ds.Spec.UpdateStrategy = *strategy
@@ -614,7 +604,7 @@ func TestSimpleDaemonSetUpdatesStatusAfterLaunchingPods(t *testing.T) {
 // DaemonSets should do nothing if there aren't any nodes
 func TestNoNodesDoesNothing(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			manager, podControl, _, err := newTestController()
 			if err != nil {
@@ -632,7 +622,7 @@ func TestNoNodesDoesNothing(t *testing.T) {
 // single node cluster.
 func TestOneNodeDaemonLaunchesPod(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("foo")
 			ds.Spec.UpdateStrategy = *strategy
@@ -650,7 +640,7 @@ func TestOneNodeDaemonLaunchesPod(t *testing.T) {
 // DaemonSets should place onto NotReady nodes
 func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("foo")
 			ds.Spec.UpdateStrategy = *strategy
@@ -706,21 +696,7 @@ func allocatableResources(memory, cpu string) v1.ResourceList {
 
 // When ScheduleDaemonSetPods is disabled, DaemonSets should not place onto nodes with insufficient free resource
 func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
-	enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
-	// Rollback feature gate.
-	defer func() {
-		if enabled {
-			err := utilfeature.DefaultFeatureGate.Set("ScheduleDaemonSetPods=true")
-			if err != nil {
-				t.Fatalf("Failed to enable feature gate for ScheduleDaemonSetPods: %v", err)
-			}
-		}
-	}()
-
-	err := utilfeature.DefaultFeatureGate.Set("ScheduleDaemonSetPods=false")
-	if err != nil {
-		t.Fatalf("Failed to disable feature gate for ScheduleDaemonSetPods: %v", err)
-	}
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, false)()
 	for _, strategy := range updateStrategies() {
 		podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
 		ds := newDaemonSet("foo")
@@ -751,7 +727,7 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
 // DaemonSets should not unschedule a daemonset pod from a node with insufficient free resource
 func TestInsufficientCapacityNodeDaemonDoesNotUnscheduleRunningPod(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
 			podSpec.NodeName = "too-much-mem"
@@ -792,7 +768,7 @@ func TestInsufficientCapacityNodeDaemonDoesNotUnscheduleRunningPod(t *testing.T)
 // DaemonSets should only place onto nodes with sufficient free resource and matched node selector
 func TestInsufficientCapacityNodeSufficientCapacityWithNodeLabelDaemonLaunchPod(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		podSpec := resourcePodSpecWithoutNodeName("50M", "75m")
 		ds := newDaemonSet("foo")
 		ds.Spec.Template.Spec = podSpec
@@ -819,15 +795,7 @@ func TestInsufficientCapacityNodeSufficientCapacityWithNodeLabelDaemonLaunchPod(
 // When ScheduleDaemonSetPods is disabled, DaemonSetPods should launch onto node with terminated pods if there
 // are sufficient resources.
 func TestSufficientCapacityWithTerminatedPodsDaemonLaunchesPod(t *testing.T) {
-	enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
-	// Rollback feature gate.
-	defer func() {
-		if enabled {
-			setFeatureGate(t, features.ScheduleDaemonSetPods, true)
-		}
-	}()
-
-	setFeatureGate(t, features.ScheduleDaemonSetPods, false)
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, false)()
 	for _, strategy := range updateStrategies() {
 		podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
 		ds := newDaemonSet("foo")
@@ -851,15 +819,7 @@ func TestSufficientCapacityWithTerminatedPodsDaemonLaunchesPod(t *testing.T) {
 
 // When ScheduleDaemonSetPods is disabled, DaemonSets should place onto nodes with sufficient free resources.
 func TestSufficientCapacityNodeDaemonLaunchesPod(t *testing.T) {
-	enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
-	// Rollback feature gate.
-	defer func() {
-		if enabled {
-			setFeatureGate(t, features.ScheduleDaemonSetPods, true)
-		}
-	}()
-
-	setFeatureGate(t, features.ScheduleDaemonSetPods, false)
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, false)()
 
 	for _, strategy := range updateStrategies() {
 		podSpec := resourcePodSpec("not-too-much-mem", "75M", "75m")
@@ -884,7 +844,7 @@ func TestSufficientCapacityNodeDaemonLaunchesPod(t *testing.T) {
 // DaemonSet should launch a pod on a node with taint NetworkUnavailable condition.
 func TestNetworkUnavailableNodeDaemonLaunchesPod(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("simple")
 			ds.Spec.UpdateStrategy = *strategy
@@ -908,7 +868,7 @@ func TestNetworkUnavailableNodeDaemonLaunchesPod(t *testing.T) {
 // DaemonSets not take any actions when being deleted
 func TestDontDoAnythingIfBeingDeleted(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			podSpec := resourcePodSpec("not-too-much-mem", "75M", "75m")
 			ds := newDaemonSet("foo")
@@ -934,7 +894,7 @@ func TestDontDoAnythingIfBeingDeleted(t *testing.T) {
 
 func TestDontDoAnythingIfBeingDeletedRace(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			// Bare client says it IS deleted.
 			ds := newDaemonSet("foo")
@@ -963,15 +923,7 @@ func TestDontDoAnythingIfBeingDeletedRace(t *testing.T) {
 
 // When ScheduleDaemonSetPods is disabled, DaemonSets should not place onto nodes that would cause port conflicts.
 func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) {
-	enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
-	// Rollback feature gate.
-	defer func() {
-		if enabled {
-			setFeatureGate(t, features.ScheduleDaemonSetPods, true)
-		}
-	}()
-
-	setFeatureGate(t, features.ScheduleDaemonSetPods, false)
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, false)()
 	for _, strategy := range updateStrategies() {
 		podSpec := v1.PodSpec{
 			NodeName: "port-conflict",
@@ -1005,7 +957,7 @@ func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) {
 // Issue: https://github.com/kubernetes/kubernetes/issues/22309
 func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			podSpec := v1.PodSpec{
 				NodeName: "port-conflict",
@@ -1035,7 +987,7 @@ func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
 // DaemonSets should place onto nodes that would not cause port conflicts
 func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			podSpec1 := v1.PodSpec{
 				NodeName: "no-port-conflict",
@@ -1085,7 +1037,7 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
 	// should detect this misconfiguration and choose not to sync the DaemonSet. We should
 	// not observe a deletion of the pod on node1.
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("foo")
 			ds.Spec.UpdateStrategy = *strategy
@@ -1118,7 +1070,7 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
 // Controller should not create pods on nodes which have daemon pods, and should remove excess pods from nodes that have extra pods.
 func TestDealsWithExistingPods(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("foo")
 			ds.Spec.UpdateStrategy = *strategy
@@ -1140,7 +1092,7 @@ func TestDealsWithExistingPods(t *testing.T) {
 // Daemon with node selector should launch pods on nodes matching selector.
 func TestSelectorDaemonLaunchesPods(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			daemon := newDaemonSet("foo")
 			daemon.Spec.UpdateStrategy = *strategy
@@ -1160,7 +1112,7 @@ func TestSelectorDaemonLaunchesPods(t *testing.T) {
 // Daemon with node selector should delete pods from nodes that do not satisfy selector.
 func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("foo")
 			ds.Spec.UpdateStrategy = *strategy
@@ -1184,7 +1136,7 @@ func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) {
 // DaemonSet with node selector should launch pods on nodes matching selector, but also deal with existing pods on nodes.
 func TestSelectorDaemonDealsWithExistingPods(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("foo")
 			ds.Spec.UpdateStrategy = *strategy
@@ -1212,7 +1164,7 @@ func TestSelectorDaemonDealsWithExistingPods(t *testing.T) {
 // DaemonSet with node selector which does not match any node labels should not launch pods.
 func TestBadSelectorDaemonDoesNothing(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			manager, podControl, _, err := newTestController()
 			if err != nil {
@@ -1232,7 +1184,7 @@ func TestBadSelectorDaemonDoesNothing(t *testing.T) {
 // DaemonSet with node name should launch pod on node with corresponding name.
 func TestNameDaemonSetLaunchesPods(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("foo")
 			ds.Spec.UpdateStrategy = *strategy
@@ -1251,7 +1203,7 @@ func TestNameDaemonSetLaunchesPods(t *testing.T) {
 // DaemonSet with node name that does not exist should not launch pods.
 func TestBadNameDaemonSetDoesNothing(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("foo")
 			ds.Spec.UpdateStrategy = *strategy
@@ -1270,7 +1222,7 @@ func TestBadNameDaemonSetDoesNothing(t *testing.T) {
 // DaemonSet with node selector, and node name, matching a node, should launch a pod on the node.
 func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("foo")
 			ds.Spec.UpdateStrategy = *strategy
@@ -1291,7 +1243,7 @@ func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) {
 // DaemonSet with node selector that matches some nodes, and node name that matches a different node, should do nothing.
 func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("foo")
 			ds.Spec.UpdateStrategy = *strategy
@@ -1312,7 +1264,7 @@ func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) {
 // DaemonSet with node selector, matching some nodes, should launch pods on all the nodes.
 func TestSelectorDaemonSetLaunchesPods(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		ds := newDaemonSet("foo")
 		ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
 		manager, podControl, _, err := newTestController(ds)
@@ -1329,7 +1281,7 @@ func TestSelectorDaemonSetLaunchesPods(t *testing.T) {
 // Daemon with node affinity should launch pods on nodes matching affinity.
 func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			daemon := newDaemonSet("foo")
 			daemon.Spec.UpdateStrategy = *strategy
@@ -1365,7 +1317,7 @@ func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
 
 func TestNumberReadyStatus(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("foo")
 			ds.Spec.UpdateStrategy = *strategy
@@ -1410,7 +1362,7 @@ func TestNumberReadyStatus(t *testing.T) {
 
 func TestObservedGeneration(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("foo")
 			ds.Spec.UpdateStrategy = *strategy
@@ -1457,7 +1409,7 @@ func TestDaemonKillFailedPods(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.test, func(t *testing.T) {
 			for _, f := range []bool{true, false} {
-				setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+				defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 				for _, strategy := range updateStrategies() {
 					ds := newDaemonSet("foo")
 					ds.Spec.UpdateStrategy = *strategy
@@ -1479,7 +1431,7 @@ func TestDaemonKillFailedPods(t *testing.T) {
 // DaemonSet controller needs to backoff when killing failed pods to avoid hot looping and fighting with kubelet.
 func TestDaemonKillFailedPodsBackoff(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			t.Run(string(strategy.Type), func(t *testing.T) {
 				ds := newDaemonSet("foo")
@@ -1549,7 +1501,7 @@ func TestDaemonKillFailedPodsBackoff(t *testing.T) {
 // tolerate the nodes NoSchedule taint
 func TestNoScheduleTaintedDoesntEvicitRunningIntolerantPod(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("intolerant")
 			ds.Spec.UpdateStrategy = *strategy
@@ -1573,7 +1525,7 @@ func TestNoScheduleTaintedDoesntEvicitRunningIntolerantPod(t *testing.T) {
 // tolerate the nodes NoExecute taint
 func TestNoExecuteTaintedDoesEvicitRunningIntolerantPod(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("intolerant")
 			ds.Spec.UpdateStrategy = *strategy
@@ -1596,7 +1548,7 @@ func TestNoExecuteTaintedDoesEvicitRunningIntolerantPod(t *testing.T) {
 // DaemonSet should not launch a pod on a tainted node when the pod doesn't tolerate that taint.
 func TestTaintedNodeDaemonDoesNotLaunchIntolerantPod(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("intolerant")
 			ds.Spec.UpdateStrategy = *strategy
@@ -1618,7 +1570,7 @@ func TestTaintedNodeDaemonDoesNotLaunchIntolerantPod(t *testing.T) {
 // DaemonSet should launch a pod on a tainted node when the pod can tolerate that taint.
 func TestTaintedNodeDaemonLaunchesToleratePod(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("tolerate")
 			ds.Spec.UpdateStrategy = *strategy
@@ -1641,7 +1593,7 @@ func TestTaintedNodeDaemonLaunchesToleratePod(t *testing.T) {
 // DaemonSet should launch a pod on a not ready node with taint notReady:NoExecute.
 func TestNotReadyNodeDaemonLaunchesPod(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("simple")
 			ds.Spec.UpdateStrategy = *strategy
@@ -1666,7 +1618,7 @@ func TestNotReadyNodeDaemonLaunchesPod(t *testing.T) {
 // DaemonSet should launch a pod on an unreachable node with taint unreachable:NoExecute.
 func TestUnreachableNodeDaemonLaunchesPod(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("simple")
 			ds.Spec.UpdateStrategy = *strategy
@@ -1691,7 +1643,7 @@ func TestUnreachableNodeDaemonLaunchesPod(t *testing.T) {
 // DaemonSet should launch a pod on an untainted node when the pod has tolerations.
 func TestNodeDaemonLaunchesToleratePod(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("tolerate")
 			ds.Spec.UpdateStrategy = *strategy
@@ -1711,7 +1663,7 @@ func TestNodeDaemonLaunchesToleratePod(t *testing.T) {
 // DaemonSet should launch a pod on a not ready node with taint notReady:NoExecute.
 func TestDaemonSetRespectsTermination(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("foo")
 			ds.Spec.UpdateStrategy = *strategy
@@ -1742,12 +1694,8 @@ func setDaemonSetToleration(ds *apps.DaemonSet, tolerations []v1.Toleration) {
 // DaemonSet should launch a critical pod even when the node with OutOfDisk taints.
 // TODO(#48843) OutOfDisk taints will be removed in 1.10
 func TestTaintOutOfDiskNodeDaemonLaunchesCriticalPod(t *testing.T) {
-	enabled := utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation)
-	defer func() {
-		setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, enabled)
-	}()
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("critical")
 			ds.Spec.UpdateStrategy = *strategy
@@ -1765,12 +1713,12 @@ func TestTaintOutOfDiskNodeDaemonLaunchesCriticalPod(t *testing.T) {
 
 			// NOTE: Whether or not TaintNodesByCondition is enabled, it'll add toleration to DaemonSet pods.
 			// Without enabling critical pod annotation feature gate, we shouldn't create critical pod
-			setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, false)
+			defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, false)()
 			manager.dsStore.Add(ds)
 			syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
 
 			// With enabling critical pod annotation feature gate, we will create critical pod
-			setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, true)
+			defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, true)()
 			manager.dsStore.Add(ds)
 			syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
 		}
@@ -1779,13 +1727,8 @@ func TestTaintOutOfDiskNodeDaemonLaunchesCriticalPod(t *testing.T) {
 
 // DaemonSet should launch a pod even when the node with MemoryPressure/DiskPressure taints.
 func TestTaintPressureNodeDaemonLaunchesPod(t *testing.T) {
-	enabled := utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition)
-	defer func() {
-		setFeatureGate(t, features.TaintNodesByCondition, enabled)
-	}()
-
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("critical")
 			ds.Spec.UpdateStrategy = *strategy
@@ -1807,7 +1750,7 @@ func TestTaintPressureNodeDaemonLaunchesPod(t *testing.T) {
 			manager.nodeStore.Add(node)
 
 			// Enabling critical pod and taint nodes by condition feature gate should create critical pod
-			setFeatureGate(t, features.TaintNodesByCondition, true)
+			defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.TaintNodesByCondition, true)()
 			manager.dsStore.Add(ds)
 			syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
 		}
@@ -1816,10 +1759,7 @@ func TestTaintPressureNodeDaemonLaunchesPod(t *testing.T) {
 
 // When ScheduleDaemonSetPods is disabled, DaemonSet should launch a critical pod even when the node has insufficient free resource.
 func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {
-	enabled := utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation)
-	defer func() {
-		setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, enabled)
-	}()
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, false)()
 	for _, strategy := range updateStrategies() {
 		podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
 		ds := newDaemonSet("critical")
@@ -1839,7 +1779,7 @@ func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {
 		})
 
 		// Without enabling critical pod annotation feature gate, we shouldn't create critical pod
-		setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, false)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, false)()
 		manager.dsStore.Add(ds)
 		switch strategy.Type {
 		case apps.OnDeleteDaemonSetStrategyType:
@@ -1851,7 +1791,7 @@ func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {
 		}
 
 		// Enabling critical pod annotation feature gate should create critical pod
-		setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, true)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, true)()
 		switch strategy.Type {
 		case apps.OnDeleteDaemonSetStrategyType:
 			syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 2)
@@ -1865,11 +1805,7 @@ func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {
 
 // When ScheduleDaemonSetPods is disabled, DaemonSets should NOT launch a critical pod when there are port conflicts.
 func TestPortConflictNodeDaemonDoesNotLaunchCriticalPod(t *testing.T) {
-	enabled := utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation)
-	defer func() {
-		setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, enabled)
-	}()
-
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, false)()
 	for _, strategy := range updateStrategies() {
 		podSpec := v1.PodSpec{
 			NodeName: "port-conflict",
@@ -1889,7 +1825,7 @@ func TestPortConflictNodeDaemonDoesNotLaunchCriticalPod(t *testing.T) {
 			Spec: podSpec,
 		})
 
-		setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, true)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, true)()
 		ds := newDaemonSet("critical")
 		ds.Spec.UpdateStrategy = *strategy
 		ds.Spec.Template.Spec = podSpec
@@ -1909,7 +1845,7 @@ func setDaemonSetCritical(ds *apps.DaemonSet) {
 
 func TestNodeShouldRunDaemonPod(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		var shouldCreate, wantToRun, shouldContinueRunning bool
 		if utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) {
 			shouldCreate = true
@@ -2236,7 +2172,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 func TestUpdateNode(t *testing.T) {
 	var enqueued bool
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		cases := []struct {
 			test    string
 			newNode *v1.Node
@@ -2545,7 +2481,7 @@ func TestDeleteNoDaemonPod(t *testing.T) {
 
 func TestGetNodesToDaemonPods(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("foo")
 			ds.Spec.UpdateStrategy = *strategy
@@ -2611,7 +2547,7 @@ func TestGetNodesToDaemonPods(t *testing.T) {
 
 func TestAddNode(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		manager, _, _, err := newTestController()
 		if err != nil {
 			t.Fatalf("error creating DaemonSets controller: %v", err)
@@ -2640,7 +2576,7 @@ func TestAddNode(t *testing.T) {
 
 func TestAddPod(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			manager, _, _, err := newTestController()
 			if err != nil {
@@ -2686,7 +2622,7 @@ func TestAddPod(t *testing.T) {
 
 func TestAddPodOrphan(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			manager, _, _, err := newTestController()
 			if err != nil {
@@ -2718,7 +2654,7 @@ func TestAddPodOrphan(t *testing.T) {
 
 func TestUpdatePod(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			manager, _, _, err := newTestController()
 			if err != nil {
@@ -2768,7 +2704,7 @@ func TestUpdatePod(t *testing.T) {
 
 func TestUpdatePodOrphanSameLabels(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			manager, _, _, err := newTestController()
 
@@ -2795,7 +2731,7 @@ func TestUpdatePodOrphanSameLabels(t *testing.T) {
 
 func TestUpdatePodOrphanWithNewLabels(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			manager, _, _, err := newTestController()
 
@@ -2826,7 +2762,7 @@ func TestUpdatePodOrphanWithNewLabels(t *testing.T) {
 
 func TestUpdatePodChangeControllerRef(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			ds := newDaemonSet("foo")
 
@@ -2854,7 +2790,7 @@ func TestUpdatePodChangeControllerRef(t *testing.T) {
 
 func TestUpdatePodControllerRefRemoved(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			manager, _, _, err := newTestController()
 
@@ -2882,7 +2818,7 @@ func TestUpdatePodControllerRefRemoved(t *testing.T) {
 
 func TestDeletePod(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			manager, _, _, err := newTestController()
 
@@ -2929,7 +2865,7 @@ func TestDeletePod(t *testing.T) {
 
 func TestDeletePodOrphan(t *testing.T) {
 	for _, f := range []bool{true, false} {
-		setFeatureGate(t, features.ScheduleDaemonSetPods, f)
+		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)()
 		for _, strategy := range updateStrategies() {
 			manager, _, _, err := newTestController()
 
diff --git a/pkg/kubelet/cadvisor/BUILD b/pkg/kubelet/cadvisor/BUILD
index fb3f66a1ce..f8421a3977 100644
--- a/pkg/kubelet/cadvisor/BUILD
+++ b/pkg/kubelet/cadvisor/BUILD
@@ -60,6 +60,7 @@ go_test(
         "//pkg/kubelet/types:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
         "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
         "//vendor/github.com/google/cadvisor/metrics:go_default_library",
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
diff --git a/pkg/kubelet/cadvisor/util_test.go b/pkg/kubelet/cadvisor/util_test.go
index dd8657f934..5d83e861cd 100644
--- a/pkg/kubelet/cadvisor/util_test.go
+++ b/pkg/kubelet/cadvisor/util_test.go
@@ -19,14 +19,15 @@ limitations under the License.
 package cadvisor
 
 import (
-	"fmt"
+	"testing"
+
 	info "github.com/google/cadvisor/info/v1"
 	"github.com/stretchr/testify/assert"
 	"k8s.io/apimachinery/pkg/api/resource"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/features"
-	"testing"
 )
 
 func TestCapacityFromMachineInfo(t *testing.T) {
@@ -42,7 +43,7 @@ func TestCapacityFromMachineInfo(t *testing.T) {
 	}
 
 	// enable the features.HugePages
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.HugePages))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HugePages, true)()
 
 	resourceList := CapacityFromMachineInfo(machineInfo)
 
diff --git a/pkg/kubelet/kubelet_pods_test.go b/pkg/kubelet/kubelet_pods_test.go
index 42221bdb40..0b4e0e0278 100644
--- a/pkg/kubelet/kubelet_pods_test.go
+++ b/pkg/kubelet/kubelet_pods_test.go
@@ -34,6 +34,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 	core "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/record"
 
@@ -42,6 +43,7 @@ import (
 	// to "v1"?
 	_ "k8s.io/kubernetes/pkg/apis/core/install"
 
+	"k8s.io/kubernetes/pkg/features"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
 	"k8s.io/kubernetes/pkg/kubelet/server/portforward"
@@ -91,9 +93,7 @@ func TestDisabledSubpath(t *testing.T) {
 		},
 	}
 
-	utilfeature.DefaultFeatureGate.Set("VolumeSubpath=false")
-	defer utilfeature.DefaultFeatureGate.Set("VolumeSubpath=true")
-
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeSubpath, false)()
 	for name, test := range cases {
 		_, _, err := makeMounts(&pod, "/pod", &test.container, "fakepodname", "", "", podVolumes, fm, nil)
 		if err != nil && !test.expectError {
diff --git a/pkg/kubelet/preemption/BUILD b/pkg/kubelet/preemption/BUILD
index fa8f7ed813..8fab53ffa8 100644
--- a/pkg/kubelet/preemption/BUILD
+++ b/pkg/kubelet/preemption/BUILD
@@ -46,11 +46,13 @@ go_test(
     deps = [
         "//pkg/apis/core:go_default_library",
         "//pkg/apis/scheduling:go_default_library",
+        "//pkg/features:go_default_library",
         "//pkg/kubelet/types:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
         "//staging/src/k8s.io/client-go/tools/record:go_default_library",
     ],
 )
diff --git a/pkg/kubelet/preemption/preemption_test.go b/pkg/kubelet/preemption/preemption_test.go
index 3e85348078..e0edae5776 100644
--- a/pkg/kubelet/preemption/preemption_test.go
+++ b/pkg/kubelet/preemption/preemption_test.go
@@ -24,9 +24,11 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 	"k8s.io/client-go/tools/record"
 
 	kubeapi "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/apis/scheduling"
+	"k8s.io/kubernetes/pkg/features"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 )
@@ -89,9 +91,7 @@ func getTestCriticalPodAdmissionHandler(podProvider *fakePodProvider, podKiller
 }
 
 func TestEvictPodsToFreeRequests(t *testing.T) {
-	if err := utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=true"); err != nil {
-		t.Errorf("failed to set ExperimentalCriticalPodAnnotation to true: %v", err)
-	}
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, true)()
 	type testRun struct {
 		testName  string
 		inputPods []*v1.Pod
@@ -159,6 +159,7 @@ func BenchmarkGetPodsToPreempt(t *testing.B) {
 }
 
 func TestGetPodsToPreempt(t *testing.T) {
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, true)()
 	type testRun struct {
 		testName  string
 		preemptor *v1.Pod
diff --git a/pkg/kubelet/types/BUILD b/pkg/kubelet/types/BUILD
index bc62f58d67..cd0d4d8f44 100644
--- a/pkg/kubelet/types/BUILD
+++ b/pkg/kubelet/types/BUILD
@@ -38,9 +38,11 @@ go_test(
     ],
     embed = [":go_default_library"],
     deps = [
+        "//pkg/features:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
         "//vendor/github.com/stretchr/testify/require:go_default_library",
     ],
diff --git a/pkg/kubelet/types/pod_update_test.go b/pkg/kubelet/types/pod_update_test.go
index 4efc451e1a..73a93d7e17 100644
--- a/pkg/kubelet/types/pod_update_test.go
+++ b/pkg/kubelet/types/pod_update_test.go
@@ -24,6 +24,8 @@ import (
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
+	"k8s.io/kubernetes/pkg/features"
 )
 
 func TestGetValidatedSources(t *testing.T) {
@@ -116,9 +118,7 @@ func TestString(t *testing.T) {
 }
 
 func TestIsCriticalPod(t *testing.T) {
-	if err := utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=true"); err != nil {
-		t.Errorf("failed to set ExperimentalCriticalPodAnnotation to true: %v", err)
-	}
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, true)()
 	cases := []struct {
 		pod      v1.Pod
 		expected bool
diff --git a/pkg/scheduler/BUILD b/pkg/scheduler/BUILD
index b833e399be..98e4bbc140 100644
--- a/pkg/scheduler/BUILD
+++ b/pkg/scheduler/BUILD
@@ -66,6 +66,7 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
         "//staging/src/k8s.io/client-go/informers:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
         "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
diff --git a/pkg/scheduler/algorithm/priorities/BUILD b/pkg/scheduler/algorithm/priorities/BUILD
index 1eb3d94a69..7f287117f5 100644
--- a/pkg/scheduler/algorithm/priorities/BUILD
+++ b/pkg/scheduler/algorithm/priorities/BUILD
@@ -79,6 +79,7 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
     ],
 )
diff --git a/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go b/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go
index ae7b601b84..ccbfe45959 100644
--- a/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go
+++ b/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go
@@ -17,7 +17,6 @@ limitations under the License.
 package priorities
 
 import (
-	"fmt"
 	"reflect"
 	"testing"
 
@@ -25,6 +24,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 	"k8s.io/kubernetes/pkg/features"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
@@ -44,7 +44,7 @@ func getExistingVolumeCountForNode(pods []*v1.Pod, maxVolumes int) int {
 
 func TestBalancedResourceAllocation(t *testing.T) {
 	// Enable volumesOnNodeForBalancing to do balanced resource allocation
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
 	podwithVol1 := v1.PodSpec{
 		Containers: []v1.Container{
 			{
diff --git a/pkg/scheduler/algorithmprovider/BUILD b/pkg/scheduler/algorithmprovider/BUILD
index a4afc8fa47..c06cbd716e 100644
--- a/pkg/scheduler/algorithmprovider/BUILD
+++ b/pkg/scheduler/algorithmprovider/BUILD
@@ -18,8 +18,10 @@ go_test(
     srcs = ["plugins_test.go"],
     embed = [":go_default_library"],
     deps = [
+        "//pkg/features:go_default_library",
         "//pkg/scheduler/factory:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
     ],
 )
 
diff --git a/pkg/scheduler/algorithmprovider/plugins_test.go b/pkg/scheduler/algorithmprovider/plugins_test.go
index c2f993ea17..958ce7cb60 100644
--- a/pkg/scheduler/algorithmprovider/plugins_test.go
+++ b/pkg/scheduler/algorithmprovider/plugins_test.go
@@ -21,6 +21,8 @@ import (
 	"testing"
 
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
+	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/factory"
 )
 
@@ -90,7 +92,7 @@ func TestApplyFeatureGates(t *testing.T) {
 	}
 
 	// Apply features for algorithm providers.
-	utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=True")
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.TaintNodesByCondition, true)()
 
 	ApplyFeatureGates()
 
diff --git a/pkg/scheduler/internal/cache/BUILD b/pkg/scheduler/internal/cache/BUILD
index 6d7e3da8bc..f2273ee9ee 100644
--- a/pkg/scheduler/internal/cache/BUILD
+++ b/pkg/scheduler/internal/cache/BUILD
@@ -40,6 +40,7 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
     ],
 )
 
diff --git a/pkg/scheduler/internal/cache/cache_test.go b/pkg/scheduler/internal/cache/cache_test.go
index 8186784968..9730172515 100644
--- a/pkg/scheduler/internal/cache/cache_test.go
+++ b/pkg/scheduler/internal/cache/cache_test.go
@@ -29,6 +29,7 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 	"k8s.io/kubernetes/pkg/features"
 	priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
 	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
@@ -92,7 +93,7 @@ func newNodeInfo(requestedResource *schedulercache.Resource,
 // on node level.
 func TestAssumePodScheduled(t *testing.T) {
 	// Enable volumesOnNodeForBalancing to do balanced resource allocation
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
 	nodeName := "node"
 	testPods := []*v1.Pod{
 		makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
@@ -240,7 +241,7 @@ func assumeAndFinishBinding(cache *schedulerCache, pod *v1.Pod, assumedTime time
 // The removal will be reflected in node info.
 func TestExpirePod(t *testing.T) {
 	// Enable volumesOnNodeForBalancing to do balanced resource allocation
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
 	nodeName := "node"
 	testPods := []*v1.Pod{
 		makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),
@@ -299,7 +300,7 @@ func TestExpirePod(t *testing.T) {
 // The pod info should still exist after manually expiring unconfirmed pods.
 func TestAddPodWillConfirm(t *testing.T) {
 	// Enable volumesOnNodeForBalancing to do balanced resource allocation
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
 	nodeName := "node"
 	now := time.Now()
 	ttl := 10 * time.Second
@@ -455,7 +456,7 @@ func TestAddPodWillReplaceAssumed(t *testing.T) {
 // TestAddPodAfterExpiration tests that a pod being Add()ed will be added back if expired.
 func TestAddPodAfterExpiration(t *testing.T) {
 	// Enable volumesOnNodeForBalancing to do balanced resource allocation
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
 	nodeName := "node"
 	ttl := 10 * time.Second
 	basePod := makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}})
@@ -504,7 +505,7 @@ func TestAddPodAfterExpiration(t *testing.T) {
 // TestUpdatePod tests that a pod will be updated if added before.
 func TestUpdatePod(t *testing.T) {
 	// Enable volumesOnNodeForBalancing to do balanced resource allocation
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
 	nodeName := "node"
 	ttl := 10 * time.Second
 	testPods := []*v1.Pod{
@@ -630,7 +631,7 @@ func TestUpdatePodAndGet(t *testing.T) {
 // TestExpireAddUpdatePod test the sequence that a pod is expired, added, then updated
 func TestExpireAddUpdatePod(t *testing.T) {
 	// Enable volumesOnNodeForBalancing to do balanced resource allocation
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
 	nodeName := "node"
 	ttl := 10 * time.Second
 	testPods := []*v1.Pod{
@@ -727,7 +728,7 @@ func makePodWithEphemeralStorage(nodeName, ephemeralStorage string) *v1.Pod {
 
 func TestEphemeralStorageResource(t *testing.T) {
 	// Enable volumesOnNodeForBalancing to do balanced resource allocation
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
 	nodeName := "node"
 	podE := makePodWithEphemeralStorage(nodeName, "500")
 	tests := []struct {
@@ -772,7 +773,7 @@ func TestEphemeralStorageResource(t *testing.T) {
 // TestRemovePod tests after added pod is removed, its information should also be subtracted.
 func TestRemovePod(t *testing.T) {
 	// Enable volumesOnNodeForBalancing to do balanced resource allocation
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
 	nodeName := "node"
 	basePod := makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}})
 	tests := []struct {
@@ -1126,7 +1127,7 @@ func BenchmarkList1kNodes30kPods(b *testing.B) {
 
 func BenchmarkUpdate1kNodes30kPods(b *testing.B) {
 	// Enable volumesOnNodeForBalancing to do balanced resource allocation
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.BalanceAttachedNodeVolumes))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(nil, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
 	cache := setupCacheOf1kNodes30kPods(b)
 	b.ResetTimer()
 	for n := 0; n < b.N; n++ {
diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go
index 8a5c40f4f8..4dac1a7bd2 100644
--- a/pkg/scheduler/scheduler_test.go
+++ b/pkg/scheduler/scheduler_test.go
@@ -33,6 +33,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 	"k8s.io/client-go/informers"
 	clientsetfake "k8s.io/client-go/kubernetes/fake"
 	corelister "k8s.io/client-go/listers/core/v1"
diff --git a/plugin/pkg/admission/podtolerationrestriction/BUILD b/plugin/pkg/admission/podtolerationrestriction/BUILD
index f547df30e9..20b854020c 100644
--- a/plugin/pkg/admission/podtolerationrestriction/BUILD
+++ b/plugin/pkg/admission/podtolerationrestriction/BUILD
@@ -12,6 +12,7 @@ go_test(
     embed = [":go_default_library"],
     deps = [
         "//pkg/apis/core:go_default_library",
+        "//pkg/features:go_default_library",
         "//pkg/scheduler/api:go_default_library",
         "//pkg/util/tolerations:go_default_library",
         "//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library",
@@ -21,6 +22,7 @@ go_test(
         "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/admission/initializer:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
         "//staging/src/k8s.io/client-go/informers:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
diff --git a/plugin/pkg/admission/podtolerationrestriction/admission_test.go b/plugin/pkg/admission/podtolerationrestriction/admission_test.go
index 688c0671c8..10bfa2e6a6 100644
--- a/plugin/pkg/admission/podtolerationrestriction/admission_test.go
+++ b/plugin/pkg/admission/podtolerationrestriction/admission_test.go
@@ -27,10 +27,12 @@ import (
 	"k8s.io/apiserver/pkg/admission"
 	genericadmissioninitializer "k8s.io/apiserver/pkg/admission/initializer"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/fake"
 	api "k8s.io/kubernetes/pkg/apis/core"
+	"k8s.io/kubernetes/pkg/features"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	"k8s.io/kubernetes/pkg/util/tolerations"
 	pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction"
@@ -83,9 +85,7 @@ func TestPodAdmission(t *testing.T) {
 		},
 	}
 
-	if err := utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=true"); err != nil {
-		t.Errorf("Failed to enable TaintByCondition feature: %v.", err)
-	}
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.TaintNodesByCondition, true)()
 
 	tests := []struct {
 		pod *api.Pod
diff --git a/plugin/pkg/admission/priority/BUILD b/plugin/pkg/admission/priority/BUILD
index 0649ae1d69..97abdf9f9c 100644
--- a/plugin/pkg/admission/priority/BUILD
+++ b/plugin/pkg/admission/priority/BUILD
@@ -21,6 +21,7 @@ go_test(
         "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
         "//staging/src/k8s.io/client-go/informers:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
     ],
diff --git a/plugin/pkg/admission/priority/admission_test.go b/plugin/pkg/admission/priority/admission_test.go
index f3785f5763..8404467c82 100644
--- a/plugin/pkg/admission/priority/admission_test.go
+++ b/plugin/pkg/admission/priority/admission_test.go
@@ -17,7 +17,6 @@ limitations under the License.
 package priority
 
 import (
-	"fmt"
 	"testing"
 
 	"k8s.io/klog"
@@ -27,6 +26,7 @@ import (
 	"k8s.io/apiserver/pkg/admission"
 	"k8s.io/apiserver/pkg/authentication/user"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
 	"k8s.io/client-go/informers"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/apis/scheduling"
@@ -468,9 +468,9 @@ func TestPodAdmission(t *testing.T) {
 		},
 	}
 	// Enable PodPriority feature gate.
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodPriority, true)()
 	// Enable ExperimentalCriticalPodAnnotation feature gate.
-	utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.ExperimentalCriticalPodAnnotation))
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExperimentalCriticalPodAnnotation, true)()
 	tests := []struct {
 		name            string
 		existingClasses []*scheduling.PriorityClass