Merge pull request #64954 from k82cn/k8s_61312_2

Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md.

Added unschedulable and network-unavailable tolerations.

Signed-off-by: Da K. Ma <klaus1982.cn@gmail.com>

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
part of #61312
fixes: https://github.com/kubernetes/kubernetes/issues/67606

**Release note**:

```release-note
If `TaintNodesByCondition` is enabled, the `node.kubernetes.io/unschedulable` and
`node.kubernetes.io/network-unavailable` tolerations are added automatically to DaemonSet pods.
```
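In the diff below, the controller stamps the unschedulable toleration onto every DaemonSet pod template and adds the network-unavailable toleration only when the pod uses host networking (it reads the taint keys from the scheduler's `algorithm` package constants). The following is a minimal standalone sketch of the resulting tolerations, not the controller code itself; the `daemonPodTolerations` helper is illustrative, and the keys are assumed to be the `node.kubernetes.io/*` keys named in the release note:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// daemonPodTolerations sketches the tolerations this change guarantees on a
// DaemonSet pod: unschedulable always, network-unavailable only for pods that
// use host networking. The keys are written out literally here; the controller
// takes them from the scheduler's algorithm package constants.
func daemonPodTolerations(hostNetwork bool) []v1.Toleration {
	tolerations := []v1.Toleration{{
		Key:      "node.kubernetes.io/unschedulable",
		Operator: v1.TolerationOpExists,
		Effect:   v1.TaintEffectNoSchedule,
	}}
	if hostNetwork {
		tolerations = append(tolerations, v1.Toleration{
			Key:      "node.kubernetes.io/network-unavailable",
			Operator: v1.TolerationOpExists,
			Effect:   v1.TaintEffectNoSchedule,
		})
	}
	return tolerations
}

func main() {
	for _, t := range daemonPodTolerations(true) {
		fmt.Printf("%s %s %s\n", t.Key, t.Operator, t.Effect)
	}
}
```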
Kubernetes Submit Queue 2018-09-09 19:46:37 -07:00 committed by GitHub
commit 323e1375b3
7 changed files with 181 additions and 115 deletions


@@ -16,7 +16,6 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/controller/daemon",
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/daemon/util:go_default_library",
"//pkg/features:go_default_library",


@@ -51,7 +51,6 @@ import (
"k8s.io/client-go/util/integer"
"k8s.io/client-go/util/workqueue"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/daemon/util"
"k8s.io/kubernetes/pkg/features"
@@ -1012,7 +1011,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
if err != nil {
generation = nil
}
template := util.CreatePodTemplate(ds.Spec.Template, generation, hash)
template := util.CreatePodTemplate(ds.Namespace, ds.Spec.Template, generation, hash)
// Batch the pod creates. Batch sizes start at SlowStartInitialBatchSize
// and double with each successful iteration in a kind of "slow start".
// This handles attempts to start large numbers of pods that would
@@ -1039,7 +1038,6 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
// should be no conflicting node affinity with the target node.
podTemplate.Spec.Affinity = util.ReplaceDaemonSetPodNodeNameNodeAffinity(
podTemplate.Spec.Affinity, nodesNeedingDaemonPods[ix])
podTemplate.Spec.Tolerations = util.AppendNoScheduleTolerationIfNotExist(podTemplate.Spec.Tolerations)
err = dsc.podControl.CreatePodsWithControllerRef(ds.Namespace, podTemplate,
ds, metav1.NewControllerRef(ds, controllerKind))
@@ -1289,49 +1287,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
}
func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *apps.DaemonSet) ([]algorithm.PredicateFailureReason, *schedulercache.NodeInfo, error) {
// DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
// Add infinite toleration for taint notReady:NoExecute here
// to survive taint-based eviction enforced by NodeController
// when node turns not ready.
v1helper.AddOrUpdateTolerationInPod(newPod, &v1.Toleration{
Key: algorithm.TaintNodeNotReady,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoExecute,
})
// DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
// Add infinite toleration for taint unreachable:NoExecute here
// to survive taint-based eviction enforced by NodeController
// when node turns unreachable.
v1helper.AddOrUpdateTolerationInPod(newPod, &v1.Toleration{
Key: algorithm.TaintNodeUnreachable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoExecute,
})
// According to TaintNodesByCondition, all DaemonSet pods should tolerate
// MemoryPressure and DiskPressure taints, and critical pods should additionally
// tolerate the OutOfDisk taint.
v1helper.AddOrUpdateTolerationInPod(newPod, &v1.Toleration{
Key: algorithm.TaintNodeDiskPressure,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
})
v1helper.AddOrUpdateTolerationInPod(newPod, &v1.Toleration{
Key: algorithm.TaintNodeMemoryPressure,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
})
// TODO(#48843) OutOfDisk taints will be removed in 1.10
if kubelettypes.IsCriticalPod(newPod) {
v1helper.AddOrUpdateTolerationInPod(newPod, &v1.Toleration{
Key: algorithm.TaintNodeOutOfDisk,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
})
}
util.AddOrUpdateDaemonPodTolerations(&newPod.Spec, kubelettypes.IsCriticalPod(newPod))
objects, err := dsc.podNodeIndex.ByIndex("nodeName", node.Name)
if err != nil {


@@ -437,6 +437,12 @@ func markPodReady(pod *v1.Pod) {
podutil.UpdatePodCondition(&pod.Status, &condition)
}
func setFeatureGate(t *testing.T, feature utilfeature.Feature, enabled bool) {
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", feature, enabled)); err != nil {
t.Fatalf("Failed to set FeatureGate %v to %t: %v", feature, enabled, err)
}
}
// DaemonSets without node selectors should launch pods on every node.
func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
for _, strategy := range updateStrategies() {
@@ -460,12 +466,9 @@ func TestSimpleDaemonSetScheduleDaemonSetPodsLaunchesPods(t *testing.T) {
enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
// Rollback feature gate.
defer func() {
if !enabled {
utilfeature.DefaultFeatureGate.Set("ScheduleDaemonSetPods=false")
}
setFeatureGate(t, features.ScheduleDaemonSetPods, enabled)
}()
utilfeature.DefaultFeatureGate.Set("ScheduleDaemonSetPods=true")
setFeatureGate(t, features.ScheduleDaemonSetPods, true)
nodeNum := 5
@@ -1576,6 +1579,11 @@ func setDaemonSetToleration(ds *apps.DaemonSet, tolerations []v1.Toleration) {
// DaemonSet should launch a critical pod even when the node has OutOfDisk taints.
// TODO(#48843) OutOfDisk taints will be removed in 1.10
func TestTaintOutOfDiskNodeDaemonLaunchesCriticalPod(t *testing.T) {
enabled := utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation)
defer func() {
setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, enabled)
}()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("critical")
ds.Spec.UpdateStrategy = *strategy
@@ -1593,25 +1601,24 @@ func TestTaintOutOfDiskNodeDaemonLaunchesCriticalPod(t *testing.T) {
// NOTE: Whether or not TaintNodesByCondition is enabled, tolerations are added to DaemonSet pods.
// Without enabling critical pod annotation feature gate, we shouldn't create critical pod
utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=False")
utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=True")
setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, false)
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
// With enabling critical pod annotation feature gate, we will create critical pod
utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=True")
utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=False")
setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, true)
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
// Rollback feature gate to false.
utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=False")
utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=False")
}
}
// DaemonSet should launch a pod even when the node has MemoryPressure/DiskPressure taints.
func TestTaintPressureNodeDaemonLaunchesPod(t *testing.T) {
enabled := utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition)
defer func() {
setFeatureGate(t, features.TaintNodesByCondition, enabled)
}()
for _, strategy := range updateStrategies() {
ds := newDaemonSet("critical")
ds.Spec.UpdateStrategy = *strategy
@@ -1633,17 +1640,19 @@ func TestTaintPressureNodeDaemonLaunchesPod(t *testing.T) {
manager.nodeStore.Add(node)
// Enabling critical pod and taint nodes by condition feature gate should create critical pod
utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=True")
setFeatureGate(t, features.TaintNodesByCondition, true)
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
// Rollback feature gate to false.
utilfeature.DefaultFeatureGate.Set("TaintNodesByCondition=False")
}
}
// DaemonSet should launch a critical pod even when the node has insufficient free resources.
func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {
enabled := utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation)
defer func() {
setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, enabled)
}()
for _, strategy := range updateStrategies() {
podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
ds := newDaemonSet("critical")
@@ -1663,7 +1672,7 @@ func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {
})
// Without enabling critical pod annotation feature gate, we shouldn't create critical pod
utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=False")
setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, false)
manager.dsStore.Add(ds)
switch strategy.Type {
case apps.OnDeleteDaemonSetStrategyType:
@@ -1675,7 +1684,7 @@ func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {
}
// Enabling critical pod annotation feature gate should create critical pod
utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=True")
setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, true)
switch strategy.Type {
case apps.OnDeleteDaemonSetStrategyType:
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 2)
@@ -1689,6 +1698,11 @@ func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {
// DaemonSets should NOT launch a critical pod when there are port conflicts.
func TestPortConflictNodeDaemonDoesNotLaunchCriticalPod(t *testing.T) {
enabled := utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation)
defer func() {
setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, enabled)
}()
for _, strategy := range updateStrategies() {
podSpec := v1.PodSpec{
NodeName: "port-conflict",
@@ -1708,7 +1722,7 @@ func TestPortConflictNodeDaemonDoesNotLaunchCriticalPod(t *testing.T) {
Spec: podSpec,
})
utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=True")
setFeatureGate(t, features.ExperimentalCriticalPodAnnotation, true)
ds := newDaemonSet("critical")
ds.Spec.UpdateStrategy = *strategy
ds.Spec.Template.Spec = podSpec


@@ -19,7 +19,6 @@ go_library(
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
],


@@ -23,7 +23,6 @@ import (
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
@@ -49,16 +48,13 @@ func GetTemplateGeneration(ds *apps.DaemonSet) (*int64, error) {
return &generation, nil
}
// CreatePodTemplate returns a copy of the provided template with an additional
// label containing the templateGeneration (for backward compatibility) and the
// hash of the provided template, and with the default daemon tolerations set.
func CreatePodTemplate(template v1.PodTemplateSpec, generation *int64, hash string) v1.PodTemplateSpec {
newTemplate := *template.DeepCopy()
// AddOrUpdateDaemonPodTolerations applies the necessary tolerations to DaemonSet Pods, e.g. node.kubernetes.io/not-ready:NoExecute.
func AddOrUpdateDaemonPodTolerations(spec *v1.PodSpec, isCritical bool) {
// DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
// Add infinite toleration for taint notReady:NoExecute here
// to survive taint-based eviction enforced by NodeController
// when node turns not ready.
v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
Key: algorithm.TaintNodeNotReady,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoExecute,
@@ -68,36 +64,67 @@ func CreatePodTemplate(template v1.PodTemplateSpec, generation *int64, hash stri
// Add infinite toleration for taint unreachable:NoExecute here
// to survive taint-based eviction enforced by NodeController
// when node turns unreachable.
v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
Key: algorithm.TaintNodeUnreachable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoExecute,
})
// According to TaintNodesByCondition feature, all DaemonSet pods should tolerate
// MemoryPressure and DiskPressure taints, and the critical pods should tolerate
// OutOfDisk taint.
v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
// MemoryPressure, DiskPressure, Unschedulable and NetworkUnavailable taints,
// and the critical pods should tolerate OutOfDisk taint.
v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
Key: algorithm.TaintNodeDiskPressure,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
})
v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
Key: algorithm.TaintNodeMemoryPressure,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
})
v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
Key: algorithm.TaintNodeUnschedulable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
})
if spec.HostNetwork {
v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
Key: algorithm.TaintNodeNetworkUnavailable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
})
}
// TODO(#48843) OutOfDisk taints will be removed in 1.10
if utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) &&
kubelettypes.IsCritical(newTemplate.Namespace, newTemplate.Annotations) {
v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
if isCritical {
v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
Key: algorithm.TaintNodeOutOfDisk,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoExecute,
})
v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
Key: algorithm.TaintNodeOutOfDisk,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
})
}
}
// CreatePodTemplate returns a copy of the provided template with an additional
// label containing the templateGeneration (for backward compatibility) and the
// hash of the provided template, and with the default daemon tolerations set.
func CreatePodTemplate(ns string, template v1.PodTemplateSpec, generation *int64, hash string) v1.PodTemplateSpec {
newTemplate := *template.DeepCopy()
// TODO(k82cn): when removing the CriticalPod feature, also remove the 'ns' parameter.
isCritical := utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) &&
kubelettypes.IsCritical(ns, newTemplate.Annotations)
AddOrUpdateDaemonPodTolerations(&newTemplate.Spec, isCritical)
if newTemplate.ObjectMeta.Labels == nil {
newTemplate.ObjectMeta.Labels = make(map[string]string)
@@ -185,31 +212,6 @@ func ReplaceDaemonSetPodNodeNameNodeAffinity(affinity *v1.Affinity, nodename str
return affinity
}
// AppendNoScheduleTolerationIfNotExist appends the unschedulable toleration to `.spec` if it does not exist;
// otherwise, `.spec.tolerations` is left unchanged.
func AppendNoScheduleTolerationIfNotExist(tolerations []v1.Toleration) []v1.Toleration {
unschedulableToleration := v1.Toleration{
Key: algorithm.TaintNodeUnschedulable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}
unschedulableTaintExist := false
for _, t := range tolerations {
if apiequality.Semantic.DeepEqual(t, unschedulableToleration) {
unschedulableTaintExist = true
break
}
}
if !unschedulableTaintExist {
tolerations = append(tolerations, unschedulableToleration)
}
return tolerations
}
// GetTargetNodeName gets the target node name of DaemonSet pods. If `.spec.NodeName` is not empty,
// it is returned; otherwise, the node name is retrieved from the NodeAffinity of pending pods. An error
// is returned if the node name cannot be obtained from either `.spec.NodeName` or NodeAffinity.
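The tolerations that the controller previously built inline in `simulate` and `CreatePodTemplate` now come from the shared `AddOrUpdateDaemonPodTolerations` helper above, which calls `v1helper.AddOrUpdateTolerationInPodSpec` so that repeated syncs do not accumulate duplicate entries. A standalone sketch of that add-or-update idea, assuming a simplified match on key and effect (the `addOrUpdateToleration` helper below is illustrative, not the in-tree function):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// addOrUpdateToleration is an illustrative stand-in for the in-tree
// v1helper.AddOrUpdateTolerationInPodSpec: it replaces a toleration with a
// matching key and effect, or appends it, keeping repeated syncs idempotent.
func addOrUpdateToleration(spec *v1.PodSpec, t v1.Toleration) {
	for i := range spec.Tolerations {
		if spec.Tolerations[i].Key == t.Key && spec.Tolerations[i].Effect == t.Effect {
			spec.Tolerations[i] = t
			return
		}
	}
	spec.Tolerations = append(spec.Tolerations, t)
}

func main() {
	spec := &v1.PodSpec{}
	unschedulable := v1.Toleration{
		Key:      "node.kubernetes.io/unschedulable",
		Operator: v1.TolerationOpExists,
		Effect:   v1.TaintEffectNoSchedule,
	}
	// Applying the same toleration twice leaves exactly one entry.
	addOrUpdateToleration(spec, unschedulable)
	addOrUpdateToleration(spec, unschedulable)
	fmt.Println(len(spec.Tolerations)) // prints 1
}
```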


@@ -154,7 +154,7 @@ func TestCreatePodTemplate(t *testing.T) {
}
for _, test := range tests {
podTemplateSpec := v1.PodTemplateSpec{}
newPodTemplate := CreatePodTemplate(podTemplateSpec, test.templateGeneration, test.hash)
newPodTemplate := CreatePodTemplate("", podTemplateSpec, test.templateGeneration, test.hash)
val, exists := newPodTemplate.ObjectMeta.Labels[extensions.DaemonSetTemplateGenerationKey]
if !exists || val != fmt.Sprint(*test.templateGeneration) {
t.Errorf("Expected podTemplateSpec to have generation label value: %d, got: %s", *test.templateGeneration, val)


@@ -94,6 +94,9 @@ func setupScheduler(
return
}
// Enable Features.
algorithmprovider.ApplyFeatureGates()
schedulerConfigFactory := factory.NewConfigFactory(&factory.ConfigFactoryArgs{
SchedulerName: v1.DefaultSchedulerName,
Client: cs,
@@ -297,7 +300,8 @@ func validateDaemonSetPodsAndMarkReady(
podClient corev1typed.PodInterface,
podInformer cache.SharedIndexInformer,
numberPods int,
t *testing.T) {
t *testing.T,
) {
if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
objects := podInformer.GetIndexer().List()
if len(objects) != numberPods {
@@ -484,11 +488,15 @@ func forEachFeatureGate(t *testing.T, tf func(t *testing.T)) {
func() {
enabled := utilfeature.DefaultFeatureGate.Enabled(fg)
defer func() {
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled))
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled)); err != nil {
t.Fatalf("Failed to set FeatureGate %v to %t", fg, enabled)
}
}()
for _, f := range []bool{true, false} {
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f))
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f)); err != nil {
t.Fatalf("Failed to set FeatureGate %v to %t", fg, f)
}
t.Run(fmt.Sprintf("%v (%t)", fg, f), tf)
}
}()
@@ -739,11 +747,15 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
func TestInsufficientCapacityNodeWhenScheduleDaemonSetPodsEnabled(t *testing.T) {
enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
defer func() {
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t",
features.ScheduleDaemonSetPods, enabled))
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t",
features.ScheduleDaemonSetPods, enabled)); err != nil {
t.Fatalf("Failed to set FeatureGate %v to %t", features.ScheduleDaemonSetPods, enabled)
}
}()
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.ScheduleDaemonSetPods, true))
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.ScheduleDaemonSetPods, true)); err != nil {
t.Fatalf("Failed to set FeatureGate %v to %t", features.ScheduleDaemonSetPods, true)
}
forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
server, closeFn, dc, informers, clientset := setup(t)
@@ -980,3 +992,87 @@ func TestTaintedNode(t *testing.T) {
})
})
}
// TestUnschedulableNodeDaemonDoesLaunchPod tests that DaemonSet Pods can still be scheduled
// to unschedulable nodes when TaintNodesByCondition is enabled.
func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) {
enabledTaint := utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition)
defer func() {
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t",
features.TaintNodesByCondition, enabledTaint)); err != nil {
t.Fatalf("Failed to set FeatureGate %v to %t", features.TaintNodesByCondition, enabledTaint)
}
}()
if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", features.TaintNodesByCondition, true)); err != nil {
t.Fatalf("Failed to set FeatureGate %v to %t", features.TaintNodesByCondition, true)
}
forEachFeatureGate(t, func(t *testing.T) {
forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
server, closeFn, dc, informers, clientset := setup(t)
defer closeFn()
ns := framework.CreateTestingNamespace("daemonset-unschedulable-test", server, t)
defer framework.DeleteTestingNamespace(ns, server, t)
dsClient := clientset.AppsV1().DaemonSets(ns.Name)
podClient := clientset.CoreV1().Pods(ns.Name)
nodeClient := clientset.CoreV1().Nodes()
podInformer := informers.Core().V1().Pods().Informer()
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
go dc.Run(5, stopCh)
// Start Scheduler
setupScheduler(t, clientset, informers, stopCh)
ds := newDaemonSet("foo", ns.Name)
ds.Spec.UpdateStrategy = *strategy
ds.Spec.Template.Spec.HostNetwork = true
_, err := dsClient.Create(ds)
if err != nil {
t.Fatalf("Failed to create DaemonSet: %v", err)
}
defer cleanupDaemonSets(t, clientset, ds)
// Creates unschedulable node.
node := newNode("unschedulable-node", nil)
node.Spec.Unschedulable = true
node.Spec.Taints = []v1.Taint{
{
Key: algorithm.TaintNodeUnschedulable,
Effect: v1.TaintEffectNoSchedule,
},
}
_, err = nodeClient.Create(node)
if err != nil {
t.Fatalf("Failed to create node: %v", err)
}
// Creates network-unavailable node.
nodeNU := newNode("network-unavailable-node", nil)
nodeNU.Status.Conditions = []v1.NodeCondition{
{Type: v1.NodeReady, Status: v1.ConditionFalse},
{Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue},
}
nodeNU.Spec.Taints = []v1.Taint{
{
Key: algorithm.TaintNodeNetworkUnavailable,
Effect: v1.TaintEffectNoSchedule,
},
}
_, err = nodeClient.Create(nodeNU)
if err != nil {
t.Fatalf("Failed to create node: %v", err)
}
validateDaemonSetPodsAndMarkReady(podClient, podInformer, 2, t)
validateDaemonSetStatus(dsClient, ds.Name, 2, t)
})
})
}