k3s/vendor/k8s.io/kubernetes/pkg/scheduler/eventhandlers.go

/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduler
import (
"fmt"
"reflect"
"strings"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic/dynamicinformer"
"k8s.io/klog/v2"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
v1helper "k8s.io/component-helpers/scheduling/corev1"
"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
"k8s.io/kubernetes/pkg/scheduler/internal/queue"
"k8s.io/kubernetes/pkg/scheduler/profile"
)
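// onStorageClassAdd handles the add event for a StorageClass. Adding a class
// with WaitForFirstConsumer binding may make pods with unbound PVCs
// schedulable, so such pods are moved back to the active or backoff queue.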
func (sched *Scheduler) onStorageClassAdd(obj interface{}) {
sc, ok := obj.(*storagev1.StorageClass)
if !ok {
klog.ErrorS(nil, "Cannot convert to *storagev1.StorageClass", "obj", obj)
return
}
// CheckVolumeBindingPred fails if pod has unbound immediate PVCs. If these
// PVCs have specified StorageClass name, creating StorageClass objects
// with late binding will cause predicates to pass, so we need to move pods
// to active queue.
// We don't need to invalidate cached results because results will not be
// cached for a pod that has unbound immediate PVCs.
if sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer {
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(queue.StorageClassAdd, nil)
}
}
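// addNodeToCache adds the node to the scheduler cache and moves unschedulable
// pods that pass the node pre-check back to the active or backoff queue.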
func (sched *Scheduler) addNodeToCache(obj interface{}) {
node, ok := obj.(*v1.Node)
if !ok {
klog.ErrorS(nil, "Cannot convert to *v1.Node", "obj", obj)
return
}
nodeInfo := sched.SchedulerCache.AddNode(node)
klog.V(3).InfoS("Add event for node", "node", klog.KObj(node))
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(queue.NodeAdd, preCheckForNode(nodeInfo))
}
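// updateNodeInCache updates the node in the scheduler cache and, if the node
// became more schedulable, requeues unschedulable pods that pass the pre-check.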
func (sched *Scheduler) updateNodeInCache(oldObj, newObj interface{}) {
oldNode, ok := oldObj.(*v1.Node)
if !ok {
klog.ErrorS(nil, "Cannot convert oldObj to *v1.Node", "oldObj", oldObj)
return
}
newNode, ok := newObj.(*v1.Node)
if !ok {
klog.ErrorS(nil, "Cannot convert newObj to *v1.Node", "newObj", newObj)
return
}
nodeInfo := sched.SchedulerCache.UpdateNode(oldNode, newNode)
// Only requeue unschedulable pods if the node became more schedulable.
if event := nodeSchedulingPropertiesChange(newNode, oldNode); event != nil {
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(*event, preCheckForNode(nodeInfo))
}
}
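// deleteNodeFromCache removes the node (possibly wrapped in a deletion
// tombstone) from the scheduler cache.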
func (sched *Scheduler) deleteNodeFromCache(obj interface{}) {
var node *v1.Node
switch t := obj.(type) {
case *v1.Node:
node = t
case cache.DeletedFinalStateUnknown:
var ok bool
node, ok = t.Obj.(*v1.Node)
if !ok {
klog.ErrorS(nil, "Cannot convert to *v1.Node", "obj", t.Obj)
return
}
default:
klog.ErrorS(nil, "Cannot convert to *v1.Node", "obj", t)
return
}
klog.V(3).InfoS("Delete event for node", "node", klog.KObj(node))
// NOTE: Updates must be written to scheduler cache before invalidating
// equivalence cache, because we could snapshot equivalence cache after the
// invalidation and then snapshot the cache itself. If the cache is
// snapshotted before updates are written, we would update equivalence
// cache with stale information which is based on snapshot of old cache.
if err := sched.SchedulerCache.RemoveNode(node); err != nil {
klog.ErrorS(err, "Scheduler cache RemoveNode failed")
}
}
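// addPodToSchedulingQueue adds a newly created, unscheduled pod to the
// scheduling queue.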
func (sched *Scheduler) addPodToSchedulingQueue(obj interface{}) {
pod := obj.(*v1.Pod)
klog.V(3).InfoS("Add event for unscheduled pod", "pod", klog.KObj(pod))
if err := sched.SchedulingQueue.Add(pod); err != nil {
utilruntime.HandleError(fmt.Errorf("unable to queue %T: %v", obj, err))
}
}
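// updatePodInSchedulingQueue propagates an update of an unscheduled pod to the
// scheduling queue, skipping no-op updates and pods already assumed in the cache.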
func (sched *Scheduler) updatePodInSchedulingQueue(oldObj, newObj interface{}) {
oldPod, newPod := oldObj.(*v1.Pod), newObj.(*v1.Pod)
// Bypass update event that carries identical objects; otherwise, a duplicated
// Pod may go through scheduling and cause unexpected behavior (see #96071).
if oldPod.ResourceVersion == newPod.ResourceVersion {
return
}
isAssumed, err := sched.SchedulerCache.IsAssumedPod(newPod)
if err != nil {
utilruntime.HandleError(fmt.Errorf("failed to check whether pod %s/%s is assumed: %v", newPod.Namespace, newPod.Name, err))
}
if isAssumed {
return
}
if err := sched.SchedulingQueue.Update(oldPod, newPod); err != nil {
utilruntime.HandleError(fmt.Errorf("unable to update %T: %v", newObj, err))
}
}
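// deletePodFromSchedulingQueue removes a deleted, unscheduled pod from the
// scheduling queue and rejects it in case it is waiting on a permit.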
func (sched *Scheduler) deletePodFromSchedulingQueue(obj interface{}) {
var pod *v1.Pod
switch t := obj.(type) {
case *v1.Pod:
pod = t
case cache.DeletedFinalStateUnknown:
var ok bool
pod, ok = t.Obj.(*v1.Pod)
if !ok {
utilruntime.HandleError(fmt.Errorf("unable to convert object %T to *v1.Pod in %T", obj, sched))
return
}
default:
utilruntime.HandleError(fmt.Errorf("unable to handle object in %T: %T", sched, obj))
return
}
klog.V(3).InfoS("Delete event for unscheduled pod", "pod", klog.KObj(pod))
if err := sched.SchedulingQueue.Delete(pod); err != nil {
utilruntime.HandleError(fmt.Errorf("unable to dequeue %T: %v", obj, err))
}
fwk, err := sched.frameworkForPod(pod)
if err != nil {
// This shouldn't happen, because we only accept pods for scheduling if they
// specify a scheduler name that matches one of the profiles.
klog.ErrorS(err, "Unable to get profile", "pod", klog.KObj(pod))
return
}
fwk.RejectWaitingPod(pod.UID)
}
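// addPodToCache adds a scheduled pod to the scheduler cache and informs the
// scheduling queue, which may move unschedulable pods that were waiting for
// this pod (e.g., via pod affinity) back to the active or backoff queue.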
func (sched *Scheduler) addPodToCache(obj interface{}) {
pod, ok := obj.(*v1.Pod)
if !ok {
klog.ErrorS(nil, "Cannot convert to *v1.Pod", "obj", obj)
return
}
klog.V(3).InfoS("Add event for scheduled pod", "pod", klog.KObj(pod))
if err := sched.SchedulerCache.AddPod(pod); err != nil {
klog.ErrorS(err, "Scheduler cache AddPod failed", "pod", klog.KObj(pod))
}
sched.SchedulingQueue.AssignedPodAdded(pod)
}
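// updatePodInCache updates a scheduled pod in the scheduler cache and informs
// the scheduling queue that an assigned pod was updated.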
func (sched *Scheduler) updatePodInCache(oldObj, newObj interface{}) {
oldPod, ok := oldObj.(*v1.Pod)
if !ok {
klog.ErrorS(nil, "Cannot convert oldObj to *v1.Pod", "oldObj", oldObj)
return
}
newPod, ok := newObj.(*v1.Pod)
if !ok {
klog.ErrorS(nil, "Cannot convert newObj to *v1.Pod", "newObj", newObj)
return
}
// A Pod delete event followed by an immediate Pod add event may be merged
// into a Pod update event. In this case, we should invalidate the old Pod, and
// then add the new Pod.
if oldPod.UID != newPod.UID {
sched.deletePodFromCache(oldObj)
sched.addPodToCache(newObj)
return
}
// NOTE: Updates must be written to scheduler cache before invalidating
// equivalence cache, because we could snapshot equivalence cache after the
// invalidation and then snapshot the cache itself. If the cache is
// snapshotted before updates are written, we would update equivalence
// cache with stale information which is based on snapshot of old cache.
if err := sched.SchedulerCache.UpdatePod(oldPod, newPod); err != nil {
klog.ErrorS(err, "Scheduler cache UpdatePod failed", "oldPod", klog.KObj(oldPod), "newPod", klog.KObj(newPod))
}
sched.SchedulingQueue.AssignedPodUpdated(newPod)
}
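// deletePodFromCache removes a scheduled pod (possibly wrapped in a deletion
// tombstone) from the scheduler cache and moves all unschedulable pods back to
// the active or backoff queue, since the deletion frees up node resources.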
func (sched *Scheduler) deletePodFromCache(obj interface{}) {
var pod *v1.Pod
switch t := obj.(type) {
case *v1.Pod:
pod = t
case cache.DeletedFinalStateUnknown:
var ok bool
pod, ok = t.Obj.(*v1.Pod)
if !ok {
klog.ErrorS(nil, "Cannot convert to *v1.Pod", "obj", t.Obj)
return
}
default:
klog.ErrorS(nil, "Cannot convert to *v1.Pod", "obj", t)
return
}
klog.V(3).InfoS("Delete event for scheduled pod", "pod", klog.KObj(pod))
// NOTE: Updates must be written to scheduler cache before invalidating
// equivalence cache, because we could snapshot equivalence cache after the
// invalidation and then snapshot the cache itself. If the cache is
// snapshotted before updates are written, we would update equivalence
// cache with stale information which is based on snapshot of old cache.
if err := sched.SchedulerCache.RemovePod(pod); err != nil {
klog.ErrorS(err, "Scheduler cache RemovePod failed", "pod", klog.KObj(pod))
}
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(queue.AssignedPodDelete, nil)
}
// assignedPod selects pods that are assigned (scheduled and running).
func assignedPod(pod *v1.Pod) bool {
return len(pod.Spec.NodeName) != 0
}
// responsibleForPod returns true if the pod has asked to be scheduled by the given scheduler.
func responsibleForPod(pod *v1.Pod, profiles profile.Map) bool {
return profiles.HandlesSchedulerName(pod.Spec.SchedulerName)
2019-04-07 17:07:55 +00:00
}
// addAllEventHandlers is a helper function used in tests and in Scheduler
// to add event handlers for various informers.
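// A typical wiring looks roughly like this (an illustrative sketch, not the
// exact upstream call site; client, dynClient, sched, and gvkMap are assumed
// to be constructed by the caller):
//
//	informerFactory := informers.NewSharedInformerFactory(client, 0)
//	dynInformerFactory := dynamicinformer.NewDynamicSharedInformerFactory(dynClient, 0)
//	addAllEventHandlers(sched, informerFactory, dynInformerFactory, gvkMap)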
func addAllEventHandlers(
sched *Scheduler,
informerFactory informers.SharedInformerFactory,
dynInformerFactory dynamicinformer.DynamicSharedInformerFactory,
gvkMap map[framework.GVK]framework.ActionType,
) {
// scheduled pod cache
informerFactory.Core().V1().Pods().Informer().AddEventHandler(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch t := obj.(type) {
case *v1.Pod:
return assignedPod(t)
case cache.DeletedFinalStateUnknown:
if pod, ok := t.Obj.(*v1.Pod); ok {
return assignedPod(pod)
}
utilruntime.HandleError(fmt.Errorf("unable to convert object %T to *v1.Pod in %T", obj, sched))
return false
default:
utilruntime.HandleError(fmt.Errorf("unable to handle object in %T: %T", sched, obj))
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: sched.addPodToCache,
UpdateFunc: sched.updatePodInCache,
DeleteFunc: sched.deletePodFromCache,
},
},
)
// unscheduled pod queue
informerFactory.Core().V1().Pods().Informer().AddEventHandler(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch t := obj.(type) {
case *v1.Pod:
return !assignedPod(t) && responsibleForPod(t, sched.Profiles)
case cache.DeletedFinalStateUnknown:
if pod, ok := t.Obj.(*v1.Pod); ok {
return !assignedPod(pod) && responsibleForPod(pod, sched.Profiles)
}
utilruntime.HandleError(fmt.Errorf("unable to convert object %T to *v1.Pod in %T", obj, sched))
return false
default:
utilruntime.HandleError(fmt.Errorf("unable to handle object in %T: %T", sched, obj))
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: sched.addPodToSchedulingQueue,
UpdateFunc: sched.updatePodInSchedulingQueue,
DeleteFunc: sched.deletePodFromSchedulingQueue,
},
},
)
informerFactory.Core().V1().Nodes().Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: sched.addNodeToCache,
UpdateFunc: sched.updateNodeInCache,
DeleteFunc: sched.deleteNodeFromCache,
},
)
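// buildEvtResHandler returns handler funcs that, for each action type set in
// at, move all unschedulable pods back to the active or backoff queue when the
// corresponding cluster event (labeled "<shortGVK><Action>") occurs.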
buildEvtResHandler := func(at framework.ActionType, gvk framework.GVK, shortGVK string) cache.ResourceEventHandlerFuncs {
funcs := cache.ResourceEventHandlerFuncs{}
if at&framework.Add != 0 {
evt := framework.ClusterEvent{Resource: gvk, ActionType: framework.Add, Label: fmt.Sprintf("%vAdd", shortGVK)}
funcs.AddFunc = func(_ interface{}) {
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(evt, nil)
}
}
if at&framework.Update != 0 {
evt := framework.ClusterEvent{Resource: gvk, ActionType: framework.Update, Label: fmt.Sprintf("%vUpdate", shortGVK)}
funcs.UpdateFunc = func(_, _ interface{}) {
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(evt, nil)
}
}
if at&framework.Delete != 0 {
evt := framework.ClusterEvent{Resource: gvk, ActionType: framework.Delete, Label: fmt.Sprintf("%vDelete", shortGVK)}
funcs.DeleteFunc = func(_ interface{}) {
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(evt, nil)
}
}
return funcs
}
for gvk, at := range gvkMap {
switch gvk {
case framework.Node, framework.Pod:
// Do nothing.
case framework.CSINode:
informerFactory.Storage().V1().CSINodes().Informer().AddEventHandler(
buildEvtResHandler(at, framework.CSINode, "CSINode"),
)
case framework.CSIDriver:
informerFactory.Storage().V1().CSIDrivers().Informer().AddEventHandler(
buildEvtResHandler(at, framework.CSIDriver, "CSIDriver"),
)
case framework.CSIStorageCapacity:
informerFactory.Storage().V1beta1().CSIStorageCapacities().Informer().AddEventHandler(
buildEvtResHandler(at, framework.CSIStorageCapacity, "CSIStorageCapacity"),
)
case framework.PersistentVolume:
// MaxPDVolumeCountPredicate: depends on the counts of PVs.
//
// PvAdd: Pods created when there are no PVs available will be stuck in the
// unschedulable queue. But unbound PVs created for static provisioning and
// delay-binding storage classes are skipped by the PV controller's dynamic
// provisioning and binding process, and will not trigger events to schedule
// the pod again. So we need to move pods to the active queue on PV add for
// this scenario.
//
// PvUpdate: Scheduler.bindVolumesWorker may fail to update assumed pod volume
// bindings due to conflicts if PVs are updated by the PV controller or other
// parties; the scheduler will then add the pod back to the unschedulable
// queue. We need to move pods to the active queue on PV update for this
// scenario.
informerFactory.Core().V1().PersistentVolumes().Informer().AddEventHandler(
buildEvtResHandler(at, framework.PersistentVolume, "Pv"),
)
case framework.PersistentVolumeClaim:
// MaxPDVolumeCountPredicate: add/update PVC will affect counts of PV when it is bound.
informerFactory.Core().V1().PersistentVolumeClaims().Informer().AddEventHandler(
buildEvtResHandler(at, framework.PersistentVolumeClaim, "Pvc"),
)
case framework.StorageClass:
if at&framework.Add != 0 {
informerFactory.Storage().V1().StorageClasses().Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: sched.onStorageClassAdd,
},
)
}
if at&framework.Update != 0 {
informerFactory.Storage().V1().StorageClasses().Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
UpdateFunc: func(_, _ interface{}) {
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(queue.StorageClassUpdate, nil)
},
},
)
}
case framework.Service:
// ServiceAffinity: affected when the selector of the service is updated.
// Also, if a new service is added, the equivalence cache will become invalid,
// since existing pods may be "captured" by this service, which changes the
// predicate result.
informerFactory.Core().V1().Services().Informer().AddEventHandler(
buildEvtResHandler(at, framework.Service, "Service"),
)
default:
// Tests may not instantiate dynInformerFactory.
if dynInformerFactory == nil {
continue
}
// A GVK string is expected to have at least three dot-separated sections:
// <kind in plural>.<version>.<group>
// Valid examples:
// - foos.v1.example.com
// - bars.v1beta1.a.b.c
// Invalid examples:
// - foos.v1 (2 sections)
// - foo.v1.example.com (the first section should be plural)
if strings.Count(string(gvk), ".") < 2 {
klog.ErrorS(nil, "incorrect event registration", "gvk", gvk)
continue
}
// Fall back to try dynamic informers.
gvr, _ := schema.ParseResourceArg(string(gvk))
dynInformer := dynInformerFactory.ForResource(*gvr).Informer()
dynInformer.AddEventHandler(
buildEvtResHandler(at, gvk, strings.Title(gvr.Resource)),
)
go dynInformer.Run(sched.StopEverything)
}
}
}
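// nodeSchedulingPropertiesChange returns the cluster event matching the first
// scheduling-relevant node property that changed between oldNode and newNode,
// or nil if nothing scheduling-relevant changed.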
func nodeSchedulingPropertiesChange(newNode *v1.Node, oldNode *v1.Node) *framework.ClusterEvent {
if nodeSpecUnschedulableChanged(newNode, oldNode) {
return &queue.NodeSpecUnschedulableChange
}
if nodeAllocatableChanged(newNode, oldNode) {
return &queue.NodeAllocatableChange
}
if nodeLabelsChanged(newNode, oldNode) {
return &queue.NodeLabelChange
}
if nodeTaintsChanged(newNode, oldNode) {
return &queue.NodeTaintChange
}
if nodeConditionsChanged(newNode, oldNode) {
return &queue.NodeConditionChange
}
return nil
}
func nodeAllocatableChanged(newNode *v1.Node, oldNode *v1.Node) bool {
return !reflect.DeepEqual(oldNode.Status.Allocatable, newNode.Status.Allocatable)
}
func nodeLabelsChanged(newNode *v1.Node, oldNode *v1.Node) bool {
return !reflect.DeepEqual(oldNode.GetLabels(), newNode.GetLabels())
}
func nodeTaintsChanged(newNode *v1.Node, oldNode *v1.Node) bool {
return !reflect.DeepEqual(newNode.Spec.Taints, oldNode.Spec.Taints)
}
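// nodeConditionsChanged compares only condition types and statuses, so that
// heartbeat-only updates (changed timestamps, reasons, or messages) are not
// treated as scheduling-relevant changes.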
func nodeConditionsChanged(newNode *v1.Node, oldNode *v1.Node) bool {
strip := func(conditions []v1.NodeCondition) map[v1.NodeConditionType]v1.ConditionStatus {
conditionStatuses := make(map[v1.NodeConditionType]v1.ConditionStatus, len(conditions))
for i := range conditions {
conditionStatuses[conditions[i].Type] = conditions[i].Status
}
return conditionStatuses
}
return !reflect.DeepEqual(strip(oldNode.Status.Conditions), strip(newNode.Status.Conditions))
}
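// nodeSpecUnschedulableChanged returns true only when the node transitioned
// from unschedulable to schedulable; the opposite transition cannot help
// pending pods.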
func nodeSpecUnschedulableChanged(newNode *v1.Node, oldNode *v1.Node) bool {
return newNode.Spec.Unschedulable != oldNode.Spec.Unschedulable && !newNode.Spec.Unschedulable
}
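// preCheckForNode returns a check that quickly filters out pods that cannot
// possibly fit the given node, so that only plausible candidates are moved
// back to the active or backoff queue on node events.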
func preCheckForNode(nodeInfo *framework.NodeInfo) queue.PreEnqueueCheck {
// In addition to the checks in kubelet (pkg/kubelet/lifecycle/predicate.go#GeneralPredicates),
// the following logic appends a taint/toleration check.
// TODO: verify if kubelet should also apply the taint/toleration check, and then unify the
// logic with kubelet and move to a shared place.
//
// Note: the following checks don't take preemption into consideration; in very
// rare cases (e.g., node resizing), "pod" may still fail a check but preemption
// helps. We deliberately chose to ignore those cases, as unschedulable pods
// will be re-queued eventually.
return func(pod *v1.Pod) bool {
if len(noderesources.Fits(pod, nodeInfo, feature.DefaultFeatureGate.Enabled(features.PodOverhead))) != 0 {
return false
}
// Ignore parsing errors for backwards compatibility.
matches, _ := nodeaffinity.GetRequiredNodeAffinity(pod).Match(nodeInfo.Node())
if !matches {
return false
}
if !nodename.Fits(pod, nodeInfo) {
return false
}
if !nodeports.Fits(pod, nodeInfo) {
return false
}
_, isUntolerated := v1helper.FindMatchingUntoleratedTaint(nodeInfo.Node().Spec.Taints, pod.Spec.Tolerations, func(t *v1.Taint) bool {
// PodToleratesNodeTaints is only interested in NoSchedule and NoExecute taints.
return t.Effect == v1.TaintEffectNoSchedule || t.Effect == v1.TaintEffectNoExecute
})
return !isUntolerated
}
}