mirror of https://github.com/k3s-io/k3s
Merge pull request #59636 from k82cn/k8s_59194_1
Automatic merge from submit-queue (batch tested with PRs 59636, 62429, 61862). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Moved sync pod on Node logic to func.

Signed-off-by: Da K. Ma <madaxa@cn.ibm.com>

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Part of #59194

**Release note**:
```release-note
None
```
commit 026212da4c
```diff
@@ -801,29 +801,20 @@ func (dsc *DaemonSetsController) resolveControllerRef(namespace string, controll
 	return ds
 }
 
-// manage manages the scheduling and running of Pods of ds on nodes.
-// After figuring out which nodes should run a Pod of ds but not yet running one and
-// which nodes should not run a Pod of ds but currently running one, it calls function
-// syncNodes with a list of pods to remove and a list of nodes to run a Pod of ds.
-func (dsc *DaemonSetsController) manage(ds *apps.DaemonSet, hash string) error {
-	// Find out which nodes are running the daemon pods controlled by ds.
-	nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
-	if err != nil {
-		return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
-	}
+// podsShouldBeOnNode figures out the DaemonSet pods to be created and deleted on the given node:
+//   - nodesNeedingDaemonPods: the pods need to start on the node
+//   - podsToDelete: the Pods need to be deleted on the node
+//   - failedPodsObserved: the number of failed pods on node
+//   - err: unexpected error
+func (dsc *DaemonSetsController) podsShouldBeOnNode(
+	node *v1.Node,
+	nodeToDaemonPods map[string][]*v1.Pod,
+	ds *apps.DaemonSet,
+) (nodesNeedingDaemonPods, podsToDelete []string, failedPodsObserved int, err error) {
 
-	// For each node, if the node is running the daemon pod but isn't supposed to, kill the daemon
-	// pod. If the node is supposed to run the daemon pod, but isn't, create the daemon pod on the node.
-	nodeList, err := dsc.nodeLister.List(labels.Everything())
-	if err != nil {
-		return fmt.Errorf("couldn't get list of nodes when syncing daemon set %#v: %v", ds, err)
-	}
-	var nodesNeedingDaemonPods, podsToDelete []string
-	var failedPodsObserved int
-	for _, node := range nodeList {
-		wantToRun, shouldSchedule, shouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(node, ds)
-		if err != nil {
-			continue
-		}
+	wantToRun, shouldSchedule, shouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(node, ds)
+	if err != nil {
+		return
+	}
 
-		daemonPods, exists := nodeToDaemonPods[node.Name]
+	daemonPods, exists := nodeToDaemonPods[node.Name]
```
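One behavioral detail is visible in the hunk above: `manage` used to `continue` to the next node when `nodeShouldRunDaemonPod` failed, whereas `podsShouldBeOnNode` declares named results and uses a bare `return`, handing the non-nil `err` back to the caller (which still decides to skip the node). A minimal, self-contained sketch of that named-result, bare-return style; `splitEvenOdd` is a hypothetical stand-in, not code from this PR:

```go
package main

import (
	"errors"
	"fmt"
)

// splitEvenOdd mirrors podsShouldBeOnNode's signature style: named result
// parameters plus a bare return, so the error path yields the zero-value
// slices and the non-nil err without restating every result.
func splitEvenOdd(n int) (evens, odds []int, err error) {
	if n < 0 {
		err = errors.New("n must be non-negative")
		return // returns (nil, nil, err)
	}
	for i := 0; i < n; i++ {
		if i%2 == 0 {
			evens = append(evens, i)
		} else {
			odds = append(odds, i)
		}
	}
	return evens, odds, nil
}

func main() {
	evens, odds, err := splitEvenOdd(5)
	fmt.Println(evens, odds, err) // [0 2 4] [1 3] <nil>
}
```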
```diff
@@ -870,6 +861,40 @@ func (dsc *DaemonSetsController) manage(ds *apps.DaemonSet, hash string) error {
 			podsToDelete = append(podsToDelete, pod.Name)
 		}
 	}
 
+	return nodesNeedingDaemonPods, podsToDelete, failedPodsObserved, nil
+}
+
+// manage manages the scheduling and running of Pods of ds on nodes.
+// After figuring out which nodes should run a Pod of ds but not yet running one and
+// which nodes should not run a Pod of ds but currently running one, it calls function
+// syncNodes with a list of pods to remove and a list of nodes to run a Pod of ds.
+func (dsc *DaemonSetsController) manage(ds *apps.DaemonSet, hash string) error {
+	// Find out which nodes are running the daemon pods controlled by ds.
+	nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
+	if err != nil {
+		return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
+	}
+
+	// For each node, if the node is running the daemon pod but isn't supposed to, kill the daemon
+	// pod. If the node is supposed to run the daemon pod, but isn't, create the daemon pod on the node.
+	nodeList, err := dsc.nodeLister.List(labels.Everything())
+	if err != nil {
+		return fmt.Errorf("couldn't get list of nodes when syncing daemon set %#v: %v", ds, err)
+	}
+	var nodesNeedingDaemonPods, podsToDelete []string
+	var failedPodsObserved int
+	for _, node := range nodeList {
+		nodesNeedingDaemonPodsOnNode, podsToDeleteOnNode, failedPodsObservedOnNode, err := dsc.podsShouldBeOnNode(
+			node, nodeToDaemonPods, ds)
+
+		if err != nil {
+			continue
+		}
+
+		nodesNeedingDaemonPods = append(nodesNeedingDaemonPods, nodesNeedingDaemonPodsOnNode...)
+		podsToDelete = append(podsToDelete, podsToDeleteOnNode...)
+		failedPodsObserved += failedPodsObservedOnNode
+	}
 
 	// Label new pods using the hash label value of the current history when creating them
```
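Taken together, the two hunks are a textbook extract-function refactor: the per-node decision logic moves into `podsShouldBeOnNode`, and `manage`'s loop shrinks to calling the helper once per node and merging its results. A minimal sketch of that call-and-aggregate shape, using hypothetical names (`item`, `decideForNode`) rather than code from this PR:

```go
package main

import "fmt"

// item is a stand-in for a node; decideForNode is a stand-in for
// podsShouldBeOnNode. Both names are hypothetical.
type item struct {
	name    string
	healthy bool
}

// decideForNode makes the per-item decision and reports failures, the role
// podsShouldBeOnNode now plays for a single node.
func decideForNode(it item) (toCreate, toDelete []string, failed int, err error) {
	if it.healthy {
		toCreate = append(toCreate, it.name)
	} else {
		toDelete = append(toDelete, it.name)
		failed = 1
	}
	return toCreate, toDelete, failed, nil
}

func main() {
	items := []item{{"node-a", true}, {"node-b", false}}

	// The caller's loop, like the refactored manage, only aggregates results.
	var toCreate, toDelete []string
	var failed int
	for _, it := range items {
		c, d, f, err := decideForNode(it)
		if err != nil {
			continue // same policy as manage: skip items that error
		}
		toCreate = append(toCreate, c...)
		toDelete = append(toDelete, d...)
		failed += f
	}
	fmt.Println(toCreate, toDelete, failed) // [node-a] [node-b] 1
}
```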