Merge pull request #75080 from draveness/feature/get-nodelists-sync-daemonset

fix: list nodes in sync daemonset
k3s-v1.15.3
Kubernetes Prow Robot 2019-04-02 08:22:35 -07:00 committed by GitHub
commit b1186435eb
4 changed files with 21 additions and 25 deletions
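Taken together, the four files below make one refactor: syncDaemonSet now lists nodes once from the informer cache and hands the slice to manage, updateDaemonSetStatus, and rollingUpdate (and, through it, getUnavailableNumbers), instead of each helper re-listing on its own. The sketch below is a minimal illustration of that pattern, not the controller's actual code; Controller, sync, manage, and updateStatus are stand-ins for the real DaemonSetsController methods.

package daemon

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	corelisters "k8s.io/client-go/listers/core/v1"
)

// Controller stands in for DaemonSetsController; like the real controller,
// it holds an informer-backed node lister.
type Controller struct {
	nodeLister corelisters.NodeLister
}

// sync lists nodes exactly once per reconciliation and threads the same
// snapshot through every helper, instead of letting each helper re-list.
func (c *Controller) sync(ds *apps.DaemonSet) error {
	nodeList, err := c.nodeLister.List(labels.Everything())
	if err != nil {
		return fmt.Errorf("couldn't get list of nodes when syncing daemon set %#v: %v", ds, err)
	}
	if err := c.manage(ds, nodeList); err != nil {
		return err
	}
	// Status is computed from the same node set the scheduling decisions
	// above used, so the two cannot disagree about cluster membership.
	return c.updateStatus(ds, nodeList)
}

// manage and updateStatus are stubs for the real helpers, which now accept
// a node slice instead of listing on their own.
func (c *Controller) manage(ds *apps.DaemonSet, nodes []*v1.Node) error       { return nil }
func (c *Controller) updateStatus(ds *apps.DaemonSet, nodes []*v1.Node) error { return nil }

The payoff is consistency: within one pass, manage and updateStatus can no longer observe different node sets, which the separate List calls previously allowed.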

pkg/controller/daemon/BUILD

@@ -76,6 +76,7 @@ go_test(
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",

pkg/controller/daemon/daemon_controller.go

@@ -936,7 +936,7 @@ func (dsc *DaemonSetsController) podsShouldBeOnNode(
 // After figuring out which nodes should run a Pod of ds but not yet running one and
 // which nodes should not run a Pod of ds but currently running one, it calls function
 // syncNodes with a list of pods to remove and a list of nodes to run a Pod of ds.
-func (dsc *DaemonSetsController) manage(ds *apps.DaemonSet, hash string) error {
+func (dsc *DaemonSetsController) manage(ds *apps.DaemonSet, nodeList []*v1.Node, hash string) error {
 	// Find out the pods which are created for the nodes by DaemonSet.
 	nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
 	if err != nil {
@@ -945,10 +945,6 @@ func (dsc *DaemonSetsController) manage(ds *apps.DaemonSet, hash string) error {
 	// For each node, if the node is running the daemon pod but isn't supposed to, kill the daemon
 	// pod. If the node is supposed to run the daemon pod, but isn't, create the daemon pod on the node.
-	nodeList, err := dsc.nodeLister.List(labels.Everything())
-	if err != nil {
-		return fmt.Errorf("couldn't get list of nodes when syncing daemon set %#v: %v", ds, err)
-	}
 	var nodesNeedingDaemonPods, podsToDelete []string
 	var failedPodsObserved int
 	for _, node := range nodeList {
@@ -1149,18 +1145,13 @@ func storeDaemonSetStatus(dsClient unversionedapps.DaemonSetInterface, ds *apps.
 	return updateErr
 }

-func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, hash string, updateObservedGen bool) error {
+func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, nodeList []*v1.Node, hash string, updateObservedGen bool) error {
 	klog.V(4).Infof("Updating daemon set status")
 	nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
 	if err != nil {
 		return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
 	}
-	nodeList, err := dsc.nodeLister.List(labels.Everything())
-	if err != nil {
-		return fmt.Errorf("couldn't get list of nodes when updating daemon set %#v: %v", ds, err)
-	}
 	var desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable int
 	for _, node := range nodeList {
 		wantToRun, _, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
@@ -1230,6 +1221,11 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
 		return fmt.Errorf("unable to retrieve ds %v from store: %v", key, err)
 	}

+	nodeList, err := dsc.nodeLister.List(labels.Everything())
+	if err != nil {
+		return fmt.Errorf("couldn't get list of nodes when syncing daemon set %#v: %v", ds, err)
+	}
+
 	everything := metav1.LabelSelector{}
 	if reflect.DeepEqual(ds.Spec.Selector, &everything) {
 		dsc.eventRecorder.Eventf(ds, v1.EventTypeWarning, SelectingAllReason, "This daemon set is selecting all pods. A non-empty selector is required.")
@@ -1265,10 +1261,10 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
 	if !dsc.expectations.SatisfiedExpectations(dsKey) {
 		// Only update status. Don't raise observedGeneration since controller didn't process object of that generation.
-		return dsc.updateDaemonSetStatus(ds, hash, false)
+		return dsc.updateDaemonSetStatus(ds, nodeList, hash, false)
 	}

-	err = dsc.manage(ds, hash)
+	err = dsc.manage(ds, nodeList, hash)
 	if err != nil {
 		return err
 	}
@@ -1278,7 +1274,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
 	switch ds.Spec.UpdateStrategy.Type {
 	case apps.OnDeleteDaemonSetStrategyType:
 	case apps.RollingUpdateDaemonSetStrategyType:
-		err = dsc.rollingUpdate(ds, hash)
+		err = dsc.rollingUpdate(ds, nodeList, hash)
 	}
 	if err != nil {
 		return err
@@ -1290,7 +1286,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
 		return fmt.Errorf("failed to clean up revisions of DaemonSet: %v", err)
 	}

-	return dsc.updateDaemonSetStatus(ds, hash, true)
+	return dsc.updateDaemonSetStatus(ds, nodeList, hash, true)
 }

 func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *apps.DaemonSet) ([]predicates.PredicateFailureReason, *schedulernodeinfo.NodeInfo, error) {
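Inside manage, the loop over nodeList is otherwise unchanged; it now simply iterates the caller-supplied slice. A condensed sketch of that loop shape follows, with splitNodes and shouldRun as hypothetical names: the real manage decides per node via podsShouldBeOnNode and then hands the results to syncNodes, as the hunk comments above describe.

package daemon

import (
	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
)

// splitNodes is a hypothetical helper, not code from this PR: it condenses
// the decision loop inside manage. Every decision is made against the
// caller's nodeList argument; no lister is consulted here.
func splitNodes(ds *apps.DaemonSet, nodeList []*v1.Node,
	nodeToDaemonPods map[string][]*v1.Pod,
	shouldRun func(*v1.Node, *apps.DaemonSet) bool,
) (nodesNeedingDaemonPods, podsToDelete []string) {
	for _, node := range nodeList {
		pods, hasPods := nodeToDaemonPods[node.Name]
		switch {
		case shouldRun(node, ds) && !hasPods:
			// Node should run a daemon pod but has none: create one.
			nodesNeedingDaemonPods = append(nodesNeedingDaemonPods, node.Name)
		case !shouldRun(node, ds) && hasPods:
			// Node is running daemon pods it shouldn't: delete them.
			for _, pod := range pods {
				podsToDelete = append(podsToDelete, pod.Name)
			}
		}
	}
	return nodesNeedingDaemonPods, podsToDelete
}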

pkg/controller/daemon/update.go

@@ -40,14 +40,14 @@ import (
 // rollingUpdate deletes old daemon set pods making sure that no more than
 // ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable pods are unavailable
-func (dsc *DaemonSetsController) rollingUpdate(ds *apps.DaemonSet, hash string) error {
+func (dsc *DaemonSetsController) rollingUpdate(ds *apps.DaemonSet, nodeList []*v1.Node, hash string) error {
 	nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
 	if err != nil {
 		return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
 	}

 	_, oldPods := dsc.getAllDaemonSetPods(ds, nodeToDaemonPods, hash)
-	maxUnavailable, numUnavailable, err := dsc.getUnavailableNumbers(ds, nodeToDaemonPods)
+	maxUnavailable, numUnavailable, err := dsc.getUnavailableNumbers(ds, nodeList, nodeToDaemonPods)
 	if err != nil {
 		return fmt.Errorf("Couldn't get unavailable numbers: %v", err)
 	}
@@ -392,14 +392,8 @@ func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *apps.DaemonSet, nodeToD
 	return newPods, oldPods
 }

-func (dsc *DaemonSetsController) getUnavailableNumbers(ds *apps.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod) (int, int, error) {
+func (dsc *DaemonSetsController) getUnavailableNumbers(ds *apps.DaemonSet, nodeList []*v1.Node, nodeToDaemonPods map[string][]*v1.Pod) (int, int, error) {
 	klog.V(4).Infof("Getting unavailable numbers")
-	// TODO: get nodeList once in syncDaemonSet and pass it to other functions
-	nodeList, err := dsc.nodeLister.List(labels.Everything())
-	if err != nil {
-		return -1, -1, fmt.Errorf("couldn't get list of nodes during rolling update of daemon set %#v: %v", ds, err)
-	}
 	var numUnavailable, desiredNumberScheduled int
 	for i := range nodeList {
 		node := nodeList[i]
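Note that the deleted TODO ("get nodeList once in syncDaemonSet and pass it to other functions") is exactly what this commit implements. getUnavailableNumbers keeps its job of computing the max-unavailable bound and the current unavailable count; only the node source changed. As background, a hedged sketch of how such a bound is typically resolved with the intstr helper: maxUnavailableFor is a hypothetical helper, and it assumes ds.Spec.UpdateStrategy.RollingUpdate is non-nil.

package daemon

import (
	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	intstrutil "k8s.io/apimachinery/pkg/util/intstr"
)

// maxUnavailableFor resolves the rolling-update bound from the spec, where
// MaxUnavailable may be an absolute count or a percentage of the desired
// number of daemon pods. As a simplification it assumes every listed node
// should run one daemon pod.
func maxUnavailableFor(ds *apps.DaemonSet, nodeList []*v1.Node) (int, error) {
	desiredNumberScheduled := len(nodeList)
	return intstrutil.GetValueFromIntOrPercent(
		ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable,
		desiredNumberScheduled,
		true, // round up, so a small percentage still permits one pod
	)
}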

pkg/controller/daemon/update_test.go

@@ -22,6 +22,7 @@ import (
 	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/intstr"
 )
@@ -293,7 +294,11 @@ func TestGetUnavailableNumbers(t *testing.T) {
 	for _, c := range cases {
 		c.Manager.dsStore.Add(c.ds)
-		maxUnavailable, numUnavailable, err := c.Manager.getUnavailableNumbers(c.ds, c.nodeToPods)
+		nodeList, err := c.Manager.nodeLister.List(labels.Everything())
+		if err != nil {
+			t.Fatalf("error listing nodes: %v", err)
+		}
+		maxUnavailable, numUnavailable, err := c.Manager.getUnavailableNumbers(c.ds, nodeList, c.nodeToPods)
 		if err != nil && c.Err != nil {
 			if c.Err != err {
 				t.Errorf("Test case: %s. Expected error: %v but got: %v", c.name, c.Err, err)
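The updated test depends on a node lister that works without an API server. A minimal sketch of how that is commonly wired up, assuming the standard client-go fake clientset and shared informer factory rather than this package's actual test fixtures:

package daemon

import (
	"testing"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func TestListNodesFromInformerIndex(t *testing.T) {
	client := fake.NewSimpleClientset()
	factory := informers.NewSharedInformerFactory(client, 0)
	nodes := factory.Core().V1().Nodes()
	// Seed the informer's local index directly; no API server is involved,
	// so Lister().List reads straight from the in-memory cache.
	if err := nodes.Informer().GetIndexer().Add(&v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "node-0"},
	}); err != nil {
		t.Fatalf("error adding node: %v", err)
	}
	nodeList, err := nodes.Lister().List(labels.Everything())
	if err != nil {
		t.Fatalf("error listing nodes: %v", err)
	}
	if len(nodeList) != 1 {
		t.Fatalf("expected 1 node, got %d", len(nodeList))
	}
}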