mirror of https://github.com/k3s-io/k3s
Fix a bug checking DaemonSet pods are updated in e2e test
parent 09d896bfd0
commit 9bd720b39e
@@ -660,20 +660,25 @@ func checkDaemonPodOnNodes(f *framework.Framework, ds *extensions.DaemonSet, nod
 func checkRunningOnAllNodes(f *framework.Framework, ds *extensions.DaemonSet) func() (bool, error) {
 	return func() (bool, error) {
-		nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
-		framework.ExpectNoError(err)
-		nodeNames := make([]string, 0)
-		for _, node := range nodeList.Items {
-			if !canScheduleOnNode(node, ds) {
-				framework.Logf("DaemonSet pods can't tolerate node %s with taints %+v, skip checking this node", node.Name, node.Spec.Taints)
-				continue
-			}
-			nodeNames = append(nodeNames, node.Name)
-		}
+		nodeNames := schedulableNodes(f.ClientSet, ds)
 		return checkDaemonPodOnNodes(f, ds, nodeNames)()
 	}
 }
 
+func schedulableNodes(c clientset.Interface, ds *extensions.DaemonSet) []string {
+	nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
+	framework.ExpectNoError(err)
+	nodeNames := make([]string, 0)
+	for _, node := range nodeList.Items {
+		if !canScheduleOnNode(node, ds) {
+			framework.Logf("DaemonSet pods can't tolerate node %s with taints %+v, skip checking this node", node.Name, node.Spec.Taints)
+			continue
+		}
+		nodeNames = append(nodeNames, node.Name)
+	}
+	return nodeNames
+}
+
 func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]string, newImage string) func() (bool, error) {
 	return func() (bool, error) {
 		pods := listDaemonPods(c, ns, label)
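The first hunk is a pure refactor: the node-filtering loop inlined in checkRunningOnAllNodes moves into the new schedulableNodes helper so the image-update check in the next hunks can reuse it. A minimal standalone sketch of that helper's shape, using a placeholder node type rather than the real client-go and framework types:

package main

import "fmt"

// Placeholder stand-in for corev1.Node; the real code calls
// canScheduleOnNode(node, ds) to test the DaemonSet's tolerations
// against the node's taints.
type node struct {
	name        string
	schedulable bool
}

// schedulableNodes mirrors the extracted helper: collect the names of
// the nodes the DaemonSet's pods are able to run on, skipping the rest.
func schedulableNodes(nodes []node) []string {
	names := make([]string, 0)
	for _, n := range nodes {
		if !n.schedulable {
			continue // e.g. the pods can't tolerate this node's taints
		}
		names = append(names, n.name)
	}
	return names
}

func main() {
	nodes := []node{{"node-a", true}, {"node-b", false}, {"node-c", true}}
	fmt.Println(schedulableNodes(nodes)) // [node-a node-c]
}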
@@ -724,15 +729,16 @@ func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *extensions.D
 	pods := podList.Items
 
 	unavailablePods := 0
-	allImagesUpdated := true
+	nodesToUpdatedPodCount := make(map[string]int)
 	for _, pod := range pods {
 		if !metav1.IsControlledBy(&pod, ds) {
 			continue
 		}
 		podImage := pod.Spec.Containers[0].Image
 		if podImage != image {
-			allImagesUpdated = false
 			framework.Logf("Wrong image for pod: %s. Expected: %s, got: %s.", pod.Name, image, podImage)
+		} else {
+			nodesToUpdatedPodCount[pod.Spec.NodeName] += 1
 		}
 		if !podutil.IsPodAvailable(&pod, ds.Spec.MinReadySeconds, metav1.Now()) {
 			framework.Logf("Pod %s is not available", pod.Name)
@@ -742,8 +748,12 @@ func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *extensions.D
 	if unavailablePods > maxUnavailable {
 		return false, fmt.Errorf("number of unavailable pods: %d is greater than maxUnavailable: %d", unavailablePods, maxUnavailable)
 	}
-	if !allImagesUpdated {
-		return false, nil
+	// Make sure every daemon pod on the node has been updated
+	nodeNames := schedulableNodes(c, ds)
+	for _, node := range nodeNames {
+		if nodesToUpdatedPodCount[node] == 0 {
+			return false, nil
+		}
 	}
 	return true, nil
 }
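The last two hunks carry the actual fix. The old allImagesUpdated flag only noticed pods that were still running the wrong image; a schedulable node whose old pod had been deleted but whose replacement had not yet been created contributed no pod at all, so the check could report the update as complete too early. The new code counts updated pods per node and succeeds only when every schedulable node has at least one. A minimal sketch of that decision logic (allNodesUpdated is a hypothetical name; plain values stand in for the framework types):

package main

import "fmt"

// allNodesUpdated mirrors the corrected check: the rollout counts as done
// only when every schedulable node has at least one pod running the new image.
func allNodesUpdated(schedulable []string, updatedPodsPerNode map[string]int) bool {
	for _, name := range schedulable {
		if updatedPodsPerNode[name] == 0 {
			return false // no updated daemon pod on this node yet
		}
	}
	return true
}

func main() {
	schedulable := []string{"node-a", "node-b"}

	// node-b's old pod is gone and its replacement hasn't started, so no
	// pod reports a wrong image; the old allImagesUpdated flag would have
	// stayed true here and passed the check prematurely.
	counts := map[string]int{"node-a": 1}
	fmt.Println(allNodesUpdated(schedulable, counts)) // false

	counts["node-b"] = 1
	fmt.Println(allNodesUpdated(schedulable, counts)) // true
}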