fix test failure and delete unused code

Remove the template-generation handling that was left behind when the API
was updated in 22fb5c4762.

Also clean up the many helper methods that are no longer referenced.
pull/6/head
Davanum Srinivas 2018-03-03 15:25:45 -05:00
parent 3d60b3cd67
commit 4cbf397280
2 changed files with 1 addition and 135 deletions
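For context: with the move to apps/v1, a DaemonSet rollout is identified by its ControllerRevision hash, stamped on each pod under the apps.DefaultDaemonSetUniqueLabelKey label (used by checkDaemonSetPodsLabels in the final hunk), not by the deprecated pod-template-generation label that the deleted checkDaemonPodsTemplateGeneration helper read. If an equivalent assertion were ever needed again, a minimal sketch reusing this file's existing listDaemonPods helper could look like the following (checkDaemonPodsRevisionHash is an illustrative name, not part of this commit):

func checkDaemonPodsRevisionHash(c clientset.Interface, ns string, label map[string]string, wantHash string) error {
    // List the daemon pods via the test file's existing helper.
    pods := listDaemonPods(c, ns, label)
    for _, pod := range pods.Items {
        // Compare against the revision hash the controller stamps on each pod.
        if got := pod.Labels[apps.DefaultDaemonSetUniqueLabelKey]; got != wantHash {
            return fmt.Errorf("expected pod %s/%s revision hash %q, but got %q", pod.Namespace, pod.Name, wantHash, got)
        }
    }
    return nil
}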

test/e2e/apps/BUILD

@@ -29,7 +29,6 @@ go_library(
         "//pkg/apis/core:go_default_library",
         "//pkg/apis/extensions:go_default_library",
         "//pkg/client/clientset_generated/internalclientset:go_default_library",
-        "//pkg/controller:go_default_library",
         "//pkg/controller/daemon:go_default_library",
         "//pkg/controller/deployment/util:go_default_library",
         "//pkg/controller/job:go_default_library",

test/e2e/apps/daemon_set.go

@@ -34,7 +34,6 @@ import (
     "k8s.io/kubernetes/pkg/api/legacyscheme"
     podutil "k8s.io/kubernetes/pkg/api/v1/pod"
     extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
-    "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/controller/daemon"
     "k8s.io/kubernetes/pkg/kubectl"
     "k8s.io/kubernetes/pkg/scheduler/schedulercache"
@@ -315,8 +314,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
     framework.ConformanceIt("should update pod when spec was updated and update strategy is RollingUpdate", func() {
         label := map[string]string{daemonsetNameLabel: dsName}
-        templateGeneration := int64(999)
-        framework.Logf("Creating simple daemon set %s with templateGeneration %d", dsName, templateGeneration)
+        framework.Logf("Creating simple daemon set %s", dsName)
         ds := newDaemonSet(dsName, image, label)
         ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
         ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
@@ -326,10 +324,6 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
         err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
         Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
-        By(fmt.Sprintf("Make sure all daemon pods have correct template generation %d", templateGeneration))
-        err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration))
-        Expect(err).NotTo(HaveOccurred())
         // Check history and labels
         ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
         Expect(err).NotTo(HaveOccurred())
@@ -343,16 +337,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
         patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
         ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
         Expect(err).NotTo(HaveOccurred())
-        templateGeneration++
         By("Check that daemon pods images are updated.")
         err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1))
         Expect(err).NotTo(HaveOccurred())
-        By(fmt.Sprintf("Make sure all daemon pods have correct template generation %d", templateGeneration))
-        err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration))
-        Expect(err).NotTo(HaveOccurred())
         By("Check that daemon pods are still running on every node of the cluster.")
         err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
         Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
@@ -443,23 +432,6 @@ func getDaemonSetImagePatch(containerName, containerImage string) string {
     return fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name":"%s","image":"%s"}]}}}}`, containerName, containerImage)
 }
-// deleteDaemonSetAndOrphan deletes the given DaemonSet and orphans all its dependents.
-// It also checks that all dependents are orphaned, and the DaemonSet is deleted.
-func deleteDaemonSetAndOrphan(c clientset.Interface, ds *apps.DaemonSet) {
-    trueVar := true
-    deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
-    deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(ds.UID))
-    err := c.AppsV1().DaemonSets(ds.Namespace).Delete(ds.Name, deleteOptions)
-    Expect(err).NotTo(HaveOccurred())
-    err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsOrphaned(c, ds.Namespace, ds.Spec.Template.Labels))
-    Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet pods to be orphaned")
-    err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetHistoryOrphaned(c, ds.Namespace, ds.Spec.Template.Labels))
-    Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet history to be orphaned")
-    err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetDeleted(c, ds.Namespace, ds.Name))
-    Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet to be deleted")
-}
 func newDaemonSet(dsName, image string, label map[string]string) *apps.DaemonSet {
     return &apps.DaemonSet{
         ObjectMeta: metav1.ObjectMeta{
@@ -700,111 +672,6 @@ func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *apps.DaemonS
     }
 }
-func checkDaemonPodsTemplateGeneration(c clientset.Interface, ns string, label map[string]string, templateGeneration string) error {
-    pods := listDaemonPods(c, ns, label)
-    for _, pod := range pods.Items {
-        // We don't care about inactive pods
-        if !controller.IsPodActive(&pod) {
-            continue
-        }
-        podTemplateGeneration := pod.Labels[apps.DeprecatedTemplateGeneration]
-        if podTemplateGeneration != templateGeneration {
-            return fmt.Errorf("expected pod %s/%s template generation %s, but got %s", pod.Namespace, pod.Name, templateGeneration, podTemplateGeneration)
-        }
-    }
-    return nil
-}
-func checkDaemonSetDeleted(c clientset.Interface, ns, name string) func() (bool, error) {
-    return func() (bool, error) {
-        _, err := c.AppsV1().DaemonSets(ns).Get(name, metav1.GetOptions{})
-        if !apierrs.IsNotFound(err) {
-            return false, err
-        }
-        return true, nil
-    }
-}
-func checkDaemonSetPodsOrphaned(c clientset.Interface, ns string, label map[string]string) func() (bool, error) {
-    return func() (bool, error) {
-        pods := listDaemonPods(c, ns, label)
-        for _, pod := range pods.Items {
-            // This pod is orphaned only when controller ref is cleared
-            if controllerRef := metav1.GetControllerOf(&pod); controllerRef != nil {
-                return false, nil
-            }
-        }
-        return true, nil
-    }
-}
-func checkDaemonSetHistoryOrphaned(c clientset.Interface, ns string, label map[string]string) func() (bool, error) {
-    return func() (bool, error) {
-        histories := listDaemonHistories(c, ns, label)
-        for _, history := range histories.Items {
-            // This history is orphaned only when controller ref is cleared
-            if controllerRef := metav1.GetControllerOf(&history); controllerRef != nil {
-                return false, nil
-            }
-        }
-        return true, nil
-    }
-}
-func checkDaemonSetPodsAdopted(c clientset.Interface, ns string, dsUID types.UID, label map[string]string) func() (bool, error) {
-    return func() (bool, error) {
-        pods := listDaemonPods(c, ns, label)
-        for _, pod := range pods.Items {
-            // This pod is adopted only when its controller ref is update
-            if controllerRef := metav1.GetControllerOf(&pod); controllerRef == nil || controllerRef.UID != dsUID {
-                return false, nil
-            }
-        }
-        return true, nil
-    }
-}
-func checkDaemonSetHistoryAdopted(c clientset.Interface, ns string, dsUID types.UID, label map[string]string) func() (bool, error) {
-    return func() (bool, error) {
-        histories := listDaemonHistories(c, ns, label)
-        for _, history := range histories.Items {
-            // This history is adopted only when its controller ref is update
-            if controllerRef := metav1.GetControllerOf(&history); controllerRef == nil || controllerRef.UID != dsUID {
-                return false, nil
-            }
-        }
-        return true, nil
-    }
-}
-func waitDaemonSetAdoption(c clientset.Interface, ds *apps.DaemonSet, podPrefix string, podTemplateGeneration int64) {
-    ns := ds.Namespace
-    label := ds.Spec.Template.Labels
-    err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsAdopted(c, ns, ds.UID, label))
-    Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet pods to be adopted")
-    err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetHistoryAdopted(c, ns, ds.UID, label))
-    Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet history to be adopted")
-    framework.Logf("Make sure no daemon pod updated its template generation %d", podTemplateGeneration)
-    err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(podTemplateGeneration))
-    Expect(err).NotTo(HaveOccurred())
-    framework.Logf("Make sure no pods are recreated by looking at their names")
-    err = checkDaemonSetPodsName(c, ns, podPrefix, label)
-    Expect(err).NotTo(HaveOccurred())
-}
-func checkDaemonSetPodsName(c clientset.Interface, ns, prefix string, label map[string]string) error {
-    pods := listDaemonPods(c, ns, label)
-    for _, pod := range pods.Items {
-        if !strings.HasPrefix(pod.Name, prefix) {
-            return fmt.Errorf("expected pod %s name to be prefixed %q", pod.Name, prefix)
-        }
-    }
-    return nil
-}
 func checkDaemonSetPodsLabels(podList *v1.PodList, hash string) {
     for _, pod := range podList.Items {
         podHash := pod.Labels[apps.DefaultDaemonSetUniqueLabelKey]