From f52e7ef4bf66a24b18df2f453a50922070d14c51 Mon Sep 17 00:00:00 2001 From: Kenneth Owens Date: Wed, 14 Feb 2018 10:35:38 -0800 Subject: [PATCH 1/5] Update the DaemonSet controller to use the apps/v1 API --- cmd/kube-controller-manager/app/apps.go | 21 +++ cmd/kube-controller-manager/app/extensions.go | 19 --- pkg/controller/controller_ref_manager.go | 14 +- pkg/controller/daemon/daemon_controller.go | 87 +++++----- .../daemon/daemon_controller_test.go | 152 +++++++++--------- pkg/controller/daemon/update.go | 53 +++--- pkg/controller/daemon/update_test.go | 43 +++-- pkg/controller/daemon/util/daemonset_util.go | 40 +++-- .../daemon/util/daemonset_util_test.go | 26 +-- 9 files changed, 245 insertions(+), 210 deletions(-) diff --git a/cmd/kube-controller-manager/app/apps.go b/cmd/kube-controller-manager/app/apps.go index c70a608cbc..57898b5fce 100644 --- a/cmd/kube-controller-manager/app/apps.go +++ b/cmd/kube-controller-manager/app/apps.go @@ -21,10 +21,31 @@ limitations under the License. package app import ( + "fmt" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/pkg/controller/daemon" "k8s.io/kubernetes/pkg/controller/statefulset" ) +func startDaemonSetController(ctx ControllerContext) (bool, error) { + if !ctx.AvailableResources[schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "daemonsets"}] { + return false, nil + } + dsc, err := daemon.NewDaemonSetsController( + ctx.InformerFactory.Apps().V1().DaemonSets(), + ctx.InformerFactory.Apps().V1().ControllerRevisions(), + ctx.InformerFactory.Core().V1().Pods(), + ctx.InformerFactory.Core().V1().Nodes(), + ctx.ClientBuilder.ClientOrDie("daemon-set-controller"), + ) + if err != nil { + return true, fmt.Errorf("error creating DaemonSets controller: %v", err) + } + go dsc.Run(int(ctx.ComponentConfig.ConcurrentDaemonSetSyncs), ctx.Stop) + return true, nil +} + func startStatefulSetController(ctx ControllerContext) (bool, error) { if !ctx.AvailableResources[schema.GroupVersionResource{Group: "apps", Version: "v1beta1", Resource: "statefulsets"}] { return false, nil diff --git a/cmd/kube-controller-manager/app/extensions.go b/cmd/kube-controller-manager/app/extensions.go index d369748c6a..2c30fde86b 100644 --- a/cmd/kube-controller-manager/app/extensions.go +++ b/cmd/kube-controller-manager/app/extensions.go @@ -24,29 +24,10 @@ import ( "fmt" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kubernetes/pkg/controller/daemon" "k8s.io/kubernetes/pkg/controller/deployment" "k8s.io/kubernetes/pkg/controller/replicaset" ) -func startDaemonSetController(ctx ControllerContext) (bool, error) { - if !ctx.AvailableResources[schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "daemonsets"}] { - return false, nil - } - dsc, err := daemon.NewDaemonSetsController( - ctx.InformerFactory.Extensions().V1beta1().DaemonSets(), - ctx.InformerFactory.Apps().V1beta1().ControllerRevisions(), - ctx.InformerFactory.Core().V1().Pods(), - ctx.InformerFactory.Core().V1().Nodes(), - ctx.ClientBuilder.ClientOrDie("daemon-set-controller"), - ) - if err != nil { - return true, fmt.Errorf("error creating DaemonSets controller: %v", err) - } - go dsc.Run(int(ctx.ComponentConfig.ConcurrentDaemonSetSyncs), ctx.Stop) - return true, nil -} - func startDeploymentController(ctx ControllerContext) (bool, error) { if !ctx.AvailableResources[schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "deployments"}] { return false, nil diff --git a/pkg/controller/controller_ref_manager.go 
b/pkg/controller/controller_ref_manager.go index 4f0fd93a7f..21d7aa302e 100644 --- a/pkg/controller/controller_ref_manager.go +++ b/pkg/controller/controller_ref_manager.go @@ -21,7 +21,7 @@ import ( "sync" "github.com/golang/glog" - appsv1beta1 "k8s.io/api/apps/v1beta1" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/errors" @@ -436,18 +436,18 @@ func NewControllerRevisionControllerRefManager( // If the error is nil, either the reconciliation succeeded, or no // reconciliation was necessary. The list of ControllerRevisions that you now own is // returned. -func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histories []*appsv1beta1.ControllerRevision) ([]*appsv1beta1.ControllerRevision, error) { - var claimed []*appsv1beta1.ControllerRevision +func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histories []*apps.ControllerRevision) ([]*apps.ControllerRevision, error) { + var claimed []*apps.ControllerRevision var errlist []error match := func(obj metav1.Object) bool { return m.Selector.Matches(labels.Set(obj.GetLabels())) } adopt := func(obj metav1.Object) error { - return m.AdoptControllerRevision(obj.(*appsv1beta1.ControllerRevision)) + return m.AdoptControllerRevision(obj.(*apps.ControllerRevision)) } release := func(obj metav1.Object) error { - return m.ReleaseControllerRevision(obj.(*appsv1beta1.ControllerRevision)) + return m.ReleaseControllerRevision(obj.(*apps.ControllerRevision)) } for _, h := range histories { @@ -465,7 +465,7 @@ func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histor // AdoptControllerRevision sends a patch to take control of the ControllerRevision. It returns the error if // the patching fails. -func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history *appsv1beta1.ControllerRevision) error { +func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history *apps.ControllerRevision) error { if err := m.CanAdopt(); err != nil { return fmt.Errorf("can't adopt ControllerRevision %v/%v (%v): %v", history.Namespace, history.Name, history.UID, err) } @@ -480,7 +480,7 @@ func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history // ReleaseControllerRevision sends a patch to free the ControllerRevision from the control of its controller. // It returns the error if the patching fails. 404 and 422 errors are ignored. 
-func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(history *appsv1beta1.ControllerRevision) error { +func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(history *apps.ControllerRevision) error { glog.V(2).Infof("patching ControllerRevision %s_%s to remove its controllerRef to %s/%s:%s", history.Namespace, history.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName()) deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), history.UID) diff --git a/pkg/controller/daemon/daemon_controller.go b/pkg/controller/daemon/daemon_controller.go index fe80e362c0..100b5d19f3 100644 --- a/pkg/controller/daemon/daemon_controller.go +++ b/pkg/controller/daemon/daemon_controller.go @@ -23,9 +23,8 @@ import ( "sync" "time" - apps "k8s.io/api/apps/v1beta1" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -34,16 +33,14 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" - appsinformers "k8s.io/client-go/informers/apps/v1beta1" + appsinformers "k8s.io/client-go/informers/apps/v1" coreinformers "k8s.io/client-go/informers/core/v1" - extensionsinformers "k8s.io/client-go/informers/extensions/v1beta1" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" + unversionedapps "k8s.io/client-go/kubernetes/typed/apps/v1" v1core "k8s.io/client-go/kubernetes/typed/core/v1" - unversionedextensions "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" - appslisters "k8s.io/client-go/listers/apps/v1beta1" + appslisters "k8s.io/client-go/listers/apps/v1" corelisters "k8s.io/client-go/listers/core/v1" - extensionslisters "k8s.io/client-go/listers/extensions/v1beta1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/integer" @@ -82,7 +79,7 @@ const ( ) // controllerKind contains the schema.GroupVersionKind for this controller type. -var controllerKind = extensions.SchemeGroupVersion.WithKind("DaemonSet") +var controllerKind = apps.SchemeGroupVersion.WithKind("DaemonSet") // DaemonSetsController is responsible for synchronizing DaemonSet objects stored // in the system with actual running pods. @@ -99,12 +96,12 @@ type DaemonSetsController struct { // To allow injection of syncDaemonSet for testing. syncHandler func(dsKey string) error // used for unit testing - enqueueDaemonSet func(ds *extensions.DaemonSet) - enqueueDaemonSetRateLimited func(ds *extensions.DaemonSet) + enqueueDaemonSet func(ds *apps.DaemonSet) + enqueueDaemonSetRateLimited func(ds *apps.DaemonSet) // A TTLCache of pod creates/deletes each ds expects to see expectations controller.ControllerExpectationsInterface // dsLister can list/get daemonsets from the shared informer's store - dsLister extensionslisters.DaemonSetLister + dsLister appslisters.DaemonSetLister // dsStoreSynced returns true if the daemonset store has been synced at least once. // Added as a member to the struct to allow injection for testing. 
dsStoreSynced cache.InformerSynced @@ -134,7 +131,7 @@ type DaemonSetsController struct { } // NewDaemonSetsController creates a new DaemonSetsController -func NewDaemonSetsController(daemonSetInformer extensionsinformers.DaemonSetInformer, historyInformer appsinformers.ControllerRevisionInformer, podInformer coreinformers.PodInformer, nodeInformer coreinformers.NodeInformer, kubeClient clientset.Interface) (*DaemonSetsController, error) { +func NewDaemonSetsController(daemonSetInformer appsinformers.DaemonSetInformer, historyInformer appsinformers.ControllerRevisionInformer, podInformer coreinformers.PodInformer, nodeInformer coreinformers.NodeInformer, kubeClient clientset.Interface) (*DaemonSetsController, error) { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) // TODO: remove the wrapper when all clients have moved to use the clientset. @@ -163,13 +160,13 @@ func NewDaemonSetsController(daemonSetInformer extensionsinformers.DaemonSetInfo daemonSetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { - ds := obj.(*extensions.DaemonSet) + ds := obj.(*apps.DaemonSet) glog.V(4).Infof("Adding daemon set %s", ds.Name) dsc.enqueueDaemonSet(ds) }, UpdateFunc: func(old, cur interface{}) { - oldDS := old.(*extensions.DaemonSet) - curDS := cur.(*extensions.DaemonSet) + oldDS := old.(*apps.DaemonSet) + curDS := cur.(*apps.DaemonSet) glog.V(4).Infof("Updating daemon set %s", oldDS.Name) dsc.enqueueDaemonSet(curDS) }, @@ -211,14 +208,14 @@ func NewDaemonSetsController(daemonSetInformer extensionsinformers.DaemonSetInfo } func (dsc *DaemonSetsController) deleteDaemonset(obj interface{}) { - ds, ok := obj.(*extensions.DaemonSet) + ds, ok := obj.(*apps.DaemonSet) if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj)) return } - ds, ok = tombstone.Obj.(*extensions.DaemonSet) + ds, ok = tombstone.Obj.(*apps.DaemonSet) if !ok { utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a DaemonSet %#v", obj)) return @@ -272,7 +269,7 @@ func (dsc *DaemonSetsController) processNextWorkItem() bool { return true } -func (dsc *DaemonSetsController) enqueue(ds *extensions.DaemonSet) { +func (dsc *DaemonSetsController) enqueue(ds *apps.DaemonSet) { key, err := controller.KeyFunc(ds) if err != nil { utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", ds, err)) @@ -283,7 +280,7 @@ func (dsc *DaemonSetsController) enqueue(ds *extensions.DaemonSet) { dsc.queue.Add(key) } -func (dsc *DaemonSetsController) enqueueRateLimited(ds *extensions.DaemonSet) { +func (dsc *DaemonSetsController) enqueueRateLimited(ds *apps.DaemonSet) { key, err := controller.KeyFunc(ds) if err != nil { utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", ds, err)) @@ -305,7 +302,7 @@ func (dsc *DaemonSetsController) enqueueDaemonSetAfter(obj interface{}, after ti } // getDaemonSetsForPod returns a list of DaemonSets that potentially match the pod. -func (dsc *DaemonSetsController) getDaemonSetsForPod(pod *v1.Pod) []*extensions.DaemonSet { +func (dsc *DaemonSetsController) getDaemonSetsForPod(pod *v1.Pod) []*apps.DaemonSet { sets, err := dsc.dsLister.GetPodDaemonSets(pod) if err != nil { return nil @@ -320,7 +317,7 @@ func (dsc *DaemonSetsController) getDaemonSetsForPod(pod *v1.Pod) []*extensions. // getDaemonSetsForHistory returns a list of DaemonSets that potentially // match a ControllerRevision.
-func (dsc *DaemonSetsController) getDaemonSetsForHistory(history *apps.ControllerRevision) []*extensions.DaemonSet { +func (dsc *DaemonSetsController) getDaemonSetsForHistory(history *apps.ControllerRevision) []*apps.DaemonSet { daemonSets, err := dsc.dsLister.GetHistoryDaemonSets(history) if err != nil || len(daemonSets) == 0 { return nil @@ -736,7 +733,7 @@ func (dsc *DaemonSetsController) updateNode(old, cur interface{}) { // This also reconciles ControllerRef by adopting/orphaning. // Note that returned Pods are pointers to objects in the cache. // If you want to modify one, you need to deep-copy it first. -func (dsc *DaemonSetsController) getDaemonPods(ds *extensions.DaemonSet) ([]*v1.Pod, error) { +func (dsc *DaemonSetsController) getDaemonPods(ds *apps.DaemonSet) ([]*v1.Pod, error) { selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector) if err != nil { return nil, err @@ -751,7 +748,7 @@ func (dsc *DaemonSetsController) getDaemonPods(ds *extensions.DaemonSet) ([]*v1. // If any adoptions are attempted, we should first recheck for deletion with // an uncached quorum read sometime after listing Pods (see #42639). dsNotDeleted := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) { - fresh, err := dsc.kubeClient.ExtensionsV1beta1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{}) + fresh, err := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{}) if err != nil { return nil, err } @@ -770,7 +767,7 @@ func (dsc *DaemonSetsController) getDaemonPods(ds *extensions.DaemonSet) ([]*v1. // This also reconciles ControllerRef by adopting/orphaning. // Note that returned Pods are pointers to objects in the cache. // If you want to modify one, you need to deep-copy it first. -func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *extensions.DaemonSet) (map[string][]*v1.Pod, error) { +func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *apps.DaemonSet) (map[string][]*v1.Pod, error) { claimedPods, err := dsc.getDaemonPods(ds) if err != nil { return nil, err @@ -787,7 +784,7 @@ func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *extensions.DaemonSet) // resolveControllerRef returns the controller referenced by a ControllerRef, // or nil if the ControllerRef could not be resolved to a matching controller // of the correct Kind. -func (dsc *DaemonSetsController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *extensions.DaemonSet { +func (dsc *DaemonSetsController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *apps.DaemonSet { // We can't look up by UID, so look up by Name and then verify UID. // Don't even try to look up by Name if it's the wrong Kind. if controllerRef.Kind != controllerKind.Kind { @@ -809,7 +806,7 @@ func (dsc *DaemonSetsController) resolveControllerRef(namespace string, controll // After figuring out which nodes should run a Pod of ds but are not yet running one, and // which nodes should not run a Pod of ds but are currently running one, it calls function // syncNodes with a list of pods to remove and a list of nodes to run a Pod of ds. -func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet, hash string) error { +func (dsc *DaemonSetsController) manage(ds *apps.DaemonSet, hash string) error { // Find out which nodes are running the daemon pods controlled by ds.
nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds) if err != nil { @@ -891,7 +888,7 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet, hash string) e // syncNodes deletes given pods and creates new daemon set pods on the given nodes // returns slice with errors if any -func (dsc *DaemonSetsController) syncNodes(ds *extensions.DaemonSet, podsToDelete, nodesNeedingDaemonPods []string, hash string) error { +func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nodesNeedingDaemonPods []string, hash string) error { // We need to set expectations before creating/deleting pods to avoid race conditions. dsKey, err := controller.KeyFunc(ds) if err != nil { @@ -915,7 +912,13 @@ func (dsc *DaemonSetsController) syncNodes(ds *extensions.DaemonSet, podsToDelet glog.V(4).Infof("Nodes needing daemon pods for daemon set %s: %+v, creating %d", ds.Name, nodesNeedingDaemonPods, createDiff) createWait := sync.WaitGroup{} - template := util.CreatePodTemplate(ds.Spec.Template, ds.Spec.TemplateGeneration, hash) + // If the returned error is not nil we have a parse error. + // The controller handles this via the hash. + generation, err := util.GetTemplateGeneration(ds) + if err != nil { + generation = nil + } + template := util.CreatePodTemplate(ds.Spec.Template, generation, hash) // Batch the pod creates. Batch sizes start at SlowStartInitialBatchSize // and double with each successful iteration in a kind of "slow start". // This handles attempts to start large numbers of pods that would @@ -989,7 +992,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *extensions.DaemonSet, podsToDelet return utilerrors.NewAggregate(errors) } -func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds *extensions.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable int) error { +func storeDaemonSetStatus(dsClient unversionedapps.DaemonSetInterface, ds *apps.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable int) error { if int(ds.Status.DesiredNumberScheduled) == desiredNumberScheduled && int(ds.Status.CurrentNumberScheduled) == currentNumberScheduled && int(ds.Status.NumberMisscheduled) == numberMisscheduled && @@ -1028,7 +1031,7 @@ func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds return updateErr } -func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet, hash string) error { +func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, hash string) error { glog.V(4).Infof("Updating daemon set status") nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds) if err != nil { @@ -1063,7 +1066,13 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet, numberAvailable++ } } - if util.IsPodUpdated(ds.Spec.TemplateGeneration, pod, hash) { + // If the returned error is not nil we have a parse error. + // The controller handles this via the hash.
+ generation, err := util.GetTemplateGeneration(ds) + if err != nil { + generation = nil + } + if util.IsPodUpdated(pod, hash, generation) { updatedNumberScheduled++ } } @@ -1075,7 +1084,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet, } numberUnavailable := desiredNumberScheduled - numberAvailable - err = storeDaemonSetStatus(dsc.kubeClient.ExtensionsV1beta1().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable) + err = storeDaemonSetStatus(dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable) if err != nil { return fmt.Errorf("error storing status for daemon set %#v: %v", ds, err) } @@ -1122,7 +1131,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error { if err != nil { return fmt.Errorf("failed to construct revisions of DaemonSet: %v", err) } - hash := cur.Labels[extensions.DefaultDaemonSetUniqueLabelKey] + hash := cur.Labels[apps.DefaultDaemonSetUniqueLabelKey] if ds.DeletionTimestamp != nil || !dsc.expectations.SatisfiedExpectations(dsKey) { // Only update status. @@ -1137,8 +1146,8 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error { // Process rolling updates if we're ready. if dsc.expectations.SatisfiedExpectations(dsKey) { switch ds.Spec.UpdateStrategy.Type { - case extensions.OnDeleteDaemonSetStrategyType: - case extensions.RollingUpdateDaemonSetStrategyType: + case apps.OnDeleteDaemonSetStrategyType: + case apps.RollingUpdateDaemonSetStrategyType: err = dsc.rollingUpdate(ds, hash) } if err != nil { @@ -1154,7 +1163,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error { return dsc.updateDaemonSetStatus(ds, hash) } -func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *extensions.DaemonSet) ([]algorithm.PredicateFailureReason, *schedulercache.NodeInfo, error) { +func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *apps.DaemonSet) ([]algorithm.PredicateFailureReason, *schedulercache.NodeInfo, error) { // DaemonSet pods shouldn't be deleted by NodeController in case of node problems. // Add infinite toleration for taint notReady:NoExecute here // to survive taint-based eviction enforced by NodeController @@ -1240,7 +1249,7 @@ func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *ext // * shouldContinueRunning: // Returns true when a daemonset should continue running on a node if a daemonset pod is already // running on that node. 
-func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *extensions.DaemonSet) (wantToRun, shouldSchedule, shouldContinueRunning bool, err error) { +func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps.DaemonSet) (wantToRun, shouldSchedule, shouldContinueRunning bool, err error) { newPod := NewPod(ds, node.Name) // Because these bools require an && of all their required conditions, we start @@ -1325,7 +1334,7 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten } // NewPod creates a new pod -func NewPod(ds *extensions.DaemonSet, nodeName string) *v1.Pod { +func NewPod(ds *apps.DaemonSet, nodeName string) *v1.Pod { newPod := &v1.Pod{Spec: ds.Spec.Template.Spec, ObjectMeta: ds.Spec.Template.ObjectMeta} newPod.Namespace = ds.Namespace newPod.Spec.NodeName = nodeName @@ -1363,7 +1372,7 @@ func Predicates(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo) (bool, []algorit } // byCreationTimestamp sorts a list by creation timestamp, using their names as a tie breaker. -type byCreationTimestamp []*extensions.DaemonSet +type byCreationTimestamp []*apps.DaemonSet func (o byCreationTimestamp) Len() int { return len(o) } func (o byCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } diff --git a/pkg/controller/daemon/daemon_controller_test.go b/pkg/controller/daemon/daemon_controller_test.go index 71d3d46a40..881586c9bb 100644 --- a/pkg/controller/daemon/daemon_controller_test.go +++ b/pkg/controller/daemon/daemon_controller_test.go @@ -24,8 +24,8 @@ import ( "sync" "testing" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -40,7 +40,6 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/api/testapi" podutil "k8s.io/kubernetes/pkg/api/v1/pod" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/controller" @@ -83,7 +82,7 @@ var ( }} ) -func getKey(ds *extensions.DaemonSet, t *testing.T) string { +func getKey(ds *apps.DaemonSet, t *testing.T) string { key, err := controller.KeyFunc(ds) if err != nil { @@ -92,19 +91,18 @@ func getKey(ds *extensions.DaemonSet, t *testing.T) string { return key } -func newDaemonSet(name string) *extensions.DaemonSet { +func newDaemonSet(name string) *apps.DaemonSet { two := int32(2) - return &extensions.DaemonSet{ - TypeMeta: metav1.TypeMeta{APIVersion: testapi.Extensions.GroupVersion().String()}, + return &apps.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ UID: uuid.NewUUID(), Name: name, Namespace: metav1.NamespaceDefault, }, - Spec: extensions.DaemonSetSpec{ + Spec: apps.DaemonSetSpec{ RevisionHistoryLimit: &two, - UpdateStrategy: extensions.DaemonSetUpdateStrategy{ - Type: extensions.OnDeleteDaemonSetStrategyType, + UpdateStrategy: apps.DaemonSetUpdateStrategy{ + Type: apps.OnDeleteDaemonSetStrategyType, }, Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, Template: v1.PodTemplateSpec{ @@ -127,22 +125,22 @@ func newDaemonSet(name string) *extensions.DaemonSet { } } -func newRollbackStrategy() *extensions.DaemonSetUpdateStrategy { +func newRollbackStrategy() *apps.DaemonSetUpdateStrategy { one := intstr.FromInt(1) - return &extensions.DaemonSetUpdateStrategy{ - Type: extensions.RollingUpdateDaemonSetStrategyType, - RollingUpdate: &extensions.RollingUpdateDaemonSet{MaxUnavailable: &one}, + return 
&apps.DaemonSetUpdateStrategy{ + Type: apps.RollingUpdateDaemonSetStrategyType, + RollingUpdate: &apps.RollingUpdateDaemonSet{MaxUnavailable: &one}, } } -func newOnDeleteStrategy() *extensions.DaemonSetUpdateStrategy { - return &extensions.DaemonSetUpdateStrategy{ - Type: extensions.OnDeleteDaemonSetStrategyType, +func newOnDeleteStrategy() *apps.DaemonSetUpdateStrategy { + return &apps.DaemonSetUpdateStrategy{ + Type: apps.OnDeleteDaemonSetStrategyType, } } -func updateStrategies() []*extensions.DaemonSetUpdateStrategy { - return []*extensions.DaemonSetUpdateStrategy{newOnDeleteStrategy(), newRollbackStrategy()} +func updateStrategies() []*apps.DaemonSetUpdateStrategy { + return []*apps.DaemonSetUpdateStrategy{newOnDeleteStrategy(), newRollbackStrategy()} } func newNode(name string, label map[string]string) *v1.Node { @@ -170,14 +168,14 @@ func addNodes(nodeStore cache.Store, startIndex, numNodes int, label map[string] } } -func newPod(podName string, nodeName string, label map[string]string, ds *extensions.DaemonSet) *v1.Pod { +func newPod(podName string, nodeName string, label map[string]string, ds *apps.DaemonSet) *v1.Pod { // Add hash unique label to the pod newLabels := label var podSpec v1.PodSpec // Copy pod spec from DaemonSet template, or use a default one if DaemonSet is nil if ds != nil { hash := fmt.Sprint(controller.ComputeHash(&ds.Spec.Template, ds.Status.CollisionCount)) - newLabels = labelsutil.CloneAndAddLabel(label, extensions.DefaultDaemonSetUniqueLabelKey, hash) + newLabels = labelsutil.CloneAndAddLabel(label, apps.DefaultDaemonSetUniqueLabelKey, hash) podSpec = ds.Spec.Template.Spec } else { podSpec = v1.PodSpec{ @@ -212,14 +210,14 @@ func newPod(podName string, nodeName string, label map[string]string, ds *extens return pod } -func addPods(podStore cache.Store, nodeName string, label map[string]string, ds *extensions.DaemonSet, number int) { +func addPods(podStore cache.Store, nodeName string, label map[string]string, ds *apps.DaemonSet, number int) { for i := 0; i < number; i++ { pod := newPod(fmt.Sprintf("%s-", nodeName), nodeName, label, ds) podStore.Add(pod) } } -func addFailedPods(podStore cache.Store, nodeName string, label map[string]string, ds *extensions.DaemonSet, number int) { +func addFailedPods(podStore cache.Store, nodeName string, label map[string]string, ds *apps.DaemonSet, number int) { for i := 0; i < number; i++ { pod := newPod(fmt.Sprintf("%s-", nodeName), nodeName, label, ds) pod.Status = v1.PodStatus{Phase: v1.PodFailed} @@ -299,8 +297,8 @@ func newTestController(initialObjects ...runtime.Object) (*daemonSetsController, informerFactory := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc()) dsc, err := NewDaemonSetsController( - informerFactory.Extensions().V1beta1().DaemonSets(), - informerFactory.Apps().V1beta1().ControllerRevisions(), + informerFactory.Apps().V1().DaemonSets(), + informerFactory.Apps().V1().ControllerRevisions(), informerFactory.Core().V1().Pods(), informerFactory.Core().V1().Nodes(), clientset, @@ -322,8 +320,8 @@ func newTestController(initialObjects ...runtime.Object) (*daemonSetsController, return &daemonSetsController{ dsc, - informerFactory.Extensions().V1beta1().DaemonSets().Informer().GetStore(), - informerFactory.Apps().V1beta1().ControllerRevisions().Informer().GetStore(), + informerFactory.Apps().V1().DaemonSets().Informer().GetStore(), + informerFactory.Apps().V1().ControllerRevisions().Informer().GetStore(), informerFactory.Core().V1().Pods().Informer().GetStore(), 
informerFactory.Core().V1().Nodes().Informer().GetStore(), fakeRecorder, @@ -346,7 +344,7 @@ func validateSyncDaemonSets(t *testing.T, manager *daemonSetsController, fakePod } // Make sure the ControllerRefs are correct. for _, controllerRef := range fakePodControl.ControllerRefs { - if got, want := controllerRef.APIVersion, "extensions/v1beta1"; got != want { + if got, want := controllerRef.APIVersion, "apps/v1"; got != want { t.Errorf("controllerRef.APIVersion = %q, want %q", got, want) } if got, want := controllerRef.Kind, "DaemonSet"; got != want { @@ -358,7 +356,7 @@ func validateSyncDaemonSets(t *testing.T, manager *daemonSetsController, fakePod } } -func syncAndValidateDaemonSets(t *testing.T, manager *daemonSetsController, ds *extensions.DaemonSet, podControl *fakePodControl, expectedCreates, expectedDeletes int, expectedEvents int) { +func syncAndValidateDaemonSets(t *testing.T, manager *daemonSetsController, ds *apps.DaemonSet, podControl *fakePodControl, expectedCreates, expectedDeletes int, expectedEvents int) { key, err := controller.KeyFunc(ds) if err != nil { t.Errorf("Could not get key for daemon.") @@ -368,7 +366,7 @@ func syncAndValidateDaemonSets(t *testing.T, manager *daemonSetsController, ds * } // clearExpectations copies the FakePodControl to PodStore and clears the create and delete expectations. -func clearExpectations(t *testing.T, manager *daemonSetsController, ds *extensions.DaemonSet, fakePodControl *fakePodControl) { +func clearExpectations(t *testing.T, manager *daemonSetsController, ds *apps.DaemonSet, fakePodControl *fakePodControl) { fakePodControl.Clear() key, err := controller.KeyFunc(ds) @@ -459,13 +457,13 @@ func TestSimpleDaemonSetUpdatesStatusAfterLaunchingPods(t *testing.T) { t.Fatalf("error creating DaemonSets controller: %v", err) } - var updated *extensions.DaemonSet + var updated *apps.DaemonSet clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) { if action.GetSubresource() != "status" { return false, nil, nil } if u, ok := action.(core.UpdateAction); ok { - updated = u.GetObject().(*extensions.DaemonSet) + updated = u.GetObject().(*apps.DaemonSet) } return false, nil, nil }) @@ -585,9 +583,9 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) { }) manager.dsStore.Add(ds) switch strategy.Type { - case extensions.OnDeleteDaemonSetStrategyType: + case apps.OnDeleteDaemonSetStrategyType: syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 2) - case extensions.RollingUpdateDaemonSetStrategyType: + case apps.RollingUpdateDaemonSetStrategyType: syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 3) default: t.Fatalf("unexpected UpdateStrategy %+v", strategy) @@ -615,9 +613,9 @@ func TestInsufficientCapacityNodeDaemonDoesNotUnscheduleRunningPod(t *testing.T) }) manager.dsStore.Add(ds) switch strategy.Type { - case extensions.OnDeleteDaemonSetStrategyType: + case apps.OnDeleteDaemonSetStrategyType: syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 2) - case extensions.RollingUpdateDaemonSetStrategyType: + case apps.RollingUpdateDaemonSetStrategyType: syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 3) default: t.Fatalf("unexpected UpdateStrategy %+v", strategy) @@ -1123,13 +1121,13 @@ func TestNumberReadyStatus(t *testing.T) { if err != nil { t.Fatalf("error creating DaemonSets controller: %v", err) } - var updated *extensions.DaemonSet + var updated *apps.DaemonSet clientset.PrependReactor("update", "daemonsets", func(action 
core.Action) (handled bool, ret runtime.Object, err error) { if action.GetSubresource() != "status" { return false, nil, nil } if u, ok := action.(core.UpdateAction); ok { - updated = u.GetObject().(*extensions.DaemonSet) + updated = u.GetObject().(*apps.DaemonSet) } return false, nil, nil }) @@ -1166,13 +1164,13 @@ func TestObservedGeneration(t *testing.T) { if err != nil { t.Fatalf("error creating DaemonSets controller: %v", err) } - var updated *extensions.DaemonSet + var updated *apps.DaemonSet clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) { if action.GetSubresource() != "status" { return false, nil, nil } if u, ok := action.(core.UpdateAction); ok { - updated = u.GetObject().(*extensions.DaemonSet) + updated = u.GetObject().(*apps.DaemonSet) } return false, nil, nil }) @@ -1385,7 +1383,7 @@ func setNodeTaint(node *v1.Node, taints []v1.Taint) { node.Spec.Taints = taints } -func setDaemonSetToleration(ds *extensions.DaemonSet, tolerations []v1.Toleration) { +func setDaemonSetToleration(ds *apps.DaemonSet, tolerations []v1.Toleration) { ds.Spec.Template.Spec.Tolerations = tolerations } @@ -1482,9 +1480,9 @@ func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) { utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=False") manager.dsStore.Add(ds) switch strategy.Type { - case extensions.OnDeleteDaemonSetStrategyType: + case apps.OnDeleteDaemonSetStrategyType: syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 2) - case extensions.RollingUpdateDaemonSetStrategyType: + case apps.RollingUpdateDaemonSetStrategyType: syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 3) default: t.Fatalf("unexpected UpdateStrategy %+v", strategy) @@ -1493,9 +1491,9 @@ func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) { // Enabling critical pod annotation feature gate should create critical pod utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=True") switch strategy.Type { - case extensions.OnDeleteDaemonSetStrategyType: + case apps.OnDeleteDaemonSetStrategyType: syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 2) - case extensions.RollingUpdateDaemonSetStrategyType: + case apps.RollingUpdateDaemonSetStrategyType: syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 3) default: t.Fatalf("unexpected UpdateStrategy %+v", strategy) @@ -1534,7 +1532,7 @@ func TestPortConflictNodeDaemonDoesNotLaunchCriticalPod(t *testing.T) { } } -func setDaemonSetCritical(ds *extensions.DaemonSet) { +func setDaemonSetCritical(ds *apps.DaemonSet) { ds.Namespace = api.NamespaceSystem if ds.Spec.Template.ObjectMeta.Annotations == nil { ds.Spec.Template.ObjectMeta.Annotations = make(map[string]string) @@ -1547,14 +1545,14 @@ func TestNodeShouldRunDaemonPod(t *testing.T) { predicateName string podsOnNode []*v1.Pod nodeCondition []v1.NodeCondition - ds *extensions.DaemonSet + ds *apps.DaemonSet wantToRun, shouldSchedule, shouldContinueRunning bool err error }{ { predicateName: "ShouldRunDaemonPod", - ds: &extensions.DaemonSet{ - Spec: extensions.DaemonSetSpec{ + ds: &apps.DaemonSet{ + Spec: apps.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ @@ -1570,8 +1568,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) { }, { predicateName: "InsufficientResourceError", - ds: &extensions.DaemonSet{ - Spec: extensions.DaemonSetSpec{ + ds: &apps.DaemonSet{ + Spec: 
apps.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ @@ -1587,8 +1585,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) { }, { predicateName: "ErrPodNotMatchHostName", - ds: &extensions.DaemonSet{ - Spec: extensions.DaemonSetSpec{ + ds: &apps.DaemonSet{ + Spec: apps.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ @@ -1615,8 +1613,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) { }, }, }, - ds: &extensions.DaemonSet{ - Spec: extensions.DaemonSetSpec{ + ds: &apps.DaemonSet{ + Spec: apps.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ @@ -1650,8 +1648,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) { }, }, }, - ds: &extensions.DaemonSet{ - Spec: extensions.DaemonSetSpec{ + ds: &apps.DaemonSet{ + Spec: apps.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ @@ -1679,8 +1677,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) { }, }, }, - ds: &extensions.DaemonSet{ - Spec: extensions.DaemonSetSpec{ + ds: &apps.DaemonSet{ + Spec: apps.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ @@ -1696,8 +1694,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) { }, { predicateName: "ErrNodeSelectorNotMatch", - ds: &extensions.DaemonSet{ - Spec: extensions.DaemonSetSpec{ + ds: &apps.DaemonSet{ + Spec: apps.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ @@ -1715,8 +1713,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) { }, { predicateName: "ShouldRunDaemonPod", - ds: &extensions.DaemonSet{ - Spec: extensions.DaemonSetSpec{ + ds: &apps.DaemonSet{ + Spec: apps.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ @@ -1734,8 +1732,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) { }, { predicateName: "ErrPodAffinityNotMatch", - ds: &extensions.DaemonSet{ - Spec: extensions.DaemonSetSpec{ + ds: &apps.DaemonSet{ + Spec: apps.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ @@ -1769,8 +1767,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) { }, { predicateName: "ShouldRunDaemonPod", - ds: &extensions.DaemonSet{ - Spec: extensions.DaemonSetSpec{ + ds: &apps.DaemonSet{ + Spec: apps.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ @@ -1845,14 +1843,14 @@ func TestUpdateNode(t *testing.T) { test string newNode *v1.Node oldNode *v1.Node - ds *extensions.DaemonSet + ds *apps.DaemonSet shouldEnqueue bool }{ { test: "Nothing changed, should not enqueue", oldNode: newNode("node1", nil), newNode: newNode("node1", nil), - ds: func() *extensions.DaemonSet { + ds: func() *apps.DaemonSet { ds := newDaemonSet("ds") ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel return ds @@ -1863,7 +1861,7 @@ func TestUpdateNode(t *testing.T) { test: "Node labels changed", oldNode: newNode("node1", nil), newNode: newNode("node1", simpleNodeLabel), - ds: func() *extensions.DaemonSet { + ds: 
func() *apps.DaemonSet { ds := newDaemonSet("ds") ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel return ds }(), @@ -1893,7 +1891,7 @@ manager.dsStore.Add(c.ds) syncAndValidateDaemonSets(t, manager, c.ds, podControl, 0, 0, 0) - manager.enqueueDaemonSet = func(ds *extensions.DaemonSet) { + manager.enqueueDaemonSet = func(ds *apps.DaemonSet) { if ds.Name == "ds" { enqueued = true } @@ -1917,7 +1915,7 @@ node *v1.Node existPods []*v1.Pod deletedPod *v1.Pod - ds *extensions.DaemonSet + ds *apps.DaemonSet shouldEnqueue bool }{ { @@ -1952,7 +1950,7 @@ Spec: podSpec, } }(), - ds: func() *extensions.DaemonSet { + ds: func() *apps.DaemonSet { ds := newDaemonSet("ds") ds.Spec.Template.Spec = resourcePodSpec("", "50M", "50m") return ds @@ -1997,7 +1995,7 @@ Spec: podSpec, } }(), - ds: func() *extensions.DaemonSet { + ds: func() *apps.DaemonSet { ds := newDaemonSet("ds") ds.Spec.Template.Spec = resourcePodSpec("", "50M", "50m") return ds @@ -2039,7 +2037,7 @@ Spec: podSpec, } }(), - ds: func() *extensions.DaemonSet { + ds: func() *apps.DaemonSet { ds := newDaemonSet("ds") ds.Spec.Template.Spec = resourcePodSpec("", "50M", "50m") return ds @@ -2061,15 +2059,15 @@ manager.podStore.Add(pod) } switch strategy.Type { - case extensions.OnDeleteDaemonSetStrategyType: + case apps.OnDeleteDaemonSetStrategyType: syncAndValidateDaemonSets(t, manager, c.ds, podControl, 0, 0, 2) - case extensions.RollingUpdateDaemonSetStrategyType: + case apps.RollingUpdateDaemonSetStrategyType: syncAndValidateDaemonSets(t, manager, c.ds, podControl, 0, 0, 3) default: t.Fatalf("unexpected UpdateStrategy %+v", strategy) } - manager.enqueueDaemonSetRateLimited = func(ds *extensions.DaemonSet) { + manager.enqueueDaemonSetRateLimited = func(ds *apps.DaemonSet) { if ds.Name == "ds" { enqueued = true } diff --git a/pkg/controller/daemon/update.go b/pkg/controller/daemon/update.go index bbc9015a28..6f1c1a9d3d 100644 --- a/pkg/controller/daemon/update.go +++ b/pkg/controller/daemon/update.go @@ -23,9 +23,8 @@ import ( "github.com/golang/glog" - apps "k8s.io/api/apps/v1beta1" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -41,7 +40,7 @@ import ( // rollingUpdate deletes old daemon set pods making sure that no more than // ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable pods are unavailable -func (dsc *DaemonSetsController) rollingUpdate(ds *extensions.DaemonSet, hash string) error { +func (dsc *DaemonSetsController) rollingUpdate(ds *apps.DaemonSet, hash string) error { nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds) if err != nil { return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err) @@ -82,7 +81,7 @@ func (dsc *DaemonSetsController) rollingUpdate(ds *extensions.DaemonSet, hash st // constructHistory finds all histories controlled by the given DaemonSet, and // updates the current history revision number, or creates the current history if needed. // It also deduplicates current history, and adds missing unique labels to existing histories.
-func (dsc *DaemonSetsController) constructHistory(ds *extensions.DaemonSet) (cur *apps.ControllerRevision, old []*apps.ControllerRevision, err error) { +func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps.ControllerRevision, old []*apps.ControllerRevision, err error) { var histories []*apps.ControllerRevision var currentHistories []*apps.ControllerRevision histories, err = dsc.controlledHistories(ds) @@ -92,10 +91,10 @@ func (dsc *DaemonSetsController) constructHistory(ds *extensions.DaemonSet) (cur for _, history := range histories { // Add the unique label if it's not already added to the history // We use history name instead of computing hash, so that we don't need to worry about hash collision - if _, ok := history.Labels[extensions.DefaultDaemonSetUniqueLabelKey]; !ok { + if _, ok := history.Labels[apps.DefaultDaemonSetUniqueLabelKey]; !ok { toUpdate := history.DeepCopy() - toUpdate.Labels[extensions.DefaultDaemonSetUniqueLabelKey] = toUpdate.Name - history, err = dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Update(toUpdate) + toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = toUpdate.Name + history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(toUpdate) if err != nil { return nil, nil, err } @@ -130,7 +129,7 @@ func (dsc *DaemonSetsController) constructHistory(ds *extensions.DaemonSet) (cur if cur.Revision < currRevision { toUpdate := cur.DeepCopy() toUpdate.Revision = currRevision - _, err = dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Update(toUpdate) + _, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(toUpdate) if err != nil { return nil, nil, err } @@ -139,7 +138,7 @@ func (dsc *DaemonSetsController) constructHistory(ds *extensions.DaemonSet) (cur return cur, old, err } -func (dsc *DaemonSetsController) cleanupHistory(ds *extensions.DaemonSet, old []*apps.ControllerRevision) error { +func (dsc *DaemonSetsController) cleanupHistory(ds *apps.DaemonSet, old []*apps.ControllerRevision) error { nodesToDaemonPods, err := dsc.getNodesToDaemonPods(ds) if err != nil { return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err) @@ -155,7 +154,7 @@ func (dsc *DaemonSetsController) cleanupHistory(ds *extensions.DaemonSet, old [] liveHashes := make(map[string]bool) for _, pods := range nodesToDaemonPods { for _, pod := range pods { - if hash := pod.Labels[extensions.DefaultDaemonSetUniqueLabelKey]; len(hash) > 0 { + if hash := pod.Labels[apps.DefaultDaemonSetUniqueLabelKey]; len(hash) > 0 { liveHashes[hash] = true } } @@ -164,7 +163,7 @@ func (dsc *DaemonSetsController) cleanupHistory(ds *extensions.DaemonSet, old [] // Find all live history with the above hashes liveHistory := make(map[string]bool) for _, history := range old { - if hash := history.Labels[extensions.DefaultDaemonSetUniqueLabelKey]; liveHashes[hash] { + if hash := history.Labels[apps.DefaultDaemonSetUniqueLabelKey]; liveHashes[hash] { liveHistory[history.Name] = true } } @@ -199,7 +198,7 @@ func maxRevision(histories []*apps.ControllerRevision) int64 { return max } -func (dsc *DaemonSetsController) dedupCurHistories(ds *extensions.DaemonSet, curHistories []*apps.ControllerRevision) (*apps.ControllerRevision, error) { +func (dsc *DaemonSetsController) dedupCurHistories(ds *apps.DaemonSet, curHistories []*apps.ControllerRevision) (*apps.ControllerRevision, error) { if len(curHistories) == 1 { return curHistories[0], nil } @@ -222,12 +221,12 @@ func (dsc 
*DaemonSetsController) dedupCurHistories(ds *extensions.DaemonSet, cur return nil, err } for _, pod := range pods { - if pod.Labels[extensions.DefaultDaemonSetUniqueLabelKey] != keepCur.Labels[extensions.DefaultDaemonSetUniqueLabelKey] { + if pod.Labels[apps.DefaultDaemonSetUniqueLabelKey] != keepCur.Labels[apps.DefaultDaemonSetUniqueLabelKey] { toUpdate := pod.DeepCopy() if toUpdate.Labels == nil { toUpdate.Labels = make(map[string]string) } - toUpdate.Labels[extensions.DefaultDaemonSetUniqueLabelKey] = keepCur.Labels[extensions.DefaultDaemonSetUniqueLabelKey] + toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = keepCur.Labels[apps.DefaultDaemonSetUniqueLabelKey] _, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(toUpdate) if err != nil { return nil, err @@ -247,7 +246,7 @@ func (dsc *DaemonSetsController) dedupCurHistories(ds *extensions.DaemonSet, cur // This also reconciles ControllerRef by adopting/orphaning. // Note that returned histories are pointers to objects in the cache. // If you want to modify one, you need to deep-copy it first. -func (dsc *DaemonSetsController) controlledHistories(ds *extensions.DaemonSet) ([]*apps.ControllerRevision, error) { +func (dsc *DaemonSetsController) controlledHistories(ds *apps.DaemonSet) ([]*apps.ControllerRevision, error) { selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector) if err != nil { return nil, err @@ -277,7 +276,7 @@ func (dsc *DaemonSetsController) controlledHistories(ds *extensions.DaemonSet) ( } // Match checks if the given DaemonSet's template matches the template stored in the given history. -func Match(ds *extensions.DaemonSet, history *apps.ControllerRevision) (bool, error) { +func Match(ds *apps.DaemonSet, history *apps.ControllerRevision) (bool, error) { patch, err := getPatch(ds) if err != nil { return false, err @@ -289,7 +288,7 @@ func Match(ds *extensions.DaemonSet, history *apps.ControllerRevision) (bool, er // previous version. If the returned error is nil the patch is valid. The current state that we save is just the // PodSpecTemplate. We can modify this later to encompass more state (or less) and remain compatible with previously // recorded patches.
-func getPatch(ds *extensions.DaemonSet) ([]byte, error) { +func getPatch(ds *apps.DaemonSet) ([]byte, error) { dsBytes, err := json.Marshal(ds) if err != nil { return nil, err @@ -312,7 +311,7 @@ func getPatch(ds *extensions.DaemonSet) ([]byte, error) { return patch, err } -func (dsc *DaemonSetsController) snapshot(ds *extensions.DaemonSet, revision int64) (*apps.ControllerRevision, error) { +func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*apps.ControllerRevision, error) { patch, err := getPatch(ds) if err != nil { return nil, err @@ -323,7 +322,7 @@ func (dsc *DaemonSetsController) snapshot(ds *extensions.DaemonSet, revision int ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: ds.Namespace, - Labels: labelsutil.CloneAndAddLabel(ds.Spec.Template.Labels, extensions.DefaultDaemonSetUniqueLabelKey, hash), + Labels: labelsutil.CloneAndAddLabel(ds.Spec.Template.Labels, apps.DefaultDaemonSetUniqueLabelKey, hash), Annotations: ds.Annotations, OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(ds, controllerKind)}, }, @@ -331,10 +330,10 @@ func (dsc *DaemonSetsController) snapshot(ds *extensions.DaemonSet, revision int Revision: revision, } - history, err = dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Create(history) + history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Create(history) if errors.IsAlreadyExists(err) { // TODO: Is it okay to get from historyLister? - existedHistory, getErr := dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Get(name, metav1.GetOptions{}) + existedHistory, getErr := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Get(name, metav1.GetOptions{}) if getErr != nil { return nil, getErr } @@ -367,13 +366,19 @@ func (dsc *DaemonSetsController) snapshot(ds *extensions.DaemonSet, revision int return history, err } -func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *extensions.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod, hash string) ([]*v1.Pod, []*v1.Pod) { +func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *apps.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod, hash string) ([]*v1.Pod, []*v1.Pod) { var newPods []*v1.Pod var oldPods []*v1.Pod for _, pods := range nodeToDaemonPods { for _, pod := range pods { - if util.IsPodUpdated(ds.Spec.TemplateGeneration, pod, hash) { + // If the returned error is not nil we have a parse error. + // The controller handles this via the hash. 
+ generation, err := util.GetTemplateGeneration(ds) + if err != nil { + generation = nil + } + if util.IsPodUpdated(pod, hash, generation) { newPods = append(newPods, pod) } else { oldPods = append(oldPods, pod) @@ -383,7 +388,7 @@ func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *extensions.DaemonSet, n return newPods, oldPods } -func (dsc *DaemonSetsController) getUnavailableNumbers(ds *extensions.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod) (int, int, error) { +func (dsc *DaemonSetsController) getUnavailableNumbers(ds *apps.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod) (int, int, error) { glog.V(4).Infof("Getting unavailable numbers") // TODO: get nodeList once in syncDaemonSet and pass it to other functions nodeList, err := dsc.nodeLister.List(labels.Everything()) diff --git a/pkg/controller/daemon/update_test.go b/pkg/controller/daemon/update_test.go index 0521bb576b..a26d774e6f 100644 --- a/pkg/controller/daemon/update_test.go +++ b/pkg/controller/daemon/update_test.go @@ -19,8 +19,8 @@ package daemon import ( "testing" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" ) @@ -38,10 +38,9 @@ func TestDaemonSetUpdatesPods(t *testing.T) { markPodsReady(podControl.podStore) ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2" - ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType + ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType intStr := intstr.FromInt(maxUnavailable) - ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr} - ds.Spec.TemplateGeneration++ + ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr} manager.dsStore.Update(ds) clearExpectations(t, manager, ds, podControl) @@ -80,10 +79,9 @@ func TestDaemonSetUpdatesWhenNewPosIsNotReady(t *testing.T) { markPodsReady(podControl.podStore) ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2" - ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType + ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType intStr := intstr.FromInt(maxUnavailable) - ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr} - ds.Spec.TemplateGeneration++ + ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr} manager.dsStore.Update(ds) // new pods are not ready numUnavailable == maxUnavailable @@ -109,10 +107,9 @@ func TestDaemonSetUpdatesAllOldPodsNotReady(t *testing.T) { syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0) ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2" - ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType + ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType intStr := intstr.FromInt(maxUnavailable) - ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr} - ds.Spec.TemplateGeneration++ + ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr} manager.dsStore.Update(ds) // all old pods are unavailable so should be removed @@ -137,9 +134,9 @@ func TestDaemonSetUpdatesNoTemplateChanged(t *testing.T) { manager.dsStore.Add(ds) syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0) - ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType + ds.Spec.UpdateStrategy.Type = 
apps.RollingUpdateDaemonSetStrategyType intStr := intstr.FromInt(maxUnavailable) - ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr} + ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr} manager.dsStore.Update(ds) // template is not changed no pod should be removed @@ -152,7 +149,7 @@ func TestGetUnavailableNumbers(t *testing.T) { cases := []struct { name string Manager *daemonSetsController - ds *extensions.DaemonSet + ds *apps.DaemonSet nodeToPods map[string][]*v1.Pod maxUnavailable int numUnavailable int @@ -167,10 +164,10 @@ func TestGetUnavailableNumbers(t *testing.T) { } return manager }(), - ds: func() *extensions.DaemonSet { + ds: func() *apps.DaemonSet { ds := newDaemonSet("x") intStr := intstr.FromInt(0) - ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr} + ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr} return ds }(), nodeToPods: make(map[string][]*v1.Pod), @@ -187,10 +184,10 @@ func TestGetUnavailableNumbers(t *testing.T) { addNodes(manager.nodeStore, 0, 2, nil) return manager }(), - ds: func() *extensions.DaemonSet { + ds: func() *apps.DaemonSet { ds := newDaemonSet("x") intStr := intstr.FromInt(1) - ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr} + ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr} return ds }(), nodeToPods: func() map[string][]*v1.Pod { @@ -216,10 +213,10 @@ func TestGetUnavailableNumbers(t *testing.T) { addNodes(manager.nodeStore, 0, 2, nil) return manager }(), - ds: func() *extensions.DaemonSet { + ds: func() *apps.DaemonSet { ds := newDaemonSet("x") intStr := intstr.FromInt(0) - ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr} + ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr} return ds }(), nodeToPods: func() map[string][]*v1.Pod { @@ -242,10 +239,10 @@ func TestGetUnavailableNumbers(t *testing.T) { addNodes(manager.nodeStore, 0, 2, nil) return manager }(), - ds: func() *extensions.DaemonSet { + ds: func() *apps.DaemonSet { ds := newDaemonSet("x") intStr := intstr.FromString("50%") - ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr} + ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr} return ds }(), nodeToPods: func() map[string][]*v1.Pod { @@ -271,10 +268,10 @@ func TestGetUnavailableNumbers(t *testing.T) { addNodes(manager.nodeStore, 0, 2, nil) return manager }(), - ds: func() *extensions.DaemonSet { + ds: func() *apps.DaemonSet { ds := newDaemonSet("x") intStr := intstr.FromString("50%") - ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr} + ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr} return ds }(), nodeToPods: func() map[string][]*v1.Pod { diff --git a/pkg/controller/daemon/util/daemonset_util.go b/pkg/controller/daemon/util/daemonset_util.go index ee5ad568f5..ea91813b40 100644 --- a/pkg/controller/daemon/util/daemonset_util.go +++ b/pkg/controller/daemon/util/daemonset_util.go @@ -18,7 +18,9 @@ package util import ( "fmt" + "strconv" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -28,13 +30,28 @@ import ( 
"k8s.io/kubernetes/pkg/features" kubelettypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/scheduler/algorithm" - labelsutil "k8s.io/kubernetes/pkg/util/labels" ) +// GetTemplateGeneration gets the template generation associated with a v1.DaemonSet by extracting it from the +// deprecated annotation. If no annotation is found nil is returned. If the annotation is found and fails to parse +// nil is returned with an error. If the generation can be parsed from the annotation, a pointer to the parsed int64 +// value is returned. +func GetTemplateGeneration(ds *apps.DaemonSet) (*int64, error) { + annotation, found := ds.Annotations[apps.DeprecatedTemplateGeneration] + if !found { + return nil, nil + } + generation, err := strconv.ParseInt(annotation, 10, 64) + if err != nil { + return nil, err + } + return &generation, nil +} + // CreatePodTemplate returns copy of provided template with additional // label which contains templateGeneration (for backward compatibility), // hash of provided template and sets default daemon tolerations. -func CreatePodTemplate(template v1.PodTemplateSpec, generation int64, hash string) v1.PodTemplateSpec { +func CreatePodTemplate(template v1.PodTemplateSpec, generation *int64, hash string) v1.PodTemplateSpec { newTemplate := *template.DeepCopy() // DaemonSet pods shouldn't be deleted by NodeController in case of node problems. // Add infinite toleration for taint notReady:NoExecute here @@ -81,12 +98,12 @@ func CreatePodTemplate(template v1.PodTemplateSpec, generation int64, hash strin }) } - templateGenerationStr := fmt.Sprint(generation) - newTemplate.ObjectMeta.Labels = labelsutil.CloneAndAddLabel( - template.ObjectMeta.Labels, - extensions.DaemonSetTemplateGenerationKey, - templateGenerationStr, - ) + if newTemplate.ObjectMeta.Labels == nil { + newTemplate.ObjectMeta.Labels = make(map[string]string) + } + if generation != nil { + newTemplate.ObjectMeta.Labels[extensions.DaemonSetTemplateGenerationKey] = fmt.Sprint(*generation) + } // TODO: do we need to validate if the DaemonSet is RollingUpdate or not? 
 	if len(hash) > 0 {
 		newTemplate.ObjectMeta.Labels[extensions.DefaultDaemonSetUniqueLabelKey] = hash
@@ -94,10 +111,11 @@ func CreatePodTemplate(template v1.PodTemplateSpec, generation int64, hash strin
 	return newTemplate
 }
 
-// IsPodUpdated checks if pod contains label value that either matches templateGeneration or hash
-func IsPodUpdated(dsTemplateGeneration int64, pod *v1.Pod, hash string) bool {
+// IsPodUpdate checks if pod contains label value that either matches templateGeneration or hash
+func IsPodUpdated(pod *v1.Pod, hash string, dsTemplateGeneration *int64) bool {
 	// Compare with hash to see if the pod is updated, need to maintain backward compatibility of templateGeneration
-	templateMatches := pod.Labels[extensions.DaemonSetTemplateGenerationKey] == fmt.Sprint(dsTemplateGeneration)
+	templateMatches := dsTemplateGeneration != nil &&
+		pod.Labels[extensions.DaemonSetTemplateGenerationKey] == fmt.Sprint(*dsTemplateGeneration)
 	hashMatches := len(hash) > 0 && pod.Labels[extensions.DefaultDaemonSetUniqueLabelKey] == hash
 	return hashMatches || templateMatches
 }
diff --git a/pkg/controller/daemon/util/daemonset_util_test.go b/pkg/controller/daemon/util/daemonset_util_test.go
index 2a9cb9203d..21060362ec 100644
--- a/pkg/controller/daemon/util/daemonset_util_test.go
+++ b/pkg/controller/daemon/util/daemonset_util_test.go
@@ -47,13 +47,14 @@ func newPod(podName string, nodeName string, label map[string]string) *v1.Pod {
 }
 
 func TestIsPodUpdated(t *testing.T) {
-	templateGeneration := int64(12345)
+	templateGeneration := int64Ptr(12345)
+	badGeneration := int64Ptr(12346)
 	hash := "55555"
-	labels := map[string]string{extensions.DaemonSetTemplateGenerationKey: fmt.Sprint(templateGeneration), extensions.DefaultDaemonSetUniqueLabelKey: hash}
-	labelsNoHash := map[string]string{extensions.DaemonSetTemplateGenerationKey: fmt.Sprint(templateGeneration)}
+	labels := map[string]string{extensions.DaemonSetTemplateGenerationKey: fmt.Sprint(*templateGeneration), extensions.DefaultDaemonSetUniqueLabelKey: hash}
+	labelsNoHash := map[string]string{extensions.DaemonSetTemplateGenerationKey: fmt.Sprint(*templateGeneration)}
 	tests := []struct {
 		test               string
-		templateGeneration int64
+		templateGeneration *int64
 		pod                *v1.Pod
 		hash               string
 		isUpdated          bool
@@ -95,14 +96,14 @@ func TestIsPodUpdated(t *testing.T) {
 		},
 		{
 			"templateGeneration doesn't match, hash does",
-			templateGeneration + 1,
+			badGeneration,
 			newPod("pod1", "node1", labels),
 			hash,
 			true,
 		},
 		{
 			"templateGeneration and hash don't match",
-			templateGeneration + 1,
+			badGeneration,
 			newPod("pod1", "node1", labels),
 			hash + "123",
 			false,
@@ -130,7 +131,7 @@ func TestIsPodUpdated(t *testing.T) {
 		},
 	}
 	for _, test := range tests {
-		updated := IsPodUpdated(test.templateGeneration, test.pod, test.hash)
+		updated := IsPodUpdated(test.pod, test.hash, test.templateGeneration)
 		if updated != test.isUpdated {
 			t.Errorf("%s: IsPodUpdated returned wrong value.
Expected %t, got %t", test.test, test.isUpdated, updated) } @@ -139,19 +140,19 @@ func TestIsPodUpdated(t *testing.T) { func TestCreatePodTemplate(t *testing.T) { tests := []struct { - templateGeneration int64 + templateGeneration *int64 hash string expectUniqueLabel bool }{ - {int64(1), "", false}, - {int64(2), "3242341807", true}, + {int64Ptr(1), "", false}, + {int64Ptr(2), "3242341807", true}, } for _, test := range tests { podTemplateSpec := v1.PodTemplateSpec{} newPodTemplate := CreatePodTemplate(podTemplateSpec, test.templateGeneration, test.hash) val, exists := newPodTemplate.ObjectMeta.Labels[extensions.DaemonSetTemplateGenerationKey] - if !exists || val != fmt.Sprint(test.templateGeneration) { - t.Errorf("Expected podTemplateSpec to have generation label value: %d, got: %s", test.templateGeneration, val) + if !exists || val != fmt.Sprint(*test.templateGeneration) { + t.Errorf("Expected podTemplateSpec to have generation label value: %d, got: %s", *test.templateGeneration, val) } val, exists = newPodTemplate.ObjectMeta.Labels[extensions.DefaultDaemonSetUniqueLabelKey] if test.expectUniqueLabel && (!exists || val != test.hash) { @@ -162,3 +163,8 @@ func TestCreatePodTemplate(t *testing.T) { } } } + +func int64Ptr(i int) *int64 { + li := int64(i) + return &li +} From cf80186a6c1b21b7d3a8a21ef00ebf448f90d37b Mon Sep 17 00:00:00 2001 From: Kenneth Owens Date: Fri, 23 Feb 2018 08:06:16 -0800 Subject: [PATCH 2/5] Fix golint warning --- pkg/controller/daemon/util/daemonset_util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/daemon/util/daemonset_util.go b/pkg/controller/daemon/util/daemonset_util.go index ea91813b40..f6510ea9be 100644 --- a/pkg/controller/daemon/util/daemonset_util.go +++ b/pkg/controller/daemon/util/daemonset_util.go @@ -111,7 +111,7 @@ func CreatePodTemplate(template v1.PodTemplateSpec, generation *int64, hash stri return newTemplate } -// IsPodUpdate checks if pod contains label value that either matches templateGeneration or hash +// IsPodUpdated checks if pod contains label value that either matches templateGeneration or hash func IsPodUpdated(pod *v1.Pod, hash string, dsTemplateGeneration *int64) bool { // Compare with hash to see if the pod is updated, need to maintain backward compatibility of templateGeneration templateMatches := dsTemplateGeneration != nil && From f89afa008f822be220c7e8fc2ef6f556a252c142 Mon Sep 17 00:00:00 2001 From: Kenneth Owens Date: Wed, 14 Feb 2018 10:36:50 -0800 Subject: [PATCH 3/5] Update versioned portions of kubectl to use apps/v1 with DaemonSet --- pkg/kubectl/history.go | 24 +++++++++------------ pkg/kubectl/rollback.go | 48 +++++------------------------------------ 2 files changed, 15 insertions(+), 57 deletions(-) diff --git a/pkg/kubectl/history.go b/pkg/kubectl/history.go index 3e4a02ac99..9344e194ad 100644 --- a/pkg/kubectl/history.go +++ b/pkg/kubectl/history.go @@ -23,9 +23,8 @@ import ( "text/tabwriter" appsv1 "k8s.io/api/apps/v1" - appsv1beta1 "k8s.io/api/apps/v1beta1" "k8s.io/api/core/v1" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -35,8 +34,6 @@ import ( "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/client-go/kubernetes" clientappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" - clientappsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" - clientextv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" api "k8s.io/kubernetes/pkg/apis/core" 
apiv1 "k8s.io/kubernetes/pkg/apis/core/v1" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" @@ -187,11 +184,11 @@ type DaemonSetHistoryViewer struct { // ViewHistory returns a revision-to-history map as the revision history of a deployment // TODO: this should be a describer func (h *DaemonSetHistoryViewer) ViewHistory(namespace, name string, revision int64) (string, error) { - ds, history, err := daemonSetHistory(h.c.ExtensionsV1beta1(), h.c.AppsV1beta1(), namespace, name) + ds, history, err := daemonSetHistory(h.c.AppsV1(), namespace, name) if err != nil { return "", err } - historyInfo := make(map[int64]*appsv1beta1.ControllerRevision) + historyInfo := make(map[int64]*appsv1.ControllerRevision) for _, history := range history { // TODO: for now we assume revisions don't overlap, we may need to handle it historyInfo[history.Revision] = history @@ -290,11 +287,11 @@ func controlledHistoryV1( // controlledHistories returns all ControllerRevisions in namespace that selected by selector and owned by accessor func controlledHistory( - apps clientappsv1beta1.AppsV1beta1Interface, + apps clientappsv1.AppsV1Interface, namespace string, selector labels.Selector, - accessor metav1.Object) ([]*appsv1beta1.ControllerRevision, error) { - var result []*appsv1beta1.ControllerRevision + accessor metav1.Object) ([]*appsv1.ControllerRevision, error) { + var result []*appsv1.ControllerRevision historyList, err := apps.ControllerRevisions(namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) if err != nil { return nil, err @@ -311,10 +308,9 @@ func controlledHistory( // daemonSetHistory returns the DaemonSet named name in namespace and all ControllerRevisions in its history. func daemonSetHistory( - ext clientextv1beta1.ExtensionsV1beta1Interface, - apps clientappsv1beta1.AppsV1beta1Interface, - namespace, name string) (*extensionsv1beta1.DaemonSet, []*appsv1beta1.ControllerRevision, error) { - ds, err := ext.DaemonSets(namespace).Get(name, metav1.GetOptions{}) + apps clientappsv1.AppsV1Interface, + namespace, name string) (*appsv1.DaemonSet, []*appsv1.ControllerRevision, error) { + ds, err := apps.DaemonSets(namespace).Get(name, metav1.GetOptions{}) if err != nil { return nil, nil, fmt.Errorf("failed to retrieve DaemonSet %s: %v", name, err) } @@ -357,7 +353,7 @@ func statefulSetHistory( } // applyDaemonSetHistory returns a specific revision of DaemonSet by applying the given history to a copy of the given DaemonSet -func applyDaemonSetHistory(ds *extensionsv1beta1.DaemonSet, history *appsv1beta1.ControllerRevision) (*extensionsv1beta1.DaemonSet, error) { +func applyDaemonSetHistory(ds *appsv1.DaemonSet, history *appsv1.ControllerRevision) (*appsv1.DaemonSet, error) { clone := ds.DeepCopy() cloneBytes, err := json.Marshal(clone) if err != nil { diff --git a/pkg/kubectl/rollback.go b/pkg/kubectl/rollback.go index ab91163c7f..10ede4b53d 100644 --- a/pkg/kubectl/rollback.go +++ b/pkg/kubectl/rollback.go @@ -25,7 +25,6 @@ import ( "syscall" appsv1 "k8s.io/api/apps/v1" - appsv1beta1 "k8s.io/api/apps/v1beta1" "k8s.io/api/core/v1" extv1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/meta" @@ -257,7 +256,7 @@ func (r *DaemonSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations ma if err != nil { return "", fmt.Errorf("failed to create accessor for kind %v: %s", obj.GetObjectKind(), err.Error()) } - ds, history, err := daemonSetHistory(r.c.ExtensionsV1beta1(), r.c.AppsV1beta1(), accessor.GetNamespace(), accessor.GetName()) + ds, history, err := 
daemonSetHistory(r.c.AppsV1(), accessor.GetNamespace(), accessor.GetName()) if err != nil { return "", err } @@ -316,7 +315,7 @@ func (r *StatefulSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations return "", fmt.Errorf("no last revision to roll back to") } - toHistory := findHistoryV1(toRevision, history) + toHistory := findHistory(toRevision, history) if toHistory == nil { return "", revisionNotFoundErr(toRevision) } @@ -346,44 +345,16 @@ func (r *StatefulSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations return rollbackSuccess, nil } -// TODO: When all the controllers have been updated to use v1, rename this function findHistoryV1()->findHistory() and -// TODO: remove the original findHistory() -// findHistoryV1 returns a controllerrevision of a specific revision from the given controllerrevisions. +// findHistory returns a controllerrevision of a specific revision from the given controllerrevisions. // It returns nil if no such controllerrevision exists. // If toRevision is 0, the last previously used history is returned. -func findHistoryV1(toRevision int64, allHistory []*appsv1.ControllerRevision) *appsv1.ControllerRevision { +func findHistory(toRevision int64, allHistory []*appsv1.ControllerRevision) *appsv1.ControllerRevision { if toRevision == 0 && len(allHistory) <= 1 { return nil } // Find the history to rollback to var toHistory *appsv1.ControllerRevision - if toRevision == 0 { - // If toRevision == 0, find the latest revision (2nd max) - sort.Sort(historiesByRevisionV1(allHistory)) - toHistory = allHistory[len(allHistory)-2] - } else { - for _, h := range allHistory { - if h.Revision == toRevision { - // If toRevision != 0, find the history with matching revision - return h - } - } - } - - return toHistory -} - -// findHistory returns a controllerrevision of a specific revision from the given controllerrevisions. -// It returns nil if no such controllerrevision exists. -// If toRevision is 0, the last previously used history is returned. 
-func findHistory(toRevision int64, allHistory []*appsv1beta1.ControllerRevision) *appsv1beta1.ControllerRevision { - if toRevision == 0 && len(allHistory) <= 1 { - return nil - } - - // Find the history to rollback to - var toHistory *appsv1beta1.ControllerRevision if toRevision == 0 { // If toRevision == 0, find the latest revision (2nd max) sort.Sort(historiesByRevision(allHistory)) @@ -417,19 +388,10 @@ func revisionNotFoundErr(r int64) error { } // TODO: copied from daemon controller, should extract to a library -type historiesByRevision []*appsv1beta1.ControllerRevision +type historiesByRevision []*appsv1.ControllerRevision func (h historiesByRevision) Len() int { return len(h) } func (h historiesByRevision) Swap(i, j int) { h[i], h[j] = h[j], h[i] } func (h historiesByRevision) Less(i, j int) bool { return h[i].Revision < h[j].Revision } - -// TODO: copied from daemon controller, should extract to a library -type historiesByRevisionV1 []*appsv1.ControllerRevision - -func (h historiesByRevisionV1) Len() int { return len(h) } -func (h historiesByRevisionV1) Swap(i, j int) { h[i], h[j] = h[j], h[i] } -func (h historiesByRevisionV1) Less(i, j int) bool { - return h[i].Revision < h[j].Revision -} From 22fb5c4762a92dbbffae0084df69c0003e9cc7c8 Mon Sep 17 00:00:00 2001 From: Kenneth Owens Date: Wed, 14 Feb 2018 10:37:40 -0800 Subject: [PATCH 4/5] Update e2e and integration to use apps/v1 for DaemonSet --- test/e2e/apps/daemon_set.go | 194 +++++-------------- test/e2e/framework/util.go | 7 +- test/integration/daemonset/daemonset_test.go | 51 +++-- 3 files changed, 77 insertions(+), 175 deletions(-) diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go index a8090eaf2a..e196c7a813 100644 --- a/test/e2e/apps/daemon_set.go +++ b/test/e2e/apps/daemon_set.go @@ -22,10 +22,8 @@ import ( "strings" "time" - apps "k8s.io/api/apps/v1beta1" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" - apiequality "k8s.io/apimachinery/pkg/api/equality" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -67,7 +65,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { AfterEach(func() { // Clean up - daemonsets, err := f.ClientSet.ExtensionsV1beta1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}) + daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred(), "unable to dump DaemonSets") if daemonsets != nil && len(daemonsets.Items) > 0 { for _, ds := range daemonsets.Items { @@ -80,7 +78,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped") } } - if daemonsets, err := f.ClientSet.ExtensionsV1beta1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil { + if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil { framework.Logf("daemonset: %s", runtime.EncodeOrDie(legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), daemonsets)) } else { framework.Logf("unable to dump daemonsets: %v", err) @@ -114,7 +112,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { label := map[string]string{daemonsetNameLabel: dsName} By(fmt.Sprintf("Creating simple DaemonSet %q", dsName)) - ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label)) + ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, 
image, label)) Expect(err).NotTo(HaveOccurred()) By("Check that daemon pods launch on every node of the cluster.") @@ -138,7 +136,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.Logf("Creating daemon %q with a node selector", dsName) ds := newDaemonSet(dsName, image, complexLabel) ds.Spec.Template.Spec.NodeSelector = nodeSelector - ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds) + ds, err := c.AppsV1().DaemonSets(ns).Create(ds) Expect(err).NotTo(HaveOccurred()) By("Initially, daemon pods should not be running on any nodes.") @@ -167,7 +165,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate") patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`, daemonsetColorLabel, greenNode.Labels[daemonsetColorLabel]) - ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch)) + ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch)) Expect(err).NotTo(HaveOccurred(), "error patching daemon set") daemonSetLabels, _ = separateDaemonSetNodeLabels(greenNode.Labels) Expect(len(daemonSetLabels)).To(Equal(1)) @@ -199,7 +197,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { }, }, } - ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds) + ds, err := c.AppsV1().DaemonSets(ns).Create(ds) Expect(err).NotTo(HaveOccurred()) By("Initially, daemon pods should not be running on any nodes.") @@ -229,7 +227,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { label := map[string]string{daemonsetNameLabel: dsName} By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName)) - ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label)) + ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label)) Expect(err).NotTo(HaveOccurred()) By("Check that daemon pods launch on every node of the cluster.") @@ -253,54 +251,43 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { label := map[string]string{daemonsetNameLabel: dsName} framework.Logf("Creating simple daemon set %s", dsName) - ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label)) + ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label)) Expect(err).NotTo(HaveOccurred()) - Expect(ds.Spec.TemplateGeneration).To(Equal(int64(1))) By("Check that daemon pods launch on every node of the cluster.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start") - By("Make sure all daemon pods have correct template generation 1") - templateGeneration := "1" - err = checkDaemonPodsTemplateGeneration(c, ns, label, "1") - Expect(err).NotTo(HaveOccurred()) - // Check history and labels - ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) + ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) waitForHistoryCreated(c, ns, label, 1) first := curHistory(listDaemonHistories(c, ns, label), ds) - firstHash := first.Labels[extensions.DefaultDaemonSetUniqueLabelKey] + firstHash := first.Labels[apps.DefaultDaemonSetUniqueLabelKey] Expect(first.Revision).To(Equal(int64(1))) - checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash, templateGeneration) + checkDaemonSetPodsLabels(listDaemonPods(c, ns, 
label), firstHash) By("Update daemon pods image.") patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage) - ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch)) + ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch)) Expect(err).NotTo(HaveOccurred()) - Expect(ds.Spec.TemplateGeneration).To(Equal(int64(2))) By("Check that daemon pods images aren't updated.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, image, 0)) Expect(err).NotTo(HaveOccurred()) - By("Make sure all daemon pods have correct template generation 1") - err = checkDaemonPodsTemplateGeneration(c, ns, label, templateGeneration) - Expect(err).NotTo(HaveOccurred()) - By("Check that daemon pods are still running on every node of the cluster.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start") // Check history and labels - ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) + ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) waitForHistoryCreated(c, ns, label, 2) cur := curHistory(listDaemonHistories(c, ns, label), ds) Expect(cur.Revision).To(Equal(int64(2))) - Expect(cur.Labels[extensions.DefaultDaemonSetUniqueLabelKey]).NotTo(Equal(firstHash)) - checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash, templateGeneration) + Expect(cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]).NotTo(Equal(firstHash)) + checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash) }) It("Should update pod when spec was updated and update strategy is RollingUpdate", func() { @@ -309,11 +296,9 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { templateGeneration := int64(999) framework.Logf("Creating simple daemon set %s with templateGeneration %d", dsName, templateGeneration) ds := newDaemonSet(dsName, image, label) - ds.Spec.TemplateGeneration = templateGeneration - ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType} - ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds) + ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType} + ds, err := c.AppsV1().DaemonSets(ns).Create(ds) Expect(err).NotTo(HaveOccurred()) - Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration)) By("Check that daemon pods launch on every node of the cluster.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) @@ -324,20 +309,19 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { Expect(err).NotTo(HaveOccurred()) // Check history and labels - ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) + ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) waitForHistoryCreated(c, ns, label, 1) cur := curHistory(listDaemonHistories(c, ns, label), ds) - hash := cur.Labels[extensions.DefaultDaemonSetUniqueLabelKey] + hash := cur.Labels[apps.DefaultDaemonSetUniqueLabelKey] Expect(cur.Revision).To(Equal(int64(1))) - checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash, fmt.Sprint(templateGeneration)) + checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash) By("Update daemon pods image.") patch := 
getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage) - ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch)) + ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch)) Expect(err).NotTo(HaveOccurred()) templateGeneration++ - Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration)) By("Check that daemon pods images are updated.") err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1)) @@ -352,90 +336,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start") // Check history and labels - ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) + ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) waitForHistoryCreated(c, ns, label, 2) cur = curHistory(listDaemonHistories(c, ns, label), ds) - hash = cur.Labels[extensions.DefaultDaemonSetUniqueLabelKey] + hash = cur.Labels[apps.DefaultDaemonSetUniqueLabelKey] Expect(cur.Revision).To(Equal(int64(2))) - checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash, fmt.Sprint(templateGeneration)) - }) - - It("Should adopt existing pods when creating a RollingUpdate DaemonSet regardless of templateGeneration", func() { - label := map[string]string{daemonsetNameLabel: dsName} - - // 1. Create a RollingUpdate DaemonSet - templateGeneration := int64(999) - framework.Logf("Creating simple RollingUpdate DaemonSet %s with templateGeneration %d", dsName, templateGeneration) - ds := newDaemonSet(dsName, image, label) - ds.Spec.TemplateGeneration = templateGeneration - ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType} - ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds) - Expect(err).NotTo(HaveOccurred()) - Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration)) - - framework.Logf("Check that daemon pods launch on every node of the cluster.") - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds)) - Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start") - - framework.Logf("Make sure all daemon pods have correct template generation %d", templateGeneration) - err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration)) - Expect(err).NotTo(HaveOccurred()) - - // 2. Orphan DaemonSet pods - framework.Logf("Deleting DaemonSet %s and orphaning its pods and history", dsName) - deleteDaemonSetAndOrphan(c, ds) - - // 3. Adopt DaemonSet pods (no restart) - newDSName := "adopt" - framework.Logf("Creating a new RollingUpdate DaemonSet %s to adopt pods", newDSName) - newDS := newDaemonSet(newDSName, image, label) - newDS.Spec.TemplateGeneration = templateGeneration - newDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType} - newDS, err = c.ExtensionsV1beta1().DaemonSets(ns).Create(newDS) - Expect(err).NotTo(HaveOccurred()) - Expect(newDS.Spec.TemplateGeneration).To(Equal(templateGeneration)) - Expect(apiequality.Semantic.DeepEqual(newDS.Spec.Template, ds.Spec.Template)).To(BeTrue(), "DaemonSet template should match to adopt pods") - - framework.Logf("Wait for pods and history to be adopted by DaemonSet %s", newDS.Name) - waitDaemonSetAdoption(c, newDS, ds.Name, templateGeneration) - - // 4. 
Orphan DaemonSet pods again - framework.Logf("Deleting DaemonSet %s and orphaning its pods and history", newDSName) - deleteDaemonSetAndOrphan(c, newDS) - - // 5. Adopt DaemonSet pods (no restart) as long as template matches, even when templateGeneration doesn't match - newAdoptDSName := "adopt-template-matches" - framework.Logf("Creating a new RollingUpdate DaemonSet %s to adopt pods", newAdoptDSName) - newAdoptDS := newDaemonSet(newAdoptDSName, image, label) - newAdoptDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType} - newAdoptDS, err = c.ExtensionsV1beta1().DaemonSets(ns).Create(newAdoptDS) - Expect(err).NotTo(HaveOccurred()) - Expect(newAdoptDS.Spec.TemplateGeneration).To(Equal(int64(1))) - Expect(newAdoptDS.Spec.TemplateGeneration).NotTo(Equal(templateGeneration)) - Expect(apiequality.Semantic.DeepEqual(newAdoptDS.Spec.Template, newDS.Spec.Template)).To(BeTrue(), "DaemonSet template should match to adopt pods") - - framework.Logf(fmt.Sprintf("Wait for pods and history to be adopted by DaemonSet %s", newAdoptDS.Name)) - waitDaemonSetAdoption(c, newAdoptDS, ds.Name, templateGeneration) - - // 6. Orphan DaemonSet pods again - framework.Logf("Deleting DaemonSet %s and orphaning its pods and history", newAdoptDSName) - deleteDaemonSetAndOrphan(c, newAdoptDS) - - // 7. Adopt DaemonSet pods (no restart) as long as templateGeneration matches, even when template doesn't match - newAdoptDSName = "adopt-template-generation-matches" - framework.Logf("Creating a new RollingUpdate DaemonSet %s to adopt pods", newAdoptDSName) - newAdoptDS = newDaemonSet(newAdoptDSName, image, label) - newAdoptDS.Spec.Template.Spec.Containers[0].Name = "not-match" - newAdoptDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType} - newAdoptDS.Spec.TemplateGeneration = templateGeneration - newAdoptDS, err = c.ExtensionsV1beta1().DaemonSets(ns).Create(newAdoptDS) - Expect(err).NotTo(HaveOccurred()) - Expect(newAdoptDS.Spec.TemplateGeneration).To(Equal(templateGeneration)) - Expect(apiequality.Semantic.DeepEqual(newAdoptDS.Spec.Template, newDS.Spec.Template)).NotTo(BeTrue(), "DaemonSet template should not match") - - framework.Logf("Wait for pods and history to be adopted by DaemonSet %s", newAdoptDS.Name) - waitDaemonSetAdoption(c, newAdoptDS, ds.Name, templateGeneration) + checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash) }) It("Should rollback without unnecessary restarts", func() { @@ -445,8 +352,8 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.Logf("Create a RollingUpdate DaemonSet") label := map[string]string{daemonsetNameLabel: dsName} ds := newDaemonSet(dsName, image, label) - ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType} - ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds) + ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType} + ds, err := c.AppsV1().DaemonSets(ns).Create(ds) Expect(err).NotTo(HaveOccurred()) framework.Logf("Check that daemon pods launch on every node of the cluster") @@ -456,7 +363,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.Logf("Update the DaemonSet to trigger a rollout") // We use a nonexistent image here, so that we make sure it won't finish newImage := "foo:non-existent" - newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *extensions.DaemonSet) { + newDS, err := 
framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) { update.Spec.Template.Spec.Containers[0].Image = newImage }) Expect(err).NotTo(HaveOccurred()) @@ -483,7 +390,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { Expect(len(newPods)).NotTo(Equal(0)) framework.Logf("Roll back the DaemonSet before rollout is complete") - rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *extensions.DaemonSet) { + rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) { update.Spec.Template.Spec.Containers[0].Image = image }) Expect(err).NotTo(HaveOccurred()) @@ -511,11 +418,11 @@ func getDaemonSetImagePatch(containerName, containerImage string) string { // deleteDaemonSetAndOrphan deletes the given DaemonSet and orphans all its dependents. // It also checks that all dependents are orphaned, and the DaemonSet is deleted. -func deleteDaemonSetAndOrphan(c clientset.Interface, ds *extensions.DaemonSet) { +func deleteDaemonSetAndOrphan(c clientset.Interface, ds *apps.DaemonSet) { trueVar := true deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar} deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(ds.UID)) - err := c.ExtensionsV1beta1().DaemonSets(ds.Namespace).Delete(ds.Name, deleteOptions) + err := c.AppsV1().DaemonSets(ds.Namespace).Delete(ds.Name, deleteOptions) Expect(err).NotTo(HaveOccurred()) err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsOrphaned(c, ds.Namespace, ds.Spec.Template.Labels)) @@ -526,12 +433,12 @@ func deleteDaemonSetAndOrphan(c clientset.Interface, ds *extensions.DaemonSet) { Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet to be deleted") } -func newDaemonSet(dsName, image string, label map[string]string) *extensions.DaemonSet { - return &extensions.DaemonSet{ +func newDaemonSet(dsName, image string, label map[string]string) *apps.DaemonSet { + return &apps.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: dsName, }, - Spec: extensions.DaemonSetSpec{ + Spec: apps.DaemonSetSpec{ Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: label, @@ -623,7 +530,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s return newNode, nil } -func checkDaemonPodOnNodes(f *framework.Framework, ds *extensions.DaemonSet, nodeNames []string) func() (bool, error) { +func checkDaemonPodOnNodes(f *framework.Framework, ds *apps.DaemonSet, nodeNames []string) func() (bool, error) { return func() (bool, error) { podList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}) if err != nil { @@ -662,14 +569,14 @@ func checkDaemonPodOnNodes(f *framework.Framework, ds *extensions.DaemonSet, nod } } -func checkRunningOnAllNodes(f *framework.Framework, ds *extensions.DaemonSet) func() (bool, error) { +func checkRunningOnAllNodes(f *framework.Framework, ds *apps.DaemonSet) func() (bool, error) { return func() (bool, error) { nodeNames := schedulableNodes(f.ClientSet, ds) return checkDaemonPodOnNodes(f, ds, nodeNames)() } } -func schedulableNodes(c clientset.Interface, ds *extensions.DaemonSet) []string { +func schedulableNodes(c clientset.Interface, ds *apps.DaemonSet) []string { nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) framework.ExpectNoError(err) nodeNames := make([]string, 0) @@ -696,7 +603,7 @@ func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]st } // canScheduleOnNode checks if a given DaemonSet can schedule pods on 
the given node -func canScheduleOnNode(node v1.Node, ds *extensions.DaemonSet) bool { +func canScheduleOnNode(node v1.Node, ds *apps.DaemonSet) bool { newPod := daemon.NewPod(ds, node.Name) nodeInfo := schedulercache.NewNodeInfo() nodeInfo.SetNode(&node) @@ -708,12 +615,12 @@ func canScheduleOnNode(node v1.Node, ds *extensions.DaemonSet) bool { return fit } -func checkRunningOnNoNodes(f *framework.Framework, ds *extensions.DaemonSet) func() (bool, error) { +func checkRunningOnNoNodes(f *framework.Framework, ds *apps.DaemonSet) func() (bool, error) { return checkDaemonPodOnNodes(f, ds, make([]string, 0)) } func checkDaemonStatus(f *framework.Framework, dsName string) error { - ds, err := f.ClientSet.ExtensionsV1beta1().DaemonSets(f.Namespace.Name).Get(dsName, metav1.GetOptions{}) + ds, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Get(dsName, metav1.GetOptions{}) if err != nil { return fmt.Errorf("Could not get daemon set from v1.") } @@ -724,7 +631,7 @@ func checkDaemonStatus(f *framework.Framework, dsName string) error { return nil } -func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *extensions.DaemonSet, image string, maxUnavailable int) func() (bool, error) { +func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *apps.DaemonSet, image string, maxUnavailable int) func() (bool, error) { return func() (bool, error) { podList, err := c.CoreV1().Pods(ds.Namespace).List(metav1.ListOptions{}) if err != nil { @@ -770,7 +677,7 @@ func checkDaemonPodsTemplateGeneration(c clientset.Interface, ns string, label m if !controller.IsPodActive(&pod) { continue } - podTemplateGeneration := pod.Labels[extensions.DaemonSetTemplateGenerationKey] + podTemplateGeneration := pod.Labels[apps.DeprecatedTemplateGeneration] if podTemplateGeneration != templateGeneration { return fmt.Errorf("expected pod %s/%s template generation %s, but got %s", pod.Namespace, pod.Name, templateGeneration, podTemplateGeneration) } @@ -780,7 +687,7 @@ func checkDaemonPodsTemplateGeneration(c clientset.Interface, ns string, label m func checkDaemonSetDeleted(c clientset.Interface, ns, name string) func() (bool, error) { return func() (bool, error) { - _, err := c.ExtensionsV1beta1().DaemonSets(ns).Get(name, metav1.GetOptions{}) + _, err := c.AppsV1().DaemonSets(ns).Get(name, metav1.GetOptions{}) if !apierrs.IsNotFound(err) { return false, err } @@ -840,7 +747,7 @@ func checkDaemonSetHistoryAdopted(c clientset.Interface, ns string, dsUID types. 
} } -func waitDaemonSetAdoption(c clientset.Interface, ds *extensions.DaemonSet, podPrefix string, podTemplateGeneration int64) { +func waitDaemonSetAdoption(c clientset.Interface, ds *apps.DaemonSet, podPrefix string, podTemplateGeneration int64) { ns := ds.Namespace label := ds.Spec.Template.Labels @@ -868,16 +775,13 @@ func checkDaemonSetPodsName(c clientset.Interface, ns, prefix string, label map[ return nil } -func checkDaemonSetPodsLabels(podList *v1.PodList, hash, templateGeneration string) { +func checkDaemonSetPodsLabels(podList *v1.PodList, hash string) { for _, pod := range podList.Items { - podHash := pod.Labels[extensions.DefaultDaemonSetUniqueLabelKey] - podTemplate := pod.Labels[extensions.DaemonSetTemplateGenerationKey] + podHash := pod.Labels[apps.DefaultDaemonSetUniqueLabelKey] Expect(len(podHash)).To(BeNumerically(">", 0)) if len(hash) > 0 { Expect(podHash).To(Equal(hash)) } - Expect(len(podTemplate)).To(BeNumerically(">", 0)) - Expect(podTemplate).To(Equal(templateGeneration)) } } @@ -902,19 +806,19 @@ func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]st func listDaemonHistories(c clientset.Interface, ns string, label map[string]string) *apps.ControllerRevisionList { selector := labels.Set(label).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} - historyList, err := c.AppsV1beta1().ControllerRevisions(ns).List(options) + historyList, err := c.AppsV1().ControllerRevisions(ns).List(options) Expect(err).NotTo(HaveOccurred()) Expect(len(historyList.Items)).To(BeNumerically(">", 0)) return historyList } -func curHistory(historyList *apps.ControllerRevisionList, ds *extensions.DaemonSet) *apps.ControllerRevision { +func curHistory(historyList *apps.ControllerRevisionList, ds *apps.DaemonSet) *apps.ControllerRevision { var curHistory *apps.ControllerRevision foundCurHistories := 0 for i := range historyList.Items { history := &historyList.Items[i] // Every history should have the hash label - Expect(len(history.Labels[extensions.DefaultDaemonSetUniqueLabelKey])).To(BeNumerically(">", 0)) + Expect(len(history.Labels[apps.DefaultDaemonSetUniqueLabelKey])).To(BeNumerically(">", 0)) match, err := daemon.Match(ds, history) Expect(err).NotTo(HaveOccurred()) if match { diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 7f6cb443c9..3236ef3eef 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -50,6 +50,7 @@ import ( . 
"github.com/onsi/gomega" gomegatypes "github.com/onsi/gomega/types" + apps "k8s.io/api/apps/v1" batch "k8s.io/api/batch/v1" "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" @@ -3176,10 +3177,10 @@ func WaitForPartialEvents(c clientset.Interface, ns string, objOrRef runtime.Obj }) } -type updateDSFunc func(*extensions.DaemonSet) +type updateDSFunc func(*apps.DaemonSet) -func UpdateDaemonSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *extensions.DaemonSet, err error) { - daemonsets := c.ExtensionsV1beta1().DaemonSets(namespace) +func UpdateDaemonSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *apps.DaemonSet, err error) { + daemonsets := c.AppsV1().DaemonSets(namespace) var updateErr error pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) { if ds, err = daemonsets.Get(name, metav1.GetOptions{}); err != nil { diff --git a/test/integration/daemonset/daemonset_test.go b/test/integration/daemonset/daemonset_test.go index 45b17a604d..3f7b98e765 100644 --- a/test/integration/daemonset/daemonset_test.go +++ b/test/integration/daemonset/daemonset_test.go @@ -22,16 +22,16 @@ import ( "testing" "time" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" + appstyped "k8s.io/client-go/kubernetes/typed/apps/v1" corev1typed "k8s.io/client-go/kubernetes/typed/core/v1" - extensionsv1beta1typed "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" podutil "k8s.io/kubernetes/pkg/api/v1/pod" @@ -53,8 +53,8 @@ func setup(t *testing.T) (*httptest.Server, framework.CloseFunc, *daemon.DaemonS informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "daemonset-informers")), resyncPeriod) metrics.UnregisterMetricAndUntrackRateLimiterUsage("daemon_controller") dc, err := daemon.NewDaemonSetsController( - informers.Extensions().V1beta1().DaemonSets(), - informers.Apps().V1beta1().ControllerRevisions(), + informers.Apps().V1().DaemonSets(), + informers.Apps().V1().ControllerRevisions(), informers.Core().V1().Pods(), informers.Core().V1().Nodes(), clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "daemonset-controller")), @@ -70,22 +70,22 @@ func testLabels() map[string]string { return map[string]string{"name": "test"} } -func newDaemonSet(name, namespace string) *v1beta1.DaemonSet { +func newDaemonSet(name, namespace string) *apps.DaemonSet { two := int32(2) - return &v1beta1.DaemonSet{ + return &apps.DaemonSet{ TypeMeta: metav1.TypeMeta{ Kind: "DaemonSet", - APIVersion: "extensions/v1beta1", + APIVersion: "apps/v1", }, ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, Name: name, }, - Spec: v1beta1.DaemonSetSpec{ + Spec: apps.DaemonSetSpec{ RevisionHistoryLimit: &two, Selector: &metav1.LabelSelector{MatchLabels: testLabels()}, - UpdateStrategy: v1beta1.DaemonSetUpdateStrategy{ - Type: v1beta1.OnDeleteDaemonSetStrategyType, + UpdateStrategy: apps.DaemonSetUpdateStrategy{ + Type: apps.OnDeleteDaemonSetStrategyType, }, Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ @@ -99,22 +99,22 @@ func newDaemonSet(name, namespace string) *v1beta1.DaemonSet { } } -func newRollbackStrategy() 
*v1beta1.DaemonSetUpdateStrategy { +func newRollbackStrategy() *apps.DaemonSetUpdateStrategy { one := intstr.FromInt(1) - return &v1beta1.DaemonSetUpdateStrategy{ - Type: v1beta1.RollingUpdateDaemonSetStrategyType, - RollingUpdate: &v1beta1.RollingUpdateDaemonSet{MaxUnavailable: &one}, + return &apps.DaemonSetUpdateStrategy{ + Type: apps.RollingUpdateDaemonSetStrategyType, + RollingUpdate: &apps.RollingUpdateDaemonSet{MaxUnavailable: &one}, } } -func newOnDeleteStrategy() *v1beta1.DaemonSetUpdateStrategy { - return &v1beta1.DaemonSetUpdateStrategy{ - Type: v1beta1.OnDeleteDaemonSetStrategyType, +func newOnDeleteStrategy() *apps.DaemonSetUpdateStrategy { + return &apps.DaemonSetUpdateStrategy{ + Type: apps.OnDeleteDaemonSetStrategyType, } } -func updateStrategies() []*v1beta1.DaemonSetUpdateStrategy { - return []*v1beta1.DaemonSetUpdateStrategy{newOnDeleteStrategy(), newRollbackStrategy()} +func updateStrategies() []*apps.DaemonSetUpdateStrategy { + return []*apps.DaemonSetUpdateStrategy{newOnDeleteStrategy(), newRollbackStrategy()} } func allocatableResources(memory, cpu string) v1.ResourceList { @@ -189,9 +189,6 @@ func validateDaemonSetPodsAndMarkReady( return false, fmt.Errorf("Pod %s has %d OwnerReferences, expected only 1", pod.Name, len(ownerReferences)) } controllerRef := ownerReferences[0] - if got, want := controllerRef.APIVersion, "extensions/v1beta1"; got != want { - t.Errorf("controllerRef.APIVersion = %q, want %q", got, want) - } if got, want := controllerRef.Kind, "DaemonSet"; got != want { t.Errorf("controllerRef.Kind = %q, want %q", got, want) } @@ -219,7 +216,7 @@ func validateDaemonSetPodsAndMarkReady( } func validateDaemonSetStatus( - dsClient extensionsv1beta1typed.DaemonSetInterface, + dsClient appstyped.DaemonSetInterface, dsName string, dsNamespace string, expectedNumberReady int32, @@ -267,7 +264,7 @@ func TestOneNodeDaemonLaunchesPod(t *testing.T) { ns := framework.CreateTestingNamespace("one-node-daemonset-test", server, t) defer framework.DeleteTestingNamespace(ns, server, t) - dsClient := clientset.ExtensionsV1beta1().DaemonSets(ns.Name) + dsClient := clientset.AppsV1().DaemonSets(ns.Name) podClient := clientset.CoreV1().Pods(ns.Name) nodeClient := clientset.CoreV1().Nodes() podInformer := informers.Core().V1().Pods().Informer() @@ -300,7 +297,7 @@ func TestSimpleDaemonSetLaunchesPods(t *testing.T) { ns := framework.CreateTestingNamespace("simple-daemonset-test", server, t) defer framework.DeleteTestingNamespace(ns, server, t) - dsClient := clientset.ExtensionsV1beta1().DaemonSets(ns.Name) + dsClient := clientset.AppsV1().DaemonSets(ns.Name) podClient := clientset.CoreV1().Pods(ns.Name) nodeClient := clientset.CoreV1().Nodes() podInformer := informers.Core().V1().Pods().Informer() @@ -330,7 +327,7 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) { ns := framework.CreateTestingNamespace("simple-daemonset-test", server, t) defer framework.DeleteTestingNamespace(ns, server, t) - dsClient := clientset.ExtensionsV1beta1().DaemonSets(ns.Name) + dsClient := clientset.AppsV1().DaemonSets(ns.Name) podClient := clientset.CoreV1().Pods(ns.Name) nodeClient := clientset.CoreV1().Nodes() podInformer := informers.Core().V1().Pods().Informer() @@ -367,7 +364,7 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) { ns := framework.CreateTestingNamespace("insufficient-capacity", server, t) defer framework.DeleteTestingNamespace(ns, server, t) - dsClient := clientset.ExtensionsV1beta1().DaemonSets(ns.Name) + dsClient := 
clientset.AppsV1().DaemonSets(ns.Name) nodeClient := clientset.CoreV1().Nodes() eventClient := corev1typed.New(clientset.CoreV1().RESTClient()).Events(ns.Namespace) stopCh := make(chan struct{}) From 5e8ec4f9e953c4898005f1827ead07ae1806a572 Mon Sep 17 00:00:00 2001 From: Kenneth Owens Date: Wed, 14 Feb 2018 10:49:22 -0800 Subject: [PATCH 5/5] generated code --- pkg/controller/BUILD | 2 +- pkg/controller/daemon/BUILD | 14 +++++--------- pkg/controller/daemon/util/BUILD | 2 +- test/e2e/apps/BUILD | 2 -- test/integration/daemonset/BUILD | 4 ++-- 5 files changed, 9 insertions(+), 15 deletions(-) diff --git a/pkg/controller/BUILD b/pkg/controller/BUILD index ff6226daec..a63a131231 100644 --- a/pkg/controller/BUILD +++ b/pkg/controller/BUILD @@ -61,7 +61,7 @@ go_library( "//pkg/util/taints:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/golang/groupcache/lru:go_default_library", - "//vendor/k8s.io/api/apps/v1beta1:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/authentication/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", diff --git a/pkg/controller/daemon/BUILD b/pkg/controller/daemon/BUILD index 1956c789f6..8dddb58b12 100644 --- a/pkg/controller/daemon/BUILD +++ b/pkg/controller/daemon/BUILD @@ -27,9 +27,8 @@ go_library( "//pkg/util/labels:go_default_library", "//pkg/util/metrics:go_default_library", "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/api/apps/v1beta1:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", @@ -42,16 +41,14 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/k8s.io/client-go/informers/apps/v1beta1:go_default_library", + "//vendor/k8s.io/client-go/informers/apps/v1:go_default_library", "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", - "//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/listers/apps/v1beta1:go_default_library", + "//vendor/k8s.io/client-go/listers/apps/v1:go_default_library", "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", - "//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", "//vendor/k8s.io/client-go/util/integer:go_default_library", @@ -68,7 +65,6 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/api/legacyscheme:go_default_library", - "//pkg/api/testapi:go_default_library", "//pkg/api/v1/pod:go_default_library", "//pkg/apis/core:go_default_library", 
"//pkg/controller:go_default_library", @@ -76,8 +72,8 @@ go_test( "//pkg/scheduler/algorithm:go_default_library", "//pkg/securitycontext:go_default_library", "//pkg/util/labels:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/pkg/controller/daemon/util/BUILD b/pkg/controller/daemon/util/BUILD index 13d8bbdb4a..0f0baa655a 100644 --- a/pkg/controller/daemon/util/BUILD +++ b/pkg/controller/daemon/util/BUILD @@ -16,7 +16,7 @@ go_library( "//pkg/features:go_default_library", "//pkg/kubelet/types:go_default_library", "//pkg/scheduler/algorithm:go_default_library", - "//pkg/util/labels:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/test/e2e/apps/BUILD b/test/e2e/apps/BUILD index 7f6b8f67c6..0cac4a6ef3 100644 --- a/test/e2e/apps/BUILD +++ b/test/e2e/apps/BUILD @@ -48,13 +48,11 @@ go_library( "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", "//vendor/k8s.io/api/apps/v1:go_default_library", - "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/batch/v1:go_default_library", "//vendor/k8s.io/api/batch/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/api/policy/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/test/integration/daemonset/BUILD b/test/integration/daemonset/BUILD index 93baa1467d..f091f92c76 100644 --- a/test/integration/daemonset/BUILD +++ b/test/integration/daemonset/BUILD @@ -18,16 +18,16 @@ go_test( "//pkg/controller/daemon:go_default_library", "//pkg/util/metrics:go_default_library", "//test/integration/framework:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ],