diff --git a/cmd/cloud-controller-manager/app/BUILD b/cmd/cloud-controller-manager/app/BUILD index bd911aa7c9..27132e405b 100644 --- a/cmd/cloud-controller-manager/app/BUILD +++ b/cmd/cloud-controller-manager/app/BUILD @@ -15,6 +15,7 @@ go_library( "//cmd/cloud-controller-manager/app/options:go_default_library", "//pkg/api:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", + "//pkg/client/informers/informers_generated:go_default_library", "//pkg/client/leaderelection:go_default_library", "//pkg/client/leaderelection/resourcelock:go_default_library", "//pkg/cloudprovider:go_default_library", diff --git a/cmd/cloud-controller-manager/app/controllermanager.go b/cmd/cloud-controller-manager/app/controllermanager.go index 2060155834..84ec566ca9 100644 --- a/cmd/cloud-controller-manager/app/controllermanager.go +++ b/cmd/cloud-controller-manager/app/controllermanager.go @@ -36,6 +36,7 @@ import ( "k8s.io/kubernetes/cmd/cloud-controller-manager/app/options" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" + newinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated" "k8s.io/kubernetes/pkg/client/leaderelection" "k8s.io/kubernetes/pkg/client/leaderelection/resourcelock" "k8s.io/kubernetes/pkg/cloudprovider" @@ -193,7 +194,10 @@ func StartControllers(s *options.CloudControllerManagerServer, kubeconfig *restc client := func(serviceAccountName string) clientset.Interface { return rootClientBuilder.ClientOrDie(serviceAccountName) } - sharedInformers := informers.NewSharedInformerFactory(client("shared-informers"), nil, resyncPeriod(s)()) + versionedClient := client("shared-informers") + // TODO replace sharedInformers with newSharedInformers + sharedInformers := informers.NewSharedInformerFactory(versionedClient, nil, resyncPeriod(s)()) + newSharedInformers := newinformers.NewSharedInformerFactory(nil, versionedClient, resyncPeriod(s)()) _, clusterCIDR, err := net.ParseCIDR(s.ClusterCIDR) if err != nil { @@ -202,7 +206,7 @@ func StartControllers(s *options.CloudControllerManagerServer, kubeconfig *restc // Start the CloudNodeController nodeController, err := nodecontroller.NewCloudNodeController( - sharedInformers.Nodes(), + newSharedInformers.Core().V1().Nodes(), client("cloud-node-controller"), cloud, s.NodeMonitorPeriod.Duration) if err != nil { @@ -247,7 +251,9 @@ func StartControllers(s *options.CloudControllerManagerServer, kubeconfig *restc glog.Fatalf("Failed to get api versions from server: %v", err) } + // TODO replace sharedInformers with newSharedInformers sharedInformers.Start(stop) + newSharedInformers.Start(stop) select {} } diff --git a/cmd/kube-controller-manager/app/BUILD b/cmd/kube-controller-manager/app/BUILD index 640a59dc24..299c54b7cf 100644 --- a/cmd/kube-controller-manager/app/BUILD +++ b/cmd/kube-controller-manager/app/BUILD @@ -30,6 +30,7 @@ go_library( "//pkg/apis/componentconfig:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", + "//pkg/client/informers/informers_generated:go_default_library", "//pkg/client/leaderelection:go_default_library", "//pkg/client/leaderelection/resourcelock:go_default_library", "//pkg/cloudprovider:go_default_library", diff --git a/cmd/kube-controller-manager/app/batch.go b/cmd/kube-controller-manager/app/batch.go index 7b599812b6..ce40a9bcb0 100644 --- a/cmd/kube-controller-manager/app/batch.go +++ b/cmd/kube-controller-manager/app/batch.go @@ -33,8 +33,8 @@ func 
startJobController(ctx ControllerContext) (bool, error) { return false, nil } go job.NewJobController( - ctx.InformerFactory.Pods().Informer(), - ctx.InformerFactory.Jobs(), + ctx.NewInformerFactory.Core().V1().Pods(), + ctx.NewInformerFactory.Batch().V1().Jobs(), ctx.ClientBuilder.ClientOrDie("job-controller"), ).Run(int(ctx.Options.ConcurrentJobSyncs), ctx.Stop) return true, nil diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index 6c77d4f2d6..8c22ddc1f5 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -46,6 +46,7 @@ import ( "k8s.io/kubernetes/cmd/kube-controller-manager/app/options" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" + newinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated" "k8s.io/kubernetes/pkg/client/leaderelection" "k8s.io/kubernetes/pkg/client/leaderelection/resourcelock" "k8s.io/kubernetes/pkg/cloudprovider" @@ -210,9 +211,14 @@ type ControllerContext struct { // ClientBuilder will provide a client for this controller to use ClientBuilder controller.ControllerClientBuilder - // InformerFactory gives access to informers for the controller + // InformerFactory gives access to informers for the controller. + // TODO delete this instance once the conversion to generated informers is complete. InformerFactory informers.SharedInformerFactory + // NewInformerFactory gives access to informers for the controller. + // TODO rename this to InformerFactory once the conversion to generated informers is complete. + NewInformerFactory newinformers.SharedInformerFactory + // Options provides access to init options for a given controller Options options.CMServer @@ -326,7 +332,10 @@ func getAvailableResources(clientBuilder controller.ControllerClientBuilder) (ma } func StartControllers(controllers map[string]InitFunc, s *options.CMServer, rootClientBuilder, clientBuilder controller.ControllerClientBuilder, stop <-chan struct{}) error { - sharedInformers := informers.NewSharedInformerFactory(rootClientBuilder.ClientOrDie("shared-informers"), nil, ResyncPeriod(s)()) + versionedClient := rootClientBuilder.ClientOrDie("shared-informers") + // TODO replace sharedInformers with newSharedInformers + sharedInformers := informers.NewSharedInformerFactory(versionedClient, nil, ResyncPeriod(s)()) + newSharedInformers := newinformers.NewSharedInformerFactory(nil, versionedClient, ResyncPeriod(s)()) // always start the SA token controller first using a full-power client, since it needs to mint tokens for the rest if len(s.ServiceAccountKeyFile) > 0 { @@ -366,6 +375,7 @@ func StartControllers(controllers map[string]InitFunc, s *options.CMServer, root ctx := ControllerContext{ ClientBuilder: clientBuilder, InformerFactory: sharedInformers, + NewInformerFactory: newSharedInformers, Options: *s, AvailableResources: availableResources, Stop: stop, @@ -406,11 +416,24 @@ func StartControllers(controllers map[string]InitFunc, s *options.CMServer, root glog.Warningf("Unsuccessful parsing of service CIDR %v: %v", s.ServiceCIDR, err) } nodeController, err := nodecontroller.NewNodeController( - sharedInformers.Pods(), sharedInformers.Nodes(), sharedInformers.DaemonSets(), - cloud, clientBuilder.ClientOrDie("node-controller"), - s.PodEvictionTimeout.Duration, s.NodeEvictionRate, s.SecondaryNodeEvictionRate, s.LargeClusterSizeThreshold, s.UnhealthyZoneThreshold, s.NodeMonitorGracePeriod.Duration, - 
s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, serviceCIDR, - int(s.NodeCIDRMaskSize), s.AllocateNodeCIDRs) + newSharedInformers.Core().V1().Pods(), + newSharedInformers.Core().V1().Nodes(), + newSharedInformers.Extensions().V1beta1().DaemonSets(), + cloud, + clientBuilder.ClientOrDie("node-controller"), + s.PodEvictionTimeout.Duration, + s.NodeEvictionRate, + s.SecondaryNodeEvictionRate, + s.LargeClusterSizeThreshold, + s.UnhealthyZoneThreshold, + s.NodeMonitorGracePeriod.Duration, + s.NodeStartupGracePeriod.Duration, + s.NodeMonitorPeriod.Duration, + clusterCIDR, + serviceCIDR, + int(s.NodeCIDRMaskSize), + s.AllocateNodeCIDRs, + ) if err != nil { return fmt.Errorf("failed to initialize nodecontroller: %v", err) } @@ -463,10 +486,10 @@ func StartControllers(controllers map[string]InitFunc, s *options.CMServer, root attachDetachController, attachDetachControllerErr := attachdetach.NewAttachDetachController( clientBuilder.ClientOrDie("attachdetach-controller"), - sharedInformers.Pods().Informer(), - sharedInformers.Nodes().Informer(), - sharedInformers.PersistentVolumeClaims().Informer(), - sharedInformers.PersistentVolumes().Informer(), + newSharedInformers.Core().V1().Pods(), + newSharedInformers.Core().V1().Nodes(), + newSharedInformers.Core().V1().PersistentVolumeClaims(), + newSharedInformers.Core().V1().PersistentVolumes(), cloud, ProbeAttachableVolumePlugins(s.VolumeConfiguration), s.DisableAttachDetachReconcilerSync, @@ -478,7 +501,9 @@ func StartControllers(controllers map[string]InitFunc, s *options.CMServer, root go attachDetachController.Run(stop) time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) + // TODO replace sharedInformers with newSharedInformers sharedInformers.Start(stop) + newSharedInformers.Start(stop) select {} } diff --git a/cmd/kube-controller-manager/app/core.go b/cmd/kube-controller-manager/app/core.go index 133b52e348..6c61e03aad 100644 --- a/cmd/kube-controller-manager/app/core.go +++ b/cmd/kube-controller-manager/app/core.go @@ -53,8 +53,8 @@ func startEndpointController(ctx ControllerContext) (bool, error) { func startReplicationController(ctx ControllerContext) (bool, error) { go replicationcontroller.NewReplicationManager( - ctx.InformerFactory.Pods().Informer(), - ctx.InformerFactory.ReplicationControllers().Informer(), + ctx.NewInformerFactory.Core().V1().Pods(), + ctx.NewInformerFactory.Core().V1().ReplicationControllers(), ctx.ClientBuilder.ClientOrDie("replication-controller"), replicationcontroller.BurstReplicas, int(ctx.Options.LookupCacheSizeForRC), @@ -66,7 +66,7 @@ func startReplicationController(ctx ControllerContext) (bool, error) { func startPodGCController(ctx ControllerContext) (bool, error) { go podgc.NewPodGC( ctx.ClientBuilder.ClientOrDie("pod-garbage-collector"), - ctx.InformerFactory.Pods().Informer(), + ctx.NewInformerFactory.Core().V1().Pods(), int(ctx.Options.TerminatedPodGCThreshold), ).Run(ctx.Stop) return true, nil diff --git a/cmd/kube-controller-manager/app/extensions.go b/cmd/kube-controller-manager/app/extensions.go index c7ac8f88f3..3fe4ea6b41 100644 --- a/cmd/kube-controller-manager/app/extensions.go +++ b/cmd/kube-controller-manager/app/extensions.go @@ -32,9 +32,9 @@ func startDaemonSetController(ctx ControllerContext) (bool, error) { return false, nil } go daemon.NewDaemonSetsController( - ctx.InformerFactory.DaemonSets(), - ctx.InformerFactory.Pods(), - ctx.InformerFactory.Nodes(), + ctx.NewInformerFactory.Extensions().V1beta1().DaemonSets(), + 
ctx.NewInformerFactory.Core().V1().Pods(), + ctx.NewInformerFactory.Core().V1().Nodes(), ctx.ClientBuilder.ClientOrDie("daemon-set-controller"), int(ctx.Options.LookupCacheSizeForDaemonSet), ).Run(int(ctx.Options.ConcurrentDaemonSetSyncs), ctx.Stop) @@ -46,9 +46,9 @@ func startDeploymentController(ctx ControllerContext) (bool, error) { return false, nil } go deployment.NewDeploymentController( - ctx.InformerFactory.Deployments(), - ctx.InformerFactory.ReplicaSets(), - ctx.InformerFactory.Pods(), + ctx.NewInformerFactory.Extensions().V1beta1().Deployments(), + ctx.NewInformerFactory.Extensions().V1beta1().ReplicaSets(), + ctx.NewInformerFactory.Core().V1().Pods(), ctx.ClientBuilder.ClientOrDie("deployment-controller"), ).Run(int(ctx.Options.ConcurrentDeploymentSyncs), ctx.Stop) return true, nil @@ -59,8 +59,8 @@ func startReplicaSetController(ctx ControllerContext) (bool, error) { return false, nil } go replicaset.NewReplicaSetController( - ctx.InformerFactory.ReplicaSets(), - ctx.InformerFactory.Pods(), + ctx.NewInformerFactory.Extensions().V1beta1().ReplicaSets(), + ctx.NewInformerFactory.Core().V1().Pods(), ctx.ClientBuilder.ClientOrDie("replicaset-controller"), replicaset.BurstReplicas, int(ctx.Options.LookupCacheSizeForRS), diff --git a/pkg/controller/cloud/BUILD b/pkg/controller/cloud/BUILD index 657b01230b..73ebc4deec 100644 --- a/pkg/controller/cloud/BUILD +++ b/pkg/controller/cloud/BUILD @@ -16,8 +16,8 @@ go_library( "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", + "//pkg/client/informers/informers_generated/core/v1:go_default_library", "//pkg/cloudprovider:go_default_library", - "//pkg/controller/informers:go_default_library", "//vendor:github.com/golang/glog", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/types", @@ -38,10 +38,10 @@ go_test( "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", "//pkg/client/clientset_generated/clientset/fake:go_default_library", + "//pkg/client/informers/informers_generated:go_default_library", "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/fake:go_default_library", "//pkg/controller:go_default_library", - "//pkg/controller/informers:go_default_library", "//pkg/controller/node/testutil:go_default_library", "//vendor:github.com/golang/glog", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", diff --git a/pkg/controller/cloud/nodecontroller.go b/pkg/controller/cloud/nodecontroller.go index fac2a7b721..82ce93cad5 100644 --- a/pkg/controller/cloud/nodecontroller.go +++ b/pkg/controller/cloud/nodecontroller.go @@ -32,12 +32,12 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" + coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1" "k8s.io/kubernetes/pkg/cloudprovider" - "k8s.io/kubernetes/pkg/controller/informers" ) type CloudNodeController struct { - nodeInformer informers.NodeInformer + nodeInformer coreinformers.NodeInformer kubeClient clientset.Interface recorder record.EventRecorder @@ -59,7 +59,7 @@ const ( // NewCloudNodeController creates a CloudNodeController object func NewCloudNodeController( - nodeInformer informers.NodeInformer, + nodeInformer coreinformers.NodeInformer, kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeMonitorPeriod time.Duration) (*CloudNodeController, error) { diff --git a/pkg/controller/cloud/nodecontroller_test.go 
b/pkg/controller/cloud/nodecontroller_test.go index 9eea3afc04..8884dd8544 100644 --- a/pkg/controller/cloud/nodecontroller_test.go +++ b/pkg/controller/cloud/nodecontroller_test.go @@ -30,10 +30,10 @@ import ( clientv1 "k8s.io/client-go/pkg/api/v1" "k8s.io/client-go/tools/record" "k8s.io/kubernetes/pkg/api" + informers "k8s.io/kubernetes/pkg/client/informers/informers_generated" "k8s.io/kubernetes/pkg/cloudprovider" fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/controller/node/testutil" ) @@ -99,12 +99,12 @@ func TestNodeDeleted(t *testing.T) { DeleteWaitChan: make(chan struct{}), } - factory := informers.NewSharedInformerFactory(fnh, nil, controller.NoResyncPeriodFunc()) + factory := informers.NewSharedInformerFactory(nil, fnh, controller.NoResyncPeriodFunc()) eventBroadcaster := record.NewBroadcaster() cloudNodeController := &CloudNodeController{ kubeClient: fnh, - nodeInformer: factory.Nodes(), + nodeInformer: factory.Core().V1().Nodes(), cloud: &fakecloud.FakeCloud{Err: cloudprovider.InstanceNotFound}, nodeMonitorPeriod: 5 * time.Second, recorder: eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: "controllermanager"}), diff --git a/pkg/controller/daemon/BUILD b/pkg/controller/daemon/BUILD index 36d6aa9f47..3423fb2440 100644 --- a/pkg/controller/daemon/BUILD +++ b/pkg/controller/daemon/BUILD @@ -21,13 +21,16 @@ go_library( "//pkg/apis/extensions/v1beta1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library", - "//pkg/client/legacylisters:go_default_library", + "//pkg/client/informers/informers_generated/core/v1:go_default_library", + "//pkg/client/informers/informers_generated/extensions/v1beta1:go_default_library", + "//pkg/client/listers/core/v1:go_default_library", + "//pkg/client/listers/extensions/v1beta1:go_default_library", "//pkg/controller:go_default_library", - "//pkg/controller/informers:go_default_library", "//pkg/util/metrics:go_default_library", "//plugin/pkg/scheduler/algorithm/predicates:go_default_library", "//plugin/pkg/scheduler/schedulercache:go_default_library", "//vendor:github.com/golang/glog", + "//vendor:k8s.io/apimachinery/pkg/api/errors", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/labels", "//vendor:k8s.io/apimachinery/pkg/util/errors", @@ -52,8 +55,8 @@ go_test( "//pkg/api/v1:go_default_library", "//pkg/apis/extensions/v1beta1:go_default_library", "//pkg/client/clientset_generated/clientset/fake:go_default_library", + "//pkg/client/informers/informers_generated:go_default_library", "//pkg/controller:go_default_library", - "//pkg/controller/informers:go_default_library", "//pkg/securitycontext:go_default_library", "//vendor:k8s.io/apimachinery/pkg/api/resource", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", diff --git a/pkg/controller/daemon/daemoncontroller.go b/pkg/controller/daemon/daemoncontroller.go index d0c54ff8e8..f5fa1ead90 100644 --- a/pkg/controller/daemon/daemoncontroller.go +++ b/pkg/controller/daemon/daemoncontroller.go @@ -23,6 +23,7 @@ import ( "sync" "time" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -38,9 +39,11 @@ import ( extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" 
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset" unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1" - "k8s.io/kubernetes/pkg/client/legacylisters" + coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1" + extensionsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/extensions/v1beta1" + corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1" + extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/util/metrics" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" @@ -74,18 +77,18 @@ type DaemonSetsController struct { syncHandler func(dsKey string) error // A TTLCache of pod creates/deletes each ds expects to see expectations controller.ControllerExpectationsInterface - // A store of daemon sets - dsStore *listers.StoreToDaemonSetLister - // A store of pods - podStore *listers.StoreToPodLister - // A store of nodes - nodeStore *listers.StoreToNodeLister + // dsLister can list/get daemonsets from the shared informer's store + dsLister extensionslisters.DaemonSetLister // dsStoreSynced returns true if the daemonset store has been synced at least once. // Added as a member to the struct to allow injection for testing. dsStoreSynced cache.InformerSynced + // podLister get list/get pods from the shared informers's store + podLister corelisters.PodLister // podStoreSynced returns true if the pod store has been synced at least once. // Added as a member to the struct to allow injection for testing. podStoreSynced cache.InformerSynced + // nodeLister can list/get nodes from the shared informer's store + nodeLister corelisters.NodeLister // nodeStoreSynced returns true if the node store has been synced at least once. // Added as a member to the struct to allow injection for testing. nodeStoreSynced cache.InformerSynced @@ -96,7 +99,7 @@ type DaemonSetsController struct { queue workqueue.RateLimitingInterface } -func NewDaemonSetsController(daemonSetInformer informers.DaemonSetInformer, podInformer informers.PodInformer, nodeInformer informers.NodeInformer, kubeClient clientset.Interface, lookupCacheSize int) *DaemonSetsController { +func NewDaemonSetsController(daemonSetInformer extensionsinformers.DaemonSetInformer, podInformer coreinformers.PodInformer, nodeInformer coreinformers.NodeInformer, kubeClient clientset.Interface, lookupCacheSize int) *DaemonSetsController { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) // TODO: remove the wrapper when every clients have moved to use the clientset. @@ -146,7 +149,7 @@ func NewDaemonSetsController(daemonSetInformer informers.DaemonSetInformer, podI }, DeleteFunc: dsc.deleteDaemonset, }) - dsc.dsStore = daemonSetInformer.Lister() + dsc.dsLister = daemonSetInformer.Lister() dsc.dsStoreSynced = daemonSetInformer.Informer().HasSynced // Watch for creation/deletion of pods. 
The reason we watch is that we don't want a daemon set to create/delete @@ -156,7 +159,7 @@ func NewDaemonSetsController(daemonSetInformer informers.DaemonSetInformer, podI UpdateFunc: dsc.updatePod, DeleteFunc: dsc.deletePod, }) - dsc.podStore = podInformer.Lister() + dsc.podLister = podInformer.Lister() dsc.podStoreSynced = podInformer.Informer().HasSynced nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -165,7 +168,7 @@ func NewDaemonSetsController(daemonSetInformer informers.DaemonSetInformer, podI }, ) dsc.nodeStoreSynced = nodeInformer.Informer().HasSynced - dsc.nodeStore = nodeInformer.Lister() + dsc.nodeLister = nodeInformer.Lister() dsc.syncHandler = dsc.syncDaemonSet dsc.lookupCache = controller.NewMatchingCache(lookupCacheSize) @@ -198,6 +201,7 @@ func (dsc *DaemonSetsController) Run(workers int, stopCh <-chan struct{}) { glog.Infof("Starting Daemon Sets controller manager") if !cache.WaitForCacheSync(stopCh, dsc.podStoreSynced, dsc.nodeStoreSynced, dsc.dsStoreSynced) { + utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync")) return } @@ -258,7 +262,7 @@ func (dsc *DaemonSetsController) getPodDaemonSet(pod *v1.Pod) *extensions.Daemon return ds } } - sets, err := dsc.dsStore.GetPodDaemonSets(pod) + sets, err := dsc.dsLister.GetPodDaemonSets(pod) if err != nil { glog.V(4).Infof("No daemon sets found for pod %v, daemon set controller will avoid syncing", pod.Name) return nil @@ -272,16 +276,16 @@ func (dsc *DaemonSetsController) getPodDaemonSet(pod *v1.Pod) *extensions.Daemon } // update lookup cache - dsc.lookupCache.Update(pod, &sets[0]) + dsc.lookupCache.Update(pod, sets[0]) - return &sets[0] + return sets[0] } // isCacheValid check if the cache is valid func (dsc *DaemonSetsController) isCacheValid(pod *v1.Pod, cachedDS *extensions.DaemonSet) bool { - _, exists, err := dsc.dsStore.Get(cachedDS) + _, err := dsc.dsLister.DaemonSets(cachedDS.Namespace).Get(cachedDS.Name) // ds has been deleted or updated, cache is invalid - if err != nil || !exists || !isDaemonSetMatch(pod, cachedDS) { + if err != nil || !isDaemonSetMatch(pod, cachedDS) { return false } return true @@ -380,14 +384,14 @@ func (dsc *DaemonSetsController) deletePod(obj interface{}) { func (dsc *DaemonSetsController) addNode(obj interface{}) { // TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too). - dsList, err := dsc.dsStore.List() + dsList, err := dsc.dsLister.List(labels.Everything()) if err != nil { glog.V(4).Infof("Error enqueueing daemon sets: %v", err) return } node := obj.(*v1.Node) - for i := range dsList.Items { - ds := &dsList.Items[i] + for i := range dsList { + ds := dsList[i] _, shouldSchedule, _, err := dsc.nodeShouldRunDaemonPod(node, ds) if err != nil { continue @@ -405,14 +409,14 @@ func (dsc *DaemonSetsController) updateNode(old, cur interface{}) { // If node labels didn't change, we can ignore this update. return } - dsList, err := dsc.dsStore.List() + dsList, err := dsc.dsLister.List(labels.Everything()) if err != nil { glog.V(4).Infof("Error enqueueing daemon sets: %v", err) return } // TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too). 
- for i := range dsList.Items { - ds := &dsList.Items[i] + for i := range dsList { + ds := dsList[i] _, oldShouldSchedule, oldShouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(oldNode, ds) if err != nil { continue @@ -434,7 +438,7 @@ func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *extensions.DaemonSet) if err != nil { return nil, err } - daemonPods, err := dsc.podStore.Pods(ds.Namespace).List(selector) + daemonPods, err := dsc.podLister.Pods(ds.Namespace).List(selector) if err != nil { return nodeToDaemonPods, err } @@ -456,14 +460,15 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) error { // For each node, if the node is running the daemon pod but isn't supposed to, kill the daemon // pod. If the node is supposed to run the daemon pod, but isn't, create the daemon pod on the node. - nodeList, err := dsc.nodeStore.List() + nodeList, err := dsc.nodeLister.List(labels.Everything()) if err != nil { return fmt.Errorf("couldn't get list of nodes when syncing daemon set %#v: %v", ds, err) } var nodesNeedingDaemonPods, podsToDelete []string var failedPodsObserved int - for _, node := range nodeList.Items { - _, shouldSchedule, shouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(&node, ds) + for i := range nodeList { + node := nodeList[i] + _, shouldSchedule, shouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(node, ds) if err != nil { continue } @@ -615,14 +620,15 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet) return fmt.Errorf("error getting node to daemon pod mapping for daemon set %#v: %v", ds, err) } - nodeList, err := dsc.nodeStore.List() + nodeList, err := dsc.nodeLister.List(labels.Everything()) if err != nil { return fmt.Errorf("couldn't get list of nodes when updating daemon set %#v: %v", ds, err) } var desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady int - for _, node := range nodeList.Items { - wantToRun, _, _, err := dsc.nodeShouldRunDaemonPod(&node, ds) + for i := range nodeList { + node := nodeList[i] + wantToRun, _, _, err := dsc.nodeShouldRunDaemonPod(node, ds) if err != nil { return err } @@ -661,16 +667,19 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error { glog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Now().Sub(startTime)) }() - obj, exists, err := dsc.dsStore.Store.GetByKey(key) + namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { - return fmt.Errorf("unable to retrieve ds %v from store: %v", key, err) + return err } - if !exists { + ds, err := dsc.dsLister.DaemonSets(namespace).Get(name) + if errors.IsNotFound(err) { glog.V(3).Infof("daemon set has been deleted %v", key) dsc.expectations.DeleteExpectations(key) return nil } - ds := obj.(*extensions.DaemonSet) + if err != nil { + return fmt.Errorf("unable to retrieve ds %v from store: %v", key, err) + } everything := metav1.LabelSelector{} if reflect.DeepEqual(ds.Spec.Selector, &everything) { @@ -733,8 +742,12 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten pods := []*v1.Pod{} - for _, m := range dsc.podStore.Indexer.List() { - pod := m.(*v1.Pod) + podList, err := dsc.podLister.List(labels.Everything()) + if err != nil { + return false, false, false, err + } + for i := range podList { + pod := podList[i] if pod.Spec.NodeName != node.Name { continue } @@ -807,7 +820,7 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten } // byCreationTimestamp sorts a list by creation timestamp, using 
their names as a tie breaker. -type byCreationTimestamp []extensions.DaemonSet +type byCreationTimestamp []*extensions.DaemonSet func (o byCreationTimestamp) Len() int { return len(o) } func (o byCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } diff --git a/pkg/controller/daemon/daemoncontroller_test.go b/pkg/controller/daemon/daemoncontroller_test.go index 7f714d516b..c50b4727c3 100644 --- a/pkg/controller/daemon/daemoncontroller_test.go +++ b/pkg/controller/daemon/daemoncontroller_test.go @@ -32,8 +32,8 @@ import ( "k8s.io/kubernetes/pkg/api/v1" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake" + informers "k8s.io/kubernetes/pkg/client/informers/informers_generated" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/securitycontext" ) @@ -147,11 +147,25 @@ func addFailedPods(podStore cache.Store, nodeName string, label map[string]strin } } -func newTestController(initialObjects ...runtime.Object) (*DaemonSetsController, *controller.FakePodControl, *fake.Clientset) { - clientset := fake.NewSimpleClientset(initialObjects...) - informerFactory := informers.NewSharedInformerFactory(clientset, nil, controller.NoResyncPeriodFunc()) +type daemonSetsController struct { + *DaemonSetsController - manager := NewDaemonSetsController(informerFactory.DaemonSets(), informerFactory.Pods(), informerFactory.Nodes(), clientset, 0) + dsStore cache.Store + podStore cache.Store + nodeStore cache.Store +} + +func newTestController(initialObjects ...runtime.Object) (*daemonSetsController, *controller.FakePodControl, *fake.Clientset) { + clientset := fake.NewSimpleClientset(initialObjects...) + informerFactory := informers.NewSharedInformerFactory(nil, clientset, controller.NoResyncPeriodFunc()) + + manager := NewDaemonSetsController( + informerFactory.Extensions().V1beta1().DaemonSets(), + informerFactory.Core().V1().Pods(), + informerFactory.Core().V1().Nodes(), + clientset, + 0, + ) manager.eventRecorder = record.NewFakeRecorder(100) manager.podStoreSynced = alwaysReady @@ -159,7 +173,13 @@ func newTestController(initialObjects ...runtime.Object) (*DaemonSetsController, manager.dsStoreSynced = alwaysReady podControl := &controller.FakePodControl{} manager.podControl = podControl - return manager, podControl, clientset + + return &daemonSetsController{ + manager, + informerFactory.Extensions().V1beta1().DaemonSets().Informer().GetStore(), + informerFactory.Core().V1().Pods().Informer().GetStore(), + informerFactory.Core().V1().Nodes().Informer().GetStore(), + }, podControl, clientset } func validateSyncDaemonSets(t *testing.T, fakePodControl *controller.FakePodControl, expectedCreates, expectedDeletes int) { @@ -171,7 +191,7 @@ func validateSyncDaemonSets(t *testing.T, fakePodControl *controller.FakePodCont } } -func syncAndValidateDaemonSets(t *testing.T, manager *DaemonSetsController, ds *extensions.DaemonSet, podControl *controller.FakePodControl, expectedCreates, expectedDeletes int) { +func syncAndValidateDaemonSets(t *testing.T, manager *daemonSetsController, ds *extensions.DaemonSet, podControl *controller.FakePodControl, expectedCreates, expectedDeletes int) { key, err := controller.KeyFunc(ds) if err != nil { t.Errorf("Could not get key for daemon.") @@ -182,7 +202,7 @@ func syncAndValidateDaemonSets(t *testing.T, manager *DaemonSetsController, ds * func TestDeleteFinalStateUnknown(t *testing.T) { manager, _, _ := newTestController() - 
addNodes(manager.nodeStore.Store, 0, 1, nil) + addNodes(manager.nodeStore, 0, 1, nil) ds := newDaemonSet("foo") // DeletedFinalStateUnknown should queue the embedded DS if found. manager.deleteDaemonset(cache.DeletedFinalStateUnknown{Key: "foo", Obj: ds}) @@ -195,7 +215,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) { // DaemonSets without node selectors should launch pods on every node. func TestSimpleDaemonSetLaunchesPods(t *testing.T) { manager, podControl, _ := newTestController() - addNodes(manager.nodeStore.Store, 0, 5, nil) + addNodes(manager.nodeStore, 0, 5, nil) ds := newDaemonSet("foo") manager.dsStore.Add(ds) syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0) @@ -269,7 +289,7 @@ func TestInsufficentCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) { node := newNode("too-much-mem", nil) node.Status.Allocatable = allocatableResources("100M", "200m") manager.nodeStore.Add(node) - manager.podStore.Indexer.Add(&v1.Pod{ + manager.podStore.Add(&v1.Pod{ Spec: podSpec, }) ds := newDaemonSet("foo") @@ -286,7 +306,7 @@ func TestInsufficentCapacityNodeDaemonDoesNotUnscheduleRunningPod(t *testing.T) node := newNode("too-much-mem", nil) node.Status.Allocatable = allocatableResources("100M", "200m") manager.nodeStore.Add(node) - manager.podStore.Indexer.Add(&v1.Pod{ + manager.podStore.Add(&v1.Pod{ Spec: podSpec, }) ds := newDaemonSet("foo") @@ -301,7 +321,7 @@ func TestSufficentCapacityWithTerminatedPodsDaemonLaunchesPod(t *testing.T) { node := newNode("too-much-mem", nil) node.Status.Allocatable = allocatableResources("100M", "200m") manager.nodeStore.Add(node) - manager.podStore.Indexer.Add(&v1.Pod{ + manager.podStore.Add(&v1.Pod{ Spec: podSpec, Status: v1.PodStatus{Phase: v1.PodSucceeded}, }) @@ -318,7 +338,7 @@ func TestSufficentCapacityNodeDaemonLaunchesPod(t *testing.T) { node := newNode("not-too-much-mem", nil) node.Status.Allocatable = allocatableResources("200M", "200m") manager.nodeStore.Add(node) - manager.podStore.Indexer.Add(&v1.Pod{ + manager.podStore.Add(&v1.Pod{ Spec: podSpec, }) ds := newDaemonSet("foo") @@ -334,7 +354,7 @@ func TestDontDoAnythingIfBeingDeleted(t *testing.T) { node := newNode("not-too-much-mem", nil) node.Status.Allocatable = allocatableResources("200M", "200m") manager.nodeStore.Add(node) - manager.podStore.Indexer.Add(&v1.Pod{ + manager.podStore.Add(&v1.Pod{ Spec: podSpec, }) ds := newDaemonSet("foo") @@ -358,7 +378,7 @@ func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) { manager, podControl, _ := newTestController() node := newNode("port-conflict", nil) manager.nodeStore.Add(node) - manager.podStore.Indexer.Add(&v1.Pod{ + manager.podStore.Add(&v1.Pod{ Spec: podSpec, }) @@ -384,7 +404,7 @@ func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) { manager, podControl, _ := newTestController() node := newNode("port-conflict", nil) manager.nodeStore.Add(node) - manager.podStore.Indexer.Add(&v1.Pod{ + manager.podStore.Add(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Labels: simpleDaemonSetLabel, Namespace: metav1.NamespaceDefault, @@ -418,7 +438,7 @@ func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) { manager, podControl, _ := newTestController() node := newNode("no-port-conflict", nil) manager.nodeStore.Add(node) - manager.podStore.Indexer.Add(&v1.Pod{ + manager.podStore.Add(&v1.Pod{ Spec: podSpec1, }) ds := newDaemonSet("foo") @@ -432,9 +452,9 @@ func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) { // issue https://github.com/kubernetes/kubernetes/pull/23223 func 
TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) { manager, podControl, _ := newTestController() - manager.nodeStore.Store.Add(newNode("node1", nil)) + manager.nodeStore.Add(newNode("node1", nil)) // Create pod not controlled by a daemonset. - manager.podStore.Indexer.Add(&v1.Pod{ + manager.podStore.Add(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"bang": "boom"}, Namespace: metav1.NamespaceDefault, @@ -465,11 +485,11 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) { // Controller should not create pods on nodes which have daemon pods, and should remove excess pods from nodes that have extra pods. func TestDealsWithExistingPods(t *testing.T) { manager, podControl, _ := newTestController() - addNodes(manager.nodeStore.Store, 0, 5, nil) - addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 1) - addPods(manager.podStore.Indexer, "node-2", simpleDaemonSetLabel, 2) - addPods(manager.podStore.Indexer, "node-3", simpleDaemonSetLabel, 5) - addPods(manager.podStore.Indexer, "node-4", simpleDaemonSetLabel2, 2) + addNodes(manager.nodeStore, 0, 5, nil) + addPods(manager.podStore, "node-1", simpleDaemonSetLabel, 1) + addPods(manager.podStore, "node-2", simpleDaemonSetLabel, 2) + addPods(manager.podStore, "node-3", simpleDaemonSetLabel, 5) + addPods(manager.podStore, "node-4", simpleDaemonSetLabel2, 2) ds := newDaemonSet("foo") manager.dsStore.Add(ds) syncAndValidateDaemonSets(t, manager, ds, podControl, 2, 5) @@ -478,8 +498,8 @@ func TestDealsWithExistingPods(t *testing.T) { // Daemon with node selector should launch pods on nodes matching selector. func TestSelectorDaemonLaunchesPods(t *testing.T) { manager, podControl, _ := newTestController() - addNodes(manager.nodeStore.Store, 0, 4, nil) - addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel) + addNodes(manager.nodeStore, 0, 4, nil) + addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) daemon := newDaemonSet("foo") daemon.Spec.Template.Spec.NodeSelector = simpleNodeLabel manager.dsStore.Add(daemon) @@ -489,12 +509,12 @@ func TestSelectorDaemonLaunchesPods(t *testing.T) { // Daemon with node selector should delete pods from nodes that do not satisfy selector. func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) { manager, podControl, _ := newTestController() - addNodes(manager.nodeStore.Store, 0, 5, nil) - addNodes(manager.nodeStore.Store, 5, 5, simpleNodeLabel) - addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel2, 2) - addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 3) - addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel2, 1) - addPods(manager.podStore.Indexer, "node-4", simpleDaemonSetLabel, 1) + addNodes(manager.nodeStore, 0, 5, nil) + addNodes(manager.nodeStore, 5, 5, simpleNodeLabel) + addPods(manager.podStore, "node-0", simpleDaemonSetLabel2, 2) + addPods(manager.podStore, "node-1", simpleDaemonSetLabel, 3) + addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, 1) + addPods(manager.podStore, "node-4", simpleDaemonSetLabel, 1) daemon := newDaemonSet("foo") daemon.Spec.Template.Spec.NodeSelector = simpleNodeLabel manager.dsStore.Add(daemon) @@ -504,16 +524,16 @@ func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) { // DaemonSet with node selector should launch pods on nodes matching selector, but also deal with existing pods on nodes. 
func TestSelectorDaemonDealsWithExistingPods(t *testing.T) { manager, podControl, _ := newTestController() - addNodes(manager.nodeStore.Store, 0, 5, nil) - addNodes(manager.nodeStore.Store, 5, 5, simpleNodeLabel) - addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel, 1) - addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 3) - addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel2, 2) - addPods(manager.podStore.Indexer, "node-2", simpleDaemonSetLabel, 4) - addPods(manager.podStore.Indexer, "node-6", simpleDaemonSetLabel, 13) - addPods(manager.podStore.Indexer, "node-7", simpleDaemonSetLabel2, 4) - addPods(manager.podStore.Indexer, "node-9", simpleDaemonSetLabel, 1) - addPods(manager.podStore.Indexer, "node-9", simpleDaemonSetLabel2, 1) + addNodes(manager.nodeStore, 0, 5, nil) + addNodes(manager.nodeStore, 5, 5, simpleNodeLabel) + addPods(manager.podStore, "node-0", simpleDaemonSetLabel, 1) + addPods(manager.podStore, "node-1", simpleDaemonSetLabel, 3) + addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, 2) + addPods(manager.podStore, "node-2", simpleDaemonSetLabel, 4) + addPods(manager.podStore, "node-6", simpleDaemonSetLabel, 13) + addPods(manager.podStore, "node-7", simpleDaemonSetLabel2, 4) + addPods(manager.podStore, "node-9", simpleDaemonSetLabel, 1) + addPods(manager.podStore, "node-9", simpleDaemonSetLabel2, 1) ds := newDaemonSet("foo") ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel manager.dsStore.Add(ds) @@ -523,8 +543,8 @@ func TestSelectorDaemonDealsWithExistingPods(t *testing.T) { // DaemonSet with node selector which does not match any node labels should not launch pods. func TestBadSelectorDaemonDoesNothing(t *testing.T) { manager, podControl, _ := newTestController() - addNodes(manager.nodeStore.Store, 0, 4, nil) - addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel) + addNodes(manager.nodeStore, 0, 4, nil) + addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) ds := newDaemonSet("foo") ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel2 manager.dsStore.Add(ds) @@ -534,7 +554,7 @@ func TestBadSelectorDaemonDoesNothing(t *testing.T) { // DaemonSet with node name should launch pod on node with corresponding name. func TestNameDaemonSetLaunchesPods(t *testing.T) { manager, podControl, _ := newTestController() - addNodes(manager.nodeStore.Store, 0, 5, nil) + addNodes(manager.nodeStore, 0, 5, nil) ds := newDaemonSet("foo") ds.Spec.Template.Spec.NodeName = "node-0" manager.dsStore.Add(ds) @@ -544,7 +564,7 @@ func TestNameDaemonSetLaunchesPods(t *testing.T) { // DaemonSet with node name that does not exist should not launch pods. func TestBadNameDaemonSetDoesNothing(t *testing.T) { manager, podControl, _ := newTestController() - addNodes(manager.nodeStore.Store, 0, 5, nil) + addNodes(manager.nodeStore, 0, 5, nil) ds := newDaemonSet("foo") ds.Spec.Template.Spec.NodeName = "node-10" manager.dsStore.Add(ds) @@ -554,8 +574,8 @@ func TestBadNameDaemonSetDoesNothing(t *testing.T) { // DaemonSet with node selector, and node name, matching a node, should launch a pod on the node. 
func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) { manager, podControl, _ := newTestController() - addNodes(manager.nodeStore.Store, 0, 4, nil) - addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel) + addNodes(manager.nodeStore, 0, 4, nil) + addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) ds := newDaemonSet("foo") ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel ds.Spec.Template.Spec.NodeName = "node-6" @@ -566,8 +586,8 @@ func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) { // DaemonSet with node selector that matches some nodes, and node name that matches a different node, should do nothing. func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) { manager, podControl, _ := newTestController() - addNodes(manager.nodeStore.Store, 0, 4, nil) - addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel) + addNodes(manager.nodeStore, 0, 4, nil) + addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) ds := newDaemonSet("foo") ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel ds.Spec.Template.Spec.NodeName = "node-0" @@ -578,8 +598,8 @@ func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) { // Daemon with node affinity should launch pods on nodes matching affinity. func TestNodeAffinityDaemonLaunchesPods(t *testing.T) { manager, podControl, _ := newTestController() - addNodes(manager.nodeStore.Store, 0, 4, nil) - addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel) + addNodes(manager.nodeStore, 0, 4, nil) + addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) daemon := newDaemonSet("foo") daemon.Spec.Template.Spec.Affinity = &v1.Affinity{ NodeAffinity: &v1.NodeAffinity{ @@ -615,9 +635,9 @@ func TestNumberReadyStatus(t *testing.T) { } return false, nil, nil }) - addNodes(manager.nodeStore.Store, 0, 2, simpleNodeLabel) - addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel, 1) - addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 1) + addNodes(manager.nodeStore, 0, 2, simpleNodeLabel) + addPods(manager.podStore, "node-0", simpleDaemonSetLabel, 1) + addPods(manager.podStore, "node-1", simpleDaemonSetLabel, 1) manager.dsStore.Add(daemon) syncAndValidateDaemonSets(t, manager, daemon, podControl, 0, 0) @@ -626,7 +646,7 @@ func TestNumberReadyStatus(t *testing.T) { } selector, _ := metav1.LabelSelectorAsSelector(daemon.Spec.Selector) - daemonPods, _ := manager.podStore.Pods(daemon.Namespace).List(selector) + daemonPods, _ := manager.podLister.Pods(daemon.Namespace).List(selector) for _, pod := range daemonPods { condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue} pod.Status.Conditions = append(pod.Status.Conditions, condition) @@ -653,8 +673,8 @@ func TestObservedGeneration(t *testing.T) { return false, nil, nil }) - addNodes(manager.nodeStore.Store, 0, 1, simpleNodeLabel) - addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel, 1) + addNodes(manager.nodeStore, 0, 1, simpleNodeLabel) + addPods(manager.podStore, "node-0", simpleDaemonSetLabel, 1) manager.dsStore.Add(daemon) syncAndValidateDaemonSets(t, manager, daemon, podControl, 0, 0) @@ -679,9 +699,9 @@ func TestDaemonKillFailedPods(t *testing.T) { for _, test := range tests { t.Logf("test case: %s\n", test.test) manager, podControl, _ := newTestController() - addNodes(manager.nodeStore.Store, 0, 1, nil) - addFailedPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel, test.numFailedPods) - addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel, test.numNormalPods) + addNodes(manager.nodeStore, 0, 1, nil) + 
addFailedPods(manager.podStore, "node-0", simpleDaemonSetLabel, test.numFailedPods) + addPods(manager.podStore, "node-0", simpleDaemonSetLabel, test.numNormalPods) ds := newDaemonSet("foo") manager.dsStore.Add(ds) syncAndValidateDaemonSets(t, manager, ds, podControl, test.expectedCreates, test.expectedDeletes) @@ -782,9 +802,9 @@ func TestNodeShouldRunDaemonPod(t *testing.T) { node := newNode("test-node", nil) node.Status.Allocatable = allocatableResources("100M", "1") manager, _, _ := newTestController() - manager.nodeStore.Store.Add(node) + manager.nodeStore.Add(node) for _, p := range c.podsOnNode { - manager.podStore.Indexer.Add(p) + manager.podStore.Add(p) p.Spec.NodeName = "test-node" } wantToRun, shouldSchedule, shouldContinueRunning, err := manager.nodeShouldRunDaemonPod(node, c.ds) diff --git a/pkg/controller/deployment/BUILD b/pkg/controller/deployment/BUILD index 11377f3248..e98896a5c9 100644 --- a/pkg/controller/deployment/BUILD +++ b/pkg/controller/deployment/BUILD @@ -24,10 +24,12 @@ go_library( "//pkg/api/v1:go_default_library", "//pkg/apis/extensions/v1beta1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", - "//pkg/client/legacylisters:go_default_library", + "//pkg/client/informers/informers_generated/core/v1:go_default_library", + "//pkg/client/informers/informers_generated/extensions/v1beta1:go_default_library", + "//pkg/client/listers/core/v1:go_default_library", + "//pkg/client/listers/extensions/v1beta1:go_default_library", "//pkg/controller:go_default_library", "//pkg/controller/deployment/util:go_default_library", - "//pkg/controller/informers:go_default_library", "//pkg/util/labels:go_default_library", "//pkg/util/metrics:go_default_library", "//vendor:github.com/golang/glog", @@ -63,9 +65,9 @@ go_test( "//pkg/api/v1:go_default_library", "//pkg/apis/extensions/v1beta1:go_default_library", "//pkg/client/clientset_generated/clientset/fake:go_default_library", + "//pkg/client/informers/informers_generated:go_default_library", "//pkg/controller:go_default_library", "//pkg/controller/deployment/util:go_default_library", - "//pkg/controller/informers:go_default_library", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/runtime", "//vendor:k8s.io/apimachinery/pkg/runtime/schema", diff --git a/pkg/controller/deployment/deployment_controller.go b/pkg/controller/deployment/deployment_controller.go index 4a6470a1e4..cf97da398f 100644 --- a/pkg/controller/deployment/deployment_controller.go +++ b/pkg/controller/deployment/deployment_controller.go @@ -44,10 +44,12 @@ import ( "k8s.io/kubernetes/pkg/api/v1" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" - "k8s.io/kubernetes/pkg/client/legacylisters" + coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1" + extensionsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/extensions/v1beta1" + corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1" + extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/deployment/util" - "k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/util/metrics" ) @@ -79,12 +81,12 @@ type DeploymentController struct { // used for unit testing enqueueDeployment func(deployment *extensions.Deployment) - // A store of deployments, populated by the dController - dLister *listers.StoreToDeploymentLister - // A store of 
ReplicaSets, populated by the rsController - rsLister *listers.StoreToReplicaSetLister - // A store of pods, populated by the podController - podLister *listers.StoreToPodLister + // dLister can list/get deployments from the shared informer's store + dLister extensionslisters.DeploymentLister + // rsLister can list/get replica sets from the shared informer's store + rsLister extensionslisters.ReplicaSetLister + // podLister can list/get pods from the shared informer's store + podLister corelisters.PodLister // dListerSynced returns true if the Deployment store has been synced at least once. // Added as a member to the struct to allow injection for testing. @@ -103,7 +105,7 @@ type DeploymentController struct { } // NewDeploymentController creates a new DeploymentController. -func NewDeploymentController(dInformer informers.DeploymentInformer, rsInformer informers.ReplicaSetInformer, podInformer informers.PodInformer, client clientset.Interface) *DeploymentController { +func NewDeploymentController(dInformer extensionsinformers.DeploymentInformer, rsInformer extensionsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, client clientset.Interface) *DeploymentController { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) // TODO: remove the wrapper when every clients have moved to use the clientset. @@ -159,6 +161,7 @@ func (dc *DeploymentController) Run(workers int, stopCh <-chan struct{}) { glog.Infof("Starting deployment controller") if !cache.WaitForCacheSync(stopCh, dc.dListerSynced, dc.rsListerSynced, dc.podListerSynced) { + utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync")) return } @@ -497,17 +500,20 @@ func (dc *DeploymentController) syncDeployment(key string) error { glog.V(4).Infof("Finished syncing deployment %q (%v)", key, time.Now().Sub(startTime)) }() - obj, exists, err := dc.dLister.Indexer.GetByKey(key) + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return err + } + deployment, err := dc.dLister.Deployments(namespace).Get(name) + if errors.IsNotFound(err) { + glog.Infof("Deployment has been deleted %v", key) + return nil + } if err != nil { utilruntime.HandleError(fmt.Errorf("Unable to retrieve deployment %v from store: %v", key, err)) return err } - if !exists { - glog.Infof("Deployment has been deleted %v", key) - return nil - } - deployment := obj.(*extensions.Deployment) // Deep-copy otherwise we are mutating our cache. // TODO: Deep-copy only when needed. d, err := util.DeploymentDeepCopy(deployment) @@ -766,15 +772,18 @@ func (dc *DeploymentController) checkNextItemForProgress() bool { // checkForProgress checks the progress for the provided deployment. Meant to be called // by the progressWorker and work on items synced in a secondary queue. 
func (dc *DeploymentController) checkForProgress(key string) (bool, error) { - obj, exists, err := dc.dLister.Indexer.GetByKey(key) + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return false, err + } + deployment, err := dc.dLister.Deployments(namespace).Get(name) + if errors.IsNotFound(err) { + return false, nil + } if err != nil { glog.V(2).Infof("Cannot retrieve deployment %q found in the secondary queue: %#v", key, err) return false, err } - if !exists { - return false, nil - } - deployment := obj.(*extensions.Deployment) cond := util.GetDeploymentCondition(deployment.Status, extensions.DeploymentProgressing) // Already marked with a terminal reason - no need to add it back to the main queue. if cond != nil && (cond.Reason == util.TimedOutReason || cond.Reason == util.NewRSAvailableReason) { diff --git a/pkg/controller/deployment/deployment_controller_test.go b/pkg/controller/deployment/deployment_controller_test.go index 08c7fd55f6..85aed3d3db 100644 --- a/pkg/controller/deployment/deployment_controller_test.go +++ b/pkg/controller/deployment/deployment_controller_test.go @@ -32,9 +32,9 @@ import ( "k8s.io/kubernetes/pkg/api/v1" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake" + informers "k8s.io/kubernetes/pkg/client/informers/informers_generated" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/deployment/util" - "k8s.io/kubernetes/pkg/controller/informers" ) var ( @@ -166,20 +166,20 @@ func newFixture(t *testing.T) *fixture { func (f *fixture) newController() (*DeploymentController, informers.SharedInformerFactory) { f.client = fake.NewSimpleClientset(f.objects...) - informers := informers.NewSharedInformerFactory(f.client, nil, controller.NoResyncPeriodFunc()) - c := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), f.client) + informers := informers.NewSharedInformerFactory(nil, f.client, controller.NoResyncPeriodFunc()) + c := NewDeploymentController(informers.Extensions().V1beta1().Deployments(), informers.Extensions().V1beta1().ReplicaSets(), informers.Core().V1().Pods(), f.client) c.eventRecorder = &record.FakeRecorder{} c.dListerSynced = alwaysReady c.rsListerSynced = alwaysReady c.podListerSynced = alwaysReady for _, d := range f.dLister { - c.dLister.Indexer.Add(d) + informers.Extensions().V1beta1().Deployments().Informer().GetIndexer().Add(d) } for _, rs := range f.rsLister { - c.rsLister.Indexer.Add(rs) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs) } for _, pod := range f.podLister { - c.podLister.Indexer.Add(pod) + informers.Core().V1().Pods().Informer().GetIndexer().Add(pod) } return c, informers } @@ -246,8 +246,8 @@ func TestSyncDeploymentDontDoAnythingDuringDeletion(t *testing.T) { // issue: https://github.com/kubernetes/kubernetes/issues/23218 func TestDeploymentController_dontSyncDeploymentsWithEmptyPodSelector(t *testing.T) { fake := &fake.Clientset{} - informers := informers.NewSharedInformerFactory(fake, nil, controller.NoResyncPeriodFunc()) - controller := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), fake) + informers := informers.NewSharedInformerFactory(nil, fake, controller.NoResyncPeriodFunc()) + controller := NewDeploymentController(informers.Extensions().V1beta1().Deployments(), informers.Extensions().V1beta1().ReplicaSets(), informers.Core().V1().Pods(), fake) controller.eventRecorder = 
&record.FakeRecorder{} controller.dListerSynced = alwaysReady controller.rsListerSynced = alwaysReady @@ -260,7 +260,7 @@ func TestDeploymentController_dontSyncDeploymentsWithEmptyPodSelector(t *testing d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"}) empty := metav1.LabelSelector{} d.Spec.Selector = &empty - controller.dLister.Indexer.Add(d) + informers.Extensions().V1beta1().Deployments().Informer().GetIndexer().Add(d) // We expect the deployment controller to not take action here since it's configuration // is invalid, even though no replicasets exist that match it's selector. controller.syncDeployment(fmt.Sprintf("%s/%s", d.ObjectMeta.Namespace, d.ObjectMeta.Name)) diff --git a/pkg/controller/deployment/recreate_test.go b/pkg/controller/deployment/recreate_test.go index 2d048c37fd..4fb0c7803a 100644 --- a/pkg/controller/deployment/recreate_test.go +++ b/pkg/controller/deployment/recreate_test.go @@ -25,8 +25,8 @@ import ( "k8s.io/kubernetes/pkg/api" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake" + informers "k8s.io/kubernetes/pkg/client/informers/informers_generated" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/controller/informers" ) func TestScaleDownOldReplicaSets(t *testing.T) { @@ -68,8 +68,8 @@ func TestScaleDownOldReplicaSets(t *testing.T) { } kc := fake.NewSimpleClientset(expected...) - informers := informers.NewSharedInformerFactory(kc, nil, controller.NoResyncPeriodFunc()) - c := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), kc) + informers := informers.NewSharedInformerFactory(nil, kc, controller.NoResyncPeriodFunc()) + c := NewDeploymentController(informers.Extensions().V1beta1().Deployments(), informers.Extensions().V1beta1().ReplicaSets(), informers.Core().V1().Pods(), kc) c.eventRecorder = &record.FakeRecorder{} c.scaleDownOldReplicaSetsForRecreate(oldRSs, test.d) diff --git a/pkg/controller/deployment/sync_test.go b/pkg/controller/deployment/sync_test.go index a2dc827b83..63e397e006 100644 --- a/pkg/controller/deployment/sync_test.go +++ b/pkg/controller/deployment/sync_test.go @@ -26,9 +26,9 @@ import ( "k8s.io/client-go/tools/record" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake" + informers "k8s.io/kubernetes/pkg/client/informers/informers_generated" "k8s.io/kubernetes/pkg/controller" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" - "k8s.io/kubernetes/pkg/controller/informers" ) func maxSurge(val int) *intstr.IntOrString { @@ -371,15 +371,15 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) { for i := range tests { test := tests[i] fake := &fake.Clientset{} - informers := informers.NewSharedInformerFactory(fake, nil, controller.NoResyncPeriodFunc()) - controller := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), fake) + informers := informers.NewSharedInformerFactory(nil, fake, controller.NoResyncPeriodFunc()) + controller := NewDeploymentController(informers.Extensions().V1beta1().Deployments(), informers.Extensions().V1beta1().ReplicaSets(), informers.Core().V1().Pods(), fake) controller.eventRecorder = &record.FakeRecorder{} controller.dListerSynced = alwaysReady controller.rsListerSynced = alwaysReady controller.podListerSynced = alwaysReady for _, rs := range test.oldRSs { - controller.rsLister.Indexer.Add(rs) + 
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs) } stopCh := make(chan struct{}) diff --git a/pkg/controller/deployment/util/BUILD b/pkg/controller/deployment/util/BUILD index 93f7454ab4..e4e79acd7f 100644 --- a/pkg/controller/deployment/util/BUILD +++ b/pkg/controller/deployment/util/BUILD @@ -25,7 +25,8 @@ go_library( "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library", "//pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library", - "//pkg/client/legacylisters:go_default_library", + "//pkg/client/listers/core/v1:go_default_library", + "//pkg/client/listers/extensions/v1beta1:go_default_library", "//pkg/client/retry:go_default_library", "//pkg/controller:go_default_library", "//pkg/util/hash:go_default_library", diff --git a/pkg/controller/deployment/util/deployment_util.go b/pkg/controller/deployment/util/deployment_util.go index d201b60280..677fb578f5 100644 --- a/pkg/controller/deployment/util/deployment_util.go +++ b/pkg/controller/deployment/util/deployment_util.go @@ -40,7 +40,7 @@ import ( internalextensions "k8s.io/kubernetes/pkg/apis/extensions" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" - "k8s.io/kubernetes/pkg/client/legacylisters" + corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1" "k8s.io/kubernetes/pkg/controller" labelsutil "k8s.io/kubernetes/pkg/util/labels" ) @@ -685,7 +685,7 @@ func WaitForPodsHashPopulated(c clientset.Interface, desiredGeneration int64, na // LabelPodsWithHash labels all pods in the given podList with the new hash label. // The returned bool value can be used to tell if all pods are actually labeled. -func LabelPodsWithHash(podList *v1.PodList, c clientset.Interface, podLister *listers.StoreToPodLister, namespace, name, hash string) error { +func LabelPodsWithHash(podList *v1.PodList, c clientset.Interface, podLister corelisters.PodLister, namespace, name, hash string) error { for _, pod := range podList.Items { // Only label the pod that doesn't already have the new hash if pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] != hash { diff --git a/pkg/controller/deployment/util/pod_util.go b/pkg/controller/deployment/util/pod_util.go index 970ec46c44..b2d4e8464e 100644 --- a/pkg/controller/deployment/util/pod_util.go +++ b/pkg/controller/deployment/util/pod_util.go @@ -26,7 +26,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1" - "k8s.io/kubernetes/pkg/client/legacylisters" + corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1" "k8s.io/kubernetes/pkg/client/retry" hashutil "k8s.io/kubernetes/pkg/util/hash" ) @@ -56,7 +56,7 @@ type updatePodFunc func(pod *v1.Pod) error // UpdatePodWithRetries updates a pod with given applyUpdate function. Note that pod not found error is ignored. // The returned bool value can be used to tell if the pod is actually updated. 
-func UpdatePodWithRetries(podClient v1core.PodInterface, podLister *listers.StoreToPodLister, namespace, name string, applyUpdate updatePodFunc) (*v1.Pod, error) { +func UpdatePodWithRetries(podClient v1core.PodInterface, podLister corelisters.PodLister, namespace, name string, applyUpdate updatePodFunc) (*v1.Pod, error) { var pod *v1.Pod retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error { diff --git a/pkg/controller/deployment/util/replicaset_util.go b/pkg/controller/deployment/util/replicaset_util.go index 83535a4b73..52f68eb962 100644 --- a/pkg/controller/deployment/util/replicaset_util.go +++ b/pkg/controller/deployment/util/replicaset_util.go @@ -26,7 +26,7 @@ import ( "k8s.io/kubernetes/pkg/api/v1" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1" - "k8s.io/kubernetes/pkg/client/legacylisters" + extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1" "k8s.io/kubernetes/pkg/client/retry" labelsutil "k8s.io/kubernetes/pkg/util/labels" ) @@ -37,7 +37,7 @@ type updateRSFunc func(rs *extensions.ReplicaSet) error // UpdateRSWithRetries updates a RS with given applyUpdate function. Note that RS not found error is ignored. // The returned bool value can be used to tell if the RS is actually updated. -func UpdateRSWithRetries(rsClient unversionedextensions.ReplicaSetInterface, rsLister *listers.StoreToReplicaSetLister, namespace, name string, applyUpdate updateRSFunc) (*extensions.ReplicaSet, error) { +func UpdateRSWithRetries(rsClient unversionedextensions.ReplicaSetInterface, rsLister extensionslisters.ReplicaSetLister, namespace, name string, applyUpdate updateRSFunc) (*extensions.ReplicaSet, error) { var rs *extensions.ReplicaSet retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error { diff --git a/pkg/controller/job/BUILD b/pkg/controller/job/BUILD index 03e7817b1c..5a72cdfd9e 100644 --- a/pkg/controller/job/BUILD +++ b/pkg/controller/job/BUILD @@ -21,10 +21,11 @@ go_library( "//pkg/api/v1:go_default_library", "//pkg/apis/batch/v1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", - "//pkg/client/legacylisters:go_default_library", + "//pkg/client/informers/informers_generated/batch/v1:go_default_library", + "//pkg/client/informers/informers_generated/core/v1:go_default_library", "//pkg/client/listers/batch/v1:go_default_library", + "//pkg/client/listers/core/v1:go_default_library", "//pkg/controller:go_default_library", - "//pkg/controller/informers:go_default_library", "//pkg/util/metrics:go_default_library", "//vendor:github.com/golang/glog", "//vendor:k8s.io/apimachinery/pkg/api/errors", @@ -53,8 +54,8 @@ go_test( "//pkg/apis/batch/v1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/clientset_generated/clientset/fake:go_default_library", + "//pkg/client/informers/informers_generated:go_default_library", "//pkg/controller:go_default_library", - "//pkg/controller/informers:go_default_library", "//vendor:k8s.io/apimachinery/pkg/api/equality", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/util/rand", diff --git a/pkg/controller/job/jobcontroller.go b/pkg/controller/job/jobcontroller.go index ffa0493a75..cbbbc03b98 100644 --- a/pkg/controller/job/jobcontroller.go +++ b/pkg/controller/job/jobcontroller.go @@ -36,10 +36,11 @@ import ( "k8s.io/kubernetes/pkg/api/v1" batch 
"k8s.io/kubernetes/pkg/apis/batch/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" - "k8s.io/kubernetes/pkg/client/legacylisters" + batchinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/batch/v1" + coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1" batchv1listers "k8s.io/kubernetes/pkg/client/listers/batch/v1" + corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/util/metrics" "github.com/golang/glog" @@ -66,7 +67,7 @@ type JobController struct { jobLister batchv1listers.JobLister // A store of pods, populated by the podController - podStore listers.StoreToPodLister + podStore corelisters.PodLister // Jobs that need to be updated queue workqueue.RateLimitingInterface @@ -74,7 +75,7 @@ type JobController struct { recorder record.EventRecorder } -func NewJobController(podInformer cache.SharedIndexInformer, jobInformer informers.JobInformer, kubeClient clientset.Interface) *JobController { +func NewJobController(podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface) *JobController { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) // TODO: remove the wrapper when every clients have moved to use the clientset. @@ -107,13 +108,13 @@ func NewJobController(podInformer cache.SharedIndexInformer, jobInformer informe jm.jobLister = jobInformer.Lister() jm.jobStoreSynced = jobInformer.Informer().HasSynced - podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: jm.addPod, UpdateFunc: jm.updatePod, DeleteFunc: jm.deletePod, }) - jm.podStore.Indexer = podInformer.GetIndexer() - jm.podStoreSynced = podInformer.HasSynced + jm.podStore = podInformer.Lister() + jm.podStoreSynced = podInformer.Informer().HasSynced jm.updateHandler = jm.updateJobStatus jm.syncHandler = jm.syncJob @@ -126,6 +127,7 @@ func (jm *JobController) Run(workers int, stopCh <-chan struct{}) { defer jm.queue.ShutDown() if !cache.WaitForCacheSync(stopCh, jm.podStoreSynced, jm.jobStoreSynced) { + utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync")) return } diff --git a/pkg/controller/job/jobcontroller_test.go b/pkg/controller/job/jobcontroller_test.go index f1f2c73f5b..99eb755ea1 100644 --- a/pkg/controller/job/jobcontroller_test.go +++ b/pkg/controller/job/jobcontroller_test.go @@ -34,8 +34,8 @@ import ( batch "k8s.io/kubernetes/pkg/apis/batch/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake" + informers "k8s.io/kubernetes/pkg/client/informers/informers_generated" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/controller/informers" ) var alwaysReady = func() bool { return true } @@ -89,8 +89,8 @@ func getKey(job *batch.Job, t *testing.T) string { } func newJobControllerFromClient(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) (*JobController, informers.SharedInformerFactory) { - sharedInformers := informers.NewSharedInformerFactory(kubeClient, nil, resyncPeriod()) - jm := NewJobController(sharedInformers.Pods().Informer(), sharedInformers.Jobs(), kubeClient) + sharedInformers := informers.NewSharedInformerFactory(nil, kubeClient, resyncPeriod()) + jm := NewJobController(sharedInformers.Core().V1().Pods(), 
sharedInformers.Batch().V1().Jobs(), kubeClient) return jm, sharedInformers } @@ -246,8 +246,8 @@ func TestControllerSyncJob(t *testing.T) { now := metav1.Now() job.DeletionTimestamp = &now } - sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job) - podIndexer := sharedInformerFactory.Pods().Informer().GetIndexer() + sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job) + podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer() for _, pod := range newPodList(tc.pendingPods, v1.PodPending, job) { podIndexer.Add(&pod) } @@ -349,8 +349,8 @@ func TestSyncJobPastDeadline(t *testing.T) { job.Spec.ActiveDeadlineSeconds = &tc.activeDeadlineSeconds start := metav1.Unix(metav1.Now().Time.Unix()-tc.startTime, 0) job.Status.StartTime = &start - sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job) - podIndexer := sharedInformerFactory.Pods().Informer().GetIndexer() + sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job) + podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer() for _, pod := range newPodList(tc.activePods, v1.PodRunning, job) { podIndexer.Add(&pod) } @@ -422,7 +422,7 @@ func TestSyncPastDeadlineJobFinished(t *testing.T) { start := metav1.Unix(metav1.Now().Time.Unix()-15, 0) job.Status.StartTime = &start job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobFailed, "DeadlineExceeded", "Job was active longer than specified deadline")) - sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job) + sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job) err := manager.syncJob(getKey(job, t)) if err != nil { t.Errorf("Unexpected error when syncing jobs %v", err) @@ -448,7 +448,7 @@ func TestSyncJobComplete(t *testing.T) { job := newJob(1, 1) job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobComplete, "", "")) - sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job) + sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job) err := manager.syncJob(getKey(job, t)) if err != nil { t.Fatalf("Unexpected error when syncing jobs %v", err) @@ -497,7 +497,7 @@ func TestSyncJobUpdateRequeue(t *testing.T) { return updateError } job := newJob(2, 2) - sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job) + sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job) err := manager.syncJob(getKey(job, t)) if err == nil || err != updateError { t.Errorf("Expected error %v when syncing jobs, got %v", updateError, err) @@ -577,7 +577,7 @@ func TestJobPodLookup(t *testing.T) { }, } for _, tc := range testCases { - sharedInformerFactory.Jobs().Informer().GetIndexer().Add(tc.job) + sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(tc.job) if job := manager.getPodJob(tc.pod); job != nil { if tc.expectedName != job.Name { t.Errorf("Got job %+v expected %+v", job.Name, tc.expectedName) @@ -611,9 +611,9 @@ func TestSyncJobExpectations(t *testing.T) { manager.updateHandler = func(job *batch.Job) error { return nil } job := newJob(2, 2) - sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job) + sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job) pods := newPodList(2, v1.PodPending, job) - podIndexer := sharedInformerFactory.Pods().Informer().GetIndexer() + podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer() podIndexer.Add(&pods[0]) manager.expectations = FakeJobExpectations{ @@ -687,7 +687,7 @@ func TestWatchPods(t *testing.T) 
{ manager.jobStoreSynced = alwaysReady // Put one job and one pod into the store - sharedInformerFactory.Jobs().Informer().GetIndexer().Add(testJob) + sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(testJob) received := make(chan struct{}) // The pod update sent through the fakeWatcher should figure out the managing job and // send it into the syncHandler. @@ -712,7 +712,7 @@ func TestWatchPods(t *testing.T) { // and make sure it hits the sync method for the right job. stopCh := make(chan struct{}) defer close(stopCh) - go sharedInformerFactory.Pods().Informer().Run(stopCh) + go sharedInformerFactory.Core().V1().Pods().Informer().Run(stopCh) go wait.Until(manager.worker, 10*time.Millisecond, stopCh) pods := newPodList(1, v1.PodRunning, testJob) diff --git a/pkg/controller/node/BUILD b/pkg/controller/node/BUILD index 44456c772f..2edab568d0 100644 --- a/pkg/controller/node/BUILD +++ b/pkg/controller/node/BUILD @@ -24,9 +24,11 @@ go_library( "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", - "//pkg/client/legacylisters:go_default_library", + "//pkg/client/informers/informers_generated/core/v1:go_default_library", + "//pkg/client/informers/informers_generated/extensions/v1beta1:go_default_library", + "//pkg/client/listers/core/v1:go_default_library", + "//pkg/client/listers/extensions/v1beta1:go_default_library", "//pkg/cloudprovider:go_default_library", - "//pkg/controller/informers:go_default_library", "//pkg/kubelet/util/format:go_default_library", "//pkg/util/metrics:go_default_library", "//pkg/util/node:go_default_library", @@ -67,10 +69,12 @@ go_test( "//pkg/apis/extensions/v1beta1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/clientset_generated/clientset/fake:go_default_library", + "//pkg/client/informers/informers_generated:go_default_library", + "//pkg/client/informers/informers_generated/core/v1:go_default_library", + "//pkg/client/informers/informers_generated/extensions/v1beta1:go_default_library", "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/fake:go_default_library", "//pkg/controller:go_default_library", - "//pkg/controller/informers:go_default_library", "//pkg/controller/node/testutil:go_default_library", "//pkg/util/node:go_default_library", "//vendor:github.com/golang/glog", @@ -82,7 +86,6 @@ go_test( "//vendor:k8s.io/apimachinery/pkg/util/sets", "//vendor:k8s.io/apimachinery/pkg/util/wait", "//vendor:k8s.io/client-go/testing", - "//vendor:k8s.io/client-go/tools/cache", "//vendor:k8s.io/client-go/util/flowcontrol", ], ) diff --git a/pkg/controller/node/controller_utils.go b/pkg/controller/node/controller_utils.go index b7f570edac..e6b0460cf3 100644 --- a/pkg/controller/node/controller_utils.go +++ b/pkg/controller/node/controller_utils.go @@ -31,7 +31,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" - "k8s.io/kubernetes/pkg/client/legacylisters" + extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/util/node" @@ -47,7 +47,7 @@ const ( // deletePods will delete all pods from master running on given node, and return true // if any pods were deleted, or were found pending deletion. 
-func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, nodeName, nodeUID string, daemonStore listers.StoreToDaemonSetLister) (bool, error) { +func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, nodeName, nodeUID string, daemonStore extensionslisters.DaemonSetLister) (bool, error) { remaining := false selector := fields.OneTermEqualSelector(api.PodHostField, nodeName).String() options := metav1.ListOptions{FieldSelector: selector} @@ -160,7 +160,11 @@ func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) { return } - nodeObj, found, err := nc.nodeStore.Store.GetByKey(pod.Spec.NodeName) + node, err := nc.nodeLister.Get(pod.Spec.NodeName) + // if there is no such node, do nothing and let the podGC clean it up. + if errors.IsNotFound(err) { + return + } if err != nil { // this can only happen if the Store.KeyFunc has a problem creating // a key for the pod. If it happens once, it will happen again so @@ -169,17 +173,11 @@ func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) { return } - // if there is no such node, do nothing and let the podGC clean it up. - if !found { - return - } - // delete terminating pods that have been scheduled on // nodes that do not support graceful termination // TODO(mikedanese): this can be removed when we no longer // guarantee backwards compatibility of master API to kubelets with // versions less than 1.1.0 - node := nodeObj.(*v1.Node) v, err := utilversion.ParseSemantic(node.Status.NodeInfo.KubeletVersion) if err != nil { glog.V(0).Infof("Couldn't parse version %q of node: %v", node.Status.NodeInfo.KubeletVersion, err) diff --git a/pkg/controller/node/nodecontroller.go b/pkg/controller/node/nodecontroller.go index 10a171428b..9e8965ab50 100644 --- a/pkg/controller/node/nodecontroller.go +++ b/pkg/controller/node/nodecontroller.go @@ -25,6 +25,7 @@ import ( "github.com/golang/glog" apiequality "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" @@ -39,9 +40,11 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" - "k8s.io/kubernetes/pkg/client/legacylisters" + coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1" + extensionsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/extensions/v1beta1" + corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1" + extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1" "k8s.io/kubernetes/pkg/cloudprovider" - "k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/util/metrics" utilnode "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/util/system" @@ -134,13 +137,15 @@ type NodeController struct { // The maximum duration before a pod evicted from a node can be forcefully terminated. 
maximumGracePeriod time.Duration recorder record.EventRecorder - podInformer informers.PodInformer - nodeInformer informers.NodeInformer - daemonSetInformer informers.DaemonSetInformer - podStore listers.StoreToPodLister - nodeStore listers.StoreToNodeLister - daemonSetStore listers.StoreToDaemonSetLister + nodeLister corelisters.NodeLister + nodeInformerSynced cache.InformerSynced + + daemonSetStore extensionslisters.DaemonSetLister + daemonSetInformerSynced cache.InformerSynced + + podInformerSynced cache.InformerSynced + // allocate/recycle CIDRs for node if allocateNodeCIDRs == true cidrAllocator CIDRAllocator @@ -155,13 +160,6 @@ type NodeController struct { secondaryEvictionLimiterQPS float32 largeClusterThreshold int32 unhealthyZoneThreshold float32 - - // internalPodInformer is used to hold a personal informer. If we're using - // a normal shared informer, then the informer will be started for us. If - // we have a personal informer, we must start it ourselves. If you start - // the controller using NewDaemonSetsController(passing SharedInformer), this - // will be null - internalPodInformer cache.SharedIndexInformer } // NewNodeController returns a new node controller to sync instances from cloudprovider. @@ -169,9 +167,9 @@ type NodeController struct { // podCIDRs it has already allocated to nodes. Since we don't allow podCIDR changes // currently, this should be handled as a fatal error. func NewNodeController( - podInformer informers.PodInformer, - nodeInformer informers.NodeInformer, - daemonSetInformer informers.DaemonSetInformer, + podInformer coreinformers.PodInformer, + nodeInformer coreinformers.NodeInformer, + daemonSetInformer extensionsinformers.DaemonSetInformer, cloud cloudprovider.Interface, kubeClient clientset.Interface, podEvictionTimeout time.Duration, @@ -234,9 +232,6 @@ func NewNodeController( largeClusterThreshold: largeClusterThreshold, unhealthyZoneThreshold: unhealthyZoneThreshold, zoneStates: make(map[string]zoneState), - podInformer: podInformer, - nodeInformer: nodeInformer, - daemonSetInformer: daemonSetInformer, } nc.enterPartialDisruptionFunc = nc.ReducedQPSFunc nc.enterFullDisruptionFunc = nc.HealthyQPSFunc @@ -246,7 +241,7 @@ func NewNodeController( AddFunc: nc.maybeDeleteTerminatingPod, UpdateFunc: func(_, obj interface{}) { nc.maybeDeleteTerminatingPod(obj) }, }) - nc.podStore = *podInformer.Lister() + nc.podInformerSynced = podInformer.Informer().HasSynced nodeEventHandlerFuncs := cache.ResourceEventHandlerFuncs{} if nc.allocateNodeCIDRs { @@ -347,9 +342,11 @@ func NewNodeController( } nodeInformer.Informer().AddEventHandler(nodeEventHandlerFuncs) - nc.nodeStore = *nodeInformer.Lister() + nc.nodeLister = nodeInformer.Lister() + nc.nodeInformerSynced = nodeInformer.Informer().HasSynced - nc.daemonSetStore = *daemonSetInformer.Lister() + nc.daemonSetStore = daemonSetInformer.Lister() + nc.daemonSetInformerSynced = daemonSetInformer.Informer().HasSynced return nc, nil } @@ -359,8 +356,8 @@ func (nc *NodeController) Run() { go func() { defer utilruntime.HandleCrash() - if !cache.WaitForCacheSync(wait.NeverStop, nc.nodeInformer.Informer().HasSynced, nc.podInformer.Informer().HasSynced, nc.daemonSetInformer.Informer().HasSynced) { - utilruntime.HandleError(errors.New("NodeController timed out while waiting for informers to sync...")) + if !cache.WaitForCacheSync(wait.NeverStop, nc.nodeInformerSynced, nc.podInformerSynced, nc.daemonSetInformerSynced) { + utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync")) return } @@ -379,13 
+376,13 @@ func (nc *NodeController) Run() { defer nc.evictorLock.Unlock() for k := range nc.zonePodEvictor { nc.zonePodEvictor[k].Try(func(value TimedValue) (bool, time.Duration) { - obj, exists, err := nc.nodeStore.GetByKey(value.Value) + node, err := nc.nodeLister.Get(value.Value) + if apierrors.IsNotFound(err) { + glog.Warningf("Node %v no longer present in nodeLister!", value.Value) + } if err != nil { - glog.Warningf("Failed to get Node %v from the nodeStore: %v", value.Value, err) - } else if !exists { - glog.Warningf("Node %v no longer present in nodeStore!", value.Value) + glog.Warningf("Failed to get Node %v from the nodeLister: %v", value.Value, err) } else { - node, _ := obj.(*v1.Node) zone := utilnode.GetZoneKey(node) EvictionsNumber.WithLabelValues(zone).Inc() } @@ -413,11 +410,11 @@ func (nc *NodeController) Run() { func (nc *NodeController) monitorNodeStatus() error { // We are listing nodes from local cache as we can tolerate some small delays // comparing to state from etcd and there is eventual consistency anyway. - nodes, err := nc.nodeStore.List() + nodes, err := nc.nodeLister.List(labels.Everything()) if err != nil { return err } - added, deleted := nc.checkForNodeAddedDeleted(&nodes) + added, deleted := nc.checkForNodeAddedDeleted(nodes) for i := range added { glog.V(1).Infof("NodeController observed a new Node: %#v", added[i].Name) recordNodeEvent(nc.recorder, added[i].Name, string(added[i].UID), v1.EventTypeNormal, "RegisteredNode", fmt.Sprintf("Registered Node %v in NodeController", added[i].Name)) @@ -442,11 +439,11 @@ func (nc *NodeController) monitorNodeStatus() error { } zoneToNodeConditions := map[string][]*v1.NodeCondition{} - for i := range nodes.Items { + for i := range nodes { var gracePeriod time.Duration var observedReadyCondition v1.NodeCondition var currentReadyCondition *v1.NodeCondition - nodeCopy, err := api.Scheme.DeepCopy(&nodes.Items[i]) + nodeCopy, err := api.Scheme.DeepCopy(nodes[i]) if err != nil { utilruntime.HandleError(err) continue @@ -528,12 +525,12 @@ func (nc *NodeController) monitorNodeStatus() error { } } } - nc.handleDisruption(zoneToNodeConditions, &nodes) + nc.handleDisruption(zoneToNodeConditions, nodes) return nil } -func (nc *NodeController) handleDisruption(zoneToNodeConditions map[string][]*v1.NodeCondition, nodes *v1.NodeList) { +func (nc *NodeController) handleDisruption(zoneToNodeConditions map[string][]*v1.NodeCondition, nodes []*v1.Node) { newZoneStates := map[string]zoneState{} allAreFullyDisrupted := true for k, v := range zoneToNodeConditions { @@ -574,8 +571,8 @@ func (nc *NodeController) handleDisruption(zoneToNodeConditions map[string][]*v1 // We're switching to full disruption mode if allAreFullyDisrupted { glog.V(0).Info("NodeController detected that all Nodes are not-Ready. Entering master disruption mode.") - for i := range nodes.Items { - nc.cancelPodEviction(&nodes.Items[i]) + for i := range nodes { + nc.cancelPodEviction(nodes[i]) } // We stop all evictions. for k := range nc.zonePodEvictor { @@ -592,11 +589,11 @@ func (nc *NodeController) handleDisruption(zoneToNodeConditions map[string][]*v1 glog.V(0).Info("NodeController detected that some Nodes are Ready. Exiting master disruption mode.") // When exiting disruption mode update probe timestamps on all Nodes. 
now := nc.now() - for i := range nodes.Items { - v := nc.nodeStatusMap[nodes.Items[i].Name] + for i := range nodes { + v := nc.nodeStatusMap[nodes[i].Name] v.probeTimestamp = now v.readyTransitionTimestamp = now - nc.nodeStatusMap[nodes.Items[i].Name] = v + nc.nodeStatusMap[nodes[i].Name] = v } // We reset all rate limiters to settings appropriate for the given state. for k := range nc.zonePodEvictor { @@ -797,21 +794,21 @@ func (nc *NodeController) tryUpdateNodeStatus(node *v1.Node) (time.Duration, v1. return gracePeriod, observedReadyCondition, currentReadyCondition, err } -func (nc *NodeController) checkForNodeAddedDeleted(nodes *v1.NodeList) (added, deleted []*v1.Node) { - for i := range nodes.Items { - if _, has := nc.knownNodeSet[nodes.Items[i].Name]; !has { - added = append(added, &nodes.Items[i]) +func (nc *NodeController) checkForNodeAddedDeleted(nodes []*v1.Node) (added, deleted []*v1.Node) { + for i := range nodes { + if _, has := nc.knownNodeSet[nodes[i].Name]; !has { + added = append(added, nodes[i]) } } // If there's a difference between lengths of known Nodes and observed nodes // we must have removed some Node. - if len(nc.knownNodeSet)+len(added) != len(nodes.Items) { + if len(nc.knownNodeSet)+len(added) != len(nodes) { knowSetCopy := map[string]*v1.Node{} for k, v := range nc.knownNodeSet { knowSetCopy[k] = v } - for i := range nodes.Items { - delete(knowSetCopy, nodes.Items[i].Name) + for i := range nodes { + delete(knowSetCopy, nodes[i].Name) } for i := range knowSetCopy { deleted = append(deleted, knowSetCopy[i]) diff --git a/pkg/controller/node/nodecontroller_test.go b/pkg/controller/node/nodecontroller_test.go index 4415a5b0d1..f25af25c86 100644 --- a/pkg/controller/node/nodecontroller_test.go +++ b/pkg/controller/node/nodecontroller_test.go @@ -29,15 +29,16 @@ import ( "k8s.io/apimachinery/pkg/util/diff" "k8s.io/apimachinery/pkg/util/wait" testcore "k8s.io/client-go/testing" - "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/pkg/api/v1" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake" + informers "k8s.io/kubernetes/pkg/client/informers/informers_generated" + coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1" + extensionsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/extensions/v1beta1" "k8s.io/kubernetes/pkg/cloudprovider" fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/controller/node/testutil" "k8s.io/kubernetes/pkg/util/node" ) @@ -51,6 +52,14 @@ const ( testUnhealtyThreshold = float32(0.55) ) +func alwaysReady() bool { return true } + +type nodeController struct { + *NodeController + nodeInformer coreinformers.NodeInformer + daemonSetInformer extensionsinformers.DaemonSetInformer +} + func NewNodeControllerFromClient( cloud cloudprovider.Interface, kubeClient clientset.Interface, @@ -65,21 +74,44 @@ func NewNodeControllerFromClient( clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, nodeCIDRMaskSize int, - allocateNodeCIDRs bool) (*NodeController, error) { + allocateNodeCIDRs bool) (*nodeController, error) { - factory := informers.NewSharedInformerFactory(kubeClient, nil, controller.NoResyncPeriodFunc()) + factory := informers.NewSharedInformerFactory(nil, kubeClient, controller.NoResyncPeriodFunc()) - nc, err := NewNodeController(factory.Pods(), 
factory.Nodes(), factory.DaemonSets(), cloud, kubeClient, podEvictionTimeout, evictionLimiterQPS, secondaryEvictionLimiterQPS, - largeClusterThreshold, unhealthyZoneThreshold, nodeMonitorGracePeriod, nodeStartupGracePeriod, nodeMonitorPeriod, clusterCIDR, - serviceCIDR, nodeCIDRMaskSize, allocateNodeCIDRs) + nodeInformer := factory.Core().V1().Nodes() + daemonSetInformer := factory.Extensions().V1beta1().DaemonSets() + + nc, err := NewNodeController( + factory.Core().V1().Pods(), + nodeInformer, + daemonSetInformer, + cloud, + kubeClient, + podEvictionTimeout, + evictionLimiterQPS, + secondaryEvictionLimiterQPS, + largeClusterThreshold, + unhealthyZoneThreshold, + nodeMonitorGracePeriod, + nodeStartupGracePeriod, + nodeMonitorPeriod, + clusterCIDR, + serviceCIDR, + nodeCIDRMaskSize, + allocateNodeCIDRs, + ) if err != nil { return nil, err } - return nc, nil + nc.podInformerSynced = alwaysReady + nc.nodeInformerSynced = alwaysReady + nc.daemonSetInformerSynced = alwaysReady + + return &nodeController{nc, nodeInformer, daemonSetInformer}, nil } -func syncNodeStore(nc *NodeController, fakeNodeHandler *testutil.FakeNodeHandler) error { +func syncNodeStore(nc *nodeController, fakeNodeHandler *testutil.FakeNodeHandler) error { nodes, err := fakeNodeHandler.List(metav1.ListOptions{}) if err != nil { return err @@ -88,7 +120,7 @@ func syncNodeStore(nc *NodeController, fakeNodeHandler *testutil.FakeNodeHandler for i := range nodes.Items { newElems = append(newElems, &nodes.Items[i]) } - return nc.nodeStore.Replace(newElems, "newRV") + return nc.nodeInformer.Informer().GetStore().Replace(newElems, "newRV") } func TestMonitorNodeStatusEvictPods(t *testing.T) { @@ -518,7 +550,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) { testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false) nodeController.now = func() metav1.Time { return fakeNow } for _, ds := range item.daemonSets { - nodeController.daemonSetStore.Add(&ds) + nodeController.daemonSetInformer.Informer().GetStore().Add(&ds) } if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil { t.Errorf("unexpected error: %v", err) @@ -541,7 +573,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) { for _, zone := range zones { nodeController.zonePodEvictor[zone].Try(func(value TimedValue) (bool, time.Duration) { nodeUid, _ := value.UID.(string) - deletePods(item.fakeNodeHandler, nodeController.recorder, value.Value, nodeUid, nodeController.daemonSetStore) + deletePods(item.fakeNodeHandler, nodeController.recorder, value.Value, nodeUid, nodeController.daemonSetInformer.Lister()) return true, 0 }) } @@ -1910,8 +1942,7 @@ func TestCheckPod(t *testing.T) { } nc, _ := NewNodeControllerFromClient(nil, fake.NewSimpleClientset(), 0, 0, 0, 0, 0, 0, 0, 0, nil, nil, 0, false) - nc.nodeStore.Store = cache.NewStore(cache.MetaNamespaceKeyFunc) - nc.nodeStore.Store.Add(&v1.Node{ + nc.nodeInformer.Informer().GetStore().Add(&v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "new", }, @@ -1921,7 +1952,7 @@ func TestCheckPod(t *testing.T) { }, }, }) - nc.nodeStore.Store.Add(&v1.Node{ + nc.nodeInformer.Informer().GetStore().Add(&v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "old", }, @@ -1931,7 +1962,7 @@ func TestCheckPod(t *testing.T) { }, }, }) - nc.nodeStore.Store.Add(&v1.Node{ + nc.nodeInformer.Informer().GetStore().Add(&v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "older", }, @@ -1941,7 +1972,7 @@ func TestCheckPod(t *testing.T) { }, }, }) - nc.nodeStore.Store.Add(&v1.Node{ + nc.nodeInformer.Informer().GetStore().Add(&v1.Node{ 
ObjectMeta: metav1.ObjectMeta{ Name: "oldest", }, diff --git a/pkg/controller/podgc/BUILD b/pkg/controller/podgc/BUILD index 42a76fe84b..49e27c693d 100644 --- a/pkg/controller/podgc/BUILD +++ b/pkg/controller/podgc/BUILD @@ -18,9 +18,8 @@ go_library( deps = [ "//pkg/api/v1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", - "//pkg/client/legacylisters:go_default_library", - "//pkg/controller:go_default_library", - "//pkg/controller/informers:go_default_library", + "//pkg/client/informers/informers_generated/core/v1:go_default_library", + "//pkg/client/listers/core/v1:go_default_library", "//pkg/util/metrics:go_default_library", "//vendor:github.com/golang/glog", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", @@ -39,7 +38,11 @@ go_test( tags = ["automanaged"], deps = [ "//pkg/api/v1:go_default_library", + "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/clientset_generated/clientset/fake:go_default_library", + "//pkg/client/informers/informers_generated:go_default_library", + "//pkg/client/informers/informers_generated/core/v1:go_default_library", + "//pkg/controller:go_default_library", "//pkg/controller/node/testutil:go_default_library", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/labels", diff --git a/pkg/controller/podgc/gc_controller.go b/pkg/controller/podgc/gc_controller.go index ec493f7fb1..185428c282 100644 --- a/pkg/controller/podgc/gc_controller.go +++ b/pkg/controller/podgc/gc_controller.go @@ -17,6 +17,7 @@ limitations under the License. package podgc import ( + "fmt" "sort" "sync" "time" @@ -29,9 +30,8 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" - "k8s.io/kubernetes/pkg/client/legacylisters" - "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/controller/informers" + coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1" + corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1" "k8s.io/kubernetes/pkg/util/metrics" "github.com/golang/glog" @@ -44,21 +44,14 @@ const ( type PodGCController struct { kubeClient clientset.Interface - // internalPodInformer is used to hold a personal informer. If we're using - // a normal shared informer, then the informer will be started for us. If - // we have a personal informer, we must start it ourselves. 
If you start - // the controller using NewPodGC(..., passing SharedInformer, ...), this - // will be null - internalPodInformer cache.SharedIndexInformer - - podStore listers.StoreToPodLister - podController cache.Controller + podLister corelisters.PodLister + podListerSynced cache.InformerSynced deletePod func(namespace, name string) error terminatedPodThreshold int } -func NewPodGC(kubeClient clientset.Interface, podInformer cache.SharedIndexInformer, terminatedPodThreshold int) *PodGCController { +func NewPodGC(kubeClient clientset.Interface, podInformer coreinformers.PodInformer, terminatedPodThreshold int) *PodGCController { if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil { metrics.RegisterMetricAndTrackRateLimiterUsage("gc_controller", kubeClient.Core().RESTClient().GetRateLimiter()) } @@ -71,36 +64,24 @@ func NewPodGC(kubeClient clientset.Interface, podInformer cache.SharedIndexInfor }, } - gcc.podStore.Indexer = podInformer.GetIndexer() - gcc.podController = podInformer.GetController() + gcc.podLister = podInformer.Lister() + gcc.podListerSynced = podInformer.Informer().HasSynced return gcc } -func NewFromClient( - kubeClient clientset.Interface, - terminatedPodThreshold int, -) *PodGCController { - podInformer := informers.NewPodInformer(kubeClient, controller.NoResyncPeriodFunc()) - controller := NewPodGC(kubeClient, podInformer, terminatedPodThreshold) - controller.internalPodInformer = podInformer - return controller -} - func (gcc *PodGCController) Run(stop <-chan struct{}) { - if gcc.internalPodInformer != nil { - go gcc.podController.Run(stop) + if !cache.WaitForCacheSync(stop, gcc.podListerSynced) { + utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync")) + return } + go wait.Until(gcc.gc, gcCheckPeriod, stop) <-stop } func (gcc *PodGCController) gc() { - if !gcc.podController.HasSynced() { - glog.V(2).Infof("PodGCController is waiting for informer sync...") - return - } - pods, err := gcc.podStore.List(labels.Everything()) + pods, err := gcc.podLister.List(labels.Everything()) if err != nil { glog.Errorf("Error while listing all Pods: %v", err) return diff --git a/pkg/controller/podgc/gc_controller_test.go b/pkg/controller/podgc/gc_controller_test.go index d3a1ad7206..fa89073290 100644 --- a/pkg/controller/podgc/gc_controller_test.go +++ b/pkg/controller/podgc/gc_controller_test.go @@ -25,7 +25,11 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake" + informers "k8s.io/kubernetes/pkg/client/informers/informers_generated" + coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1" + "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/node/testutil" ) @@ -41,6 +45,16 @@ func (*FakeController) LastSyncResourceVersion() string { return "" } +func alwaysReady() bool { return true } + +func NewFromClient(kubeClient clientset.Interface, terminatedPodThreshold int) (*PodGCController, coreinformers.PodInformer) { + informerFactory := informers.NewSharedInformerFactory(nil, kubeClient, controller.NoResyncPeriodFunc()) + podInformer := informerFactory.Core().V1().Pods() + controller := NewPodGC(kubeClient, podInformer, terminatedPodThreshold) + controller.podListerSynced = alwaysReady + return controller, podInformer +} + func TestGCTerminated(t *testing.T) { type nameToPhase struct { name string @@ -99,7 
+113,7 @@ func TestGCTerminated(t *testing.T) { for i, test := range testCases { client := fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*testutil.NewNode("node")}}) - gcc := NewFromClient(client, test.threshold) + gcc, podInformer := NewFromClient(client, test.threshold) deletedPodNames := make([]string, 0) var lock sync.Mutex gcc.deletePod = func(_, name string) error { @@ -112,15 +126,13 @@ func TestGCTerminated(t *testing.T) { creationTime := time.Unix(0, 0) for _, pod := range test.pods { creationTime = creationTime.Add(1 * time.Hour) - gcc.podStore.Indexer.Add(&v1.Pod{ + podInformer.Informer().GetStore().Add(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: pod.name, CreationTimestamp: metav1.Time{Time: creationTime}}, Status: v1.PodStatus{Phase: pod.phase}, Spec: v1.PodSpec{NodeName: "node"}, }) } - gcc.podController = &FakeController{} - gcc.gc() pass := true @@ -168,7 +180,7 @@ func TestGCOrphaned(t *testing.T) { for i, test := range testCases { client := fake.NewSimpleClientset() - gcc := NewFromClient(client, test.threshold) + gcc, podInformer := NewFromClient(client, test.threshold) deletedPodNames := make([]string, 0) var lock sync.Mutex gcc.deletePod = func(_, name string) error { @@ -181,16 +193,14 @@ func TestGCOrphaned(t *testing.T) { creationTime := time.Unix(0, 0) for _, pod := range test.pods { creationTime = creationTime.Add(1 * time.Hour) - gcc.podStore.Indexer.Add(&v1.Pod{ + podInformer.Informer().GetStore().Add(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: pod.name, CreationTimestamp: metav1.Time{Time: creationTime}}, Status: v1.PodStatus{Phase: pod.phase}, Spec: v1.PodSpec{NodeName: "node"}, }) } - gcc.podController = &FakeController{} - - pods, err := gcc.podStore.List(labels.Everything()) + pods, err := podInformer.Lister().List(labels.Everything()) if err != nil { t.Errorf("Error while listing all Pods: %v", err) return @@ -247,7 +257,7 @@ func TestGCUnscheduledTerminating(t *testing.T) { for i, test := range testCases { client := fake.NewSimpleClientset() - gcc := NewFromClient(client, -1) + gcc, podInformer := NewFromClient(client, -1) deletedPodNames := make([]string, 0) var lock sync.Mutex gcc.deletePod = func(_, name string) error { @@ -260,7 +270,7 @@ func TestGCUnscheduledTerminating(t *testing.T) { creationTime := time.Unix(0, 0) for _, pod := range test.pods { creationTime = creationTime.Add(1 * time.Hour) - gcc.podStore.Indexer.Add(&v1.Pod{ + podInformer.Informer().GetStore().Add(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: pod.name, CreationTimestamp: metav1.Time{Time: creationTime}, DeletionTimestamp: pod.deletionTimeStamp}, Status: v1.PodStatus{Phase: pod.phase}, @@ -268,9 +278,7 @@ func TestGCUnscheduledTerminating(t *testing.T) { }) } - gcc.podController = &FakeController{} - - pods, err := gcc.podStore.List(labels.Everything()) + pods, err := podInformer.Lister().List(labels.Everything()) if err != nil { t.Errorf("Error while listing all Pods: %v", err) return diff --git a/pkg/controller/replicaset/BUILD b/pkg/controller/replicaset/BUILD index 945789dc4a..1ae940cc3a 100644 --- a/pkg/controller/replicaset/BUILD +++ b/pkg/controller/replicaset/BUILD @@ -22,9 +22,11 @@ go_library( "//pkg/apis/extensions/v1beta1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library", - "//pkg/client/legacylisters:go_default_library", + "//pkg/client/informers/informers_generated/core/v1:go_default_library", + 
"//pkg/client/informers/informers_generated/extensions/v1beta1:go_default_library", + "//pkg/client/listers/core/v1:go_default_library", + "//pkg/client/listers/extensions/v1beta1:go_default_library", "//pkg/controller:go_default_library", - "//pkg/controller/informers:go_default_library", "//pkg/util/metrics:go_default_library", "//vendor:github.com/golang/glog", "//vendor:k8s.io/apimachinery/pkg/api/errors", @@ -54,9 +56,8 @@ go_test( "//pkg/apis/extensions/v1beta1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/clientset_generated/clientset/fake:go_default_library", - "//pkg/client/legacylisters:go_default_library", + "//pkg/client/informers/informers_generated:go_default_library", "//pkg/controller:go_default_library", - "//pkg/controller/informers:go_default_library", "//pkg/securitycontext:go_default_library", "//vendor:k8s.io/apimachinery/pkg/api/equality", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", diff --git a/pkg/controller/replicaset/replica_set.go b/pkg/controller/replicaset/replica_set.go index 6a26722c57..6cc58a3455 100644 --- a/pkg/controller/replicaset/replica_set.go +++ b/pkg/controller/replicaset/replica_set.go @@ -43,9 +43,11 @@ import ( "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" - "k8s.io/kubernetes/pkg/client/legacylisters" + coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1" + extensionsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/extensions/v1beta1" + corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1" + extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/util/metrics" ) @@ -77,10 +79,14 @@ type ReplicaSetController struct { // A TTLCache of pod creates/deletes each rc expects to see. expectations *controller.UIDTrackingControllerExpectations - // A store of ReplicaSets, populated by the rsController - rsLister *listers.StoreToReplicaSetLister - // A store of pods, populated by the podController - podLister *listers.StoreToPodLister + // A store of ReplicaSets, populated by the shared informer passed to NewReplicaSetController + rsLister extensionslisters.ReplicaSetLister + // rsListerSynced returns true if the pod store has been synced at least once. + // Added as a member to the struct to allow injection for testing. + rsListerSynced cache.InformerSynced + + // A store of pods, populated by the shared informer passed to NewReplicaSetController + podLister corelisters.PodLister // podListerSynced returns true if the pod store has been synced at least once. // Added as a member to the struct to allow injection for testing. 
podListerSynced cache.InformerSynced @@ -96,7 +102,7 @@ type ReplicaSetController struct { } // NewReplicaSetController configures a replica set controller with the specified event recorder -func NewReplicaSetController(rsInformer informers.ReplicaSetInformer, podInformer informers.PodInformer, kubeClient clientset.Interface, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicaSetController { +func NewReplicaSetController(rsInformer extensionsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicaSetController { if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil { metrics.RegisterMetricAndTrackRateLimiterUsage("replicaset_controller", kubeClient.Core().RESTClient().GetRateLimiter()) } @@ -124,6 +130,9 @@ func NewReplicaSetController(rsInformer informers.ReplicaSetInformer, podInforme // way of achieving this is by performing a `stop` operation on the replica set. DeleteFunc: rsc.enqueueReplicaSet, }) + rsc.rsLister = rsInformer.Lister() + rsc.rsListerSynced = rsInformer.Informer().HasSynced + podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: rsc.addPod, // This invokes the ReplicaSet for every pod change, eg: host assignment. Though this might seem like @@ -132,12 +141,12 @@ func NewReplicaSetController(rsInformer informers.ReplicaSetInformer, podInforme UpdateFunc: rsc.updatePod, DeleteFunc: rsc.deletePod, }) - - rsc.syncHandler = rsc.syncReplicaSet - rsc.rsLister = rsInformer.Lister() rsc.podLister = podInformer.Lister() rsc.podListerSynced = podInformer.Informer().HasSynced + + rsc.syncHandler = rsc.syncReplicaSet rsc.lookupCache = controller.NewMatchingCache(lookupCacheSize) + return rsc } @@ -156,7 +165,8 @@ func (rsc *ReplicaSetController) Run(workers int, stopCh <-chan struct{}) { glog.Infof("Starting ReplicaSet controller") - if !cache.WaitForCacheSync(stopCh, rsc.podListerSynced) { + if !cache.WaitForCacheSync(stopCh, rsc.podListerSynced, rsc.rsListerSynced) { + utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync")) return } @@ -561,16 +571,19 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error { glog.V(4).Infof("Finished syncing replica set %q (%v)", key, time.Now().Sub(startTime)) }() - obj, exists, err := rsc.rsLister.Indexer.GetByKey(key) + namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { return err } - if !exists { + rs, err := rsc.rsLister.ReplicaSets(namespace).Get(name) + if errors.IsNotFound(err) { glog.V(4).Infof("ReplicaSet has been deleted %v", key) rsc.expectations.DeleteExpectations(key) return nil } - rs := *obj.(*extensions.ReplicaSet) + if err != nil { + return err + } rsNeedsSync := rsc.expectations.SatisfiedExpectations(key) selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector) @@ -633,9 +646,15 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error { var manageReplicasErr error if rsNeedsSync && rs.DeletionTimestamp == nil { - manageReplicasErr = rsc.manageReplicas(filteredPods, &rs) + manageReplicasErr = rsc.manageReplicas(filteredPods, rs) } + copy, err := api.Scheme.DeepCopy(rs) + if err != nil { + return err + } + rs = copy.(*extensions.ReplicaSet) + newStatus := calculateStatus(rs, filteredPods, manageReplicasErr) // Always updates status as pods come up or die. 
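Reviewer note, not part of the patch: the deployment checkForProgress hunk and the syncReplicaSet hunk above converge on the same lookup shape, so a consolidated sketch may help. Everything below is illustrative only; the helper name getReplicaSetForKey is invented, while the lister type, cache.SplitMetaNamespaceKey, and the IsNotFound handling mirror the calls this diff introduces.

package example

import (
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/client-go/tools/cache"

	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
	extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1"
)

// getReplicaSetForKey resolves a work-queue key through the generated lister
// rather than reaching into the informer's Indexer.
func getReplicaSetForKey(lister extensionslisters.ReplicaSetLister, key string) (*extensions.ReplicaSet, error) {
	// Queue keys are "namespace/name"; the old code fed them to Indexer.GetByKey.
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return nil, err
	}
	rs, err := lister.ReplicaSets(namespace).Get(name)
	if errors.IsNotFound(err) {
		// Deleted between enqueue and sync; callers clear expectations and stop.
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return rs, nil
}

The deep-copy step the patch adds before calculateStatus follows from the same change: the generated lister hands back a pointer into the shared informer cache, so the object must be copied before its status is mutated.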
diff --git a/pkg/controller/replicaset/replica_set_test.go b/pkg/controller/replicaset/replica_set_test.go index c830afa73a..f6bfe671cf 100644 --- a/pkg/controller/replicaset/replica_set_test.go +++ b/pkg/controller/replicaset/replica_set_test.go @@ -46,19 +46,27 @@ import ( "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake" fakeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake" - "k8s.io/kubernetes/pkg/client/legacylisters" + informers "k8s.io/kubernetes/pkg/client/informers/informers_generated" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/securitycontext" ) -func testNewReplicaSetControllerFromClient(client clientset.Interface, stopCh chan struct{}, burstReplicas int, lookupCacheSize int) *ReplicaSetController { - informers := informers.NewSharedInformerFactory(client, nil, controller.NoResyncPeriodFunc()) - ret := NewReplicaSetController(informers.ReplicaSets(), informers.Pods(), client, burstReplicas, lookupCacheSize, false) - ret.podLister = &listers.StoreToPodLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})} - ret.rsLister = &listers.StoreToReplicaSetLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})} - informers.Start(stopCh) - return ret +func testNewReplicaSetControllerFromClient(client clientset.Interface, stopCh chan struct{}, burstReplicas int, lookupCacheSize int) (*ReplicaSetController, informers.SharedInformerFactory) { + informers := informers.NewSharedInformerFactory(nil, client, controller.NoResyncPeriodFunc()) + + ret := NewReplicaSetController( + informers.Extensions().V1beta1().ReplicaSets(), + informers.Core().V1().Pods(), + client, + burstReplicas, + lookupCacheSize, + false, + ) + + ret.podListerSynced = alwaysReady + ret.rsListerSynced = alwaysReady + + return ret, informers } func filterInformerActions(actions []core.Action) []core.Action { @@ -203,14 +211,13 @@ func TestSyncReplicaSetDoesNothing(t *testing.T) { fakePodControl := controller.FakePodControl{} stopCh := make(chan struct{}) defer close(stopCh) - manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) // 2 running pods, a controller with 2 replicas, sync is a no-op labelMap := map[string]string{"foo": "bar"} rsSpec := newReplicaSet(2, labelMap) - manager.rsLister.Indexer.Add(rsSpec) - newPodList(manager.podLister.Indexer, 2, v1.PodRunning, labelMap, rsSpec, "pod") + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec) + newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 2, v1.PodRunning, labelMap, rsSpec, "pod") manager.podControl = &fakePodControl manager.syncReplicaSet(getKey(rsSpec, t)) @@ -222,15 +229,14 @@ func TestSyncReplicaSetDeletes(t *testing.T) { fakePodControl := controller.FakePodControl{} stopCh := make(chan struct{}) defer close(stopCh) - manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) manager.podControl = &fakePodControl // 2 running pods and a controller with 1 replica, 
one pod delete expected labelMap := map[string]string{"foo": "bar"} rsSpec := newReplicaSet(1, labelMap) - manager.rsLister.Indexer.Add(rsSpec) - newPodList(manager.podLister.Indexer, 2, v1.PodRunning, labelMap, rsSpec, "pod") + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec) + newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 2, v1.PodRunning, labelMap, rsSpec, "pod") manager.syncReplicaSet(getKey(rsSpec, t)) validateSyncReplicaSet(t, &fakePodControl, 0, 1, 0) @@ -241,8 +247,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) { fakePodControl := controller.FakePodControl{} stopCh := make(chan struct{}) defer close(stopCh) - manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) manager.podControl = &fakePodControl received := make(chan string) @@ -255,7 +260,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) { // the controller matching the selectors of the deleted pod into the work queue. labelMap := map[string]string{"foo": "bar"} rsSpec := newReplicaSet(1, labelMap) - manager.rsLister.Indexer.Add(rsSpec) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec) pods := newPodList(nil, 1, v1.PodRunning, labelMap, rsSpec, "pod") manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]}) @@ -276,13 +281,12 @@ func TestSyncReplicaSetCreates(t *testing.T) { client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) stopCh := make(chan struct{}) defer close(stopCh) - manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) // A controller with 2 replicas and no pods in the store, 2 creates expected labelMap := map[string]string{"foo": "bar"} rs := newReplicaSet(2, labelMap) - manager.rsLister.Indexer.Add(rs) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs) fakePodControl := controller.FakePodControl{} manager.podControl = &fakePodControl @@ -302,16 +306,15 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) { client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) stopCh := make(chan struct{}) defer close(stopCh) - manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) // Steady state for the ReplicaSet, no Status.Replicas updates expected activePods := 5 labelMap := map[string]string{"foo": "bar"} rs := newReplicaSet(activePods, labelMap) - manager.rsLister.Indexer.Add(rs) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs) rs.Status = extensions.ReplicaSetStatus{Replicas: int32(activePods), ReadyReplicas: int32(activePods), AvailableReplicas: int32(activePods)} - newPodList(manager.podLister.Indexer, activePods, v1.PodRunning, labelMap, rs, "pod") + newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), activePods, v1.PodRunning, labelMap, rs, "pod") fakePodControl := 
controller.FakePodControl{} manager.podControl = &fakePodControl @@ -348,8 +351,7 @@ func TestControllerUpdateReplicas(t *testing.T) { client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) stopCh := make(chan struct{}) defer close(stopCh) - manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) // Insufficient number of pods in the system, and Status.Replicas is wrong; // Status.Replica should update to match number of pods in system, 1 new pod should be created. @@ -357,11 +359,11 @@ func TestControllerUpdateReplicas(t *testing.T) { extraLabelMap := map[string]string{"foo": "bar", "extraKey": "extraValue"} rs := newReplicaSet(5, labelMap) rs.Spec.Template.Labels = extraLabelMap - manager.rsLister.Indexer.Add(rs) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs) rs.Status = extensions.ReplicaSetStatus{Replicas: 2, FullyLabeledReplicas: 6, ReadyReplicas: 2, AvailableReplicas: 2, ObservedGeneration: 0} rs.Generation = 1 - newPodList(manager.podLister.Indexer, 2, v1.PodRunning, labelMap, rs, "pod") - newPodList(manager.podLister.Indexer, 2, v1.PodRunning, extraLabelMap, rs, "podWithExtraLabel") + newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 2, v1.PodRunning, labelMap, rs, "pod") + newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 2, v1.PodRunning, extraLabelMap, rs, "podWithExtraLabel") // This response body is just so we don't err out decoding the http response response := runtime.EncodeOrDie(testapi.Extensions.Codec(), &extensions.ReplicaSet{}) @@ -398,15 +400,14 @@ func TestSyncReplicaSetDormancy(t *testing.T) { fakePodControl := controller.FakePodControl{} stopCh := make(chan struct{}) defer close(stopCh) - manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) manager.podControl = &fakePodControl labelMap := map[string]string{"foo": "bar"} rsSpec := newReplicaSet(2, labelMap) - manager.rsLister.Indexer.Add(rsSpec) - newPodList(manager.podLister.Indexer, 1, v1.PodRunning, labelMap, rsSpec, "pod") + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec) + newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 1, v1.PodRunning, labelMap, rsSpec, "pod") // Creates a replica and sets expectations rsSpec.Status.Replicas = 1 @@ -455,8 +456,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) { func TestPodControllerLookup(t *testing.T) { stopCh := make(chan struct{}) defer close(stopCh) - manager := testNewReplicaSetControllerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}), stopCh, BurstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, informers := testNewReplicaSetControllerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}), stopCh, BurstReplicas, 0) testCases := []struct { inRSs []*extensions.ReplicaSet pod *v1.Pod @@ -502,7 +502,7 @@ func TestPodControllerLookup(t 
*testing.T) { } for _, c := range testCases { for _, r := range c.inRSs { - manager.rsLister.Indexer.Add(r) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(r) } if rs := manager.getPodReplicaSet(c.pod); rs != nil { if c.outRSName != rs.Name { @@ -521,14 +521,20 @@ type FakeWatcher struct { func TestWatchControllers(t *testing.T) { fakeWatch := watch.NewFake() - client := &fake.Clientset{} - client.AddWatchReactor("replicasets", core.DefaultWatchReactor(fakeWatch, nil)) + client := fake.NewSimpleClientset() + client.PrependWatchReactor("replicasets", core.DefaultWatchReactor(fakeWatch, nil)) stopCh := make(chan struct{}) defer close(stopCh) - informers := informers.NewSharedInformerFactory(client, nil, controller.NoResyncPeriodFunc()) - manager := NewReplicaSetController(informers.ReplicaSets(), informers.Pods(), client, BurstReplicas, 0, false) + informers := informers.NewSharedInformerFactory(nil, client, controller.NoResyncPeriodFunc()) + manager := NewReplicaSetController( + informers.Extensions().V1beta1().ReplicaSets(), + informers.Core().V1().Pods(), + client, + BurstReplicas, + 0, + false, + ) informers.Start(stopCh) - manager.podListerSynced = alwaysReady var testRSSpec extensions.ReplicaSet received := make(chan string) @@ -537,7 +543,7 @@ func TestWatchControllers(t *testing.T) { // and eventually into the syncHandler. The handler validates the received controller // and closes the received channel to indicate that the test can finish. manager.syncHandler = func(key string) error { - obj, exists, err := manager.rsLister.Indexer.GetByKey(key) + obj, exists, err := informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().GetByKey(key) if !exists || err != nil { t.Errorf("Expected to find replica set under key %v", key) } @@ -563,36 +569,44 @@ func TestWatchControllers(t *testing.T) { } func TestWatchPods(t *testing.T) { + client := fake.NewSimpleClientset() + fakeWatch := watch.NewFake() - client := &fake.Clientset{} - client.AddWatchReactor("pods", core.DefaultWatchReactor(fakeWatch, nil)) + client.PrependWatchReactor("pods", core.DefaultWatchReactor(fakeWatch, nil)) + stopCh := make(chan struct{}) defer close(stopCh) - manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) - manager.podListerSynced = alwaysReady - // Put one ReplicaSet and one pod into the controller's stores + manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) + + // Put one ReplicaSet into the shared informer labelMap := map[string]string{"foo": "bar"} testRSSpec := newReplicaSet(1, labelMap) - manager.rsLister.Indexer.Add(testRSSpec) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(testRSSpec) + received := make(chan string) // The pod update sent through the fakeWatcher should figure out the managing ReplicaSet and // send it into the syncHandler. 
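Taken together, the watch test above shows the full post-conversion wiring: a fake versioned clientset feeds the generated shared informer factory, the typed informers are handed to the controller constructor, and the factory is started once handlers are registered. A minimal sketch of that wiring (the helper name newTestRSManager is illustrative, not part of this change):

package replicaset

import (
	fakeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
	"k8s.io/kubernetes/pkg/controller"
)

// newTestRSManager is an illustrative helper mirroring the tests above:
// generated factory first, typed informers into the constructor, then Start.
func newTestRSManager(stopCh chan struct{}) (*ReplicaSetController, informers.SharedInformerFactory) {
	client := fakeclientset.NewSimpleClientset()
	factory := informers.NewSharedInformerFactory(nil, client, controller.NoResyncPeriodFunc())
	manager := NewReplicaSetController(
		factory.Extensions().V1beta1().ReplicaSets(), // typed ReplicaSet informer
		factory.Core().V1().Pods(),                   // typed Pod informer
		client,
		BurstReplicas,
		0,     // lookupCacheSize
		false, // garbageCollectorEnabled
	)
	factory.Start(stopCh) // start shared informers after handlers are registered
	return manager, factory
}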
manager.syncHandler = func(key string) error { - obj, exists, err := manager.rsLister.Indexer.GetByKey(key) - if !exists || err != nil { - t.Errorf("Expected to find replica set under key %v", key) + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + t.Errorf("Error splitting key: %v", err) + } + rsSpec, err := manager.rsLister.ReplicaSets(namespace).Get(name) + if err != nil { + t.Errorf("Expected to find replica set under key %v: %v", key, err) } - rsSpec := obj.(*extensions.ReplicaSet) if !apiequality.Semantic.DeepDerivative(rsSpec, testRSSpec) { t.Errorf("\nExpected %#v,\nbut got %#v", testRSSpec, rsSpec) } close(received) return nil } + // Start only the pod watcher and the workqueue, send a watch event, // and make sure it hits the sync method for the right ReplicaSet. - go wait.Until(manager.worker, 10*time.Millisecond, stopCh) + go informers.Core().V1().Pods().Informer().Run(stopCh) + go manager.Run(1, stopCh) pods := newPodList(nil, 1, v1.PodRunning, labelMap, testRSSpec, "pod") testPod := pods.Items[0] @@ -609,36 +623,39 @@ func TestWatchPods(t *testing.T) { func TestUpdatePods(t *testing.T) { stopCh := make(chan struct{}) defer close(stopCh) - manager := testNewReplicaSetControllerFromClient(fake.NewSimpleClientset(), stopCh, BurstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, informers := testNewReplicaSetControllerFromClient(fake.NewSimpleClientset(), stopCh, BurstReplicas, 0) received := make(chan string) manager.syncHandler = func(key string) error { - obj, exists, err := manager.rsLister.Indexer.GetByKey(key) - if !exists || err != nil { - t.Errorf("Expected to find replica set under key %v", key) + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + t.Errorf("Error splitting key: %v", err) } - received <- obj.(*extensions.ReplicaSet).Name + rsSpec, err := manager.rsLister.ReplicaSets(namespace).Get(name) + if err != nil { + t.Errorf("Expected to find replica set under key %v: %v", key, err) + } + received <- rsSpec.Name return nil } go wait.Until(manager.worker, 10*time.Millisecond, stopCh) - // Put 2 ReplicaSets and one pod into the controller's stores + // Put 2 ReplicaSets and one pod into the informers labelMap1 := map[string]string{"foo": "bar"} testRSSpec1 := newReplicaSet(1, labelMap1) - manager.rsLister.Indexer.Add(testRSSpec1) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(testRSSpec1) testRSSpec2 := *testRSSpec1 labelMap2 := map[string]string{"bar": "foo"} testRSSpec2.Spec.Selector = &metav1.LabelSelector{MatchLabels: labelMap2} testRSSpec2.Name = "barfoo" - manager.rsLister.Indexer.Add(&testRSSpec2) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(&testRSSpec2) // case 1: We put in the podLister a pod with labels matching testRSSpec1, // then update its labels to match testRSSpec2. We expect to receive a sync // request for both replica sets. 
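The handler rewrite above is the lookup idiom used throughout this change: split the work-queue key, then read through the generated lister instead of reaching into an indexer and type-asserting. A sketch, assuming the cache and extensions packages this test file already imports (getRSForKey is an illustrative name):

// Illustrative helper: work-queue key -> object via the shared lister.
func getRSForKey(manager *ReplicaSetController, key string) (*extensions.ReplicaSet, error) {
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return nil, err
	}
	// Namespace-scoped read from the lister; no GetByKey, no type assertion,
	// and a plain error when the object is missing.
	return manager.rsLister.ReplicaSets(namespace).Get(name)
}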
- pod1 := newPodList(manager.podLister.Indexer, 1, v1.PodRunning, labelMap1, testRSSpec1, "pod").Items[0] + pod1 := newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 1, v1.PodRunning, labelMap1, testRSSpec1, "pod").Items[0] pod1.ResourceVersion = "1" pod2 := pod1 pod2.Labels = labelMap2 @@ -690,14 +707,13 @@ func TestControllerUpdateRequeue(t *testing.T) { stopCh := make(chan struct{}) defer close(stopCh) client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) - manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) labelMap := map[string]string{"foo": "bar"} rs := newReplicaSet(1, labelMap) - manager.rsLister.Indexer.Add(rs) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs) rs.Status = extensions.ReplicaSetStatus{Replicas: 2} - newPodList(manager.podLister.Indexer, 1, v1.PodRunning, labelMap, rs, "pod") + newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 1, v1.PodRunning, labelMap, rs, "pod") fakePodControl := controller.FakePodControl{} manager.podControl = &fakePodControl @@ -721,7 +737,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) { fakeRSClient := fakeClient.Extensions().ReplicaSets("default") numReplicas := int32(10) newStatus := extensions.ReplicaSetStatus{Replicas: numReplicas} - updateReplicaSetStatus(fakeRSClient, *rs, newStatus) + updateReplicaSetStatus(fakeRSClient, rs, newStatus) updates, gets := 0, 0 for _, a := range fakeClient.Actions() { if a.GetResource().Resource != "replicasets" { @@ -762,13 +778,12 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) fakePodControl := controller.FakePodControl{} stopCh := make(chan struct{}) defer close(stopCh) - manager := testNewReplicaSetControllerFromClient(client, stopCh, burstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, burstReplicas, 0) manager.podControl = &fakePodControl labelMap := map[string]string{"foo": "bar"} rsSpec := newReplicaSet(numReplicas, labelMap) - manager.rsLister.Indexer.Add(rsSpec) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec) expectedPods := int32(0) pods := newPodList(nil, numReplicas, v1.PodPending, labelMap, rsSpec, "pod") @@ -782,14 +797,14 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) for _, replicas := range []int32{int32(numReplicas), 0} { *(rsSpec.Spec.Replicas) = replicas - manager.rsLister.Indexer.Add(rsSpec) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec) for i := 0; i < numReplicas; i += burstReplicas { manager.syncReplicaSet(getKey(rsSpec, t)) // The store accrues active pods. It's also used by the ReplicaSet to determine how many // replicas to create. - activePods := int32(len(manager.podLister.Indexer.List())) + activePods := int32(len(informers.Core().V1().Pods().Informer().GetIndexer().List())) if replicas != 0 { // This is the number of pods currently "in flight". 
They were created by the // ReplicaSet controller above, which then puts the ReplicaSet to sleep till @@ -804,7 +819,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) // This simulates the watch events for all but 1 of the expected pods. // None of these should wake the controller because it has expectations==BurstReplicas. for i := int32(0); i < expectedPods-1; i++ { - manager.podLister.Indexer.Add(&pods.Items[i]) + informers.Core().V1().Pods().Informer().GetIndexer().Add(&pods.Items[i]) manager.addPod(&pods.Items[i]) } @@ -840,7 +855,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) // has exactly one expectation at the end, to verify that we // don't double delete. for i := range podsToDelete[1:] { - manager.podLister.Indexer.Delete(podsToDelete[i]) + informers.Core().V1().Pods().Informer().GetIndexer().Delete(podsToDelete[i]) manager.deletePod(podsToDelete[i]) } podExp, exists, err := manager.expectations.GetExpectations(rsKey) @@ -861,7 +876,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) // The last add pod will decrease the expectation of the ReplicaSet to 0, // which will cause it to create/delete the remaining replicas up to burstReplicas. if replicas != 0 { - manager.podLister.Indexer.Add(&pods.Items[expectedPods-1]) + informers.Core().V1().Pods().Informer().GetIndexer().Add(&pods.Items[expectedPods-1]) manager.addPod(&pods.Items[expectedPods-1]) } else { expectedDel := manager.expectations.GetUIDs(getKey(rsSpec, t)) @@ -876,14 +891,14 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) Labels: rsSpec.Spec.Selector.MatchLabels, }, } - manager.podLister.Indexer.Delete(lastPod) + informers.Core().V1().Pods().Informer().GetIndexer().Delete(lastPod) manager.deletePod(lastPod) } pods.Items = pods.Items[expectedPods:] } // Confirm that we've created the right number of replicas - activePods := int32(len(manager.podLister.Indexer.List())) + activePods := int32(len(informers.Core().V1().Pods().Informer().GetIndexer().List())) if activePods != *(rsSpec.Spec.Replicas) { t.Fatalf("Unexpected number of active pods, expected %d, got %d", *(rsSpec.Spec.Replicas), activePods) } @@ -916,15 +931,14 @@ func TestRSSyncExpectations(t *testing.T) { fakePodControl := controller.FakePodControl{} stopCh := make(chan struct{}) defer close(stopCh) - manager := testNewReplicaSetControllerFromClient(client, stopCh, 2, 0) - manager.podListerSynced = alwaysReady + manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, 2, 0) manager.podControl = &fakePodControl labelMap := map[string]string{"foo": "bar"} rsSpec := newReplicaSet(2, labelMap) - manager.rsLister.Indexer.Add(rsSpec) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec) pods := newPodList(nil, 2, v1.PodPending, labelMap, rsSpec, "pod") - manager.podLister.Indexer.Add(&pods.Items[0]) + informers.Core().V1().Pods().Informer().GetIndexer().Add(&pods.Items[0]) postExpectationsPod := pods.Items[1] manager.expectations = controller.NewUIDTrackingControllerExpectations(FakeRSExpectations{ @@ -932,7 +946,7 @@ func TestRSSyncExpectations(t *testing.T) { // If we check active pods before checking expectataions, the // ReplicaSet will create a new replica because it doesn't see // this pod, but has fulfilled its expectations. 
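Because the burst test drives the controller by hand, objects are seeded straight into the shared informer indexers and the informer's event handlers are invoked explicitly. A condensed sketch of that recurring pattern (seedAndSync is an illustrative name; it assumes the getKey helper and the factory returned by the test constructor above):

// Illustrative test helper: seed the informer stores directly, then sync by key.
func seedAndSync(t *testing.T, manager *ReplicaSetController, factory informers.SharedInformerFactory, rs *extensions.ReplicaSet, pods []*v1.Pod) {
	factory.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
	for _, pod := range pods {
		factory.Core().V1().Pods().Informer().GetIndexer().Add(pod) // what a watch event would do to the store
		manager.addPod(pod)                                         // drive the handler the informer would have called
	}
	if err := manager.syncReplicaSet(getKey(rs, t)); err != nil {
		t.Fatal(err)
	}
}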
- manager.podLister.Indexer.Add(&postExpectationsPod) + informers.Core().V1().Pods().Informer().GetIndexer().Add(&postExpectationsPod) }, }) manager.syncReplicaSet(getKey(rsSpec, t)) @@ -943,11 +957,10 @@ func TestDeleteControllerAndExpectations(t *testing.T) { client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) stopCh := make(chan struct{}) defer close(stopCh) - manager := testNewReplicaSetControllerFromClient(client, stopCh, 10, 0) - manager.podListerSynced = alwaysReady + manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, 10, 0) rs := newReplicaSet(1, map[string]string{"foo": "bar"}) - manager.rsLister.Indexer.Add(rs) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs) fakePodControl := controller.FakePodControl{} manager.podControl = &fakePodControl @@ -969,7 +982,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) { if !exists || err != nil { t.Errorf("No expectations found for ReplicaSet") } - manager.rsLister.Indexer.Delete(rs) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Delete(rs) manager.syncReplicaSet(getKey(rs, t)) if _, exists, err = manager.expectations.GetExpectations(rsKey); exists { @@ -978,7 +991,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) { // This should have no effect, since we've deleted the ReplicaSet. podExp.Add(-1, 0) - manager.podLister.Indexer.Replace(make([]interface{}, 0), "0") + informers.Core().V1().Pods().Informer().GetIndexer().Replace(make([]interface{}, 0), "0") manager.syncReplicaSet(getKey(rs, t)) validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0) } @@ -1002,8 +1015,7 @@ func TestOverlappingRSs(t *testing.T) { func() { stopCh := make(chan struct{}) defer close(stopCh) - manager := testNewReplicaSetControllerFromClient(client, stopCh, 10, 0) - manager.podListerSynced = alwaysReady + manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, 10, 0) // Create 10 ReplicaSets, shuffled them randomly and insert them into the ReplicaSet controller's store var controllers []*extensions.ReplicaSet @@ -1015,7 +1027,7 @@ func TestOverlappingRSs(t *testing.T) { } shuffledControllers := shuffle(controllers) for j := range shuffledControllers { - manager.rsLister.Indexer.Add(shuffledControllers[j]) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(shuffledControllers[j]) } // Add a pod and make sure only the oldest ReplicaSet is synced pods := newPodList(nil, 1, v1.PodPending, labelMap, controllers[0], "pod") @@ -1035,11 +1047,10 @@ func TestDeletionTimestamp(t *testing.T) { labelMap := map[string]string{"foo": "bar"} stopCh := make(chan struct{}) defer close(stopCh) - manager := testNewReplicaSetControllerFromClient(c, stopCh, 10, 0) - manager.podListerSynced = alwaysReady + manager, informers := testNewReplicaSetControllerFromClient(c, stopCh, 10, 0) rs := newReplicaSet(1, labelMap) - manager.rsLister.Indexer.Add(rs) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs) rsKey, err := controller.KeyFunc(rs) if err != nil { t.Errorf("Couldn't get key for object %#v: %v", rs, err) @@ -1125,15 +1136,14 @@ func TestDeletionTimestamp(t *testing.T) { // setupManagerWithGCEnabled creates a RS manager with a fakePodControl // and with garbageCollectorEnabled set to true -func setupManagerWithGCEnabled(stopCh chan struct{}, objs ...runtime.Object) (manager *ReplicaSetController, 
fakePodControl *controller.FakePodControl) { +func setupManagerWithGCEnabled(stopCh chan struct{}, objs ...runtime.Object) (manager *ReplicaSetController, fakePodControl *controller.FakePodControl, informers informers.SharedInformerFactory) { c := fakeclientset.NewSimpleClientset(objs...) fakePodControl = &controller.FakePodControl{} - manager = testNewReplicaSetControllerFromClient(c, stopCh, BurstReplicas, 0) + manager, informers = testNewReplicaSetControllerFromClient(c, stopCh, BurstReplicas, 0) manager.garbageCollectorEnabled = true - manager.podListerSynced = alwaysReady manager.podControl = fakePodControl - return manager, fakePodControl + return manager, fakePodControl, informers } func TestDoNotPatchPodWithOtherControlRef(t *testing.T) { @@ -1141,14 +1151,14 @@ func TestDoNotPatchPodWithOtherControlRef(t *testing.T) { rs := newReplicaSet(2, labelMap) stopCh := make(chan struct{}) defer close(stopCh) - manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs) - manager.rsLister.Indexer.Add(rs) + manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs) var trueVar = true otherControllerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "v1beta1", Kind: "ReplicaSet", Name: "AnotherRS", Controller: &trueVar} // add to podLister a matching Pod controlled by another controller. Expect no patch. pod := newPod("pod", rs, v1.PodRunning, nil) pod.OwnerReferences = []metav1.OwnerReference{otherControllerReference} - manager.podLister.Indexer.Add(pod) + informers.Core().V1().Pods().Informer().GetIndexer().Add(pod) err := manager.syncReplicaSet(getKey(rs, t)) if err != nil { t.Fatal(err) @@ -1162,15 +1172,15 @@ func TestPatchPodWithOtherOwnerRef(t *testing.T) { rs := newReplicaSet(2, labelMap) stopCh := make(chan struct{}) defer close(stopCh) - manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs) - manager.rsLister.Indexer.Add(rs) + manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs) // add to podLister one more matching pod that doesn't have a controller // ref, but has an owner ref pointing to other object. Expect a patch to // take control of it. unrelatedOwnerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "batch/v1", Kind: "Job", Name: "Job"} pod := newPod("pod", rs, v1.PodRunning, nil) pod.OwnerReferences = []metav1.OwnerReference{unrelatedOwnerReference} - manager.podLister.Indexer.Add(pod) + informers.Core().V1().Pods().Informer().GetIndexer().Add(pod) err := manager.syncReplicaSet(getKey(rs, t)) if err != nil { @@ -1185,14 +1195,14 @@ func TestPatchPodWithCorrectOwnerRef(t *testing.T) { rs := newReplicaSet(2, labelMap) stopCh := make(chan struct{}) defer close(stopCh) - manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs) - manager.rsLister.Indexer.Add(rs) + manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs) // add to podLister a matching pod that has an ownerRef pointing to the rs, // but ownerRef.Controller is false. Expect a patch to take control it. 
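Callers of the GC-enabled helper now receive the factory as a third return value and seed both stores through it, as the tests below do; roughly:

// Sketch of a caller after the signature change; rs comes from newReplicaSet
// and the stop-channel handling matches the surrounding tests.
stopCh := make(chan struct{})
defer close(stopCh)
manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs)
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod", rs, v1.PodRunning, nil))
if err := manager.syncReplicaSet(getKey(rs, t)); err != nil {
	t.Fatal(err)
}
// fakePodControl records the creates/deletes/patches asserted on afterwards.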
rsOwnerReference := metav1.OwnerReference{UID: rs.UID, APIVersion: "v1", Kind: "ReplicaSet", Name: rs.Name} pod := newPod("pod", rs, v1.PodRunning, nil) pod.OwnerReferences = []metav1.OwnerReference{rsOwnerReference} - manager.podLister.Indexer.Add(pod) + informers.Core().V1().Pods().Informer().GetIndexer().Add(pod) err := manager.syncReplicaSet(getKey(rs, t)) if err != nil { @@ -1207,12 +1217,12 @@ func TestPatchPodFails(t *testing.T) { rs := newReplicaSet(2, labelMap) stopCh := make(chan struct{}) defer close(stopCh) - manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs) - manager.rsLister.Indexer.Add(rs) + manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs) // add to podLister two matching pods. Expect two patches to take control // them. - manager.podLister.Indexer.Add(newPod("pod1", rs, v1.PodRunning, nil)) - manager.podLister.Indexer.Add(newPod("pod2", rs, v1.PodRunning, nil)) + informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod1", rs, v1.PodRunning, nil)) + informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod2", rs, v1.PodRunning, nil)) // let both patches fail. The rs controller will assume it fails to take // control of the pods and create new ones. fakePodControl.Err = fmt.Errorf("Fake Error") @@ -1229,13 +1239,13 @@ func TestPatchExtraPodsThenDelete(t *testing.T) { rs := newReplicaSet(2, labelMap) stopCh := make(chan struct{}) defer close(stopCh) - manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs) - manager.rsLister.Indexer.Add(rs) + manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs) // add to podLister three matching pods. Expect three patches to take control // them, and later delete one of them. - manager.podLister.Indexer.Add(newPod("pod1", rs, v1.PodRunning, nil)) - manager.podLister.Indexer.Add(newPod("pod2", rs, v1.PodRunning, nil)) - manager.podLister.Indexer.Add(newPod("pod3", rs, v1.PodRunning, nil)) + informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod1", rs, v1.PodRunning, nil)) + informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod2", rs, v1.PodRunning, nil)) + informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod3", rs, v1.PodRunning, nil)) err := manager.syncReplicaSet(getKey(rs, t)) if err != nil { t.Fatal(err) @@ -1249,8 +1259,8 @@ func TestUpdateLabelsRemoveControllerRef(t *testing.T) { rs := newReplicaSet(2, labelMap) stopCh := make(chan struct{}) defer close(stopCh) - manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs) - manager.rsLister.Indexer.Add(rs) + manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs) // put one pod in the podLister pod := newPod("pod", rs, v1.PodRunning, nil) pod.ResourceVersion = "1" @@ -1264,7 +1274,7 @@ func TestUpdateLabelsRemoveControllerRef(t *testing.T) { // add the updatedPod to the store. This is consistent with the behavior of // the Informer: Informer updates the store before call the handler // (updatePod() in this case). 
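The comment above states the ordering contract these tests now reproduce by hand: a shared informer updates its store first and only then calls the registered handler, so the test adds the new object to the indexer before invoking updatePod. In sketch form (the label change is illustrative):

// Simulating an informer-driven update in a test: store first, handler second.
updatedPod := *pod
updatedPod.Labels = map[string]string{"foo": "baz"}
updatedPod.ResourceVersion = "2"
informers.Core().V1().Pods().Informer().GetIndexer().Add(&updatedPod) // the store update the informer would perform
manager.updatePod(pod, &updatedPod)                                   // the handler then sees (old, new)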
- manager.podLister.Indexer.Add(&updatedPod) + informers.Core().V1().Pods().Informer().GetIndexer().Add(&updatedPod) // send a update of the same pod with modified labels manager.updatePod(pod, &updatedPod) // verifies that rs is added to the queue @@ -1290,16 +1300,16 @@ func TestUpdateSelectorControllerRef(t *testing.T) { rs := newReplicaSet(2, labelMap) stopCh := make(chan struct{}) defer close(stopCh) - manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs) + manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs) // put 2 pods in the podLister - newPodList(manager.podLister.Indexer, 2, v1.PodRunning, labelMap, rs, "pod") + newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 2, v1.PodRunning, labelMap, rs, "pod") // update the RS so that its selector no longer matches the pods updatedRS := *rs updatedRS.Spec.Selector.MatchLabels = map[string]string{"foo": "baz"} // put the updatedRS into the store. This is consistent with the behavior of // the Informer: Informer updates the store before call the handler // (updateRS() in this case). - manager.rsLister.Indexer.Add(&updatedRS) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(&updatedRS) manager.updateRS(rs, &updatedRS) // verifies that the rs is added to the queue rsKey := getKey(rs, t) @@ -1326,12 +1336,12 @@ func TestDoNotAdoptOrCreateIfBeingDeleted(t *testing.T) { rs := newReplicaSet(2, labelMap) stopCh := make(chan struct{}) defer close(stopCh) - manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs) + manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs) now := metav1.Now() rs.DeletionTimestamp = &now - manager.rsLister.Indexer.Add(rs) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs) pod1 := newPod("pod1", rs, v1.PodRunning, nil) - manager.podLister.Indexer.Add(pod1) + informers.Core().V1().Pods().Informer().GetIndexer().Add(pod1) // no patch, no create err := manager.syncReplicaSet(getKey(rs, t)) @@ -1354,18 +1364,17 @@ func TestReadyReplicas(t *testing.T) { client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) stopCh := make(chan struct{}) defer close(stopCh) - manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) // Status.Replica should update to match number of pods in system, 1 new pod should be created. 
labelMap := map[string]string{"foo": "bar"} rs := newReplicaSet(2, labelMap) rs.Status = extensions.ReplicaSetStatus{Replicas: 2, ReadyReplicas: 0, AvailableReplicas: 0, ObservedGeneration: 1} rs.Generation = 1 - manager.rsLister.Indexer.Add(rs) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs) - newPodList(manager.podLister.Indexer, 2, v1.PodPending, labelMap, rs, "pod") - newPodList(manager.podLister.Indexer, 2, v1.PodRunning, labelMap, rs, "pod") + newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 2, v1.PodPending, labelMap, rs, "pod") + newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 2, v1.PodRunning, labelMap, rs, "pod") // This response body is just so we don't err out decoding the http response response := runtime.EncodeOrDie(testapi.Extensions.Codec(), &extensions.ReplicaSet{}) @@ -1397,8 +1406,7 @@ func TestAvailableReplicas(t *testing.T) { client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) stopCh := make(chan struct{}) defer close(stopCh) - manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) // Status.Replica should update to match number of pods in system, 1 new pod should be created. labelMap := map[string]string{"foo": "bar"} @@ -1407,17 +1415,17 @@ func TestAvailableReplicas(t *testing.T) { rs.Generation = 1 // minReadySeconds set to 15s rs.Spec.MinReadySeconds = 15 - manager.rsLister.Indexer.Add(rs) + informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs) // First pod becomes ready 20s ago moment := metav1.Time{Time: time.Now().Add(-2e10)} pod := newPod("pod", rs, v1.PodRunning, &moment) - manager.podLister.Indexer.Add(pod) + informers.Core().V1().Pods().Informer().GetIndexer().Add(pod) // Second pod becomes ready now otherMoment := metav1.Now() otherPod := newPod("otherPod", rs, v1.PodRunning, &otherMoment) - manager.podLister.Indexer.Add(otherPod) + informers.Core().V1().Pods().Informer().GetIndexer().Add(otherPod) // This response body is just so we don't err out decoding the http response response := runtime.EncodeOrDie(testapi.Extensions.Codec(), &extensions.ReplicaSet{}) diff --git a/pkg/controller/replicaset/replica_set_utils.go b/pkg/controller/replicaset/replica_set_utils.go index 56ce629d33..c3c394206b 100644 --- a/pkg/controller/replicaset/replica_set_utils.go +++ b/pkg/controller/replicaset/replica_set_utils.go @@ -33,7 +33,7 @@ import ( ) // updateReplicaSetStatus attempts to update the Status.Replicas of the given ReplicaSet, with a single GET/PUT retry. -func updateReplicaSetStatus(c unversionedextensions.ReplicaSetInterface, rs extensions.ReplicaSet, newStatus extensions.ReplicaSetStatus) (updateErr error) { +func updateReplicaSetStatus(c unversionedextensions.ReplicaSetInterface, rs *extensions.ReplicaSet, newStatus extensions.ReplicaSetStatus) (updateErr error) { // This is the steady state. It happens when the ReplicaSet doesn't have any expectations, since // we do a periodic relist every 30s. If the generations differ but the replicas are // the same, a caller might've resized to the same replica count. @@ -48,11 +48,11 @@ func updateReplicaSetStatus(c unversionedextensions.ReplicaSetInterface, rs exte // deep copy to avoid mutation now. 
// TODO this method need some work. Retry on conflict probably, though I suspect this is stomping status to something it probably shouldn't - copyObj, err := api.Scheme.DeepCopy(&rs) + copyObj, err := api.Scheme.DeepCopy(rs) if err != nil { return err } - rs = *copyObj.(*extensions.ReplicaSet) + rs = copyObj.(*extensions.ReplicaSet) // Save the generation number we acted on, otherwise we might wrongfully indicate // that we've seen a spec update when we retry. @@ -61,7 +61,7 @@ func updateReplicaSetStatus(c unversionedextensions.ReplicaSetInterface, rs exte newStatus.ObservedGeneration = rs.Generation var getErr error - for i, rs := 0, &rs; ; i++ { + for i, rs := 0, rs; ; i++ { glog.V(4).Infof(fmt.Sprintf("Updating replica count for ReplicaSet: %s/%s, ", rs.Namespace, rs.Name) + fmt.Sprintf("replicas %d->%d (need %d), ", rs.Status.Replicas, newStatus.Replicas, *(rs.Spec.Replicas)) + fmt.Sprintf("fullyLabeledReplicas %d->%d, ", rs.Status.FullyLabeledReplicas, newStatus.FullyLabeledReplicas) + @@ -83,7 +83,7 @@ func updateReplicaSetStatus(c unversionedextensions.ReplicaSetInterface, rs exte } } -func calculateStatus(rs extensions.ReplicaSet, filteredPods []*v1.Pod, manageReplicasErr error) extensions.ReplicaSetStatus { +func calculateStatus(rs *extensions.ReplicaSet, filteredPods []*v1.Pod, manageReplicasErr error) extensions.ReplicaSetStatus { newStatus := rs.Status // Count the number of pods that have labels matching the labels of the pod // template of the replica set, the matching pods may have more diff --git a/pkg/controller/replication/BUILD b/pkg/controller/replication/BUILD index d0bdb16742..ba840810e9 100644 --- a/pkg/controller/replication/BUILD +++ b/pkg/controller/replication/BUILD @@ -21,9 +21,9 @@ go_library( "//pkg/api/v1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library", - "//pkg/client/legacylisters:go_default_library", + "//pkg/client/informers/informers_generated/core/v1:go_default_library", + "//pkg/client/listers/core/v1:go_default_library", "//pkg/controller:go_default_library", - "//pkg/controller/informers:go_default_library", "//pkg/util/metrics:go_default_library", "//vendor:github.com/golang/glog", "//vendor:k8s.io/apimachinery/pkg/api/errors", @@ -53,8 +53,9 @@ go_test( "//pkg/api/v1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/clientset_generated/clientset/fake:go_default_library", + "//pkg/client/informers/informers_generated:go_default_library", + "//pkg/client/informers/informers_generated/core/v1:go_default_library", "//pkg/controller:go_default_library", - "//pkg/controller/informers:go_default_library", "//pkg/securitycontext:go_default_library", "//vendor:k8s.io/apimachinery/pkg/api/equality", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", diff --git a/pkg/controller/replication/replication_controller.go b/pkg/controller/replication/replication_controller.go index 454b7fbf66..a0c96c1d12 100644 --- a/pkg/controller/replication/replication_controller.go +++ b/pkg/controller/replication/replication_controller.go @@ -19,6 +19,7 @@ limitations under the License. 
package replication import ( + "fmt" "reflect" "sort" "sync" @@ -41,9 +42,9 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" - "k8s.io/kubernetes/pkg/client/legacylisters" + coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1" + corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/util/metrics" ) @@ -68,13 +69,6 @@ type ReplicationManager struct { kubeClient clientset.Interface podControl controller.PodControlInterface - // internalPodInformer is used to hold a personal informer. If we're using - // a normal shared informer, then the informer will be started for us. If - // we have a personal informer, we must start it ourselves. If you start - // the controller using NewReplicationManager(passing SharedInformer), this - // will be null - internalPodInformer cache.SharedIndexInformer - // An rc is temporarily suspended after creating/deleting these many replicas. // It resumes normal action after observing the watch events for them. burstReplicas int @@ -84,15 +78,13 @@ type ReplicationManager struct { // A TTLCache of pod creates/deletes each rc expects to see. expectations *controller.UIDTrackingControllerExpectations - // A store of replication controllers, populated by the rcController - rcLister listers.StoreToReplicationControllerLister - // A store of pods, populated by the podController - podLister listers.StoreToPodLister - // Watches changes to all pods - podController cache.Controller + rcLister corelisters.ReplicationControllerLister + rcListerSynced cache.InformerSynced + + podLister corelisters.PodLister // podListerSynced returns true if the pod store has been synced at least once. // Added as a member to the struct to allow injection for testing. - podListerSynced func() bool + podListerSynced cache.InformerSynced lookupCache *controller.MatchingCache @@ -105,7 +97,7 @@ type ReplicationManager struct { } // NewReplicationManager configures a replication manager with the specified event recorder -func NewReplicationManager(podInformer, rcInformer cache.SharedIndexInformer, kubeClient clientset.Interface, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager { +func NewReplicationManager(podInformer coreinformers.PodInformer, rcInformer coreinformers.ReplicationControllerInformer, kubeClient clientset.Interface, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager { if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil { metrics.RegisterMetricAndTrackRateLimiterUsage("replication_controller", kubeClient.Core().RESTClient().GetRateLimiter()) } @@ -126,7 +118,7 @@ func NewReplicationManager(podInformer, rcInformer cache.SharedIndexInformer, ku garbageCollectorEnabled: garbageCollectorEnabled, } - rcInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + rcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: rm.enqueueController, UpdateFunc: rm.updateRC, // This will enter the sync loop and no-op, because the controller has been deleted from the store. @@ -134,7 +126,10 @@ func NewReplicationManager(podInformer, rcInformer cache.SharedIndexInformer, ku // way of achieving this is by performing a `stop` operation on the controller. 
DeleteFunc: rm.enqueueController, }) - podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + rm.rcLister = rcInformer.Lister() + rm.rcListerSynced = rcInformer.Informer().HasSynced + + podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: rm.addPod, // This invokes the rc for every pod change, eg: host assignment. Though this might seem like overkill // the most frequent pod update is status, and the associated rc will only list from local storage, so @@ -142,24 +137,14 @@ func NewReplicationManager(podInformer, rcInformer cache.SharedIndexInformer, ku UpdateFunc: rm.updatePod, DeleteFunc: rm.deletePod, }) + rm.podLister = podInformer.Lister() + rm.podListerSynced = podInformer.Informer().HasSynced rm.syncHandler = rm.syncReplicationController - rm.rcLister.Indexer = rcInformer.GetIndexer() - rm.podLister.Indexer = podInformer.GetIndexer() - rm.podListerSynced = podInformer.HasSynced rm.lookupCache = controller.NewMatchingCache(lookupCacheSize) return rm } -// NewReplicationManagerFromClient creates a new ReplicationManager that runs its own informer. -func NewReplicationManagerFromClient(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicationManager { - podInformer := informers.NewPodInformer(kubeClient, resyncPeriod()) - rcInformer := informers.NewReplicationControllerInformer(kubeClient, resyncPeriod()) - rm := NewReplicationManager(podInformer, rcInformer, kubeClient, burstReplicas, lookupCacheSize, false) - rm.internalPodInformer = podInformer - return rm -} - // SetEventRecorder replaces the event recorder used by the replication manager // with the given recorder. Only used for testing. func (rm *ReplicationManager) SetEventRecorder(recorder record.EventRecorder) { @@ -174,11 +159,9 @@ func (rm *ReplicationManager) Run(workers int, stopCh <-chan struct{}) { defer rm.queue.ShutDown() glog.Infof("Starting RC Manager") - if rm.internalPodInformer != nil { - go rm.internalPodInformer.Run(stopCh) - } - if !cache.WaitForCacheSync(stopCh, rm.podListerSynced) { + if !cache.WaitForCacheSync(stopCh, rm.podListerSynced, rm.rcListerSynced) { + utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync")) return } @@ -606,16 +589,19 @@ func (rm *ReplicationManager) syncReplicationController(key string) error { glog.V(4).Infof("Finished syncing controller %q (%v)", key, time.Now().Sub(startTime)) }() - obj, exists, err := rm.rcLister.Indexer.GetByKey(key) + namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { return err } - if !exists { + rc, err := rm.rcLister.ReplicationControllers(namespace).Get(name) + if errors.IsNotFound(err) { glog.Infof("Replication Controller has been deleted %v", key) rm.expectations.DeleteExpectations(key) return nil } - rc := *obj.(*v1.ReplicationController) + if err != nil { + return err + } trace.Step("ReplicationController restored") rcNeedsSync := rm.expectations.SatisfiedExpectations(key) @@ -680,14 +666,20 @@ func (rm *ReplicationManager) syncReplicationController(key string) error { var manageReplicasErr error if rcNeedsSync && rc.DeletionTimestamp == nil { - manageReplicasErr = rm.manageReplicas(filteredPods, &rc) + manageReplicasErr = rm.manageReplicas(filteredPods, rc) } trace.Step("manageReplicas done") + copy, err := api.Scheme.DeepCopy(rc) + if err != nil { + return err + } + rc = copy.(*v1.ReplicationController) + newStatus := calculateStatus(rc, filteredPods, manageReplicasErr) // Always updates status as pods come 
up or die. - if err := updateReplicationControllerStatus(rm.kubeClient.Core().ReplicationControllers(rc.Namespace), rc, newStatus); err != nil { + if err := updateReplicationControllerStatus(rm.kubeClient.Core().ReplicationControllers(rc.Namespace), *rc, newStatus); err != nil { // Multiple things could lead to this update failing. Returning an error causes a requeue without forcing a hotloop return err } diff --git a/pkg/controller/replication/replication_controller_test.go b/pkg/controller/replication/replication_controller_test.go index b8b43558ab..6c3dd5a023 100644 --- a/pkg/controller/replication/replication_controller_test.go +++ b/pkg/controller/replication/replication_controller_test.go @@ -44,8 +44,9 @@ import ( "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake" fakeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake" + informers "k8s.io/kubernetes/pkg/client/informers/informers_generated" + coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/securitycontext" ) @@ -159,16 +160,25 @@ type serverResponse struct { obj interface{} } +func NewReplicationManagerFromClient(kubeClient clientset.Interface, burstReplicas int, lookupCacheSize int) (*ReplicationManager, coreinformers.PodInformer, coreinformers.ReplicationControllerInformer) { + informerFactory := informers.NewSharedInformerFactory(nil, kubeClient, controller.NoResyncPeriodFunc()) + podInformer := informerFactory.Core().V1().Pods() + rcInformer := informerFactory.Core().V1().ReplicationControllers() + rm := NewReplicationManager(podInformer, rcInformer, kubeClient, burstReplicas, lookupCacheSize, false) + rm.podListerSynced = alwaysReady + rm.rcListerSynced = alwaysReady + return rm, podInformer, rcInformer +} + func TestSyncReplicationControllerDoesNothing(t *testing.T) { c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) fakePodControl := controller.FakePodControl{} - manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0) // 2 running pods, a controller with 2 replicas, sync is a no-op controllerSpec := newReplicationController(2) - manager.rcLister.Indexer.Add(controllerSpec) - newPodList(manager.podLister.Indexer, 2, v1.PodRunning, controllerSpec, "pod") + rcInformer.Informer().GetIndexer().Add(controllerSpec) + newPodList(podInformer.Informer().GetIndexer(), 2, v1.PodRunning, controllerSpec, "pod") manager.podControl = &fakePodControl manager.syncReplicationController(getKey(controllerSpec, t)) @@ -178,14 +188,13 @@ func TestSyncReplicationControllerDoesNothing(t *testing.T) { func TestSyncReplicationControllerDeletes(t *testing.T) { c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) fakePodControl := controller.FakePodControl{} - manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0) manager.podControl = &fakePodControl 
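One consequence of reading through shared listers, visible in both the ReplicaSet and ReplicationController hunks above, is that the returned object aliases the informer cache, so both controllers deep-copy before computing a new status. The discipline, in a hedged sketch reusing the api.Scheme call shown above (the RC variant; the ReplicaSet path is identical in shape):

// Objects handed out by shared informers/listers are cache-owned; copy them
// before any mutation, then work only on the copy.
copy, err := api.Scheme.DeepCopy(rc)
if err != nil {
	return err
}
rc = copy.(*v1.ReplicationController)
// From here on, status calculation and the subsequent update see the private
// copy, never the object sitting in the shared cache.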
// 2 running pods and a controller with 1 replica, one pod delete expected controllerSpec := newReplicationController(1) - manager.rcLister.Indexer.Add(controllerSpec) - newPodList(manager.podLister.Indexer, 2, v1.PodRunning, controllerSpec, "pod") + rcInformer.Informer().GetIndexer().Add(controllerSpec) + newPodList(podInformer.Informer().GetIndexer(), 2, v1.PodRunning, controllerSpec, "pod") manager.syncReplicationController(getKey(controllerSpec, t)) validateSyncReplication(t, &fakePodControl, 0, 1, 0) @@ -194,8 +203,7 @@ func TestSyncReplicationControllerDeletes(t *testing.T) { func TestDeleteFinalStateUnknown(t *testing.T) { c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) fakePodControl := controller.FakePodControl{} - manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, _, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0) manager.podControl = &fakePodControl received := make(chan string) @@ -207,7 +215,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) { // The DeletedFinalStateUnknown object should cause the rc manager to insert // the controller matching the selectors of the deleted pod into the work queue. controllerSpec := newReplicationController(1) - manager.rcLister.Indexer.Add(controllerSpec) + rcInformer.Informer().GetIndexer().Add(controllerSpec) pods := newPodList(nil, 1, v1.PodRunning, controllerSpec, "pod") manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]}) @@ -226,12 +234,11 @@ func TestDeleteFinalStateUnknown(t *testing.T) { func TestSyncReplicationControllerCreates(t *testing.T) { c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) - manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, _, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0) // A controller with 2 replicas and no pods in the store, 2 creates expected rc := newReplicationController(2) - manager.rcLister.Indexer.Add(rc) + rcInformer.Informer().GetIndexer().Add(rc) fakePodControl := controller.FakePodControl{} manager.podControl = &fakePodControl @@ -248,15 +255,14 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) { testServer := httptest.NewServer(&fakeHandler) defer testServer.Close() c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) - manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0) // Steady state for the replication controller, no Status.Replicas updates expected activePods := 5 rc := newReplicationController(activePods) - manager.rcLister.Indexer.Add(rc) + rcInformer.Informer().GetIndexer().Add(rc) rc.Status = v1.ReplicationControllerStatus{Replicas: int32(activePods), ReadyReplicas: int32(activePods), AvailableReplicas: int32(activePods)} - newPodList(manager.podLister.Indexer, activePods, v1.PodRunning, rc, "pod") + newPodList(podInformer.Informer().GetIndexer(), activePods, v1.PodRunning, rc, "pod") 
fakePodControl := controller.FakePodControl{} manager.podControl = &fakePodControl @@ -289,20 +295,19 @@ func TestControllerUpdateReplicas(t *testing.T) { testServer := httptest.NewServer(&fakeHandler) defer testServer.Close() c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) - manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0) // Insufficient number of pods in the system, and Status.Replicas is wrong; // Status.Replica should update to match number of pods in system, 1 new pod should be created. rc := newReplicationController(5) - manager.rcLister.Indexer.Add(rc) + rcInformer.Informer().GetIndexer().Add(rc) rc.Status = v1.ReplicationControllerStatus{Replicas: 2, FullyLabeledReplicas: 6, ReadyReplicas: 2, AvailableReplicas: 2, ObservedGeneration: 0} rc.Generation = 1 - newPodList(manager.podLister.Indexer, 2, v1.PodRunning, rc, "pod") + newPodList(podInformer.Informer().GetIndexer(), 2, v1.PodRunning, rc, "pod") rcCopy := *rc extraLabelMap := map[string]string{"foo": "bar", "extraKey": "extraValue"} rcCopy.Spec.Selector = extraLabelMap - newPodList(manager.podLister.Indexer, 2, v1.PodRunning, &rcCopy, "podWithExtraLabel") + newPodList(podInformer.Informer().GetIndexer(), 2, v1.PodRunning, &rcCopy, "podWithExtraLabel") // This response body is just so we don't err out decoding the http response response := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.ReplicationController{}) @@ -335,13 +340,12 @@ func TestSyncReplicationControllerDormancy(t *testing.T) { defer testServer.Close() c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) fakePodControl := controller.FakePodControl{} - manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0) manager.podControl = &fakePodControl controllerSpec := newReplicationController(2) - manager.rcLister.Indexer.Add(controllerSpec) - newPodList(manager.podLister.Indexer, 1, v1.PodRunning, controllerSpec, "pod") + rcInformer.Informer().GetIndexer().Add(controllerSpec) + newPodList(podInformer.Informer().GetIndexer(), 1, v1.PodRunning, controllerSpec, "pod") // Creates a replica and sets expectations controllerSpec.Status.Replicas = 1 @@ -388,8 +392,7 @@ func TestSyncReplicationControllerDormancy(t *testing.T) { } func TestPodControllerLookup(t *testing.T) { - manager := NewReplicationManagerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}), controller.NoResyncPeriodFunc, BurstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, _, rcInformer := NewReplicationManagerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}), BurstReplicas, 0) testCases := []struct { inRCs []*v1.ReplicationController pod *v1.Pod @@ -435,7 +438,7 @@ func TestPodControllerLookup(t *testing.T) { } for _, c := range testCases { for _, 
r := range c.inRCs { - manager.rcLister.Indexer.Add(r) + rcInformer.Informer().GetIndexer().Add(r) } if rc := manager.getPodController(c.pod); rc != nil { if c.outRCName != rc.Name { @@ -453,12 +456,11 @@ func TestWatchControllers(t *testing.T) { c.AddWatchReactor("replicationcontrollers", core.DefaultWatchReactor(fakeWatch, nil)) stopCh := make(chan struct{}) defer close(stopCh) - informers := informers.NewSharedInformerFactory(c, nil, controller.NoResyncPeriodFunc()) - podInformer := informers.Pods().Informer() - rcInformer := informers.ReplicationControllers().Informer() + informers := informers.NewSharedInformerFactory(nil, c, controller.NoResyncPeriodFunc()) + podInformer := informers.Core().V1().Pods() + rcInformer := informers.Core().V1().ReplicationControllers() manager := NewReplicationManager(podInformer, rcInformer, c, BurstReplicas, 0, false) informers.Start(stopCh) - manager.podListerSynced = alwaysReady var testControllerSpec v1.ReplicationController received := make(chan string) @@ -467,7 +469,7 @@ func TestWatchControllers(t *testing.T) { // and eventually into the syncHandler. The handler validates the received controller // and closes the received channel to indicate that the test can finish. manager.syncHandler = func(key string) error { - obj, exists, err := manager.rcLister.Indexer.GetByKey(key) + obj, exists, err := rcInformer.Informer().GetIndexer().GetByKey(key) if !exists || err != nil { t.Errorf("Expected to find controller under key %v", key) } @@ -497,18 +499,17 @@ func TestWatchPods(t *testing.T) { fakeWatch := watch.NewFake() c := &fake.Clientset{} c.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil)) - manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0) // Put one rc and one pod into the controller's stores testControllerSpec := newReplicationController(1) - manager.rcLister.Indexer.Add(testControllerSpec) + rcInformer.Informer().GetIndexer().Add(testControllerSpec) received := make(chan string) // The pod update sent through the fakeWatcher should figure out the managing rc and // send it into the syncHandler. manager.syncHandler = func(key string) error { - obj, exists, err := manager.rcLister.Indexer.GetByKey(key) + obj, exists, err := rcInformer.Informer().GetIndexer().GetByKey(key) if !exists || err != nil { t.Errorf("Expected to find controller under key %v", key) } @@ -523,7 +524,7 @@ func TestWatchPods(t *testing.T) { // and make sure it hits the sync method for the right rc. 
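When a test exercises a single watch path, it runs just that typed informer instead of the whole factory, exactly as the next lines do: the fake watcher feeds the informer, the informer updates its store and calls the pod handlers, and the worker drains the queue into syncHandler. The end-to-end shape (variable names follow the test; the interval is illustrative):

// Fake watch -> informer -> handlers -> work queue -> worker -> syncHandler.
go podInformer.Informer().Run(stopCh)
go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
fakeWatch.Add(&pods.Items[0]) // emits a pod event through the prepared watch reactor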
stopCh := make(chan struct{}) defer close(stopCh) - go manager.internalPodInformer.Run(stopCh) + go podInformer.Informer().Run(stopCh) go wait.Until(manager.worker, 10*time.Millisecond, stopCh) pods := newPodList(nil, 1, v1.PodRunning, testControllerSpec, "pod") @@ -539,13 +540,12 @@ func TestWatchPods(t *testing.T) { } func TestUpdatePods(t *testing.T) { - manager := NewReplicationManagerFromClient(fake.NewSimpleClientset(), controller.NoResyncPeriodFunc, BurstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, podInformer, rcInformer := NewReplicationManagerFromClient(fake.NewSimpleClientset(), BurstReplicas, 0) received := make(chan string) manager.syncHandler = func(key string) error { - obj, exists, err := manager.rcLister.Indexer.GetByKey(key) + obj, exists, err := rcInformer.Informer().GetIndexer().GetByKey(key) if !exists || err != nil { t.Errorf("Expected to find controller under key %v", key) } @@ -559,16 +559,16 @@ func TestUpdatePods(t *testing.T) { // Put 2 rcs and one pod into the controller's stores testControllerSpec1 := newReplicationController(1) - manager.rcLister.Indexer.Add(testControllerSpec1) + rcInformer.Informer().GetIndexer().Add(testControllerSpec1) testControllerSpec2 := *testControllerSpec1 testControllerSpec2.Spec.Selector = map[string]string{"bar": "foo"} testControllerSpec2.Name = "barfoo" - manager.rcLister.Indexer.Add(&testControllerSpec2) + rcInformer.Informer().GetIndexer().Add(&testControllerSpec2) // case 1: We put in the podLister a pod with labels matching // testControllerSpec1, then update its labels to match testControllerSpec2. // We expect to receive a sync request for both controllers. - pod1 := newPodList(manager.podLister.Indexer, 1, v1.PodRunning, testControllerSpec1, "pod").Items[0] + pod1 := newPodList(podInformer.Informer().GetIndexer(), 1, v1.PodRunning, testControllerSpec1, "pod").Items[0] pod1.ResourceVersion = "1" pod2 := pod1 pod2.Labels = testControllerSpec2.Spec.Selector @@ -617,13 +617,12 @@ func TestControllerUpdateRequeue(t *testing.T) { defer testServer.Close() c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) - manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0) rc := newReplicationController(1) - manager.rcLister.Indexer.Add(rc) + rcInformer.Informer().GetIndexer().Add(rc) rc.Status = v1.ReplicationControllerStatus{Replicas: 2} - newPodList(manager.podLister.Indexer, 1, v1.PodRunning, rc, "pod") + newPodList(podInformer.Informer().GetIndexer(), 1, v1.PodRunning, rc, "pod") fakePodControl := controller.FakePodControl{} manager.podControl = &fakePodControl @@ -688,12 +687,11 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) { func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) { c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) fakePodControl := controller.FakePodControl{} - manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, burstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, burstReplicas, 0) manager.podControl = &fakePodControl controllerSpec := 
newReplicationController(numReplicas) - manager.rcLister.Indexer.Add(controllerSpec) + rcInformer.Informer().GetIndexer().Add(controllerSpec) expectedPods := 0 pods := newPodList(nil, numReplicas, v1.PodPending, controllerSpec, "pod") @@ -707,14 +705,14 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) for _, replicas := range []int{numReplicas, 0} { *(controllerSpec.Spec.Replicas) = int32(replicas) - manager.rcLister.Indexer.Add(controllerSpec) + rcInformer.Informer().GetIndexer().Add(controllerSpec) for i := 0; i < numReplicas; i += burstReplicas { manager.syncReplicationController(getKey(controllerSpec, t)) // The store accrues active pods. It's also used by the rc to determine how many // replicas to create. - activePods := len(manager.podLister.Indexer.List()) + activePods := len(podInformer.Informer().GetIndexer().List()) if replicas != 0 { // This is the number of pods currently "in flight". They were created by the rc manager above, // which then puts the rc to sleep till all of them have been observed. @@ -728,7 +726,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) // This simulates the watch events for all but 1 of the expected pods. // None of these should wake the controller because it has expectations==BurstReplicas. for i := 0; i < expectedPods-1; i++ { - manager.podLister.Indexer.Add(&pods.Items[i]) + podInformer.Informer().GetIndexer().Add(&pods.Items[i]) manager.addPod(&pods.Items[i]) } @@ -764,7 +762,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) // has exactly one expectation at the end, to verify that we // don't double delete. for i := range podsToDelete[1:] { - manager.podLister.Indexer.Delete(podsToDelete[i]) + podInformer.Informer().GetIndexer().Delete(podsToDelete[i]) manager.deletePod(podsToDelete[i]) } podExp, exists, err := manager.expectations.GetExpectations(rcKey) @@ -785,7 +783,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) // The last add pod will decrease the expectation of the rc to 0, // which will cause it to create/delete the remaining replicas up to burstReplicas. 
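Worth noting, since every iteration of this hunk relies on it: with the manager's private pod store gone, the test simulates a watch event in two steps that mirror a real SharedInformer's delivery order, first mutate the shared cache, then invoke the handler. A short sketch of that sequence, using only calls that already appear in this file:

// Simulate "pod added" the way a SharedInformer delivers it:
// update the store first, then fire the event handler.
podInformer.Informer().GetIndexer().Add(&pods.Items[i])
manager.addPod(&pods.Items[i])

// Deletions follow the same two-step order.
podInformer.Informer().GetIndexer().Delete(podsToDelete[i])
manager.deletePod(podsToDelete[i])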
if replicas != 0 { - manager.podLister.Indexer.Add(&pods.Items[expectedPods-1]) + podInformer.Informer().GetIndexer().Add(&pods.Items[expectedPods-1]) manager.addPod(&pods.Items[expectedPods-1]) } else { expectedDel := manager.expectations.GetUIDs(getKey(controllerSpec, t)) @@ -800,14 +798,14 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) Labels: controllerSpec.Spec.Selector, }, } - manager.podLister.Indexer.Delete(lastPod) + podInformer.Informer().GetIndexer().Delete(lastPod) manager.deletePod(lastPod) } pods.Items = pods.Items[expectedPods:] } // Confirm that we've created the right number of replicas - activePods := int32(len(manager.podLister.Indexer.List())) + activePods := int32(len(podInformer.Informer().GetIndexer().List())) if activePods != *(controllerSpec.Spec.Replicas) { t.Fatalf("Unexpected number of active pods, expected %d, got %d", *(controllerSpec.Spec.Replicas), activePods) } @@ -838,14 +836,13 @@ func (fe FakeRCExpectations) SatisfiedExpectations(controllerKey string) bool { func TestRCSyncExpectations(t *testing.T) { c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) fakePodControl := controller.FakePodControl{} - manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 2, 0) - manager.podListerSynced = alwaysReady + manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, 2, 0) manager.podControl = &fakePodControl controllerSpec := newReplicationController(2) - manager.rcLister.Indexer.Add(controllerSpec) + rcInformer.Informer().GetIndexer().Add(controllerSpec) pods := newPodList(nil, 2, v1.PodPending, controllerSpec, "pod") - manager.podLister.Indexer.Add(&pods.Items[0]) + podInformer.Informer().GetIndexer().Add(&pods.Items[0]) postExpectationsPod := pods.Items[1] manager.expectations = controller.NewUIDTrackingControllerExpectations(FakeRCExpectations{ @@ -853,7 +850,7 @@ func TestRCSyncExpectations(t *testing.T) { // If we check active pods before checking expectataions, the rc // will create a new replica because it doesn't see this pod, but // has fulfilled its expectations. 
- manager.podLister.Indexer.Add(&postExpectationsPod) + podInformer.Informer().GetIndexer().Add(&postExpectationsPod) }, }) manager.syncReplicationController(getKey(controllerSpec, t)) @@ -862,11 +859,10 @@ func TestRCSyncExpectations(t *testing.T) { func TestDeleteControllerAndExpectations(t *testing.T) { c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) - manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0) - manager.podListerSynced = alwaysReady + manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, 10, 0) rc := newReplicationController(1) - manager.rcLister.Indexer.Add(rc) + rcInformer.Informer().GetIndexer().Add(rc) fakePodControl := controller.FakePodControl{} manager.podControl = &fakePodControl @@ -888,7 +884,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) { if !exists || err != nil { t.Errorf("No expectations found for rc") } - manager.rcLister.Indexer.Delete(rc) + rcInformer.Informer().GetIndexer().Delete(rc) manager.syncReplicationController(getKey(rc, t)) if _, exists, err = manager.expectations.GetExpectations(rcKey); exists { @@ -897,7 +893,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) { // This should have no effect, since we've deleted the rc. podExp.Add(-1, 0) - manager.podLister.Indexer.Replace(make([]interface{}, 0), "0") + podInformer.Informer().GetIndexer().Replace(make([]interface{}, 0), "0") manager.syncReplicationController(getKey(rc, t)) validateSyncReplication(t, &fakePodControl, 0, 0, 0) } @@ -917,8 +913,7 @@ func TestOverlappingRCs(t *testing.T) { c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) for i := 0; i < 5; i++ { - manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0) - manager.podListerSynced = alwaysReady + manager, _, rcInformer := NewReplicationManagerFromClient(c, 10, 0) // Create 10 rcs, shuffled them randomly and insert them into the rc manager's store var controllers []*v1.ReplicationController @@ -930,7 +925,7 @@ func TestOverlappingRCs(t *testing.T) { } shuffledControllers := shuffle(controllers) for j := range shuffledControllers { - manager.rcLister.Indexer.Add(shuffledControllers[j]) + rcInformer.Informer().GetIndexer().Add(shuffledControllers[j]) } // Add a pod and make sure only the oldest rc is synced pods := newPodList(nil, 1, v1.PodPending, controllers[0], "pod") @@ -946,11 +941,10 @@ func TestOverlappingRCs(t *testing.T) { func TestDeletionTimestamp(t *testing.T) { c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) - manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0) - manager.podListerSynced = alwaysReady + manager, _, rcInformer := NewReplicationManagerFromClient(c, 10, 0) controllerSpec := newReplicationController(1) - manager.rcLister.Indexer.Add(controllerSpec) + rcInformer.Informer().GetIndexer().Add(controllerSpec) rcKey, err := controller.KeyFunc(controllerSpec) if err != nil { t.Errorf("Couldn't get key for object %#v: %v", controllerSpec, err) @@ -1036,7 +1030,7 @@ func TestDeletionTimestamp(t *testing.T) { func BenchmarkGetPodControllerMultiNS(b *testing.B) { client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", 
ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) - manager := NewReplicationManagerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager, _, rcInformer := NewReplicationManagerFromClient(client, BurstReplicas, 0) const nsNum = 1000 @@ -1062,7 +1056,7 @@ func BenchmarkGetPodControllerMultiNS(b *testing.B) { ns := fmt.Sprintf("ns-%d", i) for j := 0; j < 10; j++ { rcName := fmt.Sprintf("rc-%d", j) - manager.rcLister.Indexer.Add(&v1.ReplicationController{ + rcInformer.Informer().GetIndexer().Add(&v1.ReplicationController{ ObjectMeta: metav1.ObjectMeta{Name: rcName, Namespace: ns}, Spec: v1.ReplicationControllerSpec{ Selector: map[string]string{"rcName": rcName}, @@ -1082,7 +1076,7 @@ func BenchmarkGetPodControllerMultiNS(b *testing.B) { func BenchmarkGetPodControllerSingleNS(b *testing.B) { client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) - manager := NewReplicationManagerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager, _, rcInformer := NewReplicationManagerFromClient(client, BurstReplicas, 0) const rcNum = 1000 const replicaNum = 3 @@ -1104,7 +1098,7 @@ func BenchmarkGetPodControllerSingleNS(b *testing.B) { for i := 0; i < rcNum; i++ { rcName := fmt.Sprintf("rc-%d", i) - manager.rcLister.Indexer.Add(&v1.ReplicationController{ + rcInformer.Informer().GetIndexer().Add(&v1.ReplicationController{ ObjectMeta: metav1.ObjectMeta{Name: rcName, Namespace: "foo"}, Spec: v1.ReplicationControllerSpec{ Selector: map[string]string{"rcName": rcName}, @@ -1121,26 +1115,25 @@ func BenchmarkGetPodControllerSingleNS(b *testing.B) { } // setupManagerWithGCEnabled creates a RC manager with a fakePodControl and with garbageCollectorEnabled set to true -func setupManagerWithGCEnabled(objs ...runtime.Object) (manager *ReplicationManager, fakePodControl *controller.FakePodControl) { +func setupManagerWithGCEnabled(objs ...runtime.Object) (manager *ReplicationManager, fakePodControl *controller.FakePodControl, podInformer coreinformers.PodInformer, rcInformer coreinformers.ReplicationControllerInformer) { c := fakeclientset.NewSimpleClientset(objs...) fakePodControl = &controller.FakePodControl{} - manager = NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager, podInformer, rcInformer = NewReplicationManagerFromClient(c, BurstReplicas, 0) manager.garbageCollectorEnabled = true - manager.podListerSynced = alwaysReady manager.podControl = fakePodControl - return manager, fakePodControl + return manager, fakePodControl, podInformer, rcInformer } func TestDoNotPatchPodWithOtherControlRef(t *testing.T) { - manager, fakePodControl := setupManagerWithGCEnabled() + manager, fakePodControl, podInformer, rcInformer := setupManagerWithGCEnabled() rc := newReplicationController(2) - manager.rcLister.Indexer.Add(rc) + rcInformer.Informer().GetIndexer().Add(rc) var trueVar = true otherControllerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "v1", Kind: "ReplicationController", Name: "AnotherRC", Controller: &trueVar} // add to podLister a matching Pod controlled by another controller. Expect no patch. 
pod := newPod("pod", rc, v1.PodRunning, nil) pod.OwnerReferences = []metav1.OwnerReference{otherControllerReference} - manager.podLister.Indexer.Add(pod) + podInformer.Informer().GetIndexer().Add(pod) err := manager.syncReplicationController(getKey(rc, t)) if err != nil { t.Fatal(err) @@ -1151,15 +1144,15 @@ func TestDoNotPatchPodWithOtherControlRef(t *testing.T) { func TestPatchPodWithOtherOwnerRef(t *testing.T) { rc := newReplicationController(2) - manager, fakePodControl := setupManagerWithGCEnabled(rc) - manager.rcLister.Indexer.Add(rc) + manager, fakePodControl, podInformer, rcInformer := setupManagerWithGCEnabled(rc) + rcInformer.Informer().GetIndexer().Add(rc) // add to podLister one more matching pod that doesn't have a controller // ref, but has an owner ref pointing to other object. Expect a patch to // take control of it. unrelatedOwnerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "batch/v1", Kind: "Job", Name: "Job"} pod := newPod("pod", rc, v1.PodRunning, nil) pod.OwnerReferences = []metav1.OwnerReference{unrelatedOwnerReference} - manager.podLister.Indexer.Add(pod) + podInformer.Informer().GetIndexer().Add(pod) err := manager.syncReplicationController(getKey(rc, t)) if err != nil { @@ -1171,14 +1164,14 @@ func TestPatchPodWithOtherOwnerRef(t *testing.T) { func TestPatchPodWithCorrectOwnerRef(t *testing.T) { rc := newReplicationController(2) - manager, fakePodControl := setupManagerWithGCEnabled(rc) - manager.rcLister.Indexer.Add(rc) + manager, fakePodControl, podInformer, rcInformer := setupManagerWithGCEnabled(rc) + rcInformer.Informer().GetIndexer().Add(rc) // add to podLister a matching pod that has an ownerRef pointing to the rc, // but ownerRef.Controller is false. Expect a patch to take control it. rcOwnerReference := metav1.OwnerReference{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name} pod := newPod("pod", rc, v1.PodRunning, nil) pod.OwnerReferences = []metav1.OwnerReference{rcOwnerReference} - manager.podLister.Indexer.Add(pod) + podInformer.Informer().GetIndexer().Add(pod) err := manager.syncReplicationController(getKey(rc, t)) if err != nil { @@ -1190,12 +1183,12 @@ func TestPatchPodWithCorrectOwnerRef(t *testing.T) { func TestPatchPodFails(t *testing.T) { rc := newReplicationController(2) - manager, fakePodControl := setupManagerWithGCEnabled(rc) - manager.rcLister.Indexer.Add(rc) + manager, fakePodControl, podInformer, rcInformer := setupManagerWithGCEnabled(rc) + rcInformer.Informer().GetIndexer().Add(rc) // add to podLister two matching pods. Expect two patches to take control // them. - manager.podLister.Indexer.Add(newPod("pod1", rc, v1.PodRunning, nil)) - manager.podLister.Indexer.Add(newPod("pod2", rc, v1.PodRunning, nil)) + podInformer.Informer().GetIndexer().Add(newPod("pod1", rc, v1.PodRunning, nil)) + podInformer.Informer().GetIndexer().Add(newPod("pod2", rc, v1.PodRunning, nil)) // let both patches fail. The rc manager will assume it fails to take // control of the pods and create new ones. fakePodControl.Err = fmt.Errorf("Fake Error") @@ -1209,13 +1202,13 @@ func TestPatchPodFails(t *testing.T) { func TestPatchExtraPodsThenDelete(t *testing.T) { rc := newReplicationController(2) - manager, fakePodControl := setupManagerWithGCEnabled(rc) - manager.rcLister.Indexer.Add(rc) + manager, fakePodControl, podInformer, rcInformer := setupManagerWithGCEnabled(rc) + rcInformer.Informer().GetIndexer().Add(rc) // add to podLister three matching pods. 
Expect three patches to take control // them, and later delete one of them. - manager.podLister.Indexer.Add(newPod("pod1", rc, v1.PodRunning, nil)) - manager.podLister.Indexer.Add(newPod("pod2", rc, v1.PodRunning, nil)) - manager.podLister.Indexer.Add(newPod("pod3", rc, v1.PodRunning, nil)) + podInformer.Informer().GetIndexer().Add(newPod("pod1", rc, v1.PodRunning, nil)) + podInformer.Informer().GetIndexer().Add(newPod("pod2", rc, v1.PodRunning, nil)) + podInformer.Informer().GetIndexer().Add(newPod("pod3", rc, v1.PodRunning, nil)) err := manager.syncReplicationController(getKey(rc, t)) if err != nil { t.Fatal(err) @@ -1225,9 +1218,9 @@ func TestPatchExtraPodsThenDelete(t *testing.T) { } func TestUpdateLabelsRemoveControllerRef(t *testing.T) { - manager, fakePodControl := setupManagerWithGCEnabled() + manager, fakePodControl, podInformer, rcInformer := setupManagerWithGCEnabled() rc := newReplicationController(2) - manager.rcLister.Indexer.Add(rc) + rcInformer.Informer().GetIndexer().Add(rc) // put one pod in the podLister pod := newPod("pod", rc, v1.PodRunning, nil) pod.ResourceVersion = "1" @@ -1241,7 +1234,7 @@ func TestUpdateLabelsRemoveControllerRef(t *testing.T) { // add the updatedPod to the store. This is consistent with the behavior of // the Informer: Informer updates the store before call the handler // (updatePod() in this case). - manager.podLister.Indexer.Add(&updatedPod) + podInformer.Informer().GetIndexer().Add(&updatedPod) // send a update of the same pod with modified labels manager.updatePod(pod, &updatedPod) // verifies that rc is added to the queue @@ -1263,17 +1256,17 @@ func TestUpdateLabelsRemoveControllerRef(t *testing.T) { } func TestUpdateSelectorControllerRef(t *testing.T) { - manager, fakePodControl := setupManagerWithGCEnabled() + manager, fakePodControl, podInformer, rcInformer := setupManagerWithGCEnabled() rc := newReplicationController(2) // put 2 pods in the podLister - newPodList(manager.podLister.Indexer, 2, v1.PodRunning, rc, "pod") + newPodList(podInformer.Informer().GetIndexer(), 2, v1.PodRunning, rc, "pod") // update the RC so that its selector no longer matches the pods updatedRC := *rc updatedRC.Spec.Selector = map[string]string{"foo": "baz"} // put the updatedRC into the store. This is consistent with the behavior of // the Informer: Informer updates the store before call the handler // (updateRC() in this case). - manager.rcLister.Indexer.Add(&updatedRC) + rcInformer.Informer().GetIndexer().Add(&updatedRC) manager.updateRC(rc, &updatedRC) // verifies that the rc is added to the queue rcKey := getKey(rc, t) @@ -1296,13 +1289,13 @@ func TestUpdateSelectorControllerRef(t *testing.T) { // RC manager shouldn't adopt or create more pods if the rc is about to be // deleted. 
func TestDoNotAdoptOrCreateIfBeingDeleted(t *testing.T) { - manager, fakePodControl := setupManagerWithGCEnabled() + manager, fakePodControl, podInformer, rcInformer := setupManagerWithGCEnabled() rc := newReplicationController(2) now := metav1.Now() rc.DeletionTimestamp = &now - manager.rcLister.Indexer.Add(rc) + rcInformer.Informer().GetIndexer().Add(rc) pod1 := newPod("pod1", rc, v1.PodRunning, nil) - manager.podLister.Indexer.Add(pod1) + podInformer.Informer().GetIndexer().Add(pod1) // no patch, no create err := manager.syncReplicationController(getKey(rc, t)) @@ -1322,17 +1315,16 @@ func TestReadyReplicas(t *testing.T) { defer testServer.Close() c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) - manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0) // Status.Replica should update to match number of pods in system, 1 new pod should be created. rc := newReplicationController(2) rc.Status = v1.ReplicationControllerStatus{Replicas: 2, ReadyReplicas: 0, AvailableReplicas: 0, ObservedGeneration: 1} rc.Generation = 1 - manager.rcLister.Indexer.Add(rc) + rcInformer.Informer().GetIndexer().Add(rc) - newPodList(manager.podLister.Indexer, 2, v1.PodPending, rc, "pod") - newPodList(manager.podLister.Indexer, 2, v1.PodRunning, rc, "pod") + newPodList(podInformer.Informer().GetIndexer(), 2, v1.PodPending, rc, "pod") + newPodList(podInformer.Informer().GetIndexer(), 2, v1.PodRunning, rc, "pod") // This response body is just so we don't err out decoding the http response response := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.ReplicationController{}) @@ -1361,8 +1353,7 @@ func TestAvailableReplicas(t *testing.T) { defer testServer.Close() c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}) - manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) - manager.podListerSynced = alwaysReady + manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0) // Status.Replica should update to match number of pods in system, 1 new pod should be created. 
rc := newReplicationController(2) @@ -1370,17 +1361,17 @@ func TestAvailableReplicas(t *testing.T) { rc.Generation = 1 // minReadySeconds set to 15s rc.Spec.MinReadySeconds = 15 - manager.rcLister.Indexer.Add(rc) + rcInformer.Informer().GetIndexer().Add(rc) // First pod becomes ready 20s ago moment := metav1.Time{Time: time.Now().Add(-2e10)} pod := newPod("pod", rc, v1.PodRunning, &moment) - manager.podLister.Indexer.Add(pod) + podInformer.Informer().GetIndexer().Add(pod) // Second pod becomes ready now otherMoment := metav1.Now() otherPod := newPod("otherPod", rc, v1.PodRunning, &otherMoment) - manager.podLister.Indexer.Add(otherPod) + podInformer.Informer().GetIndexer().Add(otherPod) // This response body is just so we don't err out decoding the http response response := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.ReplicationController{}) diff --git a/pkg/controller/replication/replication_controller_utils.go b/pkg/controller/replication/replication_controller_utils.go index db572c4617..166a3c14a9 100644 --- a/pkg/controller/replication/replication_controller_utils.go +++ b/pkg/controller/replication/replication_controller_utils.go @@ -84,7 +84,7 @@ func (o OverlappingControllers) Less(i, j int) bool { return o[i].CreationTimestamp.Before(o[j].CreationTimestamp) } -func calculateStatus(rc v1.ReplicationController, filteredPods []*v1.Pod, manageReplicasErr error) v1.ReplicationControllerStatus { +func calculateStatus(rc *v1.ReplicationController, filteredPods []*v1.Pod, manageReplicasErr error) v1.ReplicationControllerStatus { newStatus := rc.Status // Count the number of pods that have labels matching the labels of the pod // template of the replication controller, the matching pods may have more diff --git a/pkg/controller/volume/attachdetach/BUILD b/pkg/controller/volume/attachdetach/BUILD index ee42d8e354..486cb85891 100644 --- a/pkg/controller/volume/attachdetach/BUILD +++ b/pkg/controller/volume/attachdetach/BUILD @@ -16,6 +16,8 @@ go_library( "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", + "//pkg/client/informers/informers_generated/core/v1:go_default_library", + "//pkg/client/listers/core/v1:go_default_library", "//pkg/cloudprovider:go_default_library", "//pkg/controller/volume/attachdetach/cache:go_default_library", "//pkg/controller/volume/attachdetach/populator:go_default_library", @@ -42,7 +44,8 @@ go_test( library = ":go_default_library", tags = ["automanaged"], deps = [ - "//pkg/controller/informers:go_default_library", + "//pkg/client/informers/informers_generated:go_default_library", + "//pkg/controller:go_default_library", "//pkg/controller/volume/attachdetach/testing:go_default_library", ], ) diff --git a/pkg/controller/volume/attachdetach/attach_detach_controller.go b/pkg/controller/volume/attachdetach/attach_detach_controller.go index 4238074fbe..28d826d41f 100644 --- a/pkg/controller/volume/attachdetach/attach_detach_controller.go +++ b/pkg/controller/volume/attachdetach/attach_detach_controller.go @@ -33,6 +33,8 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" + coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1" + corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator" @@ -70,10 +72,10 @@ type 
AttachDetachController interface { // NewAttachDetachController returns a new instance of AttachDetachController. func NewAttachDetachController( kubeClient clientset.Interface, - podInformer kcache.SharedInformer, - nodeInformer kcache.SharedInformer, - pvcInformer kcache.SharedInformer, - pvInformer kcache.SharedInformer, + podInformer coreinformers.PodInformer, + nodeInformer coreinformers.NodeInformer, + pvcInformer coreinformers.PersistentVolumeClaimInformer, + pvInformer coreinformers.PersistentVolumeInformer, cloud cloudprovider.Interface, plugins []volume.VolumePlugin, disableReconciliationSync bool, @@ -93,23 +95,27 @@ func NewAttachDetachController( // dropped pods so they are continuously processed until it is accepted or // deleted (probably can't do this with sharedInformer), etc. adc := &attachDetachController{ - kubeClient: kubeClient, - pvcInformer: pvcInformer, - pvInformer: pvInformer, - cloud: cloud, + kubeClient: kubeClient, + pvcLister: pvcInformer.Lister(), + pvcsSynced: pvcInformer.Informer().HasSynced, + pvLister: pvInformer.Lister(), + pvsSynced: pvInformer.Informer().HasSynced, + cloud: cloud, } - podInformer.AddEventHandler(kcache.ResourceEventHandlerFuncs{ + podInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{ AddFunc: adc.podAdd, UpdateFunc: adc.podUpdate, DeleteFunc: adc.podDelete, }) + adc.podsSynced = podInformer.Informer().HasSynced - nodeInformer.AddEventHandler(kcache.ResourceEventHandlerFuncs{ + nodeInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{ AddFunc: adc.nodeAdd, UpdateFunc: adc.nodeUpdate, DeleteFunc: adc.nodeDelete, }) + adc.nodesSynced = nodeInformer.Informer().HasSynced if err := adc.volumePluginMgr.InitPlugins(plugins, adc); err != nil { return nil, fmt.Errorf("Could not initialize volume plugins for Attach/Detach Controller: %+v", err) @@ -129,7 +135,7 @@ func NewAttachDetachController( recorder, false)) // flag for experimental binary check for volume mount adc.nodeStatusUpdater = statusupdater.NewNodeStatusUpdater( - kubeClient, nodeInformer, adc.actualStateOfWorld) + kubeClient, nodeInformer.Lister(), adc.actualStateOfWorld) // Default these to values in options adc.reconciler = reconciler.NewReconciler( @@ -144,7 +150,7 @@ func NewAttachDetachController( adc.desiredStateOfWorldPopulator = populator.NewDesiredStateOfWorldPopulator( desiredStateOfWorldPopulatorLoopSleepPeriod, - podInformer, + podInformer.Lister(), adc.desiredStateOfWorld) return adc, nil @@ -155,15 +161,20 @@ type attachDetachController struct { // the API server. kubeClient clientset.Interface - // pvcInformer is the shared PVC informer used to fetch and store PVC + // pvcLister is the shared PVC lister used to fetch and store PVC // objects from the API server. It is shared with other controllers and // therefore the PVC objects in its store should be treated as immutable. - pvcInformer kcache.SharedInformer + pvcLister corelisters.PersistentVolumeClaimLister + pvcsSynced kcache.InformerSynced - // pvInformer is the shared PV informer used to fetch and store PV objects + // pvLister is the shared PV lister used to fetch and store PV objects // from the API server. It is shared with other controllers and therefore // the PV objects in its store should be treated as immutable. 
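The constructor change above sets the pattern the rest of this file follows: each typed informer is consumed once for its Lister() and once for its Informer().HasSynced, and event handlers are registered on the underlying shared informer. The Run hunk further down still leaves the cache-sync gate as a TODO; once enabled, it would look roughly like this sketch (assuming kcache and runtime are the client-go cache and apimachinery util/runtime imports already used in this file):

// Construction time (field names follow the patch): one lister and one
// synced func per informer, handlers on the shared informer itself.
adc.pvcLister = pvcInformer.Lister()
adc.pvcsSynced = pvcInformer.Informer().HasSynced

podInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{
    AddFunc:    adc.podAdd,
    UpdateFunc: adc.podUpdate,
    DeleteFunc: adc.podDelete,
})
adc.podsSynced = podInformer.Informer().HasSynced

// In Run, once the TODO is resolved: block until all four caches have synced
// before starting the reconciler and the desired-state-of-world populator.
if !kcache.WaitForCacheSync(stopCh, adc.podsSynced, adc.nodesSynced, adc.pvcsSynced, adc.pvsSynced) {
    runtime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
    return
}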
- pvInformer kcache.SharedInformer + pvLister corelisters.PersistentVolumeLister + pvsSynced kcache.InformerSynced + + podsSynced kcache.InformerSynced + nodesSynced kcache.InformerSynced // cloud provider used by volume host cloud cloudprovider.Interface @@ -211,6 +222,14 @@ func (adc *attachDetachController) Run(stopCh <-chan struct{}) { defer runtime.HandleCrash() glog.Infof("Starting Attach Detach Controller") + // TODO uncomment once we agree this is ok and we fix the attach/detach integration test that + // currently fails because it doesn't set pvcsSynced and pvsSynced to alwaysReady, so this + // controller never runs. + // if !kcache.WaitForCacheSync(stopCh, adc.podsSynced, adc.nodesSynced, adc.pvcsSynced, adc.pvsSynced) { + // runtime.HandleError(fmt.Errorf("timed out waiting for caches to sync")) + // return + // } + go adc.reconciler.Run(stopCh) go adc.desiredStateOfWorldPopulator.Run(stopCh) @@ -456,33 +475,17 @@ func (adc *attachDetachController) createVolumeSpec( // This method returns an error if a PVC object does not exist in the cache // with the given namespace/name. // This method returns an error if the PVC object's phase is not "Bound". -func (adc *attachDetachController) getPVCFromCacheExtractPV( - namespace string, name string) (string, types.UID, error) { - key := name - if len(namespace) > 0 { - key = namespace + "/" + name - } - - pvcObj, exists, err := adc.pvcInformer.GetStore().GetByKey(key) - if pvcObj == nil || !exists || err != nil { - return "", "", fmt.Errorf( - "failed to find PVC %q in PVCInformer cache. %v", - key, - err) - } - - pvc, ok := pvcObj.(*v1.PersistentVolumeClaim) - if !ok || pvc == nil { - return "", "", fmt.Errorf( - "failed to cast %q object %#v to PersistentVolumeClaim", - key, - pvcObj) +func (adc *attachDetachController) getPVCFromCacheExtractPV(namespace string, name string) (string, types.UID, error) { + pvc, err := adc.pvcLister.PersistentVolumeClaims(namespace).Get(name) + if err != nil { + return "", "", fmt.Errorf("failed to find PVC %s/%s in PVCInformer cache: %v", namespace, name, err) } if pvc.Status.Phase != v1.ClaimBound || pvc.Spec.VolumeName == "" { return "", "", fmt.Errorf( - "PVC %q has non-bound phase (%q) or empty pvc.Spec.VolumeName (%q)", - key, + "PVC %s/%s has non-bound phase (%q) or empty pvc.Spec.VolumeName (%q)", + namespace, + name, pvc.Status.Phase, pvc.Spec.VolumeName) } @@ -496,20 +499,10 @@ func (adc *attachDetachController) getPVCFromCacheExtractPV( // the given name. // This method deep copies the PV object so the caller may use the returned // volume.Spec object without worrying about it mutating unexpectedly. -func (adc *attachDetachController) getPVSpecFromCache( - name string, - pvcReadOnly bool, - expectedClaimUID types.UID) (*volume.Spec, error) { - pvObj, exists, err := adc.pvInformer.GetStore().GetByKey(name) - if pvObj == nil || !exists || err != nil { - return nil, fmt.Errorf( - "failed to find PV %q in PVInformer cache. 
%v", name, err) - } - - pv, ok := pvObj.(*v1.PersistentVolume) - if !ok || pv == nil { - return nil, fmt.Errorf( - "failed to cast %q object %#v to PersistentVolume", name, pvObj) +func (adc *attachDetachController) getPVSpecFromCache(name string, pvcReadOnly bool, expectedClaimUID types.UID) (*volume.Spec, error) { + pv, err := adc.pvLister.Get(name) + if err != nil { + return nil, fmt.Errorf("failed to find PV %q in PVInformer cache: %v", name, err) } if pv.Spec.ClaimRef == nil { @@ -530,14 +523,12 @@ func (adc *attachDetachController) getPVSpecFromCache( // may be mutated by another consumer. clonedPVObj, err := api.Scheme.DeepCopy(pv) if err != nil || clonedPVObj == nil { - return nil, fmt.Errorf( - "failed to deep copy %q PV object. err=%v", name, err) + return nil, fmt.Errorf("failed to deep copy %q PV object. err=%v", name, err) } clonedPV, ok := clonedPVObj.(*v1.PersistentVolume) if !ok { - return nil, fmt.Errorf( - "failed to cast %q clonedPV %#v to PersistentVolume", name, pvObj) + return nil, fmt.Errorf("failed to cast %q clonedPV %#v to PersistentVolume", name, pv) } return volume.NewSpecFromPersistentVolume(clonedPV, pvcReadOnly), nil diff --git a/pkg/controller/volume/attachdetach/attach_detach_controller_test.go b/pkg/controller/volume/attachdetach/attach_detach_controller_test.go index defe9132c5..18886a0e73 100644 --- a/pkg/controller/volume/attachdetach/attach_detach_controller_test.go +++ b/pkg/controller/volume/attachdetach/attach_detach_controller_test.go @@ -20,26 +20,23 @@ import ( "testing" "time" - "k8s.io/kubernetes/pkg/controller/informers" + informers "k8s.io/kubernetes/pkg/client/informers/informers_generated" + "k8s.io/kubernetes/pkg/controller" controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing" ) func Test_NewAttachDetachController_Positive(t *testing.T) { // Arrange fakeKubeClient := controllervolumetesting.CreateTestClient() - resyncPeriod := 5 * time.Minute - podInformer := informers.NewPodInformer(fakeKubeClient, resyncPeriod) - nodeInformer := informers.NewNodeInformer(fakeKubeClient, resyncPeriod) - pvcInformer := informers.NewPVCInformer(fakeKubeClient, resyncPeriod) - pvInformer := informers.NewPVInformer(fakeKubeClient, resyncPeriod) + informerFactory := informers.NewSharedInformerFactory(nil, fakeKubeClient, controller.NoResyncPeriodFunc()) // Act _, err := NewAttachDetachController( fakeKubeClient, - podInformer, - nodeInformer, - pvcInformer, - pvInformer, + informerFactory.Core().V1().Pods(), + informerFactory.Core().V1().Nodes(), + informerFactory.Core().V1().PersistentVolumeClaims(), + informerFactory.Core().V1().PersistentVolumes(), nil, /* cloud */ nil, /* plugins */ false, diff --git a/pkg/controller/volume/attachdetach/populator/BUILD b/pkg/controller/volume/attachdetach/populator/BUILD index 7655bf16a3..7e87c31a72 100644 --- a/pkg/controller/volume/attachdetach/populator/BUILD +++ b/pkg/controller/volume/attachdetach/populator/BUILD @@ -12,10 +12,12 @@ go_library( srcs = ["desired_state_of_world_populator.go"], tags = ["automanaged"], deps = [ - "//pkg/api/v1:go_default_library", + "//pkg/client/listers/core/v1:go_default_library", "//pkg/controller/volume/attachdetach/cache:go_default_library", "//pkg/volume/util/volumehelper:go_default_library", "//vendor:github.com/golang/glog", + "//vendor:k8s.io/apimachinery/pkg/api/errors", + "//vendor:k8s.io/apimachinery/pkg/util/runtime", "//vendor:k8s.io/apimachinery/pkg/util/wait", "//vendor:k8s.io/client-go/tools/cache", ], diff --git 
a/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator.go b/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator.go index 183523bdb8..91bbaeae57 100644 --- a/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator.go +++ b/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator.go @@ -19,13 +19,16 @@ limitations under the License. package populator import ( + "fmt" "time" "github.com/golang/glog" + "k8s.io/apimachinery/pkg/api/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" kcache "k8s.io/client-go/tools/cache" - "k8s.io/kubernetes/pkg/api/v1" + corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) @@ -47,18 +50,18 @@ type DesiredStateOfWorldPopulator interface { // desiredStateOfWorld - the cache to populate func NewDesiredStateOfWorldPopulator( loopSleepDuration time.Duration, - podInformer kcache.SharedInformer, + podLister corelisters.PodLister, desiredStateOfWorld cache.DesiredStateOfWorld) DesiredStateOfWorldPopulator { return &desiredStateOfWorldPopulator{ loopSleepDuration: loopSleepDuration, - podInformer: podInformer, + podLister: podLister, desiredStateOfWorld: desiredStateOfWorld, } } type desiredStateOfWorldPopulator struct { loopSleepDuration time.Duration - podInformer kcache.SharedInformer + podLister corelisters.PodLister desiredStateOfWorld cache.DesiredStateOfWorld } @@ -83,43 +86,30 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() { } // Retrieve the pod object from pod informer with the namespace key - informerPodObj, exists, err := - dswp.podInformer.GetStore().GetByKey(dswPodKey) + namespace, name, err := kcache.SplitMetaNamespaceKey(dswPodKey) if err != nil { - glog.Errorf( - "podInformer GetByKey failed for pod %q (UID %q) with %v", - dswPodKey, - dswPodUID, - err) + utilruntime.HandleError(fmt.Errorf("error splitting dswPodKey %q: %v", dswPodKey, err)) continue } - if exists && informerPodObj == nil { - glog.Info( - "podInformer GetByKey found pod, but informerPodObj is nil for pod %q (UID %q)", - dswPodKey, - dswPodUID) + informerPod, err := dswp.podLister.Pods(namespace).Get(name) + switch { + case errors.IsNotFound(err): + // if we can't find the pod, we need to delete it below + case err != nil: + glog.Errorf("podLister Get failed for pod %q (UID %q) with %v", dswPodKey, dswPodUID, err) continue - } - - if exists { - informerPod, ok := informerPodObj.(*v1.Pod) - if !ok { - glog.Errorf("Failed to cast obj %#v to pod object for pod %q (UID %q)", informerPod, dswPodKey, dswPodUID) - continue - } + default: informerPodUID := volumehelper.GetUniquePodName(informerPod) // Check whether the unique identifier of the pod from dsw matches the one retrieved from pod informer if informerPodUID == dswPodUID { - glog.V(10).Infof( - "Verified pod %q (UID %q) from dsw exists in pod informer.", dswPodKey, dswPodUID) + glog.V(10).Infof("Verified pod %q (UID %q) from dsw exists in pod informer.", dswPodKey, dswPodUID) continue - } } + // the pod from dsw does not exist in pod informer, or it does not match the unique identifer retrieved // from the informer, delete it from dsw - glog.V(1).Infof( - "Removing pod %q (UID %q) from dsw because it does not exist in pod informer.", dswPodKey, dswPodUID) + glog.V(1).Infof("Removing pod %q (UID %q) from dsw because it does not exist in pod informer.", 
dswPodKey, dswPodUID) dswp.desiredStateOfWorld.DeletePod(dswPodUID, dswPodToAdd.VolumeName, dswPodToAdd.NodeName) } } diff --git a/pkg/controller/volume/attachdetach/reconciler/BUILD b/pkg/controller/volume/attachdetach/reconciler/BUILD index b976153ff4..cca94709f7 100644 --- a/pkg/controller/volume/attachdetach/reconciler/BUILD +++ b/pkg/controller/volume/attachdetach/reconciler/BUILD @@ -30,7 +30,8 @@ go_test( tags = ["automanaged"], deps = [ "//pkg/api/v1:go_default_library", - "//pkg/controller/informers:go_default_library", + "//pkg/client/informers/informers_generated:go_default_library", + "//pkg/controller:go_default_library", "//pkg/controller/volume/attachdetach/cache:go_default_library", "//pkg/controller/volume/attachdetach/statusupdater:go_default_library", "//pkg/controller/volume/attachdetach/testing:go_default_library", diff --git a/pkg/controller/volume/attachdetach/reconciler/reconciler_test.go b/pkg/controller/volume/attachdetach/reconciler/reconciler_test.go index bed3ea39d9..a926d4c547 100644 --- a/pkg/controller/volume/attachdetach/reconciler/reconciler_test.go +++ b/pkg/controller/volume/attachdetach/reconciler/reconciler_test.go @@ -24,7 +24,8 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/record" "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/controller/informers" + informers "k8s.io/kubernetes/pkg/client/informers/informers_generated" + "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater" controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing" @@ -50,10 +51,9 @@ func Test_Run_Positive_DoNothing(t *testing.T) { fakeKubeClient := controllervolumetesting.CreateTestClient() fakeRecorder := &record.FakeRecorder{} ad := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(fakeKubeClient, volumePluginMgr, fakeRecorder, false /* checkNodeCapabilitiesBeforeMount */)) - nodeInformer := informers.NewNodeInformer( - fakeKubeClient, resyncPeriod) + informerFactory := informers.NewSharedInformerFactory(nil, fakeKubeClient, controller.NoResyncPeriodFunc()) nsu := statusupdater.NewNodeStatusUpdater( - fakeKubeClient, nodeInformer, asw) + fakeKubeClient, informerFactory.Core().V1().Nodes().Lister(), asw) reconciler := NewReconciler( reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, false, dsw, asw, ad, nsu) diff --git a/pkg/controller/volume/attachdetach/statusupdater/BUILD b/pkg/controller/volume/attachdetach/statusupdater/BUILD index 446504f2bf..89883ac2b8 100644 --- a/pkg/controller/volume/attachdetach/statusupdater/BUILD +++ b/pkg/controller/volume/attachdetach/statusupdater/BUILD @@ -18,10 +18,10 @@ go_library( "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", + "//pkg/client/listers/core/v1:go_default_library", "//pkg/controller/volume/attachdetach/cache:go_default_library", "//vendor:github.com/golang/glog", "//vendor:k8s.io/apimachinery/pkg/util/strategicpatch", - "//vendor:k8s.io/client-go/tools/cache", ], ) diff --git a/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go b/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go index 1216074513..7feaa01aa9 100644 --- a/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go +++ b/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go @@ -25,10 +25,10 @@ 
import ( "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/strategicpatch" - kcache "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" + corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" ) @@ -43,18 +43,18 @@ type NodeStatusUpdater interface { // NewNodeStatusUpdater returns a new instance of NodeStatusUpdater. func NewNodeStatusUpdater( kubeClient clientset.Interface, - nodeInformer kcache.SharedInformer, + nodeLister corelisters.NodeLister, actualStateOfWorld cache.ActualStateOfWorld) NodeStatusUpdater { return &nodeStatusUpdater{ actualStateOfWorld: actualStateOfWorld, - nodeInformer: nodeInformer, + nodeLister: nodeLister, kubeClient: kubeClient, } } type nodeStatusUpdater struct { kubeClient clientset.Interface - nodeInformer kcache.SharedInformer + nodeLister corelisters.NodeLister actualStateOfWorld cache.ActualStateOfWorld } @@ -63,8 +63,8 @@ func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error { // kubernetes/kubernetes/issues/37777 nodesToUpdate := nsu.actualStateOfWorld.GetVolumesToReportAttached() for nodeName, attachedVolumes := range nodesToUpdate { - nodeObj, exists, err := nsu.nodeInformer.GetStore().GetByKey(string(nodeName)) - if nodeObj == nil || !exists || err != nil { + nodeObj, err := nsu.nodeLister.Get(string(nodeName)) + if nodeObj == nil || err != nil { // If node does not exist, its status cannot be updated, log error and // reset flag statusUpdateNeeded back to true to indicate this node status // needs to be updated again diff --git a/test/integration/framework/BUILD b/test/integration/framework/BUILD index 42eb0f832a..d90b696745 100644 --- a/test/integration/framework/BUILD +++ b/test/integration/framework/BUILD @@ -30,6 +30,7 @@ go_library( "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", + "//pkg/client/informers/informers_generated:go_default_library", "//pkg/controller:go_default_library", "//pkg/controller/replication:go_default_library", "//pkg/generated/openapi:go_default_library", diff --git a/test/integration/framework/master_utils.go b/test/integration/framework/master_utils.go index 4c8a6d62f5..d78b00530c 100644 --- a/test/integration/framework/master_utils.go +++ b/test/integration/framework/master_utils.go @@ -56,6 +56,7 @@ import ( "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + informers "k8s.io/kubernetes/pkg/client/informers/informers_generated" "k8s.io/kubernetes/pkg/controller" replicationcontroller "k8s.io/kubernetes/pkg/controller/replication" "k8s.io/kubernetes/pkg/generated/openapi" @@ -120,11 +121,13 @@ func NewMasterComponents(c *Config) *MasterComponents { // TODO: caesarxuchao: remove this client when the refactoring of client libraray is done. 
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}, QPS: c.QPS, Burst: c.Burst}) rcStopCh := make(chan struct{}) - controllerManager := replicationcontroller.NewReplicationManagerFromClient(clientset, controller.NoResyncPeriodFunc, c.Burst, 4096) + informerFactory := informers.NewSharedInformerFactory(nil, clientset, controller.NoResyncPeriodFunc()) + controllerManager := replicationcontroller.NewReplicationManager(informerFactory.Core().V1().Pods(), informerFactory.Core().V1().ReplicationControllers(), clientset, c.Burst, 4096, false) // TODO: Support events once we can cleanly shutdown an event recorder. controllerManager.SetEventRecorder(&record.FakeRecorder{}) if c.StartReplicationManager { + informerFactory.Start(rcStopCh) go controllerManager.Run(goruntime.NumCPU(), rcStopCh) } return &MasterComponents{ diff --git a/test/integration/quota/quota_test.go b/test/integration/quota/quota_test.go index 89e82c44b1..f95274668d 100644 --- a/test/integration/quota/quota_test.go +++ b/test/integration/quota/quota_test.go @@ -37,8 +37,8 @@ import ( "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + informers "k8s.io/kubernetes/pkg/client/informers/informers_generated" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/controller/informers" replicationcontroller "k8s.io/kubernetes/pkg/controller/replication" resourcequotacontroller "k8s.io/kubernetes/pkg/controller/resourcequota" kubeadmission "k8s.io/kubernetes/pkg/kubeapiserver/admission" @@ -84,10 +84,15 @@ func TestQuota(t *testing.T) { controllerCh := make(chan struct{}) defer close(controllerCh) - informers := informers.NewSharedInformerFactory(clientset, nil, controller.NoResyncPeriodFunc()) - podInformer := informers.Pods().Informer() - rcInformer := informers.ReplicationControllers().Informer() - rm := replicationcontroller.NewReplicationManager(podInformer, rcInformer, clientset, replicationcontroller.BurstReplicas, 4096, false) + informers := informers.NewSharedInformerFactory(nil, clientset, controller.NoResyncPeriodFunc()) + rm := replicationcontroller.NewReplicationManager( + informers.Core().V1().Pods(), + informers.Core().V1().ReplicationControllers(), + clientset, + replicationcontroller.BurstReplicas, + 4096, + false, + ) rm.SetEventRecorder(&record.FakeRecorder{}) informers.Start(controllerCh) go rm.Run(3, controllerCh) diff --git a/test/integration/replicaset/replicaset_test.go b/test/integration/replicaset/replicaset_test.go index 8d2943cf6f..733bb77b54 100644 --- a/test/integration/replicaset/replicaset_test.go +++ b/test/integration/replicaset/replicaset_test.go @@ -33,7 +33,7 @@ import ( "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" - "k8s.io/kubernetes/pkg/controller/informers" + informers "k8s.io/kubernetes/pkg/client/informers/informers_generated" "k8s.io/kubernetes/pkg/controller/replicaset" "k8s.io/kubernetes/test/integration/framework" ) @@ -126,7 +126,7 @@ func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespa return ret, nil } -func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *replicaset.ReplicaSetController, cache.SharedIndexInformer, cache.SharedIndexInformer, clientset.Interface) { +func rmSetup(t *testing.T, 
enableGarbageCollector bool) (*httptest.Server, *replicaset.ReplicaSetController, informers.SharedInformerFactory, clientset.Interface) { masterConfig := framework.NewIntegrationTestMasterConfig() _, s := framework.RunAMaster(masterConfig) @@ -136,11 +136,11 @@ func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *repl t.Fatalf("Error in create clientset: %v", err) } resyncPeriod := 12 * time.Hour - informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "rs-informers")), nil, resyncPeriod) + informers := informers.NewSharedInformerFactory(nil, clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "rs-informers")), resyncPeriod) rm := replicaset.NewReplicaSetController( - informers.ReplicaSets(), - informers.Pods(), + informers.Extensions().V1beta1().ReplicaSets(), + informers.Core().V1().Pods(), clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replicaset-controller")), replicaset.BurstReplicas, 4096, @@ -150,7 +150,7 @@ func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *repl if err != nil { t.Fatalf("Failed to create replicaset controller") } - return s, rm, informers.ReplicaSets().Informer(), informers.Pods().Informer(), clientSet + return s, rm, informers, clientSet } // wait for the podInformer to observe the pods. Call this function before @@ -220,7 +220,8 @@ func TestAdoption(t *testing.T) { }, } for i, tc := range testCases { - s, rm, rsInformer, podInformer, clientSet := rmSetup(t, true) + s, rm, informers, clientSet := rmSetup(t, true) + podInformer := informers.Core().V1().Pods().Informer() ns := framework.CreateTestingNamespace(fmt.Sprintf("rs-adoption-%d", i), s, t) defer framework.DeleteTestingNamespace(ns, s, t) @@ -240,8 +241,7 @@ func TestAdoption(t *testing.T) { } stopCh := make(chan struct{}) - go rsInformer.Run(stopCh) - go podInformer.Run(stopCh) + informers.Start(stopCh) waitToObservePods(t, podInformer, 1) go rm.Run(5, stopCh) if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) { @@ -298,7 +298,7 @@ func TestUpdateSelectorToAdopt(t *testing.T) { // We have pod1, pod2 and rs. rs.spec.replicas=1. At first rs.Selector // matches pod1 only; change the selector to match pod2 as well. Verify // there is only one pod left. - s, rm, rsInformer, podInformer, clientSet := rmSetup(t, true) + s, rm, informers, clientSet := rmSetup(t, true) ns := framework.CreateTestingNamespace("rs-update-selector-to-adopt", s, t) defer framework.DeleteTestingNamespace(ns, s, t) rs := newRS("rs", ns.Name, 1) @@ -312,8 +312,7 @@ func TestUpdateSelectorToAdopt(t *testing.T) { createRSsPods(t, clientSet, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{pod1, pod2}, ns.Name) stopCh := make(chan struct{}) - go rsInformer.Run(stopCh) - go podInformer.Run(stopCh) + informers.Start(stopCh) go rm.Run(5, stopCh) waitRSStable(t, clientSet, rs, ns.Name) @@ -339,7 +338,8 @@ func TestUpdateSelectorToRemoveControllerRef(t *testing.T) { // matches pod1 and pod2; change the selector to match only pod1. Verify // that rs creates one more pod, so there are 3 pods. Also verify that // pod2's controllerRef is cleared. 
- s, rm, rsInformer, podInformer, clientSet := rmSetup(t, true) + s, rm, informers, clientSet := rmSetup(t, true) + podInformer := informers.Core().V1().Pods().Informer() ns := framework.CreateTestingNamespace("rs-update-selector-to-remove-controllerref", s, t) defer framework.DeleteTestingNamespace(ns, s, t) rs := newRS("rs", ns.Name, 2) @@ -350,8 +350,7 @@ func TestUpdateSelectorToRemoveControllerRef(t *testing.T) { createRSsPods(t, clientSet, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{pod1, pod2}, ns.Name) stopCh := make(chan struct{}) - go rsInformer.Run(stopCh) - go podInformer.Run(stopCh) + informers.Start(stopCh) waitToObservePods(t, podInformer, 2) go rm.Run(5, stopCh) waitRSStable(t, clientSet, rs, ns.Name) @@ -386,7 +385,7 @@ func TestUpdateLabelToRemoveControllerRef(t *testing.T) { // matches pod1 and pod2; change pod2's labels to non-matching. Verify // that rs creates one more pod, so there are 3 pods. Also verify that // pod2's controllerRef is cleared. - s, rm, rsInformer, podInformer, clientSet := rmSetup(t, true) + s, rm, informers, clientSet := rmSetup(t, true) ns := framework.CreateTestingNamespace("rs-update-label-to-remove-controllerref", s, t) defer framework.DeleteTestingNamespace(ns, s, t) rs := newRS("rs", ns.Name, 2) @@ -395,8 +394,7 @@ func TestUpdateLabelToRemoveControllerRef(t *testing.T) { createRSsPods(t, clientSet, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{pod1, pod2}, ns.Name) stopCh := make(chan struct{}) - go rsInformer.Run(stopCh) - go podInformer.Run(stopCh) + informers.Start(stopCh) go rm.Run(5, stopCh) waitRSStable(t, clientSet, rs, ns.Name) @@ -429,7 +427,7 @@ func TestUpdateLabelToBeAdopted(t *testing.T) { // matches pod1 only; change pod2's labels to be matching. Verify the RS // controller adopts pod2 and delete one of them, so there is only 1 pod // left. 
- s, rm, rsInformer, podInformer, clientSet := rmSetup(t, true) + s, rm, informers, clientSet := rmSetup(t, true) ns := framework.CreateTestingNamespace("rs-update-label-to-be-adopted", s, t) defer framework.DeleteTestingNamespace(ns, s, t) rs := newRS("rs", ns.Name, 1) @@ -443,8 +441,7 @@ func TestUpdateLabelToBeAdopted(t *testing.T) { createRSsPods(t, clientSet, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{pod1, pod2}, ns.Name) stopCh := make(chan struct{}) - go rsInformer.Run(stopCh) - go podInformer.Run(stopCh) + informers.Start(stopCh) go rm.Run(5, stopCh) waitRSStable(t, clientSet, rs, ns.Name) diff --git a/test/integration/replicationcontroller/replicationcontroller_test.go b/test/integration/replicationcontroller/replicationcontroller_test.go index 6ea78efab1..16b42ba6c2 100644 --- a/test/integration/replicationcontroller/replicationcontroller_test.go +++ b/test/integration/replicationcontroller/replicationcontroller_test.go @@ -32,7 +32,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" - "k8s.io/kubernetes/pkg/controller/informers" + informers "k8s.io/kubernetes/pkg/client/informers/informers_generated" "k8s.io/kubernetes/pkg/controller/replication" "k8s.io/kubernetes/test/integration/framework" ) @@ -123,7 +123,7 @@ func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespa return ret, nil } -func rmSetup(t *testing.T, stopCh chan struct{}, enableGarbageCollector bool) (*httptest.Server, *replication.ReplicationManager, cache.SharedIndexInformer, clientset.Interface) { +func rmSetup(t *testing.T, stopCh chan struct{}, enableGarbageCollector bool) (*httptest.Server, *replication.ReplicationManager, informers.SharedInformerFactory, clientset.Interface) { masterConfig := framework.NewIntegrationTestMasterConfig() _, s := framework.RunAMaster(masterConfig) @@ -134,13 +134,11 @@ func rmSetup(t *testing.T, stopCh chan struct{}, enableGarbageCollector bool) (* } resyncPeriod := 12 * time.Hour - informers := informers.NewSharedInformerFactory(clientSet, nil, resyncPeriod) - podInformer := informers.Pods().Informer() - rcInformer := informers.ReplicationControllers().Informer() - rm := replication.NewReplicationManager(podInformer, rcInformer, clientSet, replication.BurstReplicas, 4096, enableGarbageCollector) + informers := informers.NewSharedInformerFactory(nil, clientSet, resyncPeriod) + rm := replication.NewReplicationManager(informers.Core().V1().Pods(), informers.Core().V1().ReplicationControllers(), clientSet, replication.BurstReplicas, 4096, enableGarbageCollector) informers.Start(stopCh) - return s, rm, podInformer, clientSet + return s, rm, informers, clientSet } // wait for the podInformer to observe the pods. 
Call this function before @@ -211,7 +209,7 @@ func TestAdoption(t *testing.T) { } for i, tc := range testCases { stopCh := make(chan struct{}) - s, rm, podInformer, clientSet := rmSetup(t, stopCh, true) + s, rm, informers, clientSet := rmSetup(t, stopCh, true) ns := framework.CreateTestingNamespace(fmt.Sprintf("adoption-%d", i), s, t) defer framework.DeleteTestingNamespace(ns, s, t) @@ -230,8 +228,8 @@ func TestAdoption(t *testing.T) { t.Fatalf("Failed to create Pod: %v", err) } - go podInformer.Run(stopCh) - waitToObservePods(t, podInformer, 1) + informers.Start(stopCh) + waitToObservePods(t, informers.Core().V1().Pods().Informer(), 1) go rm.Run(5, stopCh) if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) { updatedPod, err := podClient.Get(pod.Name, metav1.GetOptions{}) @@ -327,7 +325,7 @@ func TestUpdateSelectorToRemoveControllerRef(t *testing.T) { // that rc creates one more pod, so there are 3 pods. Also verify that // pod2's controllerRef is cleared. stopCh := make(chan struct{}) - s, rm, podInformer, clientSet := rmSetup(t, stopCh, true) + s, rm, informers, clientSet := rmSetup(t, stopCh, true) ns := framework.CreateTestingNamespace("update-selector-to-remove-controllerref", s, t) defer framework.DeleteTestingNamespace(ns, s, t) rc := newRC("rc", ns.Name, 2) @@ -337,7 +335,7 @@ func TestUpdateSelectorToRemoveControllerRef(t *testing.T) { pod2.Labels["uniqueKey"] = "2" createRCsPods(t, clientSet, []*v1.ReplicationController{rc}, []*v1.Pod{pod1, pod2}, ns.Name) - waitToObservePods(t, podInformer, 2) + waitToObservePods(t, informers.Core().V1().Pods().Informer(), 2) go rm.Run(5, stopCh) waitRCStable(t, clientSet, rc, ns.Name) diff --git a/test/integration/volume/attach_detach_test.go b/test/integration/volume/attach_detach_test.go index 34efa51b37..f1b06f2fc1 100644 --- a/test/integration/volume/attach_detach_test.go +++ b/test/integration/volume/attach_detach_test.go @@ -30,8 +30,8 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" + informers "k8s.io/kubernetes/pkg/client/informers/informers_generated" fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" - "k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/controller/volume/attachdetach" "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" @@ -94,7 +94,7 @@ func TestPodDeletionWithDswp(t *testing.T) { ns := framework.CreateTestingNamespace(namespaceName, server, t) defer framework.DeleteTestingNamespace(ns, server, t) - testClient, ctrl, podInformer, nodeInformer := createAdClients(ns, t, server, defaultSyncPeriod) + testClient, ctrl, informers := createAdClients(ns, t, server, defaultSyncPeriod) pod := fakePodWithVol(namespaceName) podStopCh := make(chan struct{}) @@ -103,12 +103,13 @@ func TestPodDeletionWithDswp(t *testing.T) { t.Fatalf("Failed to created node : %v", err) } - go nodeInformer.Run(podStopCh) + go informers.Core().V1().Nodes().Informer().Run(podStopCh) if _, err := testClient.Core().Pods(ns.Name).Create(pod); err != nil { t.Errorf("Failed to create pod : %v", err) } + podInformer := informers.Core().V1().Pods().Informer() go podInformer.Run(podStopCh) // start controller loop @@ -167,7 +168,7 @@ func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podN } } -func createAdClients(ns *v1.Namespace, t *testing.T, server *httptest.Server, syncPeriod time.Duration) (*clientset.Clientset, attachdetach.AttachDetachController, 
cache.SharedIndexInformer, cache.SharedIndexInformer) { +func createAdClients(ns *v1.Namespace, t *testing.T, server *httptest.Server, syncPeriod time.Duration) (*clientset.Clientset, attachdetach.AttachDetachController, informers.SharedInformerFactory) { config := restclient.Config{ Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}, @@ -192,13 +193,20 @@ func createAdClients(ns *v1.Namespace, t *testing.T, server *httptest.Server, sy } plugins := []volume.VolumePlugin{plugin} cloud := &fakecloud.FakeCloud{} - podInformer := informers.NewPodInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pod-informer")), resyncPeriod) - nodeInformer := informers.NewNodeInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "node-informer")), resyncPeriod) - pvcInformer := informers.NewNodeInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pvc-informer")), resyncPeriod) - pvInformer := informers.NewNodeInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pv-informer")), resyncPeriod) - ctrl, err := attachdetach.NewAttachDetachController(testClient, podInformer, nodeInformer, pvcInformer, pvInformer, cloud, plugins, false, time.Second*5) + informers := informers.NewSharedInformerFactory(nil, testClient, resyncPeriod) + ctrl, err := attachdetach.NewAttachDetachController( + testClient, + informers.Core().V1().Pods(), + informers.Core().V1().Nodes(), + informers.Core().V1().PersistentVolumeClaims(), + informers.Core().V1().PersistentVolumes(), + cloud, + plugins, + false, + time.Second*5, + ) if err != nil { t.Fatalf("Error creating AttachDetach : %v", err) } - return testClient, ctrl, podInformer, nodeInformer + return testClient, ctrl, informers }
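Taken together, the integration-test changes in this patch converge on one wiring sequence: build a generated factory around the test client, hand its typed informers to the controller constructor, start the factory once, then run the controller. A condensed sketch of that sequence, modeled on createAdClients above; testClient, resyncPeriod, cloud, plugins, and t come from the surrounding test and are not defined here:

informerFactory := informers.NewSharedInformerFactory(nil, testClient, resyncPeriod)
ctrl, err := attachdetach.NewAttachDetachController(
    testClient,
    informerFactory.Core().V1().Pods(),
    informerFactory.Core().V1().Nodes(),
    informerFactory.Core().V1().PersistentVolumeClaims(),
    informerFactory.Core().V1().PersistentVolumes(),
    cloud,
    plugins,
    false, // disableReconciliationSync
    time.Second*5,
)
if err != nil {
    t.Fatalf("Error creating AttachDetach : %v", err)
}

stopCh := make(chan struct{})
// One factory Start replaces the per-informer `go informer.Run(stopCh)` calls
// the old tests needed.
informerFactory.Start(stopCh)
go ctrl.Run(stopCh)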