Replace hand-written informers with generated ones

Replace existing uses of hand-written informers with generated ones. Follow-up commits will switch the use of one-off informers to shared informers.

parent cb758738f9
commit 70c6087600
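The diff below swaps the hand-written informer factory (pkg/controller/informers) for the generated one (pkg/client/informers/informers_generated), whose informers and listers are grouped by API group and version, and runs both factories side by side until the migration finishes. As a rough illustration of the consumer-side pattern — a minimal sketch written against today's k8s.io/client-go generated factory rather than the in-tree informers_generated package this commit wires up, with the kubeconfig path and resync period as placeholder assumptions:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Placeholder: load whatever kubeconfig sits at the default home location.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// Generated factory: one factory per client, accessors grouped by
	// group/version, mirroring newSharedInformers.Core().V1().Nodes() below.
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	nodeInformer := factory.Core().V1().Nodes()

	stop := make(chan struct{})
	defer close(stop)

	// Nothing syncs until the factory is started, which is why the diff adds
	// newSharedInformers.Start(stop) next to the existing sharedInformers.Start(stop).
	factory.Start(stop)
	cache.WaitForCacheSync(stop, nodeInformer.Informer().HasSynced)

	// Reads go through the generated lister instead of a raw cache.Store.
	nodes, err := nodeInformer.Lister().List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Printf("observed %d nodes\n", len(nodes))
}
```

The in-tree factory in this diff additionally takes an internal-versioned client, which is why the new constructor calls pass nil alongside the versioned client.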
@@ -15,6 +15,7 @@ go_library(
 "//cmd/cloud-controller-manager/app/options:go_default_library",
 "//pkg/api:go_default_library",
 "//pkg/client/clientset_generated/clientset:go_default_library",
+"//pkg/client/informers/informers_generated:go_default_library",
 "//pkg/client/leaderelection:go_default_library",
 "//pkg/client/leaderelection/resourcelock:go_default_library",
 "//pkg/cloudprovider:go_default_library",
@@ -36,6 +36,7 @@ import (
 "k8s.io/kubernetes/cmd/cloud-controller-manager/app/options"
 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+newinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
 "k8s.io/kubernetes/pkg/client/leaderelection"
 "k8s.io/kubernetes/pkg/client/leaderelection/resourcelock"
 "k8s.io/kubernetes/pkg/cloudprovider"
@@ -193,7 +194,10 @@ func StartControllers(s *options.CloudControllerManagerServer, kubeconfig *restc
 client := func(serviceAccountName string) clientset.Interface {
 return rootClientBuilder.ClientOrDie(serviceAccountName)
 }
-sharedInformers := informers.NewSharedInformerFactory(client("shared-informers"), nil, resyncPeriod(s)())
+versionedClient := client("shared-informers")
+// TODO replace sharedInformers with newSharedInformers
+sharedInformers := informers.NewSharedInformerFactory(versionedClient, nil, resyncPeriod(s)())
+newSharedInformers := newinformers.NewSharedInformerFactory(nil, versionedClient, resyncPeriod(s)())

 _, clusterCIDR, err := net.ParseCIDR(s.ClusterCIDR)
 if err != nil {
@@ -202,7 +206,7 @@ func StartControllers(s *options.CloudControllerManagerServer, kubeconfig *restc

 // Start the CloudNodeController
 nodeController, err := nodecontroller.NewCloudNodeController(
-sharedInformers.Nodes(),
+newSharedInformers.Core().V1().Nodes(),
 client("cloud-node-controller"), cloud,
 s.NodeMonitorPeriod.Duration)
 if err != nil {
@@ -247,7 +251,9 @@ func StartControllers(s *options.CloudControllerManagerServer, kubeconfig *restc
 glog.Fatalf("Failed to get api versions from server: %v", err)
 }

+// TODO replace sharedInformers with newSharedInformers
 sharedInformers.Start(stop)
+newSharedInformers.Start(stop)

 select {}
 }
@@ -30,6 +30,7 @@ go_library(
 "//pkg/apis/componentconfig:go_default_library",
 "//pkg/apis/extensions:go_default_library",
 "//pkg/client/clientset_generated/clientset:go_default_library",
+"//pkg/client/informers/informers_generated:go_default_library",
 "//pkg/client/leaderelection:go_default_library",
 "//pkg/client/leaderelection/resourcelock:go_default_library",
 "//pkg/cloudprovider:go_default_library",
@@ -33,8 +33,8 @@ func startJobController(ctx ControllerContext) (bool, error) {
 return false, nil
 }
 go job.NewJobController(
-ctx.InformerFactory.Pods().Informer(),
-ctx.InformerFactory.Jobs(),
+ctx.NewInformerFactory.Core().V1().Pods(),
+ctx.NewInformerFactory.Batch().V1().Jobs(),
 ctx.ClientBuilder.ClientOrDie("job-controller"),
 ).Run(int(ctx.Options.ConcurrentJobSyncs), ctx.Stop)
 return true, nil
@@ -46,6 +46,7 @@ import (
 "k8s.io/kubernetes/cmd/kube-controller-manager/app/options"
 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+newinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
 "k8s.io/kubernetes/pkg/client/leaderelection"
 "k8s.io/kubernetes/pkg/client/leaderelection/resourcelock"
 "k8s.io/kubernetes/pkg/cloudprovider"
@@ -210,9 +211,14 @@ type ControllerContext struct {
 // ClientBuilder will provide a client for this controller to use
 ClientBuilder controller.ControllerClientBuilder

-// InformerFactory gives access to informers for the controller
+// InformerFactory gives access to informers for the controller.
+// TODO delete this instance once the conversion to generated informers is complete.
 InformerFactory informers.SharedInformerFactory

+// NewInformerFactory gives access to informers for the controller.
+// TODO rename this to InformerFactory once the conversion to generated informers is complete.
+NewInformerFactory newinformers.SharedInformerFactory
+
 // Options provides access to init options for a given controller
 Options options.CMServer

@@ -326,7 +332,10 @@ func getAvailableResources(clientBuilder controller.ControllerClientBuilder) (ma
 }

 func StartControllers(controllers map[string]InitFunc, s *options.CMServer, rootClientBuilder, clientBuilder controller.ControllerClientBuilder, stop <-chan struct{}) error {
-sharedInformers := informers.NewSharedInformerFactory(rootClientBuilder.ClientOrDie("shared-informers"), nil, ResyncPeriod(s)())
+versionedClient := rootClientBuilder.ClientOrDie("shared-informers")
+// TODO replace sharedInformers with newSharedInformers
+sharedInformers := informers.NewSharedInformerFactory(versionedClient, nil, ResyncPeriod(s)())
+newSharedInformers := newinformers.NewSharedInformerFactory(nil, versionedClient, ResyncPeriod(s)())

 // always start the SA token controller first using a full-power client, since it needs to mint tokens for the rest
 if len(s.ServiceAccountKeyFile) > 0 {
@@ -366,6 +375,7 @@ func StartControllers(controllers map[string]InitFunc, s *options.CMServer, root
 ctx := ControllerContext{
 ClientBuilder: clientBuilder,
 InformerFactory: sharedInformers,
+NewInformerFactory: newSharedInformers,
 Options: *s,
 AvailableResources: availableResources,
 Stop: stop,
@@ -406,11 +416,24 @@ func StartControllers(controllers map[string]InitFunc, s *options.CMServer, root
 glog.Warningf("Unsuccessful parsing of service CIDR %v: %v", s.ServiceCIDR, err)
 }
 nodeController, err := nodecontroller.NewNodeController(
-sharedInformers.Pods(), sharedInformers.Nodes(), sharedInformers.DaemonSets(),
-cloud, clientBuilder.ClientOrDie("node-controller"),
-s.PodEvictionTimeout.Duration, s.NodeEvictionRate, s.SecondaryNodeEvictionRate, s.LargeClusterSizeThreshold, s.UnhealthyZoneThreshold, s.NodeMonitorGracePeriod.Duration,
-s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, serviceCIDR,
-int(s.NodeCIDRMaskSize), s.AllocateNodeCIDRs)
+newSharedInformers.Core().V1().Pods(),
+newSharedInformers.Core().V1().Nodes(),
+newSharedInformers.Extensions().V1beta1().DaemonSets(),
+cloud,
+clientBuilder.ClientOrDie("node-controller"),
+s.PodEvictionTimeout.Duration,
+s.NodeEvictionRate,
+s.SecondaryNodeEvictionRate,
+s.LargeClusterSizeThreshold,
+s.UnhealthyZoneThreshold,
+s.NodeMonitorGracePeriod.Duration,
+s.NodeStartupGracePeriod.Duration,
+s.NodeMonitorPeriod.Duration,
+clusterCIDR,
+serviceCIDR,
+int(s.NodeCIDRMaskSize),
+s.AllocateNodeCIDRs,
+)
 if err != nil {
 return fmt.Errorf("failed to initialize nodecontroller: %v", err)
 }
@@ -463,10 +486,10 @@ func StartControllers(controllers map[string]InitFunc, s *options.CMServer, root
 attachDetachController, attachDetachControllerErr :=
 attachdetach.NewAttachDetachController(
 clientBuilder.ClientOrDie("attachdetach-controller"),
-sharedInformers.Pods().Informer(),
-sharedInformers.Nodes().Informer(),
-sharedInformers.PersistentVolumeClaims().Informer(),
-sharedInformers.PersistentVolumes().Informer(),
+newSharedInformers.Core().V1().Pods(),
+newSharedInformers.Core().V1().Nodes(),
+newSharedInformers.Core().V1().PersistentVolumeClaims(),
+newSharedInformers.Core().V1().PersistentVolumes(),
 cloud,
 ProbeAttachableVolumePlugins(s.VolumeConfiguration),
 s.DisableAttachDetachReconcilerSync,
@@ -478,7 +501,9 @@ func StartControllers(controllers map[string]InitFunc, s *options.CMServer, root
 go attachDetachController.Run(stop)
 time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

+// TODO replace sharedInformers with newSharedInformers
 sharedInformers.Start(stop)
+newSharedInformers.Start(stop)

 select {}
 }
@@ -53,8 +53,8 @@ func startEndpointController(ctx ControllerContext) (bool, error) {

 func startReplicationController(ctx ControllerContext) (bool, error) {
 go replicationcontroller.NewReplicationManager(
-ctx.InformerFactory.Pods().Informer(),
-ctx.InformerFactory.ReplicationControllers().Informer(),
+ctx.NewInformerFactory.Core().V1().Pods(),
+ctx.NewInformerFactory.Core().V1().ReplicationControllers(),
 ctx.ClientBuilder.ClientOrDie("replication-controller"),
 replicationcontroller.BurstReplicas,
 int(ctx.Options.LookupCacheSizeForRC),
@@ -66,7 +66,7 @@ func startReplicationController(ctx ControllerContext) (bool, error) {
 func startPodGCController(ctx ControllerContext) (bool, error) {
 go podgc.NewPodGC(
 ctx.ClientBuilder.ClientOrDie("pod-garbage-collector"),
-ctx.InformerFactory.Pods().Informer(),
+ctx.NewInformerFactory.Core().V1().Pods(),
 int(ctx.Options.TerminatedPodGCThreshold),
 ).Run(ctx.Stop)
 return true, nil
@@ -32,9 +32,9 @@ func startDaemonSetController(ctx ControllerContext) (bool, error) {
 return false, nil
 }
 go daemon.NewDaemonSetsController(
-ctx.InformerFactory.DaemonSets(),
-ctx.InformerFactory.Pods(),
-ctx.InformerFactory.Nodes(),
+ctx.NewInformerFactory.Extensions().V1beta1().DaemonSets(),
+ctx.NewInformerFactory.Core().V1().Pods(),
+ctx.NewInformerFactory.Core().V1().Nodes(),
 ctx.ClientBuilder.ClientOrDie("daemon-set-controller"),
 int(ctx.Options.LookupCacheSizeForDaemonSet),
 ).Run(int(ctx.Options.ConcurrentDaemonSetSyncs), ctx.Stop)
@@ -46,9 +46,9 @@ func startDeploymentController(ctx ControllerContext) (bool, error) {
 return false, nil
 }
 go deployment.NewDeploymentController(
-ctx.InformerFactory.Deployments(),
-ctx.InformerFactory.ReplicaSets(),
-ctx.InformerFactory.Pods(),
+ctx.NewInformerFactory.Extensions().V1beta1().Deployments(),
+ctx.NewInformerFactory.Extensions().V1beta1().ReplicaSets(),
+ctx.NewInformerFactory.Core().V1().Pods(),
 ctx.ClientBuilder.ClientOrDie("deployment-controller"),
 ).Run(int(ctx.Options.ConcurrentDeploymentSyncs), ctx.Stop)
 return true, nil
@@ -59,8 +59,8 @@ func startReplicaSetController(ctx ControllerContext) (bool, error) {
 return false, nil
 }
 go replicaset.NewReplicaSetController(
-ctx.InformerFactory.ReplicaSets(),
-ctx.InformerFactory.Pods(),
+ctx.NewInformerFactory.Extensions().V1beta1().ReplicaSets(),
+ctx.NewInformerFactory.Core().V1().Pods(),
 ctx.ClientBuilder.ClientOrDie("replicaset-controller"),
 replicaset.BurstReplicas,
 int(ctx.Options.LookupCacheSizeForRS),
@@ -16,8 +16,8 @@ go_library(
 "//pkg/api:go_default_library",
 "//pkg/api/v1:go_default_library",
 "//pkg/client/clientset_generated/clientset:go_default_library",
+"//pkg/client/informers/informers_generated/core/v1:go_default_library",
 "//pkg/cloudprovider:go_default_library",
-"//pkg/controller/informers:go_default_library",
 "//vendor:github.com/golang/glog",
 "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
 "//vendor:k8s.io/apimachinery/pkg/types",
@@ -38,10 +38,10 @@ go_test(
 "//pkg/api:go_default_library",
 "//pkg/api/v1:go_default_library",
 "//pkg/client/clientset_generated/clientset/fake:go_default_library",
+"//pkg/client/informers/informers_generated:go_default_library",
 "//pkg/cloudprovider:go_default_library",
 "//pkg/cloudprovider/providers/fake:go_default_library",
 "//pkg/controller:go_default_library",
-"//pkg/controller/informers:go_default_library",
 "//pkg/controller/node/testutil:go_default_library",
 "//vendor:github.com/golang/glog",
 "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
@@ -32,12 +32,12 @@ import (
 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1"
 "k8s.io/kubernetes/pkg/cloudprovider"
-"k8s.io/kubernetes/pkg/controller/informers"
 )

 type CloudNodeController struct {
-nodeInformer informers.NodeInformer
+nodeInformer coreinformers.NodeInformer
 kubeClient clientset.Interface
 recorder record.EventRecorder

@@ -59,7 +59,7 @@ const (

 // NewCloudNodeController creates a CloudNodeController object
 func NewCloudNodeController(
-nodeInformer informers.NodeInformer,
+nodeInformer coreinformers.NodeInformer,
 kubeClient clientset.Interface,
 cloud cloudprovider.Interface,
 nodeMonitorPeriod time.Duration) (*CloudNodeController, error) {
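The CloudNodeController now accepts the generated coreinformers.NodeInformer instead of the hand-written one. As a hypothetical, minimal consumer of such an informer — written with today's k8s.io/client-go core/v1 informer and lister types, with nodeWatcher and its fields being illustrative names rather than anything in this commit:

```go
package nodewatch

import (
	coreinformers "k8s.io/client-go/informers/core/v1"
	corelisters "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
)

// nodeWatcher is an illustrative consumer of a generated NodeInformer.
type nodeWatcher struct {
	nodeLister       corelisters.NodeLister
	nodeListerSynced cache.InformerSynced
}

func newNodeWatcher(nodeInformer coreinformers.NodeInformer) *nodeWatcher {
	w := &nodeWatcher{
		// Keep the lister for cache reads and HasSynced for startup gating.
		nodeLister:       nodeInformer.Lister(),
		nodeListerSynced: nodeInformer.Informer().HasSynced,
	}
	// Event handlers hang off the shared informer, so several controllers can
	// register against the same underlying watch.
	nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { /* enqueue the node here */ },
		DeleteFunc: func(obj interface{}) { /* handle deletion here */ },
	})
	return w
}
```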
@@ -30,10 +30,10 @@ import (
 clientv1 "k8s.io/client-go/pkg/api/v1"
 "k8s.io/client-go/tools/record"
 "k8s.io/kubernetes/pkg/api"
+informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
 "k8s.io/kubernetes/pkg/cloudprovider"
 fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
 "k8s.io/kubernetes/pkg/controller"
-"k8s.io/kubernetes/pkg/controller/informers"
 "k8s.io/kubernetes/pkg/controller/node/testutil"
 )

@@ -99,12 +99,12 @@ func TestNodeDeleted(t *testing.T) {
 DeleteWaitChan: make(chan struct{}),
 }

-factory := informers.NewSharedInformerFactory(fnh, nil, controller.NoResyncPeriodFunc())
+factory := informers.NewSharedInformerFactory(nil, fnh, controller.NoResyncPeriodFunc())

 eventBroadcaster := record.NewBroadcaster()
 cloudNodeController := &CloudNodeController{
 kubeClient: fnh,
-nodeInformer: factory.Nodes(),
+nodeInformer: factory.Core().V1().Nodes(),
 cloud: &fakecloud.FakeCloud{Err: cloudprovider.InstanceNotFound},
 nodeMonitorPeriod: 5 * time.Second,
 recorder: eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: "controllermanager"}),
@@ -21,13 +21,16 @@ go_library(
 "//pkg/apis/extensions/v1beta1:go_default_library",
 "//pkg/client/clientset_generated/clientset:go_default_library",
 "//pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library",
-"//pkg/client/legacylisters:go_default_library",
+"//pkg/client/informers/informers_generated/core/v1:go_default_library",
+"//pkg/client/informers/informers_generated/extensions/v1beta1:go_default_library",
+"//pkg/client/listers/core/v1:go_default_library",
+"//pkg/client/listers/extensions/v1beta1:go_default_library",
 "//pkg/controller:go_default_library",
-"//pkg/controller/informers:go_default_library",
 "//pkg/util/metrics:go_default_library",
 "//plugin/pkg/scheduler/algorithm/predicates:go_default_library",
 "//plugin/pkg/scheduler/schedulercache:go_default_library",
 "//vendor:github.com/golang/glog",
+"//vendor:k8s.io/apimachinery/pkg/api/errors",
 "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
 "//vendor:k8s.io/apimachinery/pkg/labels",
 "//vendor:k8s.io/apimachinery/pkg/util/errors",
@@ -52,8 +55,8 @@ go_test(
 "//pkg/api/v1:go_default_library",
 "//pkg/apis/extensions/v1beta1:go_default_library",
 "//pkg/client/clientset_generated/clientset/fake:go_default_library",
+"//pkg/client/informers/informers_generated:go_default_library",
 "//pkg/controller:go_default_library",
-"//pkg/controller/informers:go_default_library",
 "//pkg/securitycontext:go_default_library",
 "//vendor:k8s.io/apimachinery/pkg/api/resource",
 "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
@@ -23,6 +23,7 @@ import (
 "sync"
 "time"

+"k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
 utilerrors "k8s.io/apimachinery/pkg/util/errors"
@@ -38,9 +39,11 @@ import (
 extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1"
-"k8s.io/kubernetes/pkg/client/legacylisters"
+coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1"
+extensionsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/extensions/v1beta1"
+corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
+extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1"
 "k8s.io/kubernetes/pkg/controller"
-"k8s.io/kubernetes/pkg/controller/informers"
 "k8s.io/kubernetes/pkg/util/metrics"
 "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
 "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
@@ -74,18 +77,18 @@ type DaemonSetsController struct {
 syncHandler func(dsKey string) error
 // A TTLCache of pod creates/deletes each ds expects to see
 expectations controller.ControllerExpectationsInterface
-// A store of daemon sets
-dsStore *listers.StoreToDaemonSetLister
-// A store of pods
-podStore *listers.StoreToPodLister
-// A store of nodes
-nodeStore *listers.StoreToNodeLister
+// dsLister can list/get daemonsets from the shared informer's store
+dsLister extensionslisters.DaemonSetLister
 // dsStoreSynced returns true if the daemonset store has been synced at least once.
 // Added as a member to the struct to allow injection for testing.
 dsStoreSynced cache.InformerSynced
+// podLister get list/get pods from the shared informers's store
+podLister corelisters.PodLister
 // podStoreSynced returns true if the pod store has been synced at least once.
 // Added as a member to the struct to allow injection for testing.
 podStoreSynced cache.InformerSynced
+// nodeLister can list/get nodes from the shared informer's store
+nodeLister corelisters.NodeLister
 // nodeStoreSynced returns true if the node store has been synced at least once.
 // Added as a member to the struct to allow injection for testing.
 nodeStoreSynced cache.InformerSynced
@@ -96,7 +99,7 @@ type DaemonSetsController struct {
 queue workqueue.RateLimitingInterface
 }

-func NewDaemonSetsController(daemonSetInformer informers.DaemonSetInformer, podInformer informers.PodInformer, nodeInformer informers.NodeInformer, kubeClient clientset.Interface, lookupCacheSize int) *DaemonSetsController {
+func NewDaemonSetsController(daemonSetInformer extensionsinformers.DaemonSetInformer, podInformer coreinformers.PodInformer, nodeInformer coreinformers.NodeInformer, kubeClient clientset.Interface, lookupCacheSize int) *DaemonSetsController {
 eventBroadcaster := record.NewBroadcaster()
 eventBroadcaster.StartLogging(glog.Infof)
 // TODO: remove the wrapper when every clients have moved to use the clientset.
@@ -146,7 +149,7 @@ func NewDaemonSetsController(daemonSetInformer informers.DaemonSetInformer, podI
 },
 DeleteFunc: dsc.deleteDaemonset,
 })
-dsc.dsStore = daemonSetInformer.Lister()
+dsc.dsLister = daemonSetInformer.Lister()
 dsc.dsStoreSynced = daemonSetInformer.Informer().HasSynced

 // Watch for creation/deletion of pods. The reason we watch is that we don't want a daemon set to create/delete
@@ -156,7 +159,7 @@ func NewDaemonSetsController(daemonSetInformer informers.DaemonSetInformer, podI
 UpdateFunc: dsc.updatePod,
 DeleteFunc: dsc.deletePod,
 })
-dsc.podStore = podInformer.Lister()
+dsc.podLister = podInformer.Lister()
 dsc.podStoreSynced = podInformer.Informer().HasSynced

 nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -165,7 +168,7 @@ func NewDaemonSetsController(daemonSetInformer informers.DaemonSetInformer, podI
 },
 )
 dsc.nodeStoreSynced = nodeInformer.Informer().HasSynced
-dsc.nodeStore = nodeInformer.Lister()
+dsc.nodeLister = nodeInformer.Lister()

 dsc.syncHandler = dsc.syncDaemonSet
 dsc.lookupCache = controller.NewMatchingCache(lookupCacheSize)
@@ -198,6 +201,7 @@ func (dsc *DaemonSetsController) Run(workers int, stopCh <-chan struct{}) {
 glog.Infof("Starting Daemon Sets controller manager")

 if !cache.WaitForCacheSync(stopCh, dsc.podStoreSynced, dsc.nodeStoreSynced, dsc.dsStoreSynced) {
+utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
 return
 }

@@ -258,7 +262,7 @@ func (dsc *DaemonSetsController) getPodDaemonSet(pod *v1.Pod) *extensions.Daemon
 return ds
 }
 }
-sets, err := dsc.dsStore.GetPodDaemonSets(pod)
+sets, err := dsc.dsLister.GetPodDaemonSets(pod)
 if err != nil {
 glog.V(4).Infof("No daemon sets found for pod %v, daemon set controller will avoid syncing", pod.Name)
 return nil
@@ -272,16 +276,16 @@ func (dsc *DaemonSetsController) getPodDaemonSet(pod *v1.Pod) *extensions.Daemon
 }

 // update lookup cache
-dsc.lookupCache.Update(pod, &sets[0])
+dsc.lookupCache.Update(pod, sets[0])

-return &sets[0]
+return sets[0]
 }

 // isCacheValid check if the cache is valid
 func (dsc *DaemonSetsController) isCacheValid(pod *v1.Pod, cachedDS *extensions.DaemonSet) bool {
-_, exists, err := dsc.dsStore.Get(cachedDS)
+_, err := dsc.dsLister.DaemonSets(cachedDS.Namespace).Get(cachedDS.Name)
 // ds has been deleted or updated, cache is invalid
-if err != nil || !exists || !isDaemonSetMatch(pod, cachedDS) {
+if err != nil || !isDaemonSetMatch(pod, cachedDS) {
 return false
 }
 return true
@@ -380,14 +384,14 @@ func (dsc *DaemonSetsController) deletePod(obj interface{}) {

 func (dsc *DaemonSetsController) addNode(obj interface{}) {
 // TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too).
-dsList, err := dsc.dsStore.List()
+dsList, err := dsc.dsLister.List(labels.Everything())
 if err != nil {
 glog.V(4).Infof("Error enqueueing daemon sets: %v", err)
 return
 }
 node := obj.(*v1.Node)
-for i := range dsList.Items {
-ds := &dsList.Items[i]
+for i := range dsList {
+ds := dsList[i]
 _, shouldSchedule, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
 if err != nil {
 continue
@@ -405,14 +409,14 @@ func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
 // If node labels didn't change, we can ignore this update.
 return
 }
-dsList, err := dsc.dsStore.List()
+dsList, err := dsc.dsLister.List(labels.Everything())
 if err != nil {
 glog.V(4).Infof("Error enqueueing daemon sets: %v", err)
 return
 }
 // TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too).
-for i := range dsList.Items {
-ds := &dsList.Items[i]
+for i := range dsList {
+ds := dsList[i]
 _, oldShouldSchedule, oldShouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(oldNode, ds)
 if err != nil {
 continue
@@ -434,7 +438,7 @@ func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *extensions.DaemonSet)
 if err != nil {
 return nil, err
 }
-daemonPods, err := dsc.podStore.Pods(ds.Namespace).List(selector)
+daemonPods, err := dsc.podLister.Pods(ds.Namespace).List(selector)
 if err != nil {
 return nodeToDaemonPods, err
 }
@@ -456,14 +460,15 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) error {

 // For each node, if the node is running the daemon pod but isn't supposed to, kill the daemon
 // pod. If the node is supposed to run the daemon pod, but isn't, create the daemon pod on the node.
-nodeList, err := dsc.nodeStore.List()
+nodeList, err := dsc.nodeLister.List(labels.Everything())
 if err != nil {
 return fmt.Errorf("couldn't get list of nodes when syncing daemon set %#v: %v", ds, err)
 }
 var nodesNeedingDaemonPods, podsToDelete []string
 var failedPodsObserved int
-for _, node := range nodeList.Items {
-_, shouldSchedule, shouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(&node, ds)
+for i := range nodeList {
+node := nodeList[i]
+_, shouldSchedule, shouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(node, ds)
 if err != nil {
 continue
 }
@@ -615,14 +620,15 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet)
 return fmt.Errorf("error getting node to daemon pod mapping for daemon set %#v: %v", ds, err)
 }

-nodeList, err := dsc.nodeStore.List()
+nodeList, err := dsc.nodeLister.List(labels.Everything())
 if err != nil {
 return fmt.Errorf("couldn't get list of nodes when updating daemon set %#v: %v", ds, err)
 }

 var desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady int
-for _, node := range nodeList.Items {
-wantToRun, _, _, err := dsc.nodeShouldRunDaemonPod(&node, ds)
+for i := range nodeList {
+node := nodeList[i]
+wantToRun, _, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
 if err != nil {
 return err
 }
@@ -661,16 +667,19 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
 glog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Now().Sub(startTime))
 }()

-obj, exists, err := dsc.dsStore.Store.GetByKey(key)
+namespace, name, err := cache.SplitMetaNamespaceKey(key)
 if err != nil {
-return fmt.Errorf("unable to retrieve ds %v from store: %v", key, err)
+return err
 }
-if !exists {
+ds, err := dsc.dsLister.DaemonSets(namespace).Get(name)
+if errors.IsNotFound(err) {
 glog.V(3).Infof("daemon set has been deleted %v", key)
 dsc.expectations.DeleteExpectations(key)
 return nil
 }
-ds := obj.(*extensions.DaemonSet)
+if err != nil {
+return fmt.Errorf("unable to retrieve ds %v from store: %v", key, err)
+}

 everything := metav1.LabelSelector{}
 if reflect.DeepEqual(ds.Spec.Selector, &everything) {
@@ -733,8 +742,12 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten

 pods := []*v1.Pod{}

-for _, m := range dsc.podStore.Indexer.List() {
-pod := m.(*v1.Pod)
+podList, err := dsc.podLister.List(labels.Everything())
+if err != nil {
+return false, false, false, err
+}
+for i := range podList {
+pod := podList[i]
 if pod.Spec.NodeName != node.Name {
 continue
 }
@@ -807,7 +820,7 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten
 }

 // byCreationTimestamp sorts a list by creation timestamp, using their names as a tie breaker.
-type byCreationTimestamp []extensions.DaemonSet
+type byCreationTimestamp []*extensions.DaemonSet

 func (o byCreationTimestamp) Len() int { return len(o) }
 func (o byCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
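The syncDaemonSet hunk above replaces the store GetByKey/exists idiom with cache.SplitMetaNamespaceKey, a namespaced lister Get, and an IsNotFound check. A standalone sketch of that idiom with today's client-go lister types (apps/v1 rather than the extensions/v1beta1 lister this file uses; syncDaemonSet here is an illustrative stand-in, not the controller's real method):

```go
package daemonsync

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/errors"
	appslisters "k8s.io/client-go/listers/apps/v1"
	"k8s.io/client-go/tools/cache"
)

// syncDaemonSet is an illustrative stand-in for the controller's sync handler.
func syncDaemonSet(dsLister appslisters.DaemonSetLister, key string) error {
	// Work queue keys are "namespace/name"; split them instead of hitting a raw store key.
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return err
	}
	ds, err := dsLister.DaemonSets(namespace).Get(name)
	if errors.IsNotFound(err) {
		// Deleted between enqueue and sync: nothing left to reconcile.
		return nil
	}
	if err != nil {
		return fmt.Errorf("unable to retrieve ds %v from store: %v", key, err)
	}
	_ = ds // hand the object to the rest of the sync logic
	return nil
}
```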
@@ -32,8 +32,8 @@ import (
 "k8s.io/kubernetes/pkg/api/v1"
 extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
+informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
 "k8s.io/kubernetes/pkg/controller"
-"k8s.io/kubernetes/pkg/controller/informers"
 "k8s.io/kubernetes/pkg/securitycontext"
 )

@@ -147,11 +147,25 @@ func addFailedPods(podStore cache.Store, nodeName string, label map[string]strin
 }
 }

-func newTestController(initialObjects ...runtime.Object) (*DaemonSetsController, *controller.FakePodControl, *fake.Clientset) {
-clientset := fake.NewSimpleClientset(initialObjects...)
-informerFactory := informers.NewSharedInformerFactory(clientset, nil, controller.NoResyncPeriodFunc())
+type daemonSetsController struct {
+*DaemonSetsController

-manager := NewDaemonSetsController(informerFactory.DaemonSets(), informerFactory.Pods(), informerFactory.Nodes(), clientset, 0)
+dsStore cache.Store
+podStore cache.Store
+nodeStore cache.Store
+}
+
+func newTestController(initialObjects ...runtime.Object) (*daemonSetsController, *controller.FakePodControl, *fake.Clientset) {
+clientset := fake.NewSimpleClientset(initialObjects...)
+informerFactory := informers.NewSharedInformerFactory(nil, clientset, controller.NoResyncPeriodFunc())
+
+manager := NewDaemonSetsController(
+informerFactory.Extensions().V1beta1().DaemonSets(),
+informerFactory.Core().V1().Pods(),
+informerFactory.Core().V1().Nodes(),
+clientset,
+0,
+)
 manager.eventRecorder = record.NewFakeRecorder(100)

 manager.podStoreSynced = alwaysReady
@@ -159,7 +173,13 @@ func newTestController(initialObjects ...runtime.Object) (*DaemonSetsController,
 manager.dsStoreSynced = alwaysReady
 podControl := &controller.FakePodControl{}
 manager.podControl = podControl
-return manager, podControl, clientset
+
+return &daemonSetsController{
+manager,
+informerFactory.Extensions().V1beta1().DaemonSets().Informer().GetStore(),
+informerFactory.Core().V1().Pods().Informer().GetStore(),
+informerFactory.Core().V1().Nodes().Informer().GetStore(),
+}, podControl, clientset
 }

 func validateSyncDaemonSets(t *testing.T, fakePodControl *controller.FakePodControl, expectedCreates, expectedDeletes int) {
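The newTestController hunk above now seeds test objects through the stores owned by the generated informers (Informer().GetStore()) rather than the old listers' embedded stores. A self-contained version of that harness pattern using client-go's fake clientset (testFixture and the object names here are illustrative, not the test file's):

```go
package daemontest

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
)

// testFixture bundles the stores a test seeds directly, mirroring the
// daemonSetsController wrapper introduced in the hunk above.
type testFixture struct {
	dsStore   cache.Store
	podStore  cache.Store
	nodeStore cache.Store
}

func newTestFixture() (*testFixture, informers.SharedInformerFactory) {
	client := fake.NewSimpleClientset()
	// A resync period of 0 disables periodic resync, like controller.NoResyncPeriodFunc().
	factory := informers.NewSharedInformerFactory(client, 0)
	f := &testFixture{
		dsStore:   factory.Apps().V1().DaemonSets().Informer().GetStore(),
		podStore:  factory.Core().V1().Pods().Informer().GetStore(),
		nodeStore: factory.Core().V1().Nodes().Informer().GetStore(),
	}
	// Objects added here are visible through the matching listers immediately,
	// without starting the factory or going through the fake API server.
	f.nodeStore.Add(&corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-0"}})
	f.dsStore.Add(&appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}})
	return f, factory
}
```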
@@ -171,7 +191,7 @@ func validateSyncDaemonSets(t *testing.T, fakePodControl *controller.FakePodCont
 }
 }

-func syncAndValidateDaemonSets(t *testing.T, manager *DaemonSetsController, ds *extensions.DaemonSet, podControl *controller.FakePodControl, expectedCreates, expectedDeletes int) {
+func syncAndValidateDaemonSets(t *testing.T, manager *daemonSetsController, ds *extensions.DaemonSet, podControl *controller.FakePodControl, expectedCreates, expectedDeletes int) {
 key, err := controller.KeyFunc(ds)
 if err != nil {
 t.Errorf("Could not get key for daemon.")
@@ -182,7 +202,7 @@ func syncAndValidateDaemonSets(t *testing.T, manager *DaemonSetsController, ds *

 func TestDeleteFinalStateUnknown(t *testing.T) {
 manager, _, _ := newTestController()
-addNodes(manager.nodeStore.Store, 0, 1, nil)
+addNodes(manager.nodeStore, 0, 1, nil)
 ds := newDaemonSet("foo")
 // DeletedFinalStateUnknown should queue the embedded DS if found.
 manager.deleteDaemonset(cache.DeletedFinalStateUnknown{Key: "foo", Obj: ds})
@@ -195,7 +215,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
 // DaemonSets without node selectors should launch pods on every node.
 func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
 manager, podControl, _ := newTestController()
-addNodes(manager.nodeStore.Store, 0, 5, nil)
+addNodes(manager.nodeStore, 0, 5, nil)
 ds := newDaemonSet("foo")
 manager.dsStore.Add(ds)
 syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0)
@@ -269,7 +289,7 @@ func TestInsufficentCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
 node := newNode("too-much-mem", nil)
 node.Status.Allocatable = allocatableResources("100M", "200m")
 manager.nodeStore.Add(node)
-manager.podStore.Indexer.Add(&v1.Pod{
+manager.podStore.Add(&v1.Pod{
 Spec: podSpec,
 })
 ds := newDaemonSet("foo")
@@ -286,7 +306,7 @@ func TestInsufficentCapacityNodeDaemonDoesNotUnscheduleRunningPod(t *testing.T)
 node := newNode("too-much-mem", nil)
 node.Status.Allocatable = allocatableResources("100M", "200m")
 manager.nodeStore.Add(node)
-manager.podStore.Indexer.Add(&v1.Pod{
+manager.podStore.Add(&v1.Pod{
 Spec: podSpec,
 })
 ds := newDaemonSet("foo")
@@ -301,7 +321,7 @@ func TestSufficentCapacityWithTerminatedPodsDaemonLaunchesPod(t *testing.T) {
 node := newNode("too-much-mem", nil)
 node.Status.Allocatable = allocatableResources("100M", "200m")
 manager.nodeStore.Add(node)
-manager.podStore.Indexer.Add(&v1.Pod{
+manager.podStore.Add(&v1.Pod{
 Spec: podSpec,
 Status: v1.PodStatus{Phase: v1.PodSucceeded},
 })
@@ -318,7 +338,7 @@ func TestSufficentCapacityNodeDaemonLaunchesPod(t *testing.T) {
 node := newNode("not-too-much-mem", nil)
 node.Status.Allocatable = allocatableResources("200M", "200m")
 manager.nodeStore.Add(node)
-manager.podStore.Indexer.Add(&v1.Pod{
+manager.podStore.Add(&v1.Pod{
 Spec: podSpec,
 })
 ds := newDaemonSet("foo")
@@ -334,7 +354,7 @@ func TestDontDoAnythingIfBeingDeleted(t *testing.T) {
 node := newNode("not-too-much-mem", nil)
 node.Status.Allocatable = allocatableResources("200M", "200m")
 manager.nodeStore.Add(node)
-manager.podStore.Indexer.Add(&v1.Pod{
+manager.podStore.Add(&v1.Pod{
 Spec: podSpec,
 })
 ds := newDaemonSet("foo")
@@ -358,7 +378,7 @@ func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) {
 manager, podControl, _ := newTestController()
 node := newNode("port-conflict", nil)
 manager.nodeStore.Add(node)
-manager.podStore.Indexer.Add(&v1.Pod{
+manager.podStore.Add(&v1.Pod{
 Spec: podSpec,
 })

@@ -384,7 +404,7 @@ func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
 manager, podControl, _ := newTestController()
 node := newNode("port-conflict", nil)
 manager.nodeStore.Add(node)
-manager.podStore.Indexer.Add(&v1.Pod{
+manager.podStore.Add(&v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
 Labels: simpleDaemonSetLabel,
 Namespace: metav1.NamespaceDefault,
@@ -418,7 +438,7 @@ func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
 manager, podControl, _ := newTestController()
 node := newNode("no-port-conflict", nil)
 manager.nodeStore.Add(node)
-manager.podStore.Indexer.Add(&v1.Pod{
+manager.podStore.Add(&v1.Pod{
 Spec: podSpec1,
 })
 ds := newDaemonSet("foo")
@@ -432,9 +452,9 @@ func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
 // issue https://github.com/kubernetes/kubernetes/pull/23223
 func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
 manager, podControl, _ := newTestController()
-manager.nodeStore.Store.Add(newNode("node1", nil))
+manager.nodeStore.Add(newNode("node1", nil))
 // Create pod not controlled by a daemonset.
-manager.podStore.Indexer.Add(&v1.Pod{
+manager.podStore.Add(&v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
 Labels: map[string]string{"bang": "boom"},
 Namespace: metav1.NamespaceDefault,
@@ -465,11 +485,11 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
 // Controller should not create pods on nodes which have daemon pods, and should remove excess pods from nodes that have extra pods.
 func TestDealsWithExistingPods(t *testing.T) {
 manager, podControl, _ := newTestController()
-addNodes(manager.nodeStore.Store, 0, 5, nil)
-addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 1)
-addPods(manager.podStore.Indexer, "node-2", simpleDaemonSetLabel, 2)
-addPods(manager.podStore.Indexer, "node-3", simpleDaemonSetLabel, 5)
-addPods(manager.podStore.Indexer, "node-4", simpleDaemonSetLabel2, 2)
+addNodes(manager.nodeStore, 0, 5, nil)
+addPods(manager.podStore, "node-1", simpleDaemonSetLabel, 1)
+addPods(manager.podStore, "node-2", simpleDaemonSetLabel, 2)
+addPods(manager.podStore, "node-3", simpleDaemonSetLabel, 5)
+addPods(manager.podStore, "node-4", simpleDaemonSetLabel2, 2)
 ds := newDaemonSet("foo")
 manager.dsStore.Add(ds)
 syncAndValidateDaemonSets(t, manager, ds, podControl, 2, 5)
@@ -478,8 +498,8 @@ func TestDealsWithExistingPods(t *testing.T) {
 // Daemon with node selector should launch pods on nodes matching selector.
 func TestSelectorDaemonLaunchesPods(t *testing.T) {
 manager, podControl, _ := newTestController()
-addNodes(manager.nodeStore.Store, 0, 4, nil)
-addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel)
+addNodes(manager.nodeStore, 0, 4, nil)
+addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
 daemon := newDaemonSet("foo")
 daemon.Spec.Template.Spec.NodeSelector = simpleNodeLabel
 manager.dsStore.Add(daemon)
@@ -489,12 +509,12 @@ func TestSelectorDaemonLaunchesPods(t *testing.T) {
 // Daemon with node selector should delete pods from nodes that do not satisfy selector.
 func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) {
 manager, podControl, _ := newTestController()
-addNodes(manager.nodeStore.Store, 0, 5, nil)
-addNodes(manager.nodeStore.Store, 5, 5, simpleNodeLabel)
-addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel2, 2)
-addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 3)
-addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel2, 1)
-addPods(manager.podStore.Indexer, "node-4", simpleDaemonSetLabel, 1)
+addNodes(manager.nodeStore, 0, 5, nil)
+addNodes(manager.nodeStore, 5, 5, simpleNodeLabel)
+addPods(manager.podStore, "node-0", simpleDaemonSetLabel2, 2)
+addPods(manager.podStore, "node-1", simpleDaemonSetLabel, 3)
+addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, 1)
+addPods(manager.podStore, "node-4", simpleDaemonSetLabel, 1)
 daemon := newDaemonSet("foo")
 daemon.Spec.Template.Spec.NodeSelector = simpleNodeLabel
 manager.dsStore.Add(daemon)
@@ -504,16 +524,16 @@ func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) {
 // DaemonSet with node selector should launch pods on nodes matching selector, but also deal with existing pods on nodes.
 func TestSelectorDaemonDealsWithExistingPods(t *testing.T) {
 manager, podControl, _ := newTestController()
-addNodes(manager.nodeStore.Store, 0, 5, nil)
-addNodes(manager.nodeStore.Store, 5, 5, simpleNodeLabel)
-addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel, 1)
-addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 3)
-addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel2, 2)
-addPods(manager.podStore.Indexer, "node-2", simpleDaemonSetLabel, 4)
-addPods(manager.podStore.Indexer, "node-6", simpleDaemonSetLabel, 13)
-addPods(manager.podStore.Indexer, "node-7", simpleDaemonSetLabel2, 4)
-addPods(manager.podStore.Indexer, "node-9", simpleDaemonSetLabel, 1)
-addPods(manager.podStore.Indexer, "node-9", simpleDaemonSetLabel2, 1)
+addNodes(manager.nodeStore, 0, 5, nil)
+addNodes(manager.nodeStore, 5, 5, simpleNodeLabel)
+addPods(manager.podStore, "node-0", simpleDaemonSetLabel, 1)
+addPods(manager.podStore, "node-1", simpleDaemonSetLabel, 3)
+addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, 2)
+addPods(manager.podStore, "node-2", simpleDaemonSetLabel, 4)
+addPods(manager.podStore, "node-6", simpleDaemonSetLabel, 13)
+addPods(manager.podStore, "node-7", simpleDaemonSetLabel2, 4)
+addPods(manager.podStore, "node-9", simpleDaemonSetLabel, 1)
+addPods(manager.podStore, "node-9", simpleDaemonSetLabel2, 1)
 ds := newDaemonSet("foo")
 ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
 manager.dsStore.Add(ds)
@@ -523,8 +543,8 @@ func TestSelectorDaemonDealsWithExistingPods(t *testing.T) {
 // DaemonSet with node selector which does not match any node labels should not launch pods.
 func TestBadSelectorDaemonDoesNothing(t *testing.T) {
|
func TestBadSelectorDaemonDoesNothing(t *testing.T) {
|
||||||
manager, podControl, _ := newTestController()
|
manager, podControl, _ := newTestController()
|
||||||
addNodes(manager.nodeStore.Store, 0, 4, nil)
|
addNodes(manager.nodeStore, 0, 4, nil)
|
||||||
addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel)
|
addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
|
||||||
ds := newDaemonSet("foo")
|
ds := newDaemonSet("foo")
|
||||||
ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel2
|
ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel2
|
||||||
manager.dsStore.Add(ds)
|
manager.dsStore.Add(ds)
|
||||||
|
@ -534,7 +554,7 @@ func TestBadSelectorDaemonDoesNothing(t *testing.T) {
|
||||||
// DaemonSet with node name should launch pod on node with corresponding name.
|
// DaemonSet with node name should launch pod on node with corresponding name.
|
||||||
func TestNameDaemonSetLaunchesPods(t *testing.T) {
|
func TestNameDaemonSetLaunchesPods(t *testing.T) {
|
||||||
manager, podControl, _ := newTestController()
|
manager, podControl, _ := newTestController()
|
||||||
addNodes(manager.nodeStore.Store, 0, 5, nil)
|
addNodes(manager.nodeStore, 0, 5, nil)
|
||||||
ds := newDaemonSet("foo")
|
ds := newDaemonSet("foo")
|
||||||
ds.Spec.Template.Spec.NodeName = "node-0"
|
ds.Spec.Template.Spec.NodeName = "node-0"
|
||||||
manager.dsStore.Add(ds)
|
manager.dsStore.Add(ds)
|
||||||
|
@ -544,7 +564,7 @@ func TestNameDaemonSetLaunchesPods(t *testing.T) {
|
||||||
// DaemonSet with node name that does not exist should not launch pods.
|
// DaemonSet with node name that does not exist should not launch pods.
|
||||||
func TestBadNameDaemonSetDoesNothing(t *testing.T) {
|
func TestBadNameDaemonSetDoesNothing(t *testing.T) {
|
||||||
manager, podControl, _ := newTestController()
|
manager, podControl, _ := newTestController()
|
||||||
addNodes(manager.nodeStore.Store, 0, 5, nil)
|
addNodes(manager.nodeStore, 0, 5, nil)
|
||||||
ds := newDaemonSet("foo")
|
ds := newDaemonSet("foo")
|
||||||
ds.Spec.Template.Spec.NodeName = "node-10"
|
ds.Spec.Template.Spec.NodeName = "node-10"
|
||||||
manager.dsStore.Add(ds)
|
manager.dsStore.Add(ds)
|
||||||
|
@ -554,8 +574,8 @@ func TestBadNameDaemonSetDoesNothing(t *testing.T) {
|
||||||
// DaemonSet with node selector, and node name, matching a node, should launch a pod on the node.
|
// DaemonSet with node selector, and node name, matching a node, should launch a pod on the node.
|
||||||
func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) {
|
func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) {
|
||||||
manager, podControl, _ := newTestController()
|
manager, podControl, _ := newTestController()
|
||||||
addNodes(manager.nodeStore.Store, 0, 4, nil)
|
addNodes(manager.nodeStore, 0, 4, nil)
|
||||||
addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel)
|
addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
|
||||||
ds := newDaemonSet("foo")
|
ds := newDaemonSet("foo")
|
||||||
ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
|
ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
|
||||||
ds.Spec.Template.Spec.NodeName = "node-6"
|
ds.Spec.Template.Spec.NodeName = "node-6"
|
||||||
|
@ -566,8 +586,8 @@ func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) {
|
||||||
// DaemonSet with node selector that matches some nodes, and node name that matches a different node, should do nothing.
|
// DaemonSet with node selector that matches some nodes, and node name that matches a different node, should do nothing.
|
||||||
func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) {
|
func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) {
|
||||||
manager, podControl, _ := newTestController()
|
manager, podControl, _ := newTestController()
|
||||||
addNodes(manager.nodeStore.Store, 0, 4, nil)
|
addNodes(manager.nodeStore, 0, 4, nil)
|
||||||
addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel)
|
addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
|
||||||
ds := newDaemonSet("foo")
|
ds := newDaemonSet("foo")
|
||||||
ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
|
ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
|
||||||
ds.Spec.Template.Spec.NodeName = "node-0"
|
ds.Spec.Template.Spec.NodeName = "node-0"
|
||||||
|
@ -578,8 +598,8 @@ func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) {
|
||||||
// Daemon with node affinity should launch pods on nodes matching affinity.
|
// Daemon with node affinity should launch pods on nodes matching affinity.
|
||||||
func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
|
func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
|
||||||
manager, podControl, _ := newTestController()
|
manager, podControl, _ := newTestController()
|
||||||
addNodes(manager.nodeStore.Store, 0, 4, nil)
|
addNodes(manager.nodeStore, 0, 4, nil)
|
||||||
addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel)
|
addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
|
||||||
daemon := newDaemonSet("foo")
|
daemon := newDaemonSet("foo")
|
||||||
daemon.Spec.Template.Spec.Affinity = &v1.Affinity{
|
daemon.Spec.Template.Spec.Affinity = &v1.Affinity{
|
||||||
NodeAffinity: &v1.NodeAffinity{
|
NodeAffinity: &v1.NodeAffinity{
|
||||||
|
@ -615,9 +635,9 @@ func TestNumberReadyStatus(t *testing.T) {
|
||||||
}
|
}
|
||||||
return false, nil, nil
|
return false, nil, nil
|
||||||
})
|
})
|
||||||
addNodes(manager.nodeStore.Store, 0, 2, simpleNodeLabel)
|
addNodes(manager.nodeStore, 0, 2, simpleNodeLabel)
|
||||||
addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel, 1)
|
addPods(manager.podStore, "node-0", simpleDaemonSetLabel, 1)
|
||||||
addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 1)
|
addPods(manager.podStore, "node-1", simpleDaemonSetLabel, 1)
|
||||||
manager.dsStore.Add(daemon)
|
manager.dsStore.Add(daemon)
|
||||||
|
|
||||||
syncAndValidateDaemonSets(t, manager, daemon, podControl, 0, 0)
|
syncAndValidateDaemonSets(t, manager, daemon, podControl, 0, 0)
|
||||||
|
@ -626,7 +646,7 @@ func TestNumberReadyStatus(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
selector, _ := metav1.LabelSelectorAsSelector(daemon.Spec.Selector)
|
selector, _ := metav1.LabelSelectorAsSelector(daemon.Spec.Selector)
|
||||||
daemonPods, _ := manager.podStore.Pods(daemon.Namespace).List(selector)
|
daemonPods, _ := manager.podLister.Pods(daemon.Namespace).List(selector)
|
||||||
for _, pod := range daemonPods {
|
for _, pod := range daemonPods {
|
||||||
condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
|
condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
|
||||||
pod.Status.Conditions = append(pod.Status.Conditions, condition)
|
pod.Status.Conditions = append(pod.Status.Conditions, condition)
|
||||||
|
@ -653,8 +673,8 @@ func TestObservedGeneration(t *testing.T) {
|
||||||
return false, nil, nil
|
return false, nil, nil
|
||||||
})
|
})
|
||||||
|
|
||||||
addNodes(manager.nodeStore.Store, 0, 1, simpleNodeLabel)
|
addNodes(manager.nodeStore, 0, 1, simpleNodeLabel)
|
||||||
addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel, 1)
|
addPods(manager.podStore, "node-0", simpleDaemonSetLabel, 1)
|
||||||
manager.dsStore.Add(daemon)
|
manager.dsStore.Add(daemon)
|
||||||
|
|
||||||
syncAndValidateDaemonSets(t, manager, daemon, podControl, 0, 0)
|
syncAndValidateDaemonSets(t, manager, daemon, podControl, 0, 0)
|
||||||
|
@ -679,9 +699,9 @@ func TestDaemonKillFailedPods(t *testing.T) {
|
||||||
for _, test := range tests {
|
for _, test := range tests {
|
||||||
t.Logf("test case: %s\n", test.test)
|
t.Logf("test case: %s\n", test.test)
|
||||||
manager, podControl, _ := newTestController()
|
manager, podControl, _ := newTestController()
|
||||||
addNodes(manager.nodeStore.Store, 0, 1, nil)
|
addNodes(manager.nodeStore, 0, 1, nil)
|
||||||
addFailedPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel, test.numFailedPods)
|
addFailedPods(manager.podStore, "node-0", simpleDaemonSetLabel, test.numFailedPods)
|
||||||
addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel, test.numNormalPods)
|
addPods(manager.podStore, "node-0", simpleDaemonSetLabel, test.numNormalPods)
|
||||||
ds := newDaemonSet("foo")
|
ds := newDaemonSet("foo")
|
||||||
manager.dsStore.Add(ds)
|
manager.dsStore.Add(ds)
|
||||||
syncAndValidateDaemonSets(t, manager, ds, podControl, test.expectedCreates, test.expectedDeletes)
|
syncAndValidateDaemonSets(t, manager, ds, podControl, test.expectedCreates, test.expectedDeletes)
|
||||||
|
@ -782,9 +802,9 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
|
||||||
node := newNode("test-node", nil)
|
node := newNode("test-node", nil)
|
||||||
node.Status.Allocatable = allocatableResources("100M", "1")
|
node.Status.Allocatable = allocatableResources("100M", "1")
|
||||||
manager, _, _ := newTestController()
|
manager, _, _ := newTestController()
|
||||||
manager.nodeStore.Store.Add(node)
|
manager.nodeStore.Add(node)
|
||||||
for _, p := range c.podsOnNode {
|
for _, p := range c.podsOnNode {
|
||||||
manager.podStore.Indexer.Add(p)
|
manager.podStore.Add(p)
|
||||||
p.Spec.NodeName = "test-node"
|
p.Spec.NodeName = "test-node"
|
||||||
}
|
}
|
||||||
wantToRun, shouldSchedule, shouldContinueRunning, err := manager.nodeShouldRunDaemonPod(node, c.ds)
|
wantToRun, shouldSchedule, shouldContinueRunning, err := manager.nodeShouldRunDaemonPod(node, c.ds)
|
||||||
|
|
|
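Note (not part of the diff): the daemon set tests above now add objects straight to the informer-backed stores (manager.nodeStore.Add, manager.podStore.Add) and read through the typed lister (manager.podLister), instead of reaching into the hand-written StoreTo* wrappers via .Store/.Indexer. A minimal sketch of that seeding pattern, assuming the fake clientset, generated factory, and controller.NoResyncPeriodFunc used elsewhere in this commit:

    // Sketch only: seed a generated shared informer's store in a unit test.
    client := fake.NewSimpleClientset()
    f := informers.NewSharedInformerFactory(nil, client, controller.NoResyncPeriodFunc())

    // Writes go straight into the informer's indexer; no StoreTo* wrapper is involved.
    podIndexer := f.Core().V1().Pods().Informer().GetIndexer()
    podIndexer.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod-0", Namespace: metav1.NamespaceDefault}})

    // Reads go through the typed lister backed by the same store.
    pods, err := f.Core().V1().Pods().Lister().Pods(metav1.NamespaceDefault).List(labels.Everything())
    if err != nil {
        // handle error
    }
    _ = pods
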
@@ -24,10 +24,12 @@ go_library(
         "//pkg/api/v1:go_default_library",
         "//pkg/apis/extensions/v1beta1:go_default_library",
         "//pkg/client/clientset_generated/clientset:go_default_library",
-        "//pkg/client/legacylisters:go_default_library",
+        "//pkg/client/informers/informers_generated/core/v1:go_default_library",
+        "//pkg/client/informers/informers_generated/extensions/v1beta1:go_default_library",
+        "//pkg/client/listers/core/v1:go_default_library",
+        "//pkg/client/listers/extensions/v1beta1:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/controller/deployment/util:go_default_library",
-        "//pkg/controller/informers:go_default_library",
         "//pkg/util/labels:go_default_library",
         "//pkg/util/metrics:go_default_library",
         "//vendor:github.com/golang/glog",
@@ -63,9 +65,9 @@ go_test(
         "//pkg/api/v1:go_default_library",
         "//pkg/apis/extensions/v1beta1:go_default_library",
         "//pkg/client/clientset_generated/clientset/fake:go_default_library",
+        "//pkg/client/informers/informers_generated:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/controller/deployment/util:go_default_library",
-        "//pkg/controller/informers:go_default_library",
         "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
         "//vendor:k8s.io/apimachinery/pkg/runtime",
         "//vendor:k8s.io/apimachinery/pkg/runtime/schema",

@@ -44,10 +44,12 @@ import (
 	"k8s.io/kubernetes/pkg/api/v1"
 	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	"k8s.io/kubernetes/pkg/client/legacylisters"
+	coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1"
+	extensionsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/extensions/v1beta1"
+	corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
+	extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/deployment/util"
-	"k8s.io/kubernetes/pkg/controller/informers"
 	"k8s.io/kubernetes/pkg/util/metrics"
 )

@@ -79,12 +81,12 @@ type DeploymentController struct {
 	// used for unit testing
 	enqueueDeployment func(deployment *extensions.Deployment)

-	// A store of deployments, populated by the dController
-	dLister *listers.StoreToDeploymentLister
-	// A store of ReplicaSets, populated by the rsController
-	rsLister *listers.StoreToReplicaSetLister
-	// A store of pods, populated by the podController
-	podLister *listers.StoreToPodLister
+	// dLister can list/get deployments from the shared informer's store
+	dLister extensionslisters.DeploymentLister
+	// rsLister can list/get replica sets from the shared informer's store
+	rsLister extensionslisters.ReplicaSetLister
+	// podLister can list/get pods from the shared informer's store
+	podLister corelisters.PodLister

 	// dListerSynced returns true if the Deployment store has been synced at least once.
 	// Added as a member to the struct to allow injection for testing.
@@ -103,7 +105,7 @@ type DeploymentController struct {
 }

 // NewDeploymentController creates a new DeploymentController.
-func NewDeploymentController(dInformer informers.DeploymentInformer, rsInformer informers.ReplicaSetInformer, podInformer informers.PodInformer, client clientset.Interface) *DeploymentController {
+func NewDeploymentController(dInformer extensionsinformers.DeploymentInformer, rsInformer extensionsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, client clientset.Interface) *DeploymentController {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
 	// TODO: remove the wrapper when every clients have moved to use the clientset.
@@ -159,6 +161,7 @@ func (dc *DeploymentController) Run(workers int, stopCh <-chan struct{}) {
 	glog.Infof("Starting deployment controller")

 	if !cache.WaitForCacheSync(stopCh, dc.dListerSynced, dc.rsListerSynced, dc.podListerSynced) {
+		utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
 		return
 	}

@@ -497,17 +500,20 @@ func (dc *DeploymentController) syncDeployment(key string) error {
 		glog.V(4).Infof("Finished syncing deployment %q (%v)", key, time.Now().Sub(startTime))
 	}()

-	obj, exists, err := dc.dLister.Indexer.GetByKey(key)
+	namespace, name, err := cache.SplitMetaNamespaceKey(key)
+	if err != nil {
+		return err
+	}
+	deployment, err := dc.dLister.Deployments(namespace).Get(name)
+	if errors.IsNotFound(err) {
+		glog.Infof("Deployment has been deleted %v", key)
+		return nil
+	}
 	if err != nil {
 		utilruntime.HandleError(fmt.Errorf("Unable to retrieve deployment %v from store: %v", key, err))
 		return err
 	}
-	if !exists {
-		glog.Infof("Deployment has been deleted %v", key)
-		return nil
-	}

-	deployment := obj.(*extensions.Deployment)
 	// Deep-copy otherwise we are mutating our cache.
 	// TODO: Deep-copy only when needed.
 	d, err := util.DeploymentDeepCopy(deployment)
@@ -766,15 +772,18 @@ func (dc *DeploymentController) checkNextItemForProgress() bool {
 // checkForProgress checks the progress for the provided deployment. Meant to be called
 // by the progressWorker and work on items synced in a secondary queue.
 func (dc *DeploymentController) checkForProgress(key string) (bool, error) {
-	obj, exists, err := dc.dLister.Indexer.GetByKey(key)
+	namespace, name, err := cache.SplitMetaNamespaceKey(key)
+	if err != nil {
+		return false, err
+	}
+	deployment, err := dc.dLister.Deployments(namespace).Get(name)
+	if errors.IsNotFound(err) {
+		return false, nil
+	}
 	if err != nil {
 		glog.V(2).Infof("Cannot retrieve deployment %q found in the secondary queue: %#v", key, err)
 		return false, err
 	}
-	if !exists {
-		return false, nil
-	}
-	deployment := obj.(*extensions.Deployment)
 	cond := util.GetDeploymentCondition(deployment.Status, extensions.DeploymentProgressing)
 	// Already marked with a terminal reason - no need to add it back to the main queue.
 	if cond != nil && (cond.Reason == util.TimedOutReason || cond.Reason == util.NewRSAvailableReason) {

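Note (not part of the diff): both syncDeployment and checkForProgress switch from the raw indexer lookup to the generated lister. The shared pattern, pulled out here as a sketch and assuming the imports shown in the hunks above (cache from client-go, plus k8s.io/apimachinery/pkg/api/errors imported elsewhere in the file), is:

    // "key" is the namespace/name string taken off the work queue.
    namespace, name, err := cache.SplitMetaNamespaceKey(key)
    if err != nil {
        return err
    }
    deployment, err := dc.dLister.Deployments(namespace).Get(name)
    if errors.IsNotFound(err) {
        // The typed lister reports "not found" as an error instead of an exists bool,
        // and returns a *extensions.Deployment directly, so no type assertion is needed.
        glog.Infof("Deployment has been deleted %v", key)
        return nil
    }
    if err != nil {
        return err
    }
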
@@ -32,9 +32,9 @@ import (
 	"k8s.io/kubernetes/pkg/api/v1"
 	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
+	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/deployment/util"
-	"k8s.io/kubernetes/pkg/controller/informers"
 )

 var (
@@ -166,20 +166,20 @@ func newFixture(t *testing.T) *fixture {

 func (f *fixture) newController() (*DeploymentController, informers.SharedInformerFactory) {
 	f.client = fake.NewSimpleClientset(f.objects...)
-	informers := informers.NewSharedInformerFactory(f.client, nil, controller.NoResyncPeriodFunc())
+	informers := informers.NewSharedInformerFactory(nil, f.client, controller.NoResyncPeriodFunc())
-	c := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), f.client)
+	c := NewDeploymentController(informers.Extensions().V1beta1().Deployments(), informers.Extensions().V1beta1().ReplicaSets(), informers.Core().V1().Pods(), f.client)
 	c.eventRecorder = &record.FakeRecorder{}
 	c.dListerSynced = alwaysReady
 	c.rsListerSynced = alwaysReady
 	c.podListerSynced = alwaysReady
 	for _, d := range f.dLister {
-		c.dLister.Indexer.Add(d)
+		informers.Extensions().V1beta1().Deployments().Informer().GetIndexer().Add(d)
 	}
 	for _, rs := range f.rsLister {
-		c.rsLister.Indexer.Add(rs)
+		informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
 	}
 	for _, pod := range f.podLister {
-		c.podLister.Indexer.Add(pod)
+		informers.Core().V1().Pods().Informer().GetIndexer().Add(pod)
 	}
 	return c, informers
 }
@@ -246,8 +246,8 @@ func TestSyncDeploymentDontDoAnythingDuringDeletion(t *testing.T) {
 // issue: https://github.com/kubernetes/kubernetes/issues/23218
 func TestDeploymentController_dontSyncDeploymentsWithEmptyPodSelector(t *testing.T) {
 	fake := &fake.Clientset{}
-	informers := informers.NewSharedInformerFactory(fake, nil, controller.NoResyncPeriodFunc())
+	informers := informers.NewSharedInformerFactory(nil, fake, controller.NoResyncPeriodFunc())
-	controller := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), fake)
+	controller := NewDeploymentController(informers.Extensions().V1beta1().Deployments(), informers.Extensions().V1beta1().ReplicaSets(), informers.Core().V1().Pods(), fake)
 	controller.eventRecorder = &record.FakeRecorder{}
 	controller.dListerSynced = alwaysReady
 	controller.rsListerSynced = alwaysReady
@@ -260,7 +260,7 @@ func TestDeploymentController_dontSyncDeploymentsWithEmptyPodSelector(t *testing
 	d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
 	empty := metav1.LabelSelector{}
 	d.Spec.Selector = &empty
-	controller.dLister.Indexer.Add(d)
+	informers.Extensions().V1beta1().Deployments().Informer().GetIndexer().Add(d)
 	// We expect the deployment controller to not take action here since it's configuration
 	// is invalid, even though no replicasets exist that match it's selector.
 	controller.syncDeployment(fmt.Sprintf("%s/%s", d.ObjectMeta.Namespace, d.ObjectMeta.Name))

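Note (not part of the diff): the deployment tests above now build the controller from group/version-typed accessors on the generated factory rather than from the flat methods of the hand-written factory. A condensed sketch of the wiring, using only constructs shown in this diff (newDeployment and alwaysReady are helpers from this test file; the informers are never started in these unit tests, so stores are seeded directly):

    client := fake.NewSimpleClientset()
    factory := informers.NewSharedInformerFactory(nil, client, controller.NoResyncPeriodFunc())

    c := NewDeploymentController(
        factory.Extensions().V1beta1().Deployments(),
        factory.Extensions().V1beta1().ReplicaSets(),
        factory.Core().V1().Pods(),
        client,
    )
    c.dListerSynced = alwaysReady
    c.rsListerSynced = alwaysReady
    c.podListerSynced = alwaysReady

    // Seed the informer store directly, then drive one sync by key.
    d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
    factory.Extensions().V1beta1().Deployments().Informer().GetIndexer().Add(d)
    c.syncDeployment(fmt.Sprintf("%s/%s", d.Namespace, d.Name))
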
@@ -25,8 +25,8 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
+	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
 	"k8s.io/kubernetes/pkg/controller"
-	"k8s.io/kubernetes/pkg/controller/informers"
 )

 func TestScaleDownOldReplicaSets(t *testing.T) {
@@ -68,8 +68,8 @@ func TestScaleDownOldReplicaSets(t *testing.T) {
 		}

 		kc := fake.NewSimpleClientset(expected...)
-		informers := informers.NewSharedInformerFactory(kc, nil, controller.NoResyncPeriodFunc())
+		informers := informers.NewSharedInformerFactory(nil, kc, controller.NoResyncPeriodFunc())
-		c := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), kc)
+		c := NewDeploymentController(informers.Extensions().V1beta1().Deployments(), informers.Extensions().V1beta1().ReplicaSets(), informers.Core().V1().Pods(), kc)
 		c.eventRecorder = &record.FakeRecorder{}

 		c.scaleDownOldReplicaSetsForRecreate(oldRSs, test.d)

@@ -26,9 +26,9 @@ import (
 	"k8s.io/client-go/tools/record"
 	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
+	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
 	"k8s.io/kubernetes/pkg/controller"
 	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
-	"k8s.io/kubernetes/pkg/controller/informers"
 )

 func maxSurge(val int) *intstr.IntOrString {
@@ -371,15 +371,15 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) {
 	for i := range tests {
 		test := tests[i]
 		fake := &fake.Clientset{}
-		informers := informers.NewSharedInformerFactory(fake, nil, controller.NoResyncPeriodFunc())
+		informers := informers.NewSharedInformerFactory(nil, fake, controller.NoResyncPeriodFunc())
-		controller := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), fake)
+		controller := NewDeploymentController(informers.Extensions().V1beta1().Deployments(), informers.Extensions().V1beta1().ReplicaSets(), informers.Core().V1().Pods(), fake)

 		controller.eventRecorder = &record.FakeRecorder{}
 		controller.dListerSynced = alwaysReady
 		controller.rsListerSynced = alwaysReady
 		controller.podListerSynced = alwaysReady
 		for _, rs := range test.oldRSs {
-			controller.rsLister.Indexer.Add(rs)
+			informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
 		}

 		stopCh := make(chan struct{})

@@ -25,7 +25,8 @@ go_library(
         "//pkg/client/clientset_generated/clientset:go_default_library",
         "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
         "//pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library",
-        "//pkg/client/legacylisters:go_default_library",
+        "//pkg/client/listers/core/v1:go_default_library",
+        "//pkg/client/listers/extensions/v1beta1:go_default_library",
         "//pkg/client/retry:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/util/hash:go_default_library",

@@ -40,7 +40,7 @@ import (
 	internalextensions "k8s.io/kubernetes/pkg/apis/extensions"
 	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	"k8s.io/kubernetes/pkg/client/legacylisters"
+	corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
 	"k8s.io/kubernetes/pkg/controller"
 	labelsutil "k8s.io/kubernetes/pkg/util/labels"
 )
@@ -685,7 +685,7 @@ func WaitForPodsHashPopulated(c clientset.Interface, desiredGeneration int64, na

 // LabelPodsWithHash labels all pods in the given podList with the new hash label.
 // The returned bool value can be used to tell if all pods are actually labeled.
-func LabelPodsWithHash(podList *v1.PodList, c clientset.Interface, podLister *listers.StoreToPodLister, namespace, name, hash string) error {
+func LabelPodsWithHash(podList *v1.PodList, c clientset.Interface, podLister corelisters.PodLister, namespace, name, hash string) error {
 	for _, pod := range podList.Items {
 		// Only label the pod that doesn't already have the new hash
 		if pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] != hash {

@@ -26,7 +26,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
-	"k8s.io/kubernetes/pkg/client/legacylisters"
+	corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
 	"k8s.io/kubernetes/pkg/client/retry"
 	hashutil "k8s.io/kubernetes/pkg/util/hash"
 )
@@ -56,7 +56,7 @@ type updatePodFunc func(pod *v1.Pod) error

 // UpdatePodWithRetries updates a pod with given applyUpdate function. Note that pod not found error is ignored.
 // The returned bool value can be used to tell if the pod is actually updated.
-func UpdatePodWithRetries(podClient v1core.PodInterface, podLister *listers.StoreToPodLister, namespace, name string, applyUpdate updatePodFunc) (*v1.Pod, error) {
+func UpdatePodWithRetries(podClient v1core.PodInterface, podLister corelisters.PodLister, namespace, name string, applyUpdate updatePodFunc) (*v1.Pod, error) {
 	var pod *v1.Pod

 	retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error {

@@ -26,7 +26,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/v1"
 	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1"
-	"k8s.io/kubernetes/pkg/client/legacylisters"
+	extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/client/retry"
 	labelsutil "k8s.io/kubernetes/pkg/util/labels"
 )
@@ -37,7 +37,7 @@ type updateRSFunc func(rs *extensions.ReplicaSet) error

 // UpdateRSWithRetries updates a RS with given applyUpdate function. Note that RS not found error is ignored.
 // The returned bool value can be used to tell if the RS is actually updated.
-func UpdateRSWithRetries(rsClient unversionedextensions.ReplicaSetInterface, rsLister *listers.StoreToReplicaSetLister, namespace, name string, applyUpdate updateRSFunc) (*extensions.ReplicaSet, error) {
+func UpdateRSWithRetries(rsClient unversionedextensions.ReplicaSetInterface, rsLister extensionslisters.ReplicaSetLister, namespace, name string, applyUpdate updateRSFunc) (*extensions.ReplicaSet, error) {
 	var rs *extensions.ReplicaSet

 	retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error {

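Note (not part of the diff): the retry helpers above now accept the generated lister interfaces (corelisters.PodLister, extensionslisters.ReplicaSetLister) instead of pointers to the hand-written StoreTo* types. A hypothetical caller sketch from inside this util package, with the pod client left abstract since how it is obtained is not shown in this diff:

    var podClient v1core.PodInterface          // e.g. from the generated clientset (assumption)
    podLister := factory.Core().V1().Pods().Lister()

    updatedPod, err := UpdatePodWithRetries(podClient, podLister, "default", "my-pod",
        func(pod *v1.Pod) error {
            // applyUpdate mutates the cached copy before the retried Update call.
            pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] = "12345"
            return nil
        })
    if err != nil {
        // handle error
    }
    _ = updatedPod
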
@@ -21,10 +21,11 @@ go_library(
         "//pkg/api/v1:go_default_library",
         "//pkg/apis/batch/v1:go_default_library",
         "//pkg/client/clientset_generated/clientset:go_default_library",
-        "//pkg/client/legacylisters:go_default_library",
+        "//pkg/client/informers/informers_generated/batch/v1:go_default_library",
+        "//pkg/client/informers/informers_generated/core/v1:go_default_library",
         "//pkg/client/listers/batch/v1:go_default_library",
+        "//pkg/client/listers/core/v1:go_default_library",
         "//pkg/controller:go_default_library",
-        "//pkg/controller/informers:go_default_library",
         "//pkg/util/metrics:go_default_library",
         "//vendor:github.com/golang/glog",
         "//vendor:k8s.io/apimachinery/pkg/api/errors",
@@ -53,8 +54,8 @@ go_test(
         "//pkg/apis/batch/v1:go_default_library",
         "//pkg/client/clientset_generated/clientset:go_default_library",
         "//pkg/client/clientset_generated/clientset/fake:go_default_library",
+        "//pkg/client/informers/informers_generated:go_default_library",
         "//pkg/controller:go_default_library",
-        "//pkg/controller/informers:go_default_library",
         "//vendor:k8s.io/apimachinery/pkg/api/equality",
         "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
         "//vendor:k8s.io/apimachinery/pkg/util/rand",

@@ -36,10 +36,11 @@ import (
 	"k8s.io/kubernetes/pkg/api/v1"
 	batch "k8s.io/kubernetes/pkg/apis/batch/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	"k8s.io/kubernetes/pkg/client/legacylisters"
+	batchinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/batch/v1"
+	coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1"
 	batchv1listers "k8s.io/kubernetes/pkg/client/listers/batch/v1"
+	corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
 	"k8s.io/kubernetes/pkg/controller"
-	"k8s.io/kubernetes/pkg/controller/informers"
 	"k8s.io/kubernetes/pkg/util/metrics"

 	"github.com/golang/glog"
@@ -66,7 +67,7 @@ type JobController struct {
 	jobLister batchv1listers.JobLister

 	// A store of pods, populated by the podController
-	podStore listers.StoreToPodLister
+	podStore corelisters.PodLister

 	// Jobs that need to be updated
 	queue workqueue.RateLimitingInterface
@@ -74,7 +75,7 @@ type JobController struct {
 	recorder record.EventRecorder
 }

-func NewJobController(podInformer cache.SharedIndexInformer, jobInformer informers.JobInformer, kubeClient clientset.Interface) *JobController {
+func NewJobController(podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface) *JobController {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
 	// TODO: remove the wrapper when every clients have moved to use the clientset.
@@ -107,13 +108,13 @@ func NewJobController(podInformer cache.SharedIndexInformer, jobInformer informe
 	jm.jobLister = jobInformer.Lister()
 	jm.jobStoreSynced = jobInformer.Informer().HasSynced

-	podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
+	podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc:    jm.addPod,
 		UpdateFunc: jm.updatePod,
 		DeleteFunc: jm.deletePod,
 	})
-	jm.podStore.Indexer = podInformer.GetIndexer()
-	jm.podStoreSynced = podInformer.HasSynced
+	jm.podStore = podInformer.Lister()
+	jm.podStoreSynced = podInformer.Informer().HasSynced

 	jm.updateHandler = jm.updateJobStatus
 	jm.syncHandler = jm.syncJob
@@ -126,6 +127,7 @@ func (jm *JobController) Run(workers int, stopCh <-chan struct{}) {
 	defer jm.queue.ShutDown()

 	if !cache.WaitForCacheSync(stopCh, jm.podStoreSynced, jm.jobStoreSynced) {
+		utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
 		return
 	}

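Note (not part of the diff): NewJobController now takes the typed generated informers rather than a raw cache.SharedIndexInformer, and unwraps them where the underlying shared informer is still needed. A sketch of the construction, using only the signatures shown in the hunks above:

    factory := informers.NewSharedInformerFactory(nil, kubeClient, resyncPeriod())
    jm := NewJobController(
        factory.Core().V1().Pods(),   // coreinformers.PodInformer
        factory.Batch().V1().Jobs(),  // batchinformers.JobInformer
        kubeClient,
    )
    // Inside the constructor the typed informer is unwrapped where the raw
    // shared informer is still required:
    //   podInformer.Informer().AddEventHandler(...)
    //   jm.podStore = podInformer.Lister()
    //   jm.podStoreSynced = podInformer.Informer().HasSynced
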
@@ -34,8 +34,8 @@ import (
 	batch "k8s.io/kubernetes/pkg/apis/batch/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
+	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
 	"k8s.io/kubernetes/pkg/controller"
-	"k8s.io/kubernetes/pkg/controller/informers"
 )

 var alwaysReady = func() bool { return true }
@@ -89,8 +89,8 @@ func getKey(job *batch.Job, t *testing.T) string {
 }

 func newJobControllerFromClient(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) (*JobController, informers.SharedInformerFactory) {
-	sharedInformers := informers.NewSharedInformerFactory(kubeClient, nil, resyncPeriod())
+	sharedInformers := informers.NewSharedInformerFactory(nil, kubeClient, resyncPeriod())
-	jm := NewJobController(sharedInformers.Pods().Informer(), sharedInformers.Jobs(), kubeClient)
+	jm := NewJobController(sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), kubeClient)

 	return jm, sharedInformers
 }
@@ -246,8 +246,8 @@ func TestControllerSyncJob(t *testing.T) {
 			now := metav1.Now()
 			job.DeletionTimestamp = &now
 		}
-		sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job)
+		sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
-		podIndexer := sharedInformerFactory.Pods().Informer().GetIndexer()
+		podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer()
 		for _, pod := range newPodList(tc.pendingPods, v1.PodPending, job) {
 			podIndexer.Add(&pod)
 		}
@@ -349,8 +349,8 @@ func TestSyncJobPastDeadline(t *testing.T) {
 		job.Spec.ActiveDeadlineSeconds = &tc.activeDeadlineSeconds
 		start := metav1.Unix(metav1.Now().Time.Unix()-tc.startTime, 0)
 		job.Status.StartTime = &start
-		sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job)
+		sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
-		podIndexer := sharedInformerFactory.Pods().Informer().GetIndexer()
+		podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer()
 		for _, pod := range newPodList(tc.activePods, v1.PodRunning, job) {
 			podIndexer.Add(&pod)
 		}
@@ -422,7 +422,7 @@ func TestSyncPastDeadlineJobFinished(t *testing.T) {
 	start := metav1.Unix(metav1.Now().Time.Unix()-15, 0)
 	job.Status.StartTime = &start
 	job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobFailed, "DeadlineExceeded", "Job was active longer than specified deadline"))
-	sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job)
+	sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
 	err := manager.syncJob(getKey(job, t))
 	if err != nil {
 		t.Errorf("Unexpected error when syncing jobs %v", err)
@@ -448,7 +448,7 @@ func TestSyncJobComplete(t *testing.T) {

 	job := newJob(1, 1)
 	job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobComplete, "", ""))
-	sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job)
+	sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
 	err := manager.syncJob(getKey(job, t))
 	if err != nil {
 		t.Fatalf("Unexpected error when syncing jobs %v", err)
@@ -497,7 +497,7 @@ func TestSyncJobUpdateRequeue(t *testing.T) {
 		return updateError
 	}
 	job := newJob(2, 2)
-	sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job)
+	sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
 	err := manager.syncJob(getKey(job, t))
 	if err == nil || err != updateError {
 		t.Errorf("Expected error %v when syncing jobs, got %v", updateError, err)
@@ -577,7 +577,7 @@ func TestJobPodLookup(t *testing.T) {
 		},
 	}
 	for _, tc := range testCases {
-		sharedInformerFactory.Jobs().Informer().GetIndexer().Add(tc.job)
+		sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(tc.job)
 		if job := manager.getPodJob(tc.pod); job != nil {
 			if tc.expectedName != job.Name {
 				t.Errorf("Got job %+v expected %+v", job.Name, tc.expectedName)
@@ -611,9 +611,9 @@ func TestSyncJobExpectations(t *testing.T) {
 	manager.updateHandler = func(job *batch.Job) error { return nil }

 	job := newJob(2, 2)
-	sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job)
+	sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
 	pods := newPodList(2, v1.PodPending, job)
-	podIndexer := sharedInformerFactory.Pods().Informer().GetIndexer()
+	podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer()
 	podIndexer.Add(&pods[0])

 	manager.expectations = FakeJobExpectations{
@@ -687,7 +687,7 @@ func TestWatchPods(t *testing.T) {
 	manager.jobStoreSynced = alwaysReady

 	// Put one job and one pod into the store
-	sharedInformerFactory.Jobs().Informer().GetIndexer().Add(testJob)
+	sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(testJob)
 	received := make(chan struct{})
 	// The pod update sent through the fakeWatcher should figure out the managing job and
 	// send it into the syncHandler.
@@ -712,7 +712,7 @@ func TestWatchPods(t *testing.T) {
 	// and make sure it hits the sync method for the right job.
 	stopCh := make(chan struct{})
 	defer close(stopCh)
-	go sharedInformerFactory.Pods().Informer().Run(stopCh)
+	go sharedInformerFactory.Core().V1().Pods().Informer().Run(stopCh)
 	go wait.Until(manager.worker, 10*time.Millisecond, stopCh)

 	pods := newPodList(1, v1.PodRunning, testJob)

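Note (not part of the diff): most of the job tests above seed the generated informers' indexers without running them, but TestWatchPods shows that a single generated informer can also be run directly, exactly as the old one-off informer was. A sketch restating that pattern with the names used in the test:

    stopCh := make(chan struct{})
    defer close(stopCh)
    // Run only the pod informer; the rest of the factory stays idle in this test.
    go sharedInformerFactory.Core().V1().Pods().Informer().Run(stopCh)
    go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
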
@@ -24,9 +24,11 @@ go_library(
         "//pkg/api:go_default_library",
         "//pkg/api/v1:go_default_library",
         "//pkg/client/clientset_generated/clientset:go_default_library",
-        "//pkg/client/legacylisters:go_default_library",
+        "//pkg/client/informers/informers_generated/core/v1:go_default_library",
+        "//pkg/client/informers/informers_generated/extensions/v1beta1:go_default_library",
+        "//pkg/client/listers/core/v1:go_default_library",
+        "//pkg/client/listers/extensions/v1beta1:go_default_library",
         "//pkg/cloudprovider:go_default_library",
-        "//pkg/controller/informers:go_default_library",
         "//pkg/kubelet/util/format:go_default_library",
         "//pkg/util/metrics:go_default_library",
         "//pkg/util/node:go_default_library",
@@ -67,10 +69,12 @@ go_test(
         "//pkg/apis/extensions/v1beta1:go_default_library",
         "//pkg/client/clientset_generated/clientset:go_default_library",
         "//pkg/client/clientset_generated/clientset/fake:go_default_library",
+        "//pkg/client/informers/informers_generated:go_default_library",
+        "//pkg/client/informers/informers_generated/core/v1:go_default_library",
+        "//pkg/client/informers/informers_generated/extensions/v1beta1:go_default_library",
         "//pkg/cloudprovider:go_default_library",
         "//pkg/cloudprovider/providers/fake:go_default_library",
         "//pkg/controller:go_default_library",
-        "//pkg/controller/informers:go_default_library",
         "//pkg/controller/node/testutil:go_default_library",
         "//pkg/util/node:go_default_library",
         "//vendor:github.com/golang/glog",
@@ -82,7 +86,6 @@ go_test(
         "//vendor:k8s.io/apimachinery/pkg/util/sets",
         "//vendor:k8s.io/apimachinery/pkg/util/wait",
         "//vendor:k8s.io/client-go/testing",
-        "//vendor:k8s.io/client-go/tools/cache",
         "//vendor:k8s.io/client-go/util/flowcontrol",
     ],
 )

@@ -31,7 +31,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	"k8s.io/kubernetes/pkg/client/legacylisters"
+	extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	"k8s.io/kubernetes/pkg/util/node"
@@ -47,7 +47,7 @@ const (
 
 // deletePods will delete all pods from master running on given node, and return true
 // if any pods were deleted, or were found pending deletion.
-func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, nodeName, nodeUID string, daemonStore listers.StoreToDaemonSetLister) (bool, error) {
+func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, nodeName, nodeUID string, daemonStore extensionslisters.DaemonSetLister) (bool, error) {
 	remaining := false
 	selector := fields.OneTermEqualSelector(api.PodHostField, nodeName).String()
 	options := metav1.ListOptions{FieldSelector: selector}
@@ -160,7 +160,11 @@ func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) {
 		return
 	}
 
-	nodeObj, found, err := nc.nodeStore.Store.GetByKey(pod.Spec.NodeName)
+	node, err := nc.nodeLister.Get(pod.Spec.NodeName)
+	// if there is no such node, do nothing and let the podGC clean it up.
+	if errors.IsNotFound(err) {
+		return
+	}
 	if err != nil {
 		// this can only happen if the Store.KeyFunc has a problem creating
 		// a key for the pod. If it happens once, it will happen again so
@@ -169,17 +173,11 @@ func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) {
 		return
 	}
 
-	// if there is no such node, do nothing and let the podGC clean it up.
-	if !found {
-		return
-	}
-
 	// delete terminating pods that have been scheduled on
 	// nodes that do not support graceful termination
 	// TODO(mikedanese): this can be removed when we no longer
 	// guarantee backwards compatibility of master API to kubelets with
 	// versions less than 1.1.0
-	node := nodeObj.(*v1.Node)
 	v, err := utilversion.ParseSemantic(node.Status.NodeInfo.KubeletVersion)
 	if err != nil {
 		glog.V(0).Infof("Couldn't parse version %q of node: %v", node.Status.NodeInfo.KubeletVersion, err)
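The hunks above drop the legacy StoreToDaemonSetLister and the hand-rolled (obj, found, err) node lookup in favor of generated listers. A minimal sketch of the resulting lookup pattern, not the controller's own code: the helper name and package clause below are illustrative, while the import paths and the Get/IsNotFound calls are the ones visible in the diff.

package example

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/kubernetes/pkg/api/v1"
	corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
)

// nodeForPod resolves the node a pod is bound to from the informer cache.
// A missing node is reported as "not found" rather than as an error, which is
// what lets maybeDeleteTerminatingPod fall through to the pod GC.
func nodeForPod(nodeLister corelisters.NodeLister, nodeName string) (*v1.Node, bool, error) {
	node, err := nodeLister.Get(nodeName)
	if apierrors.IsNotFound(err) {
		return nil, false, nil
	}
	if err != nil {
		return nil, false, err
	}
	return node, true, nil
}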
@@ -25,6 +25,7 @@ import (
 
 	"github.com/golang/glog"
 	apiequality "k8s.io/apimachinery/pkg/api/equality"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
@@ -39,9 +40,11 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	"k8s.io/kubernetes/pkg/client/legacylisters"
+	coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1"
+	extensionsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/extensions/v1beta1"
+	corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
+	extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/cloudprovider"
-	"k8s.io/kubernetes/pkg/controller/informers"
 	"k8s.io/kubernetes/pkg/util/metrics"
 	utilnode "k8s.io/kubernetes/pkg/util/node"
 	"k8s.io/kubernetes/pkg/util/system"
@@ -134,13 +137,15 @@ type NodeController struct {
 	// The maximum duration before a pod evicted from a node can be forcefully terminated.
 	maximumGracePeriod time.Duration
 	recorder           record.EventRecorder
-	podInformer        informers.PodInformer
-	nodeInformer       informers.NodeInformer
-	daemonSetInformer  informers.DaemonSetInformer
 
-	podStore       listers.StoreToPodLister
-	nodeStore      listers.StoreToNodeLister
-	daemonSetStore listers.StoreToDaemonSetLister
+	nodeLister         corelisters.NodeLister
+	nodeInformerSynced cache.InformerSynced
+
+	daemonSetStore          extensionslisters.DaemonSetLister
+	daemonSetInformerSynced cache.InformerSynced
+
+	podInformerSynced cache.InformerSynced
 
 	// allocate/recycle CIDRs for node if allocateNodeCIDRs == true
 	cidrAllocator CIDRAllocator
@@ -155,13 +160,6 @@ type NodeController struct {
 	secondaryEvictionLimiterQPS float32
 	largeClusterThreshold       int32
 	unhealthyZoneThreshold      float32
-
-	// internalPodInformer is used to hold a personal informer. If we're using
-	// a normal shared informer, then the informer will be started for us. If
-	// we have a personal informer, we must start it ourselves. If you start
-	// the controller using NewDaemonSetsController(passing SharedInformer), this
-	// will be null
-	internalPodInformer cache.SharedIndexInformer
 }
 
 // NewNodeController returns a new node controller to sync instances from cloudprovider.
@@ -169,9 +167,9 @@ type NodeController struct {
 // podCIDRs it has already allocated to nodes. Since we don't allow podCIDR changes
 // currently, this should be handled as a fatal error.
 func NewNodeController(
-	podInformer informers.PodInformer,
-	nodeInformer informers.NodeInformer,
-	daemonSetInformer informers.DaemonSetInformer,
+	podInformer coreinformers.PodInformer,
+	nodeInformer coreinformers.NodeInformer,
+	daemonSetInformer extensionsinformers.DaemonSetInformer,
 	cloud cloudprovider.Interface,
 	kubeClient clientset.Interface,
 	podEvictionTimeout time.Duration,
@@ -234,9 +232,6 @@ func NewNodeController(
 		largeClusterThreshold:       largeClusterThreshold,
 		unhealthyZoneThreshold:      unhealthyZoneThreshold,
 		zoneStates:                  make(map[string]zoneState),
-		podInformer:                 podInformer,
-		nodeInformer:                nodeInformer,
-		daemonSetInformer:           daemonSetInformer,
 	}
 	nc.enterPartialDisruptionFunc = nc.ReducedQPSFunc
 	nc.enterFullDisruptionFunc = nc.HealthyQPSFunc
@@ -246,7 +241,7 @@ func NewNodeController(
 		AddFunc:    nc.maybeDeleteTerminatingPod,
 		UpdateFunc: func(_, obj interface{}) { nc.maybeDeleteTerminatingPod(obj) },
 	})
-	nc.podStore = *podInformer.Lister()
+	nc.podInformerSynced = podInformer.Informer().HasSynced
 
 	nodeEventHandlerFuncs := cache.ResourceEventHandlerFuncs{}
 	if nc.allocateNodeCIDRs {
@@ -347,9 +342,11 @@ func NewNodeController(
 	}
 
 	nodeInformer.Informer().AddEventHandler(nodeEventHandlerFuncs)
-	nc.nodeStore = *nodeInformer.Lister()
+	nc.nodeLister = nodeInformer.Lister()
+	nc.nodeInformerSynced = nodeInformer.Informer().HasSynced
 
-	nc.daemonSetStore = *daemonSetInformer.Lister()
+	nc.daemonSetStore = daemonSetInformer.Lister()
+	nc.daemonSetInformerSynced = daemonSetInformer.Informer().HasSynced
 
 	return nc, nil
 }
@@ -359,8 +356,8 @@ func (nc *NodeController) Run() {
 	go func() {
 		defer utilruntime.HandleCrash()
 
-		if !cache.WaitForCacheSync(wait.NeverStop, nc.nodeInformer.Informer().HasSynced, nc.podInformer.Informer().HasSynced, nc.daemonSetInformer.Informer().HasSynced) {
-			utilruntime.HandleError(errors.New("NodeController timed out while waiting for informers to sync..."))
+		if !cache.WaitForCacheSync(wait.NeverStop, nc.nodeInformerSynced, nc.podInformerSynced, nc.daemonSetInformerSynced) {
+			utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
 			return
 		}
 
@@ -379,13 +376,13 @@ func (nc *NodeController) Run() {
 			defer nc.evictorLock.Unlock()
 			for k := range nc.zonePodEvictor {
 				nc.zonePodEvictor[k].Try(func(value TimedValue) (bool, time.Duration) {
-					obj, exists, err := nc.nodeStore.GetByKey(value.Value)
+					node, err := nc.nodeLister.Get(value.Value)
+					if apierrors.IsNotFound(err) {
+						glog.Warningf("Node %v no longer present in nodeLister!", value.Value)
+					}
 					if err != nil {
-						glog.Warningf("Failed to get Node %v from the nodeStore: %v", value.Value, err)
-					} else if !exists {
-						glog.Warningf("Node %v no longer present in nodeStore!", value.Value)
+						glog.Warningf("Failed to get Node %v from the nodeLister: %v", value.Value, err)
 					} else {
-						node, _ := obj.(*v1.Node)
 						zone := utilnode.GetZoneKey(node)
 						EvictionsNumber.WithLabelValues(zone).Inc()
 					}
@@ -413,11 +410,11 @@ func (nc *NodeController) Run() {
 func (nc *NodeController) monitorNodeStatus() error {
 	// We are listing nodes from local cache as we can tolerate some small delays
 	// comparing to state from etcd and there is eventual consistency anyway.
-	nodes, err := nc.nodeStore.List()
+	nodes, err := nc.nodeLister.List(labels.Everything())
 	if err != nil {
 		return err
 	}
-	added, deleted := nc.checkForNodeAddedDeleted(&nodes)
+	added, deleted := nc.checkForNodeAddedDeleted(nodes)
 	for i := range added {
 		glog.V(1).Infof("NodeController observed a new Node: %#v", added[i].Name)
 		recordNodeEvent(nc.recorder, added[i].Name, string(added[i].UID), v1.EventTypeNormal, "RegisteredNode", fmt.Sprintf("Registered Node %v in NodeController", added[i].Name))
@@ -442,11 +439,11 @@ func (nc *NodeController) monitorNodeStatus() error {
 	}
 
 	zoneToNodeConditions := map[string][]*v1.NodeCondition{}
-	for i := range nodes.Items {
+	for i := range nodes {
 		var gracePeriod time.Duration
 		var observedReadyCondition v1.NodeCondition
 		var currentReadyCondition *v1.NodeCondition
-		nodeCopy, err := api.Scheme.DeepCopy(&nodes.Items[i])
+		nodeCopy, err := api.Scheme.DeepCopy(nodes[i])
 		if err != nil {
 			utilruntime.HandleError(err)
 			continue
@@ -528,12 +525,12 @@ func (nc *NodeController) monitorNodeStatus() error {
 			}
 		}
 	}
-	nc.handleDisruption(zoneToNodeConditions, &nodes)
+	nc.handleDisruption(zoneToNodeConditions, nodes)
 
 	return nil
 }
 
-func (nc *NodeController) handleDisruption(zoneToNodeConditions map[string][]*v1.NodeCondition, nodes *v1.NodeList) {
+func (nc *NodeController) handleDisruption(zoneToNodeConditions map[string][]*v1.NodeCondition, nodes []*v1.Node) {
 	newZoneStates := map[string]zoneState{}
 	allAreFullyDisrupted := true
 	for k, v := range zoneToNodeConditions {
@@ -574,8 +571,8 @@ func (nc *NodeController) handleDisruption(zoneToNodeConditions map[string][]*v1
 	// We're switching to full disruption mode
 	if allAreFullyDisrupted {
 		glog.V(0).Info("NodeController detected that all Nodes are not-Ready. Entering master disruption mode.")
-		for i := range nodes.Items {
-			nc.cancelPodEviction(&nodes.Items[i])
+		for i := range nodes {
+			nc.cancelPodEviction(nodes[i])
 		}
 		// We stop all evictions.
 		for k := range nc.zonePodEvictor {
@@ -592,11 +589,11 @@ func (nc *NodeController) handleDisruption(zoneToNodeConditions map[string][]*v1
 		glog.V(0).Info("NodeController detected that some Nodes are Ready. Exiting master disruption mode.")
 		// When exiting disruption mode update probe timestamps on all Nodes.
 		now := nc.now()
-		for i := range nodes.Items {
-			v := nc.nodeStatusMap[nodes.Items[i].Name]
+		for i := range nodes {
+			v := nc.nodeStatusMap[nodes[i].Name]
 			v.probeTimestamp = now
 			v.readyTransitionTimestamp = now
-			nc.nodeStatusMap[nodes.Items[i].Name] = v
+			nc.nodeStatusMap[nodes[i].Name] = v
 		}
 		// We reset all rate limiters to settings appropriate for the given state.
 		for k := range nc.zonePodEvictor {
@@ -797,21 +794,21 @@ func (nc *NodeController) tryUpdateNodeStatus(node *v1.Node) (time.Duration, v1.
 	return gracePeriod, observedReadyCondition, currentReadyCondition, err
 }
 
-func (nc *NodeController) checkForNodeAddedDeleted(nodes *v1.NodeList) (added, deleted []*v1.Node) {
-	for i := range nodes.Items {
-		if _, has := nc.knownNodeSet[nodes.Items[i].Name]; !has {
-			added = append(added, &nodes.Items[i])
+func (nc *NodeController) checkForNodeAddedDeleted(nodes []*v1.Node) (added, deleted []*v1.Node) {
+	for i := range nodes {
+		if _, has := nc.knownNodeSet[nodes[i].Name]; !has {
+			added = append(added, nodes[i])
 		}
 	}
 	// If there's a difference between lengths of known Nodes and observed nodes
 	// we must have removed some Node.
-	if len(nc.knownNodeSet)+len(added) != len(nodes.Items) {
+	if len(nc.knownNodeSet)+len(added) != len(nodes) {
 		knowSetCopy := map[string]*v1.Node{}
 		for k, v := range nc.knownNodeSet {
 			knowSetCopy[k] = v
 		}
-		for i := range nodes.Items {
-			delete(knowSetCopy, nodes.Items[i].Name)
+		for i := range nodes {
+			delete(knowSetCopy, nodes[i].Name)
 		}
 		for i := range knowSetCopy {
 			deleted = append(deleted, knowSetCopy[i])
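Taken together, the NodeController hunks show the wiring shape this series moves every controller toward: keep only the Lister() and the Informer().HasSynced function from each generated informer, and gate the run loop on cache.WaitForCacheSync. A hedged, stand-alone sketch of that shape under the package layout in this tree; the exampleController type is illustrative and not part of the change.

package example

import (
	"fmt"
	"time"

	"github.com/golang/glog"
	"k8s.io/apimachinery/pkg/labels"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/tools/cache"
	coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1"
	corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
)

type exampleController struct {
	nodeLister         corelisters.NodeLister
	nodeInformerSynced cache.InformerSynced
}

func newExampleController(nodeInformer coreinformers.NodeInformer) *exampleController {
	// The controller only captures the lister and the HasSynced func; the
	// shared informer itself is owned and started by the factory.
	return &exampleController{
		nodeLister:         nodeInformer.Lister(),
		nodeInformerSynced: nodeInformer.Informer().HasSynced,
	}
}

func (c *exampleController) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	if !cache.WaitForCacheSync(stopCh, c.nodeInformerSynced) {
		utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
		return
	}
	wait.Until(func() {
		// Generated listers return []*v1.Node rather than a v1.NodeList.
		nodes, err := c.nodeLister.List(labels.Everything())
		if err != nil {
			utilruntime.HandleError(err)
			return
		}
		glog.V(4).Infof("observed %d nodes", len(nodes))
	}, 10*time.Second, stopCh)
}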
@@ -29,15 +29,16 @@ import (
 	"k8s.io/apimachinery/pkg/util/diff"
 	"k8s.io/apimachinery/pkg/util/wait"
 	testcore "k8s.io/client-go/testing"
-	"k8s.io/client-go/tools/cache"
 	"k8s.io/kubernetes/pkg/api/v1"
 	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
+	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
+	coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1"
+	extensionsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
 	"k8s.io/kubernetes/pkg/controller"
-	"k8s.io/kubernetes/pkg/controller/informers"
 	"k8s.io/kubernetes/pkg/controller/node/testutil"
 	"k8s.io/kubernetes/pkg/util/node"
 )
@@ -51,6 +52,14 @@ const (
 	testUnhealtyThreshold = float32(0.55)
 )
 
+func alwaysReady() bool { return true }
+
+type nodeController struct {
+	*NodeController
+	nodeInformer      coreinformers.NodeInformer
+	daemonSetInformer extensionsinformers.DaemonSetInformer
+}
+
 func NewNodeControllerFromClient(
 	cloud cloudprovider.Interface,
 	kubeClient clientset.Interface,
@@ -65,21 +74,44 @@ func NewNodeControllerFromClient(
 	clusterCIDR *net.IPNet,
 	serviceCIDR *net.IPNet,
 	nodeCIDRMaskSize int,
-	allocateNodeCIDRs bool) (*NodeController, error) {
+	allocateNodeCIDRs bool) (*nodeController, error) {
 
-	factory := informers.NewSharedInformerFactory(kubeClient, nil, controller.NoResyncPeriodFunc())
+	factory := informers.NewSharedInformerFactory(nil, kubeClient, controller.NoResyncPeriodFunc())
 
-	nc, err := NewNodeController(factory.Pods(), factory.Nodes(), factory.DaemonSets(), cloud, kubeClient, podEvictionTimeout, evictionLimiterQPS, secondaryEvictionLimiterQPS,
-		largeClusterThreshold, unhealthyZoneThreshold, nodeMonitorGracePeriod, nodeStartupGracePeriod, nodeMonitorPeriod, clusterCIDR,
-		serviceCIDR, nodeCIDRMaskSize, allocateNodeCIDRs)
+	nodeInformer := factory.Core().V1().Nodes()
+	daemonSetInformer := factory.Extensions().V1beta1().DaemonSets()
+
+	nc, err := NewNodeController(
+		factory.Core().V1().Pods(),
+		nodeInformer,
+		daemonSetInformer,
+		cloud,
+		kubeClient,
+		podEvictionTimeout,
+		evictionLimiterQPS,
+		secondaryEvictionLimiterQPS,
+		largeClusterThreshold,
+		unhealthyZoneThreshold,
+		nodeMonitorGracePeriod,
+		nodeStartupGracePeriod,
+		nodeMonitorPeriod,
+		clusterCIDR,
+		serviceCIDR,
+		nodeCIDRMaskSize,
+		allocateNodeCIDRs,
+	)
 	if err != nil {
 		return nil, err
 	}
 
-	return nc, nil
+	nc.podInformerSynced = alwaysReady
+	nc.nodeInformerSynced = alwaysReady
+	nc.daemonSetInformerSynced = alwaysReady
+
+	return &nodeController{nc, nodeInformer, daemonSetInformer}, nil
 }
 
-func syncNodeStore(nc *NodeController, fakeNodeHandler *testutil.FakeNodeHandler) error {
+func syncNodeStore(nc *nodeController, fakeNodeHandler *testutil.FakeNodeHandler) error {
 	nodes, err := fakeNodeHandler.List(metav1.ListOptions{})
 	if err != nil {
 		return err
@@ -88,7 +120,7 @@ func syncNodeStore(nc *NodeController, fakeNodeHandler *testutil.FakeNodeHandler
 	for i := range nodes.Items {
 		newElems = append(newElems, &nodes.Items[i])
 	}
-	return nc.nodeStore.Replace(newElems, "newRV")
+	return nc.nodeInformer.Informer().GetStore().Replace(newElems, "newRV")
 }
 
 func TestMonitorNodeStatusEvictPods(t *testing.T) {
@@ -518,7 +550,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
 			testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false)
 		nodeController.now = func() metav1.Time { return fakeNow }
 		for _, ds := range item.daemonSets {
-			nodeController.daemonSetStore.Add(&ds)
+			nodeController.daemonSetInformer.Informer().GetStore().Add(&ds)
 		}
 		if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil {
 			t.Errorf("unexpected error: %v", err)
@@ -541,7 +573,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
 		for _, zone := range zones {
 			nodeController.zonePodEvictor[zone].Try(func(value TimedValue) (bool, time.Duration) {
 				nodeUid, _ := value.UID.(string)
-				deletePods(item.fakeNodeHandler, nodeController.recorder, value.Value, nodeUid, nodeController.daemonSetStore)
+				deletePods(item.fakeNodeHandler, nodeController.recorder, value.Value, nodeUid, nodeController.daemonSetInformer.Lister())
 				return true, 0
 			})
 		}
@@ -1910,8 +1942,7 @@ func TestCheckPod(t *testing.T) {
 	}
 
 	nc, _ := NewNodeControllerFromClient(nil, fake.NewSimpleClientset(), 0, 0, 0, 0, 0, 0, 0, 0, nil, nil, 0, false)
-	nc.nodeStore.Store = cache.NewStore(cache.MetaNamespaceKeyFunc)
-	nc.nodeStore.Store.Add(&v1.Node{
+	nc.nodeInformer.Informer().GetStore().Add(&v1.Node{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "new",
 		},
@@ -1921,7 +1952,7 @@ func TestCheckPod(t *testing.T) {
 			},
 		},
 	})
-	nc.nodeStore.Store.Add(&v1.Node{
+	nc.nodeInformer.Informer().GetStore().Add(&v1.Node{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "old",
 		},
@@ -1931,7 +1962,7 @@ func TestCheckPod(t *testing.T) {
 			},
 		},
 	})
-	nc.nodeStore.Store.Add(&v1.Node{
+	nc.nodeInformer.Informer().GetStore().Add(&v1.Node{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "older",
 		},
@@ -1941,7 +1972,7 @@ func TestCheckPod(t *testing.T) {
 			},
 		},
 	})
-	nc.nodeStore.Store.Add(&v1.Node{
+	nc.nodeInformer.Informer().GetStore().Add(&v1.Node{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "oldest",
 		},
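The test file now builds its fixtures the same way the controller does: construct a shared informer factory around the fake clientset, hand the typed informers to NewNodeController, stub the *InformerSynced funcs with alwaysReady, and seed objects straight into the informer-backed stores. A small, hedged sketch of just the seeding step; the helper name and node name are made up, the calls mirror those shown above.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
	"k8s.io/kubernetes/pkg/controller"
)

// newSeededFactory returns a factory whose node informer store already holds
// one node, without the informer ever being started; a test that marks the
// controller's sync funcs as alwaysReady can then read it through the lister.
func newSeededFactory() informers.SharedInformerFactory {
	client := fake.NewSimpleClientset()
	factory := informers.NewSharedInformerFactory(nil, client, controller.NoResyncPeriodFunc())

	factory.Core().V1().Nodes().Informer().GetStore().Add(&v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "node0"},
	})
	return factory
}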
@@ -18,9 +18,8 @@ go_library(
     deps = [
         "//pkg/api/v1:go_default_library",
         "//pkg/client/clientset_generated/clientset:go_default_library",
-        "//pkg/client/legacylisters:go_default_library",
-        "//pkg/controller:go_default_library",
-        "//pkg/controller/informers:go_default_library",
+        "//pkg/client/informers/informers_generated/core/v1:go_default_library",
+        "//pkg/client/listers/core/v1:go_default_library",
         "//pkg/util/metrics:go_default_library",
         "//vendor:github.com/golang/glog",
         "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
@@ -39,7 +38,11 @@ go_test(
     tags = ["automanaged"],
    deps = [
         "//pkg/api/v1:go_default_library",
+        "//pkg/client/clientset_generated/clientset:go_default_library",
         "//pkg/client/clientset_generated/clientset/fake:go_default_library",
+        "//pkg/client/informers/informers_generated:go_default_library",
+        "//pkg/client/informers/informers_generated/core/v1:go_default_library",
+        "//pkg/controller:go_default_library",
         "//pkg/controller/node/testutil:go_default_library",
         "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
         "//vendor:k8s.io/apimachinery/pkg/labels",
@@ -17,6 +17,7 @@ limitations under the License.
 package podgc
 
 import (
+	"fmt"
 	"sort"
 	"sync"
 	"time"
@@ -29,9 +30,8 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	"k8s.io/kubernetes/pkg/client/legacylisters"
-	"k8s.io/kubernetes/pkg/controller"
-	"k8s.io/kubernetes/pkg/controller/informers"
+	coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1"
+	corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
 	"k8s.io/kubernetes/pkg/util/metrics"
 
 	"github.com/golang/glog"
@@ -44,21 +44,14 @@ const (
 type PodGCController struct {
 	kubeClient clientset.Interface
 
-	// internalPodInformer is used to hold a personal informer. If we're using
-	// a normal shared informer, then the informer will be started for us. If
-	// we have a personal informer, we must start it ourselves. If you start
-	// the controller using NewPodGC(..., passing SharedInformer, ...), this
-	// will be null
-	internalPodInformer cache.SharedIndexInformer
-
-	podStore      listers.StoreToPodLister
-	podController cache.Controller
+	podLister       corelisters.PodLister
+	podListerSynced cache.InformerSynced
 
 	deletePod              func(namespace, name string) error
 	terminatedPodThreshold int
 }
 
-func NewPodGC(kubeClient clientset.Interface, podInformer cache.SharedIndexInformer, terminatedPodThreshold int) *PodGCController {
+func NewPodGC(kubeClient clientset.Interface, podInformer coreinformers.PodInformer, terminatedPodThreshold int) *PodGCController {
 	if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
 		metrics.RegisterMetricAndTrackRateLimiterUsage("gc_controller", kubeClient.Core().RESTClient().GetRateLimiter())
 	}
@@ -71,36 +64,24 @@ func NewPodGC(kubeClient clientset.Interface, podInformer cache.SharedIndexInfor
 		},
 	}
 
-	gcc.podStore.Indexer = podInformer.GetIndexer()
-	gcc.podController = podInformer.GetController()
+	gcc.podLister = podInformer.Lister()
+	gcc.podListerSynced = podInformer.Informer().HasSynced
 
 	return gcc
 }
 
-func NewFromClient(
-	kubeClient clientset.Interface,
-	terminatedPodThreshold int,
-) *PodGCController {
-	podInformer := informers.NewPodInformer(kubeClient, controller.NoResyncPeriodFunc())
-	controller := NewPodGC(kubeClient, podInformer, terminatedPodThreshold)
-	controller.internalPodInformer = podInformer
-	return controller
-}
-
 func (gcc *PodGCController) Run(stop <-chan struct{}) {
-	if gcc.internalPodInformer != nil {
-		go gcc.podController.Run(stop)
+	if !cache.WaitForCacheSync(stop, gcc.podListerSynced) {
+		utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
+		return
 	}
 
 	go wait.Until(gcc.gc, gcCheckPeriod, stop)
 	<-stop
 }
 
 func (gcc *PodGCController) gc() {
-	if !gcc.podController.HasSynced() {
-		glog.V(2).Infof("PodGCController is waiting for informer sync...")
-		return
-	}
-	pods, err := gcc.podStore.List(labels.Everything())
+	pods, err := gcc.podLister.List(labels.Everything())
 	if err != nil {
 		glog.Errorf("Error while listing all Pods: %v", err)
 		return
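With the personal informer and podController gone, the GC path reduces to a single lister call. A minimal sketch of that style of read, assuming this tree's package paths; the counting helper is illustrative and not the controller's own gc().

package example

import (
	"github.com/golang/glog"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/kubernetes/pkg/api/v1"
	corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
)

// countTerminatedPods lists from the shared cache via the generated PodLister
// and inspects the returned []*v1.Pod slice; the objects are shared cache
// state, so they are treated as read-only here.
func countTerminatedPods(podLister corelisters.PodLister) int {
	pods, err := podLister.List(labels.Everything())
	if err != nil {
		glog.Errorf("Error while listing all Pods: %v", err)
		return 0
	}
	terminated := 0
	for _, pod := range pods {
		if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
			terminated++
		}
	}
	return terminated
}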
@@ -25,7 +25,11 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/kubernetes/pkg/api/v1"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
+	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
+	coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1"
+	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/node/testutil"
 )
 
@@ -41,6 +45,16 @@ func (*FakeController) LastSyncResourceVersion() string {
 	return ""
 }
 
+func alwaysReady() bool { return true }
+
+func NewFromClient(kubeClient clientset.Interface, terminatedPodThreshold int) (*PodGCController, coreinformers.PodInformer) {
+	informerFactory := informers.NewSharedInformerFactory(nil, kubeClient, controller.NoResyncPeriodFunc())
+	podInformer := informerFactory.Core().V1().Pods()
+	controller := NewPodGC(kubeClient, podInformer, terminatedPodThreshold)
+	controller.podListerSynced = alwaysReady
+	return controller, podInformer
+}
+
 func TestGCTerminated(t *testing.T) {
 	type nameToPhase struct {
 		name string
@@ -99,7 +113,7 @@ func TestGCTerminated(t *testing.T) {
 
 	for i, test := range testCases {
 		client := fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*testutil.NewNode("node")}})
-		gcc := NewFromClient(client, test.threshold)
+		gcc, podInformer := NewFromClient(client, test.threshold)
 		deletedPodNames := make([]string, 0)
 		var lock sync.Mutex
 		gcc.deletePod = func(_, name string) error {
@@ -112,15 +126,13 @@ func TestGCTerminated(t *testing.T) {
 		creationTime := time.Unix(0, 0)
 		for _, pod := range test.pods {
 			creationTime = creationTime.Add(1 * time.Hour)
-			gcc.podStore.Indexer.Add(&v1.Pod{
+			podInformer.Informer().GetStore().Add(&v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{Name: pod.name, CreationTimestamp: metav1.Time{Time: creationTime}},
 				Status:     v1.PodStatus{Phase: pod.phase},
 				Spec:       v1.PodSpec{NodeName: "node"},
 			})
 		}
 
-		gcc.podController = &FakeController{}
-
 		gcc.gc()
 
 		pass := true
@@ -168,7 +180,7 @@ func TestGCOrphaned(t *testing.T) {
 
 	for i, test := range testCases {
 		client := fake.NewSimpleClientset()
-		gcc := NewFromClient(client, test.threshold)
+		gcc, podInformer := NewFromClient(client, test.threshold)
 		deletedPodNames := make([]string, 0)
 		var lock sync.Mutex
 		gcc.deletePod = func(_, name string) error {
@@ -181,16 +193,14 @@ func TestGCOrphaned(t *testing.T) {
 		creationTime := time.Unix(0, 0)
 		for _, pod := range test.pods {
 			creationTime = creationTime.Add(1 * time.Hour)
-			gcc.podStore.Indexer.Add(&v1.Pod{
+			podInformer.Informer().GetStore().Add(&v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{Name: pod.name, CreationTimestamp: metav1.Time{Time: creationTime}},
 				Status:     v1.PodStatus{Phase: pod.phase},
 				Spec:       v1.PodSpec{NodeName: "node"},
 			})
 		}
 
-		gcc.podController = &FakeController{}
-
-		pods, err := gcc.podStore.List(labels.Everything())
+		pods, err := podInformer.Lister().List(labels.Everything())
 		if err != nil {
 			t.Errorf("Error while listing all Pods: %v", err)
 			return
@@ -247,7 +257,7 @@ func TestGCUnscheduledTerminating(t *testing.T) {
 
 	for i, test := range testCases {
 		client := fake.NewSimpleClientset()
-		gcc := NewFromClient(client, -1)
+		gcc, podInformer := NewFromClient(client, -1)
 		deletedPodNames := make([]string, 0)
 		var lock sync.Mutex
 		gcc.deletePod = func(_, name string) error {
@@ -260,7 +270,7 @@ func TestGCUnscheduledTerminating(t *testing.T) {
 		creationTime := time.Unix(0, 0)
 		for _, pod := range test.pods {
 			creationTime = creationTime.Add(1 * time.Hour)
-			gcc.podStore.Indexer.Add(&v1.Pod{
+			podInformer.Informer().GetStore().Add(&v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{Name: pod.name, CreationTimestamp: metav1.Time{Time: creationTime},
 					DeletionTimestamp: pod.deletionTimeStamp},
 				Status: v1.PodStatus{Phase: pod.phase},
@@ -268,9 +278,7 @@ func TestGCUnscheduledTerminating(t *testing.T) {
 			})
 		}
 
-		gcc.podController = &FakeController{}
-
-		pods, err := gcc.podStore.List(labels.Everything())
+		pods, err := podInformer.Lister().List(labels.Everything())
 		if err != nil {
 			t.Errorf("Error while listing all Pods: %v", err)
 			return
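A short usage sketch of the test-only NewFromClient helper added above: it now hands back the pod informer alongside the controller, so tests seed pods into that informer's store and call gc() directly instead of poking podStore.Indexer. The test name, threshold, and pod fixture below are illustrative only.

package podgc

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
)

func TestGCSketch(t *testing.T) {
	client := fake.NewSimpleClientset()
	gcc, podInformer := NewFromClient(client, 1)

	// Seed one failed pod; podListerSynced is already stubbed to alwaysReady
	// by NewFromClient, so gc() reads this straight from the store.
	podInformer.Informer().GetStore().Add(&v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "terminated-pod"},
		Status:     v1.PodStatus{Phase: v1.PodFailed},
		Spec:       v1.PodSpec{NodeName: "node"},
	})

	gcc.gc()
}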
@@ -22,9 +22,11 @@ go_library(
         "//pkg/apis/extensions/v1beta1:go_default_library",
         "//pkg/client/clientset_generated/clientset:go_default_library",
         "//pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library",
-        "//pkg/client/legacylisters:go_default_library",
+        "//pkg/client/informers/informers_generated/core/v1:go_default_library",
+        "//pkg/client/informers/informers_generated/extensions/v1beta1:go_default_library",
+        "//pkg/client/listers/core/v1:go_default_library",
+        "//pkg/client/listers/extensions/v1beta1:go_default_library",
         "//pkg/controller:go_default_library",
-        "//pkg/controller/informers:go_default_library",
         "//pkg/util/metrics:go_default_library",
         "//vendor:github.com/golang/glog",
         "//vendor:k8s.io/apimachinery/pkg/api/errors",
@@ -54,9 +56,8 @@ go_test(
         "//pkg/apis/extensions/v1beta1:go_default_library",
         "//pkg/client/clientset_generated/clientset:go_default_library",
         "//pkg/client/clientset_generated/clientset/fake:go_default_library",
-        "//pkg/client/legacylisters:go_default_library",
+        "//pkg/client/informers/informers_generated:go_default_library",
         "//pkg/controller:go_default_library",
-        "//pkg/controller/informers:go_default_library",
         "//pkg/securitycontext:go_default_library",
         "//vendor:k8s.io/apimachinery/pkg/api/equality",
         "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
@@ -43,9 +43,11 @@ import (
 	"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	"k8s.io/kubernetes/pkg/client/legacylisters"
+	coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1"
+	extensionsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/extensions/v1beta1"
+	corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
+	extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/controller"
-	"k8s.io/kubernetes/pkg/controller/informers"
 	"k8s.io/kubernetes/pkg/util/metrics"
 )
 
@@ -77,10 +79,14 @@ type ReplicaSetController struct {
 	// A TTLCache of pod creates/deletes each rc expects to see.
 	expectations *controller.UIDTrackingControllerExpectations
 
-	// A store of ReplicaSets, populated by the rsController
-	rsLister *listers.StoreToReplicaSetLister
-	// A store of pods, populated by the podController
-	podLister *listers.StoreToPodLister
+	// A store of ReplicaSets, populated by the shared informer passed to NewReplicaSetController
+	rsLister extensionslisters.ReplicaSetLister
+	// rsListerSynced returns true if the pod store has been synced at least once.
+	// Added as a member to the struct to allow injection for testing.
+	rsListerSynced cache.InformerSynced
+
+	// A store of pods, populated by the shared informer passed to NewReplicaSetController
+	podLister corelisters.PodLister
 	// podListerSynced returns true if the pod store has been synced at least once.
 	// Added as a member to the struct to allow injection for testing.
 	podListerSynced cache.InformerSynced
@@ -96,7 +102,7 @@ type ReplicaSetController struct {
 }
 
 // NewReplicaSetController configures a replica set controller with the specified event recorder
-func NewReplicaSetController(rsInformer informers.ReplicaSetInformer, podInformer informers.PodInformer, kubeClient clientset.Interface, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicaSetController {
+func NewReplicaSetController(rsInformer extensionsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicaSetController {
 	if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
 		metrics.RegisterMetricAndTrackRateLimiterUsage("replicaset_controller", kubeClient.Core().RESTClient().GetRateLimiter())
 	}
@@ -124,6 +130,9 @@ func NewReplicaSetController(rsInformer informers.ReplicaSetInformer, podInforme
 		// way of achieving this is by performing a `stop` operation on the replica set.
 		DeleteFunc: rsc.enqueueReplicaSet,
 	})
+	rsc.rsLister = rsInformer.Lister()
+	rsc.rsListerSynced = rsInformer.Informer().HasSynced
+
 	podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: rsc.addPod,
 		// This invokes the ReplicaSet for every pod change, eg: host assignment. Though this might seem like
@@ -132,12 +141,12 @@ func NewReplicaSetController(rsInformer informers.ReplicaSetInformer, podInforme
 		UpdateFunc: rsc.updatePod,
 		DeleteFunc: rsc.deletePod,
 	})
-
-	rsc.syncHandler = rsc.syncReplicaSet
-	rsc.rsLister = rsInformer.Lister()
 	rsc.podLister = podInformer.Lister()
 	rsc.podListerSynced = podInformer.Informer().HasSynced
 
+	rsc.syncHandler = rsc.syncReplicaSet
 	rsc.lookupCache = controller.NewMatchingCache(lookupCacheSize)
 
 	return rsc
 }
 
@@ -156,7 +165,8 @@ func (rsc *ReplicaSetController) Run(workers int, stopCh <-chan struct{}) {
 
 	glog.Infof("Starting ReplicaSet controller")
 
-	if !cache.WaitForCacheSync(stopCh, rsc.podListerSynced) {
+	if !cache.WaitForCacheSync(stopCh, rsc.podListerSynced, rsc.rsListerSynced) {
+		utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
 		return
 	}
 
@@ -561,16 +571,19 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
 		glog.V(4).Infof("Finished syncing replica set %q (%v)", key, time.Now().Sub(startTime))
 	}()
 
-	obj, exists, err := rsc.rsLister.Indexer.GetByKey(key)
+	namespace, name, err := cache.SplitMetaNamespaceKey(key)
 	if err != nil {
 		return err
 	}
-	if !exists {
+	rs, err := rsc.rsLister.ReplicaSets(namespace).Get(name)
+	if errors.IsNotFound(err) {
 		glog.V(4).Infof("ReplicaSet has been deleted %v", key)
 		rsc.expectations.DeleteExpectations(key)
 		return nil
 	}
-	rs := *obj.(*extensions.ReplicaSet)
+	if err != nil {
+		return err
+	}
 
 	rsNeedsSync := rsc.expectations.SatisfiedExpectations(key)
 	selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
@@ -633,9 +646,15 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
 
 	var manageReplicasErr error
 	if rsNeedsSync && rs.DeletionTimestamp == nil {
-		manageReplicasErr = rsc.manageReplicas(filteredPods, &rs)
+		manageReplicasErr = rsc.manageReplicas(filteredPods, rs)
 	}
 
+	copy, err := api.Scheme.DeepCopy(rs)
+	if err != nil {
+		return err
+	}
+	rs = copy.(*extensions.ReplicaSet)
+
 	newStatus := calculateStatus(rs, filteredPods, manageReplicasErr)
 
 	// Always updates status as pods come up or die.
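The syncReplicaSet hunks establish the retrieval pattern other controllers copy in follow-ups: split the work-queue key, fetch through the namespaced generated lister, treat NotFound as deletion, and deep-copy before mutating status, because lister objects are shared cache state. A condensed, hedged sketch of just that step; the helper name is illustrative.

package example

import (
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/client-go/tools/cache"
	"k8s.io/kubernetes/pkg/api"
	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
	extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1"
)

// replicaSetForKey resolves a queue key to a ReplicaSet from the lister cache.
// A nil, nil return means the object is gone and the caller should clear its
// expectations; the DeepCopy protects the shared cache from later mutation.
func replicaSetForKey(rsLister extensionslisters.ReplicaSetLister, key string) (*extensions.ReplicaSet, error) {
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return nil, err
	}
	rs, err := rsLister.ReplicaSets(namespace).Get(name)
	if errors.IsNotFound(err) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	copied, err := api.Scheme.DeepCopy(rs)
	if err != nil {
		return nil, err
	}
	return copied.(*extensions.ReplicaSet), nil
}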
@ -46,19 +46,27 @@ import (
|
||||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
|
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
|
||||||
fakeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
|
fakeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
|
||||||
"k8s.io/kubernetes/pkg/client/legacylisters"
|
informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
|
||||||
"k8s.io/kubernetes/pkg/controller"
|
"k8s.io/kubernetes/pkg/controller"
|
||||||
"k8s.io/kubernetes/pkg/controller/informers"
|
|
||||||
"k8s.io/kubernetes/pkg/securitycontext"
|
"k8s.io/kubernetes/pkg/securitycontext"
|
||||||
)
|
)
|
||||||
|
|
||||||
func testNewReplicaSetControllerFromClient(client clientset.Interface, stopCh chan struct{}, burstReplicas int, lookupCacheSize int) *ReplicaSetController {
|
func testNewReplicaSetControllerFromClient(client clientset.Interface, stopCh chan struct{}, burstReplicas int, lookupCacheSize int) (*ReplicaSetController, informers.SharedInformerFactory) {
|
||||||
informers := informers.NewSharedInformerFactory(client, nil, controller.NoResyncPeriodFunc())
|
informers := informers.NewSharedInformerFactory(nil, client, controller.NoResyncPeriodFunc())
|
||||||
ret := NewReplicaSetController(informers.ReplicaSets(), informers.Pods(), client, burstReplicas, lookupCacheSize, false)
|
|
||||||
ret.podLister = &listers.StoreToPodLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
|
ret := NewReplicaSetController(
|
||||||
ret.rsLister = &listers.StoreToReplicaSetLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
|
informers.Extensions().V1beta1().ReplicaSets(),
|
||||||
informers.Start(stopCh)
|
informers.Core().V1().Pods(),
|
||||||
return ret
|
client,
|
||||||
|
burstReplicas,
|
||||||
|
lookupCacheSize,
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
|
||||||
|
ret.podListerSynced = alwaysReady
|
||||||
|
ret.rsListerSynced = alwaysReady
|
||||||
|
|
||||||
|
return ret, informers
|
||||||
}
|
}
|
||||||
|
|
||||||
func filterInformerActions(actions []core.Action) []core.Action {
|
func filterInformerActions(actions []core.Action) []core.Action {
|
||||||
|
@ -203,14 +211,13 @@ func TestSyncReplicaSetDoesNothing(t *testing.T) {
|
||||||
fakePodControl := controller.FakePodControl{}
|
fakePodControl := controller.FakePodControl{}
|
||||||
stopCh := make(chan struct{})
|
stopCh := make(chan struct{})
|
||||||
defer close(stopCh)
|
defer close(stopCh)
|
||||||
manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
|
manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
|
||||||
manager.podListerSynced = alwaysReady
|
|
||||||
|
|
||||||
// 2 running pods, a controller with 2 replicas, sync is a no-op
|
// 2 running pods, a controller with 2 replicas, sync is a no-op
|
||||||
labelMap := map[string]string{"foo": "bar"}
|
labelMap := map[string]string{"foo": "bar"}
|
||||||
rsSpec := newReplicaSet(2, labelMap)
|
rsSpec := newReplicaSet(2, labelMap)
|
||||||
manager.rsLister.Indexer.Add(rsSpec)
|
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
|
||||||
newPodList(manager.podLister.Indexer, 2, v1.PodRunning, labelMap, rsSpec, "pod")
|
newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 2, v1.PodRunning, labelMap, rsSpec, "pod")
|
||||||
|
|
||||||
manager.podControl = &fakePodControl
|
manager.podControl = &fakePodControl
|
||||||
manager.syncReplicaSet(getKey(rsSpec, t))
|
manager.syncReplicaSet(getKey(rsSpec, t))
|
||||||
|
@ -222,15 +229,14 @@ func TestSyncReplicaSetDeletes(t *testing.T) {
|
||||||
fakePodControl := controller.FakePodControl{}
|
fakePodControl := controller.FakePodControl{}
|
||||||
stopCh := make(chan struct{})
|
stopCh := make(chan struct{})
|
||||||
defer close(stopCh)
|
defer close(stopCh)
|
||||||
manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
|
manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
|
||||||
manager.podListerSynced = alwaysReady
|
|
||||||
manager.podControl = &fakePodControl
|
manager.podControl = &fakePodControl
|
||||||
|
|
||||||
// 2 running pods and a controller with 1 replica, one pod delete expected
|
// 2 running pods and a controller with 1 replica, one pod delete expected
|
||||||
labelMap := map[string]string{"foo": "bar"}
|
labelMap := map[string]string{"foo": "bar"}
|
||||||
rsSpec := newReplicaSet(1, labelMap)
|
rsSpec := newReplicaSet(1, labelMap)
|
||||||
manager.rsLister.Indexer.Add(rsSpec)
|
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
|
||||||
newPodList(manager.podLister.Indexer, 2, v1.PodRunning, labelMap, rsSpec, "pod")
|
newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 2, v1.PodRunning, labelMap, rsSpec, "pod")
|
||||||
|
|
||||||
manager.syncReplicaSet(getKey(rsSpec, t))
|
manager.syncReplicaSet(getKey(rsSpec, t))
|
||||||
validateSyncReplicaSet(t, &fakePodControl, 0, 1, 0)
|
validateSyncReplicaSet(t, &fakePodControl, 0, 1, 0)
|
||||||
|
@@ -241,8 +247,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
 fakePodControl := controller.FakePodControl{}
 stopCh := make(chan struct{})
 defer close(stopCh)
-manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
-manager.podListerSynced = alwaysReady
+manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
 manager.podControl = &fakePodControl

 received := make(chan string)

@@ -255,7 +260,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
 // the controller matching the selectors of the deleted pod into the work queue.
 labelMap := map[string]string{"foo": "bar"}
 rsSpec := newReplicaSet(1, labelMap)
-manager.rsLister.Indexer.Add(rsSpec)
+informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
 pods := newPodList(nil, 1, v1.PodRunning, labelMap, rsSpec, "pod")
 manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]})

@@ -276,13 +281,12 @@ func TestSyncReplicaSetCreates(t *testing.T) {
 client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
 stopCh := make(chan struct{})
 defer close(stopCh)
-manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
-manager.podListerSynced = alwaysReady
+manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)

 // A controller with 2 replicas and no pods in the store, 2 creates expected
 labelMap := map[string]string{"foo": "bar"}
 rs := newReplicaSet(2, labelMap)
-manager.rsLister.Indexer.Add(rs)
+informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)

 fakePodControl := controller.FakePodControl{}
 manager.podControl = &fakePodControl
@@ -302,16 +306,15 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
 client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
 stopCh := make(chan struct{})
 defer close(stopCh)
-manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
-manager.podListerSynced = alwaysReady
+manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)

 // Steady state for the ReplicaSet, no Status.Replicas updates expected
 activePods := 5
 labelMap := map[string]string{"foo": "bar"}
 rs := newReplicaSet(activePods, labelMap)
-manager.rsLister.Indexer.Add(rs)
+informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
 rs.Status = extensions.ReplicaSetStatus{Replicas: int32(activePods), ReadyReplicas: int32(activePods), AvailableReplicas: int32(activePods)}
-newPodList(manager.podLister.Indexer, activePods, v1.PodRunning, labelMap, rs, "pod")
+newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), activePods, v1.PodRunning, labelMap, rs, "pod")

 fakePodControl := controller.FakePodControl{}
 manager.podControl = &fakePodControl

@@ -348,8 +351,7 @@ func TestControllerUpdateReplicas(t *testing.T) {
 client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
 stopCh := make(chan struct{})
 defer close(stopCh)
-manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
-manager.podListerSynced = alwaysReady
+manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)

 // Insufficient number of pods in the system, and Status.Replicas is wrong;
 // Status.Replica should update to match number of pods in system, 1 new pod should be created.

@@ -357,11 +359,11 @@ func TestControllerUpdateReplicas(t *testing.T) {
 extraLabelMap := map[string]string{"foo": "bar", "extraKey": "extraValue"}
 rs := newReplicaSet(5, labelMap)
 rs.Spec.Template.Labels = extraLabelMap
-manager.rsLister.Indexer.Add(rs)
+informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
 rs.Status = extensions.ReplicaSetStatus{Replicas: 2, FullyLabeledReplicas: 6, ReadyReplicas: 2, AvailableReplicas: 2, ObservedGeneration: 0}
 rs.Generation = 1
-newPodList(manager.podLister.Indexer, 2, v1.PodRunning, labelMap, rs, "pod")
-newPodList(manager.podLister.Indexer, 2, v1.PodRunning, extraLabelMap, rs, "podWithExtraLabel")
+newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 2, v1.PodRunning, labelMap, rs, "pod")
+newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 2, v1.PodRunning, extraLabelMap, rs, "podWithExtraLabel")

 // This response body is just so we don't err out decoding the http response
 response := runtime.EncodeOrDie(testapi.Extensions.Codec(), &extensions.ReplicaSet{})
@@ -398,15 +400,14 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
 fakePodControl := controller.FakePodControl{}
 stopCh := make(chan struct{})
 defer close(stopCh)
-manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
-manager.podListerSynced = alwaysReady
+manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)

 manager.podControl = &fakePodControl

 labelMap := map[string]string{"foo": "bar"}
 rsSpec := newReplicaSet(2, labelMap)
-manager.rsLister.Indexer.Add(rsSpec)
-newPodList(manager.podLister.Indexer, 1, v1.PodRunning, labelMap, rsSpec, "pod")
+informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
+newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 1, v1.PodRunning, labelMap, rsSpec, "pod")

 // Creates a replica and sets expectations
 rsSpec.Status.Replicas = 1

@@ -455,8 +456,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
 func TestPodControllerLookup(t *testing.T) {
 stopCh := make(chan struct{})
 defer close(stopCh)
-manager := testNewReplicaSetControllerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}), stopCh, BurstReplicas, 0)
-manager.podListerSynced = alwaysReady
+manager, informers := testNewReplicaSetControllerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}), stopCh, BurstReplicas, 0)
 testCases := []struct {
 inRSs []*extensions.ReplicaSet
 pod *v1.Pod
@@ -502,7 +502,7 @@ func TestPodControllerLookup(t *testing.T) {
 }
 for _, c := range testCases {
 for _, r := range c.inRSs {
-manager.rsLister.Indexer.Add(r)
+informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(r)
 }
 if rs := manager.getPodReplicaSet(c.pod); rs != nil {
 if c.outRSName != rs.Name {

@@ -521,14 +521,20 @@ type FakeWatcher struct {

 func TestWatchControllers(t *testing.T) {
 fakeWatch := watch.NewFake()
-client := &fake.Clientset{}
-client.AddWatchReactor("replicasets", core.DefaultWatchReactor(fakeWatch, nil))
+client := fake.NewSimpleClientset()
+client.PrependWatchReactor("replicasets", core.DefaultWatchReactor(fakeWatch, nil))
 stopCh := make(chan struct{})
 defer close(stopCh)
-informers := informers.NewSharedInformerFactory(client, nil, controller.NoResyncPeriodFunc())
-manager := NewReplicaSetController(informers.ReplicaSets(), informers.Pods(), client, BurstReplicas, 0, false)
+informers := informers.NewSharedInformerFactory(nil, client, controller.NoResyncPeriodFunc())
+manager := NewReplicaSetController(
+informers.Extensions().V1beta1().ReplicaSets(),
+informers.Core().V1().Pods(),
+client,
+BurstReplicas,
+0,
+false,
+)
 informers.Start(stopCh)
-manager.podListerSynced = alwaysReady

 var testRSSpec extensions.ReplicaSet
 received := make(chan string)
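TestWatchControllers above shows the full wiring for a controller built directly against the generated factory. The fragment below restates that wiring in one place; it uses only constructors and accessors visible in the hunk, so treat it as an illustrative sketch of the test body rather than a verbatim excerpt:

client := fake.NewSimpleClientset()
informers := informers.NewSharedInformerFactory(nil, client, controller.NoResyncPeriodFunc())
manager := NewReplicaSetController(
	informers.Extensions().V1beta1().ReplicaSets(), // generated ReplicaSet informer
	informers.Core().V1().Pods(),                   // generated Pod informer
	client,
	BurstReplicas,
	0,     // remaining arguments mirror the call in the hunk above
	false,
)
informers.Start(stopCh) // factory started after the controller is constructed, as in the hunk above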
@@ -537,7 +543,7 @@ func TestWatchControllers(t *testing.T) {
 // and eventually into the syncHandler. The handler validates the received controller
 // and closes the received channel to indicate that the test can finish.
 manager.syncHandler = func(key string) error {
-obj, exists, err := manager.rsLister.Indexer.GetByKey(key)
+obj, exists, err := informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().GetByKey(key)
 if !exists || err != nil {
 t.Errorf("Expected to find replica set under key %v", key)
 }

@@ -563,36 +569,44 @@ func TestWatchControllers(t *testing.T) {
 }

 func TestWatchPods(t *testing.T) {
+client := fake.NewSimpleClientset()

 fakeWatch := watch.NewFake()
-client := &fake.Clientset{}
-client.AddWatchReactor("pods", core.DefaultWatchReactor(fakeWatch, nil))
+client.PrependWatchReactor("pods", core.DefaultWatchReactor(fakeWatch, nil))
 stopCh := make(chan struct{})
 defer close(stopCh)
-manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
-manager.podListerSynced = alwaysReady

-// Put one ReplicaSet and one pod into the controller's stores
+manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)

+// Put one ReplicaSet into the shared informer
 labelMap := map[string]string{"foo": "bar"}
 testRSSpec := newReplicaSet(1, labelMap)
-manager.rsLister.Indexer.Add(testRSSpec)
+informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(testRSSpec)

 received := make(chan string)
 // The pod update sent through the fakeWatcher should figure out the managing ReplicaSet and
 // send it into the syncHandler.
 manager.syncHandler = func(key string) error {
-obj, exists, err := manager.rsLister.Indexer.GetByKey(key)
-if !exists || err != nil {
-t.Errorf("Expected to find replica set under key %v", key)
+namespace, name, err := cache.SplitMetaNamespaceKey(key)
+if err != nil {
+t.Errorf("Error splitting key: %v", err)
+}
+rsSpec, err := manager.rsLister.ReplicaSets(namespace).Get(name)
+if err != nil {
+t.Errorf("Expected to find replica set under key %v: %v", key, err)
 }
-rsSpec := obj.(*extensions.ReplicaSet)
 if !apiequality.Semantic.DeepDerivative(rsSpec, testRSSpec) {
 t.Errorf("\nExpected %#v,\nbut got %#v", testRSSpec, rsSpec)
 }
 close(received)
 return nil
 }

 // Start only the pod watcher and the workqueue, send a watch event,
 // and make sure it hits the sync method for the right ReplicaSet.
-go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
+go informers.Core().V1().Pods().Informer().Run(stopCh)
+go manager.Run(1, stopCh)

 pods := newPodList(nil, 1, v1.PodRunning, labelMap, testRSSpec, "pod")
 testPod := pods.Items[0]
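The watch and update tests also change how their stub syncHandler resolves a work-queue key: instead of a GetByKey lookup against a hand-written Indexer, they split the namespace/name key and go through the namespaced ReplicaSet lister. A compact restatement of that pattern, using only calls shown in the surrounding hunks (illustrative; the real handlers either compare against the expected ReplicaSet or send its name on a channel):

manager.syncHandler = func(key string) error {
	// Work-queue keys are "namespace/name" strings.
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		t.Errorf("Error splitting key: %v", err)
	}
	// The lister is now backed by the generated shared informer.
	rsSpec, err := manager.rsLister.ReplicaSets(namespace).Get(name)
	if err != nil {
		t.Errorf("Expected to find replica set under key %v: %v", key, err)
	}
	received <- rsSpec.Name
	return nil
}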
@@ -609,36 +623,39 @@ func TestWatchPods(t *testing.T) {
 func TestUpdatePods(t *testing.T) {
 stopCh := make(chan struct{})
 defer close(stopCh)
-manager := testNewReplicaSetControllerFromClient(fake.NewSimpleClientset(), stopCh, BurstReplicas, 0)
-manager.podListerSynced = alwaysReady
+manager, informers := testNewReplicaSetControllerFromClient(fake.NewSimpleClientset(), stopCh, BurstReplicas, 0)

 received := make(chan string)

 manager.syncHandler = func(key string) error {
-obj, exists, err := manager.rsLister.Indexer.GetByKey(key)
-if !exists || err != nil {
-t.Errorf("Expected to find replica set under key %v", key)
+namespace, name, err := cache.SplitMetaNamespaceKey(key)
+if err != nil {
+t.Errorf("Error splitting key: %v", err)
 }
-received <- obj.(*extensions.ReplicaSet).Name
+rsSpec, err := manager.rsLister.ReplicaSets(namespace).Get(name)
+if err != nil {
+t.Errorf("Expected to find replica set under key %v: %v", key, err)
+}
+received <- rsSpec.Name
 return nil
 }

 go wait.Until(manager.worker, 10*time.Millisecond, stopCh)

-// Put 2 ReplicaSets and one pod into the controller's stores
+// Put 2 ReplicaSets and one pod into the informers
 labelMap1 := map[string]string{"foo": "bar"}
 testRSSpec1 := newReplicaSet(1, labelMap1)
-manager.rsLister.Indexer.Add(testRSSpec1)
+informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(testRSSpec1)
 testRSSpec2 := *testRSSpec1
 labelMap2 := map[string]string{"bar": "foo"}
 testRSSpec2.Spec.Selector = &metav1.LabelSelector{MatchLabels: labelMap2}
 testRSSpec2.Name = "barfoo"
-manager.rsLister.Indexer.Add(&testRSSpec2)
+informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(&testRSSpec2)

 // case 1: We put in the podLister a pod with labels matching testRSSpec1,
 // then update its labels to match testRSSpec2. We expect to receive a sync
 // request for both replica sets.
-pod1 := newPodList(manager.podLister.Indexer, 1, v1.PodRunning, labelMap1, testRSSpec1, "pod").Items[0]
+pod1 := newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 1, v1.PodRunning, labelMap1, testRSSpec1, "pod").Items[0]
 pod1.ResourceVersion = "1"
 pod2 := pod1
 pod2.Labels = labelMap2
@@ -690,14 +707,13 @@ func TestControllerUpdateRequeue(t *testing.T) {
 stopCh := make(chan struct{})
 defer close(stopCh)
 client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
-manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
-manager.podListerSynced = alwaysReady
+manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)

 labelMap := map[string]string{"foo": "bar"}
 rs := newReplicaSet(1, labelMap)
-manager.rsLister.Indexer.Add(rs)
+informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
 rs.Status = extensions.ReplicaSetStatus{Replicas: 2}
-newPodList(manager.podLister.Indexer, 1, v1.PodRunning, labelMap, rs, "pod")
+newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 1, v1.PodRunning, labelMap, rs, "pod")

 fakePodControl := controller.FakePodControl{}
 manager.podControl = &fakePodControl

@@ -721,7 +737,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
 fakeRSClient := fakeClient.Extensions().ReplicaSets("default")
 numReplicas := int32(10)
 newStatus := extensions.ReplicaSetStatus{Replicas: numReplicas}
-updateReplicaSetStatus(fakeRSClient, *rs, newStatus)
+updateReplicaSetStatus(fakeRSClient, rs, newStatus)
 updates, gets := 0, 0
 for _, a := range fakeClient.Actions() {
 if a.GetResource().Resource != "replicasets" {
@@ -762,13 +778,12 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 fakePodControl := controller.FakePodControl{}
 stopCh := make(chan struct{})
 defer close(stopCh)
-manager := testNewReplicaSetControllerFromClient(client, stopCh, burstReplicas, 0)
-manager.podListerSynced = alwaysReady
+manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, burstReplicas, 0)
 manager.podControl = &fakePodControl

 labelMap := map[string]string{"foo": "bar"}
 rsSpec := newReplicaSet(numReplicas, labelMap)
-manager.rsLister.Indexer.Add(rsSpec)
+informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)

 expectedPods := int32(0)
 pods := newPodList(nil, numReplicas, v1.PodPending, labelMap, rsSpec, "pod")

@@ -782,14 +797,14 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 for _, replicas := range []int32{int32(numReplicas), 0} {

 *(rsSpec.Spec.Replicas) = replicas
-manager.rsLister.Indexer.Add(rsSpec)
+informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)

 for i := 0; i < numReplicas; i += burstReplicas {
 manager.syncReplicaSet(getKey(rsSpec, t))

 // The store accrues active pods. It's also used by the ReplicaSet to determine how many
 // replicas to create.
-activePods := int32(len(manager.podLister.Indexer.List()))
+activePods := int32(len(informers.Core().V1().Pods().Informer().GetIndexer().List()))
 if replicas != 0 {
 // This is the number of pods currently "in flight". They were created by the
 // ReplicaSet controller above, which then puts the ReplicaSet to sleep till

@@ -804,7 +819,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 // This simulates the watch events for all but 1 of the expected pods.
 // None of these should wake the controller because it has expectations==BurstReplicas.
 for i := int32(0); i < expectedPods-1; i++ {
-manager.podLister.Indexer.Add(&pods.Items[i])
+informers.Core().V1().Pods().Informer().GetIndexer().Add(&pods.Items[i])
 manager.addPod(&pods.Items[i])
 }

@@ -840,7 +855,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 // has exactly one expectation at the end, to verify that we
 // don't double delete.
 for i := range podsToDelete[1:] {
-manager.podLister.Indexer.Delete(podsToDelete[i])
+informers.Core().V1().Pods().Informer().GetIndexer().Delete(podsToDelete[i])
 manager.deletePod(podsToDelete[i])
 }
 podExp, exists, err := manager.expectations.GetExpectations(rsKey)

@@ -861,7 +876,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 // The last add pod will decrease the expectation of the ReplicaSet to 0,
 // which will cause it to create/delete the remaining replicas up to burstReplicas.
 if replicas != 0 {
-manager.podLister.Indexer.Add(&pods.Items[expectedPods-1])
+informers.Core().V1().Pods().Informer().GetIndexer().Add(&pods.Items[expectedPods-1])
 manager.addPod(&pods.Items[expectedPods-1])
 } else {
 expectedDel := manager.expectations.GetUIDs(getKey(rsSpec, t))

@@ -876,14 +891,14 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 Labels: rsSpec.Spec.Selector.MatchLabels,
 },
 }
-manager.podLister.Indexer.Delete(lastPod)
+informers.Core().V1().Pods().Informer().GetIndexer().Delete(lastPod)
 manager.deletePod(lastPod)
 }
 pods.Items = pods.Items[expectedPods:]
 }

 // Confirm that we've created the right number of replicas
-activePods := int32(len(manager.podLister.Indexer.List()))
+activePods := int32(len(informers.Core().V1().Pods().Informer().GetIndexer().List()))
 if activePods != *(rsSpec.Spec.Replicas) {
 t.Fatalf("Unexpected number of active pods, expected %d, got %d", *(rsSpec.Spec.Replicas), activePods)
 }
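In the burst-replica test the simulated watch flow is unchanged; only the backing store moves from manager.podLister to the generated Pod informer's indexer. The pairing of a cache mutation with the corresponding handler call, condensed from the hunks above (illustrative only):

// Simulate a pod being observed: put it in the shared informer's cache,
// then deliver the add event by hand, exactly as the loops above do.
informers.Core().V1().Pods().Informer().GetIndexer().Add(&pods.Items[i])
manager.addPod(&pods.Items[i])

// Deletion follows the same shape: remove from the cache, then notify.
informers.Core().V1().Pods().Informer().GetIndexer().Delete(podsToDelete[i])
manager.deletePod(podsToDelete[i])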
|
@ -916,15 +931,14 @@ func TestRSSyncExpectations(t *testing.T) {
|
||||||
fakePodControl := controller.FakePodControl{}
|
fakePodControl := controller.FakePodControl{}
|
||||||
stopCh := make(chan struct{})
|
stopCh := make(chan struct{})
|
||||||
defer close(stopCh)
|
defer close(stopCh)
|
||||||
manager := testNewReplicaSetControllerFromClient(client, stopCh, 2, 0)
|
manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, 2, 0)
|
||||||
manager.podListerSynced = alwaysReady
|
|
||||||
manager.podControl = &fakePodControl
|
manager.podControl = &fakePodControl
|
||||||
|
|
||||||
labelMap := map[string]string{"foo": "bar"}
|
labelMap := map[string]string{"foo": "bar"}
|
||||||
rsSpec := newReplicaSet(2, labelMap)
|
rsSpec := newReplicaSet(2, labelMap)
|
||||||
manager.rsLister.Indexer.Add(rsSpec)
|
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
|
||||||
pods := newPodList(nil, 2, v1.PodPending, labelMap, rsSpec, "pod")
|
pods := newPodList(nil, 2, v1.PodPending, labelMap, rsSpec, "pod")
|
||||||
manager.podLister.Indexer.Add(&pods.Items[0])
|
informers.Core().V1().Pods().Informer().GetIndexer().Add(&pods.Items[0])
|
||||||
postExpectationsPod := pods.Items[1]
|
postExpectationsPod := pods.Items[1]
|
||||||
|
|
||||||
manager.expectations = controller.NewUIDTrackingControllerExpectations(FakeRSExpectations{
|
manager.expectations = controller.NewUIDTrackingControllerExpectations(FakeRSExpectations{
|
||||||
|
@ -932,7 +946,7 @@ func TestRSSyncExpectations(t *testing.T) {
|
||||||
// If we check active pods before checking expectataions, the
|
// If we check active pods before checking expectataions, the
|
||||||
// ReplicaSet will create a new replica because it doesn't see
|
// ReplicaSet will create a new replica because it doesn't see
|
||||||
// this pod, but has fulfilled its expectations.
|
// this pod, but has fulfilled its expectations.
|
||||||
manager.podLister.Indexer.Add(&postExpectationsPod)
|
informers.Core().V1().Pods().Informer().GetIndexer().Add(&postExpectationsPod)
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
manager.syncReplicaSet(getKey(rsSpec, t))
|
manager.syncReplicaSet(getKey(rsSpec, t))
|
||||||
|
@ -943,11 +957,10 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
|
||||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||||
stopCh := make(chan struct{})
|
stopCh := make(chan struct{})
|
||||||
defer close(stopCh)
|
defer close(stopCh)
|
||||||
manager := testNewReplicaSetControllerFromClient(client, stopCh, 10, 0)
|
manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, 10, 0)
|
||||||
manager.podListerSynced = alwaysReady
|
|
||||||
|
|
||||||
rs := newReplicaSet(1, map[string]string{"foo": "bar"})
|
rs := newReplicaSet(1, map[string]string{"foo": "bar"})
|
||||||
manager.rsLister.Indexer.Add(rs)
|
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
|
||||||
|
|
||||||
fakePodControl := controller.FakePodControl{}
|
fakePodControl := controller.FakePodControl{}
|
||||||
manager.podControl = &fakePodControl
|
manager.podControl = &fakePodControl
|
||||||
|
@ -969,7 +982,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
|
||||||
if !exists || err != nil {
|
if !exists || err != nil {
|
||||||
t.Errorf("No expectations found for ReplicaSet")
|
t.Errorf("No expectations found for ReplicaSet")
|
||||||
}
|
}
|
||||||
manager.rsLister.Indexer.Delete(rs)
|
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Delete(rs)
|
||||||
manager.syncReplicaSet(getKey(rs, t))
|
manager.syncReplicaSet(getKey(rs, t))
|
||||||
|
|
||||||
if _, exists, err = manager.expectations.GetExpectations(rsKey); exists {
|
if _, exists, err = manager.expectations.GetExpectations(rsKey); exists {
|
||||||
|
@ -978,7 +991,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
|
||||||
|
|
||||||
// This should have no effect, since we've deleted the ReplicaSet.
|
// This should have no effect, since we've deleted the ReplicaSet.
|
||||||
podExp.Add(-1, 0)
|
podExp.Add(-1, 0)
|
||||||
manager.podLister.Indexer.Replace(make([]interface{}, 0), "0")
|
informers.Core().V1().Pods().Informer().GetIndexer().Replace(make([]interface{}, 0), "0")
|
||||||
manager.syncReplicaSet(getKey(rs, t))
|
manager.syncReplicaSet(getKey(rs, t))
|
||||||
validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)
|
validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)
|
||||||
}
|
}
|
||||||
|
@ -1002,8 +1015,7 @@ func TestOverlappingRSs(t *testing.T) {
|
||||||
func() {
|
func() {
|
||||||
stopCh := make(chan struct{})
|
stopCh := make(chan struct{})
|
||||||
defer close(stopCh)
|
defer close(stopCh)
|
||||||
manager := testNewReplicaSetControllerFromClient(client, stopCh, 10, 0)
|
manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, 10, 0)
|
||||||
manager.podListerSynced = alwaysReady
|
|
||||||
|
|
||||||
// Create 10 ReplicaSets, shuffled them randomly and insert them into the ReplicaSet controller's store
|
// Create 10 ReplicaSets, shuffled them randomly and insert them into the ReplicaSet controller's store
|
||||||
var controllers []*extensions.ReplicaSet
|
var controllers []*extensions.ReplicaSet
|
||||||
|
@ -1015,7 +1027,7 @@ func TestOverlappingRSs(t *testing.T) {
|
||||||
}
|
}
|
||||||
shuffledControllers := shuffle(controllers)
|
shuffledControllers := shuffle(controllers)
|
||||||
for j := range shuffledControllers {
|
for j := range shuffledControllers {
|
||||||
manager.rsLister.Indexer.Add(shuffledControllers[j])
|
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(shuffledControllers[j])
|
||||||
}
|
}
|
||||||
// Add a pod and make sure only the oldest ReplicaSet is synced
|
// Add a pod and make sure only the oldest ReplicaSet is synced
|
||||||
pods := newPodList(nil, 1, v1.PodPending, labelMap, controllers[0], "pod")
|
pods := newPodList(nil, 1, v1.PodPending, labelMap, controllers[0], "pod")
|
||||||
|
@ -1035,11 +1047,10 @@ func TestDeletionTimestamp(t *testing.T) {
|
||||||
labelMap := map[string]string{"foo": "bar"}
|
labelMap := map[string]string{"foo": "bar"}
|
||||||
stopCh := make(chan struct{})
|
stopCh := make(chan struct{})
|
||||||
defer close(stopCh)
|
defer close(stopCh)
|
||||||
manager := testNewReplicaSetControllerFromClient(c, stopCh, 10, 0)
|
manager, informers := testNewReplicaSetControllerFromClient(c, stopCh, 10, 0)
|
||||||
manager.podListerSynced = alwaysReady
|
|
||||||
|
|
||||||
rs := newReplicaSet(1, labelMap)
|
rs := newReplicaSet(1, labelMap)
|
||||||
manager.rsLister.Indexer.Add(rs)
|
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
|
||||||
rsKey, err := controller.KeyFunc(rs)
|
rsKey, err := controller.KeyFunc(rs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("Couldn't get key for object %#v: %v", rs, err)
|
t.Errorf("Couldn't get key for object %#v: %v", rs, err)
|
||||||
|
@ -1125,15 +1136,14 @@ func TestDeletionTimestamp(t *testing.T) {
|
||||||
|
|
||||||
// setupManagerWithGCEnabled creates a RS manager with a fakePodControl
|
// setupManagerWithGCEnabled creates a RS manager with a fakePodControl
|
||||||
// and with garbageCollectorEnabled set to true
|
// and with garbageCollectorEnabled set to true
|
||||||
func setupManagerWithGCEnabled(stopCh chan struct{}, objs ...runtime.Object) (manager *ReplicaSetController, fakePodControl *controller.FakePodControl) {
|
func setupManagerWithGCEnabled(stopCh chan struct{}, objs ...runtime.Object) (manager *ReplicaSetController, fakePodControl *controller.FakePodControl, informers informers.SharedInformerFactory) {
|
||||||
c := fakeclientset.NewSimpleClientset(objs...)
|
c := fakeclientset.NewSimpleClientset(objs...)
|
||||||
fakePodControl = &controller.FakePodControl{}
|
fakePodControl = &controller.FakePodControl{}
|
||||||
manager = testNewReplicaSetControllerFromClient(c, stopCh, BurstReplicas, 0)
|
manager, informers = testNewReplicaSetControllerFromClient(c, stopCh, BurstReplicas, 0)
|
||||||
manager.garbageCollectorEnabled = true
|
manager.garbageCollectorEnabled = true
|
||||||
manager.podListerSynced = alwaysReady
|
|
||||||
|
|
||||||
manager.podControl = fakePodControl
|
manager.podControl = fakePodControl
|
||||||
return manager, fakePodControl
|
return manager, fakePodControl, informers
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDoNotPatchPodWithOtherControlRef(t *testing.T) {
|
func TestDoNotPatchPodWithOtherControlRef(t *testing.T) {
|
||||||
|
@ -1141,14 +1151,14 @@ func TestDoNotPatchPodWithOtherControlRef(t *testing.T) {
|
||||||
rs := newReplicaSet(2, labelMap)
|
rs := newReplicaSet(2, labelMap)
|
||||||
stopCh := make(chan struct{})
|
stopCh := make(chan struct{})
|
||||||
defer close(stopCh)
|
defer close(stopCh)
|
||||||
manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs)
|
manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs)
|
||||||
manager.rsLister.Indexer.Add(rs)
|
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
|
||||||
var trueVar = true
|
var trueVar = true
|
||||||
otherControllerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "v1beta1", Kind: "ReplicaSet", Name: "AnotherRS", Controller: &trueVar}
|
otherControllerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "v1beta1", Kind: "ReplicaSet", Name: "AnotherRS", Controller: &trueVar}
|
||||||
// add to podLister a matching Pod controlled by another controller. Expect no patch.
|
// add to podLister a matching Pod controlled by another controller. Expect no patch.
|
||||||
pod := newPod("pod", rs, v1.PodRunning, nil)
|
pod := newPod("pod", rs, v1.PodRunning, nil)
|
||||||
pod.OwnerReferences = []metav1.OwnerReference{otherControllerReference}
|
pod.OwnerReferences = []metav1.OwnerReference{otherControllerReference}
|
||||||
manager.podLister.Indexer.Add(pod)
|
informers.Core().V1().Pods().Informer().GetIndexer().Add(pod)
|
||||||
err := manager.syncReplicaSet(getKey(rs, t))
|
err := manager.syncReplicaSet(getKey(rs, t))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
|
@ -1162,15 +1172,15 @@ func TestPatchPodWithOtherOwnerRef(t *testing.T) {
|
||||||
rs := newReplicaSet(2, labelMap)
|
rs := newReplicaSet(2, labelMap)
|
||||||
stopCh := make(chan struct{})
|
stopCh := make(chan struct{})
|
||||||
defer close(stopCh)
|
defer close(stopCh)
|
||||||
manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs)
|
manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs)
|
||||||
manager.rsLister.Indexer.Add(rs)
|
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
|
||||||
// add to podLister one more matching pod that doesn't have a controller
|
// add to podLister one more matching pod that doesn't have a controller
|
||||||
// ref, but has an owner ref pointing to other object. Expect a patch to
|
// ref, but has an owner ref pointing to other object. Expect a patch to
|
||||||
// take control of it.
|
// take control of it.
|
||||||
unrelatedOwnerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "batch/v1", Kind: "Job", Name: "Job"}
|
unrelatedOwnerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "batch/v1", Kind: "Job", Name: "Job"}
|
||||||
pod := newPod("pod", rs, v1.PodRunning, nil)
|
pod := newPod("pod", rs, v1.PodRunning, nil)
|
||||||
pod.OwnerReferences = []metav1.OwnerReference{unrelatedOwnerReference}
|
pod.OwnerReferences = []metav1.OwnerReference{unrelatedOwnerReference}
|
||||||
manager.podLister.Indexer.Add(pod)
|
informers.Core().V1().Pods().Informer().GetIndexer().Add(pod)
|
||||||
|
|
||||||
err := manager.syncReplicaSet(getKey(rs, t))
|
err := manager.syncReplicaSet(getKey(rs, t))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -1185,14 +1195,14 @@ func TestPatchPodWithCorrectOwnerRef(t *testing.T) {
|
||||||
rs := newReplicaSet(2, labelMap)
|
rs := newReplicaSet(2, labelMap)
|
||||||
stopCh := make(chan struct{})
|
stopCh := make(chan struct{})
|
||||||
defer close(stopCh)
|
defer close(stopCh)
|
||||||
manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs)
|
manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs)
|
||||||
manager.rsLister.Indexer.Add(rs)
|
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
|
||||||
// add to podLister a matching pod that has an ownerRef pointing to the rs,
|
// add to podLister a matching pod that has an ownerRef pointing to the rs,
|
||||||
// but ownerRef.Controller is false. Expect a patch to take control it.
|
// but ownerRef.Controller is false. Expect a patch to take control it.
|
||||||
rsOwnerReference := metav1.OwnerReference{UID: rs.UID, APIVersion: "v1", Kind: "ReplicaSet", Name: rs.Name}
|
rsOwnerReference := metav1.OwnerReference{UID: rs.UID, APIVersion: "v1", Kind: "ReplicaSet", Name: rs.Name}
|
||||||
pod := newPod("pod", rs, v1.PodRunning, nil)
|
pod := newPod("pod", rs, v1.PodRunning, nil)
|
||||||
pod.OwnerReferences = []metav1.OwnerReference{rsOwnerReference}
|
pod.OwnerReferences = []metav1.OwnerReference{rsOwnerReference}
|
||||||
manager.podLister.Indexer.Add(pod)
|
informers.Core().V1().Pods().Informer().GetIndexer().Add(pod)
|
||||||
|
|
||||||
err := manager.syncReplicaSet(getKey(rs, t))
|
err := manager.syncReplicaSet(getKey(rs, t))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -1207,12 +1217,12 @@ func TestPatchPodFails(t *testing.T) {
|
||||||
rs := newReplicaSet(2, labelMap)
|
rs := newReplicaSet(2, labelMap)
|
||||||
stopCh := make(chan struct{})
|
stopCh := make(chan struct{})
|
||||||
defer close(stopCh)
|
defer close(stopCh)
|
||||||
manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs)
|
manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs)
|
||||||
manager.rsLister.Indexer.Add(rs)
|
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
|
||||||
// add to podLister two matching pods. Expect two patches to take control
|
// add to podLister two matching pods. Expect two patches to take control
|
||||||
// them.
|
// them.
|
||||||
manager.podLister.Indexer.Add(newPod("pod1", rs, v1.PodRunning, nil))
|
informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod1", rs, v1.PodRunning, nil))
|
||||||
manager.podLister.Indexer.Add(newPod("pod2", rs, v1.PodRunning, nil))
|
informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod2", rs, v1.PodRunning, nil))
|
||||||
// let both patches fail. The rs controller will assume it fails to take
|
// let both patches fail. The rs controller will assume it fails to take
|
||||||
// control of the pods and create new ones.
|
// control of the pods and create new ones.
|
||||||
fakePodControl.Err = fmt.Errorf("Fake Error")
|
fakePodControl.Err = fmt.Errorf("Fake Error")
|
||||||
|
@ -1229,13 +1239,13 @@ func TestPatchExtraPodsThenDelete(t *testing.T) {
|
||||||
rs := newReplicaSet(2, labelMap)
|
rs := newReplicaSet(2, labelMap)
|
||||||
stopCh := make(chan struct{})
|
stopCh := make(chan struct{})
|
||||||
defer close(stopCh)
|
defer close(stopCh)
|
||||||
manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs)
|
manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs)
|
||||||
manager.rsLister.Indexer.Add(rs)
|
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
|
||||||
// add to podLister three matching pods. Expect three patches to take control
|
// add to podLister three matching pods. Expect three patches to take control
|
||||||
// them, and later delete one of them.
|
// them, and later delete one of them.
|
||||||
manager.podLister.Indexer.Add(newPod("pod1", rs, v1.PodRunning, nil))
|
informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod1", rs, v1.PodRunning, nil))
|
||||||
manager.podLister.Indexer.Add(newPod("pod2", rs, v1.PodRunning, nil))
|
informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod2", rs, v1.PodRunning, nil))
|
||||||
manager.podLister.Indexer.Add(newPod("pod3", rs, v1.PodRunning, nil))
|
informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod3", rs, v1.PodRunning, nil))
|
||||||
err := manager.syncReplicaSet(getKey(rs, t))
|
err := manager.syncReplicaSet(getKey(rs, t))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
|
@ -1249,8 +1259,8 @@ func TestUpdateLabelsRemoveControllerRef(t *testing.T) {
|
||||||
rs := newReplicaSet(2, labelMap)
|
rs := newReplicaSet(2, labelMap)
|
||||||
stopCh := make(chan struct{})
|
stopCh := make(chan struct{})
|
||||||
defer close(stopCh)
|
defer close(stopCh)
|
||||||
manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs)
|
manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs)
|
||||||
manager.rsLister.Indexer.Add(rs)
|
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
|
||||||
// put one pod in the podLister
|
// put one pod in the podLister
|
||||||
pod := newPod("pod", rs, v1.PodRunning, nil)
|
pod := newPod("pod", rs, v1.PodRunning, nil)
|
||||||
pod.ResourceVersion = "1"
|
pod.ResourceVersion = "1"
|
||||||
|
@ -1264,7 +1274,7 @@ func TestUpdateLabelsRemoveControllerRef(t *testing.T) {
|
||||||
// add the updatedPod to the store. This is consistent with the behavior of
|
// add the updatedPod to the store. This is consistent with the behavior of
|
||||||
// the Informer: Informer updates the store before call the handler
|
// the Informer: Informer updates the store before call the handler
|
||||||
// (updatePod() in this case).
|
// (updatePod() in this case).
|
||||||
manager.podLister.Indexer.Add(&updatedPod)
|
informers.Core().V1().Pods().Informer().GetIndexer().Add(&updatedPod)
|
||||||
// send a update of the same pod with modified labels
|
// send a update of the same pod with modified labels
|
||||||
manager.updatePod(pod, &updatedPod)
|
manager.updatePod(pod, &updatedPod)
|
||||||
// verifies that rs is added to the queue
|
// verifies that rs is added to the queue
|
||||||
|
@ -1290,16 +1300,16 @@ func TestUpdateSelectorControllerRef(t *testing.T) {
|
||||||
rs := newReplicaSet(2, labelMap)
|
rs := newReplicaSet(2, labelMap)
|
||||||
stopCh := make(chan struct{})
|
stopCh := make(chan struct{})
|
||||||
defer close(stopCh)
|
defer close(stopCh)
|
||||||
manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs)
|
manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs)
|
||||||
// put 2 pods in the podLister
|
// put 2 pods in the podLister
|
||||||
newPodList(manager.podLister.Indexer, 2, v1.PodRunning, labelMap, rs, "pod")
|
newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 2, v1.PodRunning, labelMap, rs, "pod")
|
||||||
// update the RS so that its selector no longer matches the pods
|
// update the RS so that its selector no longer matches the pods
|
||||||
updatedRS := *rs
|
updatedRS := *rs
|
||||||
updatedRS.Spec.Selector.MatchLabels = map[string]string{"foo": "baz"}
|
updatedRS.Spec.Selector.MatchLabels = map[string]string{"foo": "baz"}
|
||||||
// put the updatedRS into the store. This is consistent with the behavior of
|
// put the updatedRS into the store. This is consistent with the behavior of
|
||||||
// the Informer: Informer updates the store before call the handler
|
// the Informer: Informer updates the store before call the handler
|
||||||
// (updateRS() in this case).
|
// (updateRS() in this case).
|
||||||
manager.rsLister.Indexer.Add(&updatedRS)
|
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(&updatedRS)
|
||||||
manager.updateRS(rs, &updatedRS)
|
manager.updateRS(rs, &updatedRS)
|
||||||
// verifies that the rs is added to the queue
|
// verifies that the rs is added to the queue
|
||||||
rsKey := getKey(rs, t)
|
rsKey := getKey(rs, t)
|
||||||
|
@ -1326,12 +1336,12 @@ func TestDoNotAdoptOrCreateIfBeingDeleted(t *testing.T) {
|
||||||
rs := newReplicaSet(2, labelMap)
|
rs := newReplicaSet(2, labelMap)
|
||||||
stopCh := make(chan struct{})
|
stopCh := make(chan struct{})
|
||||||
defer close(stopCh)
|
defer close(stopCh)
|
||||||
manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs)
|
manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs)
|
||||||
now := metav1.Now()
|
now := metav1.Now()
|
||||||
rs.DeletionTimestamp = &now
|
rs.DeletionTimestamp = &now
|
||||||
manager.rsLister.Indexer.Add(rs)
|
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
|
||||||
pod1 := newPod("pod1", rs, v1.PodRunning, nil)
|
pod1 := newPod("pod1", rs, v1.PodRunning, nil)
|
||||||
manager.podLister.Indexer.Add(pod1)
|
informers.Core().V1().Pods().Informer().GetIndexer().Add(pod1)
|
||||||
|
|
||||||
// no patch, no create
|
// no patch, no create
|
||||||
err := manager.syncReplicaSet(getKey(rs, t))
|
err := manager.syncReplicaSet(getKey(rs, t))
|
||||||
|
@ -1354,18 +1364,17 @@ func TestReadyReplicas(t *testing.T) {
|
||||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||||
stopCh := make(chan struct{})
|
stopCh := make(chan struct{})
|
||||||
defer close(stopCh)
|
defer close(stopCh)
|
||||||
manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
|
manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
|
||||||
manager.podListerSynced = alwaysReady
|
|
||||||
|
|
||||||
// Status.Replica should update to match number of pods in system, 1 new pod should be created.
|
// Status.Replica should update to match number of pods in system, 1 new pod should be created.
|
||||||
labelMap := map[string]string{"foo": "bar"}
|
labelMap := map[string]string{"foo": "bar"}
|
||||||
rs := newReplicaSet(2, labelMap)
|
rs := newReplicaSet(2, labelMap)
|
||||||
rs.Status = extensions.ReplicaSetStatus{Replicas: 2, ReadyReplicas: 0, AvailableReplicas: 0, ObservedGeneration: 1}
|
rs.Status = extensions.ReplicaSetStatus{Replicas: 2, ReadyReplicas: 0, AvailableReplicas: 0, ObservedGeneration: 1}
|
||||||
rs.Generation = 1
|
rs.Generation = 1
|
||||||
manager.rsLister.Indexer.Add(rs)
|
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
|
||||||
|
|
||||||
newPodList(manager.podLister.Indexer, 2, v1.PodPending, labelMap, rs, "pod")
|
newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 2, v1.PodPending, labelMap, rs, "pod")
|
||||||
newPodList(manager.podLister.Indexer, 2, v1.PodRunning, labelMap, rs, "pod")
|
newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 2, v1.PodRunning, labelMap, rs, "pod")
|
||||||
|
|
||||||
// This response body is just so we don't err out decoding the http response
|
// This response body is just so we don't err out decoding the http response
|
||||||
response := runtime.EncodeOrDie(testapi.Extensions.Codec(), &extensions.ReplicaSet{})
|
response := runtime.EncodeOrDie(testapi.Extensions.Codec(), &extensions.ReplicaSet{})
|
||||||
|
@ -1397,8 +1406,7 @@ func TestAvailableReplicas(t *testing.T) {
|
||||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||||
stopCh := make(chan struct{})
|
stopCh := make(chan struct{})
|
||||||
defer close(stopCh)
|
defer close(stopCh)
|
||||||
manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
|
manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
|
||||||
manager.podListerSynced = alwaysReady
|
|
||||||
|
|
||||||
// Status.Replica should update to match number of pods in system, 1 new pod should be created.
|
// Status.Replica should update to match number of pods in system, 1 new pod should be created.
|
||||||
labelMap := map[string]string{"foo": "bar"}
|
labelMap := map[string]string{"foo": "bar"}
|
||||||
|
@ -1407,17 +1415,17 @@ func TestAvailableReplicas(t *testing.T) {
|
||||||
rs.Generation = 1
|
rs.Generation = 1
|
||||||
// minReadySeconds set to 15s
|
// minReadySeconds set to 15s
|
||||||
rs.Spec.MinReadySeconds = 15
|
rs.Spec.MinReadySeconds = 15
|
||||||
manager.rsLister.Indexer.Add(rs)
|
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
|
||||||
|
|
||||||
// First pod becomes ready 20s ago
|
// First pod becomes ready 20s ago
|
||||||
moment := metav1.Time{Time: time.Now().Add(-2e10)}
|
moment := metav1.Time{Time: time.Now().Add(-2e10)}
|
||||||
pod := newPod("pod", rs, v1.PodRunning, &moment)
|
pod := newPod("pod", rs, v1.PodRunning, &moment)
|
||||||
manager.podLister.Indexer.Add(pod)
|
informers.Core().V1().Pods().Informer().GetIndexer().Add(pod)
|
||||||
|
|
||||||
// Second pod becomes ready now
|
// Second pod becomes ready now
|
||||||
otherMoment := metav1.Now()
|
otherMoment := metav1.Now()
|
||||||
otherPod := newPod("otherPod", rs, v1.PodRunning, &otherMoment)
|
otherPod := newPod("otherPod", rs, v1.PodRunning, &otherMoment)
|
||||||
manager.podLister.Indexer.Add(otherPod)
|
informers.Core().V1().Pods().Informer().GetIndexer().Add(otherPod)
|
||||||
|
|
||||||
// This response body is just so we don't err out decoding the http response
|
// This response body is just so we don't err out decoding the http response
|
||||||
response := runtime.EncodeOrDie(testapi.Extensions.Codec(), &extensions.ReplicaSet{})
|
response := runtime.EncodeOrDie(testapi.Extensions.Codec(), &extensions.ReplicaSet{})
|
||||||
|
|
|
@@ -33,7 +33,7 @@ import (
 )

 // updateReplicaSetStatus attempts to update the Status.Replicas of the given ReplicaSet, with a single GET/PUT retry.
-func updateReplicaSetStatus(c unversionedextensions.ReplicaSetInterface, rs extensions.ReplicaSet, newStatus extensions.ReplicaSetStatus) (updateErr error) {
+func updateReplicaSetStatus(c unversionedextensions.ReplicaSetInterface, rs *extensions.ReplicaSet, newStatus extensions.ReplicaSetStatus) (updateErr error) {
 // This is the steady state. It happens when the ReplicaSet doesn't have any expectations, since
 // we do a periodic relist every 30s. If the generations differ but the replicas are
 // the same, a caller might've resized to the same replica count.
@@ -48,11 +48,11 @@ func updateReplicaSetStatus(c unversionedextensions.ReplicaSetInterface, rs exte

 // deep copy to avoid mutation now.
 // TODO this method need some work. Retry on conflict probably, though I suspect this is stomping status to something it probably shouldn't
-copyObj, err := api.Scheme.DeepCopy(&rs)
+copyObj, err := api.Scheme.DeepCopy(rs)
 if err != nil {
 return err
 }
-rs = *copyObj.(*extensions.ReplicaSet)
+rs = copyObj.(*extensions.ReplicaSet)

 // Save the generation number we acted on, otherwise we might wrongfully indicate
 // that we've seen a spec update when we retry.
@@ -61,7 +61,7 @@ func updateReplicaSetStatus(c unversionedextensions.ReplicaSetInterface, rs exte
 newStatus.ObservedGeneration = rs.Generation

 var getErr error
-for i, rs := 0, &rs; ; i++ {
+for i, rs := 0, rs; ; i++ {
 glog.V(4).Infof(fmt.Sprintf("Updating replica count for ReplicaSet: %s/%s, ", rs.Namespace, rs.Name) +
 fmt.Sprintf("replicas %d->%d (need %d), ", rs.Status.Replicas, newStatus.Replicas, *(rs.Spec.Replicas)) +
 fmt.Sprintf("fullyLabeledReplicas %d->%d, ", rs.Status.FullyLabeledReplicas, newStatus.FullyLabeledReplicas) +
@@ -83,7 +83,7 @@ func updateReplicaSetStatus(c unversionedextensions.ReplicaSetInterface, rs exte
 }
 }

-func calculateStatus(rs extensions.ReplicaSet, filteredPods []*v1.Pod, manageReplicasErr error) extensions.ReplicaSetStatus {
+func calculateStatus(rs *extensions.ReplicaSet, filteredPods []*v1.Pod, manageReplicasErr error) extensions.ReplicaSetStatus {
 newStatus := rs.Status
 // Count the number of pods that have labels matching the labels of the pod
 // template of the replica set, the matching pods may have more
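Editor's note: the hunks above switch updateReplicaSetStatus and calculateStatus to a *extensions.ReplicaSet and deep-copy before touching the status. Below is a minimal, self-contained sketch (not part of the commit, using stand-in types rather than the real API structs) of why an object taken from a shared informer cache is copied before its status is rewritten: the cached object is shared with every other reader, so mutating it in place would corrupt the cache.

```go
package main

import "fmt"

// Status stands in for extensions.ReplicaSetStatus.
type Status struct{ Replicas int32 }

// ReplicaSet stands in for the real API type held in the shared informer cache.
type ReplicaSet struct {
	Name   string
	Status Status
}

// updateStatus mirrors the shape of the real function after this change: it
// receives a pointer into the shared cache, so it copies before mutating and
// leaves the cached object untouched.
func updateStatus(rs *ReplicaSet, newStatus Status) *ReplicaSet {
	copied := *rs // a shallow copy is enough for this stand-in type
	copied.Status = newStatus
	return &copied
}

func main() {
	cached := &ReplicaSet{Name: "frontend", Status: Status{Replicas: 2}}
	updated := updateStatus(cached, Status{Replicas: 3})
	fmt.Println(cached.Status.Replicas, updated.Status.Replicas) // 2 3: cache unchanged
}
```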
@@ -21,9 +21,9 @@ go_library(
 "//pkg/api/v1:go_default_library",
 "//pkg/client/clientset_generated/clientset:go_default_library",
 "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
-"//pkg/client/legacylisters:go_default_library",
+"//pkg/client/informers/informers_generated/core/v1:go_default_library",
+"//pkg/client/listers/core/v1:go_default_library",
 "//pkg/controller:go_default_library",
-"//pkg/controller/informers:go_default_library",
 "//pkg/util/metrics:go_default_library",
 "//vendor:github.com/golang/glog",
 "//vendor:k8s.io/apimachinery/pkg/api/errors",
@@ -53,8 +53,9 @@ go_test(
 "//pkg/api/v1:go_default_library",
 "//pkg/client/clientset_generated/clientset:go_default_library",
 "//pkg/client/clientset_generated/clientset/fake:go_default_library",
+"//pkg/client/informers/informers_generated:go_default_library",
+"//pkg/client/informers/informers_generated/core/v1:go_default_library",
 "//pkg/controller:go_default_library",
-"//pkg/controller/informers:go_default_library",
 "//pkg/securitycontext:go_default_library",
 "//vendor:k8s.io/apimachinery/pkg/api/equality",
 "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
@@ -19,6 +19,7 @@ limitations under the License.
 package replication

 import (
+"fmt"
 "reflect"
 "sort"
 "sync"
@@ -41,9 +42,9 @@ import (
 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-"k8s.io/kubernetes/pkg/client/legacylisters"
+coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1"
+corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
 "k8s.io/kubernetes/pkg/controller"
-"k8s.io/kubernetes/pkg/controller/informers"
 "k8s.io/kubernetes/pkg/util/metrics"
 )

@@ -68,13 +69,6 @@ type ReplicationManager struct {
 kubeClient clientset.Interface
 podControl controller.PodControlInterface

-// internalPodInformer is used to hold a personal informer. If we're using
-// a normal shared informer, then the informer will be started for us. If
-// we have a personal informer, we must start it ourselves. If you start
-// the controller using NewReplicationManager(passing SharedInformer), this
-// will be null
-internalPodInformer cache.SharedIndexInformer
-
 // An rc is temporarily suspended after creating/deleting these many replicas.
 // It resumes normal action after observing the watch events for them.
 burstReplicas int
@@ -84,15 +78,13 @@ type ReplicationManager struct {
 // A TTLCache of pod creates/deletes each rc expects to see.
 expectations *controller.UIDTrackingControllerExpectations

-// A store of replication controllers, populated by the rcController
-rcLister listers.StoreToReplicationControllerLister
-// A store of pods, populated by the podController
-podLister listers.StoreToPodLister
-// Watches changes to all pods
-podController cache.Controller
+rcLister corelisters.ReplicationControllerLister
+rcListerSynced cache.InformerSynced
+
+podLister corelisters.PodLister
 // podListerSynced returns true if the pod store has been synced at least once.
 // Added as a member to the struct to allow injection for testing.
-podListerSynced func() bool
+podListerSynced cache.InformerSynced

 lookupCache *controller.MatchingCache

@@ -105,7 +97,7 @@ type ReplicationManager struct {
 }

 // NewReplicationManager configures a replication manager with the specified event recorder
-func NewReplicationManager(podInformer, rcInformer cache.SharedIndexInformer, kubeClient clientset.Interface, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
+func NewReplicationManager(podInformer coreinformers.PodInformer, rcInformer coreinformers.ReplicationControllerInformer, kubeClient clientset.Interface, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
 if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
 metrics.RegisterMetricAndTrackRateLimiterUsage("replication_controller", kubeClient.Core().RESTClient().GetRateLimiter())
 }
@@ -126,7 +118,7 @@ func NewReplicationManager(podInformer, rcInformer cache.SharedIndexInformer, ku
 garbageCollectorEnabled: garbageCollectorEnabled,
 }

-rcInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
+rcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 AddFunc: rm.enqueueController,
 UpdateFunc: rm.updateRC,
 // This will enter the sync loop and no-op, because the controller has been deleted from the store.
@@ -134,7 +126,10 @@ func NewReplicationManager(podInformer, rcInformer cache.SharedIndexInformer, ku
 // way of achieving this is by performing a `stop` operation on the controller.
 DeleteFunc: rm.enqueueController,
 })
-podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
+rm.rcLister = rcInformer.Lister()
+rm.rcListerSynced = rcInformer.Informer().HasSynced
+
+podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 AddFunc: rm.addPod,
 // This invokes the rc for every pod change, eg: host assignment. Though this might seem like overkill
 // the most frequent pod update is status, and the associated rc will only list from local storage, so
@@ -142,24 +137,14 @@ func NewReplicationManager(podInformer, rcInformer cache.SharedIndexInformer, ku
 UpdateFunc: rm.updatePod,
 DeleteFunc: rm.deletePod,
 })
+rm.podLister = podInformer.Lister()
+rm.podListerSynced = podInformer.Informer().HasSynced

 rm.syncHandler = rm.syncReplicationController
-rm.rcLister.Indexer = rcInformer.GetIndexer()
-rm.podLister.Indexer = podInformer.GetIndexer()
-rm.podListerSynced = podInformer.HasSynced
 rm.lookupCache = controller.NewMatchingCache(lookupCacheSize)
 return rm
 }

-// NewReplicationManagerFromClient creates a new ReplicationManager that runs its own informer.
-func NewReplicationManagerFromClient(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicationManager {
-podInformer := informers.NewPodInformer(kubeClient, resyncPeriod())
-rcInformer := informers.NewReplicationControllerInformer(kubeClient, resyncPeriod())
-rm := NewReplicationManager(podInformer, rcInformer, kubeClient, burstReplicas, lookupCacheSize, false)
-rm.internalPodInformer = podInformer
-return rm
-}
-
 // SetEventRecorder replaces the event recorder used by the replication manager
 // with the given recorder. Only used for testing.
 func (rm *ReplicationManager) SetEventRecorder(recorder record.EventRecorder) {
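Editor's note: with the constructor above, callers hand the manager informers obtained from the generated shared factory instead of raw SharedIndexInformers. The sketch below is not taken from the commit; it shows the assumed caller-side wiring, with the resync period, worker count, and lookup-cache size as placeholders, package aliases following the imports added in this diff, and the first factory argument (nil) being the internal-clientset slot as implied by the test changes later in this diff.

```go
package main

import (
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/controller/replication"
)

// startRCManager sketches how a caller wires the rewritten manager to the
// generated shared informer factory. The factory is started once and serves
// every informer requested from it; Run then waits on cache sync internally.
func startRCManager(kubeClient clientset.Interface, stopCh <-chan struct{}) {
	factory := informers.NewSharedInformerFactory(nil, kubeClient, controller.NoResyncPeriodFunc())

	rm := replication.NewReplicationManager(
		factory.Core().V1().Pods(),
		factory.Core().V1().ReplicationControllers(),
		kubeClient,
		replication.BurstReplicas,
		4096,  // lookupCacheSize (placeholder value)
		false, // garbageCollectorEnabled
	)

	factory.Start(stopCh) // starts every informer requested from the factory
	go rm.Run(5, stopCh)  // 5 workers (placeholder)
}
```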
@@ -174,11 +159,9 @@ func (rm *ReplicationManager) Run(workers int, stopCh <-chan struct{}) {
 defer rm.queue.ShutDown()

 glog.Infof("Starting RC Manager")
-if rm.internalPodInformer != nil {
-go rm.internalPodInformer.Run(stopCh)
-}

-if !cache.WaitForCacheSync(stopCh, rm.podListerSynced) {
+if !cache.WaitForCacheSync(stopCh, rm.podListerSynced, rm.rcListerSynced) {
+utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
 return
 }

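Editor's note: Run now gates on both lister-synced functions, so the workers never observe a half-filled cache. Below is a small illustrative sketch of that gating pattern as a hypothetical helper (it does not exist in the file) that leans on the cache, utilruntime, and fmt packages the controller already imports.

```go
// waitThenStart refuses to start the workers until every informer the
// controller reads from reports HasSynced, or the stop channel closes first.
func waitThenStart(stopCh <-chan struct{}, startWorkers func(), synced ...cache.InformerSynced) {
	if !cache.WaitForCacheSync(stopCh, synced...) {
		utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
		return
	}
	startWorkers()
}

// Inside Run the call would look roughly like (sketch only):
//   waitThenStart(stopCh,
//       func() { go wait.Until(rm.worker, time.Second, stopCh) },
//       rm.podListerSynced, rm.rcListerSynced)
```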
@@ -606,16 +589,19 @@ func (rm *ReplicationManager) syncReplicationController(key string) error {
 glog.V(4).Infof("Finished syncing controller %q (%v)", key, time.Now().Sub(startTime))
 }()

-obj, exists, err := rm.rcLister.Indexer.GetByKey(key)
+namespace, name, err := cache.SplitMetaNamespaceKey(key)
 if err != nil {
 return err
 }
-if !exists {
+rc, err := rm.rcLister.ReplicationControllers(namespace).Get(name)
+if errors.IsNotFound(err) {
 glog.Infof("Replication Controller has been deleted %v", key)
 rm.expectations.DeleteExpectations(key)
 return nil
 }
-rc := *obj.(*v1.ReplicationController)
+if err != nil {
+return err
+}

 trace.Step("ReplicationController restored")
 rcNeedsSync := rm.expectations.SatisfiedExpectations(key)
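Editor's note: the sync path above stops reading the raw indexer by key and instead splits the work-queue key and resolves the object through the namespaced lister. The sketch below pulls that lookup out into a hypothetical helper (the real logic stays inlined in syncReplicationController and also carries the tracing and expectation bookkeeping shown in the hunk).

```go
// getRCForKey resolves a work-queue key through the generated lister. A nil,
// nil return means the controller no longer exists and its expectations have
// already been forgotten, so the caller has nothing left to do.
func (rm *ReplicationManager) getRCForKey(key string) (*v1.ReplicationController, error) {
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return nil, err
	}
	rc, err := rm.rcLister.ReplicationControllers(namespace).Get(name)
	if errors.IsNotFound(err) {
		rm.expectations.DeleteExpectations(key)
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return rc, nil
}
```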
@@ -680,14 +666,20 @@ func (rm *ReplicationManager) syncReplicationController(key string) error {

 var manageReplicasErr error
 if rcNeedsSync && rc.DeletionTimestamp == nil {
-manageReplicasErr = rm.manageReplicas(filteredPods, &rc)
+manageReplicasErr = rm.manageReplicas(filteredPods, rc)
 }
 trace.Step("manageReplicas done")

+copy, err := api.Scheme.DeepCopy(rc)
+if err != nil {
+return err
+}
+rc = copy.(*v1.ReplicationController)
+
 newStatus := calculateStatus(rc, filteredPods, manageReplicasErr)

 // Always updates status as pods come up or die.
-if err := updateReplicationControllerStatus(rm.kubeClient.Core().ReplicationControllers(rc.Namespace), rc, newStatus); err != nil {
+if err := updateReplicationControllerStatus(rm.kubeClient.Core().ReplicationControllers(rc.Namespace), *rc, newStatus); err != nil {
 // Multiple things could lead to this update failing. Returning an error causes a requeue without forcing a hotloop
 return err
 }
@@ -44,8 +44,9 @@ import (
 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
 fakeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
+informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
+coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1"
 "k8s.io/kubernetes/pkg/controller"
-"k8s.io/kubernetes/pkg/controller/informers"
 "k8s.io/kubernetes/pkg/securitycontext"
 )

@@ -159,16 +160,25 @@ type serverResponse struct {
 obj interface{}
 }

+func NewReplicationManagerFromClient(kubeClient clientset.Interface, burstReplicas int, lookupCacheSize int) (*ReplicationManager, coreinformers.PodInformer, coreinformers.ReplicationControllerInformer) {
+informerFactory := informers.NewSharedInformerFactory(nil, kubeClient, controller.NoResyncPeriodFunc())
+podInformer := informerFactory.Core().V1().Pods()
+rcInformer := informerFactory.Core().V1().ReplicationControllers()
+rm := NewReplicationManager(podInformer, rcInformer, kubeClient, burstReplicas, lookupCacheSize, false)
+rm.podListerSynced = alwaysReady
+rm.rcListerSynced = alwaysReady
+return rm, podInformer, rcInformer
+}
+
 func TestSyncReplicationControllerDoesNothing(t *testing.T) {
 c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
 fakePodControl := controller.FakePodControl{}
-manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-manager.podListerSynced = alwaysReady
+manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0)

 // 2 running pods, a controller with 2 replicas, sync is a no-op
 controllerSpec := newReplicationController(2)
-manager.rcLister.Indexer.Add(controllerSpec)
-newPodList(manager.podLister.Indexer, 2, v1.PodRunning, controllerSpec, "pod")
+rcInformer.Informer().GetIndexer().Add(controllerSpec)
+newPodList(podInformer.Informer().GetIndexer(), 2, v1.PodRunning, controllerSpec, "pod")

 manager.podControl = &fakePodControl
 manager.syncReplicationController(getKey(controllerSpec, t))
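Editor's note: because the manager no longer exposes podLister.Indexer, the test helper above also returns the pod and rc informers so fixtures can be written straight into the caches the listers read from. A compact sketch of the pattern the converted tests below follow (the identifiers are the ones defined in this test file; the expected create/delete counts are placeholders):

```go
func TestSyncSketch(t *testing.T) {
	c := fake.NewSimpleClientset()
	manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0)

	// Seed the shared caches directly; the manager's listers read these indexers.
	rc := newReplicationController(2)
	rcInformer.Informer().GetIndexer().Add(rc)
	newPodList(podInformer.Informer().GetIndexer(), 2, v1.PodRunning, rc, "pod")

	fakePodControl := controller.FakePodControl{}
	manager.podControl = &fakePodControl
	manager.syncReplicationController(getKey(rc, t))
	validateSyncReplication(t, &fakePodControl, 0, 0, 0) // expected counts are placeholders
}
```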
@@ -178,14 +188,13 @@ func TestSyncReplicationControllerDoesNothing(t *testing.T) {
 func TestSyncReplicationControllerDeletes(t *testing.T) {
 c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
 fakePodControl := controller.FakePodControl{}
-manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-manager.podListerSynced = alwaysReady
+manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0)
 manager.podControl = &fakePodControl

 // 2 running pods and a controller with 1 replica, one pod delete expected
 controllerSpec := newReplicationController(1)
-manager.rcLister.Indexer.Add(controllerSpec)
-newPodList(manager.podLister.Indexer, 2, v1.PodRunning, controllerSpec, "pod")
+rcInformer.Informer().GetIndexer().Add(controllerSpec)
+newPodList(podInformer.Informer().GetIndexer(), 2, v1.PodRunning, controllerSpec, "pod")

 manager.syncReplicationController(getKey(controllerSpec, t))
 validateSyncReplication(t, &fakePodControl, 0, 1, 0)
@@ -194,8 +203,7 @@ func TestSyncReplicationControllerDeletes(t *testing.T) {
 func TestDeleteFinalStateUnknown(t *testing.T) {
 c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
 fakePodControl := controller.FakePodControl{}
-manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-manager.podListerSynced = alwaysReady
+manager, _, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0)
 manager.podControl = &fakePodControl

 received := make(chan string)
@@ -207,7 +215,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
 // The DeletedFinalStateUnknown object should cause the rc manager to insert
 // the controller matching the selectors of the deleted pod into the work queue.
 controllerSpec := newReplicationController(1)
-manager.rcLister.Indexer.Add(controllerSpec)
+rcInformer.Informer().GetIndexer().Add(controllerSpec)
 pods := newPodList(nil, 1, v1.PodRunning, controllerSpec, "pod")
 manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]})

@@ -226,12 +234,11 @@ func TestDeleteFinalStateUnknown(t *testing.T) {

 func TestSyncReplicationControllerCreates(t *testing.T) {
 c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
-manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-manager.podListerSynced = alwaysReady
+manager, _, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0)

 // A controller with 2 replicas and no pods in the store, 2 creates expected
 rc := newReplicationController(2)
-manager.rcLister.Indexer.Add(rc)
+rcInformer.Informer().GetIndexer().Add(rc)

 fakePodControl := controller.FakePodControl{}
 manager.podControl = &fakePodControl
@@ -248,15 +255,14 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
 testServer := httptest.NewServer(&fakeHandler)
 defer testServer.Close()
 c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
-manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-manager.podListerSynced = alwaysReady
+manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0)

 // Steady state for the replication controller, no Status.Replicas updates expected
 activePods := 5
 rc := newReplicationController(activePods)
-manager.rcLister.Indexer.Add(rc)
+rcInformer.Informer().GetIndexer().Add(rc)
 rc.Status = v1.ReplicationControllerStatus{Replicas: int32(activePods), ReadyReplicas: int32(activePods), AvailableReplicas: int32(activePods)}
-newPodList(manager.podLister.Indexer, activePods, v1.PodRunning, rc, "pod")
+newPodList(podInformer.Informer().GetIndexer(), activePods, v1.PodRunning, rc, "pod")

 fakePodControl := controller.FakePodControl{}
 manager.podControl = &fakePodControl
@@ -289,20 +295,19 @@ func TestControllerUpdateReplicas(t *testing.T) {
 testServer := httptest.NewServer(&fakeHandler)
 defer testServer.Close()
 c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
-manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-manager.podListerSynced = alwaysReady
+manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0)

 // Insufficient number of pods in the system, and Status.Replicas is wrong;
 // Status.Replica should update to match number of pods in system, 1 new pod should be created.
 rc := newReplicationController(5)
-manager.rcLister.Indexer.Add(rc)
+rcInformer.Informer().GetIndexer().Add(rc)
 rc.Status = v1.ReplicationControllerStatus{Replicas: 2, FullyLabeledReplicas: 6, ReadyReplicas: 2, AvailableReplicas: 2, ObservedGeneration: 0}
 rc.Generation = 1
-newPodList(manager.podLister.Indexer, 2, v1.PodRunning, rc, "pod")
+newPodList(podInformer.Informer().GetIndexer(), 2, v1.PodRunning, rc, "pod")
 rcCopy := *rc
 extraLabelMap := map[string]string{"foo": "bar", "extraKey": "extraValue"}
 rcCopy.Spec.Selector = extraLabelMap
-newPodList(manager.podLister.Indexer, 2, v1.PodRunning, &rcCopy, "podWithExtraLabel")
+newPodList(podInformer.Informer().GetIndexer(), 2, v1.PodRunning, &rcCopy, "podWithExtraLabel")

 // This response body is just so we don't err out decoding the http response
 response := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.ReplicationController{})
@@ -335,13 +340,12 @@ func TestSyncReplicationControllerDormancy(t *testing.T) {
 defer testServer.Close()
 c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
 fakePodControl := controller.FakePodControl{}
-manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-manager.podListerSynced = alwaysReady
+manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0)
 manager.podControl = &fakePodControl

 controllerSpec := newReplicationController(2)
-manager.rcLister.Indexer.Add(controllerSpec)
-newPodList(manager.podLister.Indexer, 1, v1.PodRunning, controllerSpec, "pod")
+rcInformer.Informer().GetIndexer().Add(controllerSpec)
+newPodList(podInformer.Informer().GetIndexer(), 1, v1.PodRunning, controllerSpec, "pod")

 // Creates a replica and sets expectations
 controllerSpec.Status.Replicas = 1
@@ -388,8 +392,7 @@ func TestSyncReplicationControllerDormancy(t *testing.T) {
 }

 func TestPodControllerLookup(t *testing.T) {
-manager := NewReplicationManagerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}), controller.NoResyncPeriodFunc, BurstReplicas, 0)
-manager.podListerSynced = alwaysReady
+manager, _, rcInformer := NewReplicationManagerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}}), BurstReplicas, 0)
 testCases := []struct {
 inRCs []*v1.ReplicationController
 pod *v1.Pod
@@ -435,7 +438,7 @@ func TestPodControllerLookup(t *testing.T) {
 }
 for _, c := range testCases {
 for _, r := range c.inRCs {
-manager.rcLister.Indexer.Add(r)
+rcInformer.Informer().GetIndexer().Add(r)
 }
 if rc := manager.getPodController(c.pod); rc != nil {
 if c.outRCName != rc.Name {
@@ -453,12 +456,11 @@ func TestWatchControllers(t *testing.T) {
 c.AddWatchReactor("replicationcontrollers", core.DefaultWatchReactor(fakeWatch, nil))
 stopCh := make(chan struct{})
 defer close(stopCh)
-informers := informers.NewSharedInformerFactory(c, nil, controller.NoResyncPeriodFunc())
-podInformer := informers.Pods().Informer()
-rcInformer := informers.ReplicationControllers().Informer()
+informers := informers.NewSharedInformerFactory(nil, c, controller.NoResyncPeriodFunc())
+podInformer := informers.Core().V1().Pods()
+rcInformer := informers.Core().V1().ReplicationControllers()
 manager := NewReplicationManager(podInformer, rcInformer, c, BurstReplicas, 0, false)
 informers.Start(stopCh)
-manager.podListerSynced = alwaysReady

 var testControllerSpec v1.ReplicationController
 received := make(chan string)
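Editor's note: TestWatchControllers above builds the generated factory directly; note the flipped argument order, with the versioned clientset now in the second slot (the first slot appears to be reserved for the internal clientset and is left nil, an assumption based on the calls in this diff). A hedged sketch of the watch-driven flow, relying on this file's existing imports; the syncHandler and the assertions on the resulting sync are omitted.

```go
func TestWatchSketch(t *testing.T) {
	fakeWatch := watch.NewFake()
	c := &fake.Clientset{}
	c.AddWatchReactor("replicationcontrollers", core.DefaultWatchReactor(fakeWatch, nil))

	stopCh := make(chan struct{})
	defer close(stopCh)

	factory := informers.NewSharedInformerFactory(nil, c, controller.NoResyncPeriodFunc())
	rcInformer := factory.Core().V1().ReplicationControllers()
	manager := NewReplicationManager(factory.Core().V1().Pods(), rcInformer, c, BurstReplicas, 0, false)
	factory.Start(stopCh)

	// Objects pushed into the fake watch flow through the shared informer and
	// land in the same store the manager's rcLister reads from.
	rc := newReplicationController(1)
	fakeWatch.Add(rc)
	_ = manager // assertions on the resulting sync are elided in this sketch
}
```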
@@ -467,7 +469,7 @@ func TestWatchControllers(t *testing.T) {
 // and eventually into the syncHandler. The handler validates the received controller
 // and closes the received channel to indicate that the test can finish.
 manager.syncHandler = func(key string) error {
-obj, exists, err := manager.rcLister.Indexer.GetByKey(key)
+obj, exists, err := rcInformer.Informer().GetIndexer().GetByKey(key)
 if !exists || err != nil {
 t.Errorf("Expected to find controller under key %v", key)
 }
@@ -497,18 +499,17 @@ func TestWatchPods(t *testing.T) {
 fakeWatch := watch.NewFake()
 c := &fake.Clientset{}
 c.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
-manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-manager.podListerSynced = alwaysReady
+manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0)

 // Put one rc and one pod into the controller's stores
 testControllerSpec := newReplicationController(1)
-manager.rcLister.Indexer.Add(testControllerSpec)
+rcInformer.Informer().GetIndexer().Add(testControllerSpec)
 received := make(chan string)
 // The pod update sent through the fakeWatcher should figure out the managing rc and
 // send it into the syncHandler.
 manager.syncHandler = func(key string) error {

-obj, exists, err := manager.rcLister.Indexer.GetByKey(key)
+obj, exists, err := rcInformer.Informer().GetIndexer().GetByKey(key)
 if !exists || err != nil {
 t.Errorf("Expected to find controller under key %v", key)
 }
@@ -523,7 +524,7 @@ func TestWatchPods(t *testing.T) {
 // and make sure it hits the sync method for the right rc.
 stopCh := make(chan struct{})
 defer close(stopCh)
-go manager.internalPodInformer.Run(stopCh)
+go podInformer.Informer().Run(stopCh)
 go wait.Until(manager.worker, 10*time.Millisecond, stopCh)

 pods := newPodList(nil, 1, v1.PodRunning, testControllerSpec, "pod")
@@ -539,13 +540,12 @@ func TestWatchPods(t *testing.T) {
 }

 func TestUpdatePods(t *testing.T) {
-manager := NewReplicationManagerFromClient(fake.NewSimpleClientset(), controller.NoResyncPeriodFunc, BurstReplicas, 0)
-manager.podListerSynced = alwaysReady
+manager, podInformer, rcInformer := NewReplicationManagerFromClient(fake.NewSimpleClientset(), BurstReplicas, 0)

 received := make(chan string)

 manager.syncHandler = func(key string) error {
-obj, exists, err := manager.rcLister.Indexer.GetByKey(key)
+obj, exists, err := rcInformer.Informer().GetIndexer().GetByKey(key)
 if !exists || err != nil {
 t.Errorf("Expected to find controller under key %v", key)
 }
@@ -559,16 +559,16 @@ func TestUpdatePods(t *testing.T) {

 // Put 2 rcs and one pod into the controller's stores
 testControllerSpec1 := newReplicationController(1)
-manager.rcLister.Indexer.Add(testControllerSpec1)
+rcInformer.Informer().GetIndexer().Add(testControllerSpec1)
 testControllerSpec2 := *testControllerSpec1
 testControllerSpec2.Spec.Selector = map[string]string{"bar": "foo"}
 testControllerSpec2.Name = "barfoo"
-manager.rcLister.Indexer.Add(&testControllerSpec2)
+rcInformer.Informer().GetIndexer().Add(&testControllerSpec2)

 // case 1: We put in the podLister a pod with labels matching
 // testControllerSpec1, then update its labels to match testControllerSpec2.
 // We expect to receive a sync request for both controllers.
-pod1 := newPodList(manager.podLister.Indexer, 1, v1.PodRunning, testControllerSpec1, "pod").Items[0]
+pod1 := newPodList(podInformer.Informer().GetIndexer(), 1, v1.PodRunning, testControllerSpec1, "pod").Items[0]
 pod1.ResourceVersion = "1"
 pod2 := pod1
 pod2.Labels = testControllerSpec2.Spec.Selector
@@ -617,13 +617,12 @@ func TestControllerUpdateRequeue(t *testing.T) {
 defer testServer.Close()

 c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
-manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-manager.podListerSynced = alwaysReady
+manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0)

 rc := newReplicationController(1)
-manager.rcLister.Indexer.Add(rc)
+rcInformer.Informer().GetIndexer().Add(rc)
 rc.Status = v1.ReplicationControllerStatus{Replicas: 2}
-newPodList(manager.podLister.Indexer, 1, v1.PodRunning, rc, "pod")
+newPodList(podInformer.Informer().GetIndexer(), 1, v1.PodRunning, rc, "pod")

 fakePodControl := controller.FakePodControl{}
 manager.podControl = &fakePodControl
@@ -688,12 +687,11 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
 func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) {
 c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
 fakePodControl := controller.FakePodControl{}
-manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, burstReplicas, 0)
-manager.podListerSynced = alwaysReady
+manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, burstReplicas, 0)
 manager.podControl = &fakePodControl

 controllerSpec := newReplicationController(numReplicas)
-manager.rcLister.Indexer.Add(controllerSpec)
+rcInformer.Informer().GetIndexer().Add(controllerSpec)

 expectedPods := 0
 pods := newPodList(nil, numReplicas, v1.PodPending, controllerSpec, "pod")
@@ -707,14 +705,14 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 for _, replicas := range []int{numReplicas, 0} {

 *(controllerSpec.Spec.Replicas) = int32(replicas)
-manager.rcLister.Indexer.Add(controllerSpec)
+rcInformer.Informer().GetIndexer().Add(controllerSpec)

 for i := 0; i < numReplicas; i += burstReplicas {
 manager.syncReplicationController(getKey(controllerSpec, t))

 // The store accrues active pods. It's also used by the rc to determine how many
 // replicas to create.
-activePods := len(manager.podLister.Indexer.List())
+activePods := len(podInformer.Informer().GetIndexer().List())
 if replicas != 0 {
 // This is the number of pods currently "in flight". They were created by the rc manager above,
 // which then puts the rc to sleep till all of them have been observed.
@@ -728,7 +726,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 // This simulates the watch events for all but 1 of the expected pods.
 // None of these should wake the controller because it has expectations==BurstReplicas.
 for i := 0; i < expectedPods-1; i++ {
-manager.podLister.Indexer.Add(&pods.Items[i])
+podInformer.Informer().GetIndexer().Add(&pods.Items[i])
 manager.addPod(&pods.Items[i])
 }

@@ -764,7 +762,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 // has exactly one expectation at the end, to verify that we
 // don't double delete.
 for i := range podsToDelete[1:] {
-manager.podLister.Indexer.Delete(podsToDelete[i])
+podInformer.Informer().GetIndexer().Delete(podsToDelete[i])
 manager.deletePod(podsToDelete[i])
 }
 podExp, exists, err := manager.expectations.GetExpectations(rcKey)
@@ -785,7 +783,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 // The last add pod will decrease the expectation of the rc to 0,
 // which will cause it to create/delete the remaining replicas up to burstReplicas.
 if replicas != 0 {
-manager.podLister.Indexer.Add(&pods.Items[expectedPods-1])
+podInformer.Informer().GetIndexer().Add(&pods.Items[expectedPods-1])
 manager.addPod(&pods.Items[expectedPods-1])
 } else {
 expectedDel := manager.expectations.GetUIDs(getKey(controllerSpec, t))
@@ -800,14 +798,14 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 Labels: controllerSpec.Spec.Selector,
 },
 }
-manager.podLister.Indexer.Delete(lastPod)
+podInformer.Informer().GetIndexer().Delete(lastPod)
 manager.deletePod(lastPod)
 }
 pods.Items = pods.Items[expectedPods:]
 }

 // Confirm that we've created the right number of replicas
-activePods := int32(len(manager.podLister.Indexer.List()))
+activePods := int32(len(podInformer.Informer().GetIndexer().List()))
 if activePods != *(controllerSpec.Spec.Replicas) {
 t.Fatalf("Unexpected number of active pods, expected %d, got %d", *(controllerSpec.Spec.Replicas), activePods)
 }
@@ -838,14 +836,13 @@ func (fe FakeRCExpectations) SatisfiedExpectations(controllerKey string) bool {
 func TestRCSyncExpectations(t *testing.T) {
 c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
 fakePodControl := controller.FakePodControl{}
-manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 2, 0)
-manager.podListerSynced = alwaysReady
+manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, 2, 0)
 manager.podControl = &fakePodControl

 controllerSpec := newReplicationController(2)
-manager.rcLister.Indexer.Add(controllerSpec)
+rcInformer.Informer().GetIndexer().Add(controllerSpec)
 pods := newPodList(nil, 2, v1.PodPending, controllerSpec, "pod")
-manager.podLister.Indexer.Add(&pods.Items[0])
+podInformer.Informer().GetIndexer().Add(&pods.Items[0])
 postExpectationsPod := pods.Items[1]

 manager.expectations = controller.NewUIDTrackingControllerExpectations(FakeRCExpectations{
@@ -853,7 +850,7 @@ func TestRCSyncExpectations(t *testing.T) {
 // If we check active pods before checking expectataions, the rc
 // will create a new replica because it doesn't see this pod, but
 // has fulfilled its expectations.
-manager.podLister.Indexer.Add(&postExpectationsPod)
+podInformer.Informer().GetIndexer().Add(&postExpectationsPod)
 },
 })
 manager.syncReplicationController(getKey(controllerSpec, t))
@@ -862,11 +859,10 @@ func TestRCSyncExpectations(t *testing.T) {

 func TestDeleteControllerAndExpectations(t *testing.T) {
 c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
-manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0)
-manager.podListerSynced = alwaysReady
+manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, 10, 0)

 rc := newReplicationController(1)
-manager.rcLister.Indexer.Add(rc)
+rcInformer.Informer().GetIndexer().Add(rc)

 fakePodControl := controller.FakePodControl{}
 manager.podControl = &fakePodControl
@@ -888,7 +884,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
 if !exists || err != nil {
 t.Errorf("No expectations found for rc")
 }
-manager.rcLister.Indexer.Delete(rc)
+rcInformer.Informer().GetIndexer().Delete(rc)
 manager.syncReplicationController(getKey(rc, t))

 if _, exists, err = manager.expectations.GetExpectations(rcKey); exists {
@@ -897,7 +893,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {

 // This should have no effect, since we've deleted the rc.
 podExp.Add(-1, 0)
-manager.podLister.Indexer.Replace(make([]interface{}, 0), "0")
+podInformer.Informer().GetIndexer().Replace(make([]interface{}, 0), "0")
 manager.syncReplicationController(getKey(rc, t))
 validateSyncReplication(t, &fakePodControl, 0, 0, 0)
 }
@@ -917,8 +913,7 @@ func TestOverlappingRCs(t *testing.T) {
 c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})

 for i := 0; i < 5; i++ {
-manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0)
-manager.podListerSynced = alwaysReady
+manager, _, rcInformer := NewReplicationManagerFromClient(c, 10, 0)

 // Create 10 rcs, shuffled them randomly and insert them into the rc manager's store
 var controllers []*v1.ReplicationController
@@ -930,7 +925,7 @@ func TestOverlappingRCs(t *testing.T) {
 }
 shuffledControllers := shuffle(controllers)
 for j := range shuffledControllers {
-manager.rcLister.Indexer.Add(shuffledControllers[j])
+rcInformer.Informer().GetIndexer().Add(shuffledControllers[j])
 }
 // Add a pod and make sure only the oldest rc is synced
 pods := newPodList(nil, 1, v1.PodPending, controllers[0], "pod")
@@ -946,11 +941,10 @@ func TestOverlappingRCs(t *testing.T) {

 func TestDeletionTimestamp(t *testing.T) {
 c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
-manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0)
-manager.podListerSynced = alwaysReady
+manager, _, rcInformer := NewReplicationManagerFromClient(c, 10, 0)

 controllerSpec := newReplicationController(1)
-manager.rcLister.Indexer.Add(controllerSpec)
+rcInformer.Informer().GetIndexer().Add(controllerSpec)
 rcKey, err := controller.KeyFunc(controllerSpec)
 if err != nil {
 t.Errorf("Couldn't get key for object %#v: %v", controllerSpec, err)
@@ -1036,7 +1030,7 @@ func TestDeletionTimestamp(t *testing.T) {

 func BenchmarkGetPodControllerMultiNS(b *testing.B) {
 client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
-manager := NewReplicationManagerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
+manager, _, rcInformer := NewReplicationManagerFromClient(client, BurstReplicas, 0)

 const nsNum = 1000

@@ -1062,7 +1056,7 @@ func BenchmarkGetPodControllerMultiNS(b *testing.B) {
 ns := fmt.Sprintf("ns-%d", i)
 for j := 0; j < 10; j++ {
 rcName := fmt.Sprintf("rc-%d", j)
-manager.rcLister.Indexer.Add(&v1.ReplicationController{
+rcInformer.Informer().GetIndexer().Add(&v1.ReplicationController{
 ObjectMeta: metav1.ObjectMeta{Name: rcName, Namespace: ns},
 Spec: v1.ReplicationControllerSpec{
 Selector: map[string]string{"rcName": rcName},
@@ -1082,7 +1076,7 @@ func BenchmarkGetPodControllerMultiNS(b *testing.B) {

 func BenchmarkGetPodControllerSingleNS(b *testing.B) {
 client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
-manager := NewReplicationManagerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
+manager, _, rcInformer := NewReplicationManagerFromClient(client, BurstReplicas, 0)

 const rcNum = 1000
 const replicaNum = 3
@@ -1104,7 +1098,7 @@ func BenchmarkGetPodControllerSingleNS(b *testing.B) {

 for i := 0; i < rcNum; i++ {
 rcName := fmt.Sprintf("rc-%d", i)
-manager.rcLister.Indexer.Add(&v1.ReplicationController{
+rcInformer.Informer().GetIndexer().Add(&v1.ReplicationController{
 ObjectMeta: metav1.ObjectMeta{Name: rcName, Namespace: "foo"},
 Spec: v1.ReplicationControllerSpec{
 Selector: map[string]string{"rcName": rcName},
@@ -1121,26 +1115,25 @@ func BenchmarkGetPodControllerSingleNS(b *testing.B) {
 }
 }

 // setupManagerWithGCEnabled creates a RC manager with a fakePodControl and with garbageCollectorEnabled set to true
-func setupManagerWithGCEnabled(objs ...runtime.Object) (manager *ReplicationManager, fakePodControl *controller.FakePodControl) {
+func setupManagerWithGCEnabled(objs ...runtime.Object) (manager *ReplicationManager, fakePodControl *controller.FakePodControl, podInformer coreinformers.PodInformer, rcInformer coreinformers.ReplicationControllerInformer) {
 c := fakeclientset.NewSimpleClientset(objs...)
 fakePodControl = &controller.FakePodControl{}
-manager = NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
+manager, podInformer, rcInformer = NewReplicationManagerFromClient(c, BurstReplicas, 0)
 manager.garbageCollectorEnabled = true
-manager.podListerSynced = alwaysReady
 manager.podControl = fakePodControl
-return manager, fakePodControl
+return manager, fakePodControl, podInformer, rcInformer
 }

 func TestDoNotPatchPodWithOtherControlRef(t *testing.T) {
-manager, fakePodControl := setupManagerWithGCEnabled()
+manager, fakePodControl, podInformer, rcInformer := setupManagerWithGCEnabled()
 rc := newReplicationController(2)
-manager.rcLister.Indexer.Add(rc)
+rcInformer.Informer().GetIndexer().Add(rc)
 var trueVar = true
 otherControllerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "v1", Kind: "ReplicationController", Name: "AnotherRC", Controller: &trueVar}
|
||||||
// add to podLister a matching Pod controlled by another controller. Expect no patch.
|
// add to podLister a matching Pod controlled by another controller. Expect no patch.
|
||||||
pod := newPod("pod", rc, v1.PodRunning, nil)
|
pod := newPod("pod", rc, v1.PodRunning, nil)
|
||||||
pod.OwnerReferences = []metav1.OwnerReference{otherControllerReference}
|
pod.OwnerReferences = []metav1.OwnerReference{otherControllerReference}
|
||||||
manager.podLister.Indexer.Add(pod)
|
podInformer.Informer().GetIndexer().Add(pod)
|
||||||
err := manager.syncReplicationController(getKey(rc, t))
|
err := manager.syncReplicationController(getKey(rc, t))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
|
@ -1151,15 +1144,15 @@ func TestDoNotPatchPodWithOtherControlRef(t *testing.T) {
|
||||||
|
|
||||||
func TestPatchPodWithOtherOwnerRef(t *testing.T) {
|
func TestPatchPodWithOtherOwnerRef(t *testing.T) {
|
||||||
rc := newReplicationController(2)
|
rc := newReplicationController(2)
|
||||||
manager, fakePodControl := setupManagerWithGCEnabled(rc)
|
manager, fakePodControl, podInformer, rcInformer := setupManagerWithGCEnabled(rc)
|
||||||
manager.rcLister.Indexer.Add(rc)
|
rcInformer.Informer().GetIndexer().Add(rc)
|
||||||
// add to podLister one more matching pod that doesn't have a controller
|
// add to podLister one more matching pod that doesn't have a controller
|
||||||
// ref, but has an owner ref pointing to other object. Expect a patch to
|
// ref, but has an owner ref pointing to other object. Expect a patch to
|
||||||
// take control of it.
|
// take control of it.
|
||||||
unrelatedOwnerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "batch/v1", Kind: "Job", Name: "Job"}
|
unrelatedOwnerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "batch/v1", Kind: "Job", Name: "Job"}
|
||||||
pod := newPod("pod", rc, v1.PodRunning, nil)
|
pod := newPod("pod", rc, v1.PodRunning, nil)
|
||||||
pod.OwnerReferences = []metav1.OwnerReference{unrelatedOwnerReference}
|
pod.OwnerReferences = []metav1.OwnerReference{unrelatedOwnerReference}
|
||||||
manager.podLister.Indexer.Add(pod)
|
podInformer.Informer().GetIndexer().Add(pod)
|
||||||
|
|
||||||
err := manager.syncReplicationController(getKey(rc, t))
|
err := manager.syncReplicationController(getKey(rc, t))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -1171,14 +1164,14 @@ func TestPatchPodWithOtherOwnerRef(t *testing.T) {
|
||||||
|
|
||||||
func TestPatchPodWithCorrectOwnerRef(t *testing.T) {
|
func TestPatchPodWithCorrectOwnerRef(t *testing.T) {
|
||||||
rc := newReplicationController(2)
|
rc := newReplicationController(2)
|
||||||
manager, fakePodControl := setupManagerWithGCEnabled(rc)
|
manager, fakePodControl, podInformer, rcInformer := setupManagerWithGCEnabled(rc)
|
||||||
manager.rcLister.Indexer.Add(rc)
|
rcInformer.Informer().GetIndexer().Add(rc)
|
||||||
// add to podLister a matching pod that has an ownerRef pointing to the rc,
|
// add to podLister a matching pod that has an ownerRef pointing to the rc,
|
||||||
// but ownerRef.Controller is false. Expect a patch to take control it.
|
// but ownerRef.Controller is false. Expect a patch to take control it.
|
||||||
rcOwnerReference := metav1.OwnerReference{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name}
|
rcOwnerReference := metav1.OwnerReference{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name}
|
||||||
pod := newPod("pod", rc, v1.PodRunning, nil)
|
pod := newPod("pod", rc, v1.PodRunning, nil)
|
||||||
pod.OwnerReferences = []metav1.OwnerReference{rcOwnerReference}
|
pod.OwnerReferences = []metav1.OwnerReference{rcOwnerReference}
|
||||||
manager.podLister.Indexer.Add(pod)
|
podInformer.Informer().GetIndexer().Add(pod)
|
||||||
|
|
||||||
err := manager.syncReplicationController(getKey(rc, t))
|
err := manager.syncReplicationController(getKey(rc, t))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -1190,12 +1183,12 @@ func TestPatchPodWithCorrectOwnerRef(t *testing.T) {
|
||||||
|
|
||||||
func TestPatchPodFails(t *testing.T) {
|
func TestPatchPodFails(t *testing.T) {
|
||||||
rc := newReplicationController(2)
|
rc := newReplicationController(2)
|
||||||
manager, fakePodControl := setupManagerWithGCEnabled(rc)
|
manager, fakePodControl, podInformer, rcInformer := setupManagerWithGCEnabled(rc)
|
||||||
manager.rcLister.Indexer.Add(rc)
|
rcInformer.Informer().GetIndexer().Add(rc)
|
||||||
// add to podLister two matching pods. Expect two patches to take control
|
// add to podLister two matching pods. Expect two patches to take control
|
||||||
// them.
|
// them.
|
||||||
manager.podLister.Indexer.Add(newPod("pod1", rc, v1.PodRunning, nil))
|
podInformer.Informer().GetIndexer().Add(newPod("pod1", rc, v1.PodRunning, nil))
|
||||||
manager.podLister.Indexer.Add(newPod("pod2", rc, v1.PodRunning, nil))
|
podInformer.Informer().GetIndexer().Add(newPod("pod2", rc, v1.PodRunning, nil))
|
||||||
// let both patches fail. The rc manager will assume it fails to take
|
// let both patches fail. The rc manager will assume it fails to take
|
||||||
// control of the pods and create new ones.
|
// control of the pods and create new ones.
|
||||||
fakePodControl.Err = fmt.Errorf("Fake Error")
|
fakePodControl.Err = fmt.Errorf("Fake Error")
|
||||||
|
@ -1209,13 +1202,13 @@ func TestPatchPodFails(t *testing.T) {
|
||||||
|
|
||||||
func TestPatchExtraPodsThenDelete(t *testing.T) {
|
func TestPatchExtraPodsThenDelete(t *testing.T) {
|
||||||
rc := newReplicationController(2)
|
rc := newReplicationController(2)
|
||||||
manager, fakePodControl := setupManagerWithGCEnabled(rc)
|
manager, fakePodControl, podInformer, rcInformer := setupManagerWithGCEnabled(rc)
|
||||||
manager.rcLister.Indexer.Add(rc)
|
rcInformer.Informer().GetIndexer().Add(rc)
|
||||||
// add to podLister three matching pods. Expect three patches to take control
|
// add to podLister three matching pods. Expect three patches to take control
|
||||||
// them, and later delete one of them.
|
// them, and later delete one of them.
|
||||||
manager.podLister.Indexer.Add(newPod("pod1", rc, v1.PodRunning, nil))
|
podInformer.Informer().GetIndexer().Add(newPod("pod1", rc, v1.PodRunning, nil))
|
||||||
manager.podLister.Indexer.Add(newPod("pod2", rc, v1.PodRunning, nil))
|
podInformer.Informer().GetIndexer().Add(newPod("pod2", rc, v1.PodRunning, nil))
|
||||||
manager.podLister.Indexer.Add(newPod("pod3", rc, v1.PodRunning, nil))
|
podInformer.Informer().GetIndexer().Add(newPod("pod3", rc, v1.PodRunning, nil))
|
||||||
err := manager.syncReplicationController(getKey(rc, t))
|
err := manager.syncReplicationController(getKey(rc, t))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
|
@ -1225,9 +1218,9 @@ func TestPatchExtraPodsThenDelete(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestUpdateLabelsRemoveControllerRef(t *testing.T) {
|
func TestUpdateLabelsRemoveControllerRef(t *testing.T) {
|
||||||
manager, fakePodControl := setupManagerWithGCEnabled()
|
manager, fakePodControl, podInformer, rcInformer := setupManagerWithGCEnabled()
|
||||||
rc := newReplicationController(2)
|
rc := newReplicationController(2)
|
||||||
manager.rcLister.Indexer.Add(rc)
|
rcInformer.Informer().GetIndexer().Add(rc)
|
||||||
// put one pod in the podLister
|
// put one pod in the podLister
|
||||||
pod := newPod("pod", rc, v1.PodRunning, nil)
|
pod := newPod("pod", rc, v1.PodRunning, nil)
|
||||||
pod.ResourceVersion = "1"
|
pod.ResourceVersion = "1"
|
||||||
|
@ -1241,7 +1234,7 @@ func TestUpdateLabelsRemoveControllerRef(t *testing.T) {
|
||||||
// add the updatedPod to the store. This is consistent with the behavior of
|
// add the updatedPod to the store. This is consistent with the behavior of
|
||||||
// the Informer: Informer updates the store before call the handler
|
// the Informer: Informer updates the store before call the handler
|
||||||
// (updatePod() in this case).
|
// (updatePod() in this case).
|
||||||
manager.podLister.Indexer.Add(&updatedPod)
|
podInformer.Informer().GetIndexer().Add(&updatedPod)
|
||||||
// send a update of the same pod with modified labels
|
// send a update of the same pod with modified labels
|
||||||
manager.updatePod(pod, &updatedPod)
|
manager.updatePod(pod, &updatedPod)
|
||||||
// verifies that rc is added to the queue
|
// verifies that rc is added to the queue
|
||||||
|
@ -1263,17 +1256,17 @@ func TestUpdateLabelsRemoveControllerRef(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestUpdateSelectorControllerRef(t *testing.T) {
|
func TestUpdateSelectorControllerRef(t *testing.T) {
|
||||||
manager, fakePodControl := setupManagerWithGCEnabled()
|
manager, fakePodControl, podInformer, rcInformer := setupManagerWithGCEnabled()
|
||||||
rc := newReplicationController(2)
|
rc := newReplicationController(2)
|
||||||
// put 2 pods in the podLister
|
// put 2 pods in the podLister
|
||||||
newPodList(manager.podLister.Indexer, 2, v1.PodRunning, rc, "pod")
|
newPodList(podInformer.Informer().GetIndexer(), 2, v1.PodRunning, rc, "pod")
|
||||||
// update the RC so that its selector no longer matches the pods
|
// update the RC so that its selector no longer matches the pods
|
||||||
updatedRC := *rc
|
updatedRC := *rc
|
||||||
updatedRC.Spec.Selector = map[string]string{"foo": "baz"}
|
updatedRC.Spec.Selector = map[string]string{"foo": "baz"}
|
||||||
// put the updatedRC into the store. This is consistent with the behavior of
|
// put the updatedRC into the store. This is consistent with the behavior of
|
||||||
// the Informer: Informer updates the store before call the handler
|
// the Informer: Informer updates the store before call the handler
|
||||||
// (updateRC() in this case).
|
// (updateRC() in this case).
|
||||||
manager.rcLister.Indexer.Add(&updatedRC)
|
rcInformer.Informer().GetIndexer().Add(&updatedRC)
|
||||||
manager.updateRC(rc, &updatedRC)
|
manager.updateRC(rc, &updatedRC)
|
||||||
// verifies that the rc is added to the queue
|
// verifies that the rc is added to the queue
|
||||||
rcKey := getKey(rc, t)
|
rcKey := getKey(rc, t)
|
||||||
|
@ -1296,13 +1289,13 @@ func TestUpdateSelectorControllerRef(t *testing.T) {
|
||||||
// RC manager shouldn't adopt or create more pods if the rc is about to be
|
// RC manager shouldn't adopt or create more pods if the rc is about to be
|
||||||
// deleted.
|
// deleted.
|
||||||
func TestDoNotAdoptOrCreateIfBeingDeleted(t *testing.T) {
|
func TestDoNotAdoptOrCreateIfBeingDeleted(t *testing.T) {
|
||||||
manager, fakePodControl := setupManagerWithGCEnabled()
|
manager, fakePodControl, podInformer, rcInformer := setupManagerWithGCEnabled()
|
||||||
rc := newReplicationController(2)
|
rc := newReplicationController(2)
|
||||||
now := metav1.Now()
|
now := metav1.Now()
|
||||||
rc.DeletionTimestamp = &now
|
rc.DeletionTimestamp = &now
|
||||||
manager.rcLister.Indexer.Add(rc)
|
rcInformer.Informer().GetIndexer().Add(rc)
|
||||||
pod1 := newPod("pod1", rc, v1.PodRunning, nil)
|
pod1 := newPod("pod1", rc, v1.PodRunning, nil)
|
||||||
manager.podLister.Indexer.Add(pod1)
|
podInformer.Informer().GetIndexer().Add(pod1)
|
||||||
|
|
||||||
// no patch, no create
|
// no patch, no create
|
||||||
err := manager.syncReplicationController(getKey(rc, t))
|
err := manager.syncReplicationController(getKey(rc, t))
|
||||||
|
@ -1322,17 +1315,16 @@ func TestReadyReplicas(t *testing.T) {
|
||||||
defer testServer.Close()
|
defer testServer.Close()
|
||||||
|
|
||||||
c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||||
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
|
manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0)
|
||||||
manager.podListerSynced = alwaysReady
|
|
||||||
|
|
||||||
// Status.Replica should update to match number of pods in system, 1 new pod should be created.
|
// Status.Replica should update to match number of pods in system, 1 new pod should be created.
|
||||||
rc := newReplicationController(2)
|
rc := newReplicationController(2)
|
||||||
rc.Status = v1.ReplicationControllerStatus{Replicas: 2, ReadyReplicas: 0, AvailableReplicas: 0, ObservedGeneration: 1}
|
rc.Status = v1.ReplicationControllerStatus{Replicas: 2, ReadyReplicas: 0, AvailableReplicas: 0, ObservedGeneration: 1}
|
||||||
rc.Generation = 1
|
rc.Generation = 1
|
||||||
manager.rcLister.Indexer.Add(rc)
|
rcInformer.Informer().GetIndexer().Add(rc)
|
||||||
|
|
||||||
newPodList(manager.podLister.Indexer, 2, v1.PodPending, rc, "pod")
|
newPodList(podInformer.Informer().GetIndexer(), 2, v1.PodPending, rc, "pod")
|
||||||
newPodList(manager.podLister.Indexer, 2, v1.PodRunning, rc, "pod")
|
newPodList(podInformer.Informer().GetIndexer(), 2, v1.PodRunning, rc, "pod")
|
||||||
|
|
||||||
// This response body is just so we don't err out decoding the http response
|
// This response body is just so we don't err out decoding the http response
|
||||||
response := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.ReplicationController{})
|
response := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.ReplicationController{})
|
||||||
|
@ -1361,8 +1353,7 @@ func TestAvailableReplicas(t *testing.T) {
|
||||||
defer testServer.Close()
|
defer testServer.Close()
|
||||||
|
|
||||||
c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||||
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
|
manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0)
|
||||||
manager.podListerSynced = alwaysReady
|
|
||||||
|
|
||||||
// Status.Replica should update to match number of pods in system, 1 new pod should be created.
|
// Status.Replica should update to match number of pods in system, 1 new pod should be created.
|
||||||
rc := newReplicationController(2)
|
rc := newReplicationController(2)
|
||||||
|
@ -1370,17 +1361,17 @@ func TestAvailableReplicas(t *testing.T) {
|
||||||
rc.Generation = 1
|
rc.Generation = 1
|
||||||
// minReadySeconds set to 15s
|
// minReadySeconds set to 15s
|
||||||
rc.Spec.MinReadySeconds = 15
|
rc.Spec.MinReadySeconds = 15
|
||||||
manager.rcLister.Indexer.Add(rc)
|
rcInformer.Informer().GetIndexer().Add(rc)
|
||||||
|
|
||||||
// First pod becomes ready 20s ago
|
// First pod becomes ready 20s ago
|
||||||
moment := metav1.Time{Time: time.Now().Add(-2e10)}
|
moment := metav1.Time{Time: time.Now().Add(-2e10)}
|
||||||
pod := newPod("pod", rc, v1.PodRunning, &moment)
|
pod := newPod("pod", rc, v1.PodRunning, &moment)
|
||||||
manager.podLister.Indexer.Add(pod)
|
podInformer.Informer().GetIndexer().Add(pod)
|
||||||
|
|
||||||
// Second pod becomes ready now
|
// Second pod becomes ready now
|
||||||
otherMoment := metav1.Now()
|
otherMoment := metav1.Now()
|
||||||
otherPod := newPod("otherPod", rc, v1.PodRunning, &otherMoment)
|
otherPod := newPod("otherPod", rc, v1.PodRunning, &otherMoment)
|
||||||
manager.podLister.Indexer.Add(otherPod)
|
podInformer.Informer().GetIndexer().Add(otherPod)
|
||||||
|
|
||||||
// This response body is just so we don't err out decoding the http response
|
// This response body is just so we don't err out decoding the http response
|
||||||
response := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.ReplicationController{})
|
response := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.ReplicationController{})
|
||||||
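The test changes above are one mechanical substitution: fixtures that used to be written into the manager's hand-written lister fields (manager.rcLister.Indexer, manager.podLister.Indexer) are now written into the caches owned by the generated informers returned from NewReplicationManagerFromClient, and the explicit podListerSynced = alwaysReady lines are dropped (the updated test constructor evidently takes care of that). A condensed sketch of the pattern, reusing the test helpers already shown in the hunks (newReplicationController, newPod, getKey):

    // Sketch only: seed the generated informers' stores directly, then drive one sync.
    manager, podInformer, rcInformer := NewReplicationManagerFromClient(c, BurstReplicas, 0)

    rc := newReplicationController(2)
    rcInformer.Informer().GetIndexer().Add(rc)                                      // was: manager.rcLister.Indexer.Add(rc)
    podInformer.Informer().GetIndexer().Add(newPod("pod", rc, v1.PodRunning, nil))  // was: manager.podLister.Indexer.Add(...)

    if err := manager.syncReplicationController(getKey(rc, t)); err != nil {
        t.Fatal(err)
    }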
@@ -84,7 +84,7 @@ func (o OverlappingControllers) Less(i, j int) bool {
 return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
 }

-func calculateStatus(rc v1.ReplicationController, filteredPods []*v1.Pod, manageReplicasErr error) v1.ReplicationControllerStatus {
+func calculateStatus(rc *v1.ReplicationController, filteredPods []*v1.Pod, manageReplicasErr error) v1.ReplicationControllerStatus {
 newStatus := rc.Status
 // Count the number of pods that have labels matching the labels of the pod
 // template of the replication controller, the matching pods may have more

@@ -16,6 +16,8 @@ go_library(
 "//pkg/api:go_default_library",
 "//pkg/api/v1:go_default_library",
 "//pkg/client/clientset_generated/clientset:go_default_library",
+"//pkg/client/informers/informers_generated/core/v1:go_default_library",
+"//pkg/client/listers/core/v1:go_default_library",
 "//pkg/cloudprovider:go_default_library",
 "//pkg/controller/volume/attachdetach/cache:go_default_library",
 "//pkg/controller/volume/attachdetach/populator:go_default_library",
@@ -42,7 +44,8 @@ go_test(
 library = ":go_default_library",
 tags = ["automanaged"],
 deps = [
-"//pkg/controller/informers:go_default_library",
+"//pkg/client/informers/informers_generated:go_default_library",
+"//pkg/controller:go_default_library",
 "//pkg/controller/volume/attachdetach/testing:go_default_library",
 ],
 )

@@ -33,6 +33,8 @@ import (
 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1"
+corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
 "k8s.io/kubernetes/pkg/cloudprovider"
 "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
 "k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator"
@@ -70,10 +72,10 @@ type AttachDetachController interface {
 // NewAttachDetachController returns a new instance of AttachDetachController.
 func NewAttachDetachController(
 kubeClient clientset.Interface,
-podInformer kcache.SharedInformer,
+podInformer coreinformers.PodInformer,
-nodeInformer kcache.SharedInformer,
+nodeInformer coreinformers.NodeInformer,
-pvcInformer kcache.SharedInformer,
+pvcInformer coreinformers.PersistentVolumeClaimInformer,
-pvInformer kcache.SharedInformer,
+pvInformer coreinformers.PersistentVolumeInformer,
 cloud cloudprovider.Interface,
 plugins []volume.VolumePlugin,
 disableReconciliationSync bool,
@@ -93,23 +95,27 @@ func NewAttachDetachController(
 // dropped pods so they are continuously processed until it is accepted or
 // deleted (probably can't do this with sharedInformer), etc.
 adc := &attachDetachController{
 kubeClient: kubeClient,
-pvcInformer: pvcInformer,
+pvcLister: pvcInformer.Lister(),
-pvInformer: pvInformer,
+pvcsSynced: pvcInformer.Informer().HasSynced,
-cloud: cloud,
+pvLister: pvInformer.Lister(),
+pvsSynced: pvInformer.Informer().HasSynced,
+cloud: cloud,
 }

-podInformer.AddEventHandler(kcache.ResourceEventHandlerFuncs{
+podInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{
 AddFunc: adc.podAdd,
 UpdateFunc: adc.podUpdate,
 DeleteFunc: adc.podDelete,
 })
+adc.podsSynced = podInformer.Informer().HasSynced

-nodeInformer.AddEventHandler(kcache.ResourceEventHandlerFuncs{
+nodeInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{
 AddFunc: adc.nodeAdd,
 UpdateFunc: adc.nodeUpdate,
 DeleteFunc: adc.nodeDelete,
 })
+adc.nodesSynced = nodeInformer.Informer().HasSynced

 if err := adc.volumePluginMgr.InitPlugins(plugins, adc); err != nil {
 return nil, fmt.Errorf("Could not initialize volume plugins for Attach/Detach Controller: %+v", err)
@@ -129,7 +135,7 @@ func NewAttachDetachController(
 recorder,
 false)) // flag for experimental binary check for volume mount
 adc.nodeStatusUpdater = statusupdater.NewNodeStatusUpdater(
-kubeClient, nodeInformer, adc.actualStateOfWorld)
+kubeClient, nodeInformer.Lister(), adc.actualStateOfWorld)

 // Default these to values in options
 adc.reconciler = reconciler.NewReconciler(
@@ -144,7 +150,7 @@ func NewAttachDetachController(

 adc.desiredStateOfWorldPopulator = populator.NewDesiredStateOfWorldPopulator(
 desiredStateOfWorldPopulatorLoopSleepPeriod,
-podInformer,
+podInformer.Lister(),
 adc.desiredStateOfWorld)

 return adc, nil
@@ -155,15 +161,20 @@ type attachDetachController struct {
 // the API server.
 kubeClient clientset.Interface

-// pvcInformer is the shared PVC informer used to fetch and store PVC
+// pvcLister is the shared PVC lister used to fetch and store PVC
 // objects from the API server. It is shared with other controllers and
 // therefore the PVC objects in its store should be treated as immutable.
-pvcInformer kcache.SharedInformer
+pvcLister corelisters.PersistentVolumeClaimLister
+pvcsSynced kcache.InformerSynced

-// pvInformer is the shared PV informer used to fetch and store PV objects
+// pvLister is the shared PV lister used to fetch and store PV objects
 // from the API server. It is shared with other controllers and therefore
 // the PV objects in its store should be treated as immutable.
-pvInformer kcache.SharedInformer
+pvLister corelisters.PersistentVolumeLister
+pvsSynced kcache.InformerSynced

+podsSynced kcache.InformerSynced
+nodesSynced kcache.InformerSynced

 // cloud provider used by volume host
 cloud cloudprovider.Interface
@@ -211,6 +222,14 @@ func (adc *attachDetachController) Run(stopCh <-chan struct{}) {
 defer runtime.HandleCrash()
 glog.Infof("Starting Attach Detach Controller")

+// TODO uncomment once we agree this is ok and we fix the attach/detach integration test that
+// currently fails because it doesn't set pvcsSynced and pvsSynced to alwaysReady, so this
+// controller never runs.
+// if !kcache.WaitForCacheSync(stopCh, adc.podsSynced, adc.nodesSynced, adc.pvcsSynced, adc.pvsSynced) {
+// runtime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
+// return
+// }

 go adc.reconciler.Run(stopCh)
 go adc.desiredStateOfWorldPopulator.Run(stopCh)

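The commented-out block above is the usual client-go cache-sync gate; podsSynced, nodesSynced, pvcsSynced and pvsSynced are the HasSynced functions captured from the generated informers in the constructor. For reference, this is roughly how Run would read once the TODO is resolved (a sketch, not part of this commit):

    // Sketch of the gate described in the TODO above, once enabled.
    func (adc *attachDetachController) Run(stopCh <-chan struct{}) {
        defer runtime.HandleCrash()
        glog.Infof("Starting Attach Detach Controller")

        // Block until every informer cache backing the listers has completed its initial List.
        if !kcache.WaitForCacheSync(stopCh, adc.podsSynced, adc.nodesSynced, adc.pvcsSynced, adc.pvsSynced) {
            runtime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
            return
        }

        go adc.reconciler.Run(stopCh)
        go adc.desiredStateOfWorldPopulator.Run(stopCh)
        // ... remainder of Run unchanged
    }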
@@ -456,33 +475,17 @@ func (adc *attachDetachController) createVolumeSpec(
 // This method returns an error if a PVC object does not exist in the cache
 // with the given namespace/name.
 // This method returns an error if the PVC object's phase is not "Bound".
-func (adc *attachDetachController) getPVCFromCacheExtractPV(
+func (adc *attachDetachController) getPVCFromCacheExtractPV(namespace string, name string) (string, types.UID, error) {
-namespace string, name string) (string, types.UID, error) {
+pvc, err := adc.pvcLister.PersistentVolumeClaims(namespace).Get(name)
-key := name
+if err != nil {
-if len(namespace) > 0 {
+return "", "", fmt.Errorf("failed to find PVC %s/%s in PVCInformer cache: %v", namespace, name, err)
-key = namespace + "/" + name
-}

-pvcObj, exists, err := adc.pvcInformer.GetStore().GetByKey(key)
-if pvcObj == nil || !exists || err != nil {
-return "", "", fmt.Errorf(
-"failed to find PVC %q in PVCInformer cache. %v",
-key,
-err)
-}

-pvc, ok := pvcObj.(*v1.PersistentVolumeClaim)
-if !ok || pvc == nil {
-return "", "", fmt.Errorf(
-"failed to cast %q object %#v to PersistentVolumeClaim",
-key,
-pvcObj)
 }

 if pvc.Status.Phase != v1.ClaimBound || pvc.Spec.VolumeName == "" {
 return "", "", fmt.Errorf(
-"PVC %q has non-bound phase (%q) or empty pvc.Spec.VolumeName (%q)",
+"PVC %s/%s has non-bound phase (%q) or empty pvc.Spec.VolumeName (%q)",
-key,
+namespace,
+name,
 pvc.Status.Phase,
 pvc.Spec.VolumeName)
 }
@@ -496,20 +499,10 @@ func (adc *attachDetachController) getPVCFromCacheExtractPV(
 // the given name.
 // This method deep copies the PV object so the caller may use the returned
 // volume.Spec object without worrying about it mutating unexpectedly.
-func (adc *attachDetachController) getPVSpecFromCache(
+func (adc *attachDetachController) getPVSpecFromCache(name string, pvcReadOnly bool, expectedClaimUID types.UID) (*volume.Spec, error) {
-name string,
+pv, err := adc.pvLister.Get(name)
-pvcReadOnly bool,
+if err != nil {
-expectedClaimUID types.UID) (*volume.Spec, error) {
+return nil, fmt.Errorf("failed to find PV %q in PVInformer cache: %v", name, err)
-pvObj, exists, err := adc.pvInformer.GetStore().GetByKey(name)
-if pvObj == nil || !exists || err != nil {
-return nil, fmt.Errorf(
-"failed to find PV %q in PVInformer cache. %v", name, err)
-}

-pv, ok := pvObj.(*v1.PersistentVolume)
-if !ok || pv == nil {
-return nil, fmt.Errorf(
-"failed to cast %q object %#v to PersistentVolume", name, pvObj)
 }

 if pv.Spec.ClaimRef == nil {
@@ -530,14 +523,12 @@ func (adc *attachDetachController) getPVSpecFromCache(
 // may be mutated by another consumer.
 clonedPVObj, err := api.Scheme.DeepCopy(pv)
 if err != nil || clonedPVObj == nil {
-return nil, fmt.Errorf(
+return nil, fmt.Errorf("failed to deep copy %q PV object. err=%v", name, err)
-"failed to deep copy %q PV object. err=%v", name, err)
 }

 clonedPV, ok := clonedPVObj.(*v1.PersistentVolume)
 if !ok {
-return nil, fmt.Errorf(
+return nil, fmt.Errorf("failed to cast %q clonedPV %#v to PersistentVolume", name, pv)
-"failed to cast %q clonedPV %#v to PersistentVolume", name, pvObj)
 }

 return volume.NewSpecFromPersistentVolume(clonedPV, pvcReadOnly), nil
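The two lookup helpers above show the main payoff of the generated listers: pvcLister.PersistentVolumeClaims(ns).Get(name) and pvLister.Get(name) return typed objects plus a real error, so the GetStore().GetByKey key construction, exists-flag handling and type assertions all disappear. A minimal standalone sketch of the same lookup shape (the function name and imports here are illustrative, not from this commit):

    // Illustrative sketch of a lister-based lookup replacing GetStore().GetByKey.
    func pvcVolumeName(pvcLister corelisters.PersistentVolumeClaimLister, namespace, name string) (string, error) {
        pvc, err := pvcLister.PersistentVolumeClaims(namespace).Get(name)
        if err != nil {
            // A missing object comes back as a typed NotFound error, not as an exists=false flag.
            return "", fmt.Errorf("failed to find PVC %s/%s in PVCInformer cache: %v", namespace, name, err)
        }
        if pvc.Status.Phase != v1.ClaimBound || pvc.Spec.VolumeName == "" {
            return "", fmt.Errorf("PVC %s/%s is not bound to a volume", namespace, name)
        }
        return pvc.Spec.VolumeName, nil
    }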
@@ -20,26 +20,23 @@ import (
 "testing"
 "time"

-"k8s.io/kubernetes/pkg/controller/informers"
+informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
+"k8s.io/kubernetes/pkg/controller"
 controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
 )

 func Test_NewAttachDetachController_Positive(t *testing.T) {
 // Arrange
 fakeKubeClient := controllervolumetesting.CreateTestClient()
-resyncPeriod := 5 * time.Minute
+informerFactory := informers.NewSharedInformerFactory(nil, fakeKubeClient, controller.NoResyncPeriodFunc())
-podInformer := informers.NewPodInformer(fakeKubeClient, resyncPeriod)
-nodeInformer := informers.NewNodeInformer(fakeKubeClient, resyncPeriod)
-pvcInformer := informers.NewPVCInformer(fakeKubeClient, resyncPeriod)
-pvInformer := informers.NewPVInformer(fakeKubeClient, resyncPeriod)

 // Act
 _, err := NewAttachDetachController(
 fakeKubeClient,
-podInformer,
+informerFactory.Core().V1().Pods(),
-nodeInformer,
+informerFactory.Core().V1().Nodes(),
-pvcInformer,
+informerFactory.Core().V1().PersistentVolumeClaims(),
-pvInformer,
+informerFactory.Core().V1().PersistentVolumes(),
 nil, /* cloud */
 nil, /* plugins */
 false,
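With the hand-written one-off constructors (NewPodInformer, NewNodeInformer, NewPVCInformer, NewPVInformer) gone, the test builds a single generated factory and hands out typed informers from it. The wiring in isolation, as a sketch; the first factory argument appears to be the (unused here) internal clientset, with the versioned fake client second:

    // Sketch: one generated factory replaces the four one-off informer constructors.
    informerFactory := informers.NewSharedInformerFactory(nil, fakeKubeClient, controller.NoResyncPeriodFunc())

    podInformer := informerFactory.Core().V1().Pods()                    // coreinformers.PodInformer
    nodeInformer := informerFactory.Core().V1().Nodes()                  // coreinformers.NodeInformer
    pvcInformer := informerFactory.Core().V1().PersistentVolumeClaims()  // coreinformers.PersistentVolumeClaimInformer
    pvInformer := informerFactory.Core().V1().PersistentVolumes()        // coreinformers.PersistentVolumeInformer
    // These four typed informers are what NewAttachDetachController now takes
    // in place of the kcache.SharedInformer arguments removed above.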
@@ -12,10 +12,12 @@ go_library(
 srcs = ["desired_state_of_world_populator.go"],
 tags = ["automanaged"],
 deps = [
-"//pkg/api/v1:go_default_library",
+"//pkg/client/listers/core/v1:go_default_library",
 "//pkg/controller/volume/attachdetach/cache:go_default_library",
 "//pkg/volume/util/volumehelper:go_default_library",
 "//vendor:github.com/golang/glog",
+"//vendor:k8s.io/apimachinery/pkg/api/errors",
+"//vendor:k8s.io/apimachinery/pkg/util/runtime",
 "//vendor:k8s.io/apimachinery/pkg/util/wait",
 "//vendor:k8s.io/client-go/tools/cache",
 ],

@@ -19,13 +19,16 @@ limitations under the License.
 package populator

 import (
+"fmt"
 "time"

 "github.com/golang/glog"

+"k8s.io/apimachinery/pkg/api/errors"
+utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 "k8s.io/apimachinery/pkg/util/wait"
 kcache "k8s.io/client-go/tools/cache"
-"k8s.io/kubernetes/pkg/api/v1"
+corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
 "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
 "k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )
@@ -47,18 +50,18 @@ type DesiredStateOfWorldPopulator interface {
 // desiredStateOfWorld - the cache to populate
 func NewDesiredStateOfWorldPopulator(
 loopSleepDuration time.Duration,
-podInformer kcache.SharedInformer,
+podLister corelisters.PodLister,
 desiredStateOfWorld cache.DesiredStateOfWorld) DesiredStateOfWorldPopulator {
 return &desiredStateOfWorldPopulator{
 loopSleepDuration: loopSleepDuration,
-podInformer: podInformer,
+podLister: podLister,
 desiredStateOfWorld: desiredStateOfWorld,
 }
 }

 type desiredStateOfWorldPopulator struct {
 loopSleepDuration time.Duration
-podInformer kcache.SharedInformer
+podLister corelisters.PodLister
 desiredStateOfWorld cache.DesiredStateOfWorld
 }

@@ -83,43 +86,30 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
 }

 // Retrieve the pod object from pod informer with the namespace key
-informerPodObj, exists, err :=
+namespace, name, err := kcache.SplitMetaNamespaceKey(dswPodKey)
-dswp.podInformer.GetStore().GetByKey(dswPodKey)
 if err != nil {
-glog.Errorf(
+utilruntime.HandleError(fmt.Errorf("error splitting dswPodKey %q: %v", dswPodKey, err))
-"podInformer GetByKey failed for pod %q (UID %q) with %v",
-dswPodKey,
-dswPodUID,
-err)
 continue
 }
-if exists && informerPodObj == nil {
+informerPod, err := dswp.podLister.Pods(namespace).Get(name)
-glog.Info(
+switch {
-"podInformer GetByKey found pod, but informerPodObj is nil for pod %q (UID %q)",
+case errors.IsNotFound(err):
-dswPodKey,
+// if we can't find the pod, we need to delete it below
-dswPodUID)
+case err != nil:
+glog.Errorf("podLister Get failed for pod %q (UID %q) with %v", dswPodKey, dswPodUID, err)
 continue
-}
+default:

-if exists {
-informerPod, ok := informerPodObj.(*v1.Pod)
-if !ok {
-glog.Errorf("Failed to cast obj %#v to pod object for pod %q (UID %q)", informerPod, dswPodKey, dswPodUID)
-continue
-}
 informerPodUID := volumehelper.GetUniquePodName(informerPod)
 // Check whether the unique identifier of the pod from dsw matches the one retrieved from pod informer
 if informerPodUID == dswPodUID {
-glog.V(10).Infof(
+glog.V(10).Infof("Verified pod %q (UID %q) from dsw exists in pod informer.", dswPodKey, dswPodUID)
-"Verified pod %q (UID %q) from dsw exists in pod informer.", dswPodKey, dswPodUID)
 continue

 }
 }

 // the pod from dsw does not exist in pod informer, or it does not match the unique identifer retrieved
 // from the informer, delete it from dsw
-glog.V(1).Infof(
+glog.V(1).Infof("Removing pod %q (UID %q) from dsw because it does not exist in pod informer.", dswPodKey, dswPodUID)
-"Removing pod %q (UID %q) from dsw because it does not exist in pod informer.", dswPodKey, dswPodUID)
 dswp.desiredStateOfWorld.DeletePod(dswPodUID, dswPodToAdd.VolumeName, dswPodToAdd.NodeName)
 }
 }
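The populator's deleted-pod scan now goes through the PodLister instead of poking the informer's store: the dsw key is split back into namespace/name with kcache.SplitMetaNamespaceKey, and a NotFound error from the lister is the signal that the pod should be dropped from the desired state of world. The new control flow inside the findAndRemoveDeletedPods loop, reduced to its skeleton (a sketch using the same names as the hunk):

    // Sketch of the lookup-and-prune flow introduced above.
    namespace, name, err := kcache.SplitMetaNamespaceKey(dswPodKey)
    if err != nil {
        utilruntime.HandleError(fmt.Errorf("error splitting dswPodKey %q: %v", dswPodKey, err))
        continue
    }
    informerPod, err := dswp.podLister.Pods(namespace).Get(name)
    switch {
    case errors.IsNotFound(err):
        // Pod is gone from the informer cache: fall through and delete it from dsw below.
    case err != nil:
        glog.Errorf("podLister Get failed for pod %q (UID %q) with %v", dswPodKey, dswPodUID, err)
        continue
    default:
        // Pod still exists; keep it only if its unique pod name still matches.
        if volumehelper.GetUniquePodName(informerPod) == dswPodUID {
            continue
        }
    }
    dswp.desiredStateOfWorld.DeletePod(dswPodUID, dswPodToAdd.VolumeName, dswPodToAdd.NodeName)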
@@ -30,7 +30,8 @@ go_test(
 tags = ["automanaged"],
 deps = [
 "//pkg/api/v1:go_default_library",
-"//pkg/controller/informers:go_default_library",
+"//pkg/client/informers/informers_generated:go_default_library",
+"//pkg/controller:go_default_library",
 "//pkg/controller/volume/attachdetach/cache:go_default_library",
 "//pkg/controller/volume/attachdetach/statusupdater:go_default_library",
 "//pkg/controller/volume/attachdetach/testing:go_default_library",

@@ -24,7 +24,8 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/client-go/tools/record"
 "k8s.io/kubernetes/pkg/api/v1"
-"k8s.io/kubernetes/pkg/controller/informers"
+informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
+"k8s.io/kubernetes/pkg/controller"
 "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
 "k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater"
 controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
@@ -50,10 +51,9 @@ func Test_Run_Positive_DoNothing(t *testing.T) {
 fakeKubeClient := controllervolumetesting.CreateTestClient()
 fakeRecorder := &record.FakeRecorder{}
 ad := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(fakeKubeClient, volumePluginMgr, fakeRecorder, false /* checkNodeCapabilitiesBeforeMount */))
-nodeInformer := informers.NewNodeInformer(
+informerFactory := informers.NewSharedInformerFactory(nil, fakeKubeClient, controller.NoResyncPeriodFunc())
-fakeKubeClient, resyncPeriod)
 nsu := statusupdater.NewNodeStatusUpdater(
-fakeKubeClient, nodeInformer, asw)
+fakeKubeClient, informerFactory.Core().V1().Nodes().Lister(), asw)
 reconciler := NewReconciler(
 reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, false, dsw, asw, ad, nsu)


@@ -18,10 +18,10 @@ go_library(
 "//pkg/api:go_default_library",
 "//pkg/api/v1:go_default_library",
 "//pkg/client/clientset_generated/clientset:go_default_library",
+"//pkg/client/listers/core/v1:go_default_library",
 "//pkg/controller/volume/attachdetach/cache:go_default_library",
 "//vendor:github.com/golang/glog",
 "//vendor:k8s.io/apimachinery/pkg/util/strategicpatch",
-"//vendor:k8s.io/client-go/tools/cache",
 ],
 )


@@ -25,10 +25,10 @@ import (
 "github.com/golang/glog"

 "k8s.io/apimachinery/pkg/util/strategicpatch"
-kcache "k8s.io/client-go/tools/cache"
 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
 "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
 )

@@ -43,18 +43,18 @@ type NodeStatusUpdater interface {
 // NewNodeStatusUpdater returns a new instance of NodeStatusUpdater.
 func NewNodeStatusUpdater(
 kubeClient clientset.Interface,
-nodeInformer kcache.SharedInformer,
+nodeLister corelisters.NodeLister,
 actualStateOfWorld cache.ActualStateOfWorld) NodeStatusUpdater {
 return &nodeStatusUpdater{
 actualStateOfWorld: actualStateOfWorld,
-nodeInformer: nodeInformer,
+nodeLister: nodeLister,
 kubeClient: kubeClient,
 }
 }

 type nodeStatusUpdater struct {
 kubeClient clientset.Interface
-nodeInformer kcache.SharedInformer
+nodeLister corelisters.NodeLister
 actualStateOfWorld cache.ActualStateOfWorld
 }

@@ -63,8 +63,8 @@ func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error {
 // kubernetes/kubernetes/issues/37777
 nodesToUpdate := nsu.actualStateOfWorld.GetVolumesToReportAttached()
 for nodeName, attachedVolumes := range nodesToUpdate {
-nodeObj, exists, err := nsu.nodeInformer.GetStore().GetByKey(string(nodeName))
+nodeObj, err := nsu.nodeLister.Get(string(nodeName))
-if nodeObj == nil || !exists || err != nil {
+if nodeObj == nil || err != nil {
 // If node does not exist, its status cannot be updated, log error and
 // reset flag statusUpdateNeeded back to true to indicate this node status
 // needs to be updated again
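One behavioral detail worth noting in the nodeStatusUpdater change: the old GetStore().GetByKey returned an (object, exists, err) triple, while corelisters.NodeLister.Get reports a missing node as a NotFound error, so the combined nil/err check above is what now distinguishes "node gone" from any other lister failure. A small sketch of the lookup as it reads after this change (same names as the hunk):

    // Sketch: lister-based node lookup in UpdateNodeStatuses.
    nodeObj, err := nsu.nodeLister.Get(string(nodeName))
    if nodeObj == nil || err != nil {
        // Missing node (NotFound error) or any other lister failure: the status
        // cannot be updated now, so statusUpdateNeeded is reset and the node is
        // retried on a later pass, as the surrounding comments describe.
    }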
@@ -30,6 +30,7 @@ go_library(
 "//pkg/client/clientset_generated/clientset:go_default_library",
 "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
 "//pkg/client/clientset_generated/internalclientset:go_default_library",
+"//pkg/client/informers/informers_generated:go_default_library",
 "//pkg/controller:go_default_library",
 "//pkg/controller/replication:go_default_library",
 "//pkg/generated/openapi:go_default_library",

@@ -56,6 +56,7 @@ import (
 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
 "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
 "k8s.io/kubernetes/pkg/controller"
 replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
 "k8s.io/kubernetes/pkg/generated/openapi"
@@ -120,11 +121,13 @@ func NewMasterComponents(c *Config) *MasterComponents {
 // TODO: caesarxuchao: remove this client when the refactoring of client libraray is done.
 clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}, QPS: c.QPS, Burst: c.Burst})
 rcStopCh := make(chan struct{})
-controllerManager := replicationcontroller.NewReplicationManagerFromClient(clientset, controller.NoResyncPeriodFunc, c.Burst, 4096)
+informerFactory := informers.NewSharedInformerFactory(nil, clientset, controller.NoResyncPeriodFunc())
+controllerManager := replicationcontroller.NewReplicationManager(informerFactory.Core().V1().Pods(), informerFactory.Core().V1().ReplicationControllers(), clientset, c.Burst, 4096, false)

 // TODO: Support events once we can cleanly shutdown an event recorder.
 controllerManager.SetEventRecorder(&record.FakeRecorder{})
 if c.StartReplicationManager {
+informerFactory.Start(rcStopCh)
 go controllerManager.Run(goruntime.NumCPU(), rcStopCh)
 }
 return &MasterComponents{
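The integration framework now drives the replication manager the same way a shared-informer controller is normally wired: build the generated factory, construct NewReplicationManager from typed informers, and call informerFactory.Start before (or alongside) running the controller, since shared informers do not list or watch anything until Start is called. Condensed from the hunk above:

    // Sketch: shared-informer wiring for the test master components.
    informerFactory := informers.NewSharedInformerFactory(nil, clientset, controller.NoResyncPeriodFunc())
    controllerManager := replicationcontroller.NewReplicationManager(
        informerFactory.Core().V1().Pods(),
        informerFactory.Core().V1().ReplicationControllers(),
        clientset,
        c.Burst,
        4096,
        false,
    )

    if c.StartReplicationManager {
        informerFactory.Start(rcStopCh) // informers do nothing until Start is called
        go controllerManager.Run(goruntime.NumCPU(), rcStopCh)
    }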
@@ -37,8 +37,8 @@ import (
 "k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
 "k8s.io/kubernetes/pkg/controller"
-"k8s.io/kubernetes/pkg/controller/informers"
 replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
 resourcequotacontroller "k8s.io/kubernetes/pkg/controller/resourcequota"
 kubeadmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
@@ -84,10 +84,15 @@ func TestQuota(t *testing.T) {
 controllerCh := make(chan struct{})
 defer close(controllerCh)

-informers := informers.NewSharedInformerFactory(clientset, nil, controller.NoResyncPeriodFunc())
+informers := informers.NewSharedInformerFactory(nil, clientset, controller.NoResyncPeriodFunc())
-podInformer := informers.Pods().Informer()
+rm := replicationcontroller.NewReplicationManager(
-rcInformer := informers.ReplicationControllers().Informer()
+informers.Core().V1().Pods(),
-rm := replicationcontroller.NewReplicationManager(podInformer, rcInformer, clientset, replicationcontroller.BurstReplicas, 4096, false)
+informers.Core().V1().ReplicationControllers(),
+clientset,
+replicationcontroller.BurstReplicas,
+4096,
+false,
+)
 rm.SetEventRecorder(&record.FakeRecorder{})
 informers.Start(controllerCh)
 go rm.Run(3, controllerCh)
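The quota test also surfaces the one easy-to-miss difference between the two factories: the argument order flips. The removed hand-written factory took the versioned clientset first, while the generated one appears to take the internal clientset first (nil here) and the versioned clientset second; typed informers then hang off the Group().Version() accessors. A short sketch of the two calls side by side (assumption about parameter meaning inferred from the hunks, not stated in the commit):

    // Hand-written factory (removed): versioned client in the first position.
    //   informers.NewSharedInformerFactory(clientset, nil, controller.NoResyncPeriodFunc())
    // Generated factory (this commit): internal client first (nil), versioned client second.
    informers := informers.NewSharedInformerFactory(nil, clientset, controller.NoResyncPeriodFunc())
    _ = informers.Core().V1().Pods() // typed informer accessor replacing informers.Pods().Informer()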
|
|
|
@@ -33,7 +33,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	"k8s.io/kubernetes/pkg/controller/informers"
+	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
 	"k8s.io/kubernetes/pkg/controller/replicaset"
 	"k8s.io/kubernetes/test/integration/framework"
 )

@@ -126,7 +126,7 @@ func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespa
 	return ret, nil
 }

-func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *replicaset.ReplicaSetController, cache.SharedIndexInformer, cache.SharedIndexInformer, clientset.Interface) {
+func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *replicaset.ReplicaSetController, informers.SharedInformerFactory, clientset.Interface) {
 	masterConfig := framework.NewIntegrationTestMasterConfig()
 	_, s := framework.RunAMaster(masterConfig)

@@ -136,11 +136,11 @@ func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *repl
 		t.Fatalf("Error in create clientset: %v", err)
 	}
 	resyncPeriod := 12 * time.Hour
-	informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "rs-informers")), nil, resyncPeriod)
+	informers := informers.NewSharedInformerFactory(nil, clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "rs-informers")), resyncPeriod)

 	rm := replicaset.NewReplicaSetController(
-		informers.ReplicaSets(),
-		informers.Pods(),
+		informers.Extensions().V1beta1().ReplicaSets(),
+		informers.Core().V1().Pods(),
 		clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replicaset-controller")),
 		replicaset.BurstReplicas,
 		4096,

@@ -150,7 +150,7 @@ func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *repl
 	if err != nil {
 		t.Fatalf("Failed to create replicaset controller")
 	}
-	return s, rm, informers.ReplicaSets().Informer(), informers.Pods().Informer(), clientSet
+	return s, rm, informers, clientSet
 }

 // wait for the podInformer to observe the pods. Call this function before

@@ -220,7 +220,8 @@ func TestAdoption(t *testing.T) {
 		},
 	}
 	for i, tc := range testCases {
-		s, rm, rsInformer, podInformer, clientSet := rmSetup(t, true)
+		s, rm, informers, clientSet := rmSetup(t, true)
+		podInformer := informers.Core().V1().Pods().Informer()
 		ns := framework.CreateTestingNamespace(fmt.Sprintf("rs-adoption-%d", i), s, t)
 		defer framework.DeleteTestingNamespace(ns, s, t)

@@ -240,8 +241,7 @@ func TestAdoption(t *testing.T) {
 		}

 		stopCh := make(chan struct{})
-		go rsInformer.Run(stopCh)
-		go podInformer.Run(stopCh)
+		informers.Start(stopCh)
 		waitToObservePods(t, podInformer, 1)
 		go rm.Run(5, stopCh)
 		if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {

@@ -298,7 +298,7 @@ func TestUpdateSelectorToAdopt(t *testing.T) {
 	// We have pod1, pod2 and rs. rs.spec.replicas=1. At first rs.Selector
 	// matches pod1 only; change the selector to match pod2 as well. Verify
 	// there is only one pod left.
-	s, rm, rsInformer, podInformer, clientSet := rmSetup(t, true)
+	s, rm, informers, clientSet := rmSetup(t, true)
 	ns := framework.CreateTestingNamespace("rs-update-selector-to-adopt", s, t)
 	defer framework.DeleteTestingNamespace(ns, s, t)
 	rs := newRS("rs", ns.Name, 1)

@@ -312,8 +312,7 @@ func TestUpdateSelectorToAdopt(t *testing.T) {
 	createRSsPods(t, clientSet, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{pod1, pod2}, ns.Name)

 	stopCh := make(chan struct{})
-	go rsInformer.Run(stopCh)
-	go podInformer.Run(stopCh)
+	informers.Start(stopCh)
 	go rm.Run(5, stopCh)
 	waitRSStable(t, clientSet, rs, ns.Name)

@@ -339,7 +338,8 @@ func TestUpdateSelectorToRemoveControllerRef(t *testing.T) {
 	// matches pod1 and pod2; change the selector to match only pod1. Verify
 	// that rs creates one more pod, so there are 3 pods. Also verify that
 	// pod2's controllerRef is cleared.
-	s, rm, rsInformer, podInformer, clientSet := rmSetup(t, true)
+	s, rm, informers, clientSet := rmSetup(t, true)
+	podInformer := informers.Core().V1().Pods().Informer()
 	ns := framework.CreateTestingNamespace("rs-update-selector-to-remove-controllerref", s, t)
 	defer framework.DeleteTestingNamespace(ns, s, t)
 	rs := newRS("rs", ns.Name, 2)

@@ -350,8 +350,7 @@ func TestUpdateSelectorToRemoveControllerRef(t *testing.T) {
 	createRSsPods(t, clientSet, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{pod1, pod2}, ns.Name)

 	stopCh := make(chan struct{})
-	go rsInformer.Run(stopCh)
-	go podInformer.Run(stopCh)
+	informers.Start(stopCh)
 	waitToObservePods(t, podInformer, 2)
 	go rm.Run(5, stopCh)
 	waitRSStable(t, clientSet, rs, ns.Name)

@@ -386,7 +385,7 @@ func TestUpdateLabelToRemoveControllerRef(t *testing.T) {
 	// matches pod1 and pod2; change pod2's labels to non-matching. Verify
 	// that rs creates one more pod, so there are 3 pods. Also verify that
 	// pod2's controllerRef is cleared.
-	s, rm, rsInformer, podInformer, clientSet := rmSetup(t, true)
+	s, rm, informers, clientSet := rmSetup(t, true)
 	ns := framework.CreateTestingNamespace("rs-update-label-to-remove-controllerref", s, t)
 	defer framework.DeleteTestingNamespace(ns, s, t)
 	rs := newRS("rs", ns.Name, 2)

@@ -395,8 +394,7 @@ func TestUpdateLabelToRemoveControllerRef(t *testing.T) {
 	createRSsPods(t, clientSet, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{pod1, pod2}, ns.Name)

 	stopCh := make(chan struct{})
-	go rsInformer.Run(stopCh)
-	go podInformer.Run(stopCh)
+	informers.Start(stopCh)
 	go rm.Run(5, stopCh)
 	waitRSStable(t, clientSet, rs, ns.Name)

@@ -429,7 +427,7 @@ func TestUpdateLabelToBeAdopted(t *testing.T) {
 	// matches pod1 only; change pod2's labels to be matching. Verify the RS
 	// controller adopts pod2 and delete one of them, so there is only 1 pod
 	// left.
-	s, rm, rsInformer, podInformer, clientSet := rmSetup(t, true)
+	s, rm, informers, clientSet := rmSetup(t, true)
 	ns := framework.CreateTestingNamespace("rs-update-label-to-be-adopted", s, t)
 	defer framework.DeleteTestingNamespace(ns, s, t)
 	rs := newRS("rs", ns.Name, 1)

@@ -443,8 +441,7 @@ func TestUpdateLabelToBeAdopted(t *testing.T) {
 	createRSsPods(t, clientSet, []*v1beta1.ReplicaSet{rs}, []*v1.Pod{pod1, pod2}, ns.Name)

 	stopCh := make(chan struct{})
-	go rsInformer.Run(stopCh)
-	go podInformer.Run(stopCh)
+	informers.Start(stopCh)
 	go rm.Run(5, stopCh)
 	waitRSStable(t, clientSet, rs, ns.Name)
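Across the ReplicaSet adoption and selector tests above, every pair of go rsInformer.Run(stopCh) / go podInformer.Run(stopCh) calls collapses into a single informers.Start(stopCh), and a test fetches the raw shared informer through the typed accessor only when it still needs to poll the cache (waitToObservePods). A sketch of the rewritten test flow; it leans on the rmSetup and waitToObservePods helpers defined in that same test file and elides namespace and pod creation:

func adoptionFlowSketch(t *testing.T) {
	s, rm, informerFactory, clientSet := rmSetup(t, true)
	_, _ = s, clientSet // server shutdown, namespace setup and pod creation elided

	// The raw cache.SharedIndexInformer is still reachable when a test needs
	// to inspect the shared cache directly.
	podInformer := informerFactory.Core().V1().Pods().Informer()

	stopCh := make(chan struct{})
	defer close(stopCh)

	informerFactory.Start(stopCh) // one call replaces the per-informer goroutines
	waitToObservePods(t, podInformer, 1)
	go rm.Run(5, stopCh)
}

Note that Start only launches informers that have already been requested from the factory, which is why the rewritten tests grab the pod informer right after rmSetup returns and before calling Start.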
@@ -32,7 +32,7 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
-	"k8s.io/kubernetes/pkg/controller/informers"
+	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
 	"k8s.io/kubernetes/pkg/controller/replication"
 	"k8s.io/kubernetes/test/integration/framework"
 )

@@ -123,7 +123,7 @@ func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespa
 	return ret, nil
 }

-func rmSetup(t *testing.T, stopCh chan struct{}, enableGarbageCollector bool) (*httptest.Server, *replication.ReplicationManager, cache.SharedIndexInformer, clientset.Interface) {
+func rmSetup(t *testing.T, stopCh chan struct{}, enableGarbageCollector bool) (*httptest.Server, *replication.ReplicationManager, informers.SharedInformerFactory, clientset.Interface) {
 	masterConfig := framework.NewIntegrationTestMasterConfig()
 	_, s := framework.RunAMaster(masterConfig)

@@ -134,13 +134,11 @@ func rmSetup(t *testing.T, stopCh chan struct{}, enableGarbageCollector bool) (*
 	}
 	resyncPeriod := 12 * time.Hour

-	informers := informers.NewSharedInformerFactory(clientSet, nil, resyncPeriod)
-	podInformer := informers.Pods().Informer()
-	rcInformer := informers.ReplicationControllers().Informer()
-	rm := replication.NewReplicationManager(podInformer, rcInformer, clientSet, replication.BurstReplicas, 4096, enableGarbageCollector)
+	informers := informers.NewSharedInformerFactory(nil, clientSet, resyncPeriod)
+	rm := replication.NewReplicationManager(informers.Core().V1().Pods(), informers.Core().V1().ReplicationControllers(), clientSet, replication.BurstReplicas, 4096, enableGarbageCollector)
 	informers.Start(stopCh)

-	return s, rm, podInformer, clientSet
+	return s, rm, informers, clientSet
 }

 // wait for the podInformer to observe the pods. Call this function before

@@ -211,7 +209,7 @@ func TestAdoption(t *testing.T) {
 	}
 	for i, tc := range testCases {
 		stopCh := make(chan struct{})
-		s, rm, podInformer, clientSet := rmSetup(t, stopCh, true)
+		s, rm, informers, clientSet := rmSetup(t, stopCh, true)
 		ns := framework.CreateTestingNamespace(fmt.Sprintf("adoption-%d", i), s, t)
 		defer framework.DeleteTestingNamespace(ns, s, t)

@@ -230,8 +228,8 @@ func TestAdoption(t *testing.T) {
 			t.Fatalf("Failed to create Pod: %v", err)
 		}

-		go podInformer.Run(stopCh)
-		waitToObservePods(t, podInformer, 1)
+		informers.Start(stopCh)
+		waitToObservePods(t, informers.Core().V1().Pods().Informer(), 1)
 		go rm.Run(5, stopCh)
 		if err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
 			updatedPod, err := podClient.Get(pod.Name, metav1.GetOptions{})

@@ -327,7 +325,7 @@ func TestUpdateSelectorToRemoveControllerRef(t *testing.T) {
 	// that rc creates one more pod, so there are 3 pods. Also verify that
 	// pod2's controllerRef is cleared.
 	stopCh := make(chan struct{})
-	s, rm, podInformer, clientSet := rmSetup(t, stopCh, true)
+	s, rm, informers, clientSet := rmSetup(t, stopCh, true)
 	ns := framework.CreateTestingNamespace("update-selector-to-remove-controllerref", s, t)
 	defer framework.DeleteTestingNamespace(ns, s, t)
 	rc := newRC("rc", ns.Name, 2)

@@ -337,7 +335,7 @@ func TestUpdateSelectorToRemoveControllerRef(t *testing.T) {
 	pod2.Labels["uniqueKey"] = "2"
 	createRCsPods(t, clientSet, []*v1.ReplicationController{rc}, []*v1.Pod{pod1, pod2}, ns.Name)

-	waitToObservePods(t, podInformer, 2)
+	waitToObservePods(t, informers.Core().V1().Pods().Informer(), 2)
 	go rm.Run(5, stopCh)
 	waitRCStable(t, clientSet, rc, ns.Name)
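The ReplicationController test applies the same pattern but pushes informers.Start into the setup helper itself and returns the factory so each test can reach whichever informer it needs afterwards. A sketch of the new helper shape, using only identifiers visible in the hunks above; the API-server bootstrap, clientset construction, and error handling of the real rmSetup are elided:

package sketch

import (
	"time"

	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
	"k8s.io/kubernetes/pkg/controller/replication"
)

// rmSetupSketch builds the factory once, wires the manager from typed
// accessors, starts the factory, and hands the factory back to the caller.
func rmSetupSketch(stopCh chan struct{}, clientSet clientset.Interface, enableGarbageCollector bool) (*replication.ReplicationManager, informers.SharedInformerFactory) {
	resyncPeriod := 12 * time.Hour

	factory := informers.NewSharedInformerFactory(nil, clientSet, resyncPeriod)
	rm := replication.NewReplicationManager(
		factory.Core().V1().Pods(),
		factory.Core().V1().ReplicationControllers(),
		clientSet,
		replication.BurstReplicas,
		4096,
		enableGarbageCollector,
	)

	factory.Start(stopCh)

	// Callers reach individual informers later, e.g.
	// factory.Core().V1().Pods().Informer() inside waitToObservePods.
	return rm, factory
}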
@@ -30,8 +30,8 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated"
 	fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
-	"k8s.io/kubernetes/pkg/controller/informers"
 	"k8s.io/kubernetes/pkg/controller/volume/attachdetach"
 	"k8s.io/kubernetes/pkg/volume"
 	volumetest "k8s.io/kubernetes/pkg/volume/testing"

@@ -94,7 +94,7 @@ func TestPodDeletionWithDswp(t *testing.T) {
 	ns := framework.CreateTestingNamespace(namespaceName, server, t)
 	defer framework.DeleteTestingNamespace(ns, server, t)

-	testClient, ctrl, podInformer, nodeInformer := createAdClients(ns, t, server, defaultSyncPeriod)
+	testClient, ctrl, informers := createAdClients(ns, t, server, defaultSyncPeriod)

 	pod := fakePodWithVol(namespaceName)
 	podStopCh := make(chan struct{})

@@ -103,12 +103,13 @@ func TestPodDeletionWithDswp(t *testing.T) {
 		t.Fatalf("Failed to created node : %v", err)
 	}

-	go nodeInformer.Run(podStopCh)
+	go informers.Core().V1().Nodes().Informer().Run(podStopCh)

 	if _, err := testClient.Core().Pods(ns.Name).Create(pod); err != nil {
 		t.Errorf("Failed to create pod : %v", err)
 	}

+	podInformer := informers.Core().V1().Pods().Informer()
 	go podInformer.Run(podStopCh)

 	// start controller loop

@@ -167,7 +168,7 @@ func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podN
 	}
 }

-func createAdClients(ns *v1.Namespace, t *testing.T, server *httptest.Server, syncPeriod time.Duration) (*clientset.Clientset, attachdetach.AttachDetachController, cache.SharedIndexInformer, cache.SharedIndexInformer) {
+func createAdClients(ns *v1.Namespace, t *testing.T, server *httptest.Server, syncPeriod time.Duration) (*clientset.Clientset, attachdetach.AttachDetachController, informers.SharedInformerFactory) {
 	config := restclient.Config{
 		Host: server.URL,
 		ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion},

@@ -192,13 +193,20 @@ func createAdClients(ns *v1.Namespace, t *testing.T, server *httptest.Server, sy
 	}
 	plugins := []volume.VolumePlugin{plugin}
 	cloud := &fakecloud.FakeCloud{}
-	podInformer := informers.NewPodInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pod-informer")), resyncPeriod)
-	nodeInformer := informers.NewNodeInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "node-informer")), resyncPeriod)
-	pvcInformer := informers.NewNodeInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pvc-informer")), resyncPeriod)
-	pvInformer := informers.NewNodeInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pv-informer")), resyncPeriod)
-	ctrl, err := attachdetach.NewAttachDetachController(testClient, podInformer, nodeInformer, pvcInformer, pvInformer, cloud, plugins, false, time.Second*5)
+	informers := informers.NewSharedInformerFactory(nil, testClient, resyncPeriod)
+	ctrl, err := attachdetach.NewAttachDetachController(
+		testClient,
+		informers.Core().V1().Pods(),
+		informers.Core().V1().Nodes(),
+		informers.Core().V1().PersistentVolumeClaims(),
+		informers.Core().V1().PersistentVolumes(),
+		cloud,
+		plugins,
+		false,
+		time.Second*5,
+	)
 	if err != nil {
 		t.Fatalf("Error creating AttachDetach : %v", err)
 	}
-	return testClient, ctrl, podInformer, nodeInformer
+	return testClient, ctrl, informers
 }
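The attach/detach hunks show the same consolidation at a larger scale: four hand-built informers, each with its own user-agent-tagged client (and with the PVC and PV informers evidently built with NewNodeInformer by copy-paste in the old code), become four typed accessors on one factory. A sketch of the tail of the rewritten createAdClients; the names testClient, cloud, plugins, resyncPeriod and the testing.T value come from earlier in that function, and the test itself names the factory variable informers:

	// One factory replaces four individually constructed informers.
	factory := informers.NewSharedInformerFactory(nil, testClient, resyncPeriod)

	ctrl, err := attachdetach.NewAttachDetachController(
		testClient,
		factory.Core().V1().Pods(),
		factory.Core().V1().Nodes(),
		factory.Core().V1().PersistentVolumeClaims(),
		factory.Core().V1().PersistentVolumes(),
		cloud,
		plugins,
		false,         // trailing arguments unchanged from the old one-line call
		time.Second*5,
	)
	if err != nil {
		t.Fatalf("Error creating AttachDetach : %v", err)
	}
	return testClient, ctrl, factory

Callers such as TestPodDeletionWithDswp then run only the informers they care about, for example go informers.Core().V1().Nodes().Informer().Run(podStopCh), or start everything at once with informers.Start.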