From 68358fd30879a04eb53d8db6e940fcc785710682 Mon Sep 17 00:00:00 2001
From: markturansky
Date: Mon, 31 Aug 2015 09:44:37 -0400
Subject: [PATCH] Added VolumeConfig to volumes

---
 .../app/controllermanager.go                  | 17 +++++++++++++-
 cmd/kube-controller-manager/app/plugins.go    | 20 +++++++++++++----
 cmd/kubelet/app/plugins.go                    |  7 ++++--
 .../controllermanager/controllermanager.go    |  2 +-
 pkg/volume/host_path/host_path.go             |  2 +-
 pkg/volume/host_path/host_path_test.go        |  8 +++----
 pkg/volume/nfs/nfs.go                         |  2 +-
 pkg/volume/nfs/nfs_test.go                    |  8 +++----
 .../persistent_claim/persistent_claim_test.go |  4 ++--
 pkg/volume/plugins.go                         | 22 +++++++++++++++++++
 pkg/volume/testing.go                         | 13 +++++++++++
 11 files changed, 85 insertions(+), 20 deletions(-)

diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go
index ae13517a9c..6464d65b4c 100644
--- a/cmd/kube-controller-manager/app/controllermanager.go
+++ b/cmd/kube-controller-manager/app/controllermanager.go
@@ -67,6 +67,7 @@ type CMServer struct {
     ResourceQuotaSyncPeriod           time.Duration
     NamespaceSyncPeriod               time.Duration
     PVClaimBinderSyncPeriod           time.Duration
+    VolumeConfigFlags                 VolumeConfigFlags
     HorizontalPodAutoscalerSyncPeriod time.Duration
     RegisterRetryCount                int
     NodeMonitorGracePeriod            time.Duration
@@ -106,10 +107,22 @@ func NewCMServer() *CMServer {
         PodEvictionTimeout:            5 * time.Minute,
         ClusterName:                   "kubernetes",
         EnableHorizontalPodAutoscaler: false,
+        VolumeConfigFlags: VolumeConfigFlags{
+            // default values here
+            PersistentVolumeRecyclerTimeoutNFS: 300,
+        },
     }
     return &s
 }
 
+// VolumeConfigFlags is used to bind CLI flags to variables. This top-level struct contains *all* enumerated
+// CLI flags meant to configure all volume plugins. From this config, the binary creates many instances of
+// volume.VolumeConfig, which are then passed to the appropriate plugins. The controller-manager binary is the
+// only part of the code that knows which plugins are supported and which CLI flags correspond to each plugin.
+type VolumeConfigFlags struct {
+    PersistentVolumeRecyclerTimeoutNFS int
+}
+
 // AddFlags adds flags for a specific CMServer to the specified FlagSet
 func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
     fs.IntVar(&s.Port, "port", s.Port, "The port that the controller-manager's http service runs on")
@@ -125,6 +138,8 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
     fs.DurationVar(&s.ResourceQuotaSyncPeriod, "resource-quota-sync-period", s.ResourceQuotaSyncPeriod, "The period for syncing quota usage status in the system")
     fs.DurationVar(&s.NamespaceSyncPeriod, "namespace-sync-period", s.NamespaceSyncPeriod, "The period for syncing namespace life-cycle updates")
     fs.DurationVar(&s.PVClaimBinderSyncPeriod, "pvclaimbinder-sync-period", s.PVClaimBinderSyncPeriod, "The period for syncing persistent volumes and persistent volume claims")
+    // TODO markt -- make this example a working config item with the Recycler Config PR.
+    // fs.MyExample(&s.VolumeConfigFlags.PersistentVolumeRecyclerTimeoutNFS, "pv-recycler-timeout-nfs", s.VolumeConfigFlags.PersistentVolumeRecyclerTimeoutNFS, "The minimum timeout for an NFS PV recycling operation")
     fs.DurationVar(&s.HorizontalPodAutoscalerSyncPeriod, "horizontal-pod-autoscaler-sync-period", s.HorizontalPodAutoscalerSyncPeriod, "The period for syncing the number of pods in horizontal pod autoscaler.")
     fs.DurationVar(&s.PodEvictionTimeout, "pod-eviction-timeout", s.PodEvictionTimeout, "The grace period for deleting pods on failed nodes.")
     fs.Float32Var(&s.DeletingPodsQps, "deleting-pods-qps", 0.1, "Number of nodes per second on which pods are deleted in case of node failure.")
@@ -231,7 +246,7 @@ func (s *CMServer) Run(_ []string) error {
     pvclaimBinder := volumeclaimbinder.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod)
     pvclaimBinder.Run()
 
-    pvRecycler, err := volumeclaimbinder.NewPersistentVolumeRecycler(kubeClient, s.PVClaimBinderSyncPeriod, ProbeRecyclableVolumePlugins())
+    pvRecycler, err := volumeclaimbinder.NewPersistentVolumeRecycler(kubeClient, s.PVClaimBinderSyncPeriod, ProbeRecyclableVolumePlugins(s.VolumeConfigFlags))
     if err != nil {
         glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
     }
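The commented-out fs.MyExample call above is only a placeholder for the follow-up Recycler Config PR. As a rough sketch of what the real binding might look like with the pflag IntVar helper already used throughout AddFlags (the flag name and help text are carried over from the placeholder and are not final):

    // hypothetical binding, reusing the IntVar helper the rest of AddFlags relies on
    fs.IntVar(&s.VolumeConfigFlags.PersistentVolumeRecyclerTimeoutNFS, "pv-recycler-timeout-nfs",
        s.VolumeConfigFlags.PersistentVolumeRecyclerTimeoutNFS,
        "The minimum timeout for an NFS PV recycling operation")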
diff --git a/cmd/kube-controller-manager/app/plugins.go b/cmd/kube-controller-manager/app/plugins.go
index acea294ffc..76f41e5df1 100644
--- a/cmd/kube-controller-manager/app/plugins.go
+++ b/cmd/kube-controller-manager/app/plugins.go
@@ -31,13 +31,25 @@ import (
 )
 
 // ProbeRecyclableVolumePlugins collects all persistent volume plugins into an easy to use list.
-func ProbeRecyclableVolumePlugins() []volume.VolumePlugin {
+func ProbeRecyclableVolumePlugins(flags VolumeConfigFlags) []volume.VolumePlugin {
     allPlugins := []volume.VolumePlugin{}
 
-    // The list of plugins to probe is decided by the kubelet binary, not
+    // The list of plugins to probe is decided by this binary, not
     // by dynamic linking or other "magic".  Plugins will be analyzed and
     // initialized later.
-    allPlugins = append(allPlugins, host_path.ProbeVolumePlugins()...)
-    allPlugins = append(allPlugins, nfs.ProbeVolumePlugins()...)
+
+    // Each plugin can make use of VolumeConfig. The single arg to this func contains *all* enumerated
+    // CLI flags meant to configure volume plugins. From that single config, create an instance of volume.VolumeConfig
+    // for a specific plugin and pass that instance to the plugin's ProbeVolumePlugins(config) func.
+    hostPathConfig := volume.VolumeConfig{
+        // transfer attributes from VolumeConfigFlags to this instance of volume.VolumeConfig
+    }
+    nfsConfig := volume.VolumeConfig{
+        // TODO: transfer flags.PersistentVolumeRecyclerTimeoutNFS and other flags to this instance of VolumeConfig.
+        // Configuring recyclers will be done in a follow-up PR.
+    }
+
+    allPlugins = append(allPlugins, host_path.ProbeVolumePlugins(hostPathConfig)...)
+    allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(nfsConfig)...)
     return allPlugins
 }
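The two TODO comments above defer the actual transfer of flag values to a follow-up PR. A minimal sketch of what that transfer might look like if the follow-up chose the OtherAttributes escape hatch rather than a strongly typed field (the key name and the strconv conversion are illustrative only, and strconv would need to be imported):

    // hypothetical follow-up: move the CLI flag value into the NFS plugin's config
    nfsConfig := volume.VolumeConfig{
        OtherAttributes: map[string]string{
            // string-typed one-off config; the plugin must parse it back to an int
            "pv-recycler-timeout": strconv.Itoa(flags.PersistentVolumeRecyclerTimeoutNFS),
        },
    }
    allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(nfsConfig)...)

A strongly typed field on volume.VolumeConfig (e.g., a RecyclerMinimumTimeout) would be the preferred shape per the doc comment on VolumeConfig below.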
diff --git a/cmd/kubelet/app/plugins.go b/cmd/kubelet/app/plugins.go
index 3ec65c215f..50d886f1fe 100644
--- a/cmd/kubelet/app/plugins.go
+++ b/cmd/kubelet/app/plugins.go
@@ -47,12 +47,15 @@ func ProbeVolumePlugins() []volume.VolumePlugin {
     // The list of plugins to probe is decided by the kubelet binary, not
     // by dynamic linking or other "magic".  Plugins will be analyzed and
     // initialized later.
+    //
+    // Kubelet does not currently need to configure volume plugins.
+    // If/when it does, see kube-controller-manager/app/plugins.go for an example of using volume.VolumeConfig.
     allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...)
     allPlugins = append(allPlugins, empty_dir.ProbeVolumePlugins()...)
     allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
     allPlugins = append(allPlugins, git_repo.ProbeVolumePlugins()...)
-    allPlugins = append(allPlugins, host_path.ProbeVolumePlugins()...)
-    allPlugins = append(allPlugins, nfs.ProbeVolumePlugins()...)
+    allPlugins = append(allPlugins, host_path.ProbeVolumePlugins(volume.VolumeConfig{})...)
+    allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(volume.VolumeConfig{})...)
     allPlugins = append(allPlugins, secret.ProbeVolumePlugins()...)
     allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...)
     allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
diff --git a/contrib/mesos/pkg/controllermanager/controllermanager.go b/contrib/mesos/pkg/controllermanager/controllermanager.go
index f84a6fd4c8..e7ac60e92e 100644
--- a/contrib/mesos/pkg/controllermanager/controllermanager.go
+++ b/contrib/mesos/pkg/controllermanager/controllermanager.go
@@ -149,7 +149,7 @@ func (s *CMServer) Run(_ []string) error {
     pvclaimBinder := volumeclaimbinder.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod)
     pvclaimBinder.Run()
 
-    pvRecycler, err := volumeclaimbinder.NewPersistentVolumeRecycler(kubeClient, s.PVClaimBinderSyncPeriod, app.ProbeRecyclableVolumePlugins())
+    pvRecycler, err := volumeclaimbinder.NewPersistentVolumeRecycler(kubeClient, s.PVClaimBinderSyncPeriod, app.ProbeRecyclableVolumePlugins(s.VolumeConfigFlags))
     if err != nil {
         glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
     }
diff --git a/pkg/volume/host_path/host_path.go b/pkg/volume/host_path/host_path.go
index 0ee5a7325e..17ee2ef023 100644
--- a/pkg/volume/host_path/host_path.go
+++ b/pkg/volume/host_path/host_path.go
@@ -29,7 +29,7 @@ import (
 
 // This is the primary entrypoint for volume plugins.
 // Tests covering recycling should not use this func but instead
 // use their own array of plugins w/ a custom recyclerFunc as appropriate
-func ProbeVolumePlugins() []volume.VolumePlugin {
+func ProbeVolumePlugins(config volume.VolumeConfig) []volume.VolumePlugin {
     return []volume.VolumePlugin{&hostPathPlugin{nil, newRecycler}}
 }
diff --git a/pkg/volume/host_path/host_path_test.go b/pkg/volume/host_path/host_path_test.go
index b9a4c200ff..94f51f8a86 100644
--- a/pkg/volume/host_path/host_path_test.go
+++ b/pkg/volume/host_path/host_path_test.go
@@ -28,7 +28,7 @@ import (
 
 func TestCanSupport(t *testing.T) {
     plugMgr := volume.VolumePluginMgr{}
-    plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("fake", nil, nil))
+    plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("fake", nil, nil))
 
     plug, err := plugMgr.FindPluginByName("kubernetes.io/host-path")
     if err != nil {
@@ -50,7 +50,7 @@ func TestCanSupport(t *testing.T) {
 
 func TestGetAccessModes(t *testing.T) {
     plugMgr := volume.VolumePluginMgr{}
-    plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
+    plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
 
     plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/host-path")
     if err != nil {
@@ -104,7 +104,7 @@ func (r *mockRecycler) Recycle() error {
 
 func TestPlugin(t *testing.T) {
     plugMgr := volume.VolumePluginMgr{}
-    plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("fake", nil, nil))
+    plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("fake", nil, nil))
 
     plug, err := plugMgr.FindPluginByName("kubernetes.io/host-path")
     if err != nil {
@@ -179,7 +179,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
     client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)}
 
     plugMgr := volume.VolumePluginMgr{}
-    plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil))
+    plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", client, nil))
     plug, _ := plugMgr.FindPluginByName(hostPathPluginName)
 
     // readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes
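The comment on ProbeVolumePlugins says recycling tests should build their own plugin array with a custom recyclerFunc instead of calling this func. A minimal sketch of that pattern for host_path, reusing the mockRecycler already defined in host_path_test.go (the recycler-func signature is an assumption inferred from the &hostPathPlugin{nil, newRecycler} literal above, and the mockRecycler fields are elided):

    // hypothetical test helper: probe a host_path plugin whose recycler is mocked out
    func probeMockRecyclerPlugins() []volume.VolumePlugin {
        newMockRecycler := func(spec *volume.Spec, host volume.VolumeHost) (volume.Recycler, error) {
            return &mockRecycler{}, nil // fields elided; see mockRecycler in host_path_test.go
        }
        return []volume.VolumePlugin{&hostPathPlugin{nil, newMockRecycler}}
    }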
diff --git a/pkg/volume/nfs/nfs.go b/pkg/volume/nfs/nfs.go
index 6d83b027f4..3e66045e67 100644
--- a/pkg/volume/nfs/nfs.go
+++ b/pkg/volume/nfs/nfs.go
@@ -32,7 +32,7 @@ import (
 
 // This is the primary entrypoint for volume plugins.
 // Tests covering recycling should not use this func but instead
 // use their own array of plugins w/ a custom recyclerFunc as appropriate
-func ProbeVolumePlugins() []volume.VolumePlugin {
+func ProbeVolumePlugins(config volume.VolumeConfig) []volume.VolumePlugin {
     return []volume.VolumePlugin{&nfsPlugin{nil, newRecycler}}
 }
diff --git a/pkg/volume/nfs/nfs_test.go b/pkg/volume/nfs/nfs_test.go
index 4ba60638f6..120c0c454b 100644
--- a/pkg/volume/nfs/nfs_test.go
+++ b/pkg/volume/nfs/nfs_test.go
@@ -30,7 +30,7 @@ import (
 
 func TestCanSupport(t *testing.T) {
     plugMgr := volume.VolumePluginMgr{}
-    plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("fake", nil, nil))
+    plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("fake", nil, nil))
     plug, err := plugMgr.FindPluginByName("kubernetes.io/nfs")
     if err != nil {
         t.Errorf("Can't find the plugin by name")
@@ -51,7 +51,7 @@ func TestCanSupport(t *testing.T) {
 
 func TestGetAccessModes(t *testing.T) {
     plugMgr := volume.VolumePluginMgr{}
-    plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
+    plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
 
     plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/nfs")
     if err != nil {
@@ -114,7 +114,7 @@ func contains(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeAccessMode) bool {
 
 func doTestPlugin(t *testing.T, spec *volume.Spec) {
     plugMgr := volume.VolumePluginMgr{}
-    plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
+    plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
     plug, err := plugMgr.FindPluginByName("kubernetes.io/nfs")
     if err != nil {
         t.Errorf("Can't find the plugin by name")
@@ -238,7 +238,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
     client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)}
 
     plugMgr := volume.VolumePluginMgr{}
-    plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil))
+    plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", client, nil))
     plug, _ := plugMgr.FindPluginByName(nfsPluginName)
 
     // readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes
diff --git a/pkg/volume/persistent_claim/persistent_claim_test.go b/pkg/volume/persistent_claim/persistent_claim_test.go
index 1297a2bab2..634c9e5a13 100644
--- a/pkg/volume/persistent_claim/persistent_claim_test.go
+++ b/pkg/volume/persistent_claim/persistent_claim_test.go
@@ -141,7 +141,7 @@ func TestNewBuilder(t *testing.T) {
                     ClaimName: "claimB",
                 },
             },
-            plugin: host_path.ProbeVolumePlugins()[0],
+            plugin: host_path.ProbeVolumePlugins(volume.VolumeConfig{})[0],
             testFunc: func(builder volume.Builder, plugin volume.VolumePlugin) error {
                 if builder.GetPath() != "/tmp" {
                     return fmt.Errorf("Expected HostPath.Path /tmp, got: %s", builder.GetPath())
@@ -317,7 +317,7 @@ func TestNewBuilderClaimNotBound(t *testing.T) {
 func testProbeVolumePlugins() []volume.VolumePlugin {
     allPlugins := []volume.VolumePlugin{}
     allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
-    allPlugins = append(allPlugins, host_path.ProbeVolumePlugins()...)
+    allPlugins = append(allPlugins, host_path.ProbeVolumePlugins(volume.VolumeConfig{})...)
     allPlugins = append(allPlugins, ProbeVolumePlugins()...)
     return allPlugins
 }
diff --git a/pkg/volume/plugins.go b/pkg/volume/plugins.go
index afd221c7ee..acc462b767 100644
--- a/pkg/volume/plugins.go
+++ b/pkg/volume/plugins.go
@@ -137,6 +137,28 @@ type Spec struct {
     ReadOnly bool
 }
 
+// VolumeConfig is how volume plugins receive configuration. An instance specific to the plugin will be passed to
+// the plugin's ProbeVolumePlugins(config) func. Reasonable defaults will be provided by the binary hosting
+// the plugins while allowing override of those default values. Those config values are then set on an instance of
+// VolumeConfig and passed to the plugin.
+//
+// Values in VolumeConfig are intended to be relevant to several plugins, but not necessarily all plugins. The
+// preference is to leverage strong typing in this struct. All config items must have a descriptive but non-specific
+// name (e.g., RecyclerMinimumTimeout is OK but RecyclerMinimumTimeoutForNFS is !OK). An instance of config will be
+// given directly to the plugin, so config names specific to a single plugin are unneeded and would wrongly
+// expose that plugin in this shared VolumeConfig struct.
+//
+// OtherAttributes is a map of string values intended for one-off configuration of a plugin or config that is only
+// relevant to a single plugin. All values are passed by string and require interpretation by the plugin.
+// Passing config as strings is the least desirable option but can be used for truly one-off configuration.
+// The binary should still use strong typing for this value when binding CLI values before they are passed as strings
+// in OtherAttributes.
+type VolumeConfig struct {
+    // thockin: do we want to wait on this until we have an actual use case? I can change the comments above to
+    // reflect our intention for one-off config.
+    OtherAttributes map[string]string
+}
+
 // NewSpecFromVolume creates an Spec from an api.Volume
 func NewSpecFromVolume(vs *api.Volume) *Spec {
     return &Spec{
diff --git a/pkg/volume/testing.go b/pkg/volume/testing.go
index f9406bff94..8d39b31fdd 100644
--- a/pkg/volume/testing.go
+++ b/pkg/volume/testing.go
@@ -72,6 +72,19 @@ func (f *fakeVolumeHost) NewWrapperCleaner(spec *Spec, podUID types.UID, mounter
     return plug.NewCleaner(spec.Name, podUID, mounter)
 }
 
+func ProbeVolumePlugins(config VolumeConfig) []VolumePlugin {
+    if _, ok := config.OtherAttributes["fake-property"]; ok {
+        return []VolumePlugin{
+            &FakeVolumePlugin{
+                PluginName: "fake-plugin",
+                Host:       nil,
+                // SomeFakeProperty: config.OtherAttributes["fake-property"] -- string, may require parsing by plugin
+            },
+        }
+    }
+    return []VolumePlugin{&FakeVolumePlugin{PluginName: "fake-plugin"}}
+}
+
 // FakeVolumePlugin is useful for testing.  It tries to be a fully compliant
 // plugin, but all it does is make empty directories.
 // Use as:
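The fake ProbeVolumePlugins added to testing.go is the one concrete consumer of OtherAttributes in this patch and shows the intended flow of binary-level config into a plugin. A minimal sketch of exercising it from a test in the same package (the test name and the property value are illustrative only, with the standard testing package imported):

    // hypothetical test: a config carrying the fake property selects the configured plugin path
    func TestProbeVolumePluginsWithConfig(t *testing.T) {
        config := VolumeConfig{
            OtherAttributes: map[string]string{"fake-property": "42"},
        }
        plugins := ProbeVolumePlugins(config)
        if len(plugins) != 1 {
            t.Errorf("expected 1 fake plugin, got %d", len(plugins))
        }
    }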