mirror of https://github.com/k3s-io/k3s
Added VolumeConfig to volumes
parent cb2252b57f
commit 68358fd308
@@ -67,6 +67,7 @@ type CMServer struct {
 	ResourceQuotaSyncPeriod           time.Duration
 	NamespaceSyncPeriod               time.Duration
 	PVClaimBinderSyncPeriod           time.Duration
+	VolumeConfigFlags                 VolumeConfigFlags
 	HorizontalPodAutoscalerSyncPeriod time.Duration
 	RegisterRetryCount                int
 	NodeMonitorGracePeriod            time.Duration
@@ -106,10 +107,22 @@ func NewCMServer() *CMServer {
 		PodEvictionTimeout:            5 * time.Minute,
 		ClusterName:                   "kubernetes",
 		EnableHorizontalPodAutoscaler: false,
+		VolumeConfigFlags: VolumeConfigFlags{
+			// default values here
+			PersistentVolumeRecyclerTimeoutNFS: 300,
+		},
 	}
 	return &s
 }
 
+// VolumeConfigFlags is used to bind CLI flags to variables. This top-level struct contains *all* enumerated
+// CLI flags meant to configure all volume plugins. From this config, the binary will create many instances
+// of volume.VolumeConfig which are then passed to the appropriate plugin. The ControllerManager binary is the only
+// part of the code which knows what plugins are supported and which CLI flags correspond to each plugin.
+type VolumeConfigFlags struct {
+	PersistentVolumeRecyclerTimeoutNFS int
+}
+
 // AddFlags adds flags for a specific CMServer to the specified FlagSet
 func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
 	fs.IntVar(&s.Port, "port", s.Port, "The port that the controller-manager's http service runs on")
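The new doc comment above describes a fan-out: CLI flags land in the strongly typed VolumeConfigFlags, and the hosting binary then derives one volume.VolumeConfig per plugin. Below is a minimal standalone sketch of that flow, using local stand-in types so it compiles on its own; the attribute key and the stringification step are assumptions for illustration, not part of this commit.

```go
package main

import (
	"fmt"
	"strconv"
)

// Stand-ins for the types in this commit, so the sketch compiles on its own.
type VolumeConfigFlags struct {
	PersistentVolumeRecyclerTimeoutNFS int
}

type VolumeConfig struct {
	OtherAttributes map[string]string
}

func main() {
	// Normally populated from CLI flags; 300 mirrors the NewCMServer default.
	flags := VolumeConfigFlags{PersistentVolumeRecyclerTimeoutNFS: 300}

	// Fan the typed flags out into a per-plugin config. At this point in the
	// commit, VolumeConfig only carries OtherAttributes, so the value is
	// stringified; the key name here is hypothetical.
	nfsConfig := VolumeConfig{
		OtherAttributes: map[string]string{
			"pv-recycler-timeout-nfs": strconv.Itoa(flags.PersistentVolumeRecyclerTimeoutNFS),
		},
	}
	fmt.Println(nfsConfig.OtherAttributes["pv-recycler-timeout-nfs"]) // "300"
}
```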
@@ -125,6 +138,8 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
 	fs.DurationVar(&s.ResourceQuotaSyncPeriod, "resource-quota-sync-period", s.ResourceQuotaSyncPeriod, "The period for syncing quota usage status in the system")
 	fs.DurationVar(&s.NamespaceSyncPeriod, "namespace-sync-period", s.NamespaceSyncPeriod, "The period for syncing namespace life-cycle updates")
 	fs.DurationVar(&s.PVClaimBinderSyncPeriod, "pvclaimbinder-sync-period", s.PVClaimBinderSyncPeriod, "The period for syncing persistent volumes and persistent volume claims")
+	// TODO markt -- make this example a working config item with Recycler Config PR.
+	//	fs.MyExample(&s.VolumeConfig.PersistentVolumeRecyclerTimeoutNFS, "pv-recycler-timeout-nfs", s.VolumeConfig.PersistentVolumeRecyclerTimeoutNFS, "The minimum timeout for an NFS PV recycling operation")
 	fs.DurationVar(&s.HorizontalPodAutoscalerSyncPeriod, "horizontal-pod-autoscaler-sync-period", s.HorizontalPodAutoscalerSyncPeriod, "The period for syncing the number of pods in horizontal pod autoscaler.")
 	fs.DurationVar(&s.PodEvictionTimeout, "pod-eviction-timeout", s.PodEvictionTimeout, "The grace period for deleting pods on failed nodes.")
 	fs.Float32Var(&s.DeletingPodsQps, "deleting-pods-qps", 0.1, "Number of nodes per second on which pods are deleted in case of node failure.")
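The TODO above leaves the recycler timeout flag unbound. One plausible shape for the binding, sketched with the real spf13/pflag API (IntVar exists; fs.MyExample does not) and a local stand-in struct; the flag name and usage string are taken from the commented-out example in the diff.

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// Stand-in for the struct added in this commit.
type VolumeConfigFlags struct {
	PersistentVolumeRecyclerTimeoutNFS int
}

func main() {
	s := VolumeConfigFlags{PersistentVolumeRecyclerTimeoutNFS: 300}
	fs := pflag.NewFlagSet("controller-manager", pflag.ExitOnError)

	// A plain IntVar binding; default comes from the struct, CLI overrides it.
	fs.IntVar(&s.PersistentVolumeRecyclerTimeoutNFS, "pv-recycler-timeout-nfs",
		s.PersistentVolumeRecyclerTimeoutNFS,
		"The minimum timeout for an NFS PV recycling operation")

	fs.Parse([]string{"--pv-recycler-timeout-nfs=600"})
	fmt.Println(s.PersistentVolumeRecyclerTimeoutNFS) // 600
}
```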
@@ -231,7 +246,7 @@ func (s *CMServer) Run(_ []string) error {
 
 	pvclaimBinder := volumeclaimbinder.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod)
 	pvclaimBinder.Run()
-	pvRecycler, err := volumeclaimbinder.NewPersistentVolumeRecycler(kubeClient, s.PVClaimBinderSyncPeriod, ProbeRecyclableVolumePlugins())
+	pvRecycler, err := volumeclaimbinder.NewPersistentVolumeRecycler(kubeClient, s.PVClaimBinderSyncPeriod, ProbeRecyclableVolumePlugins(s.VolumeConfigFlags))
 	if err != nil {
 		glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
 	}
@@ -31,13 +31,25 @@ import (
 )
 
 // ProbeRecyclableVolumePlugins collects all persistent volume plugins into an easy to use list.
-func ProbeRecyclableVolumePlugins() []volume.VolumePlugin {
+func ProbeRecyclableVolumePlugins(flags VolumeConfigFlags) []volume.VolumePlugin {
 	allPlugins := []volume.VolumePlugin{}
 
-	// The list of plugins to probe is decided by the kubelet binary, not
+	// The list of plugins to probe is decided by this binary, not
 	// by dynamic linking or other "magic". Plugins will be analyzed and
 	// initialized later.
-	allPlugins = append(allPlugins, host_path.ProbeVolumePlugins()...)
-	allPlugins = append(allPlugins, nfs.ProbeVolumePlugins()...)
+
+	// Each plugin can make use of VolumeConfig. The single arg to this func contains *all* enumerated
+	// CLI flags meant to configure volume plugins. From that single config, create an instance of volume.VolumeConfig
+	// for a specific plugin and pass that instance to the plugin's ProbeVolumePlugins(config) func.
+	hostPathConfig := volume.VolumeConfig{
+		// transfer attributes from VolumeConfigFlags to this instance of volume.VolumeConfig
+	}
+	nfsConfig := volume.VolumeConfig{
+		// TODO transfer flags.PersistentVolumeRecyclerTimeoutNFS and other flags to this instance of VolumeConfig.
+		// Configuring recyclers will be done in a follow-up PR.
+	}
+
+	allPlugins = append(allPlugins, host_path.ProbeVolumePlugins(hostPathConfig)...)
+	allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(nfsConfig)...)
 	return allPlugins
 }
@@ -47,12 +47,15 @@ func ProbeVolumePlugins() []volume.VolumePlugin {
 	// The list of plugins to probe is decided by the kubelet binary, not
 	// by dynamic linking or other "magic". Plugins will be analyzed and
 	// initialized later.
+	//
+	// Kubelet does not currently need to configure volume plugins.
+	// If/when it does, see kube-controller-manager/app/plugins.go for an example of using volume.VolumeConfig.
 	allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...)
 	allPlugins = append(allPlugins, empty_dir.ProbeVolumePlugins()...)
 	allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
 	allPlugins = append(allPlugins, git_repo.ProbeVolumePlugins()...)
-	allPlugins = append(allPlugins, host_path.ProbeVolumePlugins()...)
-	allPlugins = append(allPlugins, nfs.ProbeVolumePlugins()...)
+	allPlugins = append(allPlugins, host_path.ProbeVolumePlugins(volume.VolumeConfig{})...)
+	allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(volume.VolumeConfig{})...)
 	allPlugins = append(allPlugins, secret.ProbeVolumePlugins()...)
 	allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...)
 	allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
@@ -149,7 +149,7 @@ func (s *CMServer) Run(_ []string) error {
 
 	pvclaimBinder := volumeclaimbinder.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod)
 	pvclaimBinder.Run()
-	pvRecycler, err := volumeclaimbinder.NewPersistentVolumeRecycler(kubeClient, s.PVClaimBinderSyncPeriod, app.ProbeRecyclableVolumePlugins())
+	pvRecycler, err := volumeclaimbinder.NewPersistentVolumeRecycler(kubeClient, s.PVClaimBinderSyncPeriod, app.ProbeRecyclableVolumePlugins(s.VolumeConfigFlags))
 	if err != nil {
 		glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
 	}
@@ -29,7 +29,7 @@ import (
 // This is the primary entrypoint for volume plugins.
 // Tests covering recycling should not use this func but instead
 // use their own array of plugins w/ a custom recyclerFunc as appropriate
-func ProbeVolumePlugins() []volume.VolumePlugin {
+func ProbeVolumePlugins(config volume.VolumeConfig) []volume.VolumePlugin {
 	return []volume.VolumePlugin{&hostPathPlugin{nil, newRecycler}}
 }
 
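The comment above steers recycling tests away from ProbeVolumePlugins and toward a hand-built plugin list with a custom recycler func. Below is a standalone sketch of that pattern using stand-in types; the real hostPathPlugin field and the mockRecycler in this commit's tests follow the same shape, though their exact signatures may differ.

```go
package main

import "fmt"

// Stand-ins: a recycler and a plugin struct that holds a recycler factory.
type Recycler interface{ Recycle() error }

type mockRecycler struct{ recycled bool }

func (r *mockRecycler) Recycle() error { r.recycled = true; return nil }

// hostPathPlugin-like struct: the second field is the swappable factory the
// comment refers to ("custom recyclerFunc").
type hostPathPlugin struct {
	host        interface{}
	newRecycler func() (Recycler, error)
}

func main() {
	// Tests construct the plugin directly instead of calling ProbeVolumePlugins,
	// so recycling behavior is fully controlled by the mock.
	mock := &mockRecycler{}
	plug := &hostPathPlugin{nil, func() (Recycler, error) { return mock, nil }}

	r, _ := plug.newRecycler()
	r.Recycle()
	fmt.Println(mock.recycled) // true
}
```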
@@ -28,7 +28,7 @@ import (
 
 func TestCanSupport(t *testing.T) {
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("fake", nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("fake", nil, nil))
 
 	plug, err := plugMgr.FindPluginByName("kubernetes.io/host-path")
 	if err != nil {
@@ -50,7 +50,7 @@ func TestCanSupport(t *testing.T) {
 
 func TestGetAccessModes(t *testing.T) {
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
 
 	plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/host-path")
 	if err != nil {
@@ -104,7 +104,7 @@ func (r *mockRecycler) Recycle() error {
 
 func TestPlugin(t *testing.T) {
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("fake", nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("fake", nil, nil))
 
 	plug, err := plugMgr.FindPluginByName("kubernetes.io/host-path")
 	if err != nil {
@@ -179,7 +179,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 	client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)}
 
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", client, nil))
 	plug, _ := plugMgr.FindPluginByName(hostPathPluginName)
 
 	// readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes
@@ -32,7 +32,7 @@ import (
 // This is the primary entrypoint for volume plugins.
 // Tests covering recycling should not use this func but instead
 // use their own array of plugins w/ a custom recyclerFunc as appropriate
-func ProbeVolumePlugins() []volume.VolumePlugin {
+func ProbeVolumePlugins(config volume.VolumeConfig) []volume.VolumePlugin {
 	return []volume.VolumePlugin{&nfsPlugin{nil, newRecycler}}
 }
 
@@ -30,7 +30,7 @@ import (
 
 func TestCanSupport(t *testing.T) {
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("fake", nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("fake", nil, nil))
 	plug, err := plugMgr.FindPluginByName("kubernetes.io/nfs")
 	if err != nil {
 		t.Errorf("Can't find the plugin by name")
@@ -51,7 +51,7 @@ func TestCanSupport(t *testing.T) {
 
 func TestGetAccessModes(t *testing.T) {
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
 
 	plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/nfs")
 	if err != nil {
@@ -114,7 +114,7 @@ func contains(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeAccessMode) bool {
 
 func doTestPlugin(t *testing.T, spec *volume.Spec) {
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
 	plug, err := plugMgr.FindPluginByName("kubernetes.io/nfs")
 	if err != nil {
 		t.Errorf("Can't find the plugin by name")
@@ -238,7 +238,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 	client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)}
 
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", client, nil))
 	plug, _ := plugMgr.FindPluginByName(nfsPluginName)
 
 	// readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes
@@ -141,7 +141,7 @@ func TestNewBuilder(t *testing.T) {
 					ClaimName: "claimB",
 				},
 			},
-			plugin: host_path.ProbeVolumePlugins()[0],
+			plugin: host_path.ProbeVolumePlugins(volume.VolumeConfig{})[0],
 			testFunc: func(builder volume.Builder, plugin volume.VolumePlugin) error {
 				if builder.GetPath() != "/tmp" {
 					return fmt.Errorf("Expected HostPath.Path /tmp, got: %s", builder.GetPath())
@@ -317,7 +317,7 @@ func TestNewBuilderClaimNotBound(t *testing.T) {
 func testProbeVolumePlugins() []volume.VolumePlugin {
 	allPlugins := []volume.VolumePlugin{}
 	allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
-	allPlugins = append(allPlugins, host_path.ProbeVolumePlugins()...)
+	allPlugins = append(allPlugins, host_path.ProbeVolumePlugins(volume.VolumeConfig{})...)
 	allPlugins = append(allPlugins, ProbeVolumePlugins()...)
 	return allPlugins
 }
@@ -137,6 +137,28 @@ type Spec struct {
 	ReadOnly bool
 }
 
+// VolumeConfig is how volume plugins receive configuration. An instance specific to the plugin will be passed to
+// the plugin's ProbeVolumePlugins(config) func. Reasonable defaults will be provided by the binary hosting
+// the plugins while allowing override of those default values. Those config values are then set to an instance of
+// VolumeConfig and passed to the plugin.
+//
+// Values in VolumeConfig are intended to be relevant to several plugins, but not necessarily all plugins. The
+// preference is to leverage strong typing in this struct. All config items must have a descriptive but non-specific
+// name (i.e., RecyclerMinimumTimeout is OK but RecyclerMinimumTimeoutForNFS is !OK). An instance of config will be
+// given directly to the plugin, so config names specific to plugins are unneeded and wrongly expose plugins
+// in this VolumeConfig struct.
+//
+// OtherAttributes is a map of string values intended for one-off configuration of a plugin or config that is only
+// relevant to a single plugin. All values are passed by string and require interpretation by the plugin.
+// Passing config as strings is the least desirable option but can be used for truly one-off configuration.
+// The binary should still use strong typing for this value when binding CLI values before they are passed as strings
+// in OtherAttributes.
+type VolumeConfig struct {
+	// thockin: do we want to wait on this until we have an actual use case? I can change the comments above to
+	// reflect our intention for one-off config.
+	OtherAttributes map[string]string
+}
+
 // NewSpecFromVolume creates a Spec from an api.Volume
 func NewSpecFromVolume(vs *api.Volume) *Spec {
 	return &Spec{
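Because OtherAttributes values are passed by string, each plugin must parse what it reads. Below is a standalone sketch with a stand-in VolumeConfig and a hypothetical intAttr helper; the attribute key only echoes the RecyclerMinimumTimeout naming example above and is not defined by this commit.

```go
package main

import (
	"fmt"
	"strconv"
)

// Stand-in for the VolumeConfig added in this commit.
type VolumeConfig struct {
	OtherAttributes map[string]string
}

// intAttr is a hypothetical plugin-side helper: values arrive as strings and
// require interpretation by the plugin, with a fallback default.
func intAttr(c VolumeConfig, key string, def int) int {
	if raw, ok := c.OtherAttributes[key]; ok {
		if v, err := strconv.Atoi(raw); err == nil {
			return v
		}
	}
	return def
}

func main() {
	cfg := VolumeConfig{OtherAttributes: map[string]string{"recycler-minimum-timeout": "300"}}
	fmt.Println(intAttr(cfg, "recycler-minimum-timeout", 60)) // 300
	fmt.Println(intAttr(cfg, "missing-key", 60))              // 60
}
```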
@@ -72,6 +72,19 @@ func (f *fakeVolumeHost) NewWrapperCleaner(spec *Spec, podUID types.UID, mounter
 	return plug.NewCleaner(spec.Name, podUID, mounter)
 }
 
+func ProbeVolumePlugins(config VolumeConfig) []VolumePlugin {
+	if _, ok := config.OtherAttributes["fake-property"]; ok {
+		return []VolumePlugin{
+			&FakeVolumePlugin{
+				PluginName: "fake-plugin",
+				Host:       nil,
+				// SomeFakeProperty: config.OtherAttributes["fake-property"] -- string, may require parsing by plugin
+			},
+		}
+	}
+	return []VolumePlugin{&FakeVolumePlugin{PluginName: "fake-plugin"}}
+}
+
 // FakeVolumePlugin is useful for testing. It tries to be a fully compliant
 // plugin, but all it does is make empty directories.
 // Use as:
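A usage sketch for the fake probe above, written in the style of this commit's tests; it assumes the same package as the fake plugin (plus a testing import), and the test name and attribute value are illustrative.

```go
func TestFakePluginProbeWithConfig(t *testing.T) {
	// "fake-property" flips the probe into its configured branch above; the
	// value arrives as a string and would be parsed by the plugin itself.
	plugins := ProbeVolumePlugins(VolumeConfig{
		OtherAttributes: map[string]string{"fake-property": "42"},
	})

	plugMgr := VolumePluginMgr{}
	plugMgr.InitPlugins(plugins, NewFakeVolumeHost("/tmp/fake", nil, nil))

	if _, err := plugMgr.FindPluginByName("fake-plugin"); err != nil {
		t.Errorf("Can't find the plugin by name")
	}
}
```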