From a88d1d3dcc3c1a681585d26c50500c9343763950 Mon Sep 17 00:00:00 2001 From: Mike McRill Date: Thu, 6 Dec 2018 10:12:51 -0600 Subject: [PATCH] Add Cinder Max Volume Limit Also add place holder support for reporting limits from node. --- .../max_attachable_volume_predicate_test.go | 46 +++++++++++++++++++ .../algorithm/predicates/predicates.go | 28 ++++++++++- .../algorithmprovider/defaults/defaults.go | 1 + .../defaults/defaults_test.go | 1 + .../defaults/register_predicates.go | 7 +++ pkg/scheduler/factory/factory.go | 2 +- pkg/volume/cinder/cinder.go | 26 +++++++++++ pkg/volume/util/attach_limit.go | 7 +++ 8 files changed, 116 insertions(+), 2 deletions(-) diff --git a/pkg/scheduler/algorithm/predicates/max_attachable_volume_predicate_test.go b/pkg/scheduler/algorithm/predicates/max_attachable_volume_predicate_test.go index 24d2f0a70c..b734ddd68e 100644 --- a/pkg/scheduler/algorithm/predicates/max_attachable_volume_predicate_test.go +++ b/pkg/scheduler/algorithm/predicates/max_attachable_volume_predicate_test.go @@ -247,6 +247,33 @@ func TestVolumeCountConflicts(t *testing.T) { }, }, } + twoVolCinderPod := &v1.Pod{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + VolumeSource: v1.VolumeSource{ + Cinder: &v1.CinderVolumeSource{VolumeID: "tvp1"}, + }, + }, + { + VolumeSource: v1.VolumeSource{ + Cinder: &v1.CinderVolumeSource{VolumeID: "tvp2"}, + }, + }, + }, + }, + } + oneVolCinderPod := &v1.Pod{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + VolumeSource: v1.VolumeSource{ + Cinder: &v1.CinderVolumeSource{VolumeID: "ovp"}, + }, + }, + }, + }, + } tests := []struct { newPod *v1.Pod @@ -739,6 +766,23 @@ func TestVolumeCountConflicts(t *testing.T) { fits: true, test: "two different unbound PVCs are counted towards the PV limit as two volumes", }, + // filterName:CinderVolumeFilterType + { + newPod: oneVolCinderPod, + existingPods: []*v1.Pod{twoVolCinderPod}, + filterName: CinderVolumeFilterType, + maxVols: 4, + fits: true, + test: "fits when node capacity >= new 
pod's Cinder volumes",
+		},
+		{
+			newPod:       oneVolCinderPod,
+			existingPods: []*v1.Pod{twoVolCinderPod},
+			filterName:   CinderVolumeFilterType,
+			maxVols:      2,
+			fits:         false,
+			test:         "not fit when node capacity < new pod's Cinder volumes",
+		},
 	}
 
 	expectedFailureReasons := []PredicateFailureReason{ErrMaxVolumeCountExceeded}
@@ -916,6 +960,8 @@ func getVolumeLimitKey(filterType string) v1.ResourceName {
 		return v1.ResourceName(volumeutil.GCEVolumeLimitKey)
 	case AzureDiskVolumeFilterType:
 		return v1.ResourceName(volumeutil.AzureVolumeLimitKey)
+	case CinderVolumeFilterType:
+		return v1.ResourceName(volumeutil.CinderVolumeLimitKey)
 	default:
 		return v1.ResourceName(volumeutil.GetCSIAttachLimitKey(filterType))
 	}
diff --git a/pkg/scheduler/algorithm/predicates/predicates.go b/pkg/scheduler/algorithm/predicates/predicates.go
index 3fcc02b0ca..47066d8c17 100644
--- a/pkg/scheduler/algorithm/predicates/predicates.go
+++ b/pkg/scheduler/algorithm/predicates/predicates.go
@@ -84,6 +84,8 @@ const (
 	MaxGCEPDVolumeCountPred = "MaxGCEPDVolumeCount"
 	// MaxAzureDiskVolumeCountPred defines the name of predicate MaxAzureDiskVolumeCount.
 	MaxAzureDiskVolumeCountPred = "MaxAzureDiskVolumeCount"
+	// MaxCinderVolumeCountPred defines the name of predicate MaxCinderVolumeCount.
+	MaxCinderVolumeCountPred = "MaxCinderVolumeCount"
 	// MaxCSIVolumeCountPred defines the predicate that decides how many CSI volumes should be attached
 	MaxCSIVolumeCountPred = "MaxCSIVolumeCountPred"
 	// NoVolumeZoneConflictPred defines the name of predicate NoVolumeZoneConflict.
@@ -112,6 +114,8 @@ const (
 	GCEPDVolumeFilterType = "GCE"
 	// AzureDiskVolumeFilterType defines the filter name for AzureDiskVolumeFilter.
 	AzureDiskVolumeFilterType = "AzureDisk"
+	// CinderVolumeFilterType defines the filter name for CinderVolumeFilter.
+ CinderVolumeFilterType = "Cinder" ) // IMPORTANT NOTE for predicate developers: @@ -133,7 +137,7 @@ var ( MatchNodeSelectorPred, PodFitsResourcesPred, NoDiskConflictPred, PodToleratesNodeTaintsPred, PodToleratesNodeNoExecuteTaintsPred, CheckNodeLabelPresencePred, CheckServiceAffinityPred, MaxEBSVolumeCountPred, MaxGCEPDVolumeCountPred, MaxCSIVolumeCountPred, - MaxAzureDiskVolumeCountPred, CheckVolumeBindingPred, NoVolumeZoneConflictPred, + MaxAzureDiskVolumeCountPred, MaxCinderVolumeCountPred, CheckVolumeBindingPred, NoVolumeZoneConflictPred, CheckNodeMemoryPressurePred, CheckNodePIDPressurePred, CheckNodeDiskPressurePred, MatchInterPodAffinityPred} ) @@ -332,6 +336,9 @@ func NewMaxPDVolumeCountPredicate( case AzureDiskVolumeFilterType: filter = AzureDiskVolumeFilter volumeLimitKey = v1.ResourceName(volumeutil.AzureVolumeLimitKey) + case CinderVolumeFilterType: + filter = CinderVolumeFilter + volumeLimitKey = v1.ResourceName(volumeutil.CinderVolumeLimitKey) default: klog.Fatalf("Wrong filterName, Only Support %v %v %v ", EBSVolumeFilterType, GCEPDVolumeFilterType, AzureDiskVolumeFilterType) @@ -370,6 +377,8 @@ func getMaxVolumeFunc(filterName string) func(node *v1.Node) int { return DefaultMaxGCEPDVolumes case AzureDiskVolumeFilterType: return DefaultMaxAzureDiskVolumes + case CinderVolumeFilterType: + return volumeutil.DefaultMaxCinderVolumes default: return -1 } @@ -558,6 +567,23 @@ var AzureDiskVolumeFilter = VolumeFilter{ }, } +// CinderVolumeFilter is a VolumeFilter for filtering Cinder Volumes +var CinderVolumeFilter = VolumeFilter{ + FilterVolume: func(vol *v1.Volume) (string, bool) { + if vol.Cinder != nil { + return vol.Cinder.VolumeID, true + } + return "", false + }, + + FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) { + if pv.Spec.Cinder != nil { + return pv.Spec.Cinder.VolumeID, true + } + return "", false + }, +} + // VolumeZoneChecker contains information to check the volume zone for a predicate. 
type VolumeZoneChecker struct { pvInfo PersistentVolumeInfo diff --git a/pkg/scheduler/algorithmprovider/defaults/defaults.go b/pkg/scheduler/algorithmprovider/defaults/defaults.go index fbc8cb87b8..b4975999eb 100644 --- a/pkg/scheduler/algorithmprovider/defaults/defaults.go +++ b/pkg/scheduler/algorithmprovider/defaults/defaults.go @@ -44,6 +44,7 @@ func defaultPredicates() sets.String { predicates.MaxGCEPDVolumeCountPred, predicates.MaxAzureDiskVolumeCountPred, predicates.MaxCSIVolumeCountPred, + predicates.MaxCinderVolumeCountPred, predicates.MatchInterPodAffinityPred, predicates.NoDiskConflictPred, predicates.GeneralPred, diff --git a/pkg/scheduler/algorithmprovider/defaults/defaults_test.go b/pkg/scheduler/algorithmprovider/defaults/defaults_test.go index 48696ba026..c985a961b4 100644 --- a/pkg/scheduler/algorithmprovider/defaults/defaults_test.go +++ b/pkg/scheduler/algorithmprovider/defaults/defaults_test.go @@ -74,6 +74,7 @@ func TestDefaultPredicates(t *testing.T) { predicates.MaxEBSVolumeCountPred, predicates.MaxGCEPDVolumeCountPred, predicates.MaxAzureDiskVolumeCountPred, + predicates.MaxCinderVolumeCountPred, predicates.MaxCSIVolumeCountPred, predicates.MatchInterPodAffinityPred, predicates.NoDiskConflictPred, diff --git a/pkg/scheduler/algorithmprovider/defaults/register_predicates.go b/pkg/scheduler/algorithmprovider/defaults/register_predicates.go index 4f93f83da2..3abf397c17 100644 --- a/pkg/scheduler/algorithmprovider/defaults/register_predicates.go +++ b/pkg/scheduler/algorithmprovider/defaults/register_predicates.go @@ -84,6 +84,13 @@ func init() { return predicates.NewCSIMaxVolumeLimitPredicate(args.PVInfo, args.PVCInfo) }, ) + factory.RegisterFitPredicateFactory( + predicates.MaxCinderVolumeCountPred, + func(args factory.PluginFactoryArgs) predicates.FitPredicate { + return predicates.NewMaxPDVolumeCountPredicate(predicates.CinderVolumeFilterType, args.PVInfo, args.PVCInfo) + }, + ) + // Fit is determined by inter-pod affinity. 
factory.RegisterFitPredicateFactory( predicates.MatchInterPodAffinityPred, diff --git a/pkg/scheduler/factory/factory.go b/pkg/scheduler/factory/factory.go index 91432afbbc..90265721d9 100644 --- a/pkg/scheduler/factory/factory.go +++ b/pkg/scheduler/factory/factory.go @@ -72,7 +72,7 @@ var ( matchInterPodAffinitySet = sets.NewString(predicates.MatchInterPodAffinityPred) generalPredicatesSets = sets.NewString(predicates.GeneralPred) noDiskConflictSet = sets.NewString(predicates.NoDiskConflictPred) - maxPDVolumeCountPredicateKeys = []string{predicates.MaxGCEPDVolumeCountPred, predicates.MaxAzureDiskVolumeCountPred, predicates.MaxEBSVolumeCountPred} + maxPDVolumeCountPredicateKeys = []string{predicates.MaxGCEPDVolumeCountPred, predicates.MaxAzureDiskVolumeCountPred, predicates.MaxEBSVolumeCountPred, predicates.MaxCinderVolumeCountPred} ) // Binder knows how to write a binding. diff --git a/pkg/volume/cinder/cinder.go b/pkg/volume/cinder/cinder.go index e4e488b8d2..1cc38d4f7a 100644 --- a/pkg/volume/cinder/cinder.go +++ b/pkg/volume/cinder/cinder.go @@ -120,6 +120,32 @@ func (plugin *cinderPlugin) SupportsBulkVolumeVerification() bool { return false } +var _ volume.VolumePluginWithAttachLimits = &cinderPlugin{} + +func (plugin *cinderPlugin) GetVolumeLimits() (map[string]int64, error) { + volumeLimits := map[string]int64{ + util.CinderVolumeLimitKey: util.DefaultMaxCinderVolumes, + } + cloud := plugin.host.GetCloudProvider() + + // if we can't fetch cloudprovider we return an error + // hoping external CCM or admin can set it. Returning + // default values from here will mean, no one can + // override them. 
+	if cloud == nil {
+		return nil, fmt.Errorf("No cloudprovider present")
+	}
+
+	if cloud.ProviderName() != openstack.ProviderName {
+		return nil, fmt.Errorf("Expected Openstack cloud, found %s", cloud.ProviderName())
+	}
+	return volumeLimits, nil
+}
+
+func (plugin *cinderPlugin) VolumeLimitKey(spec *volume.Spec) string {
+	return util.CinderVolumeLimitKey
+}
+
 func (plugin *cinderPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
 	return []v1.PersistentVolumeAccessMode{
 		v1.ReadWriteOnce,
diff --git a/pkg/volume/util/attach_limit.go b/pkg/volume/util/attach_limit.go
index 8325dbf755..2abbd2aacc 100644
--- a/pkg/volume/util/attach_limit.go
+++ b/pkg/volume/util/attach_limit.go
@@ -40,6 +40,13 @@ const (
 	// GCEVolumeLimitKey stores resource name that will store volume limits for GCE node
 	GCEVolumeLimitKey = "attachable-volumes-gce-pd"
 
+	// CinderVolumeLimitKey stores resource name that will store volume limits for Cinder node
+	CinderVolumeLimitKey = "attachable-volumes-cinder"
+	// DefaultMaxCinderVolumes defines the maximum number of PD Volumes for Cinder
+	// For Openstack we are keeping this to a high enough value so as depending on backend
+	// cluster admins can configure it.
+	DefaultMaxCinderVolumes = 256
+
 	// CSIAttachLimitPrefix defines prefix used for CSI volumes
 	CSIAttachLimitPrefix = "attachable-volumes-csi-"