mirror of https://github.com/k3s-io/k3s
Add Cinder Max Volume Limit
Also add placeholder support for reporting limits from node.
pull/564/head · parent 1d8960798b · commit a88d1d3dcc

@@ -247,6 +247,33 @@ func TestVolumeCountConflicts(t *testing.T) {
 			},
 		},
 	}
+	twoVolCinderPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						Cinder: &v1.CinderVolumeSource{VolumeID: "tvp1"},
+					},
+				},
+				{
+					VolumeSource: v1.VolumeSource{
+						Cinder: &v1.CinderVolumeSource{VolumeID: "tvp2"},
+					},
+				},
+			},
+		},
+	}
+	oneVolCinderPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						Cinder: &v1.CinderVolumeSource{VolumeID: "ovp"},
+					},
+				},
+			},
+		},
+	}
 
 	tests := []struct {
 		newPod *v1.Pod

@@ -739,6 +766,23 @@ func TestVolumeCountConflicts(t *testing.T) {
 			fits: true,
 			test: "two different unbound PVCs are counted towards the PV limit as two volumes",
 		},
+		// filterName:CinderVolumeFilterType
+		{
+			newPod: oneVolCinderPod,
+			existingPods: []*v1.Pod{twoVolCinderPod},
+			filterName: CinderVolumeFilterType,
+			maxVols: 4,
+			fits: true,
+			test: "fits when node capacity >= new pod's Cinder volumes",
+		},
+		{
+			newPod: oneVolCinderPod,
+			existingPods: []*v1.Pod{twoVolCinderPod},
+			filterName: CinderVolumeFilterType,
+			maxVols: 2,
+			fits: false,
+			test: "not fit when node capacity < new pod's Cinder volumes",
+		},
 	}
 
 	expectedFailureReasons := []PredicateFailureReason{ErrMaxVolumeCountExceeded}

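The arithmetic behind the two new test cases: twoVolCinderPod already holds two distinct Cinder volume IDs, and oneVolCinderPod asks for a third, so the new pod fits when maxVols is 4 but not when it is 2. Below is a minimal, self-contained sketch of that counting idea; it only handles in-line Cinder volumes, whereas the real predicate also resolves PVC-backed volumes and reads per-node limits, so treat it as an illustration rather than the scheduler's implementation.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// countCinderVolumes returns the number of distinct Cinder volume IDs used by the given pods.
func countCinderVolumes(pods ...*v1.Pod) int {
	ids := map[string]struct{}{}
	for _, pod := range pods {
		for _, vol := range pod.Spec.Volumes {
			if vol.Cinder != nil {
				ids[vol.Cinder.VolumeID] = struct{}{}
			}
		}
	}
	return len(ids)
}

func main() {
	cinderVol := func(id string) v1.Volume {
		return v1.Volume{VolumeSource: v1.VolumeSource{Cinder: &v1.CinderVolumeSource{VolumeID: id}}}
	}
	existing := &v1.Pod{Spec: v1.PodSpec{Volumes: []v1.Volume{cinderVol("tvp1"), cinderVol("tvp2")}}}
	newPod := &v1.Pod{Spec: v1.PodSpec{Volumes: []v1.Volume{cinderVol("ovp")}}}

	needed := countCinderVolumes(existing, newPod)
	for _, maxVols := range []int{4, 2} {
		// Mirrors the fits/not-fits expectations of the two Cinder test cases above.
		fmt.Printf("maxVols=%d needed=%d fits=%v\n", maxVols, needed, needed <= maxVols)
	}
}
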
@@ -916,6 +960,8 @@ func getVolumeLimitKey(filterType string) v1.ResourceName {
 		return v1.ResourceName(volumeutil.GCEVolumeLimitKey)
 	case AzureDiskVolumeFilterType:
 		return v1.ResourceName(volumeutil.AzureVolumeLimitKey)
+	case CinderVolumeFilterType:
+		return v1.ResourceName(volumeutil.CinderVolumeLimitKey)
 	default:
 		return v1.ResourceName(volumeutil.GetCSIAttachLimitKey(filterType))
 	}

@@ -84,6 +84,8 @@ const (
 	MaxGCEPDVolumeCountPred = "MaxGCEPDVolumeCount"
 	// MaxAzureDiskVolumeCountPred defines the name of predicate MaxAzureDiskVolumeCount.
 	MaxAzureDiskVolumeCountPred = "MaxAzureDiskVolumeCount"
+	// MaxCinderVolumeCountPred defines the name of predicate MaxCinderVolumeCount.
+	MaxCinderVolumeCountPred = "MaxCinderVolumeCount"
 	// MaxCSIVolumeCountPred defines the predicate that decides how many CSI volumes should be attached
 	MaxCSIVolumeCountPred = "MaxCSIVolumeCountPred"
 	// NoVolumeZoneConflictPred defines the name of predicate NoVolumeZoneConflict.

@@ -112,6 +114,8 @@ const (
 	GCEPDVolumeFilterType = "GCE"
 	// AzureDiskVolumeFilterType defines the filter name for AzureDiskVolumeFilter.
 	AzureDiskVolumeFilterType = "AzureDisk"
+	// CinderVolumeFilterType defines the filter name for CinderVolumeFilter.
+	CinderVolumeFilterType = "Cinder"
 )
 
 // IMPORTANT NOTE for predicate developers:

@@ -133,7 +137,7 @@ var (
 		MatchNodeSelectorPred, PodFitsResourcesPred, NoDiskConflictPred,
 		PodToleratesNodeTaintsPred, PodToleratesNodeNoExecuteTaintsPred, CheckNodeLabelPresencePred,
 		CheckServiceAffinityPred, MaxEBSVolumeCountPred, MaxGCEPDVolumeCountPred, MaxCSIVolumeCountPred,
-		MaxAzureDiskVolumeCountPred, CheckVolumeBindingPred, NoVolumeZoneConflictPred,
+		MaxAzureDiskVolumeCountPred, MaxCinderVolumeCountPred, CheckVolumeBindingPred, NoVolumeZoneConflictPred,
 		CheckNodeMemoryPressurePred, CheckNodePIDPressurePred, CheckNodeDiskPressurePred, MatchInterPodAffinityPred}
 )
 

@@ -332,6 +336,9 @@ func NewMaxPDVolumeCountPredicate(
 	case AzureDiskVolumeFilterType:
 		filter = AzureDiskVolumeFilter
 		volumeLimitKey = v1.ResourceName(volumeutil.AzureVolumeLimitKey)
+	case CinderVolumeFilterType:
+		filter = CinderVolumeFilter
+		volumeLimitKey = v1.ResourceName(volumeutil.CinderVolumeLimitKey)
 	default:
 		klog.Fatalf("Wrong filterName, Only Support %v %v %v ", EBSVolumeFilterType,
 			GCEPDVolumeFilterType, AzureDiskVolumeFilterType)

@@ -370,6 +377,8 @@ func getMaxVolumeFunc(filterName string) func(node *v1.Node) int {
 			return DefaultMaxGCEPDVolumes
 		case AzureDiskVolumeFilterType:
 			return DefaultMaxAzureDiskVolumes
+		case CinderVolumeFilterType:
+			return volumeutil.DefaultMaxCinderVolumes
 		default:
 			return -1
 		}

@@ -558,6 +567,23 @@ var AzureDiskVolumeFilter = VolumeFilter{
 	},
 }
 
+// CinderVolumeFilter is a VolumeFilter for filtering Cinder Volumes
+var CinderVolumeFilter = VolumeFilter{
+	FilterVolume: func(vol *v1.Volume) (string, bool) {
+		if vol.Cinder != nil {
+			return vol.Cinder.VolumeID, true
+		}
+		return "", false
+	},
+
+	FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
+		if pv.Spec.Cinder != nil {
+			return pv.Spec.Cinder.VolumeID, true
+		}
+		return "", false
+	},
+}
+
 // VolumeZoneChecker contains information to check the volume zone for a predicate.
 type VolumeZoneChecker struct {
 	pvInfo PersistentVolumeInfo

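The two callbacks cover the two ways a pod can reference a Cinder volume: FilterVolume matches in-line Cinder volumes declared directly in the pod spec, while FilterPersistentVolume matches Cinder-backed PersistentVolumes that the predicate reaches by resolving the pod's PVCs. The sketch below demonstrates that contract with a local stand-in for the scheduler's VolumeFilter type, copied here only so the snippet stays self-contained; it is not the predicate's own code.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// volumeFilter is a local stand-in for the scheduler's VolumeFilter type.
type volumeFilter struct {
	FilterVolume           func(vol *v1.Volume) (string, bool)
	FilterPersistentVolume func(pv *v1.PersistentVolume) (string, bool)
}

// cinderFilter mirrors the CinderVolumeFilter added in this change.
var cinderFilter = volumeFilter{
	FilterVolume: func(vol *v1.Volume) (string, bool) {
		if vol.Cinder != nil {
			return vol.Cinder.VolumeID, true
		}
		return "", false
	},
	FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
		if pv.Spec.Cinder != nil {
			return pv.Spec.Cinder.VolumeID, true
		}
		return "", false
	},
}

func main() {
	// An in-line Cinder volume declared in the pod spec.
	inline := v1.Volume{VolumeSource: v1.VolumeSource{Cinder: &v1.CinderVolumeSource{VolumeID: "vol-1"}}}
	// A Cinder-backed PersistentVolume, normally reached through a PVC.
	pv := v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{
		PersistentVolumeSource: v1.PersistentVolumeSource{Cinder: &v1.CinderPersistentVolumeSource{VolumeID: "vol-2"}},
	}}

	if id, ok := cinderFilter.FilterVolume(&inline); ok {
		fmt.Println("in-line Cinder volume:", id)
	}
	if id, ok := cinderFilter.FilterPersistentVolume(&pv); ok {
		fmt.Println("Cinder-backed PV:", id)
	}
	// Volumes of any other type are ignored by both callbacks.
	if _, ok := cinderFilter.FilterVolume(&v1.Volume{}); !ok {
		fmt.Println("non-Cinder volume skipped")
	}
}
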
@@ -44,6 +44,7 @@ func defaultPredicates() sets.String {
 		predicates.MaxGCEPDVolumeCountPred,
 		predicates.MaxAzureDiskVolumeCountPred,
 		predicates.MaxCSIVolumeCountPred,
+		predicates.MaxCinderVolumeCountPred,
 		predicates.MatchInterPodAffinityPred,
 		predicates.NoDiskConflictPred,
 		predicates.GeneralPred,

@@ -74,6 +74,7 @@ func TestDefaultPredicates(t *testing.T) {
 		predicates.MaxEBSVolumeCountPred,
 		predicates.MaxGCEPDVolumeCountPred,
 		predicates.MaxAzureDiskVolumeCountPred,
+		predicates.MaxCinderVolumeCountPred,
 		predicates.MaxCSIVolumeCountPred,
 		predicates.MatchInterPodAffinityPred,
 		predicates.NoDiskConflictPred,

@@ -84,6 +84,13 @@ func init() {
 			return predicates.NewCSIMaxVolumeLimitPredicate(args.PVInfo, args.PVCInfo)
 		},
 	)
+	factory.RegisterFitPredicateFactory(
+		predicates.MaxCinderVolumeCountPred,
+		func(args factory.PluginFactoryArgs) predicates.FitPredicate {
+			return predicates.NewMaxPDVolumeCountPredicate(predicates.CinderVolumeFilterType, args.PVInfo, args.PVCInfo)
+		},
+	)
+
 	// Fit is determined by inter-pod affinity.
 	factory.RegisterFitPredicateFactory(
 		predicates.MatchInterPodAffinityPred,

@@ -72,7 +72,7 @@ var (
 	matchInterPodAffinitySet = sets.NewString(predicates.MatchInterPodAffinityPred)
 	generalPredicatesSets = sets.NewString(predicates.GeneralPred)
 	noDiskConflictSet = sets.NewString(predicates.NoDiskConflictPred)
-	maxPDVolumeCountPredicateKeys = []string{predicates.MaxGCEPDVolumeCountPred, predicates.MaxAzureDiskVolumeCountPred, predicates.MaxEBSVolumeCountPred}
+	maxPDVolumeCountPredicateKeys = []string{predicates.MaxGCEPDVolumeCountPred, predicates.MaxAzureDiskVolumeCountPred, predicates.MaxEBSVolumeCountPred, predicates.MaxCinderVolumeCountPred}
 )
 
 // Binder knows how to write a binding.

@@ -120,6 +120,32 @@ func (plugin *cinderPlugin) SupportsBulkVolumeVerification() bool {
 	return false
 }
 
+var _ volume.VolumePluginWithAttachLimits = &cinderPlugin{}
+
+func (plugin *cinderPlugin) GetVolumeLimits() (map[string]int64, error) {
+	volumeLimits := map[string]int64{
+		util.CinderVolumeLimitKey: util.DefaultMaxCinderVolumes,
+	}
+	cloud := plugin.host.GetCloudProvider()
+
+	// If we can't fetch the cloud provider we return an error,
+	// in the hope that an external CCM or the admin can set the
+	// limit instead. Returning default values from here would
+	// mean no one can override them.
+	if cloud == nil {
+		return nil, fmt.Errorf("No cloudprovider present")
+	}
+
+	if cloud.ProviderName() != openstack.ProviderName {
+		return nil, fmt.Errorf("Expected Openstack cloud, found %s", cloud.ProviderName())
+	}
+	return volumeLimits, nil
+}
+
+func (plugin *cinderPlugin) VolumeLimitKey(spec *volume.Spec) string {
+	return util.CinderVolumeLimitKey
+}
+
 func (plugin *cinderPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
 	return []v1.PersistentVolumeAccessMode{
 		v1.ReadWriteOnce,

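GetVolumeLimits is the "placeholder support for reporting limits from node" mentioned in the commit message: the plugin returns a map of resource name to limit, which can then be surfaced on the node object so the scheduler has a per-node value to enforce. A rough sketch of how such a map could be folded into a node's allocatable resources follows; the helper and the wiring are illustrative assumptions, not the actual kubelet code path.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// volumeLimitsToResourceList is a hypothetical helper that converts a plugin-reported
// limit map, such as the one returned by cinderPlugin.GetVolumeLimits, into node resources.
func volumeLimitsToResourceList(limits map[string]int64) v1.ResourceList {
	rl := v1.ResourceList{}
	for key, value := range limits {
		rl[v1.ResourceName(key)] = *resource.NewQuantity(value, resource.DecimalSI)
	}
	return rl
}

func main() {
	// What the Cinder plugin reports when it is running against an OpenStack cloud.
	limits := map[string]int64{"attachable-volumes-cinder": 256}

	node := v1.Node{Status: v1.NodeStatus{Allocatable: volumeLimitsToResourceList(limits)}}
	qty := node.Status.Allocatable[v1.ResourceName("attachable-volumes-cinder")]
	fmt.Printf("allocatable attachable-volumes-cinder: %s\n", qty.String())
}
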
@@ -40,6 +40,13 @@ const (
 	// GCEVolumeLimitKey stores resource name that will store volume limits for GCE node
 	GCEVolumeLimitKey = "attachable-volumes-gce-pd"
 
+	// CinderVolumeLimitKey stores resource name that will store volume limits for Cinder node
+	CinderVolumeLimitKey = "attachable-volumes-cinder"
+	// DefaultMaxCinderVolumes defines the maximum number of PD Volumes for Cinder.
+	// For OpenStack we keep this at a high enough value so that,
+	// depending on the backend, cluster admins can configure it.
+	DefaultMaxCinderVolumes = 256
+
 	// CSIAttachLimitPrefix defines prefix used for CSI volumes
 	CSIAttachLimitPrefix = "attachable-volumes-csi-"
 