mirror of https://github.com/k3s-io/k3s

Merge pull request #66928 from RobertKrawitz/ephemeral-storage-quota-exp

Ephemeral storage monitoring via filesystem quotas (tag: k3s-v1.15.3)

commit cf76868b34
@@ -452,6 +452,13 @@ const (
 	//
 	// Enables the regional PD feature on GCE.
 	deprecatedGCERegionalPersistentDisk featuregate.Feature = "GCERegionalPersistentDisk"
 
+	// owner: @RobertKrawitz
+	// alpha: v1.15
+	//
+	// Allow use of filesystems for ephemeral storage monitoring.
+	// Only applies if LocalStorageCapacityIsolation is set.
+	LocalStorageCapacityIsolationFSQuotaMonitoring featuregate.Feature = "LocalStorageCapacityIsolationFSQuotaMonitoring"
+
 )
 
 func init() {
@@ -528,6 +535,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 	TTLAfterFinished:      {Default: false, PreRelease: featuregate.Alpha},
 	KubeletPodResources:   {Default: false, PreRelease: featuregate.Alpha},
 	WindowsGMSA:           {Default: false, PreRelease: featuregate.Alpha},
+	LocalStorageCapacityIsolationFSQuotaMonitoring: {Default: false, PreRelease: featuregate.Alpha},
 
 	// inherited features from generic apiserver, relisted here to get a conflict if it is changed
 	// unintentionally on either side:
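The new gate defaults to off and only matters when LocalStorageCapacityIsolation is itself enabled. A minimal sketch of how in-tree code typically checks such a gate; whether any given call site combines the two gates exactly like this is an assumption:

package example

import (
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)

// quotaMonitoringEnabled sketches the double gate: quota-based monitoring
// only applies when local storage capacity isolation is on at all.
func quotaMonitoringEnabled() bool {
	return utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) &&
		utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolationFSQuotaMonitoring)
}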
@@ -62,6 +62,7 @@ go_library(
         "//pkg/kubelet/util/format:go_default_library",
         "//pkg/scheduler/api:go_default_library",
         "//pkg/scheduler/util:go_default_library",
+        "//pkg/volume/util:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -32,6 +32,7 @@ import (
 	evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 	schedulerutils "k8s.io/kubernetes/pkg/scheduler/util"
+	volumeutils "k8s.io/kubernetes/pkg/volume/util"
 )
 
 const (
@@ -399,9 +400,7 @@ func podDiskUsage(podStats statsapi.PodStats, pod *v1.Pod, statsToMeasure []fsSt
 func localEphemeralVolumeNames(pod *v1.Pod) []string {
 	result := []string{}
 	for _, volume := range pod.Spec.Volumes {
-		if volume.GitRepo != nil ||
-			(volume.EmptyDir != nil && volume.EmptyDir.Medium != v1.StorageMediumMemory) ||
-			volume.ConfigMap != nil || volume.DownwardAPI != nil {
+		if volumeutils.IsLocalEphemeralVolume(volume) {
 			result = append(result, volume.Name)
 		}
 	}
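The inline predicate moves behind a shared helper so the eviction manager and the volume manager agree on what counts as local ephemeral storage. A plausible sketch of that helper, reconstructed from the condition deleted above (the in-tree implementation in pkg/volume/util may differ in detail):

package example

import (
	v1 "k8s.io/api/core/v1"
)

// IsLocalEphemeralVolume reports whether a volume consumes the node's local
// ephemeral storage: gitRepo, configMap, downwardAPI, and disk-backed
// (non-memory-medium) emptyDir volumes all do.
func IsLocalEphemeralVolume(volume v1.Volume) bool {
	return volume.GitRepo != nil ||
		(volume.EmptyDir != nil && volume.EmptyDir.Medium != v1.StorageMediumMemory) ||
		volume.ConfigMap != nil ||
		volume.DownwardAPI != nil
}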
@@ -439,11 +439,11 @@ func (f *stubVolume) CanMount() error {
 	return nil
 }
 
-func (f *stubVolume) SetUp(fsGroup *int64) error {
+func (f *stubVolume) SetUp(mounterArgs volume.MounterArgs) error {
 	return nil
 }
 
-func (f *stubVolume) SetUpAt(dir string, fsGroup *int64) error {
+func (f *stubVolume) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	return nil
 }
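This is the first of many mechanical signature changes in this diff: `SetUp(fsGroup *int64)` becomes `SetUp(volume.MounterArgs)` across every volume plugin. The struct definition itself never appears in the hunks shown here; pieced together from the fields the diff uses (`FsGroup`, `DesiredSize`, `PodUID`), it plausibly looks like this:

package volume

// Sketch of pkg/volume; the definition below is inferred from usage in this
// diff, not shown by it.

import (
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/types"
)

// MounterArgs bundles what used to be the bare fsGroup parameter together
// with the new quota-related inputs, so adding future arguments does not
// force another signature change across every plugin.
type MounterArgs struct {
	FsGroup     *int64             // ownership to apply, as before
	DesiredSize *resource.Quantity // size bound to enforce via quota, if any
	PodUID      types.UID          // pod owning the volume, used as the quota key
}

Call sites accordingly change from `mounter.SetUp(&fsGroup)` to `mounter.SetUp(volume.MounterArgs{FsGroup: &fsGroup})`, and a zero-value `volume.MounterArgs{}` replaces the old `nil`, as the test hunks throughout the rest of this diff show.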
@@ -14,12 +14,14 @@ go_library(
     ],
     importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache",
     deps = [
+        "//pkg/api/v1/resource:go_default_library",
         "//pkg/features:go_default_library",
         "//pkg/volume:go_default_library",
+        "//pkg/volume/util:go_default_library",
         "//pkg/volume/util/operationexecutor:go_default_library",
         "//pkg/volume/util/types:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
@@ -25,6 +25,8 @@ import (
 	"sync"
 
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
+	apiv1resource "k8s.io/kubernetes/pkg/api/v1/resource"
 	"k8s.io/kubernetes/pkg/volume"
+	"k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
@@ -160,6 +162,10 @@ type volumeToMount struct {
 	// reportedInUse indicates that the volume was successfully added to the
 	// VolumesInUse field in the node's status.
 	reportedInUse bool
 
+	// desiredSizeLimit indicates the desired upper bound on the size of the volume
+	// (if so implemented)
+	desiredSizeLimit *resource.Quantity
 }
 
 // The pod object represents a pod that references the underlying volume and
@@ -226,6 +232,20 @@ func (dsw *desiredStateOfWorld) AddPodToVolume(
 	}
 
 	if _, volumeExists := dsw.volumesToMount[volumeName]; !volumeExists {
+		var sizeLimit *resource.Quantity
+		if volumeSpec.Volume != nil {
+			if util.IsLocalEphemeralVolume(*volumeSpec.Volume) {
+				_, podLimits := apiv1resource.PodRequestsAndLimits(pod)
+				ephemeralStorageLimit := podLimits[v1.ResourceEphemeralStorage]
+				sizeLimit = resource.NewQuantity(ephemeralStorageLimit.Value(), resource.BinarySI)
+				if volumeSpec.Volume.EmptyDir != nil &&
+					volumeSpec.Volume.EmptyDir.SizeLimit != nil &&
+					volumeSpec.Volume.EmptyDir.SizeLimit.Value() > 0 &&
+					volumeSpec.Volume.EmptyDir.SizeLimit.Value() < sizeLimit.Value() {
+					sizeLimit = resource.NewQuantity(volumeSpec.Volume.EmptyDir.SizeLimit.Value(), resource.BinarySI)
+				}
+			}
+		}
 		dsw.volumesToMount[volumeName] = volumeToMount{
 			volumeName:  volumeName,
 			podsToMount: make(map[types.UniquePodName]podToMount),
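The effective limit is the smaller of the pod-level ephemeral-storage limit and the emptyDir's own sizeLimit (when the latter is positive). A standalone sketch of the same arithmetic using the apimachinery resource package; function and variable names are illustrative:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// effectiveSizeLimit mirrors the min() logic above: start from the pod's
// ephemeral-storage limit and tighten it if the emptyDir declares a smaller,
// positive sizeLimit of its own.
func effectiveSizeLimit(podLimit, emptyDirLimit *resource.Quantity) *resource.Quantity {
	limit := resource.NewQuantity(podLimit.Value(), resource.BinarySI)
	if emptyDirLimit != nil && emptyDirLimit.Value() > 0 && emptyDirLimit.Value() < limit.Value() {
		limit = resource.NewQuantity(emptyDirLimit.Value(), resource.BinarySI)
	}
	return limit
}

func main() {
	pod := resource.MustParse("2Gi")
	dir := resource.MustParse("1Gi")
	fmt.Println(effectiveSizeLimit(&pod, &dir).String()) // prints "1Gi"
}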
@@ -233,6 +253,7 @@ func (dsw *desiredStateOfWorld) AddPodToVolume(
 			pluginIsDeviceMountable: deviceMountable,
 			volumeGidValue:          volumeGidValue,
 			reportedInUse:           false,
+			desiredSizeLimit:        sizeLimit,
 		}
 	}
@@ -360,7 +381,8 @@ func (dsw *desiredStateOfWorld) GetVolumesToMount() []VolumeToMount {
 				PluginIsDeviceMountable: volumeObj.pluginIsDeviceMountable,
 				OuterVolumeSpecName:     podObj.outerVolumeSpecName,
 				VolumeGidValue:          volumeObj.volumeGidValue,
-				ReportedInUse:           volumeObj.reportedInUse}})
+				ReportedInUse:           volumeObj.reportedInUse,
+				DesiredSizeLimit:        volumeObj.desiredSizeLimit}})
 		}
 	}
 	return volumesToMount
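`DesiredSizeLimit` now rides along on each `VolumeToMount`. The consumer is not shown in this diff, but presumably the operation executor copies it into the `MounterArgs` it hands to `SetUp`; a hedged sketch of that hand-off, with illustrative names throughout:

package example

import (
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/volume"
)

// mountWithArgs sketches the hand-off assumed here (not shown in this diff):
// the size limit computed in AddPodToVolume travels via VolumeToMount into
// the MounterArgs passed to the plugin's SetUp.
func mountWithArgs(m volume.Mounter, fsGroup *int64, desired *resource.Quantity, podUID types.UID) error {
	return m.SetUp(volume.MounterArgs{
		FsGroup:     fsGroup, // from the pod's SecurityContext, as before
		DesiredSize: desired, // computed in AddPodToVolume
		PodUID:      podUID,  // quota key for this pod
	})
}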
@@ -382,12 +382,12 @@ func (b *awsElasticBlockStoreMounter) CanMount() error {
 }
 
 // SetUp attaches the disk and bind mounts to the volume path.
-func (b *awsElasticBlockStoreMounter) SetUp(fsGroup *int64) error {
-	return b.SetUpAt(b.GetPath(), fsGroup)
+func (b *awsElasticBlockStoreMounter) SetUp(mounterArgs volume.MounterArgs) error {
+	return b.SetUpAt(b.GetPath(), mounterArgs)
 }
 
 // SetUpAt attaches the disk and bind mounts to the volume path.
-func (b *awsElasticBlockStoreMounter) SetUpAt(dir string, fsGroup *int64) error {
+func (b *awsElasticBlockStoreMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	// TODO: handle failed mounts here.
 	notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
 	klog.V(4).Infof("PersistentDisk set up: %s %v %v", dir, !notMnt, err)

@@ -440,7 +440,7 @@ func (b *awsElasticBlockStoreMounter) SetUpAt(dir string, fsGroup *int64) error
 	}
 
 	if !b.readOnly {
-		volume.SetVolumeOwnership(b, fsGroup)
+		volume.SetVolumeOwnership(b, mounterArgs.FsGroup)
 	}
 
 	klog.V(4).Infof("Successfully mounted %s", dir)
@@ -136,7 +136,7 @@ func TestPlugin(t *testing.T) {
 		t.Errorf("Got unexpected path: %s", path)
 	}
 
-	if err := mounter.SetUp(nil); err != nil {
+	if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
 		t.Errorf("Expected success, got: %v", err)
 	}
 	if _, err := os.Stat(path); err != nil {

@@ -367,7 +367,7 @@ func TestMountOptions(t *testing.T) {
 		t.Errorf("Got a nil Mounter")
 	}
 
-	if err := mounter.SetUp(nil); err != nil {
+	if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
 		t.Errorf("Expected success, got: %v", err)
 	}
 	mountOptions := fakeMounter.MountPoints[0].Opts
@@ -62,15 +62,15 @@ func (m *azureDiskMounter) CanMount() error {
 	return nil
 }
 
-func (m *azureDiskMounter) SetUp(fsGroup *int64) error {
-	return m.SetUpAt(m.GetPath(), fsGroup)
+func (m *azureDiskMounter) SetUp(mounterArgs volume.MounterArgs) error {
+	return m.SetUpAt(m.GetPath(), mounterArgs)
 }
 
 func (m *azureDiskMounter) GetPath() string {
 	return getPath(m.dataDisk.podUID, m.dataDisk.volumeName, m.plugin.host)
 }
 
-func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
+func (m *azureDiskMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	mounter := m.plugin.host.GetMounter(m.plugin.GetPluginName())
 	volumeSource, _, err := getVolumeSource(m.spec)

@@ -161,7 +161,7 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
 	}
 
 	if volumeSource.ReadOnly == nil || !*volumeSource.ReadOnly {
-		volume.SetVolumeOwnership(m, fsGroup)
+		volume.SetVolumeOwnership(m, mounterArgs.FsGroup)
 	}
 
 	klog.V(2).Infof("azureDisk - successfully mounted disk %s on %s", diskName, dir)
@@ -239,11 +239,11 @@ func (b *azureFileMounter) CanMount() error {
 }
 
 // SetUp attaches the disk and bind mounts to the volume path.
-func (b *azureFileMounter) SetUp(fsGroup *int64) error {
-	return b.SetUpAt(b.GetPath(), fsGroup)
+func (b *azureFileMounter) SetUp(mounterArgs volume.MounterArgs) error {
+	return b.SetUpAt(b.GetPath(), mounterArgs)
 }
 
-func (b *azureFileMounter) SetUpAt(dir string, fsGroup *int64) error {
+func (b *azureFileMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
 	klog.V(4).Infof("AzureFile mount set up: %s %v %v", dir, !notMnt, err)
 	if err != nil && !os.IsNotExist(err) {

@@ -286,7 +286,7 @@ func (b *azureFileMounter) SetUpAt(dir string, fsGroup *int64) error {
 			options = append(options, "ro")
 		}
 		mountOptions = volutil.JoinMountOptions(b.mountOptions, options)
-		mountOptions = appendDefaultMountOptions(mountOptions, fsGroup)
+		mountOptions = appendDefaultMountOptions(mountOptions, mounterArgs.FsGroup)
 	}
 
 	err = b.mounter.Mount(source, dir, "cifs", mountOptions)
@@ -155,7 +155,7 @@ func testPlugin(t *testing.T, tmpDir string, volumeHost volume.VolumeHost) {
 		t.Errorf("Got unexpected path: %s", path)
 	}
 
-	if err := mounter.SetUp(nil); err != nil {
+	if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
 		t.Errorf("Expected success, got: %v", err)
 	}
 	if _, err := os.Stat(path); err != nil {
@@ -222,12 +222,12 @@ func (cephfsVolume *cephfsMounter) CanMount() error {
 }
 
 // SetUp attaches the disk and bind mounts to the volume path.
-func (cephfsVolume *cephfsMounter) SetUp(fsGroup *int64) error {
-	return cephfsVolume.SetUpAt(cephfsVolume.GetPath(), fsGroup)
+func (cephfsVolume *cephfsMounter) SetUp(mounterArgs volume.MounterArgs) error {
+	return cephfsVolume.SetUpAt(cephfsVolume.GetPath(), mounterArgs)
 }
 
 // SetUpAt attaches the disk and bind mounts to the volume path.
-func (cephfsVolume *cephfsMounter) SetUpAt(dir string, fsGroup *int64) error {
+func (cephfsVolume *cephfsMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	notMnt, err := cephfsVolume.mounter.IsLikelyNotMountPoint(dir)
 	klog.V(4).Infof("CephFS mount set up: %s %v %v", dir, !notMnt, err)
 	if err != nil && !os.IsNotExist(err) {
@@ -87,7 +87,7 @@ func TestPlugin(t *testing.T) {
 	if volumePath != volpath {
 		t.Errorf("Got unexpected path: %s", volumePath)
 	}
-	if err := mounter.SetUp(nil); err != nil {
+	if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
 		t.Errorf("Expected success, got: %v", err)
 	}
 	if _, err := os.Stat(volumePath); err != nil {
@@ -377,12 +377,12 @@ func (b *cinderVolumeMounter) CanMount() error {
 	return nil
 }
 
-func (b *cinderVolumeMounter) SetUp(fsGroup *int64) error {
-	return b.SetUpAt(b.GetPath(), fsGroup)
+func (b *cinderVolumeMounter) SetUp(mounterArgs volume.MounterArgs) error {
+	return b.SetUpAt(b.GetPath(), mounterArgs)
 }
 
 // SetUp bind mounts to the volume path.
-func (b *cinderVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
+func (b *cinderVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	klog.V(5).Infof("Cinder SetUp %s to %s", b.pdName, dir)
 
 	b.plugin.volumeLocks.LockKey(b.pdName)

@@ -442,7 +442,7 @@ func (b *cinderVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 	}
 
 	if !b.readOnly {
-		volume.SetVolumeOwnership(b, fsGroup)
+		volume.SetVolumeOwnership(b, mounterArgs.FsGroup)
 	}
 	klog.V(3).Infof("Cinder volume %s mounted to %s", b.pdName, dir)
@@ -166,7 +166,7 @@ func TestPlugin(t *testing.T) {
 		t.Errorf("Got unexpected path: %s", path)
 	}
 
-	if err := mounter.SetUp(nil); err != nil {
+	if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
 		t.Errorf("Expected success, got: %v", err)
 	}
 	if _, err := os.Stat(path); err != nil {
@@ -183,11 +183,11 @@ func (b *configMapVolumeMounter) CanMount() error {
 	return nil
 }
 
-func (b *configMapVolumeMounter) SetUp(fsGroup *int64) error {
-	return b.SetUpAt(b.GetPath(), fsGroup)
+func (b *configMapVolumeMounter) SetUp(mounterArgs volume.MounterArgs) error {
+	return b.SetUpAt(b.GetPath(), mounterArgs)
 }
 
-func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
+func (b *configMapVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	klog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir)
 
 	// Wrap EmptyDir, let it do the setup.

@@ -224,7 +224,7 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 	}
 
 	setupSuccess := false
-	if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
+	if err := wrapped.SetUpAt(dir, mounterArgs); err != nil {
 		return err
 	}
 	if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil {

@@ -259,9 +259,9 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 		return err
 	}
 
-	err = volume.SetVolumeOwnership(b, fsGroup)
+	err = volume.SetVolumeOwnership(b, mounterArgs.FsGroup)
 	if err != nil {
-		klog.Errorf("Error applying volume ownership settings for group: %v", fsGroup)
+		klog.Errorf("Error applying volume ownership settings for group: %v", mounterArgs.FsGroup)
 		return err
 	}
 	setupSuccess = true
@@ -365,8 +365,10 @@ func TestPlugin(t *testing.T) {
 		t.Errorf("Got unexpected path: %s", volumePath)
 	}
 
-	fsGroup := int64(1001)
-	err = mounter.SetUp(&fsGroup)
+	var mounterArgs volume.MounterArgs
+	group := int64(1001)
+	mounterArgs.FsGroup = &group
+	err = mounter.SetUp(mounterArgs)
 	if err != nil {
 		t.Errorf("Failed to setup volume: %v", err)
 	}

@@ -423,8 +425,10 @@ func TestPluginReboot(t *testing.T) {
 		t.Errorf("Got unexpected path: %s", volumePath)
 	}
 
-	fsGroup := int64(1001)
-	err = mounter.SetUp(&fsGroup)
+	var mounterArgs volume.MounterArgs
+	group := int64(1001)
+	mounterArgs.FsGroup = &group
+	err = mounter.SetUp(mounterArgs)
 	if err != nil {
 		t.Errorf("Failed to setup volume: %v", err)
 	}

@@ -485,8 +489,10 @@ func TestPluginOptional(t *testing.T) {
 		t.Errorf("Got unexpected path: %s", volumePath)
 	}
 
-	fsGroup := int64(1001)
-	err = mounter.SetUp(&fsGroup)
+	var mounterArgs volume.MounterArgs
+	group := int64(1001)
+	mounterArgs.FsGroup = &group
+	err = mounter.SetUp(mounterArgs)
 	if err != nil {
 		t.Errorf("Failed to setup volume: %v", err)
 	}

@@ -582,8 +588,10 @@ func TestPluginKeysOptional(t *testing.T) {
 		t.Errorf("Got unexpected path: %s", volumePath)
 	}
 
-	fsGroup := int64(1001)
-	err = mounter.SetUp(&fsGroup)
+	var mounterArgs volume.MounterArgs
+	group := int64(1001)
+	mounterArgs.FsGroup = &group
+	err = mounter.SetUp(mounterArgs)
 	if err != nil {
 		t.Errorf("Failed to setup volume: %v", err)
 	}

@@ -660,8 +668,10 @@ func TestInvalidConfigMapSetup(t *testing.T) {
 		t.Errorf("Got unexpected path: %s", volumePath)
 	}
 
-	fsGroup := int64(1001)
-	err = mounter.SetUp(&fsGroup)
+	var mounterArgs volume.MounterArgs
+	group := int64(1001)
+	mounterArgs.FsGroup = &group
+	err = mounter.SetUp(mounterArgs)
 	if err == nil {
 		t.Errorf("Expected setup to fail")
 	}
@@ -93,11 +93,11 @@ func (c *csiMountMgr) CanMount() error {
 	return nil
 }
 
-func (c *csiMountMgr) SetUp(fsGroup *int64) error {
-	return c.SetUpAt(c.GetPath(), fsGroup)
+func (c *csiMountMgr) SetUp(mounterArgs volume.MounterArgs) error {
+	return c.SetUpAt(c.GetPath(), mounterArgs)
 }
 
-func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
+func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	klog.V(4).Infof(log("Mounter.SetUpAt(%s)", dir))
 
 	mounted, err := isDirMounted(c.plugin, dir)

@@ -268,7 +268,7 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
 	// if fstype is "", then skip fsgroup (could be indication of non-block filesystem)
 	// if fstype is provided and pv.AccessMode == ReadWriteOnly, then apply fsgroup
 
-	err = c.applyFSGroup(fsType, fsGroup)
+	err = c.applyFSGroup(fsType, mounterArgs.FsGroup)
 	if err != nil {
 		// attempt to rollback mount.
 		fsGrpErr := fmt.Errorf("applyFSGroup failed for vol %s: %v", c.volumeID, err)
@@ -203,8 +203,10 @@ func MounterSetUpTests(t *testing.T, podInfoEnabled bool) {
 	}
 
 	// Mounter.SetUp()
+	var mounterArgs volume.MounterArgs
 	fsGroup := int64(2000)
-	if err := csiMounter.SetUp(&fsGroup); err != nil {
+	mounterArgs.FsGroup = &fsGroup
+	if err := csiMounter.SetUp(mounterArgs); err != nil {
 		t.Fatalf("mounter.Setup failed: %v", err)
 	}

@@ -344,7 +346,7 @@ func TestMounterSetUpSimple(t *testing.T) {
 	}
 
 	// Mounter.SetUp()
-	if err := csiMounter.SetUp(nil); err != nil {
+	if err := csiMounter.SetUp(volume.MounterArgs{}); err != nil {
 		t.Fatalf("mounter.Setup failed: %v", err)
 	}

@@ -475,7 +477,7 @@ func TestMounterSetUpWithInline(t *testing.T) {
 	}
 
 	// Mounter.SetUp()
-	if err := csiMounter.SetUp(nil); err != nil {
+	if err := csiMounter.SetUp(volume.MounterArgs{}); err != nil {
 		t.Fatalf("mounter.Setup failed: %v", err)
 	}

@@ -621,12 +623,14 @@ func TestMounterSetUpWithFSGroup(t *testing.T) {
 		}
 
 		// Mounter.SetUp()
+		var mounterArgs volume.MounterArgs
 		var fsGroupPtr *int64
 		if tc.setFsGroup {
 			fsGroup := tc.fsGroup
 			fsGroupPtr = &fsGroup
 		}
-		if err := csiMounter.SetUp(fsGroupPtr); err != nil {
+		mounterArgs.FsGroup = fsGroupPtr
+		if err := csiMounter.SetUp(mounterArgs); err != nil {
 			t.Fatalf("mounter.Setup failed: %v", err)
 		}
@@ -268,7 +268,9 @@ func TestCSI_VolumeAll(t *testing.T) {
 
 			csiMounter := mounter.(*csiMountMgr)
 			csiMounter.csiClient = csiClient
-			if err := csiMounter.SetUp(fsGroup); err != nil {
+			var mounterArgs volume.MounterArgs
+			mounterArgs.FsGroup = fsGroup
+			if err := csiMounter.SetUp(mounterArgs); err != nil {
 				t.Fatalf("csiTest.VolumeAll mounter.Setup(fsGroup) failed: %s", err)
 			}
 			t.Log("csiTest.VolumeAll mounter.Setup(fsGroup) done OK")
@@ -174,11 +174,11 @@ func (b *downwardAPIVolumeMounter) CanMount() error {
 // This function is not idempotent by design. We want the data to be refreshed periodically.
 // The internal sync interval of kubelet will drive the refresh of data.
 // TODO: Add volume specific ticker and refresh loop
-func (b *downwardAPIVolumeMounter) SetUp(fsGroup *int64) error {
-	return b.SetUpAt(b.GetPath(), fsGroup)
+func (b *downwardAPIVolumeMounter) SetUp(mounterArgs volume.MounterArgs) error {
+	return b.SetUpAt(b.GetPath(), mounterArgs)
 }
 
-func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
+func (b *downwardAPIVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	klog.V(3).Infof("Setting up a downwardAPI volume %v for pod %v/%v at %v", b.volName, b.pod.Namespace, b.pod.Name, dir)
 	// Wrap EmptyDir. Here we rely on the idempotency of the wrapped plugin to avoid repeatedly mounting
 	wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), b.pod, *b.opts)

@@ -194,7 +194,7 @@ func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 	}
 
 	setupSuccess := false
-	if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
+	if err := wrapped.SetUpAt(dir, mounterArgs); err != nil {
 		klog.Errorf("Unable to setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
 		return err
 	}

@@ -231,9 +231,9 @@ func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 		return err
 	}
 
-	err = volume.SetVolumeOwnership(b, fsGroup)
+	err = volume.SetVolumeOwnership(b, mounterArgs.FsGroup)
 	if err != nil {
-		klog.Errorf("Error applying volume ownership settings for group: %v", fsGroup)
+		klog.Errorf("Error applying volume ownership settings for group: %v", mounterArgs.FsGroup)
 		return err
 	}
@@ -253,7 +253,7 @@ func newDownwardAPITest(t *testing.T, name string, volumeFiles, podLabels, podAn
 
 	volumePath := mounter.GetPath()
 
-	err = mounter.SetUp(nil)
+	err = mounter.SetUp(volume.MounterArgs{})
 	if err != nil {
 		t.Errorf("Failed to setup volume: %v", err)
 	}

@@ -380,7 +380,7 @@ func (step reSetUp) run(test *downwardAPITest) {
 	}
 
 	// now re-run Setup
-	if err = test.mounter.SetUp(nil); err != nil {
+	if err = test.mounter.SetUp(volume.MounterArgs{}); err != nil {
 		test.t.Errorf("Failed to re-setup volume: %v", err)
 	}
@@ -20,6 +20,7 @@ go_library(
         "//pkg/util/mount:go_default_library",
         "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
+        "//pkg/volume/util/quota:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -30,6 +30,7 @@ import (
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
+	"k8s.io/kubernetes/pkg/volume/util/quota"
 	utilstrings "k8s.io/utils/strings"
 )
@@ -174,6 +175,7 @@ type emptyDir struct {
 	mounter       mount.Interface
 	mountDetector mountDetector
 	plugin        *emptyDirPlugin
+	desiredSize   int64
 	volume.MetricsProvider
 }
@@ -193,12 +195,12 @@ func (ed *emptyDir) CanMount() error {
 }
 
 // SetUp creates new directory.
-func (ed *emptyDir) SetUp(fsGroup *int64) error {
-	return ed.SetUpAt(ed.GetPath(), fsGroup)
+func (ed *emptyDir) SetUp(mounterArgs volume.MounterArgs) error {
+	return ed.SetUpAt(ed.GetPath(), mounterArgs)
 }
 
 // SetUpAt creates new directory.
-func (ed *emptyDir) SetUpAt(dir string, fsGroup *int64) error {
+func (ed *emptyDir) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	notMnt, err := ed.mounter.IsLikelyNotMountPoint(dir)
 	// Getting an os.IsNotExist err from is a contingency; the directory
 	// may not exist yet, in which case, setup should run.
@@ -229,12 +231,28 @@ func (ed *emptyDir) SetUpAt(dir string, fsGroup *int64) error {
 		err = fmt.Errorf("unknown storage medium %q", ed.medium)
 	}
 
-	volume.SetVolumeOwnership(ed, fsGroup)
+	volume.SetVolumeOwnership(ed, mounterArgs.FsGroup)
 
+	// If setting up the quota fails, just log a message but don't actually error out.
+	// We'll use the old du mechanism in this case, at least until we support
+	// enforcement.
 	if err == nil {
 		volumeutil.SetReady(ed.getMetaDir())
+		if mounterArgs.DesiredSize != nil {
+			// Deliberately shadow the outer use of err as noted
+			// above.
+			hasQuotas, err := quota.SupportsQuotas(ed.mounter, dir)
+			if err != nil {
+				klog.V(3).Infof("Unable to check for quota support on %s: %s", dir, err.Error())
+			} else if hasQuotas {
+				klog.V(4).Infof("emptydir trying to assign quota %v on %s", mounterArgs.DesiredSize, dir)
+				err := quota.AssignQuota(ed.mounter, dir, mounterArgs.PodUID, mounterArgs.DesiredSize)
+				if err != nil {
+					klog.V(3).Infof("Set quota on %s failed %s", dir, err.Error())
+				}
+			}
+		}
 	}
 
 	return err
 }
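Setup degrades gracefully here: if the filesystem cannot do project quotas, the kubelet keeps the old du-style directory walking, and a failed quota assignment only logs. The two `quota` package calls in the hunk above are the whole contract; a sketch of using them directly, assuming the signatures exactly as they appear there:

package example

import (
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume/util/quota"
)

// tryAssignQuota mirrors the emptyDir setup path above: probe for quota
// support first, then assign a per-pod quota keyed by pod UID; failures
// only log, they never fail the mount.
func tryAssignQuota(m mount.Interface, dir string, podUID types.UID, size *resource.Quantity) {
	hasQuotas, err := quota.SupportsQuotas(m, dir)
	if err != nil || !hasQuotas {
		klog.V(3).Infof("no quota support on %s (err=%v); falling back to du", dir, err)
		return
	}
	if err := quota.AssignQuota(m, dir, podUID, size); err != nil {
		klog.V(3).Infof("assigning quota on %s failed: %v", dir, err)
	}
}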
@@ -397,9 +415,14 @@ func (ed *emptyDir) TearDownAt(dir string) error {
 }
 
 func (ed *emptyDir) teardownDefault(dir string) error {
+	// Remove any quota
+	err := quota.ClearQuota(ed.mounter, dir)
+	if err != nil {
+		klog.Warningf("Warning: Failed to clear quota on %s: %v", dir, err)
+	}
 	// Renaming the directory is not required anymore because the operation executor
 	// now handles duplicate operations on the same volume
-	err := os.RemoveAll(dir)
+	err = os.RemoveAll(dir)
 	if err != nil {
 		return err
 	}
@@ -163,7 +163,7 @@ func doTestPlugin(t *testing.T, config pluginTestConfig) {
 		t.Errorf("Got unexpected path: %s", volPath)
 	}
 
-	if err := mounter.SetUp(nil); err != nil {
+	if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
 		t.Errorf("Expected success, got: %v", err)
 	}
@@ -371,13 +371,13 @@ func (b *fcDiskMounter) CanMount() error {
 	return nil
 }
 
-func (b *fcDiskMounter) SetUp(fsGroup *int64) error {
-	return b.SetUpAt(b.GetPath(), fsGroup)
+func (b *fcDiskMounter) SetUp(mounterArgs volume.MounterArgs) error {
+	return b.SetUpAt(b.GetPath(), mounterArgs)
 }
 
-func (b *fcDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
+func (b *fcDiskMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	// diskSetUp checks mountpoints and prevent repeated calls
-	err := diskSetUp(b.manager, *b, dir, b.mounter, fsGroup)
+	err := diskSetUp(b.manager, *b, dir, b.mounter, mounterArgs.FsGroup)
 	if err != nil {
 		klog.Errorf("fc: failed to setup")
 	}
@@ -179,7 +179,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
 		t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, path)
 	}
 
-	if err := mounter.SetUp(nil); err != nil {
+	if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
 		t.Errorf("Expected success, got: %v", err)
 	}
 	if _, err := os.Stat(path); err != nil {
@@ -51,7 +51,7 @@ const (
 	optionFSType         = "kubernetes.io/fsType"
 	optionReadWrite      = "kubernetes.io/readwrite"
 	optionKeySecret      = "kubernetes.io/secret"
-	optionFSGroup        = "kubernetes.io/fsGroup"
+	optionFSGroup        = "kubernetes.io/mounterArgs.FsGroup"
 	optionMountsDir      = "kubernetes.io/mountsDir"
 	optionPVorVolumeName = "kubernetes.io/pvOrVolumeName"
@@ -26,7 +26,7 @@ type mounterDefaults flexVolumeMounter
 
 // SetUpAt is part of the volume.Mounter interface.
 // This implementation relies on the attacher's device mount path and does a bind mount to dir.
-func (f *mounterDefaults) SetUpAt(dir string, fsGroup *int64) error {
+func (f *mounterDefaults) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	klog.Warning(logPrefix(f.plugin), "using default SetUpAt to ", dir)
 
 	src, err := f.plugin.getDeviceMountPath(f.spec)
@@ -39,12 +39,12 @@ var _ volume.Mounter = &flexVolumeMounter{}
 // Mounter interface
 
 // SetUp creates new directory.
-func (f *flexVolumeMounter) SetUp(fsGroup *int64) error {
-	return f.SetUpAt(f.GetPath(), fsGroup)
+func (f *flexVolumeMounter) SetUp(mounterArgs volume.MounterArgs) error {
+	return f.SetUpAt(f.GetPath(), mounterArgs)
 }
 
 // SetUpAt creates new directory.
-func (f *flexVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
+func (f *flexVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	// Mount only once.
 	alreadyMounted, err := prepareForMount(f.mounter, dir)
 	if err != nil {

@@ -75,15 +75,15 @@ func (f *flexVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 	}
 
 	// Implicit parameters
-	if fsGroup != nil {
-		extraOptions[optionFSGroup] = strconv.FormatInt(int64(*fsGroup), 10)
+	if mounterArgs.FsGroup != nil {
+		extraOptions[optionFSGroup] = strconv.FormatInt(int64(*mounterArgs.FsGroup), 10)
 	}
 
 	call.AppendSpec(f.spec, f.plugin.host, extraOptions)
 
 	_, err = call.Run()
 	if isCmdNotSupportedErr(err) {
-		err = (*mounterDefaults)(f).SetUpAt(dir, fsGroup)
+		err = (*mounterDefaults)(f).SetUpAt(dir, mounterArgs)
 	}
 
 	if err != nil {

@@ -93,7 +93,7 @@ func (f *flexVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 
 	if !f.readOnly {
 		if f.plugin.capabilities.FSGroup {
-			volume.SetVolumeOwnership(f, fsGroup)
+			volume.SetVolumeOwnership(f, mounterArgs.FsGroup)
 		}
 	}
@@ -23,6 +23,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/kubernetes/pkg/util/mount"
+	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/test/utils/harness"
 )
@@ -46,7 +47,7 @@ func TestSetUpAt(tt *testing.T) {
 	plugin, rootDir := testPlugin(t)
 	plugin.unsupportedCommands = []string{"unsupportedCmd"}
 	plugin.runner = fakeRunner(
-		// first call without fsGroup
+		// first call without mounterArgs.FsGroup
 		assertDriverCall(t, successOutput(), mountCmd, rootDir+"/mount-dir",
 			specJSON(plugin, spec, map[string]string{
 				optionKeyPodName: "my-pod",

@@ -55,7 +56,7 @@ func TestSetUpAt(tt *testing.T) {
 			optionKeyServiceAccountName: "my-sa",
 		})),
 
-		// second test has fsGroup
+		// second test has mounterArgs.FsGroup
 		assertDriverCall(t, notSupportedOutput(), mountCmd, rootDir+"/mount-dir",
 			specJSON(plugin, spec, map[string]string{
 				optionFSGroup: "42",

@@ -69,8 +70,10 @@ func TestSetUpAt(tt *testing.T) {
 	)
 
 	m, _ := plugin.newMounterInternal(spec, pod, mounter, plugin.runner)
-	m.SetUpAt(rootDir+"/mount-dir", nil)
+	var mounterArgs volume.MounterArgs
+	m.SetUpAt(rootDir+"/mount-dir", mounterArgs)
 
-	fsGroup := int64(42)
-	m.SetUpAt(rootDir+"/mount-dir", &fsGroup)
+	group := int64(42)
+	mounterArgs.FsGroup = &group
+	m.SetUpAt(rootDir+"/mount-dir", mounterArgs)
 }
@@ -235,8 +235,8 @@ func (b *flockerVolumeMounter) GetPath() string {
 }
 
 // SetUp bind mounts the disk global mount to the volume path.
-func (b *flockerVolumeMounter) SetUp(fsGroup *int64) error {
-	return b.SetUpAt(b.GetPath(), fsGroup)
+func (b *flockerVolumeMounter) SetUp(mounterArgs volume.MounterArgs) error {
+	return b.SetUpAt(b.GetPath(), mounterArgs)
 }
 
 // newFlockerClient uses environment variables and pod attributes to return a

@@ -277,7 +277,7 @@ control service:
    need to update the Primary UUID for this volume.
 5. Wait until the Primary UUID was updated or timeout.
 */
-func (b *flockerVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
+func (b *flockerVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	var err error
 	if b.flockerClient == nil {
 		b.flockerClient, err = b.newFlockerClient()

@@ -365,7 +365,7 @@ func (b *flockerVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 	}
 
 	if !b.readOnly {
-		volume.SetVolumeOwnership(b, fsGroup)
+		volume.SetVolumeOwnership(b, mounterArgs.FsGroup)
 	}
 
 	klog.V(4).Infof("successfully mounted %s", dir)
@@ -354,12 +354,12 @@ func (b *gcePersistentDiskMounter) CanMount() error {
 }
 
 // SetUp bind mounts the disk global mount to the volume path.
-func (b *gcePersistentDiskMounter) SetUp(fsGroup *int64) error {
-	return b.SetUpAt(b.GetPath(), fsGroup)
+func (b *gcePersistentDiskMounter) SetUp(mounterArgs volume.MounterArgs) error {
+	return b.SetUpAt(b.GetPath(), mounterArgs)
 }
 
 // SetUp bind mounts the disk global mount to the give volume path.
-func (b *gcePersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
+func (b *gcePersistentDiskMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	// TODO: handle failed mounts here.
 	notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
 	klog.V(4).Infof("GCE PersistentDisk set up: Dir (%s) PD name (%q) Mounted (%t) Error (%v), ReadOnly (%t)", dir, b.pdName, !notMnt, err, b.readOnly)

@@ -419,7 +419,7 @@ func (b *gcePersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
 	}
 
 	if !b.readOnly {
-		volume.SetVolumeOwnership(b, fsGroup)
+		volume.SetVolumeOwnership(b, mounterArgs.FsGroup)
 	}
 
 	klog.V(4).Infof("Successfully mounted %s", dir)
@@ -141,7 +141,7 @@ func TestPlugin(t *testing.T) {
 		t.Errorf("Got unexpected path: %s", path)
 	}
 
-	if err := mounter.SetUp(nil); err != nil {
+	if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
 		t.Errorf("Expected success, got: %v", err)
 	}
 	if _, err := os.Stat(path); err != nil {

@@ -279,7 +279,7 @@ func TestMountOptions(t *testing.T) {
 		t.Errorf("Got a nil Mounter")
 	}
 
-	if err := mounter.SetUp(nil); err != nil {
+	if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
 		t.Errorf("Expected success, got: %v", err)
 	}
 	mountOptions := fakeMounter.MountPoints[0].Opts
@@ -179,12 +179,12 @@ func (b *gitRepoVolumeMounter) CanMount() error {
 }
 
 // SetUp creates new directory and clones a git repo.
-func (b *gitRepoVolumeMounter) SetUp(fsGroup *int64) error {
-	return b.SetUpAt(b.GetPath(), fsGroup)
+func (b *gitRepoVolumeMounter) SetUp(mounterArgs volume.MounterArgs) error {
+	return b.SetUpAt(b.GetPath(), mounterArgs)
 }
 
 // SetUpAt creates new directory and clones a git repo.
-func (b *gitRepoVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
+func (b *gitRepoVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	if volumeutil.IsReady(b.getMetaDir()) {
 		return nil
 	}

@@ -194,7 +194,7 @@ func (b *gitRepoVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 	if err != nil {
 		return err
 	}
-	if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
+	if err := wrapped.SetUpAt(dir, mounterArgs); err != nil {
 		return err
 	}

@@ -240,7 +240,7 @@ func (b *gitRepoVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 		return fmt.Errorf("failed to exec 'git reset --hard': %s: %v", output, err)
 	}
 
-	volume.SetVolumeOwnership(b, fsGroup)
+	volume.SetVolumeOwnership(b, mounterArgs.FsGroup)
 
 	volumeutil.SetReady(b.getMetaDir())
 	return nil
@@ -422,7 +422,7 @@ func doTestSetUp(scenario struct {
 	g := mounter.(*gitRepoVolumeMounter)
 	g.exec = &fake
 
-	g.SetUp(nil)
+	g.SetUp(volume.MounterArgs{})
 
 	if fake.CommandCalls != len(expecteds) {
 		allErrs = append(allErrs,
@@ -275,11 +275,11 @@ func (b *glusterfsMounter) CanMount() error {
 }
 
 // SetUp attaches the disk and bind mounts to the volume path.
-func (b *glusterfsMounter) SetUp(fsGroup *int64) error {
-	return b.SetUpAt(b.GetPath(), fsGroup)
+func (b *glusterfsMounter) SetUp(mounterArgs volume.MounterArgs) error {
+	return b.SetUpAt(b.GetPath(), mounterArgs)
 }
 
-func (b *glusterfsMounter) SetUpAt(dir string, fsGroup *int64) error {
+func (b *glusterfsMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
 	klog.V(4).Infof("mount setup: %s %v %v", dir, !notMnt, err)
 	if err != nil && !os.IsNotExist(err) {
@@ -118,7 +118,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
 	if volumePath != expectedPath {
 		t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, volumePath)
 	}
-	if err := mounter.SetUp(nil); err != nil {
+	if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
 		t.Errorf("Expected success, got: %v", err)
 	}
 	if _, err := os.Stat(volumePath); err != nil {
@@ -222,7 +222,7 @@ func (b *hostPathMounter) CanMount() error {
 }
 
 // SetUp does nothing.
-func (b *hostPathMounter) SetUp(fsGroup *int64) error {
+func (b *hostPathMounter) SetUp(mounterArgs volume.MounterArgs) error {
 	err := validation.ValidatePathNoBacksteps(b.GetPath())
 	if err != nil {
 		return fmt.Errorf("invalid HostPath `%s`: %v", b.GetPath(), err)

@@ -235,7 +235,7 @@ func (b *hostPathMounter) SetUp(fsGroup *int64) error {
 }
 
 // SetUpAt does not make sense for host paths - probably programmer error.
-func (b *hostPathMounter) SetUpAt(dir string, fsGroup *int64) error {
+func (b *hostPathMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	return fmt.Errorf("SetUpAt() does not make sense for host paths")
 }
@@ -217,7 +217,7 @@ func TestInvalidHostPath(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	err = mounter.SetUp(nil)
+	err = mounter.SetUp(volume.MounterArgs{})
 	expectedMsg := "invalid HostPath `/no/backsteps/allowed/..`: must not contain '..'"
 	if err.Error() != expectedMsg {
 		t.Fatalf("expected error `%s` but got `%s`", expectedMsg, err)

@@ -253,7 +253,7 @@ func TestPlugin(t *testing.T) {
 		t.Errorf("Got unexpected path: %s", path)
 	}
 
-	if err := mounter.SetUp(nil); err != nil {
+	if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
 		t.Errorf("Expected success, got: %v", err)
 	}
@@ -330,13 +330,13 @@ func (b *iscsiDiskMounter) CanMount() error {
 	return nil
 }
 
-func (b *iscsiDiskMounter) SetUp(fsGroup *int64) error {
-	return b.SetUpAt(b.GetPath(), fsGroup)
+func (b *iscsiDiskMounter) SetUp(mounterArgs volume.MounterArgs) error {
+	return b.SetUpAt(b.GetPath(), mounterArgs)
 }
 
-func (b *iscsiDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
+func (b *iscsiDiskMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	// diskSetUp checks mountpoints and prevent repeated calls
-	err := diskSetUp(b.manager, *b, dir, b.mounter, fsGroup)
+	err := diskSetUp(b.manager, *b, dir, b.mounter, mounterArgs.FsGroup)
 	if err != nil {
 		klog.Errorf("iscsi: failed to setup")
 	}
@@ -175,7 +175,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
 		t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, path)
 	}
 
-	if err := mounter.SetUp(nil); err != nil {
+	if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
 		t.Errorf("Expected success, got: %v", err)
 	}
 	if _, err := os.Stat(path); err != nil {
@@ -423,12 +423,12 @@ func (m *localVolumeMounter) CanMount() error {
 }
 
 // SetUp bind mounts the directory to the volume path
-func (m *localVolumeMounter) SetUp(fsGroup *int64) error {
-	return m.SetUpAt(m.GetPath(), fsGroup)
+func (m *localVolumeMounter) SetUp(mounterArgs volume.MounterArgs) error {
+	return m.SetUpAt(m.GetPath(), mounterArgs)
 }
 
 // SetUpAt bind mounts the directory to the volume path and sets up volume ownership
-func (m *localVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
+func (m *localVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	m.plugin.volumeLocks.LockKey(m.globalPath)
 	defer m.plugin.volumeLocks.UnlockKey(m.globalPath)

@@ -452,7 +452,7 @@ func (m *localVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 		return nil
 	}
 	refs, err := m.mounter.GetMountRefs(m.globalPath)
-	if fsGroup != nil {
+	if mounterArgs.FsGroup != nil {
 		if err != nil {
 			klog.Errorf("cannot collect mounting information: %s %v", m.globalPath, err)
 			return err

@@ -461,7 +461,7 @@ func (m *localVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 		// Only count mounts from other pods
 		refs = m.filterPodMounts(refs)
 		if len(refs) > 0 {
-			fsGroupNew := int64(*fsGroup)
+			fsGroupNew := int64(*mounterArgs.FsGroup)
 			fsGroupOld, err := m.mounter.GetFSGroup(m.globalPath)
 			if err != nil {
 				return fmt.Errorf("failed to check fsGroup for %s (%v)", m.globalPath, err)

@@ -519,7 +519,7 @@ func (m *localVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 	if !m.readOnly {
 		// Volume owner will be written only once on the first volume mount
 		if len(refs) == 0 {
-			return volume.SetVolumeOwnership(m, fsGroup)
+			return volume.SetVolumeOwnership(m, mounterArgs.FsGroup)
 		}
 	}
 	return nil
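A local volume can be shared by several pods, so ownership is applied only on the first mount and later pods must agree on the fsGroup. A sketch of that guard under the names used above; the error wording is an assumption:

package example

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/mount"
)

// checkFsGroupConflict sketches the check performed above: the volume keeps
// the fsGroup of its first mount, so a later pod requesting a different
// group is rejected rather than silently re-chowning shared data.
func checkFsGroupConflict(m mount.Interface, globalPath string, requested int64) error {
	existing, err := m.GetFSGroup(globalPath)
	if err != nil {
		return fmt.Errorf("failed to check fsGroup for %s (%v)", globalPath, err)
	}
	if existing != requested {
		return fmt.Errorf("fsGroup %d conflicts with fsGroup %d already set on %s",
			requested, existing, globalPath)
	}
	return nil
}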
@@ -200,7 +200,7 @@ func TestInvalidLocalPath(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	err = mounter.SetUp(nil)
+	err = mounter.SetUp(volume.MounterArgs{})
 	expectedMsg := "invalid path: /no/backsteps/allowed/.. must not contain '..'"
 	if err.Error() != expectedMsg {
 		t.Fatalf("expected error `%s` but got `%s`", expectedMsg, err)

@@ -307,7 +307,7 @@ func TestMountUnmount(t *testing.T) {
 		t.Errorf("Got unexpected path: %s", path)
 	}
 
-	if err := mounter.SetUp(nil); err != nil {
+	if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
 		t.Errorf("Expected success, got: %v", err)
 	}

@@ -412,7 +412,9 @@ func testFSGroupMount(plug volume.VolumePlugin, pod *v1.Pod, tmpDir string, fsGr
 		return fmt.Errorf("Got unexpected path: %s", path)
 	}
 
-	if err := mounter.SetUp(&fsGroup); err != nil {
+	var mounterArgs volume.MounterArgs
+	mounterArgs.FsGroup = &fsGroup
+	if err := mounter.SetUp(mounterArgs); err != nil {
 		return err
 	}
 	return nil

@@ -517,7 +519,7 @@ func TestMountOptions(t *testing.T) {
 	fakeMounter := &mount.FakeMounter{}
 	mounter.(*localVolumeMounter).mounter = fakeMounter
 
-	if err := mounter.SetUp(nil); err != nil {
+	if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
 		t.Errorf("Expected success, got: %v", err)
 	}
 	mountOptions := fakeMounter.MountPoints[0].Opts
@@ -233,11 +233,11 @@ func (nfsMounter *nfsMounter) GetAttributes() volume.Attributes {
 }
 
 // SetUp attaches the disk and bind mounts to the volume path.
-func (nfsMounter *nfsMounter) SetUp(fsGroup *int64) error {
-	return nfsMounter.SetUpAt(nfsMounter.GetPath(), fsGroup)
+func (nfsMounter *nfsMounter) SetUp(mounterArgs volume.MounterArgs) error {
+	return nfsMounter.SetUpAt(nfsMounter.GetPath(), mounterArgs)
 }
 
-func (nfsMounter *nfsMounter) SetUpAt(dir string, fsGroup *int64) error {
+func (nfsMounter *nfsMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	notMnt, err := mount.IsNotMountPoint(nfsMounter.mounter, dir)
 	klog.V(4).Infof("NFS mount set up: %s %v %v", dir, !notMnt, err)
 	if err != nil && !os.IsNotExist(err) {
@@ -122,7 +122,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
 	if volumePath != expectedPath {
 		t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, volumePath)
 	}
-	if err := mounter.SetUp(nil); err != nil {
+	if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
 		t.Errorf("Expected success, got: %v", err)
 	}
 	if _, err := os.Stat(volumePath); err != nil {
@@ -200,12 +200,12 @@ func (b *photonPersistentDiskMounter) CanMount() error {
 }
 
 // SetUp attaches the disk and bind mounts to the volume path.
-func (b *photonPersistentDiskMounter) SetUp(fsGroup *int64) error {
-	return b.SetUpAt(b.GetPath(), fsGroup)
+func (b *photonPersistentDiskMounter) SetUp(mounterArgs volume.MounterArgs) error {
+	return b.SetUpAt(b.GetPath(), mounterArgs)
 }
 
 // SetUp attaches the disk and bind mounts to the volume path.
-func (b *photonPersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
+func (b *photonPersistentDiskMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	klog.V(4).Infof("Photon Persistent Disk setup %s to %s", b.pdID, dir)
 
 	// TODO: handle failed mounts here.
@@ -128,7 +128,7 @@ func TestPlugin(t *testing.T) {
 		t.Errorf("Got unexpected path: %s", path)
 	}
 
-	if err := mounter.SetUp(nil); err != nil {
+	if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
 		t.Errorf("Expected success, got: %v", err)
 	}
 	if _, err := os.Stat(path); err != nil {
@@ -298,12 +298,12 @@ func (b *portworxVolumeMounter) CanMount() error {
 }
 
 // SetUp attaches the disk and bind mounts to the volume path.
-func (b *portworxVolumeMounter) SetUp(fsGroup *int64) error {
-	return b.SetUpAt(b.GetPath(), fsGroup)
+func (b *portworxVolumeMounter) SetUp(mounterArgs volume.MounterArgs) error {
+	return b.SetUpAt(b.GetPath(), mounterArgs)
 }
 
 // SetUpAt attaches the disk and bind mounts to the volume path.
-func (b *portworxVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
+func (b *portworxVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
 	klog.Infof("Portworx Volume set up. Dir: %s %v %v", dir, !notMnt, err)
 	if err != nil && !os.IsNotExist(err) {

@@ -331,7 +331,7 @@ func (b *portworxVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 		return err
 	}
 	if !b.readOnly {
-		volume.SetVolumeOwnership(b, fsGroup)
+		volume.SetVolumeOwnership(b, mounterArgs.FsGroup)
 	}
 	klog.Infof("Portworx Volume %s setup at %s", b.volumeID, dir)
 	return nil
@@ -163,7 +163,7 @@ func TestPlugin(t *testing.T) {
 		t.Errorf("Got unexpected path: %s", path)
 	}
 
-	if err := mounter.SetUp(nil); err != nil {
+	if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
 		t.Errorf("Expected success, got: %v", err)
 	}
 	if _, err := os.Stat(path); err != nil {
@@ -188,11 +188,11 @@ func (s *projectedVolumeMounter) CanMount() error {
 	return nil
 }
 
-func (s *projectedVolumeMounter) SetUp(fsGroup *int64) error {
-	return s.SetUpAt(s.GetPath(), fsGroup)
+func (s *projectedVolumeMounter) SetUp(mounterArgs volume.MounterArgs) error {
+	return s.SetUpAt(s.GetPath(), mounterArgs)
 }
 
-func (s *projectedVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
+func (s *projectedVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	klog.V(3).Infof("Setting up volume %v for pod %v at %v", s.volName, s.pod.UID, dir)
 
 	wrapped, err := s.plugin.host.NewWrapperMounter(s.volName, wrappedVolumeSpec(), s.pod, *s.opts)

@@ -207,7 +207,7 @@ func (s *projectedVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 	}
 
 	setupSuccess := false
-	if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
+	if err := wrapped.SetUpAt(dir, mounterArgs); err != nil {
 		return err
 	}

@@ -243,9 +243,9 @@ func (s *projectedVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 		return err
 	}
 
-	err = volume.SetVolumeOwnership(s, fsGroup)
+	err = volume.SetVolumeOwnership(s, mounterArgs.FsGroup)
 	if err != nil {
-		klog.Errorf("Error applying volume ownership settings for group: %v", fsGroup)
+		klog.Errorf("Error applying volume ownership settings for group: %v", mounterArgs.FsGroup)
 		return err
 	}
 	setupSuccess = true
@@ -878,7 +878,7 @@ func TestPlugin(t *testing.T) {
 		t.Errorf("Got unexpected path: %s", volumePath)
 	}
 
-	err = mounter.SetUp(nil)
+	err = mounter.SetUp(volume.MounterArgs{})
 	if err != nil {
 		t.Errorf("Failed to setup volume: %v", err)
 	}

@@ -943,7 +943,8 @@ func TestInvalidPathProjected(t *testing.T) {
 		t.Errorf("Got unexpected path: %s", volumePath)
 	}
 
-	err = mounter.SetUp(nil)
+	var mounterArgs volume.MounterArgs
+	err = mounter.SetUp(mounterArgs)
 	if err == nil {
 		t.Errorf("Expected error while setting up secret")
 	}

@@ -994,7 +995,7 @@ func TestPluginReboot(t *testing.T) {
 		t.Errorf("Got unexpected path: %s", volumePath)
 	}
 
-	err = mounter.SetUp(nil)
+	err = mounter.SetUp(volume.MounterArgs{})
 	if err != nil {
 		t.Errorf("Failed to setup volume: %v", err)
 	}

@@ -1046,7 +1047,7 @@ func TestPluginOptional(t *testing.T) {
 		t.Errorf("Got unexpected path: %s", volumePath)
 	}
 
-	err = mounter.SetUp(nil)
+	err = mounter.SetUp(volume.MounterArgs{})
 	if err != nil {
 		t.Errorf("Failed to setup volume: %v", err)
 	}

@@ -1144,7 +1145,7 @@ func TestPluginOptionalKeys(t *testing.T) {
 		t.Errorf("Got unexpected path: %s", volumePath)
 	}
 
-	err = mounter.SetUp(nil)
+	err = mounter.SetUp(volume.MounterArgs{})
 	if err != nil {
 		t.Errorf("Failed to setup volume: %v", err)
 	}
@@ -237,12 +237,12 @@ func (mounter *quobyteMounter) CanMount() error {
 }
 
 // SetUp attaches the disk and bind mounts to the volume path.
-func (mounter *quobyteMounter) SetUp(fsGroup *int64) error {
+func (mounter *quobyteMounter) SetUp(mounterArgs volume.MounterArgs) error {
 	pluginDir := mounter.plugin.host.GetPluginDir(utilstrings.EscapeQualifiedName(quobytePluginName))
-	return mounter.SetUpAt(pluginDir, fsGroup)
+	return mounter.SetUpAt(pluginDir, mounterArgs)
 }
 
-func (mounter *quobyteMounter) SetUpAt(dir string, fsGroup *int64) error {
+func (mounter *quobyteMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	// Check if Quobyte is already mounted on the host in the Plugin Dir
 	// if so we can use this mountpoint instead of creating a new one
 	// IsLikelyNotMountPoint wouldn't check the mount type
@@ -101,7 +101,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
 	if volumePath != fmt.Sprintf("%s/plugins/kubernetes.io~quobyte/root#root@vol", tmpDir) {
 		t.Errorf("Got unexpected path: %s expected: %s", volumePath, fmt.Sprintf("%s/plugins/kubernetes.io~quobyte/root#root@vol", tmpDir))
 	}
-	if err := mounter.SetUp(nil); err != nil {
+	if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
 		t.Errorf("Expected success, got: %v", err)
 	}
 	unmounter, err := plug.(*quobytePlugin).newUnmounterInternal("vol", types.UID("poduid"), &mount.FakeMounter{})
@@ -825,14 +825,14 @@ func (b *rbdMounter) CanMount() error {
 	return nil
 }
 
-func (b *rbdMounter) SetUp(fsGroup *int64) error {
-	return b.SetUpAt(b.GetPath(), fsGroup)
+func (b *rbdMounter) SetUp(mounterArgs volume.MounterArgs) error {
+	return b.SetUpAt(b.GetPath(), mounterArgs)
 }
 
-func (b *rbdMounter) SetUpAt(dir string, fsGroup *int64) error {
+func (b *rbdMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	// diskSetUp checks mountpoints and prevent repeated calls
 	klog.V(4).Infof("rbd: attempting to setup at %s", dir)
-	err := diskSetUp(b.manager, *b, dir, b.mounter, fsGroup)
+	err := diskSetUp(b.manager, *b, dir, b.mounter, mounterArgs.FsGroup)
 	if err != nil {
 		klog.Errorf("rbd: failed to setup at %s %v", dir, err)
 	}
@@ -305,7 +305,7 @@ func doTestPlugin(t *testing.T, c *testcase) {
 		t.Errorf("Unexpected path, expected %q, got: %q", c.expectedPodMountPath, path)
 	}
 
-	if err := mounter.SetUp(nil); err != nil {
+	if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
 		t.Errorf("Expected success, got: %v", err)
 	}
 	if _, err := os.Stat(path); err != nil {
@@ -77,12 +77,12 @@ func (v *sioVolume) CanMount() error {
 	return nil
 }
 
-func (v *sioVolume) SetUp(fsGroup *int64) error {
-	return v.SetUpAt(v.GetPath(), fsGroup)
+func (v *sioVolume) SetUp(mounterArgs volume.MounterArgs) error {
+	return v.SetUpAt(v.GetPath(), mounterArgs)
 }
 
 // SetUp bind mounts the disk global mount to the volume path.
-func (v *sioVolume) SetUpAt(dir string, fsGroup *int64) error {
+func (v *sioVolume) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
 	v.plugin.volumeMtx.LockKey(v.volSpecName)
 	defer v.plugin.volumeMtx.UnlockKey(v.volSpecName)

@@ -154,9 +154,9 @@ func (v *sioVolume) SetUpAt(dir string, fsGroup *int64) error {
 		return err
 	}
 
-	if !v.readOnly && fsGroup != nil {
+	if !v.readOnly && mounterArgs.FsGroup != nil {
 		klog.V(4).Info(log("applying value FSGroup ownership"))
-		volume.SetVolumeOwnership(v, fsGroup)
+		volume.SetVolumeOwnership(v, mounterArgs.FsGroup)
 	}
 
 	klog.V(4).Info(log("successfully setup PV %s: volume %s mapped as %s mounted at %s", v.volSpecName, v.volName, devicePath, dir))
@@ -191,7 +191,7 @@ func TestVolumeMounterUnmounter(t *testing.T) {
 		t.Errorf("Got unexpected path: %s", path)
 	}
 
-	if err := sioMounter.SetUp(nil); err != nil {
+	if err := sioMounter.SetUp(volume.MounterArgs{}); err != nil {
 		t.Errorf("Expected success, got: %v", err)
 	}
 	if _, err := os.Stat(path); err != nil {

@@ -345,7 +345,7 @@ func TestVolumeProvisioner(t *testing.T) {
 		t.Fatalf("failed to create sio mgr: %v", err)
 	}
 	sioVol.sioMgr.client = sio
-	if err := sioMounter.SetUp(nil); err != nil {
+	if err := sioMounter.SetUp(volume.MounterArgs{}); err != nil {
 		t.Fatalf("Expected success, got: %v", err)
 	}
@@ -178,11 +178,11 @@ func (b *secretVolumeMounter) CanMount() error {
	return nil
}

-func (b *secretVolumeMounter) SetUp(fsGroup *int64) error {
-	return b.SetUpAt(b.GetPath(), fsGroup)
+func (b *secretVolumeMounter) SetUp(mounterArgs volume.MounterArgs) error {
+	return b.SetUpAt(b.GetPath(), mounterArgs)
}

-func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
+func (b *secretVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
	klog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir)

	// Wrap EmptyDir, let it do the setup.
@@ -219,7 +219,7 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
	}

	setupSuccess := false
-	if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
+	if err := wrapped.SetUpAt(dir, mounterArgs); err != nil {
		return err
	}
	if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil {
@@ -254,9 +254,9 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
		return err
	}

-	err = volume.SetVolumeOwnership(b, fsGroup)
+	err = volume.SetVolumeOwnership(b, mounterArgs.FsGroup)
	if err != nil {
-		klog.Errorf("Error applying volume ownership settings for group: %v", fsGroup)
+		klog.Errorf("Error applying volume ownership settings for group: %v", mounterArgs.FsGroup)
		return err
	}
	setupSuccess = true

@@ -327,7 +327,7 @@ func TestPlugin(t *testing.T) {
		t.Errorf("Got unexpected path: %s", volumePath)
	}

-	err = mounter.SetUp(nil)
+	err = mounter.SetUp(volume.MounterArgs{})
	if err != nil {
		t.Errorf("Failed to setup volume: %v", err)
	}
@@ -401,7 +401,8 @@ func TestInvalidPathSecret(t *testing.T) {
		t.Errorf("Got unexpected path: %s", volumePath)
	}

-	err = mounter.SetUp(nil)
+	var mounterArgs volume.MounterArgs
+	err = mounter.SetUp(mounterArgs)
	if err == nil {
		t.Errorf("Expected error while setting up secret")
	}
@@ -452,7 +453,7 @@ func TestPluginReboot(t *testing.T) {
		t.Errorf("Got unexpected path: %s", volumePath)
	}

-	err = mounter.SetUp(nil)
+	err = mounter.SetUp(volume.MounterArgs{})
	if err != nil {
		t.Errorf("Failed to setup volume: %v", err)
	}
@@ -504,7 +505,7 @@ func TestPluginOptional(t *testing.T) {
		t.Errorf("Got unexpected path: %s", volumePath)
	}

-	err = mounter.SetUp(nil)
+	err = mounter.SetUp(volume.MounterArgs{})
	if err != nil {
		t.Errorf("Failed to setup volume: %v", err)
	}
@@ -602,7 +603,7 @@ func TestPluginOptionalKeys(t *testing.T) {
		t.Errorf("Got unexpected path: %s", volumePath)
	}

-	err = mounter.SetUp(nil)
+	err = mounter.SetUp(volume.MounterArgs{})
	if err != nil {
		t.Errorf("Failed to setup volume: %v", err)
	}

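The calling-convention change is mechanical across the whole tree, as the test hunks above show. A hedged sketch of the before/after for any plugin's Mounter; "gid" is a stand-in for a pod's fsGroup value:

package example

import "k8s.io/kubernetes/pkg/volume"

// setUpExample -- sketch of the migration: "no options" changes from a nil
// pointer to the zero value of the new struct, and an fsGroup moves into a field.
func setUpExample(mounter volume.Mounter, gid int64) error {
	if err := mounter.SetUp(volume.MounterArgs{}); err != nil { // was mounter.SetUp(nil)
		return err
	}
	return mounter.SetUp(volume.MounterArgs{FsGroup: &gid}) // was mounter.SetUp(&gid)
}
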
@@ -345,7 +345,7 @@ func (b *storageosMounter) CanMount() error {
}

// SetUp attaches the disk and bind mounts to the volume path.
-func (b *storageosMounter) SetUp(fsGroup *int64) error {
+func (b *storageosMounter) SetUp(mounterArgs volume.MounterArgs) error {
	// Need a namespace to find the volume, try pod's namespace if not set.
	if b.volNamespace == "" {
		klog.V(2).Infof("Setting StorageOS volume namespace to pod namespace: %s", b.podNamespace)
@@ -375,11 +375,11 @@ func (b *storageosMounter) SetUp(fsGroup *int64) error {
	klog.V(4).Infof("Successfully mounted StorageOS volume %s into global mount directory", b.volName)

	// Bind mount the volume into the pod
-	return b.SetUpAt(b.GetPath(), fsGroup)
+	return b.SetUpAt(b.GetPath(), mounterArgs)
}

// SetUp bind mounts the disk global mount to the given volume path.
-func (b *storageosMounter) SetUpAt(dir string, fsGroup *int64) error {
+func (b *storageosMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
	notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
	klog.V(4).Infof("StorageOS volume set up: %s %v %v", dir, !notMnt, err)
	if err != nil && !os.IsNotExist(err) {
@@ -433,7 +433,7 @@ func (b *storageosMounter) SetUpAt(dir string, fsGroup *int64) error {
	}

	if !b.readOnly {
-		volume.SetVolumeOwnership(b, fsGroup)
+		volume.SetVolumeOwnership(b, mounterArgs.FsGroup)
	}
	klog.V(4).Infof("StorageOS volume setup complete on %s", dir)
	return nil

@@ -208,7 +208,7 @@ func TestPlugin(t *testing.T) {
		t.Errorf("Expected path: '%s' got: '%s'", expectedPath, volPath)
	}

-	if err := mounter.SetUp(nil); err != nil {
+	if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volPath); err != nil {

@@ -765,11 +765,11 @@ func (fv *FakeVolume) CanMount() error {
	return nil
}

-func (fv *FakeVolume) SetUp(fsGroup *int64) error {
+func (fv *FakeVolume) SetUp(mounterArgs MounterArgs) error {
	fv.Lock()
	defer fv.Unlock()
	fv.SetUpCallCount++
-	return fv.SetUpAt(fv.getPath(), fsGroup)
+	return fv.SetUpAt(fv.getPath(), mounterArgs)
}

func (fv *FakeVolume) GetSetUpCallCount() int {
@@ -778,7 +778,7 @@ func (fv *FakeVolume) GetSetUpCallCount() int {
	return fv.SetUpCallCount
}

-func (fv *FakeVolume) SetUpAt(dir string, fsGroup *int64) error {
+func (fv *FakeVolume) SetUpAt(dir string, mounterArgs MounterArgs) error {
	return os.MkdirAll(dir, 0750)
}

@@ -91,6 +91,7 @@ filegroup(
        "//pkg/volume/util/nestedpendingoperations:all-srcs",
        "//pkg/volume/util/nsenter:all-srcs",
        "//pkg/volume/util/operationexecutor:all-srcs",
+       "//pkg/volume/util/quota:all-srcs",
        "//pkg/volume/util/recyclerclient:all-srcs",
        "//pkg/volume/util/subpath:all-srcs",
        "//pkg/volume/util/types:all-srcs",

@@ -14,6 +14,7 @@ go_library(
            "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:darwin": [
+           "//pkg/volume/util/quota:go_default_library",
            "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
            "//vendor/golang.org/x/sys/unix:go_default_library",
        ],
@@ -24,6 +25,7 @@ go_library(
            "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:linux": [
+           "//pkg/volume/util/quota:go_default_library",
            "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
            "//vendor/golang.org/x/sys/unix:go_default_library",
        ],

@@ -27,6 +27,7 @@ import (
	"golang.org/x/sys/unix"

	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/kubernetes/pkg/volume/util/quota"
)

// FSInfo linux returns (available bytes, byte capacity, byte usage, total inodes, inodes free, inode usage, error)
@@ -56,6 +57,15 @@ func FsInfo(path string) (int64, int64, int64, int64, int64, int64, error) {

// DiskUsage gets disk usage of specified path.
func DiskUsage(path string) (*resource.Quantity, error) {
+	// First check whether the quota system knows about this directory
+	// A nil quantity with no error means that the path does not support quotas
+	// and we should use other mechanisms.
+	data, err := quota.GetConsumption(path)
+	if data != nil {
+		return data, nil
+	} else if err != nil {
+		return nil, fmt.Errorf("unable to retrieve disk consumption via quota for %s: %v", path, err)
+	}
	// Uses the same niceness level as cadvisor.fs does when running du
	// Uses -B 1 to always scale to a blocksize of 1 byte
	out, err := exec.Command("nice", "-n", "19", "du", "-s", "-B", "1", path).CombinedOutput()
@@ -76,6 +86,15 @@ func Find(path string) (int64, error) {
	if path == "" {
		return 0, fmt.Errorf("invalid directory")
	}
+	// First check whether the quota system knows about this directory
+	// A nil quantity with no error means that the path does not support quotas
+	// and we should use other mechanisms.
+	inodes, err := quota.GetInodes(path)
+	if inodes != nil {
+		return inodes.Value(), nil
+	} else if err != nil {
+		return 0, fmt.Errorf("unable to retrieve inode consumption via quota for %s: %v", path, err)
+	}
	var counter byteCounter
	var stderr bytes.Buffer
	findCmd := exec.Command("find", path, "-xdev", "-printf", ".")

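Both hunks above follow the same pattern: ask the quota subsystem first and fall back to walking the filesystem (du or find) only when quotas are unavailable. A minimal sketch of that pattern in isolation, with placeholder names rather than the real kubelet API:

package fsexample

import "fmt"

// quotaLookup models the quota fast path; a nil result with a nil error
// means "path not under quota management", mirroring quota.GetConsumption.
type quotaLookup func(path string) (*int64, error)

// usage returns bytes used, preferring the constant-time quota answer over
// an expensive directory enumeration (the du/find fallback in the real code).
func usage(path string, fast quotaLookup, slow func(string) (int64, error)) (int64, error) {
	if v, err := fast(path); v != nil {
		return *v, nil
	} else if err != nil {
		return 0, fmt.Errorf("quota lookup for %s: %v", path, err)
	}
	return slow(path) // path not under quota management; enumerate it
}
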
@@ -26,6 +26,7 @@ go_library(
        "//pkg/volume/util/volumepathhandler:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+       "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",

@@ -27,6 +27,7 @@ import (
	"k8s.io/klog"

	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
@@ -346,6 +347,10 @@ type VolumeToMount struct {
	// ReportedInUse indicates that the volume was successfully added to the
	// VolumesInUse field in the node's status.
	ReportedInUse bool
+
+	// DesiredSizeLimit indicates the desired upper bound on the size of the volume
+	// (if so implemented)
+	DesiredSizeLimit *resource.Quantity
}

// GenerateMsgDetailed returns detailed msgs for volumes to mount

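DesiredSizeLimit is filled in by the volume manager when it builds the desired state of world. A hedged sketch of one plausible derivation for a local ephemeral volume, labeled as an assumption rather than the exact kubelet code: an emptyDir's explicit sizeLimit would win, with a pod-level ephemeral-storage limit as the fallback.

package example

import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// desiredSizeLimit -- assumption, not the exact kubelet logic: prefer the
// emptyDir's own sizeLimit; otherwise use an aggregate pod-level limit
// supplied by the caller. A nil result means "no limit, request no quota".
func desiredSizeLimit(vol v1.Volume, podLimit *resource.Quantity) *resource.Quantity {
	if vol.EmptyDir != nil && vol.EmptyDir.SizeLimit != nil && !vol.EmptyDir.SizeLimit.IsZero() {
		return vol.EmptyDir.SizeLimit
	}
	return podLimit
}
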
@@ -701,7 +701,11 @@ func (og *operationGenerator) GenerateMountVolumeFunc(
		}

		// Execute mount
-		mountErr := volumeMounter.SetUp(fsGroup)
+		mountErr := volumeMounter.SetUp(volume.MounterArgs{
+			FsGroup:     fsGroup,
+			DesiredSize: volumeToMount.DesiredSizeLimit,
+			PodUID:      string(volumeToMount.Pod.UID),
+		})
		if mountErr != nil {
			// On failure, return error. Caller will log and retry.
			return volumeToMount.GenerateError("MountVolume.SetUp failed", mountErr)

@@ -0,0 +1,61 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "project.go",
        "quota.go",
        "quota_linux.go",
        "quota_unsupported.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/util/quota",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/features:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
    ] + select({
        "@io_bazel_rules_go//go/platform:linux": [
            "//pkg/volume/util/quota/common:go_default_library",
            "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
            "//vendor/golang.org/x/sys/unix:go_default_library",
            "//vendor/k8s.io/klog:go_default_library",
        ],
        "//conditions:default": [],
    }),
)

go_test(
    name = "go_default_test",
    srcs = ["quota_linux_test.go"],
    embed = [":go_default_library"],
    deps = select({
        "@io_bazel_rules_go//go/platform:linux": [
            "//pkg/features:go_default_library",
            "//pkg/util/mount:go_default_library",
            "//pkg/volume/util/quota/common:go_default_library",
            "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
            "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
            "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
        ],
        "//conditions:default": [],
    }),
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/volume/util/quota/common:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

@@ -0,0 +1,31 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "quota_linux_common.go",
        "quota_linux_common_impl.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/util/quota/common",
    visibility = ["//visibility:public"],
    deps = select({
        "@io_bazel_rules_go//go/platform:linux": [
            "//vendor/k8s.io/klog:go_default_library",
        ],
        "//conditions:default": [],
    }),
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

@@ -0,0 +1,105 @@
// +build linux

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package common

import (
	"regexp"
)

// QuotaID is generic quota identifier.
// Data type based on quotactl(2).
type QuotaID int32

const (
	// UnknownQuotaID -- cannot determine whether a quota is in force
	UnknownQuotaID QuotaID = -1
	// BadQuotaID -- Invalid quota
	BadQuotaID QuotaID = 0
)

const (
	acct      = iota
	enforcing = iota
)

// QuotaType -- type of quota to be applied
type QuotaType int

const (
	// FSQuotaAccounting for quotas for accounting only
	FSQuotaAccounting QuotaType = 1 << iota
	// FSQuotaEnforcing for quotas for enforcement
	FSQuotaEnforcing QuotaType = 1 << iota
)

// FirstQuota is the quota ID we start with.
// XXXXXXX Need a better way of doing this...
var FirstQuota QuotaID = 1048577

// MountsFile is the location of the system mount data
var MountsFile = "/proc/self/mounts"

// MountParseRegexp parses out /proc/self/mounts
var MountParseRegexp = regexp.MustCompilePOSIX("^([^ ]*)[ \t]*([^ ]*)[ \t]*([^ ]*)") // Ignore options etc.

// LinuxVolumeQuotaProvider returns an appropriate quota applier
// object if we can support quotas on this device
type LinuxVolumeQuotaProvider interface {
	// GetQuotaApplier retrieves an object that can apply
	// quotas (or nil if this provider cannot support quotas
	// on the device)
	GetQuotaApplier(mountpoint string, backingDev string) LinuxVolumeQuotaApplier
}

// LinuxVolumeQuotaApplier is a generic interface to any quota
// mechanism supported by Linux
type LinuxVolumeQuotaApplier interface {
	// GetQuotaOnDir gets the quota ID (if any) that applies to
	// this directory
	GetQuotaOnDir(path string) (QuotaID, error)

	// SetQuotaOnDir applies the specified quota ID to a directory.
	// Negative value for bytes means that a non-enforcing quota
	// should be applied (perhaps by setting a quota too large to
	// be hit)
	SetQuotaOnDir(path string, id QuotaID, bytes int64) error

	// QuotaIDIsInUse determines whether the quota ID is in use.
	// Implementations should not check /etc/project or /etc/projid,
	// only whether their underlying mechanism already has the ID in
	// use.
	// Return value of false with no error means that the ID is not
	// in use; true means that it is already in use. An error
	// return means that any quota ID will fail.
	QuotaIDIsInUse(id QuotaID) (bool, error)

	// GetConsumption returns the consumption (in bytes) of the
	// directory, determined by the implementation's quota-based
	// mechanism. If it is unable to do so using that mechanism,
	// it should return an error and allow higher layers to
	// enumerate the directory.
	GetConsumption(path string, id QuotaID) (int64, error)

	// GetInodes returns the number of inodes used by the
	// directory, determined by the implementation's quota-based
	// mechanism. If it is unable to do so using that mechanism,
	// it should return an error and allow higher layers to
	// enumerate the directory.
	GetInodes(path string, id QuotaID) (int64, error)
}

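Taken together, a provider probes a device and hands back an applier for it. A hedged sketch of how a consumer of these two interfaces might wire them up; the provider loop mirrors the scan that appears in quota_linux.go later in this diff:

package example

import (
	"fmt"

	"k8s.io/kubernetes/pkg/volume/util/quota/common"
)

// applyQuota -- sketch: pick an applier for a mountpoint and place a
// non-enforcing quota on a directory. The XFS applier clamps a negative
// byte count to the filesystem maximum, making the quota accounting-only.
func applyQuota(providers []common.LinuxVolumeQuotaProvider,
	mountpoint, backingDev, dir string, id common.QuotaID) error {
	for _, p := range providers {
		if applier := p.GetQuotaApplier(mountpoint, backingDev); applier != nil {
			return applier.SetQuotaOnDir(dir, id, -1)
		}
	}
	return fmt.Errorf("no quota support on %s", mountpoint)
}
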
@@ -0,0 +1,286 @@
// +build linux

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package common

import (
	"bufio"
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"syscall"

	"k8s.io/klog"
)

var quotaCmd string
var quotaCmdInitialized bool
var quotaCmdLock sync.RWMutex

// If we later get a filesystem that uses project quota semantics other than
// XFS, we'll need to change this.
// Higher levels don't need to know what's inside
type linuxFilesystemType struct {
	name             string
	typeMagic        int64 // Filesystem magic number, per statfs(2)
	maxQuota         int64
	allowEmptyOutput bool // Accept empty output from "quota" command
}

const (
	bitsPerWord = 32 << (^uint(0) >> 63) // either 32 or 64
)

var (
	linuxSupportedFilesystems = []linuxFilesystemType{
		{
			name:             "XFS",
			typeMagic:        0x58465342,
			maxQuota:         1<<(bitsPerWord-1) - 1,
			allowEmptyOutput: true, // XFS filesystems report nothing if a quota is not present
		}, {
			name:             "ext4fs",
			typeMagic:        0xef53,
			maxQuota:         (1<<(bitsPerWord-1) - 1) & (1<<58 - 1),
			allowEmptyOutput: false, // ext4 filesystems always report something even if a quota is not present
		},
	}
)

// VolumeProvider supplies a quota applier to the generic code.
type VolumeProvider struct {
}

var quotaCmds = []string{"/sbin/xfs_quota",
	"/usr/sbin/xfs_quota",
	"/bin/xfs_quota"}

var quotaParseRegexp = regexp.MustCompilePOSIX("^[^ \t]*[ \t]*([0-9]+)")

var lsattrCmd = "/usr/bin/lsattr"
var lsattrParseRegexp = regexp.MustCompilePOSIX("^ *([0-9]+) [^ ]+ (.*)$")

// GetQuotaApplier -- does this backing device support quotas that
// can be applied to directories?
func (*VolumeProvider) GetQuotaApplier(mountpoint string, backingDev string) LinuxVolumeQuotaApplier {
	for _, fsType := range linuxSupportedFilesystems {
		if isFilesystemOfType(mountpoint, backingDev, fsType.typeMagic) {
			return linuxVolumeQuotaApplier{mountpoint: mountpoint,
				maxQuota:         fsType.maxQuota,
				allowEmptyOutput: fsType.allowEmptyOutput,
			}
		}
	}
	return nil
}

type linuxVolumeQuotaApplier struct {
	mountpoint       string
	maxQuota         int64
	allowEmptyOutput bool
}

func getXFSQuotaCmd() (string, error) {
	quotaCmdLock.Lock()
	defer quotaCmdLock.Unlock()
	if quotaCmdInitialized {
		return quotaCmd, nil
	}
	for _, program := range quotaCmds {
		fileinfo, err := os.Stat(program)
		if err == nil && ((fileinfo.Mode().Perm() & (1 << 6)) != 0) {
			klog.V(3).Infof("Found xfs_quota program %s", program)
			quotaCmd = program
			quotaCmdInitialized = true
			return quotaCmd, nil
		}
	}
	quotaCmdInitialized = true
	return "", fmt.Errorf("No xfs_quota program found")
}

func doRunXFSQuotaCommand(mountpoint string, mountsFile, command string) (string, error) {
	quotaCmd, err := getXFSQuotaCmd()
	if err != nil {
		return "", err
	}
	// We're using numeric project IDs directly; no need to scan
	// /etc/projects or /etc/projid
	klog.V(4).Infof("runXFSQuotaCommand %s -t %s -P/dev/null -D/dev/null -x -f %s -c %s", quotaCmd, mountsFile, mountpoint, command)
	cmd := exec.Command(quotaCmd, "-t", mountsFile, "-P/dev/null", "-D/dev/null", "-x", "-f", mountpoint, "-c", command)

	data, err := cmd.Output()
	if err != nil {
		return "", err
	}
	klog.V(4).Infof("runXFSQuotaCommand output %q", string(data))
	return string(data), nil
}

// Extract the mountpoint we care about into a temporary mounts file so that xfs_quota does
// not attempt to scan every mount on the filesystem, which could hang if e. g.
// a stuck NFS mount is present.
// See https://bugzilla.redhat.com/show_bug.cgi?id=237120 for an example
// of the problem that could be caused if this were to happen.
func runXFSQuotaCommand(mountpoint string, command string) (string, error) {
	tmpMounts, err := ioutil.TempFile("", "mounts")
	if err != nil {
		return "", fmt.Errorf("Cannot create temporary mount file: %v", err)
	}
	tmpMountsFileName := tmpMounts.Name()
	defer tmpMounts.Close()
	defer os.Remove(tmpMountsFileName)

	mounts, err := os.Open(MountsFile)
	if err != nil {
		return "", fmt.Errorf("Cannot open mounts file %s: %v", MountsFile, err)
	}
	defer mounts.Close()

	scanner := bufio.NewScanner(mounts)
	for scanner.Scan() {
		match := MountParseRegexp.FindStringSubmatch(scanner.Text())
		if match != nil {
			mount := match[2]
			if mount == mountpoint {
				if _, err := tmpMounts.WriteString(fmt.Sprintf("%s\n", scanner.Text())); err != nil {
					return "", fmt.Errorf("Cannot write temporary mounts file: %v", err)
				}
				if err := tmpMounts.Sync(); err != nil {
					return "", fmt.Errorf("Cannot sync temporary mounts file: %v", err)
				}
				return doRunXFSQuotaCommand(mountpoint, tmpMountsFileName, command)
			}
		}
	}
	return "", fmt.Errorf("Cannot run xfs_quota: cannot find mount point %s in %s", mountpoint, MountsFile)
}

// SupportsQuotas determines whether the filesystem supports quotas.
func SupportsQuotas(mountpoint string, qType QuotaType) (bool, error) {
	data, err := runXFSQuotaCommand(mountpoint, "state -p")
	if err != nil {
		return false, err
	}
	if qType == FSQuotaEnforcing {
		return strings.Contains(data, "Enforcement: ON"), nil
	}
	return strings.Contains(data, "Accounting: ON"), nil
}

func isFilesystemOfType(mountpoint string, backingDev string, typeMagic int64) bool {
	var buf syscall.Statfs_t
	err := syscall.Statfs(mountpoint, &buf)
	if err != nil {
		klog.Warningf("Warning: Unable to statfs %s: %v", mountpoint, err)
		return false
	}
	if int64(buf.Type) != typeMagic {
		return false
	}
	if answer, _ := SupportsQuotas(mountpoint, FSQuotaAccounting); answer {
		return true
	}
	return false
}

// GetQuotaOnDir retrieves the quota ID (if any) associated with the specified directory
// If we can't make system calls, all we can say is that we don't know whether
// it has a quota, and higher levels have to make the call.
func (v linuxVolumeQuotaApplier) GetQuotaOnDir(path string) (QuotaID, error) {
	cmd := exec.Command(lsattrCmd, "-pd", path)
	data, err := cmd.Output()
	if err != nil {
		return BadQuotaID, fmt.Errorf("cannot run lsattr: %v", err)
	}
	match := lsattrParseRegexp.FindStringSubmatch(string(data))
	if match == nil {
		return BadQuotaID, fmt.Errorf("Unable to parse lsattr -pd %s output %s", path, string(data))
	}
	if match[2] != path {
		return BadQuotaID, fmt.Errorf("Mismatch between supplied and returned path (%s != %s)", path, match[2])
	}
	projid, err := strconv.ParseInt(match[1], 10, 32)
	if err != nil {
		return BadQuotaID, fmt.Errorf("Unable to parse project ID from %s (%v)", match[1], err)
	}
	return QuotaID(projid), nil
}

// SetQuotaOnDir applies a quota to the specified directory under the specified mountpoint.
func (v linuxVolumeQuotaApplier) SetQuotaOnDir(path string, id QuotaID, bytes int64) error {
	if bytes < 0 || bytes > v.maxQuota {
		bytes = v.maxQuota
	}
	_, err := runXFSQuotaCommand(v.mountpoint, fmt.Sprintf("limit -p bhard=%v bsoft=%v %v", bytes, bytes, id))
	if err != nil {
		return err
	}

	_, err = runXFSQuotaCommand(v.mountpoint, fmt.Sprintf("project -s -p %s %v", path, id))
	return err
}

func getQuantity(mountpoint string, id QuotaID, xfsQuotaArg string, multiplier int64, allowEmptyOutput bool) (int64, error) {
	data, err := runXFSQuotaCommand(mountpoint, fmt.Sprintf("quota -p -N -n -v %s %v", xfsQuotaArg, id))
	if err != nil {
		return 0, fmt.Errorf("Unable to run xfs_quota: %v", err)
	}
	if data == "" && allowEmptyOutput {
		return 0, nil
	}
	match := quotaParseRegexp.FindStringSubmatch(data)
	if match == nil {
		return 0, fmt.Errorf("Unable to parse quota output '%s'", data)
	}
	size, err := strconv.ParseInt(match[1], 10, 64)
	if err != nil {
		return 0, fmt.Errorf("Unable to parse data size '%s' from '%s': %v", match[1], data, err)
	}
	klog.V(4).Infof("getQuantity %s %d %s %d => %d %v", mountpoint, id, xfsQuotaArg, multiplier, size, err)
	return size * multiplier, nil
}

// GetConsumption returns the consumption in bytes if available via quotas
func (v linuxVolumeQuotaApplier) GetConsumption(_ string, id QuotaID) (int64, error) {
	return getQuantity(v.mountpoint, id, "-b", 1024, v.allowEmptyOutput)
}

// GetInodes returns the inodes in use if available via quotas
func (v linuxVolumeQuotaApplier) GetInodes(_ string, id QuotaID) (int64, error) {
	return getQuantity(v.mountpoint, id, "-i", 1, v.allowEmptyOutput)
}

// QuotaIDIsInUse checks whether the specified quota ID is in use on the specified
// filesystem
func (v linuxVolumeQuotaApplier) QuotaIDIsInUse(id QuotaID) (bool, error) {
	bytes, err := v.GetConsumption(v.mountpoint, id)
	if err != nil {
		return false, err
	}
	if bytes > 0 {
		return true, nil
	}
	inodes, err := v.GetInodes(v.mountpoint, id)
	return inodes > 0, err
}

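For reference, the concrete xfs_quota invocations the applier above generates, reconstructed from its format strings; the temporary mounts file, mountpoint, project ID, and path are illustrative values, not anything this code hardwires:

    xfs_quota -t /tmp/mounts123 -P/dev/null -D/dev/null -x -f /var/lib/kubelet -c "state -p"
    xfs_quota -t /tmp/mounts123 -P/dev/null -D/dev/null -x -f /var/lib/kubelet -c "limit -p bhard=1073741824 bsoft=1073741824 1048577"
    xfs_quota -t /tmp/mounts123 -P/dev/null -D/dev/null -x -f /var/lib/kubelet -c "project -s -p /var/lib/kubelet/pods/uid/volumes/v 1048577"
    xfs_quota -t /tmp/mounts123 -P/dev/null -D/dev/null -x -f /var/lib/kubelet -c "quota -p -N -n -v -b 1048577"

The last form reports usage in 1 KiB blocks, which is why GetConsumption multiplies by 1024; -i substitutes for -b when counting inodes, with a multiplier of 1.
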
@@ -0,0 +1,357 @@
// +build linux

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package quota

import (
	"bufio"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"regexp"
	"strconv"
	"sync"

	"golang.org/x/sys/unix"
	"k8s.io/kubernetes/pkg/volume/util/quota/common"
)

var projectsFile = "/etc/projects"
var projidFile = "/etc/projid"

var projectsParseRegexp = regexp.MustCompilePOSIX("^([[:digit:]]+):(.*)$")
var projidParseRegexp = regexp.MustCompilePOSIX("^([^#][^:]*):([[:digit:]]+)$")

var quotaIDLock sync.RWMutex

const maxUnusedQuotasToSearch = 128 // Don't go into an infinite loop searching for an unused quota

type projectType struct {
	isValid bool // False if we need to remove this line
	id      common.QuotaID
	data    string // Project name (projid) or directory (projects)
	line    string
}

type projectsList struct {
	projects []projectType
	projid   []projectType
}

func projFilesAreOK() error {
	if sf, err := os.Lstat(projectsFile); err != nil || sf.Mode().IsRegular() {
		if sf, err := os.Lstat(projidFile); err != nil || sf.Mode().IsRegular() {
			return nil
		}
		return fmt.Errorf("%s exists but is not a plain file, cannot continue", projidFile)
	}
	return fmt.Errorf("%s exists but is not a plain file, cannot continue", projectsFile)
}

func lockFile(file *os.File) error {
	return unix.Flock(int(file.Fd()), unix.LOCK_EX)
}

func unlockFile(file *os.File) error {
	return unix.Flock(int(file.Fd()), unix.LOCK_UN)
}

// openAndLockProjectFiles opens /etc/projects and /etc/projid locked.
// Creates them if they don't exist
func openAndLockProjectFiles() (*os.File, *os.File, error) {
	// Make sure neither project-related file is a symlink!
	if err := projFilesAreOK(); err != nil {
		return nil, nil, fmt.Errorf("system project files failed verification: %v", err)
	}
	// We don't actually modify the original files; we create temporaries and
	// move them over the originals
	fProjects, err := os.OpenFile(projectsFile, os.O_RDONLY|os.O_CREATE, 0644)
	if err != nil {
		err = fmt.Errorf("unable to open %s: %v", projectsFile, err)
		return nil, nil, err
	}
	fProjid, err := os.OpenFile(projidFile, os.O_RDONLY|os.O_CREATE, 0644)
	if err == nil {
		// Check once more, to ensure nothing got changed out from under us
		if err := projFilesAreOK(); err == nil {
			err = lockFile(fProjects)
			if err == nil {
				err = lockFile(fProjid)
				if err == nil {
					return fProjects, fProjid, nil
				}
				// Nothing useful we can do if we get an error here
				err = fmt.Errorf("unable to lock %s: %v", projidFile, err)
				unlockFile(fProjects)
			} else {
				err = fmt.Errorf("unable to lock %s: %v", projectsFile, err)
			}
		} else {
			err = fmt.Errorf("system project files failed re-verification: %v", err)
		}
		fProjid.Close()
	} else {
		err = fmt.Errorf("unable to open %s: %v", projidFile, err)
	}
	fProjects.Close()
	return nil, nil, err
}

func closeProjectFiles(fProjects *os.File, fProjid *os.File) error {
	// Nothing useful we can do if either of these fail,
	// but we have to close (and thereby unlock) the files anyway.
	var err error
	var err1 error
	if fProjid != nil {
		err = fProjid.Close()
	}
	if fProjects != nil {
		err1 = fProjects.Close()
	}
	if err == nil {
		return err1
	}
	return err
}

func parseProject(l string) projectType {
	if match := projectsParseRegexp.FindStringSubmatch(l); match != nil {
		i, err := strconv.Atoi(match[1])
		if err == nil {
			return projectType{true, common.QuotaID(i), match[2], l}
		}
	}
	return projectType{true, common.BadQuotaID, "", l}
}

func parseProjid(l string) projectType {
	if match := projidParseRegexp.FindStringSubmatch(l); match != nil {
		i, err := strconv.Atoi(match[2])
		if err == nil {
			return projectType{true, common.QuotaID(i), match[1], l}
		}
	}
	return projectType{true, common.BadQuotaID, "", l}
}

func parseProjFile(f *os.File, parser func(l string) projectType) []projectType {
	var answer []projectType
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		answer = append(answer, parser(scanner.Text()))
	}
	return answer
}

func readProjectFiles(projects *os.File, projid *os.File) projectsList {
	return projectsList{parseProjFile(projects, parseProject), parseProjFile(projid, parseProjid)}
}

func findAvailableQuota(path string, idMap map[common.QuotaID]bool) (common.QuotaID, error) {
	unusedQuotasSearched := 0
	for id := common.FirstQuota; id == id; id++ {
		if _, ok := idMap[id]; !ok {
			isInUse, err := getApplier(path).QuotaIDIsInUse(id)
			if err != nil {
				return common.BadQuotaID, err
			} else if !isInUse {
				return id, nil
			}
			unusedQuotasSearched++
			if unusedQuotasSearched > maxUnusedQuotasToSearch {
				break
			}
		}
	}
	return common.BadQuotaID, fmt.Errorf("Cannot find available quota ID")
}

func addDirToProject(path string, id common.QuotaID, list *projectsList) (common.QuotaID, bool, error) {
	idMap := make(map[common.QuotaID]bool)
	for _, project := range list.projects {
		if project.data == path {
			if id != project.id {
				return common.BadQuotaID, false, fmt.Errorf("Attempt to reassign project ID for %s", path)
			}
			// Trying to reassign a directory to the project it's
			// already in.  Maybe this should be an error, but for
			// now treat it as an idempotent operation
			return id, false, nil
		}
		idMap[project.id] = true
	}
	var needToAddProjid = true
	for _, projid := range list.projid {
		idMap[projid.id] = true
		if projid.id == id && id != common.BadQuotaID {
			needToAddProjid = false
		}
	}
	var err error
	if id == common.BadQuotaID {
		id, err = findAvailableQuota(path, idMap)
		if err != nil {
			return common.BadQuotaID, false, err
		}
		needToAddProjid = true
	}
	if needToAddProjid {
		name := fmt.Sprintf("volume%v", id)
		line := fmt.Sprintf("%s:%v", name, id)
		list.projid = append(list.projid, projectType{true, id, name, line})
	}
	line := fmt.Sprintf("%v:%s", id, path)
	list.projects = append(list.projects, projectType{true, id, path, line})
	return id, needToAddProjid, nil
}

func removeDirFromProject(path string, id common.QuotaID, list *projectsList) (bool, error) {
	if id == common.BadQuotaID {
		return false, fmt.Errorf("Attempt to remove invalid quota ID from %s", path)
	}
	foundAt := -1
	countByID := make(map[common.QuotaID]int)
	for i, project := range list.projects {
		if project.data == path {
			if id != project.id {
				return false, fmt.Errorf("Attempting to remove quota ID %v from path %s, but expecting ID %v", id, path, project.id)
			} else if foundAt != -1 {
				return false, fmt.Errorf("Found multiple quota IDs for path %s", path)
			}
			// Faster and easier than deleting an element
			list.projects[i].isValid = false
			foundAt = i
		}
		countByID[project.id]++
	}
	if foundAt == -1 {
		return false, fmt.Errorf("Cannot find quota associated with path %s", path)
	}
	if countByID[id] <= 1 {
		// Removing the last entry means that we're no longer using
		// the quota ID, so remove that as well
		for i, projid := range list.projid {
			if projid.id == id {
				list.projid[i].isValid = false
			}
		}
		return true, nil
	}
	return false, nil
}

func writeProjectFile(base *os.File, projects []projectType) (string, error) {
	oname := base.Name()
	stat, err := base.Stat()
	if err != nil {
		return "", err
	}
	mode := stat.Mode() & os.ModePerm
	f, err := ioutil.TempFile(filepath.Dir(oname), filepath.Base(oname))
	if err != nil {
		return "", err
	}
	filename := f.Name()
	if err := os.Chmod(filename, mode); err != nil {
		return "", err
	}
	for _, proj := range projects {
		if proj.isValid {
			if _, err := f.WriteString(fmt.Sprintf("%s\n", proj.line)); err != nil {
				f.Close()
				os.Remove(filename)
				return "", err
			}
		}
	}
	if err := f.Close(); err != nil {
		os.Remove(filename)
		return "", err
	}
	return filename, nil
}

func writeProjectFiles(fProjects *os.File, fProjid *os.File, writeProjid bool, list projectsList) error {
	tmpProjects, err := writeProjectFile(fProjects, list.projects)
	if err == nil {
		// Ensure that both files are written before we try to rename either.
		if writeProjid {
			tmpProjid, err := writeProjectFile(fProjid, list.projid)
			if err == nil {
				err = os.Rename(tmpProjid, fProjid.Name())
				if err != nil {
					os.Remove(tmpProjid)
				}
			}
		}
		if err == nil {
			err = os.Rename(tmpProjects, fProjects.Name())
			if err == nil {
				return nil
			}
			// We're in a bit of trouble here; at this
			// point we've successfully renamed tmpProjid
			// to the real thing, but renaming tmpProject
			// to the real file failed.  There's not much we
			// can do in this position.  Anything we could do
			// to try to undo it would itself be likely to fail.
		}
		os.Remove(tmpProjects)
	}
	return fmt.Errorf("Unable to write project files: %v", err)
}

func createProjectID(path string, ID common.QuotaID) (common.QuotaID, error) {
	quotaIDLock.Lock()
	defer quotaIDLock.Unlock()
	fProjects, fProjid, err := openAndLockProjectFiles()
	if err == nil {
		defer closeProjectFiles(fProjects, fProjid)
		list := readProjectFiles(fProjects, fProjid)
		writeProjid := true
		ID, writeProjid, err = addDirToProject(path, ID, &list)
		if err == nil && ID != common.BadQuotaID {
			if err = writeProjectFiles(fProjects, fProjid, writeProjid, list); err == nil {
				return ID, nil
			}
		}
	}
	return common.BadQuotaID, fmt.Errorf("createProjectID %s %v failed %v", path, ID, err)
}

func removeProjectID(path string, ID common.QuotaID) error {
	if ID == common.BadQuotaID {
		return fmt.Errorf("attempting to remove invalid quota ID %v", ID)
	}
	quotaIDLock.Lock()
	defer quotaIDLock.Unlock()
	fProjects, fProjid, err := openAndLockProjectFiles()
	if err == nil {
		defer closeProjectFiles(fProjects, fProjid)
		list := readProjectFiles(fProjects, fProjid)
		writeProjid := true
		writeProjid, err = removeDirFromProject(path, ID, &list)
		if err == nil {
			if err = writeProjectFiles(fProjects, fProjid, writeProjid, list); err == nil {
				return nil
			}
		}
	}
	return fmt.Errorf("removeProjectID %s %v failed %v", path, ID, err)
}

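The two files managed above use the classic XFS project-quota formats. From the fmt.Sprintf calls in addDirToProject, entries written by this code look like the following (the ID and path are illustrative):

    # /etc/projects -- <project id>:<directory>
    1048578:/var/lib/kubelet/pods/uid/volumes/kubernetes.io~empty-dir/cache

    # /etc/projid -- <project name>:<project id>
    volume1048578:1048578
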
@@ -0,0 +1,48 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package quota

import (
	"k8s.io/apimachinery/pkg/api/resource"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/util/mount"
)

// Interface -- quota interface
type Interface interface {
	// Does the path provided support quotas, and if so, what types
	SupportsQuotas(m mount.Interface, path string) (bool, error)
	// Assign a quota (picked by the quota mechanism) to a path,
	// and return it.
	AssignQuota(m mount.Interface, path string, poduid string, bytes *resource.Quantity) error

	// Get the quota-based storage consumption for the path
	GetConsumption(path string) (*resource.Quantity, error)

	// Get the quota-based inode consumption for the path
	GetInodes(path string) (*resource.Quantity, error)

	// Remove the quota from a path
	// Implementations may assume that any data covered by the
	// quota has already been removed.
	ClearQuota(m mount.Interface, path string, poduid string) error
}

func enabledQuotasForMonitoring() bool {
	return utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolationFSQuotaMonitoring)
}

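Everything in this package is gated on LocalStorageCapacityIsolationFSQuotaMonitoring, which is alpha and off by default, so exercising the quota path requires enabling the gate explicitly. The line below uses the standard kubelet feature-gate flag syntax rather than anything specific to this commit:

    kubelet --feature-gates=LocalStorageCapacityIsolationFSQuotaMonitoring=true
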
@ -0,0 +1,440 @@
|
|||
// +build linux
|
||||
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package quota
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/klog"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
"k8s.io/kubernetes/pkg/volume/util/quota/common"
|
||||
)
|
||||
|
||||
// Pod -> ID
|
||||
var podQuotaMap = make(map[string]common.QuotaID)
|
||||
|
||||
// Dir -> ID (for convenience)
|
||||
var dirQuotaMap = make(map[string]common.QuotaID)
|
||||
|
||||
// ID -> pod
|
||||
var quotaPodMap = make(map[common.QuotaID]string)
|
||||
|
||||
// Directory -> pod
|
||||
var dirPodMap = make(map[string]string)
|
||||
|
||||
// Backing device -> applier
|
||||
// This is *not* cleaned up; its size will be bounded.
|
||||
var devApplierMap = make(map[string]common.LinuxVolumeQuotaApplier)
|
||||
|
||||
// Directory -> applier
|
||||
var dirApplierMap = make(map[string]common.LinuxVolumeQuotaApplier)
|
||||
var dirApplierLock sync.RWMutex
|
||||
|
||||
// Pod -> refcount
|
||||
var podDirCountMap = make(map[string]int)
|
||||
|
||||
// ID -> size
|
||||
var quotaSizeMap = make(map[common.QuotaID]int64)
|
||||
var quotaLock sync.RWMutex
|
||||
|
||||
var supportsQuotasMap = make(map[string]bool)
|
||||
var supportsQuotasLock sync.RWMutex
|
||||
|
||||
// Directory -> backingDev
|
||||
var backingDevMap = make(map[string]string)
|
||||
var backingDevLock sync.RWMutex
|
||||
|
||||
var mountpointMap = make(map[string]string)
|
||||
var mountpointLock sync.RWMutex
|
||||
|
||||
var providers = []common.LinuxVolumeQuotaProvider{
|
||||
&common.VolumeProvider{},
|
||||
}
|
||||
|
||||
// Separate the innards for ease of testing
|
||||
func detectBackingDevInternal(mountpoint string, mounts string) (string, error) {
|
||||
file, err := os.Open(mounts)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer file.Close()
|
||||
scanner := bufio.NewScanner(file)
|
||||
for scanner.Scan() {
|
||||
match := common.MountParseRegexp.FindStringSubmatch(scanner.Text())
|
||||
if match != nil {
|
||||
device := match[1]
|
||||
mount := match[2]
|
||||
if mount == mountpoint {
|
||||
return device, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("couldn't find backing device for %s", mountpoint)
|
||||
}
|
||||
|
||||
// detectBackingDev assumes that the mount point provided is valid
|
||||
func detectBackingDev(_ mount.Interface, mountpoint string) (string, error) {
|
||||
return detectBackingDevInternal(mountpoint, common.MountsFile)
|
||||
}
|
||||
|
||||
func clearBackingDev(path string) {
|
||||
backingDevLock.Lock()
|
||||
defer backingDevLock.Unlock()
|
||||
delete(backingDevMap, path)
|
||||
}
|
||||
|
||||
// Assumes that the path has been fully canonicalized
|
||||
// Breaking this up helps with testing
|
||||
func detectMountpointInternal(m mount.Interface, path string) (string, error) {
|
||||
for path != "" && path != "/" {
|
||||
// per pkg/util/mount/mount_linux this detects all but
|
||||
// a bind mount from one part of a mount to another.
|
||||
// For our purposes that's fine; we simply want the "true"
|
||||
// mount point
|
||||
//
|
||||
// IsNotMountPoint proved much more troublesome; it actually
|
||||
// scans the mounts, and when a lot of mount/unmount
|
||||
// activity takes place, it is not able to get a consistent
|
||||
// view of /proc/self/mounts, causing it to time out and
|
||||
// report incorrectly.
|
||||
isNotMount, err := m.IsLikelyNotMountPoint(path)
|
||||
if err != nil {
|
||||
return "/", err
|
||||
}
|
||||
if !isNotMount {
|
||||
return path, nil
|
||||
}
|
||||
path = filepath.Dir(path)
|
||||
}
|
||||
return "/", nil
|
||||
}
|
||||
|
||||
func detectMountpoint(m mount.Interface, path string) (string, error) {
|
||||
xpath, err := filepath.Abs(path)
|
||||
if err != nil {
|
||||
return "/", err
|
||||
}
|
||||
xpath, err = filepath.EvalSymlinks(xpath)
|
||||
if err != nil {
|
||||
return "/", err
|
||||
}
|
||||
if xpath, err = detectMountpointInternal(m, xpath); err == nil {
|
||||
return xpath, nil
|
||||
}
|
||||
return "/", err
|
||||
}
|
||||
|
||||
func clearMountpoint(path string) {
|
||||
mountpointLock.Lock()
|
||||
defer mountpointLock.Unlock()
|
||||
delete(mountpointMap, path)
|
||||
}
|
||||
|
||||
// getFSInfo Returns mountpoint and backing device
|
||||
// getFSInfo should cache the mountpoint and backing device for the
|
||||
// path.
|
||||
func getFSInfo(m mount.Interface, path string) (string, string, error) {
|
||||
mountpointLock.Lock()
|
||||
defer mountpointLock.Unlock()
|
||||
|
||||
backingDevLock.Lock()
|
||||
defer backingDevLock.Unlock()
|
||||
|
||||
var err error
|
||||
|
||||
mountpoint, okMountpoint := mountpointMap[path]
|
||||
if !okMountpoint {
|
||||
mountpoint, err = detectMountpoint(m, path)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("Cannot determine mountpoint for %s: %v", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
backingDev, okBackingDev := backingDevMap[path]
|
||||
if !okBackingDev {
|
||||
backingDev, err = detectBackingDev(m, mountpoint)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("Cannot determine backing device for %s: %v", path, err)
|
||||
}
|
||||
}
|
||||
mountpointMap[path] = mountpoint
|
||||
backingDevMap[path] = backingDev
|
||||
return mountpoint, backingDev, nil
|
||||
}
|
||||
|
||||
func clearFSInfo(path string) {
|
||||
clearMountpoint(path)
|
||||
clearBackingDev(path)
|
||||
}
|
||||
|
||||
func getApplier(path string) common.LinuxVolumeQuotaApplier {
|
||||
dirApplierLock.Lock()
|
||||
defer dirApplierLock.Unlock()
|
||||
return dirApplierMap[path]
|
||||
}
|
||||
|
||||
func setApplier(path string, applier common.LinuxVolumeQuotaApplier) {
|
||||
dirApplierLock.Lock()
|
||||
defer dirApplierLock.Unlock()
|
||||
dirApplierMap[path] = applier
|
||||
}
|
||||
|
||||
func clearApplier(path string) {
|
||||
dirApplierLock.Lock()
|
||||
defer dirApplierLock.Unlock()
|
||||
delete(dirApplierMap, path)
|
||||
}
|
||||
|
||||
func setQuotaOnDir(path string, id common.QuotaID, bytes int64) error {
|
||||
return getApplier(path).SetQuotaOnDir(path, id, bytes)
|
||||
}
|
||||
|
||||
func getQuotaOnDir(m mount.Interface, path string) (common.QuotaID, error) {
|
||||
_, _, err := getFSInfo(m, path)
|
||||
if err != nil {
|
||||
return common.BadQuotaID, err
|
||||
}
|
||||
return getApplier(path).GetQuotaOnDir(path)
|
||||
}
|
||||
|
||||
func clearQuotaOnDir(m mount.Interface, path string) error {
|
||||
// Since we may be called without path being in the map,
|
||||
// we explicitly have to check in this case.
|
||||
klog.V(4).Infof("clearQuotaOnDir %s", path)
|
||||
supportsQuotas, err := SupportsQuotas(m, path)
|
||||
if !supportsQuotas {
|
||||
return nil
|
||||
}
|
||||
projid, err := getQuotaOnDir(m, path)
|
||||
if err == nil && projid != common.BadQuotaID {
|
||||
// This means that we have a quota on the directory but
|
||||
// we can't clear it. That's not good.
|
||||
err = setQuotaOnDir(path, projid, 0)
|
||||
if err != nil {
|
||||
klog.V(3).Infof("Attempt to clear quota failed: %v", err)
|
||||
}
|
||||
// Even if clearing the quota failed, we still need to
|
||||
// try to remove the project ID, or that may be left dangling.
|
||||
err1 := removeProjectID(path, projid)
|
||||
if err1 != nil {
|
||||
klog.V(3).Infof("Attempt to remove quota ID from system files failed: %v", err1)
|
||||
}
|
||||
clearFSInfo(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return err1
|
||||
}
|
||||
// If we couldn't get a quota, that's fine -- there may
|
||||
// never have been one, and we have no way to know otherwise
|
||||
klog.V(3).Infof("clearQuotaOnDir fails %v", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SupportsQuotas -- Does the path support quotas
|
||||
// Cache the applier for paths that support quotas. For paths that don't,
|
||||
// don't cache the result because nothing will clean it up.
|
||||
// However, do cache the device->applier map; the number of devices
|
||||
// is bounded.
|
||||
func SupportsQuotas(m mount.Interface, path string) (bool, error) {
|
||||
if !enabledQuotasForMonitoring() {
|
||||
klog.V(3).Info("SupportsQuotas called, but quotas disabled")
|
||||
return false, nil
|
||||
}
|
||||
supportsQuotasLock.Lock()
|
||||
defer supportsQuotasLock.Unlock()
|
||||
if supportsQuotas, ok := supportsQuotasMap[path]; ok {
|
||||
return supportsQuotas, nil
|
||||
}
|
||||
mount, dev, err := getFSInfo(m, path)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Do we know about this device?
|
||||
applier, ok := devApplierMap[mount]
|
||||
if !ok {
|
||||
for _, provider := range providers {
|
||||
if applier = provider.GetQuotaApplier(mount, dev); applier != nil {
|
||||
devApplierMap[mount] = applier
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if applier != nil {
|
||||
supportsQuotasMap[path] = true
|
||||
setApplier(path, applier)
|
||||
return true, nil
|
||||
}
|
||||
delete(backingDevMap, path)
|
||||
delete(mountpointMap, path)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// AssignQuota -- assign a quota to the specified directory.
|
||||
// AssignQuota chooses the quota ID based on the pod UID and path.
|
||||
// If the pod UID is identical to another one known, it may (but presently
|
||||
// doesn't) choose the same quota ID as other volumes in the pod.
|
||||
func AssignQuota(m mount.Interface, path string, poduid string, bytes *resource.Quantity) error {
|
||||
if bytes == nil {
|
||||
return fmt.Errorf("Attempting to assign null quota to %s", path)
|
||||
}
|
||||
ibytes := bytes.Value()
|
||||
if ok, err := SupportsQuotas(m, path); !ok {
|
||||
return fmt.Errorf("Quotas not supported on %s: %v", path, err)
|
||||
}
|
||||
quotaLock.Lock()
|
||||
defer quotaLock.Unlock()
|
||||
// Current policy is to set individual quotas on each volumes.
|
||||
// If we decide later that we want to assign one quota for all
|
||||
// volumes in a pod, we can simply remove this line of code.
|
||||
// If and when we decide permanently that we're going to adop
|
||||
// one quota per volume, we can rip all of the pod code out.
|
||||
poduid = string(uuid.NewUUID())
|
||||
if pod, ok := dirPodMap[path]; ok && pod != poduid {
|
||||
return fmt.Errorf("Requesting quota on existing directory %s but different pod %s %s", path, pod, poduid)
|
||||
}
|
||||
oid, ok := podQuotaMap[poduid]
|
||||
if ok {
|
||||
if quotaSizeMap[oid] != ibytes {
|
||||
return fmt.Errorf("Requesting quota of different size: old %v new %v", quotaSizeMap[oid], bytes)
|
||||
}
|
||||
} else {
|
||||
oid = common.BadQuotaID
|
||||
}
|
||||
id, err := createProjectID(path, oid)
|
||||
if err == nil {
|
||||
if oid != common.BadQuotaID && oid != id {
|
||||
return fmt.Errorf("Attempt to reassign quota %v to %v", oid, id)
|
||||
}
|
||||
// When enforcing quotas are enabled, we'll condition this
|
||||
// on their being disabled also.
|
||||
if ibytes > 0 {
|
||||
ibytes = -1
|
||||
}
|
||||
if err = setQuotaOnDir(path, id, ibytes); err == nil {
|
||||
quotaPodMap[id] = poduid
|
||||
quotaSizeMap[id] = ibytes
|
||||
podQuotaMap[poduid] = id
|
||||
dirQuotaMap[path] = id
|
||||
dirPodMap[path] = poduid
|
||||
podDirCountMap[poduid]++
|
||||
klog.V(4).Infof("Assigning quota ID %d (%d) to %s", id, ibytes, path)
|
||||
return nil
|
||||
}
|
||||
removeProjectID(path, id)
|
||||
}
|
||||
return fmt.Errorf("Assign quota FAILED %v", err)
|
||||
}
|
||||
|
||||
// GetConsumption -- retrieve the consumption (in bytes) of the directory
func GetConsumption(path string) (*resource.Quantity, error) {
    // Note that we actually need to hold the lock at least through
    // running the quota command, so it can't get recycled behind our back
    quotaLock.Lock()
    defer quotaLock.Unlock()
    applier := getApplier(path)
    // No applier means the directory is not under quota management
    if applier == nil {
        return nil, nil
    }
    ibytes, err := applier.GetConsumption(path, dirQuotaMap[path])
    if err != nil {
        return nil, err
    }
    return resource.NewQuantity(ibytes, resource.DecimalSI), nil
}

// GetInodes -- retrieve the number of inodes in use under the directory
func GetInodes(path string) (*resource.Quantity, error) {
    // Note that we actually need to hold the lock at least through
    // running the quota command, so it can't get recycled behind our back
    quotaLock.Lock()
    defer quotaLock.Unlock()
    applier := getApplier(path)
    // No applier means the directory is not under quota management
    if applier == nil {
        return nil, nil
    }
    inodes, err := applier.GetInodes(path, dirQuotaMap[path])
    if err != nil {
        return nil, err
    }
    return resource.NewQuantity(inodes, resource.DecimalSI), nil
}

// ClearQuota -- remove the quota assigned to a directory
func ClearQuota(m mount.Interface, path string) error {
    klog.V(3).Infof("ClearQuota %s", path)
    if !enabledQuotasForMonitoring() {
        return fmt.Errorf("ClearQuota called, but quotas disabled")
    }
    quotaLock.Lock()
    defer quotaLock.Unlock()
    poduid, ok := dirPodMap[path]
    if !ok {
        // Nothing in the map either means that there was no
        // quota to begin with or that we're clearing a
        // stale directory, so if we find a quota, just remove it.
        // The process of clearing the quota requires that an applier
        // be found, which needs to be cleaned up.
        defer delete(supportsQuotasMap, path)
        defer clearApplier(path)
        return clearQuotaOnDir(m, path)
    }
    _, ok = podQuotaMap[poduid]
    if !ok {
        return fmt.Errorf("ClearQuota: no quota available for %s", path)
    }
    projid, err := getQuotaOnDir(m, path)
    if projid != dirQuotaMap[path] {
        return fmt.Errorf("expected quota ID %v on dir %s does not match actual %v", dirQuotaMap[path], path, projid)
    }
    count, ok := podDirCountMap[poduid]
    if count <= 1 || !ok {
        err = clearQuotaOnDir(m, path)
        // This error should be noted; we still need to clean up
        // and otherwise handle it in the same way.
        if err != nil {
            klog.V(3).Infof("Unable to clear quota %v %s: %v", dirQuotaMap[path], path, err)
        }
        delete(quotaSizeMap, podQuotaMap[poduid])
        delete(quotaPodMap, podQuotaMap[poduid])
        delete(podDirCountMap, poduid)
        delete(podQuotaMap, poduid)
    } else {
        err = removeProjectID(path, projid)
        podDirCountMap[poduid]--
        klog.V(4).Infof("Not clearing quota for pod %s; still %v dirs outstanding", poduid, podDirCountMap[poduid])
    }
    delete(dirPodMap, path)
    delete(dirQuotaMap, path)
    delete(supportsQuotasMap, path)
    clearApplier(path)
    if err != nil {
        return fmt.Errorf("unable to clear quota for %s: %v", path, err)
    }
    return nil
}
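
Taken together, SupportsQuotas, AssignQuota, GetConsumption, GetInodes, and
ClearQuota form the lifecycle of quota-based monitoring for one volume
directory. The following is a minimal sketch, not part of this change, of how
a caller inside this package might drive that lifecycle; the path, pod UID,
and 1Gi limit are made up for illustration.

// monitorVolumeWithQuota is a hypothetical driver for the API above.
func monitorVolumeWithQuota(m mount.Interface, volPath string, podUID string) error {
    limit := resource.NewQuantity(1<<30, resource.BinarySI) // assumed 1Gi limit
    if ok, err := SupportsQuotas(m, volPath); !ok {
        // Fall back to du-style accounting on filesystems without project quotas.
        return err
    }
    if err := AssignQuota(m, volPath, podUID, limit); err != nil {
        return err
    }
    used, err := GetConsumption(volPath)
    if err != nil {
        return err
    }
    if used != nil { // nil means the directory is not under quota management
        klog.V(4).Infof("volume %s is consuming %s", volPath, used.String())
    }
    // At volume teardown, release the quota and forget the directory.
    return ClearQuota(m, volPath)
}
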
@ -0,0 +1,754 @@
// +build linux

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package quota

import (
    "fmt"
    "io/ioutil"
    "os"
    "strings"
    "testing"

    "k8s.io/apimachinery/pkg/api/resource"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    featuregatetesting "k8s.io/component-base/featuregate/testing"
    "k8s.io/kubernetes/pkg/features"
    "k8s.io/kubernetes/pkg/util/mount"
    "k8s.io/kubernetes/pkg/volume/util/quota/common"
)

const dummyMountData = `sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
devtmpfs /dev devtmpfs rw,nosuid,size=6133536k,nr_inodes=1533384,mode=755 0 0
tmpfs /tmp tmpfs rw,nosuid,nodev 0 0
/dev/sda1 /boot ext4 rw,relatime 0 0
/dev/mapper/fedora-root / ext4 rw,noatime 0 0
/dev/mapper/fedora-home /home ext4 rw,noatime 0 0
/dev/sdb1 /virt xfs rw,noatime,attr2,inode64,usrquota,prjquota 0 0
`

const dummyMountDataPquota = `tmpfs /tmp tmpfs rw,nosuid,nodev 0 0
/dev/sda1 /boot ext4 rw,relatime 0 0
/dev/mapper/fedora-root / ext4 rw,noatime 0 0
/dev/mapper/fedora-home /home ext4 rw,noatime 0 0
/dev/sdb1 /mnt/virt xfs rw,noatime,attr2,inode64,usrquota,prjquota 0 0
`

const dummyMountDataNoPquota = `tmpfs /tmp tmpfs rw,nosuid,nodev 0 0
/dev/sda1 /boot ext4 rw,relatime 0 0
/dev/mapper/fedora-root / ext4 rw,noatime 0 0
/dev/mapper/fedora-home /home ext4 rw,noatime 0 0
/dev/sdb1 /mnt/virt xfs rw,noatime,attr2,inode64,usrquota 0 0
`

const dummyMountTest = `/dev/sda1 / ext4 rw,noatime 0 0
/dev/sda2 /quota ext4 rw,prjquota 0 0
/dev/sda3 /noquota ext4 rw 0 0
`

func dummyFakeMount1() mount.Interface {
    return &mount.FakeMounter{
        MountPoints: []mount.MountPoint{
            {
                Device: "tmpfs",
                Path:   "/tmp",
                Type:   "tmpfs",
                Opts:   []string{"rw", "nosuid", "nodev"},
            },
            {
                Device: "/dev/sda1",
                Path:   "/boot",
                Type:   "ext4",
                Opts:   []string{"rw", "relatime"},
            },
            {
                Device: "/dev/mapper/fedora-root",
                Path:   "/",
                Type:   "ext4",
                Opts:   []string{"rw", "relatime"},
            },
            {
                Device: "/dev/mapper/fedora-home",
                Path:   "/home",
                Type:   "ext4",
                Opts:   []string{"rw", "relatime"},
            },
            {
                Device: "/dev/sdb1",
                Path:   "/mnt/virt",
                Type:   "xfs",
                Opts:   []string{"rw", "relatime", "attr2", "inode64", "usrquota", "prjquota"},
            },
        },
    }
}

type backingDevTest struct {
    path           string
    mountdata      string
    expectedResult string
    expectFailure  bool
}

type mountpointTest struct {
    path           string
    mounter        mount.Interface
    expectedResult string
    expectFailure  bool
}

func testBackingDev1(testcase backingDevTest) error {
    tmpfile, err := ioutil.TempFile("", "backingdev")
    if err != nil {
        return err
    }
    defer os.Remove(tmpfile.Name())
    if _, err = tmpfile.WriteString(testcase.mountdata); err != nil {
        return err
    }

    backingDev, err := detectBackingDevInternal(testcase.path, tmpfile.Name())
    if err != nil {
        if testcase.expectFailure {
            return nil
        }
        return err
    }
    if testcase.expectFailure {
        return fmt.Errorf("path %s expected to fail; succeeded and got %s", testcase.path, backingDev)
    }
    if backingDev == testcase.expectedResult {
        return nil
    }
    return fmt.Errorf("mismatch: path %s expects mountpoint %s got %s", testcase.path, testcase.expectedResult, backingDev)
}

func TestBackingDev(t *testing.T) {
    testcasesBackingDev := map[string]backingDevTest{
        "Root": {
            "/",
            dummyMountData,
            "/dev/mapper/fedora-root",
            false,
        },
        "tmpfs": {
            "/tmp",
            dummyMountData,
            "tmpfs",
            false,
        },
        "user filesystem": {
            "/virt",
            dummyMountData,
            "/dev/sdb1",
            false,
        },
        "empty mountpoint": {
            "",
            dummyMountData,
            "",
            true,
        },
        "bad mountpoint": {
            "/kiusf",
            dummyMountData,
            "",
            true,
        },
    }
    for name, testcase := range testcasesBackingDev {
        err := testBackingDev1(testcase)
        if err != nil {
            t.Errorf("%s failed: %s", name, err.Error())
        }
    }
}

func TestDetectMountPoint(t *testing.T) {
    testcasesMount := map[string]mountpointTest{
        "Root": {
            "/",
            dummyFakeMount1(),
            "/",
            false,
        },
        "(empty)": {
            "",
            dummyFakeMount1(),
            "/",
            false,
        },
        "(invalid)": {
            "",
            dummyFakeMount1(),
            "/",
            false,
        },
        "/usr": {
            "/usr",
            dummyFakeMount1(),
            "/",
            false,
        },
        "/var/tmp": {
            "/var/tmp",
            dummyFakeMount1(),
            "/",
            false,
        },
    }
    for name, testcase := range testcasesMount {
        mountpoint, err := detectMountpointInternal(testcase.mounter, testcase.path)
        if err == nil && testcase.expectFailure {
            t.Errorf("Case %s expected failure, but succeeded, returning mountpoint %s", name, mountpoint)
        } else if err != nil {
            t.Errorf("Case %s failed: %s", name, err.Error())
        } else if mountpoint != testcase.expectedResult {
            t.Errorf("Case %s got mountpoint %s, expected %s", name, mountpoint, testcase.expectedResult)
        }
    }
}

var dummyMountPoints = []mount.MountPoint{
    {
        Device: "/dev/sda2",
        Path:   "/quota1",
        Type:   "ext4",
        Opts:   []string{"rw", "relatime", "prjquota"},
    },
    {
        Device: "/dev/sda3",
        Path:   "/quota2",
        Type:   "ext4",
        Opts:   []string{"rw", "relatime", "prjquota"},
    },
    {
        Device: "/dev/sda3",
        Path:   "/noquota",
        Type:   "ext4",
        Opts:   []string{"rw", "relatime"},
    },
    {
        Device: "/dev/sda1",
        Path:   "/",
        Type:   "ext4",
        Opts:   []string{"rw", "relatime"},
    },
}

func dummyQuotaTest() mount.Interface {
    return &mount.FakeMounter{
        MountPoints: dummyMountPoints,
    }
}

func dummySetFSInfo(path string) {
    if enabledQuotasForMonitoring() {
        for _, mount := range dummyMountPoints {
            if strings.HasPrefix(path, mount.Path) {
                mountpointMap[path] = mount.Path
                backingDevMap[path] = mount.Device
                return
            }
        }
    }
}

type VolumeProvider1 struct{}

type VolumeProvider2 struct{}

type testVolumeQuota struct{}

func logAllMaps(where string) {
    fmt.Printf("Maps at %s\n", where)
    fmt.Printf("  Map podQuotaMap contents:\n")
    for key, val := range podQuotaMap {
        fmt.Printf("    %v -> %v\n", key, val)
    }
    fmt.Printf("  Map dirQuotaMap contents:\n")
    for key, val := range dirQuotaMap {
        fmt.Printf("    %v -> %v\n", key, val)
    }
    fmt.Printf("  Map quotaPodMap contents:\n")
    for key, val := range quotaPodMap {
        fmt.Printf("    %v -> %v\n", key, val)
    }
    fmt.Printf("  Map dirPodMap contents:\n")
    for key, val := range dirPodMap {
        fmt.Printf("    %v -> %v\n", key, val)
    }
    fmt.Printf("  Map devApplierMap contents:\n")
    for key, val := range devApplierMap {
        fmt.Printf("    %v -> %v\n", key, val)
    }
    fmt.Printf("  Map dirApplierMap contents:\n")
    for key, val := range dirApplierMap {
        fmt.Printf("    %v -> %v\n", key, val)
    }
    fmt.Printf("  Map podDirCountMap contents:\n")
    for key, val := range podDirCountMap {
        fmt.Printf("    %v -> %v\n", key, val)
    }
    fmt.Printf("  Map quotaSizeMap contents:\n")
    for key, val := range quotaSizeMap {
        fmt.Printf("    %v -> %v\n", key, val)
    }
    fmt.Printf("  Map supportsQuotasMap contents:\n")
    for key, val := range supportsQuotasMap {
        fmt.Printf("    %v -> %v\n", key, val)
    }
    fmt.Printf("  Map backingDevMap contents:\n")
    for key, val := range backingDevMap {
        fmt.Printf("    %v -> %v\n", key, val)
    }
    fmt.Printf("  Map mountpointMap contents:\n")
    for key, val := range mountpointMap {
        fmt.Printf("    %v -> %v\n", key, val)
    }
    fmt.Printf("End maps %s\n", where)
}

var testIDQuotaMap = make(map[common.QuotaID]string)
var testQuotaIDMap = make(map[string]common.QuotaID)

func (*VolumeProvider1) GetQuotaApplier(mountpoint string, backingDev string) common.LinuxVolumeQuotaApplier {
    if strings.HasPrefix(mountpoint, "/quota1") {
        return testVolumeQuota{}
    }
    return nil
}

func (*VolumeProvider2) GetQuotaApplier(mountpoint string, backingDev string) common.LinuxVolumeQuotaApplier {
    if strings.HasPrefix(mountpoint, "/quota2") {
        return testVolumeQuota{}
    }
    return nil
}

func (v testVolumeQuota) SetQuotaOnDir(dir string, id common.QuotaID, _ int64) error {
    odir, ok := testIDQuotaMap[id]
    if ok && dir != odir {
        return fmt.Errorf("ID %v is already in use", id)
    }
    oid, ok := testQuotaIDMap[dir]
    if ok && id != oid {
        return fmt.Errorf("directory %s already has a quota applied", dir)
    }
    testQuotaIDMap[dir] = id
    testIDQuotaMap[id] = dir
    return nil
}

func (v testVolumeQuota) GetQuotaOnDir(path string) (common.QuotaID, error) {
    id, ok := testQuotaIDMap[path]
    if ok {
        return id, nil
    }
    return common.BadQuotaID, fmt.Errorf("no quota available for %s", path)
}

func (v testVolumeQuota) QuotaIDIsInUse(id common.QuotaID) (bool, error) {
    if _, ok := testIDQuotaMap[id]; ok {
        return true, nil
    }
    // Intended to reject some candidate IDs; note that as written both
    // branches below return false, so only IDs already in the map are
    // ever reported as in use.
    if id%3 == 0 {
        return false, nil
    }
    return false, nil
}

func (v testVolumeQuota) GetConsumption(_ string, _ common.QuotaID) (int64, error) {
    return 4096, nil
}

func (v testVolumeQuota) GetInodes(_ string, _ common.QuotaID) (int64, error) {
    return 1, nil
}

func fakeSupportsQuotas(path string) (bool, error) {
    dummySetFSInfo(path)
    return SupportsQuotas(dummyQuotaTest(), path)
}

func fakeAssignQuota(path string, poduid string, bytes int64) error {
    dummySetFSInfo(path)
    return AssignQuota(dummyQuotaTest(), path, poduid, resource.NewQuantity(bytes, resource.DecimalSI))
}

func fakeClearQuota(path string) error {
    dummySetFSInfo(path)
    return ClearQuota(dummyQuotaTest(), path)
}

type quotaTestCase struct {
    path                             string
    poduid                           string
    bytes                            int64
    op                               string
    expectedProjects                 string
    expectedProjid                   string
    supportsQuota                    bool
    expectsSetQuota                  bool
    deltaExpectedPodQuotaCount       int
    deltaExpectedDirQuotaCount       int
    deltaExpectedQuotaPodCount       int
    deltaExpectedDirPodCount         int
    deltaExpectedDevApplierCount     int
    deltaExpectedDirApplierCount     int
    deltaExpectedPodDirCountCount    int
    deltaExpectedQuotaSizeCount      int
    deltaExpectedSupportsQuotasCount int
    deltaExpectedBackingDevCount     int
    deltaExpectedMountpointCount     int
}

const (
    projectsHeader = `# This is a /etc/projects header
1048578:/quota/d
`
    projects1 = `1048577:/quota1/a
`
    projects2 = `1048577:/quota1/a
1048580:/quota1/b
`
    projects3 = `1048577:/quota1/a
1048580:/quota1/b
1048581:/quota2/b
`
    projects4 = `1048577:/quota1/a
1048581:/quota2/b
`
    projects5 = `1048581:/quota2/b
`

    projidHeader = `# This is a /etc/projid header
xxxxxx:1048579
`
    projid1 = `volume1048577:1048577
`
    projid2 = `volume1048577:1048577
volume1048580:1048580
`
    projid3 = `volume1048577:1048577
volume1048580:1048580
volume1048581:1048581
`
    projid4 = `volume1048577:1048577
volume1048581:1048581
`
    projid5 = `volume1048581:1048581
`
)

var quotaTestCases = []quotaTestCase{
    {
        "/quota1/a", "", 1024, "Supports", "", "",
        true, true, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1,
    },
    {
        "/quota1/a", "", 1024, "Set", projects1, projid1,
        true, true, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0,
    },
    {
        "/quota1/b", "x", 1024, "Set", projects2, projid2,
        true, true, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,
    },
    {
        "/quota2/b", "x", 1024, "Set", projects3, projid3,
        true, true, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    },
    {
        "/quota1/b", "x", 1024, "Set", projects3, projid3,
        true, false, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    },
    {
        "/quota1/b", "", 1024, "Clear", projects4, projid4,
        true, true, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1,
    },
    {
        "/noquota/a", "", 1024, "Supports", projects4, projid4,
        false, false, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    },
    {
        "/quota1/a", "", 1024, "Clear", projects5, projid5,
        true, true, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1,
    },
    {
        "/quota1/a", "", 1024, "Clear", projects5, projid5,
        true, false, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    },
    {
        "/quota2/b", "", 1024, "Clear", "", "",
        true, true, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1,
    },
}

func compareProjectsFiles(t *testing.T, testcase quotaTestCase, projectsFile string, projidFile string, enabled bool) {
    bytes, err := ioutil.ReadFile(projectsFile)
    if err != nil {
        t.Error(err.Error())
    } else {
        s := string(bytes)
        p := projectsHeader
        if enabled {
            p += testcase.expectedProjects
        }
        if s != p {
            t.Errorf("Case %v /etc/projects miscompare: expected\n`%s`\ngot\n`%s`\n", testcase.path, p, s)
        }
    }
    bytes, err = ioutil.ReadFile(projidFile)
    if err != nil {
        t.Error(err.Error())
    } else {
        s := string(bytes)
        p := projidHeader
        if enabled {
            p += testcase.expectedProjid
        }
        if s != p {
            t.Errorf("Case %v /etc/projid miscompare: expected\n`%s`\ngot\n`%s`\n", testcase.path, p, s)
        }
    }
}

func runCaseEnabled(t *testing.T, testcase quotaTestCase, seq int) bool {
    fail := false
    var err error
    switch testcase.op {
    case "Supports":
        supports, err := fakeSupportsQuotas(testcase.path)
        if err != nil {
            fail = true
            t.Errorf("Case %v (%s, %v) got error in fakeSupportsQuotas: %v", seq, testcase.path, true, err)
        }
        if supports != testcase.supportsQuota {
            fail = true
            t.Errorf("Case %v (%s, %v) fakeSupportsQuotas got %v, expect %v", seq, testcase.path, true, supports, testcase.supportsQuota)
        }
        return fail
    case "Set":
        err = fakeAssignQuota(testcase.path, testcase.poduid, testcase.bytes)
    case "Clear":
        err = fakeClearQuota(testcase.path)
    case "GetConsumption":
        _, err = GetConsumption(testcase.path)
    case "GetInodes":
        _, err = GetInodes(testcase.path)
    default:
        t.Errorf("Case %v (%s, %v) unknown operation %s", seq, testcase.path, true, testcase.op)
        return true
    }
    if err != nil && testcase.expectsSetQuota {
        fail = true
        t.Errorf("Case %v (%s, %v) %s expected to succeed but failed: %v", seq, testcase.path, true, testcase.op, err)
    } else if err == nil && !testcase.expectsSetQuota {
        fail = true
        t.Errorf("Case %v (%s, %v) %s expected to fail but succeeded", seq, testcase.path, true, testcase.op)
    }
    return fail
}

func runCaseDisabled(t *testing.T, testcase quotaTestCase, seq int) bool {
    var err error
    var supports bool
    switch testcase.op {
    case "Supports":
        if supports, err = fakeSupportsQuotas(testcase.path); supports {
            t.Errorf("Case %v (%s, %v) supports quotas but shouldn't", seq, testcase.path, false)
            return true
        }
        return false
    case "Set":
        err = fakeAssignQuota(testcase.path, testcase.poduid, testcase.bytes)
    case "Clear":
        err = fakeClearQuota(testcase.path)
    case "GetConsumption":
        _, err = GetConsumption(testcase.path)
    case "GetInodes":
        _, err = GetInodes(testcase.path)
    default:
        t.Errorf("Case %v (%s, %v) unknown operation %s", seq, testcase.path, false, testcase.op)
        return true
    }
    if err == nil {
        t.Errorf("Case %v (%s, %v) %s expected to fail with quotas disabled, but succeeded", seq, testcase.path, false, testcase.op)
        return true
    }
    return false
}

func testAddRemoveQuotas(t *testing.T, enabled bool) {
    defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LocalStorageCapacityIsolationFSQuotaMonitoring, enabled)()
    tmpProjectsFile, err := ioutil.TempFile("", "projects")
    if err == nil {
        _, err = tmpProjectsFile.WriteString(projectsHeader)
    }
    if err != nil {
        t.Errorf("Unable to create fake projects file")
    }
    projectsFile = tmpProjectsFile.Name()
    tmpProjectsFile.Close()
    tmpProjidFile, err := ioutil.TempFile("", "projid")
    if err == nil {
        _, err = tmpProjidFile.WriteString(projidHeader)
    }
    if err != nil {
        t.Errorf("Unable to create fake projid file")
    }
    projidFile = tmpProjidFile.Name()
    tmpProjidFile.Close()
    providers = []common.LinuxVolumeQuotaProvider{
        &VolumeProvider1{},
        &VolumeProvider2{},
    }
    for k := range podQuotaMap {
        delete(podQuotaMap, k)
    }
    for k := range dirQuotaMap {
        delete(dirQuotaMap, k)
    }
    for k := range quotaPodMap {
        delete(quotaPodMap, k)
    }
    for k := range dirPodMap {
        delete(dirPodMap, k)
    }
    for k := range devApplierMap {
        delete(devApplierMap, k)
    }
    for k := range dirApplierMap {
        delete(dirApplierMap, k)
    }
    for k := range podDirCountMap {
        delete(podDirCountMap, k)
    }
    for k := range quotaSizeMap {
        delete(quotaSizeMap, k)
    }
    for k := range supportsQuotasMap {
        delete(supportsQuotasMap, k)
    }
    for k := range backingDevMap {
        delete(backingDevMap, k)
    }
    for k := range mountpointMap {
        delete(mountpointMap, k)
    }
    for k := range testIDQuotaMap {
        delete(testIDQuotaMap, k)
    }
    for k := range testQuotaIDMap {
        delete(testQuotaIDMap, k)
    }
    expectedPodQuotaCount := 0
    expectedDirQuotaCount := 0
    expectedQuotaPodCount := 0
    expectedDirPodCount := 0
    expectedDevApplierCount := 0
    expectedDirApplierCount := 0
    expectedPodDirCountCount := 0
    expectedQuotaSizeCount := 0
    expectedSupportsQuotasCount := 0
    expectedBackingDevCount := 0
    expectedMountpointCount := 0
    for seq, testcase := range quotaTestCases {
        if enabled {
            expectedPodQuotaCount += testcase.deltaExpectedPodQuotaCount
            expectedDirQuotaCount += testcase.deltaExpectedDirQuotaCount
            expectedQuotaPodCount += testcase.deltaExpectedQuotaPodCount
            expectedDirPodCount += testcase.deltaExpectedDirPodCount
            expectedDevApplierCount += testcase.deltaExpectedDevApplierCount
            expectedDirApplierCount += testcase.deltaExpectedDirApplierCount
            expectedPodDirCountCount += testcase.deltaExpectedPodDirCountCount
            expectedQuotaSizeCount += testcase.deltaExpectedQuotaSizeCount
            expectedSupportsQuotasCount += testcase.deltaExpectedSupportsQuotasCount
            expectedBackingDevCount += testcase.deltaExpectedBackingDevCount
            expectedMountpointCount += testcase.deltaExpectedMountpointCount
        }
        fail := false
        if enabled {
            fail = runCaseEnabled(t, testcase, seq)
        } else {
            fail = runCaseDisabled(t, testcase, seq)
        }

        compareProjectsFiles(t, testcase, projectsFile, projidFile, enabled)
        if len(podQuotaMap) != expectedPodQuotaCount {
            fail = true
            t.Errorf("Case %v (%s, %v) podQuotaCount mismatch: got %v, expect %v", seq, testcase.path, enabled, len(podQuotaMap), expectedPodQuotaCount)
        }
        if len(dirQuotaMap) != expectedDirQuotaCount {
            fail = true
            t.Errorf("Case %v (%s, %v) dirQuotaCount mismatch: got %v, expect %v", seq, testcase.path, enabled, len(dirQuotaMap), expectedDirQuotaCount)
        }
        if len(quotaPodMap) != expectedQuotaPodCount {
            fail = true
            t.Errorf("Case %v (%s, %v) quotaPodCount mismatch: got %v, expect %v", seq, testcase.path, enabled, len(quotaPodMap), expectedQuotaPodCount)
        }
        if len(dirPodMap) != expectedDirPodCount {
            fail = true
            t.Errorf("Case %v (%s, %v) dirPodCount mismatch: got %v, expect %v", seq, testcase.path, enabled, len(dirPodMap), expectedDirPodCount)
        }
        if len(devApplierMap) != expectedDevApplierCount {
            fail = true
            t.Errorf("Case %v (%s, %v) devApplierCount mismatch: got %v, expect %v", seq, testcase.path, enabled, len(devApplierMap), expectedDevApplierCount)
        }
        if len(dirApplierMap) != expectedDirApplierCount {
            fail = true
            t.Errorf("Case %v (%s, %v) dirApplierCount mismatch: got %v, expect %v", seq, testcase.path, enabled, len(dirApplierMap), expectedDirApplierCount)
        }
        if len(podDirCountMap) != expectedPodDirCountCount {
            fail = true
            t.Errorf("Case %v (%s, %v) podDirCountCount mismatch: got %v, expect %v", seq, testcase.path, enabled, len(podDirCountMap), expectedPodDirCountCount)
        }
        if len(quotaSizeMap) != expectedQuotaSizeCount {
            fail = true
            t.Errorf("Case %v (%s, %v) quotaSizeCount mismatch: got %v, expect %v", seq, testcase.path, enabled, len(quotaSizeMap), expectedQuotaSizeCount)
        }
        if len(supportsQuotasMap) != expectedSupportsQuotasCount {
            fail = true
            t.Errorf("Case %v (%s, %v) supportsQuotasCount mismatch: got %v, expect %v", seq, testcase.path, enabled, len(supportsQuotasMap), expectedSupportsQuotasCount)
        }
        if len(backingDevMap) != expectedBackingDevCount {
            fail = true
            t.Errorf("Case %v (%s, %v) backingDevCount mismatch: got %v, expect %v", seq, testcase.path, enabled, len(backingDevMap), expectedBackingDevCount)
        }
        if len(mountpointMap) != expectedMountpointCount {
            fail = true
            t.Errorf("Case %v (%s, %v) mountpointCount mismatch: got %v, expect %v", seq, testcase.path, enabled, len(mountpointMap), expectedMountpointCount)
        }
        if fail {
            logAllMaps(fmt.Sprintf("%v %s", seq, testcase.path))
        }
    }
    os.Remove(projectsFile)
    os.Remove(projidFile)
}

func TestAddRemoveQuotasEnabled(t *testing.T) {
    testAddRemoveQuotas(t, true)
}

func TestAddRemoveQuotasDisabled(t *testing.T) {
    testAddRemoveQuotas(t, false)
}

@ -0,0 +1,55 @@
// +build !linux

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package quota

import (
    "errors"

    "k8s.io/apimachinery/pkg/api/resource"
    "k8s.io/kubernetes/pkg/util/mount"
)

// Dummy quota implementation for systems that do not implement support
// for volume quotas

var errNotImplemented = errors.New("not implemented")

// SupportsQuotas -- dummy implementation
func SupportsQuotas(_ mount.Interface, _ string) (bool, error) {
    return false, errNotImplemented
}

// AssignQuota -- dummy implementation
func AssignQuota(_ mount.Interface, _ string, _ string, _ *resource.Quantity) error {
    return errNotImplemented
}

// GetConsumption -- dummy implementation
func GetConsumption(_ string) (*resource.Quantity, error) {
    return nil, errNotImplemented
}

// GetInodes -- dummy implementation
func GetInodes(_ string) (*resource.Quantity, error) {
    return nil, errNotImplemented
}

// ClearQuota -- dummy implementation
func ClearQuota(_ mount.Interface, _ string) error {
    return errNotImplemented
}

@ -540,3 +540,11 @@ func GetPluginMountDir(host volume.VolumeHost, name string) string {
    mntDir := filepath.Join(host.GetPluginDir(name), MountsInGlobalPDPath)
    return mntDir
}

// IsLocalEphemeralVolume determines whether the argument is a local ephemeral
// volume (GitRepo, ConfigMap, DownwardAPI, or a non-memory-backed EmptyDir)
// vs. some other type.
func IsLocalEphemeralVolume(volume v1.Volume) bool {
    return volume.GitRepo != nil ||
        (volume.EmptyDir != nil && volume.EmptyDir.Medium != v1.StorageMediumMemory) ||
        volume.ConfigMap != nil || volume.DownwardAPI != nil
}

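A minimal sketch (hypothetical example test, not part of this change) showing
the intended classification: a disk-backed emptyDir counts as local ephemeral
storage, while a memory-backed one does not.

func ExampleIsLocalEphemeralVolume() {
    disk := v1.Volume{Name: "scratch", VolumeSource: v1.VolumeSource{
        EmptyDir: &v1.EmptyDirVolumeSource{}}}
    ram := v1.Volume{Name: "shm", VolumeSource: v1.VolumeSource{
        EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory}}}
    fmt.Println(IsLocalEphemeralVolume(disk), IsLocalEphemeralVolume(ram))
    // Output: true false
}
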
@ -101,6 +101,13 @@ type Attributes struct {
    SupportsSELinux bool
}

// MounterArgs provides more easily extensible arguments to Mounter
type MounterArgs struct {
    FsGroup     *int64
    DesiredSize *resource.Quantity
    PodUID      string
}

// Mounter interface provides methods to set up/mount the volume.
type Mounter interface {
    // Uses Interface to provide the path for Docker binds.

@ -122,14 +129,14 @@ type Mounter interface {
    // content should be owned by 'fsGroup' so that it can be
    // accessed by the pod. This may be called more than once, so
    // implementations must be idempotent.
    SetUp(fsGroup *int64) error
    SetUp(mounterArgs MounterArgs) error
    // SetUpAt prepares and mounts/unpacks the volume to the
    // specified directory path, which may or may not exist yet.
    // The mount point and its content should be owned by
    // 'fsGroup' so that it can be accessed by the pod. This may
    // be called more than once, so implementations must be
    // idempotent.
    SetUpAt(dir string, fsGroup *int64) error
    SetUpAt(dir string, mounterArgs MounterArgs) error
    // GetAttributes returns the attributes of the mounter.
    // This function is called after SetUp()/SetUpAt().
    GetAttributes() Attributes

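A sketch (assumed helper name, not part of this change) of how a plugin adapts
to the new signature: existing behavior keeps reading FsGroup from the struct,
while a quota-aware volume type can consult DesiredSize and PodUID in the same
call without another interface change.

// setUpScratchDir is a hypothetical helper illustrating MounterArgs plumbing.
func setUpScratchDir(dir string, args MounterArgs) error {
    if err := os.MkdirAll(dir, 0750); err != nil {
        return err
    }
    if args.FsGroup != nil {
        // Ownership handling, analogous to what SetVolumeOwnership does for real plugins.
        if err := os.Chown(dir, -1, int(*args.FsGroup)); err != nil {
            return err
        }
    }
    if args.DesiredSize != nil {
        // A quota-aware plugin would assign a project quota of args.DesiredSize
        // for pod args.PodUID here.
    }
    return nil
}
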
@ -208,8 +208,8 @@ func (b *vsphereVolumeMounter) GetAttributes() volume.Attributes {
}

// SetUp attaches the disk and bind mounts to the volume path.
func (b *vsphereVolumeMounter) SetUp(fsGroup *int64) error {
    return b.SetUpAt(b.GetPath(), fsGroup)
func (b *vsphereVolumeMounter) SetUp(mounterArgs volume.MounterArgs) error {
    return b.SetUpAt(b.GetPath(), mounterArgs)
}

// Checks prior to mount operations to verify that the required components (binaries, etc.)

@ -220,7 +220,7 @@ func (b *vsphereVolumeMounter) CanMount() error {
}

// SetUp attaches the disk and bind mounts to the volume path.
func (b *vsphereVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
func (b *vsphereVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
    klog.V(5).Infof("vSphere volume setup %s to %s", b.volPath, dir)

    // TODO: handle failed mounts here.

@ -269,7 +269,7 @@ func (b *vsphereVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
        os.Remove(dir)
        return err
    }
    volume.SetVolumeOwnership(b, fsGroup)
    volume.SetVolumeOwnership(b, mounterArgs.FsGroup)
    klog.V(3).Infof("vSphere volume %s mounted to %s", b.volPath, dir)

    return nil

@ -123,7 +123,7 @@ func TestPlugin(t *testing.T) {
        t.Errorf("Got unexpected path: %s", path)
    }

    if err := mounter.SetUp(nil); err != nil {
    if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
        t.Errorf("Expected success, got: %v", err)
    }

@ -19,6 +19,8 @@ go_library(
        "node_problem_detector_linux.go",
        "resource_collector.go",
        "util.go",
        "util_xfs_linux.go",
        "util_xfs_unsupported.go",
    ],
    importpath = "k8s.io/kubernetes/test/e2e_node",
    visibility = ["//visibility:public"],

@ -41,7 +43,9 @@ go_library(
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
        "//staging/src/k8s.io/component-base/featuregate:go_default_library",
        "//staging/src/k8s.io/cri-api/pkg/apis:go_default_library",
        "//staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2:go_default_library",
        "//staging/src/k8s.io/kubelet/config/v1beta1:go_default_library",

@ -62,6 +66,7 @@ go_library(
        "//vendor/k8s.io/klog:go_default_library",
    ] + select({
        "@io_bazel_rules_go//go/platform:linux": [
            "//pkg/util/mount:go_default_library",
            "//pkg/util/procfs:go_default_library",
            "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
            "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",

@ -105,6 +110,7 @@ go_test(
        "node_perf_test.go",
        "pids_test.go",
        "pods_container_manager_test.go",
        "quota_lsci_test.go",
        "resource_metrics_test.go",
        "resource_usage_test.go",
        "restart_test.go",

@ -138,6 +144,8 @@ go_test(
        "//pkg/kubelet/metrics:go_default_library",
        "//pkg/kubelet/types:go_default_library",
        "//pkg/security/apparmor:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/volume/util/quota:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/scheduling/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",

@ -49,6 +49,7 @@ var NodeImageWhiteList = sets.NewString(
    busyboxImage,
    "k8s.gcr.io/busybox@sha256:4bdd623e848417d96127e16037743f0cd8b528c026e9175e22a84f639eca58ff",
    imageutils.GetE2EImage(imageutils.Nginx),
    imageutils.GetE2EImage(imageutils.Perl),
    imageutils.GetE2EImage(imageutils.ServeHostname),
    imageutils.GetE2EImage(imageutils.Netexec),
    imageutils.GetE2EImage(imageutils.Nonewprivs),

@ -0,0 +1,147 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e_node

import (
    "fmt"
    "path/filepath"
    "time"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/features"
    kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
    "k8s.io/kubernetes/pkg/util/mount"
    "k8s.io/kubernetes/pkg/volume/util/quota"
    "k8s.io/kubernetes/test/e2e/framework"
    imageutils "k8s.io/kubernetes/test/utils/image"

    . "github.com/onsi/ginkgo"
)

const (
    LSCIQuotaFeature = features.LocalStorageCapacityIsolationFSQuotaMonitoring
)

func runOneQuotaTest(f *framework.Framework, quotasRequested bool) {
    evictionTestTimeout := 10 * time.Minute
    sizeLimit := resource.MustParse("100Mi")
    useOverLimit := 101 /* MB */
    useUnderLimit := 99 /* MB */
    // TODO: remove hardcoded kubelet volume directory path
    // framework.TestContext.KubeVolumeDir is currently not populated for node e2e
    // As for why we do this: see the comment at isXfs.
    if isXfs("/var/lib/kubelet") {
        useUnderLimit = 50 /* MB */
    }
    priority := 0
    if quotasRequested {
        priority = 1
    }
    Context(fmt.Sprintf(testContextFmt, fmt.Sprintf("use quotas for LSCI monitoring (quotas enabled: %v)", quotasRequested)), func() {
        tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
            defer withFeatureGate(LSCIQuotaFeature, quotasRequested)()
            // TODO: remove hardcoded kubelet volume directory path
            // framework.TestContext.KubeVolumeDir is currently not populated for node e2e
            if quotasRequested && !supportsQuotas("/var/lib/kubelet") {
                // No point in running this as a positive test if quotas are not
                // enabled on the underlying filesystem.
                framework.Skipf("Cannot run LocalStorageCapacityIsolationQuotaMonitoring on filesystem without project quota enabled")
            }
            // Setting a threshold to 0% disables eviction on that signal; a
            // non-empty map is needed to override the default (due to omitempty).
            initialConfig.EvictionHard = map[string]string{"memory.available": "0%"}
            initialConfig.FeatureGates[string(LSCIQuotaFeature)] = quotasRequested
        })
        runEvictionTest(f, evictionTestTimeout, noPressure, noStarvedResource, logDiskMetrics, []podEvictSpec{
            {
                evictionPriority: priority, // This pod should be evicted for its emptyDir violation only if quotas are enabled
                pod: diskConcealingPod(fmt.Sprintf("emptydir-concealed-disk-over-sizelimit-quotas-%v", quotasRequested), useOverLimit, &v1.VolumeSource{
                    EmptyDir: &v1.EmptyDirVolumeSource{SizeLimit: &sizeLimit},
                }, v1.ResourceRequirements{}),
            },
            {
                evictionPriority: 0, // This pod should not be evicted because it uses less than its limit (test for quotas)
                pod: diskConcealingPod(fmt.Sprintf("emptydir-concealed-disk-under-sizelimit-quotas-%v", quotasRequested), useUnderLimit, &v1.VolumeSource{
                    EmptyDir: &v1.EmptyDirVolumeSource{SizeLimit: &sizeLimit},
                }, v1.ResourceRequirements{}),
            },
        })
    })
}

// LocalStorageCapacityIsolationQuotaMonitoring tests that quotas are
// used for monitoring rather than du. The mechanism is to create a
// pod that creates a file, deletes it, and writes data to it. If
// quotas are used to monitor, the kubelet will detect this
// deleted-but-in-use file; if du is used to monitor, it will not.
var _ = framework.KubeDescribe("LocalStorageCapacityIsolationQuotaMonitoring [Slow] [Serial] [Disruptive] [Feature:LocalStorageCapacityIsolationQuota][NodeFeature:LSCIQuotaMonitoring]", func() {
    f := framework.NewDefaultFramework("localstorage-quota-monitoring-test")
    runOneQuotaTest(f, true)
    runOneQuotaTest(f, false)
})

const (
    writeConcealedPodCommand = `
my $file = "%s.bin";
open OUT, ">$file" || die "Cannot open $file: $!\n";
unlink "$file" || die "Cannot unlink $file: $!\n";
my $a = "a";
foreach (1..20) { $a = "$a$a"; }
foreach (1..%d) { syswrite(OUT, $a); }
sleep 999999;`
)

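The Perl payload above relies on a classic Unix property: removing a file's
last directory entry does not free its blocks while a descriptor remains open.
A Go rendition of the same trick, illustrative only (the path and sizes are
made up), looks like this:

package main

import (
    "fmt"
    "os"
)

func main() {
    f, err := os.Create("/tmp/concealed.bin")
    if err != nil {
        panic(err)
    }
    // Unlink while the descriptor stays open: du no longer sees the file,
    // but a filesystem project quota still charges the blocks to it.
    if err := os.Remove(f.Name()); err != nil {
        panic(err)
    }
    buf := make([]byte, 1<<20) // 1 MiB chunks
    for i := 0; i < 101; i++ { // ~101 MiB, mirroring useOverLimit above
        if _, err := f.Write(buf); err != nil {
            panic(err)
        }
    }
    fmt.Println("holding concealed storage")
    select {} // block forever, like the Perl sleep 999999
}
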
// diskConcealingPod builds a pod that consumes disk space in a concealed
// (deleted-but-open) file. Perl is needed for testing eviction of pods using
// disk space in concealed files; the shell has no convenient way of
// performing I/O to a concealed file, and the busybox image doesn't contain Perl.
func diskConcealingPod(name string, diskConsumedMB int, volumeSource *v1.VolumeSource, resources v1.ResourceRequirements) *v1.Pod {
    path := ""
    volumeMounts := []v1.VolumeMount{}
    volumes := []v1.Volume{}
    if volumeSource != nil {
        path = volumeMountPath
        volumeMounts = []v1.VolumeMount{{MountPath: volumeMountPath, Name: volumeName}}
        volumes = []v1.Volume{{Name: volumeName, VolumeSource: *volumeSource}}
    }
    return &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("%s-pod", name)},
        Spec: v1.PodSpec{
            RestartPolicy: v1.RestartPolicyNever,
            Containers: []v1.Container{
                {
                    Image: imageutils.GetE2EImage(imageutils.Perl),
                    Name:  fmt.Sprintf("%s-container", name),
                    Command: []string{
                        "perl",
                        "-e",
                        fmt.Sprintf(writeConcealedPodCommand, filepath.Join(path, "file"), diskConsumedMB),
                    },
                    Resources:    resources,
                    VolumeMounts: volumeMounts,
                },
            },
            Volumes: volumes,
        },
    }
}

// supportsQuotas reports whether dir is backed by a filesystem with project
// quotas enabled. Don't bother returning an error; if something goes wrong,
// simply treat it as "no".
func supportsQuotas(dir string) bool {
    supportsQuota, err := quota.SupportsQuotas(mount.New(""), dir)
    return supportsQuota && err == nil
}

@ -34,7 +34,9 @@ import (
    apiequality "k8s.io/apimachinery/pkg/api/equality"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/sets"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/component-base/featuregate"
    internalapi "k8s.io/cri-api/pkg/apis"
    kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
    "k8s.io/kubernetes/pkg/features"

@ -62,6 +64,7 @@ var kubeletAddress = flag.String("kubelet-address", "http://127.0.0.1:10255", "H
var startServices = flag.Bool("start-services", true, "If true, start local node services")
var stopServices = flag.Bool("stop-services", true, "If true, stop local node services after running tests")
var busyboxImage = imageutils.GetE2EImage(imageutils.BusyBox)
var perlImage = imageutils.GetE2EImage(imageutils.Perl)

const (
    // Kubelet internal cgroup name for node allocatable cgroup.

@ -440,3 +443,15 @@ func reduceAllocatableMemoryUsage() {
    _, err := exec.Command("sudo", "sh", "-c", cmd).CombinedOutput()
    framework.ExpectNoError(err)
}

// withFeatureGate is the equivalent of featuregatetesting.SetFeatureGateDuringTest,
// which can't be used here because we're not in a testing context.
// This must be in a non-"_test" file to pass
// `make verify WHAT=test-featuregates`.
func withFeatureGate(feature featuregate.Feature, desired bool) func() {
    current := utilfeature.DefaultFeatureGate.Enabled(feature)
    utilfeature.DefaultMutableFeatureGate.Set(fmt.Sprintf("%s=%v", string(feature), desired))
    return func() {
        utilfeature.DefaultMutableFeatureGate.Set(fmt.Sprintf("%s=%v", string(feature), current))
    }
}

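A usage sketch (hypothetical wrapper, not part of this change): the returned
closure restores the previous gate value, so call sites typically defer it, as
runOneQuotaTest does above.

func withQuotaMonitoringGate(enabled bool, body func()) {
    defer withFeatureGate(features.LocalStorageCapacityIsolationFSQuotaMonitoring, enabled)()
    body() // runs with the gate forced to the requested value
}
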
@ -0,0 +1,74 @@
// +build linux

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e_node

import (
    "path/filepath"
    "syscall"

    "k8s.io/kubernetes/pkg/util/mount"
)

// detectMountpoint walks upward from path until it finds a likely mount
// point, falling back to "/".
func detectMountpoint(m mount.Interface, path string) string {
    path, err := filepath.Abs(path)
    if err == nil {
        path, err = filepath.EvalSymlinks(path)
    }
    if err != nil {
        return ""
    }
    for path != "" && path != "/" {
        isNotMount, err := m.IsLikelyNotMountPoint(path)
        if err != nil {
            return ""
        }
        if !isNotMount {
            return path
        }
        path = filepath.Dir(path)
    }
    return "/"
}

const (
    xfsMagic = 0x58465342
)

// XFS over-allocates and then eventually removes that excess allocation.
// That can lead to a file growing beyond its eventual size, causing
// an unnecessary eviction:
//
//    % ls -ls
//    total 32704
//    32704 -rw-r--r-- 1 rkrawitz rkrawitz 20971520 Jan 15 13:16 foo.bin
//
// This issue can be hit regardless of the means used to count storage.
// It is not present in ext4fs.
func isXfs(dir string) bool {
    mountpoint := detectMountpoint(mount.New(""), dir)
    if mountpoint == "" {
        return false
    }
    var buf syscall.Statfs_t
    err := syscall.Statfs(mountpoint, &buf)
    if err != nil {
        return false
    }
    return buf.Type == xfsMagic
}

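A standalone illustration (assumed paths; Linux on common architectures, where
Statfs_t.Type is an integer) of the same statfs(2) check isXfs uses: the
filesystem type magic identifies XFS without parsing mount tables.

package main

import (
    "fmt"
    "syscall"
)

const xfsSuperMagic = 0x58465342 // same magic value as xfsMagic above

func fsIsXfs(path string) bool {
    var buf syscall.Statfs_t
    if err := syscall.Statfs(path, &buf); err != nil {
        return false
    }
    return buf.Type == xfsSuperMagic
}

func main() {
    for _, p := range []string{"/", "/var/lib/kubelet"} {
        fmt.Printf("%-20s xfs=%v\n", p, fsIsXfs(p))
    }
}
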
@ -0,0 +1,23 @@
// +build !linux

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e_node

// isXfs is a stub for platforms where XFS detection is unsupported.
func isXfs(dir string) bool {
    return false
}

@ -171,6 +171,8 @@ const (
    // Pause - when these values are updated, also update cmd/kubelet/app/options/container_runtime.go
    // Pause image
    Pause
    // Perl image
    Perl
    // Porter image
    Porter
    // PortForwardTester image

@ -236,6 +238,7 @@ func initImageConfigs() map[int]Config {
    configs[NoSnatTestProxy] = Config{e2eRegistry, "no-snat-test-proxy", "1.0"}
    // Pause - when these values are updated, also update cmd/kubelet/app/options/container_runtime.go
    configs[Pause] = Config{gcRegistry, "pause", "3.1"}
    configs[Perl] = Config{dockerLibraryRegistry, "perl", "5.26"}
    configs[Porter] = Config{e2eRegistry, "porter", "1.0"}
    configs[PortForwardTester] = Config{e2eRegistry, "port-forward-tester", "1.0"}
    configs[Redis] = Config{e2eRegistry, "redis", "1.0"}