mirror of https://github.com/k3s-io/k3s

commit f737ad62ed — update import
parent dad09c1142
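Every hunk in this commit is the same mechanical rename: the helpers from k8s.io/kubernetes/pkg/volume/util/volumehelper now live directly in pkg/volume/util, the block-volume path handler moves to pkg/volume/util/volumepathhandler, and the recycler event recorder moves to pkg/volume/util/recyclerclient. In diff form the core import change is:

-import "k8s.io/kubernetes/pkg/volume/util/volumehelper"
+import volumeutil "k8s.io/kubernetes/pkg/volume/util"

The alias varies by file (util, volumeutil, or volutil), but the call sites are otherwise unchanged.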
@@ -2150,17 +2150,17 @@ func (c *Cloud) CreateDisk(volumeOptions *VolumeOptions) (KubernetesVolumeID, er
 	var createAZ string
 	if !volumeOptions.ZonePresent && !volumeOptions.ZonesPresent {
-		createAZ = volume.ChooseZoneForVolume(allZones, volumeOptions.PVCName)
+		createAZ = volumeutil.ChooseZoneForVolume(allZones, volumeOptions.PVCName)
 	}
 	if !volumeOptions.ZonePresent && volumeOptions.ZonesPresent {
 		if adminSetOfZones, err := volumeutil.ZonesToSet(volumeOptions.AvailabilityZones); err != nil {
 			return "", err
 		} else {
-			createAZ = volume.ChooseZoneForVolume(adminSetOfZones, volumeOptions.PVCName)
+			createAZ = volumeutil.ChooseZoneForVolume(adminSetOfZones, volumeOptions.PVCName)
 		}
 	}
 	if volumeOptions.ZonePresent && !volumeOptions.ZonesPresent {
-		if err := volume.ValidateZone(volumeOptions.AvailabilityZone); err != nil {
+		if err := volumeutil.ValidateZone(volumeOptions.AvailabilityZone); err != nil {
 			return "", err
 		}
 		createAZ = volumeOptions.AvailabilityZone

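ChooseZoneForVolume spreads PVCs across zones deterministically, so repeated provisioning of the same claim lands in the same zone while different claims spread out. The upstream implementation also special-cases StatefulSet-style ordinal names; the sketch below only illustrates the hashing idea and is not the vendored code:

package main

import (
	"fmt"
	"hash/fnv"
)

// chooseZoneSketch: illustrative only. Hash the PVC name so the choice
// is stable for a given claim but spread across the zone list.
func chooseZoneSketch(zones []string, pvcName string) string {
	h := fnv.New32a()
	h.Write([]byte(pvcName))
	return zones[int(h.Sum32())%len(zones)]
}

func main() {
	zones := []string{"us-east-1a", "us-east-1b", "us-east-1c"}
	fmt.Println(chooseZoneSketch(zones, "data-db-0")) // same input, same zone
}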
@@ -2476,7 +2476,7 @@ func (c *Cloud) ResizeDisk(
 	}
 	requestBytes := newSize.Value()
 	// AWS resizes in chunks of GiB (not GB)
-	requestGiB := volume.RoundUpSize(requestBytes, 1024*1024*1024)
+	requestGiB := volumeutil.RoundUpSize(requestBytes, 1024*1024*1024)
 	newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGiB))

 	// If disk already if of greater or equal size than requested we return
@@ -771,7 +771,7 @@ func (gce *GCECloud) ResizeDisk(diskToResize string, oldSize resource.Quantity,

 	requestBytes := newSize.Value()
 	// GCE resizes in chunks of GBs (not GiB)
-	requestGB := volume.RoundUpSize(requestBytes, 1000*1000*1000)
+	requestGB := volumeutil.RoundUpSize(requestBytes, 1000*1000*1000)
 	newSizeQuant := resource.MustParse(fmt.Sprintf("%dG", requestGB))

 	// If disk is already of size equal or greater than requested size, we simply return
@@ -412,7 +412,7 @@ func (os *OpenStack) ExpandVolume(volumeID string, oldSize resource.Quantity, ne

 	volSizeBytes := newSize.Value()
 	// Cinder works with gigabytes, convert to GiB with rounding up
-	volSizeGB := int(k8s_volume.RoundUpSize(volSizeBytes, 1024*1024*1024))
+	volSizeGB := int(volumeutil.RoundUpSize(volSizeBytes, 1024*1024*1024))
 	newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", volSizeGB))

 	// if volume size equals to or greater than the newSize, return nil
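All three providers round the requested byte count up to whole allocation units — AWS and Cinder in GiB (1024³ bytes), GCE in GB (1000³ bytes) — so a request that is not unit-aligned never shrinks. RoundUpSize is a one-line ceiling division; this sketch matches the semantics the call sites above rely on (signature assumed from those call sites):

package volumeutilsketch

// RoundUpSize rounds volumeSizeBytes up to a whole number of allocation
// units via ceiling division. For example, a 1.5 GiB request with a
// 1 GiB unit yields 2; GCE passes 1000*1000*1000, AWS and Cinder
// pass 1024*1024*1024.
func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 {
	return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}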
@@ -47,7 +47,7 @@ import (
 	"k8s.io/kubernetes/pkg/volume"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
 )

 // TimerConfig contains configuration of internal attach/detach timers and
@@ -137,7 +137,7 @@ func NewAttachDetachController(
 	eventBroadcaster.StartLogging(glog.Infof)
 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
 	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "attachdetach-controller"})
-	blkutil := volumeutil.NewBlockVolumePathHandler()
+	blkutil := volumepathhandler.NewBlockVolumePathHandler()

 	adc.desiredStateOfWorld = cache.NewDesiredStateOfWorld(&adc.volumePluginMgr)
 	adc.actualStateOfWorld = cache.NewActualStateOfWorld(&adc.volumePluginMgr)
@@ -361,7 +361,7 @@ func (adc *attachDetachController) populateDesiredStateOfWorld() error {
 				err)
 			continue
 		}
-		volumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
+		volumeName, err := volumeutil.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
 		if err != nil {
 			glog.Errorf(
 				"Failed to find unique name for volume %q, pod %q/%q: %v",
@@ -587,10 +587,10 @@ func (adc *attachDetachController) GetExec(pluginName string) mount.Exec {
 }

 func (adc *attachDetachController) addNodeToDswp(node *v1.Node, nodeName types.NodeName) {
-	if _, exists := node.Annotations[volumehelper.ControllerManagedAttachAnnotation]; exists {
+	if _, exists := node.Annotations[volumeutil.ControllerManagedAttachAnnotation]; exists {
 		keepTerminatedPodVolumes := false

-		if t, ok := node.Annotations[volumehelper.KeepTerminatedPodVolumesAnnotation]; ok {
+		if t, ok := node.Annotations[volumeutil.KeepTerminatedPodVolumesAnnotation]; ok {
 			keepTerminatedPodVolumes = (t == "true")
 		}

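Note the second rename layered on top of the first: the raw-block symlink helpers leave volume/util entirely and land in their own volumepathhandler package, so callers swap both the import and the constructor's package qualifier. A minimal usage sketch of the new spelling, grounded in the hunks above:

package main

import "k8s.io/kubernetes/pkg/volume/util/volumepathhandler"

func main() {
	// Same constructor as before the move; only the package changed
	// (previously volumeutil.NewBlockVolumePathHandler or
	// util.NewBlockVolumePathHandler, depending on the file's alias).
	blkutil := volumepathhandler.NewBlockVolumePathHandler()
	_ = blkutil
}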
@@ -31,8 +31,8 @@ import (
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/kubernetes/pkg/volume"
+	"k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 // ActualStateOfWorld defines a set of thread-safe operations supported on
@@ -275,7 +275,7 @@ func (asw *actualStateOfWorld) AddVolumeNode(
 			err)
 	}

-	volumeName, err = volumehelper.GetUniqueVolumeNameFromSpec(
+	volumeName, err = util.GetUniqueVolumeNameFromSpec(
 		attachableVolumePlugin, volumeSpec)
 	if err != nil {
 		return "", fmt.Errorf(
@@ -28,9 +28,9 @@ import (
 	"k8s.io/api/core/v1"
 	k8stypes "k8s.io/apimachinery/pkg/types"
 	"k8s.io/kubernetes/pkg/volume"
+	"k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
 	"k8s.io/kubernetes/pkg/volume/util/types"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 // DesiredStateOfWorld defines a set of thread-safe operations supported on
@@ -231,7 +231,7 @@ func (dsw *desiredStateOfWorld) AddPod(
 			err)
 	}

-	volumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(
+	volumeName, err := util.GetUniqueVolumeNameFromSpec(
 		attachableVolumePlugin, volumeSpec)
 	if err != nil {
 		return "", fmt.Errorf(
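GetUniqueVolumeNameFromSpec asks the plugin for the volume's name and prefixes it with the plugin name; the tests further down in this diff expect exactly that "<plugin>/<volume>" shape ("fake-plugin/" + the pod's volume name). A sketch of the contract, with the plugin interface reduced to the two methods this helper needs (the real one takes a *volume.Spec):

package volumeutilsketch

import "fmt"

// volumeNamer stands in for the attachable volume plugin; only the
// methods used by this helper are sketched.
type volumeNamer interface {
	GetPluginName() string
	GetVolumeName(specName string) (string, error)
}

// getUniqueVolumeNameSketch mirrors the "<pluginName>/<volumeName>"
// contract that the fake-plugin tests below assert.
func getUniqueVolumeNameSketch(p volumeNamer, specName string) (string, error) {
	volumeName, err := p.GetVolumeName(specName)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%s/%s", p.GetPluginName(), volumeName), nil
}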
@@ -33,7 +33,7 @@ import (
 	"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
 	"k8s.io/kubernetes/pkg/controller/volume/attachdetach/util"
 	"k8s.io/kubernetes/pkg/volume"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	volutil "k8s.io/kubernetes/pkg/volume/util"
 )

 // DesiredStateOfWorldPopulator periodically verifies that the pods in the
@@ -133,7 +133,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
 				true /* default volume action */)

 			if volumeActionFlag {
-				informerPodUID := volumehelper.GetUniquePodName(informerPod)
+				informerPodUID := volutil.GetUniquePodName(informerPod)
 				// Check whether the unique identifier of the pod from dsw matches the one retrieved from pod informer
 				if informerPodUID == dswPodUID {
 					glog.V(10).Infof("Verified pod %q (UID %q) from dsw exists in pod informer.", dswPodKey, dswPodUID)
@@ -158,7 +158,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndAddActivePods() {
 	dswp.timeOfLastListPods = time.Now()

 	for _, pod := range pods {
-		if volumehelper.IsPodTerminated(pod, pod.Status) {
+		if volutil.IsPodTerminated(pod, pod.Status) {
 			// Do not add volumes for terminated pods
 			continue
 		}

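IsPodTerminated appears at every point where a controller must decide whether a pod still pins its volumes. Conceptually it treats a pod as terminated once it has reached a final phase; the sketch below captures that simplified rule only — the upstream predicate also weighs a set DeletionTimestamp against containers that are still running:

package volumeutilsketch

import "k8s.io/api/core/v1"

// isPodTerminatedSketch: simplified — a pod in a final phase no longer
// needs its volumes. The real helper additionally treats a deleting pod
// with no running containers as terminated.
func isPodTerminatedSketch(pod *v1.Pod, status v1.PodStatus) bool {
	return status.Phase == v1.PodFailed || status.Phase == v1.PodSucceeded
}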
@@ -28,7 +28,7 @@ import (
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
 	volumetesting "k8s.io/kubernetes/pkg/volume/testing"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	"k8s.io/kubernetes/pkg/volume/util"
 )

 func TestFindAndAddActivePods_FindAndRemoveDeletedPods(t *testing.T) {
@@ -66,7 +66,7 @@ func TestFindAndAddActivePods_FindAndRemoveDeletedPods(t *testing.T) {

 	fakePodInformer.Informer().GetStore().Add(pod)

-	podName := volumehelper.GetUniquePodName(pod)
+	podName := util.GetUniquePodName(pod)

 	generatedVolumeName := "fake-plugin/" + pod.Spec.Volumes[0].Name

@@ -30,7 +30,7 @@ import (
 	"k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
 	"k8s.io/kubernetes/pkg/volume"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	"k8s.io/kubernetes/pkg/volume/util"
 )

 const TestPluginName = "kubernetes.io/testPlugin"
@@ -142,7 +142,7 @@ func CreateTestClient() *fake.Clientset {
 					"name": nodeName,
 				},
 				Annotations: map[string]string{
-					volumehelper.ControllerManagedAttachAnnotation: "true",
+					util.ControllerManagedAttachAnnotation: "true",
 				},
 			},
 			Status: v1.NodeStatus{
@@ -25,7 +25,7 @@ import (
 	corelisters "k8s.io/client-go/listers/core/v1"
 	"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
 	"k8s.io/kubernetes/pkg/volume"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	"k8s.io/kubernetes/pkg/volume/util"
 )

 // CreateVolumeSpec creates and returns a mutatable volume.Spec object for the
@@ -150,7 +150,7 @@ func DetermineVolumeAction(pod *v1.Pod, desiredStateOfWorld cache.DesiredStateOf
 	nodeName := types.NodeName(pod.Spec.NodeName)
 	keepTerminatedPodVolume := desiredStateOfWorld.GetKeepTerminatedPodVolumesForNode(nodeName)

-	if volumehelper.IsPodTerminated(pod, pod.Status) {
+	if util.IsPodTerminated(pod, pod.Status) {
 		// if pod is terminate we let kubelet policy dictate if volume
 		// should be detached or not
 		return keepTerminatedPodVolume
@@ -216,7 +216,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D
 			continue
 		}

-		uniquePodName := volumehelper.GetUniquePodName(pod)
+		uniquePodName := util.GetUniquePodName(pod)
 		if addVolumes {
 			// Add volume to desired state of world
 			_, err := desiredStateOfWorld.AddPod(
@@ -232,7 +232,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D

 		} else {
 			// Remove volume from desired state of world
-			uniqueVolumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(
+			uniqueVolumeName, err := util.GetUniqueVolumeNameFromSpec(
 				attachableVolumePlugin, volumeSpec)
 			if err != nil {
 				glog.V(10).Infof(
@@ -42,8 +42,8 @@ import (
 	"k8s.io/kubernetes/pkg/util/io"
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
-	"k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
+	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
 )

 const (
@@ -118,7 +118,7 @@ func NewExpandController(
 	eventBroadcaster.StartLogging(glog.Infof)
 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
 	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "volume_expand"})
-	blkutil := util.NewBlockVolumePathHandler()
+	blkutil := volumepathhandler.NewBlockVolumePathHandler()

 	expc.opExecutor = operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
 		kubeClient,
@@ -49,6 +49,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/controller"
 	vol "k8s.io/kubernetes/pkg/volume"
+	"k8s.io/kubernetes/pkg/volume/util/recyclerclient"
 )

 // This is a unit test framework for persistent volume controller.
@@ -1262,7 +1263,7 @@ func (plugin *mockVolumePlugin) GetMetrics() (*vol.Metrics, error) {

 // Recycler interfaces

-func (plugin *mockVolumePlugin) Recycle(pvName string, spec *vol.Spec, eventRecorder vol.RecycleEventRecorder) error {
+func (plugin *mockVolumePlugin) Recycle(pvName string, spec *vol.Spec, eventRecorder recyclerclient.RecycleEventRecorder) error {
 	if len(plugin.recycleCalls) == 0 {
 		return fmt.Errorf("Mock plugin error: no recycleCalls configured")
 	}
@@ -28,7 +28,6 @@ import (
 	"k8s.io/client-go/tools/cache"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/features"
-	"k8s.io/kubernetes/pkg/volume"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
 )

@@ -321,7 +320,7 @@ func (pvIndex *persistentVolumeOrderedIndex) allPossibleMatchingAccessModes(requ
 	keys := pvIndex.store.ListIndexFuncValues("accessmodes")
 	for _, key := range keys {
 		indexedModes := v1helper.GetAccessModesFromString(key)
-		if volume.AccessModesContainedInAll(indexedModes, requestedModes) {
+		if volumeutil.AccessModesContainedInAll(indexedModes, requestedModes) {
 			matchedModes = append(matchedModes, indexedModes)
 		}
 	}
@@ -27,7 +27,7 @@ import (
 	"k8s.io/client-go/kubernetes/scheme"
 	ref "k8s.io/client-go/tools/reference"
 	"k8s.io/kubernetes/pkg/api/testapi"
-	"k8s.io/kubernetes/pkg/volume"
+	"k8s.io/kubernetes/pkg/volume/util"
 )

 func makePVC(size string, modfn func(*v1.PersistentVolumeClaim)) *v1.PersistentVolumeClaim {
@@ -304,7 +304,7 @@ func TestAllPossibleAccessModes(t *testing.T) {
 		t.Errorf("Expected 3 arrays of modes that match RWO, but got %v", len(possibleModes))
 	}
 	for _, m := range possibleModes {
-		if !volume.AccessModesContains(m, v1.ReadWriteOnce) {
+		if !util.AccessModesContains(m, v1.ReadWriteOnce) {
 			t.Errorf("AccessModes does not contain %s", v1.ReadWriteOnce)
 		}
 	}
@@ -313,7 +313,7 @@ func TestAllPossibleAccessModes(t *testing.T) {
 	if len(possibleModes) != 1 {
 		t.Errorf("Expected 1 array of modes that match RWX, but got %v", len(possibleModes))
 	}
-	if !volume.AccessModesContains(possibleModes[0], v1.ReadWriteMany) {
+	if !util.AccessModesContains(possibleModes[0], v1.ReadWriteMany) {
 		t.Errorf("AccessModes does not contain %s", v1.ReadWriteOnce)
 	}

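AccessModesContains and AccessModesContainedInAll are small set predicates over []v1.PersistentVolumeAccessMode whose semantics follow directly from how the index and its test use them: a claim's requested modes match an indexed mode set only when every requested mode is present. A sketch consistent with those call sites:

package volumeutilsketch

import "k8s.io/api/core/v1"

// accessModesContains reports whether modes includes mode.
func accessModesContains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
	for _, m := range modes {
		if m == mode {
			return true
		}
	}
	return false
}

// accessModesContainedInAll reports whether every requested mode is
// present in indexedModes — the predicate the PV index uses above.
func accessModesContainedInAll(indexedModes, requestedModes []v1.PersistentVolumeAccessMode) bool {
	for _, mode := range requestedModes {
		if !accessModesContains(indexedModes, mode) {
			return false
		}
	}
	return true
}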
@@ -45,7 +45,7 @@ import (
 	"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
 	vol "k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/util"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	"k8s.io/kubernetes/pkg/volume/util/recyclerclient"

 	"github.com/golang/glog"
 )
@@ -1262,7 +1262,7 @@ func (ctrl *PersistentVolumeController) isVolumeUsed(pv *v1.PersistentVolume) ([
 		return nil, false, fmt.Errorf("error listing pods: %s", err)
 	}
 	for _, pod := range pods {
-		if volumehelper.IsPodTerminated(pod, pod.Status) {
+		if util.IsPodTerminated(pod, pod.Status) {
 			continue
 		}
 		for i := range pod.Spec.Volumes {
@@ -1550,7 +1550,7 @@ func (ctrl *PersistentVolumeController) scheduleOperation(operationName string,

 // newRecyclerEventRecorder returns a RecycleEventRecorder that sends all events
 // to given volume.
-func (ctrl *PersistentVolumeController) newRecyclerEventRecorder(volume *v1.PersistentVolume) vol.RecycleEventRecorder {
+func (ctrl *PersistentVolumeController) newRecyclerEventRecorder(volume *v1.PersistentVolume) recyclerclient.RecycleEventRecorder {
 	return func(eventtype, message string) {
 		ctrl.eventRecorder.Eventf(volume, eventtype, events.RecyclerPod, "Recycler pod: %s", message)
 	}
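The recycler's event callback moves from the volume package to the new recyclerclient package. Its shape is visible in the hunk above — newRecyclerEventRecorder returns a closure taking (eventtype, message) — so, up to package, the type is simply:

package recyclerclientsketch

// RecycleEventRecorder forwards recycler pod events to the PV's event
// stream; newRecyclerEventRecorder above returns exactly this closure shape.
type RecycleEventRecorder func(eventtype, message string)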
@@ -35,7 +35,6 @@ import (
 	"k8s.io/kubernetes/pkg/util/metrics"
 	"k8s.io/kubernetes/pkg/util/slice"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 // Controller is controller that removes PVCProtectionFinalizer
@@ -214,7 +213,7 @@ func (c *Controller) isBeingUsed(pvc *v1.PersistentVolumeClaim) (bool, error) {
 			glog.V(4).Infof("Skipping unscheduled pod %s when checking PVC %s/%s", pod.Name, pvc.Namespace, pvc.Name)
 			continue
 		}
-		if volumehelper.IsPodTerminated(pod, pod.Status) {
+		if volumeutil.IsPodTerminated(pod, pod.Status) {
 			// This pod is being unmounted/detached or is already
 			// unmounted/detached. It does not block the PVC from deletion.
 			continue
@@ -270,7 +269,7 @@ func (c *Controller) podAddedDeletedUpdated(obj interface{}, deleted bool) {
 	}

 	// Filter out pods that can't help us to remove a finalizer on PVC
-	if !deleted && !volumehelper.IsPodTerminated(pod, pod.Status) && pod.Spec.NodeName != "" {
+	if !deleted && !volumeutil.IsPodTerminated(pod, pod.Status) && pod.Spec.NodeName != "" {
 		return
 	}

@@ -46,7 +46,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/algorithm"
 	nodeutil "k8s.io/kubernetes/pkg/util/node"
 	"k8s.io/kubernetes/pkg/version"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	volutil "k8s.io/kubernetes/pkg/volume/util"
 )

 const (
@@ -190,8 +190,8 @@ func (kl *Kubelet) updateDefaultLabels(initialNode, existingNode *v1.Node) bool
 // whether the existing node must be updated.
 func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *v1.Node) bool {
 	var (
-		existingCMAAnnotation    = existingNode.Annotations[volumehelper.ControllerManagedAttachAnnotation]
-		newCMAAnnotation, newSet = node.Annotations[volumehelper.ControllerManagedAttachAnnotation]
+		existingCMAAnnotation    = existingNode.Annotations[volutil.ControllerManagedAttachAnnotation]
+		newCMAAnnotation, newSet = node.Annotations[volutil.ControllerManagedAttachAnnotation]
 	)

 	if newCMAAnnotation == existingCMAAnnotation {
@@ -203,13 +203,13 @@ func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *v
 	// the correct value of the annotation.
 	if !newSet {
 		glog.Info("Controller attach-detach setting changed to false; updating existing Node")
-		delete(existingNode.Annotations, volumehelper.ControllerManagedAttachAnnotation)
+		delete(existingNode.Annotations, volutil.ControllerManagedAttachAnnotation)
 	} else {
 		glog.Info("Controller attach-detach setting changed to true; updating existing Node")
 		if existingNode.Annotations == nil {
 			existingNode.Annotations = make(map[string]string)
 		}
-		existingNode.Annotations[volumehelper.ControllerManagedAttachAnnotation] = newCMAAnnotation
+		existingNode.Annotations[volutil.ControllerManagedAttachAnnotation] = newCMAAnnotation
 	}

 	return true
@@ -270,7 +270,7 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) {
 		}

 		glog.Infof("Setting node annotation to enable volume controller attach/detach")
-		node.Annotations[volumehelper.ControllerManagedAttachAnnotation] = "true"
+		node.Annotations[volutil.ControllerManagedAttachAnnotation] = "true"
 	} else {
 		glog.Infof("Controller attach/detach is disabled for this node; Kubelet will attach and detach volumes")
 	}
@@ -280,7 +280,7 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) {
 			node.Annotations = make(map[string]string)
 		}
 		glog.Infof("Setting node annotation to keep pod volumes of terminated pods attached to the node")
-		node.Annotations[volumehelper.KeepTerminatedPodVolumesAnnotation] = "true"
+		node.Annotations[volutil.KeepTerminatedPodVolumesAnnotation] = "true"
 	}

 	// @question: should this be place after the call to the cloud provider? which also applies labels
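Both annotation keys are plain string constants that moved packages unchanged. For reference, their values should be the ones below — treat the exact strings as an assumption and verify against pkg/volume/util in the vendored tree before relying on them:

package volumeutilsketch

// Assumed values — check pkg/volume/util in the vendored tree.
const (
	// Set on a Node to tell the kubelet that the attach/detach
	// controller manages attachment/detachment for this node.
	ControllerManagedAttachAnnotation = "volumes.kubernetes.io/controller-managed-attach-detach"

	// Set on a Node to keep volumes of terminated pods attached.
	KeepTerminatedPodVolumesAnnotation = "volumes.kubernetes.io/keep-terminated-pod-volumes"
)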
@@ -53,7 +53,7 @@ import (
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
 	"k8s.io/kubernetes/pkg/version"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	"k8s.io/kubernetes/pkg/volume/util"
 )

 const (
@@ -937,7 +937,7 @@ func TestTryRegisterWithApiServer(t *testing.T) {

 		if cmad {
 			node.Annotations = make(map[string]string)
-			node.Annotations[volumehelper.ControllerManagedAttachAnnotation] = "true"
+			node.Annotations[util.ControllerManagedAttachAnnotation] = "true"
 		}

 		return node
@@ -1089,7 +1089,7 @@ func TestTryRegisterWithApiServer(t *testing.T) {
 			require.NoError(t, err)
 		}

-		actualCMAD, _ := strconv.ParseBool(savedNode.Annotations[volumehelper.ControllerManagedAttachAnnotation])
+		actualCMAD, _ := strconv.ParseBool(savedNode.Annotations[util.ControllerManagedAttachAnnotation])
 		assert.Equal(t, tc.savedNodeCMAD, actualCMAD, "test [%s]", tc.name)
 	}
 }
@@ -61,9 +61,8 @@ import (
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	utilfile "k8s.io/kubernetes/pkg/util/file"
-	"k8s.io/kubernetes/pkg/volume"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
 	volumevalidation "k8s.io/kubernetes/pkg/volume/validation"
 	"k8s.io/kubernetes/third_party/forked/golang/expansion"
 )
@@ -129,7 +128,7 @@ func makeAbsolutePath(goos, path string) string {

 // makeBlockVolumes maps the raw block devices specified in the path of the container
 // Experimental
-func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVolumes kubecontainer.VolumeMap, blkutil volumeutil.BlockVolumePathHandler) ([]kubecontainer.DeviceInfo, error) {
+func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVolumes kubecontainer.VolumeMap, blkutil volumepathhandler.BlockVolumePathHandler) ([]kubecontainer.DeviceInfo, error) {
 	var devices []kubecontainer.DeviceInfo
 	for _, device := range container.VolumeDevices {
 		// check path is absolute
@@ -188,7 +187,7 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h
 			vol.SELinuxLabeled = true
 			relabelVolume = true
 		}
-		hostPath, err := volume.GetPath(vol.Mounter)
+		hostPath, err := volumeutil.GetPath(vol.Mounter)
 		if err != nil {
 			return nil, err
 		}
@@ -451,7 +450,7 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Contai
 		return nil, err
 	}
 	opts.Hostname = hostname
-	podName := volumehelper.GetUniquePodName(pod)
+	podName := volumeutil.GetUniquePodName(pod)
 	volumes := kl.volumeManager.GetMountedVolumesForPod(podName)

 	opts.PortMappings = kubecontainer.MakePortMappings(container)
@@ -464,7 +463,7 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Contai

 	// TODO: remove feature gate check after no longer needed
 	if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
-		blkutil := volumeutil.NewBlockVolumePathHandler()
+		blkutil := volumepathhandler.NewBlockVolumePathHandler()
 		blkVolumes, err := kl.makeBlockVolumes(pod, container, volumes, blkutil)
 		if err != nil {
 			return nil, err
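GetUniquePodName is the key used throughout the volume manager to index per-pod state. It is, modulo package, just the pod UID wrapped in the UniquePodName type — which is why the attach/detach populator earlier in this diff can compare it directly against an informer pod's UID. A sketch:

package volumeutilsketch

import (
	"k8s.io/api/core/v1"
	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
)

// getUniquePodNameSketch: the pod UID already uniquely identifies a
// pod instance, so the helper only needs a type conversion.
func getUniquePodNameSketch(pod *v1.Pod) volumetypes.UniquePodName {
	return volumetypes.UniquePodName(pod.UID)
}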
@@ -73,7 +73,7 @@ import (
 	"k8s.io/kubernetes/pkg/volume"
 	_ "k8s.io/kubernetes/pkg/volume/host_path"
 	volumetest "k8s.io/kubernetes/pkg/volume/testing"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	"k8s.io/kubernetes/pkg/volume/util"
 )

 func init() {
@@ -2134,7 +2134,7 @@ func waitForVolumeUnmount(
 	func() (bool, error) {
 		// Verify volumes detached
 		podVolumes = volumeManager.GetMountedVolumesForPod(
-			volumehelper.GetUniquePodName(pod))
+			util.GetUniquePodName(pod))

 		if len(podVolumes) != 0 {
 			return false, nil
@@ -28,7 +28,7 @@ import (
 	core "k8s.io/client-go/testing"
 	"k8s.io/kubernetes/pkg/volume"
 	volumetest "k8s.io/kubernetes/pkg/volume/testing"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	"k8s.io/kubernetes/pkg/volume/util"
 )

 func TestListVolumesForPod(t *testing.T) {
@@ -64,7 +64,7 @@ func TestListVolumesForPod(t *testing.T) {
 	err := kubelet.volumeManager.WaitForAttachAndMount(pod)
 	assert.NoError(t, err)

-	podName := volumehelper.GetUniquePodName(pod)
+	podName := util.GetUniquePodName(pod)

 	volumesToReturn, volumeExsit := kubelet.ListVolumesForPod(types.UID(podName))
 	assert.True(t, volumeExsit, "expected to find volumes for pod %q", podName)
@@ -180,7 +180,7 @@ func TestVolumeAttachAndMountControllerDisabled(t *testing.T) {
 	assert.NoError(t, err)

 	podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
-		volumehelper.GetUniquePodName(pod))
+		util.GetUniquePodName(pod))

 	expectedPodVolumes := []string{"vol1"}
 	assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod)
@@ -227,7 +227,7 @@ func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) {
 	assert.NoError(t, err)

 	podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
-		volumehelper.GetUniquePodName(pod))
+		util.GetUniquePodName(pod))

 	expectedPodVolumes := []string{"vol1"}
 	assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod)
@@ -252,7 +252,7 @@ func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) {

 	// Verify volumes unmounted
 	podVolumes = kubelet.volumeManager.GetMountedVolumesForPod(
-		volumehelper.GetUniquePodName(pod))
+		util.GetUniquePodName(pod))

 	assert.Len(t, podVolumes, 0,
 		"Expected volumes to be unmounted and detached. But some volumes are still mounted: %#v", podVolumes)
@@ -317,7 +317,7 @@ func TestVolumeAttachAndMountControllerEnabled(t *testing.T) {
 	assert.NoError(t, kubelet.volumeManager.WaitForAttachAndMount(pod))

 	podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
-		volumehelper.GetUniquePodName(pod))
+		util.GetUniquePodName(pod))

 	expectedPodVolumes := []string{"vol1"}
 	assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod)
@@ -386,7 +386,7 @@ func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) {
 	assert.NoError(t, kubelet.volumeManager.WaitForAttachAndMount(pod))

 	podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
-		volumehelper.GetUniquePodName(pod))
+		util.GetUniquePodName(pod))

 	expectedPodVolumes := []string{"vol1"}
 	assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod)
@@ -410,7 +410,7 @@ func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) {

 	// Verify volumes unmounted
 	podVolumes = kubelet.volumeManager.GetMountedVolumesForPod(
-		volumehelper.GetUniquePodName(pod))
+		util.GetUniquePodName(pod))

 	assert.Len(t, podVolumes, 0,
 		"Expected volumes to be unmounted and detached. But some volumes are still mounted: %#v", podVolumes)
@@ -36,6 +36,7 @@ import (
 	"k8s.io/kubernetes/pkg/util/io"
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
+	"k8s.io/kubernetes/pkg/volume/util"
 )

 // NewInitializedVolumePluginMgr returns a new instance of
@@ -94,7 +95,7 @@ func (kvh *kubeletVolumeHost) GetVolumeDevicePluginDir(pluginName string) string
 func (kvh *kubeletVolumeHost) GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string {
 	dir := kvh.kubelet.getPodVolumeDir(podUID, pluginName, volumeName)
 	if runtime.GOOS == "windows" {
-		dir = volume.GetWindowsPath(dir)
+		dir = util.GetWindowsPath(dir)
 	}
 	return dir
 }
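GetWindowsPath normalizes a POSIX-style pod volume path for Windows. A sketch of the expected behavior — flipping slashes and supplying a default drive prefix — with the "c:" default stated as an assumption rather than confirmed from the vendored source:

package volumeutilsketch

import "strings"

// getWindowsPathSketch converts forward slashes to backslashes and,
// as assumed here, prefixes "c:" when the path carries no drive letter.
func getWindowsPathSketch(path string) string {
	windowsPath := strings.Replace(path, "/", "\\", -1)
	if strings.HasPrefix(windowsPath, "\\") {
		windowsPath = "c:" + windowsPath
	}
	return windowsPath
}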
@@ -29,9 +29,9 @@ import (
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/kubernetes/pkg/volume"
+	"k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
 	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 // ActualStateOfWorld defines a set of thread-safe operations for the kubelet
@@ -358,7 +358,7 @@ func (asw *actualStateOfWorld) addVolume(
 	}

 	if len(volumeName) == 0 {
-		volumeName, err = volumehelper.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec)
+		volumeName, err = util.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec)
 		if err != nil {
 			return fmt.Errorf(
 				"failed to GetUniqueVolumeNameFromSpec for volumeSpec %q using volume plugin %q err=%v",
@@ -23,8 +23,8 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/volume"
 	volumetesting "k8s.io/kubernetes/pkg/volume/testing"
+	"k8s.io/kubernetes/pkg/volume/util"
 	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 var emptyVolumeName = v1.UniqueVolumeName("")
@@ -56,7 +56,7 @@ func Test_MarkVolumeAsAttached_Positive_NewVolume(t *testing.T) {
 	}
 	volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
 	devicePath := "fake/device/path"
-	generatedVolumeName, _ := volumehelper.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
+	generatedVolumeName, _ := util.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)

 	// Act
 	err := asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
@@ -143,7 +143,7 @@ func Test_MarkVolumeAsAttached_Positive_ExistingVolume(t *testing.T) {
 		},
 	}
 	volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
-	generatedVolumeName, _ := volumehelper.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
+	generatedVolumeName, _ := util.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)

 	err := asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
 	if err != nil {
@@ -191,13 +191,13 @@ func Test_AddPodToVolume_Positive_ExistingVolumeNewNode(t *testing.T) {
 		},
 	}
 	volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
-	generatedVolumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
+	generatedVolumeName, err := util.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)

 	err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
 	if err != nil {
 		t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
 	}
-	podName := volumehelper.GetUniquePodName(pod)
+	podName := util.GetUniquePodName(pod)

 	mounter, err := plugin.NewMounter(volumeSpec, pod, volume.VolumeOptions{})
 	if err != nil {
@@ -255,14 +255,14 @@ func Test_AddPodToVolume_Positive_ExistingVolumeExistingNode(t *testing.T) {
 	}

 	volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
-	generatedVolumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(
+	generatedVolumeName, err := util.GetUniqueVolumeNameFromSpec(
 		plugin, volumeSpec)

 	err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
 	if err != nil {
 		t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
 	}
-	podName := volumehelper.GetUniquePodName(pod)
+	podName := util.GetUniquePodName(pod)

 	mounter, err := plugin.NewMounter(volumeSpec, pod, volume.VolumeOptions{})
 	if err != nil {
@@ -339,10 +339,10 @@ func Test_AddPodToVolume_Negative_VolumeDoesntExist(t *testing.T) {
 			err)
 	}

-	volumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(
+	volumeName, err := util.GetUniqueVolumeNameFromSpec(
 		plugin, volumeSpec)

-	podName := volumehelper.GetUniquePodName(pod)
+	podName := util.GetUniquePodName(pod)

 	mounter, err := plugin.NewMounter(volumeSpec, pod, volume.VolumeOptions{})
 	if err != nil {
@@ -404,7 +404,7 @@ func Test_MarkDeviceAsMounted_Positive_NewVolume(t *testing.T) {
 	volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
 	devicePath := "fake/device/path"
 	deviceMountPath := "fake/device/mount/path"
-	generatedVolumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
+	generatedVolumeName, err := util.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)

 	err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
 	if err != nil {
@@ -26,9 +26,9 @@ import (

 	"k8s.io/api/core/v1"
 	"k8s.io/kubernetes/pkg/volume"
+	"k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
 	"k8s.io/kubernetes/pkg/volume/util/types"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 // DesiredStateOfWorld defines a set of thread-safe operations for the kubelet
@@ -206,7 +206,7 @@ func (dsw *desiredStateOfWorld) AddPodToVolume(
 		// For attachable volumes, use the unique volume name as reported by
 		// the plugin.
 		volumeName, err =
-			volumehelper.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec)
+			util.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec)
 		if err != nil {
 			return "", fmt.Errorf(
 				"failed to GetUniqueVolumeNameFromSpec for volumeSpec %q using volume plugin %q err=%v",
@@ -217,7 +217,7 @@ func (dsw *desiredStateOfWorld) AddPodToVolume(
 	} else {
 		// For non-attachable volumes, generate a unique name based on the pod
 		// namespace and name and the name of the volume within the pod.
-		volumeName = volumehelper.GetUniqueVolumeNameForNonAttachableVolume(podName, volumePlugin, volumeSpec)
+		volumeName = util.GetUniqueVolumeNameForNonAttachableVolume(podName, volumePlugin, volumeSpec)
 	}

 	volumeObj, volumeExists := dsw.volumesToMount[volumeName]
@@ -23,8 +23,8 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/volume"
 	volumetesting "k8s.io/kubernetes/pkg/volume/testing"
+	"k8s.io/kubernetes/pkg/volume/util"
 	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 // Calls AddPodToVolume() to add new pod to new volume
@@ -54,7 +54,7 @@ func Test_AddPodToVolume_Positive_NewPodNewVolume(t *testing.T) {
 	}

 	volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
-	podName := volumehelper.GetUniquePodName(pod)
+	podName := util.GetUniquePodName(pod)

 	// Act
 	generatedVolumeName, err := dsw.AddPodToVolume(
@@ -99,7 +99,7 @@ func Test_AddPodToVolume_Positive_ExistingPodExistingVolume(t *testing.T) {
 	}

 	volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
-	podName := volumehelper.GetUniquePodName(pod)
+	podName := util.GetUniquePodName(pod)

 	// Act
 	generatedVolumeName, err := dsw.AddPodToVolume(
@@ -144,7 +144,7 @@ func Test_DeletePodFromVolume_Positive_PodExistsVolumeExists(t *testing.T) {
 	}

 	volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
-	podName := volumehelper.GetUniquePodName(pod)
+	podName := util.GetUniquePodName(pod)
 	generatedVolumeName, err := dsw.AddPodToVolume(
 		podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
 	if err != nil {
@@ -197,7 +197,7 @@ func Test_MarkVolumesReportedInUse_Positive_NewPodNewVolume(t *testing.T) {
 	}

 	volume1Spec := &volume.Spec{Volume: &pod1.Spec.Volumes[0]}
-	pod1Name := volumehelper.GetUniquePodName(pod1)
+	pod1Name := util.GetUniquePodName(pod1)

 	pod2 := &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
@@ -219,7 +219,7 @@ func Test_MarkVolumesReportedInUse_Positive_NewPodNewVolume(t *testing.T) {
 	}

 	volume2Spec := &volume.Spec{Volume: &pod2.Spec.Volumes[0]}
-	pod2Name := volumehelper.GetUniquePodName(pod2)
+	pod2Name := util.GetUniquePodName(pod2)

 	pod3 := &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
@@ -241,7 +241,7 @@ func Test_MarkVolumesReportedInUse_Positive_NewPodNewVolume(t *testing.T) {
 	}

 	volume3Spec := &volume.Spec{Volume: &pod3.Spec.Volumes[0]}
-	pod3Name := volumehelper.GetUniquePodName(pod3)
+	pod3Name := util.GetUniquePodName(pod3)

 	generatedVolume1Name, err := dsw.AddPodToVolume(
 		pod1Name, pod1, volume1Spec, volume1Spec.Name(), "" /* volumeGidValue */)
@@ -41,8 +41,8 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
 	"k8s.io/kubernetes/pkg/volume"
+	"k8s.io/kubernetes/pkg/volume/util"
 	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 // DesiredStateOfWorldPopulator periodically loops through the list of active

@@ -176,7 +176,7 @@ func (dswp *desiredStateOfWorldPopulator) isPodTerminated(pod *v1.Pod) bool {
 	if !found {
 		podStatus = pod.Status
 	}
-	return volumehelper.IsPodTerminated(pod, podStatus)
+	return util.IsPodTerminated(pod, podStatus)
 }

 // Iterate through all pods and add to desired state of world if they don't

@@ -260,7 +260,7 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes(pod *v1.Pod) {
 		return
 	}

-	uniquePodName := volumehelper.GetUniquePodName(pod)
+	uniquePodName := util.GetUniquePodName(pod)
 	if dswp.podPreviouslyProcessed(uniquePodName) {
 		return
 	}
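The populator calls `util.IsPodTerminated` to decide when a pod's volumes can be dropped from the desired state. A minimal sketch of the check, assuming (as upstream does) that a pod counts as terminated once it is marked for deletion and no init or app container is still running; the types stand in for `v1.Pod` and `v1.PodStatus`:

```go
package main

import "fmt"

// ContainerStatus stands in for v1.ContainerStatus; only the running
// flag matters to this check.
type ContainerStatus struct{ Running bool }

// PodStatus stands in for v1.PodStatus.
type PodStatus struct {
	InitContainerStatuses []ContainerStatus
	ContainerStatuses     []ContainerStatus
}

// Pod stands in for v1.Pod; DeletionTimestamp is modeled as a bool.
type Pod struct{ Deleting bool }

func notRunning(statuses []ContainerStatus) bool {
	for _, s := range statuses {
		if s.Running {
			return false
		}
	}
	return true
}

// IsPodTerminated reports whether the pod is being deleted and none of
// its containers are still running, the condition under which the
// populator stops tracking the pod's volumes.
func IsPodTerminated(pod *Pod, status PodStatus) bool {
	return pod.Deleting &&
		notRunning(status.InitContainerStatuses) &&
		notRunning(status.ContainerStatuses)
}

func main() {
	p := &Pod{Deleting: true}
	s := PodStatus{ContainerStatuses: []ContainerStatus{{Running: false}}}
	fmt.Println(IsPodTerminated(p, s)) // true
}
```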
@@ -393,7 +393,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(

 	// TODO: remove feature gate check after no longer needed
 	if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
-		volumeMode, err := volumehelper.GetVolumeMode(volumeSpec)
+		volumeMode, err := util.GetVolumeMode(volumeSpec)
 		if err != nil {
 			return nil, "", err
 		}

@@ -525,7 +525,7 @@ func (dswp *desiredStateOfWorldPopulator) makeVolumeMap(containers []v1.Containe
 }

 func getPVVolumeGidAnnotationValue(pv *v1.PersistentVolume) string {
-	if volumeGid, ok := pv.Annotations[volumehelper.VolumeGidAnnotationKey]; ok {
+	if volumeGid, ok := pv.Annotations[util.VolumeGidAnnotationKey]; ok {
 		return volumeGid
 	}
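`util.GetVolumeMode` feeds the BlockVolume feature-gate branch above. A sketch of its contract, assuming the upstream behavior: an inline pod volume can only be a filesystem volume, while a PersistentVolume carries an explicit `volumeMode` field that must be present. The types here are simplified stand-ins for `volume.Spec`:

```go
package main

import (
	"errors"
	"fmt"
)

type PersistentVolumeMode string

const (
	Filesystem PersistentVolumeMode = "Filesystem"
	Block      PersistentVolumeMode = "Block"
)

// Spec stands in for volume.Spec: either an inline pod volume or a PV.
type Spec struct {
	InlineVolume bool
	PVVolumeMode *PersistentVolumeMode // nil when the PV does not set it
}

// GetVolumeMode returns the mode the kubelet should handle the volume
// in: inline volumes are always filesystem; a PV must say which it is.
func GetVolumeMode(spec *Spec) (PersistentVolumeMode, error) {
	if spec == nil || spec.InlineVolume {
		return Filesystem, nil
	}
	if spec.PVVolumeMode != nil {
		return *spec.PVVolumeMode, nil
	}
	return "", errors.New("cannot get volumeMode from spec")
}

func main() {
	mode := Block
	m, _ := GetVolumeMode(&Spec{PVVolumeMode: &mode})
	fmt.Println(m) // Block
}
```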
@@ -35,8 +35,8 @@ import (
 	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
 	"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
 	volumetesting "k8s.io/kubernetes/pkg/volume/testing"
+	"k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/pkg/volume/util/types"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 func TestFindAndAddNewPods_FindAndRemoveDeletedPods(t *testing.T) {

@@ -74,7 +74,7 @@ func TestFindAndAddNewPods_FindAndRemoveDeletedPods(t *testing.T) {

 	fakePodManager.AddPod(pod)

-	podName := volumehelper.GetUniquePodName(pod)
+	podName := util.GetUniquePodName(pod)

 	generatedVolumeName := "fake-plugin/" + pod.Spec.Volumes[0].Name

@@ -184,7 +184,7 @@ func TestFindAndAddNewPods_FindAndRemoveDeletedPods_Valid_Block_VolumeDevices(t

 	fakePodManager.AddPod(pod)

-	podName := volumehelper.GetUniquePodName(pod)
+	podName := util.GetUniquePodName(pod)

 	generatedVolumeName := "fake-plugin/" + pod.Spec.Volumes[0].Name
@@ -41,10 +41,10 @@ import (
 	"k8s.io/kubernetes/pkg/util/mount"
 	utilstrings "k8s.io/kubernetes/pkg/util/strings"
 	volumepkg "k8s.io/kubernetes/pkg/volume"
+	"k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations"
 	"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
 	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 // Reconciler runs a periodic loop to reconcile the desired state of the world

@@ -445,12 +445,12 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume,

 	var uniqueVolumeName v1.UniqueVolumeName
 	if attachablePlugin != nil {
-		uniqueVolumeName, err = volumehelper.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
+		uniqueVolumeName, err = util.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
 		if err != nil {
 			return nil, err
 		}
 	} else {
-		uniqueVolumeName = volumehelper.GetUniqueVolumeNameForNonAttachableVolume(volume.podName, plugin, volumeSpec)
+		uniqueVolumeName = util.GetUniqueVolumeNameForNonAttachableVolume(volume.podName, plugin, volumeSpec)
 	}
 	// Check existence of mount point for filesystem volume or symbolic link for block volume
 	isExist, checkErr := rc.operationExecutor.CheckVolumeExistenceOperation(volumeSpec, volume.mountPath, volumeSpec.Name(), rc.mounter, uniqueVolumeName, volume.podName, pod.UID, attachablePlugin)
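The reconciler hunk shows two naming helpers moving to `util`, and the split matters: an attachable volume's unique name must be stable across pods, since several pods can share one attached device, while a non-attachable volume's name folds in the pod so each pod's copy is tracked separately. A sketch of the two name shapes, paraphrased from the call sites rather than copied from upstream:

```go
package main

import "fmt"

// uniqueVolumeName is the form used for attachable volumes: it depends
// only on the plugin and the volume, never on which pod mounts it.
func uniqueVolumeName(pluginName, specName string) string {
	return fmt.Sprintf("%s/%s", pluginName, specName)
}

// uniqueVolumeNameForPod is the non-attachable form: the pod name is
// mixed in because each pod gets its own independent instance.
func uniqueVolumeNameForPod(pluginName, podName, specName string) string {
	return fmt.Sprintf("%s/%v-%s", pluginName, podName, specName)
}

func main() {
	fmt.Println(uniqueVolumeName("kubernetes.io/gce-pd", "my-disk"))
	fmt.Println(uniqueVolumeNameForPod("kubernetes.io/empty-dir", "pod-uid-123", "cache"))
}
```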
@@ -36,8 +36,8 @@ import (
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
 	volumetesting "k8s.io/kubernetes/pkg/volume/testing"
+	"k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 const (

@@ -149,7 +149,7 @@ func Test_Run_Positive_VolumeAttachAndMount(t *testing.T) {
 	}

 	volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
-	podName := volumehelper.GetUniquePodName(pod)
+	podName := util.GetUniquePodName(pod)
 	generatedVolumeName, err := dsw.AddPodToVolume(
 		podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)

@@ -227,7 +227,7 @@ func Test_Run_Positive_VolumeMountControllerAttachEnabled(t *testing.T) {
 	}

 	volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
-	podName := volumehelper.GetUniquePodName(pod)
+	podName := util.GetUniquePodName(pod)
 	generatedVolumeName, err := dsw.AddPodToVolume(
 		podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
 	dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{generatedVolumeName})

@@ -306,7 +306,7 @@ func Test_Run_Positive_VolumeAttachMountUnmountDetach(t *testing.T) {
 	}

 	volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
-	podName := volumehelper.GetUniquePodName(pod)
+	podName := util.GetUniquePodName(pod)
 	generatedVolumeName, err := dsw.AddPodToVolume(
 		podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)

@@ -396,7 +396,7 @@ func Test_Run_Positive_VolumeUnmountControllerAttachEnabled(t *testing.T) {
 	}

 	volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
-	podName := volumehelper.GetUniquePodName(pod)
+	podName := util.GetUniquePodName(pod)
 	generatedVolumeName, err := dsw.AddPodToVolume(
 		podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)

@@ -491,7 +491,7 @@ func Test_Run_Positive_VolumeAttachAndMap(t *testing.T) {
 	volumeSpec := &volume.Spec{
 		PersistentVolume: gcepv,
 	}
-	podName := volumehelper.GetUniquePodName(pod)
+	podName := util.GetUniquePodName(pod)
 	generatedVolumeName, err := dsw.AddPodToVolume(
 		podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)

@@ -582,7 +582,7 @@ func Test_Run_Positive_BlockVolumeMapControllerAttachEnabled(t *testing.T) {
 	volumeSpec := &volume.Spec{
 		PersistentVolume: gcepv,
 	}
-	podName := volumehelper.GetUniquePodName(pod)
+	podName := util.GetUniquePodName(pod)
 	generatedVolumeName, err := dsw.AddPodToVolume(
 		podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
 	dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{generatedVolumeName})

@@ -674,7 +674,7 @@ func Test_Run_Positive_BlockVolumeAttachMapUnmapDetach(t *testing.T) {
 	volumeSpec := &volume.Spec{
 		PersistentVolume: gcepv,
 	}
-	podName := volumehelper.GetUniquePodName(pod)
+	podName := util.GetUniquePodName(pod)
 	generatedVolumeName, err := dsw.AddPodToVolume(
 		podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)

@@ -776,7 +776,7 @@ func Test_Run_Positive_VolumeUnmapControllerAttachEnabled(t *testing.T) {
 	volumeSpec := &volume.Spec{
 		PersistentVolume: gcepv,
 	}
-	podName := volumehelper.GetUniquePodName(pod)
+	podName := util.GetUniquePodName(pod)
 	generatedVolumeName, err := dsw.AddPodToVolume(
 		podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
@@ -43,7 +43,7 @@ import (
 	"k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
 	"k8s.io/kubernetes/pkg/volume/util/types"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
 )

 const (

@@ -169,7 +169,7 @@ func NewVolumeManager(
 			volumePluginMgr,
 			recorder,
 			checkNodeCapabilitiesBeforeMount,
-			util.NewBlockVolumePathHandler())),
+			volumepathhandler.NewBlockVolumePathHandler())),
 	}

 	vm.desiredStateOfWorldPopulator = populator.NewDesiredStateOfWorldPopulator(

@@ -264,7 +264,7 @@ func (vm *volumeManager) GetMountedVolumesForPod(podName types.UniquePodName) co
 }

 func (vm *volumeManager) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 {
-	podName := volumehelper.GetUniquePodName(pod)
+	podName := util.GetUniquePodName(pod)
 	supplementalGroups := sets.NewString()

 	for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {

@@ -340,7 +340,7 @@ func (vm *volumeManager) WaitForAttachAndMount(pod *v1.Pod) error {
 	}

 	glog.V(3).Infof("Waiting for volumes to attach and mount for pod %q", format.Pod(pod))
-	uniquePodName := volumehelper.GetUniquePodName(pod)
+	uniquePodName := util.GetUniquePodName(pod)

 	// Some pods expect to have Setup called over and over again to update.
 	// Remount plugins for which this is true. (Atomically updating volumes,
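`NewBlockVolumePathHandler` now lives in its own `volume/util/volumepathhandler` package, so the hunk above swaps the import rather than the call. A condensed, hypothetical view of the caller's side; the method shown is the one this diff actually uses, but its body here is only a placeholder for the real symlink bookkeeping:

```go
package main

import "fmt"

// blockVolumePathHandler stands in for the concrete handler that
// volumepathhandler.NewBlockVolumePathHandler() returns upstream.
type blockVolumePathHandler struct{}

// NewBlockVolumePathHandler mirrors the constructor's new home; the
// old call spelled it util.NewBlockVolumePathHandler().
func NewBlockVolumePathHandler() blockVolumePathHandler {
	return blockVolumePathHandler{}
}

// FindGlobalMapPathUUIDFromPod resolves where the pod's mapped raw
// block device lives under the plugin directory. Upstream walks the
// plugin dir for a symlink back to mapPath; that search is elided.
func (blockVolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID string) (string, error) {
	return fmt.Sprintf("%s/volumeDevices/%s", pluginDir, podUID), nil
}

func main() {
	blkutil := NewBlockVolumePathHandler()
	p, _ := blkutil.FindGlobalMapPathUUIDFromPod(
		"/var/lib/kubelet/plugins/kubernetes.io/gce-pd", "/dev/xvdf", "pod-uid")
	fmt.Println(p)
}
```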
@@ -41,8 +41,8 @@ import (
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
 	volumetest "k8s.io/kubernetes/pkg/volume/testing"
+	"k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/pkg/volume/util/types"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 const (

@@ -168,7 +168,7 @@ func TestGetExtraSupplementalGroupsForPod(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "pvA",
 			Annotations: map[string]string{
-				volumehelper.VolumeGidAnnotationKey: tc.gidAnnotation,
+				util.VolumeGidAnnotationKey: tc.gidAnnotation,
 			},
 		},
 		Spec: v1.PersistentVolumeSpec{
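The test pins the `util.VolumeGidAnnotationKey` constant, whose literal value upstream is `pv.beta.kubernetes.io/gid`: an admin sets it on a PersistentVolume and the kubelet adds the GID to the pod's supplemental groups while the volume is mounted. A small sketch of the consuming side:

```go
package main

import (
	"fmt"
	"strconv"
)

// VolumeGidAnnotationKey is the PV annotation the kubelet reads to
// apply an extra supplemental GID; the literal matches the upstream
// constant.
const VolumeGidAnnotationKey = "pv.beta.kubernetes.io/gid"

func main() {
	annotations := map[string]string{VolumeGidAnnotationKey: "1234"}
	if gid, ok := annotations[VolumeGidAnnotationKey]; ok {
		n, err := strconv.ParseInt(gid, 10, 64)
		if err != nil {
			fmt.Println("ignoring non-numeric gid annotation:", gid)
			return
		}
		fmt.Println("pod gets supplemental group:", n) // 1234
	}
}
```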
@@ -30,7 +30,6 @@ import (
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 type awsElasticBlockStoreAttacher struct {

@@ -219,8 +218,8 @@ func (attacher *awsElasticBlockStoreAttacher) MountDevice(spec *volume.Spec, dev
 		options = append(options, "ro")
 	}
 	if notMnt {
-		diskMounter := volumehelper.NewSafeFormatAndMountFromHost(awsElasticBlockStorePluginName, attacher.host)
-		mountOptions := volume.MountOptionFromSpec(spec, options...)
+		diskMounter := volumeutil.NewSafeFormatAndMountFromHost(awsElasticBlockStorePluginName, attacher.host)
+		mountOptions := volumeutil.MountOptionFromSpec(spec, options...)
 		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
 		if err != nil {
 			os.Remove(deviceMountPath)
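All the attacher hunks in this commit circle the same pattern around `NewSafeFormatAndMountFromHost` and `MountOptionFromSpec`: build the option list (adding `ro` for read-only), format-and-mount only when the target is not yet a mount point, and clean the mount dir back up on failure. A condensed sketch of that flow with the cloud-specific pieces stubbed out; `formatAndMount` here is a stand-in for `mount.SafeFormatAndMount.FormatAndMount`, not the real implementation:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// formatAndMount stands in for mount.SafeFormatAndMount.FormatAndMount:
// it formats the device only if it carries no filesystem, then mounts.
// The real implementation shells out to blkid/mkfs/mount; elided here.
func formatAndMount(device, target, fstype string, options []string) error {
	if device == "" {
		return errors.New("no device")
	}
	return nil
}

// mountDevice follows the attacher pattern from the hunk above: append
// "ro" for read-only volumes, merge per-PV mount options, and remove
// the mount point again if formatting or mounting fails.
func mountDevice(device, deviceMountPath, fsType string, readOnly bool, specOptions []string) error {
	options := specOptions
	if readOnly {
		options = append(options, "ro")
	}
	if err := formatAndMount(device, deviceMountPath, fsType, options); err != nil {
		os.Remove(deviceMountPath) // best-effort cleanup, as in the diff
		return err
	}
	return nil
}

func main() {
	fmt.Println(mountDevice("/dev/xvdf", "/var/lib/kubelet/mounts/vol1", "ext4", true, []string{"noatime"}))
}
```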
@@ -34,7 +34,6 @@ import (
 	kstrings "k8s.io/kubernetes/pkg/util/strings"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/util"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 // This is the primary entrypoint for volume plugins.

@@ -134,7 +133,7 @@ func (plugin *awsElasticBlockStorePlugin) newMounterInternal(spec *volume.Spec,
 		},
 		fsType:   fsType,
 		readOnly: readOnly,
-		diskMounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
+		diskMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
 }

 func (plugin *awsElasticBlockStorePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
@@ -456,7 +455,7 @@ type awsElasticBlockStoreProvisioner struct {
 var _ volume.Provisioner = &awsElasticBlockStoreProvisioner{}

 func (c *awsElasticBlockStoreProvisioner) Provision() (*v1.PersistentVolume, error) {
-	if !volume.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
+	if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
 		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
 	}

@@ -475,7 +474,7 @@ func (c *awsElasticBlockStoreProvisioner) Provision() (*v1.PersistentVolume, err
 			Name:   c.options.PVName,
 			Labels: map[string]string{},
 			Annotations: map[string]string{
-				volumehelper.VolumeDynamicallyCreatedByKey: "aws-ebs-dynamic-provisioner",
+				util.VolumeDynamicallyCreatedByKey: "aws-ebs-dynamic-provisioner",
 			},
 		},
 		Spec: v1.PersistentVolumeSpec{
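Every provisioner touched by this commit runs the same guard, now reached as `util.AccessModesContainedInAll`: reject the claim unless every requested access mode is in the plugin's supported set. A paraphrased sketch of that helper (stand-in type instead of `v1.PersistentVolumeAccessMode`):

```go
package main

import "fmt"

// AccessMode stands in for v1.PersistentVolumeAccessMode.
type AccessMode string

// accessModesContainedInAll reports whether every requested mode is in
// the supported set, the guard each provisioner in this diff runs
// before creating a volume. Paraphrased, not copied, from upstream.
func accessModesContainedInAll(supported, requested []AccessMode) bool {
	for _, r := range requested {
		found := false
		for _, s := range supported {
			if s == r {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}

func main() {
	supported := []AccessMode{"ReadWriteOnce"}
	requested := []AccessMode{"ReadWriteOnce", "ReadWriteMany"}
	fmt.Println(accessModesContainedInAll(supported, requested)) // false: EBS is RWO-only
}
```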
@@ -30,7 +30,7 @@ import (
 	"k8s.io/kubernetes/pkg/util/mount"
 	kstrings "k8s.io/kubernetes/pkg/util/strings"
 	"k8s.io/kubernetes/pkg/volume"
-	"k8s.io/kubernetes/pkg/volume/util"
+	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
 )

 var _ volume.VolumePlugin = &awsElasticBlockStorePlugin{}

@@ -41,7 +41,7 @@ var _ volume.ProvisionableVolumePlugin = &awsElasticBlockStorePlugin{}

 func (plugin *awsElasticBlockStorePlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) {
 	pluginDir := plugin.host.GetVolumeDevicePluginDir(awsElasticBlockStorePluginName)
-	blkutil := util.NewBlockVolumePathHandler()
+	blkutil := volumepathhandler.NewBlockVolumePathHandler()
 	globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID)
 	if err != nil {
 		return nil, err
@@ -80,12 +80,12 @@ func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (aws.K
 	} else {
 		tags = *c.options.CloudTags
 	}
-	tags["Name"] = volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // AWS tags can have 255 characters
+	tags["Name"] = volumeutil.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // AWS tags can have 255 characters

 	capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
 	requestBytes := capacity.Value()
 	// AWS works with gigabytes, convert to GiB with rounding up
-	requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
+	requestGB := int(volumeutil.RoundUpSize(requestBytes, 1024*1024*1024))
 	volumeOptions := &aws.VolumeOptions{
 		CapacityGB: requestGB,
 		Tags:       tags,
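`RoundUpSize` appears throughout this commit with either a 1024-based (GiB) or 1000-based (GB) unit, so a request never provisions less than was asked for. The helper is a one-liner; the sketch below paraphrases it and shows why the unit choice matters:

```go
package main

import "fmt"

const (
	GiB = 1024 * 1024 * 1024
	GB  = 1000 * 1000 * 1000
)

// RoundUpSize rounds a byte request up to whole allocation units, so a
// 1-byte request still provisions one unit. Paraphrased from the
// volume/util helper the diff now imports.
func RoundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
	return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}

func main() {
	// A 1.5 GiB claim becomes a 2 GiB EBS volume:
	fmt.Println(RoundUpSize(3*GiB/2, GiB)) // 2
	// The same bytes rounded in decimal GB (as GCE uses) diverge for
	// larger requests:
	fmt.Println(RoundUpSize(64*GiB, GB)) // 69
}
```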
@@ -36,8 +36,7 @@ import (
 	"k8s.io/kubernetes/pkg/util/keymutex"
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
-	volumeutil "k8s.io/kubernetes/pkg/volume/util"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	"k8s.io/kubernetes/pkg/volume/util"
 )

 type azureDiskDetacher struct {

@@ -249,8 +248,8 @@ func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath str

 	options := []string{}
 	if notMnt {
-		diskMounter := volumehelper.NewSafeFormatAndMountFromHost(azureDataDiskPluginName, attacher.plugin.host)
-		mountOptions := volume.MountOptionFromSpec(spec, options...)
+		diskMounter := util.NewSafeFormatAndMountFromHost(azureDataDiskPluginName, attacher.plugin.host)
+		mountOptions := util.MountOptionFromSpec(spec, options...)
 		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, *volumeSource.FSType, mountOptions)
 		if err != nil {
 			if cleanErr := os.Remove(deviceMountPath); cleanErr != nil {

@@ -295,7 +294,7 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro

 // UnmountDevice unmounts the volume on the node
 func (detacher *azureDiskDetacher) UnmountDevice(deviceMountPath string) error {
-	err := volumeutil.UnmountPath(deviceMountPath, detacher.plugin.host.GetMounter(detacher.plugin.GetPluginName()))
+	err := util.UnmountPath(deviceMountPath, detacher.plugin.host.GetMounter(detacher.plugin.GetPluginName()))
 	if err == nil {
 		glog.V(4).Infof("azureDisk - Device %s was unmounted", deviceMountPath)
 	} else {
@@ -116,7 +116,7 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
 	}

 	if m.options.MountOptions != nil {
-		options = volume.JoinMountOptions(m.options.MountOptions, options)
+		options = util.JoinMountOptions(m.options.MountOptions, options)
 	}

 	glog.V(4).Infof("azureDisk - Attempting to mount %s on %s", diskName, dir)
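`JoinMountOptions` merges the options the plugin computed with the user-supplied `mountOptions` from the PV, de-duplicating the result. Upstream uses a string set; this sketch does the same with a map, and the sort exists only to make the example deterministic:

```go
package main

import (
	"fmt"
	"sort"
)

// joinMountOptions merges plugin-computed options with user-supplied
// ones and drops duplicates. Paraphrased from the volume/util helper.
func joinMountOptions(userOptions, systemOptions []string) []string {
	seen := map[string]struct{}{}
	for _, o := range systemOptions {
		seen[o] = struct{}{}
	}
	for _, o := range userOptions {
		seen[o] = struct{}{}
	}
	out := make([]string, 0, len(seen))
	for o := range seen {
		out = append(out, o)
	}
	sort.Strings(out) // deterministic order for the example only
	return out
}

func main() {
	fmt.Println(joinMountOptions([]string{"nofail", "ro"}, []string{"ro"}))
	// [nofail ro]
}
```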
@@ -24,6 +24,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/volume"
+	"k8s.io/kubernetes/pkg/volume/util"
 )

 type azureDiskProvisioner struct {

@@ -65,7 +66,7 @@ func (d *azureDiskDeleter) Delete() error {
 }

 func (p *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
-	if !volume.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
+	if !util.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
 		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
 	}
 	supportedModes := p.plugin.GetAccessModes()

@@ -93,10 +94,10 @@ func (p *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
 		err error
 	)
 	// maxLength = 79 - (4 for ".vhd") = 75
-	name := volume.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 75)
+	name := util.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 75)
 	capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
 	requestBytes := capacity.Value()
-	requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
+	requestGB := int(util.RoundUpSize(requestBytes, 1024*1024*1024))

 	for k, v := range p.options.Parameters {
 		switch strings.ToLower(k) {
@@ -31,7 +31,7 @@ import (
 	"k8s.io/kubernetes/pkg/util/mount"
 	kstrings "k8s.io/kubernetes/pkg/util/strings"
 	"k8s.io/kubernetes/pkg/volume"
-	"k8s.io/kubernetes/pkg/volume/util"
+	volutil "k8s.io/kubernetes/pkg/volume/util"
 )

 // ProbeVolumePlugins is the primary endpoint for volume plugins

@@ -122,7 +122,7 @@ func (plugin *azureFilePlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod
 		secretName:   secretName,
 		shareName:    share,
 		readOnly:     readOnly,
-		mountOptions: volume.MountOptionFromSpec(spec),
+		mountOptions: volutil.MountOptionFromSpec(spec),
 	}, nil
 }

@@ -168,7 +168,7 @@ func (plugin *azureFilePlugin) ExpandVolumeDevice(
 		return oldSize, err
 	}

-	if err := azure.ResizeFileShare(accountName, accountKey, shareName, int(volume.RoundUpToGiB(newSize))); err != nil {
+	if err := azure.ResizeFileShare(accountName, accountKey, shareName, int(volutil.RoundUpToGiB(newSize))); err != nil {
 		return oldSize, err
 	}

@@ -262,7 +262,7 @@ func (b *azureFileMounter) SetUpAt(dir string, fsGroup *int64) error {
 		if b.readOnly {
 			options = append(options, "ro")
 		}
-		mountOptions = volume.JoinMountOptions(b.mountOptions, options)
+		mountOptions = volutil.JoinMountOptions(b.mountOptions, options)
 		mountOptions = appendDefaultMountOptions(mountOptions, fsGroup)
 	}

@@ -306,7 +306,7 @@ func (c *azureFileUnmounter) TearDown() error {
 }

 func (c *azureFileUnmounter) TearDownAt(dir string) error {
-	return util.UnmountPath(dir, c.mounter)
+	return volutil.UnmountPath(dir, c.mounter)
 }

 func getVolumeSource(spec *volume.Spec) (string, bool, error) {
@@ -28,7 +28,7 @@ import (
 	"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
 	utilstrings "k8s.io/kubernetes/pkg/util/strings"
 	"k8s.io/kubernetes/pkg/volume"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	"k8s.io/kubernetes/pkg/volume/util"
 )

 var _ volume.DeletableVolumePlugin = &azureFilePlugin{}

@@ -132,18 +132,18 @@ type azureFileProvisioner struct {
 var _ volume.Provisioner = &azureFileProvisioner{}

 func (a *azureFileProvisioner) Provision() (*v1.PersistentVolume, error) {
-	if !volume.AccessModesContainedInAll(a.plugin.GetAccessModes(), a.options.PVC.Spec.AccessModes) {
+	if !util.AccessModesContainedInAll(a.plugin.GetAccessModes(), a.options.PVC.Spec.AccessModes) {
 		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", a.options.PVC.Spec.AccessModes, a.plugin.GetAccessModes())
 	}

 	var sku, location, account string

 	// File share name has a length limit of 63, and it cannot contain two consecutive '-'s.
-	name := volume.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 63)
+	name := util.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 63)
 	name = strings.Replace(name, "--", "-", -1)
 	capacity := a.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
 	requestBytes := capacity.Value()
-	requestGiB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
+	requestGiB := int(util.RoundUpSize(requestBytes, 1024*1024*1024))
 	secretNamespace := a.options.PVC.Namespace
 	// Apply ProvisionerParameters (case-insensitive). We leave validation of
 	// the values to the cloud provider.

@@ -182,7 +182,7 @@ func (a *azureFileProvisioner) Provision() (*v1.PersistentVolume, error) {
 			Name:   a.options.PVName,
 			Labels: map[string]string{},
 			Annotations: map[string]string{
-				volumehelper.VolumeDynamicallyCreatedByKey: "azure-file-dynamic-provisioner",
+				util.VolumeDynamicallyCreatedByKey: "azure-file-dynamic-provisioner",
 			},
 		},
 		Spec: v1.PersistentVolumeSpec{
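`GenerateVolumeName` takes a backend-specific length limit at every call site in this diff: 255 for AWS tags, 75 for Azure `.vhd` names, 63 for Azure file shares and GCE PDs, 255 for Cinder. A paraphrased sketch of the helper; it assumes, as the call sites do, that the PV name itself is shorter than the limit:

```go
package main

import "fmt"

// generateVolumeName builds "<clusterName>-dynamic-<pvName>" and trims
// the prefix so the whole name fits maxLength. Paraphrased from the
// volume/util helper; assumes len(pvName)+1 < maxLength.
func generateVolumeName(clusterName, pvName string, maxLength int) string {
	prefix := clusterName + "-dynamic"
	if len(prefix)+len(pvName)+1 > maxLength {
		prefix = prefix[:maxLength-len(pvName)-1]
	}
	return prefix + "-" + pvName
}

func main() {
	fmt.Println(generateVolumeName("kubernetes", "pvc-1234", 63))
	// kubernetes-dynamic-pvc-1234
}
```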
@@ -148,7 +148,7 @@ func (plugin *cephfsPlugin) newMounterInternal(spec *volume.Spec, podUID types.U
 			readonly:     readOnly,
 			mounter:      mounter,
 			plugin:       plugin,
-			mountOptions: volume.MountOptionFromSpec(spec),
+			mountOptions: util.MountOptionFromSpec(spec),
 		},
 	}, nil
 }

@@ -323,7 +323,7 @@ func (cephfsVolume *cephfs) execMount(mountpoint string) error {
 		}
 		src += hosts[i] + ":" + cephfsVolume.path

-	mountOptions := volume.JoinMountOptions(cephfsVolume.mountOptions, opt)
+	mountOptions := util.JoinMountOptions(cephfsVolume.mountOptions, opt)
 	if err := cephfsVolume.mounter.Mount(src, mountpoint, "ceph", mountOptions); err != nil {
 		return fmt.Errorf("CephFS: mount failed: %v", err)
 	}
@@ -31,7 +31,6 @@ import (
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 type cinderDiskAttacher struct {

@@ -286,8 +285,8 @@ func (attacher *cinderDiskAttacher) MountDevice(spec *volume.Spec, devicePath st
 		options = append(options, "ro")
 	}
 	if notMnt {
-		diskMounter := volumehelper.NewSafeFormatAndMountFromHost(cinderVolumePluginName, attacher.host)
-		mountOptions := volume.MountOptionFromSpec(spec, options...)
+		diskMounter := volumeutil.NewSafeFormatAndMountFromHost(cinderVolumePluginName, attacher.host)
+		mountOptions := volumeutil.MountOptionFromSpec(spec, options...)
 		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
 		if err != nil {
 			os.Remove(deviceMountPath)

@@ -34,7 +34,6 @@ import (
 	kstrings "k8s.io/kubernetes/pkg/util/strings"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/util"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 const (

@@ -145,7 +144,7 @@ func (plugin *cinderPlugin) newMounterInternal(spec *volume.Spec, podUID types.U
 		},
 		fsType:   fsType,
 		readOnly: readOnly,
-		blockDeviceMounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
+		blockDeviceMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
 }

 func (plugin *cinderPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {

@@ -501,7 +500,7 @@ type cinderVolumeProvisioner struct {
 var _ volume.Provisioner = &cinderVolumeProvisioner{}

 func (c *cinderVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
-	if !volume.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
+	if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
 		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
 	}

@@ -515,7 +514,7 @@ func (c *cinderVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
 			Name:   c.options.PVName,
 			Labels: labels,
 			Annotations: map[string]string{
-				volumehelper.VolumeDynamicallyCreatedByKey: "cinder-dynamic-provisioner",
+				util.VolumeDynamicallyCreatedByKey: "cinder-dynamic-provisioner",
 			},
 		},
 		Spec: v1.PersistentVolumeSpec{
@@ -32,6 +32,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	"k8s.io/kubernetes/pkg/volume"
+	volutil "k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/utils/exec"
 )

@@ -170,8 +171,8 @@ func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string,
 	capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
 	volSizeBytes := capacity.Value()
 	// Cinder works with gigabytes, convert to GiB with rounding up
-	volSizeGB := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024))
-	name := volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // Cinder volume name can have up to 255 characters
+	volSizeGB := int(volutil.RoundUpSize(volSizeBytes, 1024*1024*1024))
+	name := volutil.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // Cinder volume name can have up to 255 characters
 	vtype := ""
 	availability := ""
 	// Apply ProvisionerParameters (case-insensitive). We leave validation of

@@ -203,7 +204,7 @@ func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string,
 		// if we did not get any zones, lets leave it blank and gophercloud will
 		// use zone "nova" as default
 		if len(zones) > 0 {
-			availability = volume.ChooseZoneForVolume(zones, c.options.PVC.Name)
+			availability = volutil.ChooseZoneForVolume(zones, c.options.PVC.Name)
 		}
 	}
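`ChooseZoneForVolume` spreads dynamically provisioned volumes across the zones the cloud reported, keyed by the claim name so the choice is deterministic. The real helper also special-cases StatefulSet-style `name-<ordinal>` claims so set members land in different zones; that refinement is omitted from this simplified sketch:

```go
package main

import (
	"fmt"
	"hash/fnv"
	"sort"
)

// chooseZoneForVolume picks a zone by hashing the claim name over the
// sorted zone list. Simplified paraphrase of the volume/util helper,
// not its exact algorithm.
func chooseZoneForVolume(zones []string, pvcName string) string {
	sort.Strings(zones) // stable order so the hash picks deterministically
	h := fnv.New32a()
	h.Write([]byte(pvcName))
	return zones[h.Sum32()%uint32(len(zones))]
}

func main() {
	zones := []string{"nova-az1", "nova-az2", "nova-az3"}
	fmt.Println(chooseZoneForVolume(zones, "data-mysql-0"))
}
```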
@@ -313,7 +313,7 @@ func (c *configMapVolumeUnmounter) TearDown() error {
 }

 func (c *configMapVolumeUnmounter) TearDownAt(dir string) error {
-	return volume.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
+	return volumeutil.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
 }

 func getVolumeSource(spec *volume.Spec) (*v1.ConfigMapVolumeSource, bool) {

@@ -283,7 +283,7 @@ func (c *downwardAPIVolumeUnmounter) TearDown() error {
 }

 func (c *downwardAPIVolumeUnmounter) TearDownAt(dir string) error {
-	return volume.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
+	return volumeutil.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
 }

 func (b *downwardAPIVolumeMounter) getMetaDir() string {
@@ -31,7 +31,6 @@ import (
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 type fcAttacher struct {

@@ -113,7 +112,7 @@ func (attacher *fcAttacher) MountDevice(spec *volume.Spec, devicePath string, de
 	}
 	if notMnt {
 		diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Exec: attacher.host.GetExec(fcPluginName)}
-		mountOptions := volume.MountOptionFromSpec(spec, options...)
+		mountOptions := volumeutil.MountOptionFromSpec(spec, options...)
 		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
 		if err != nil {
 			os.Remove(deviceMountPath)

@@ -189,7 +188,7 @@ func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost) (*fcDiskMoun
 	}
 	// TODO: remove feature gate check after no longer needed
 	if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
-		volumeMode, err := volumehelper.GetVolumeMode(spec)
+		volumeMode, err := volumeutil.GetVolumeMode(spec)
 		if err != nil {
 			return nil, err
 		}

@@ -199,7 +198,7 @@ func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost) (*fcDiskMoun
 			fsType:     fc.FSType,
 			volumeMode: volumeMode,
 			readOnly:   readOnly,
-			mounter:    volumehelper.NewSafeFormatAndMountFromHost(fcPluginName, host),
+			mounter:    volumeutil.NewSafeFormatAndMountFromHost(fcPluginName, host),
 			deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()),
 		}, nil
 	}

@@ -207,7 +206,7 @@ func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost) (*fcDiskMoun
 		fcDisk:     fcDisk,
 		fsType:     fc.FSType,
 		readOnly:   readOnly,
-		mounter:    volumehelper.NewSafeFormatAndMountFromHost(fcPluginName, host),
+		mounter:    volumeutil.NewSafeFormatAndMountFromHost(fcPluginName, host),
 		deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()),
 	}, nil
 }
@@ -32,7 +32,7 @@ import (
 	utilstrings "k8s.io/kubernetes/pkg/util/strings"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/util"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
 )

 // This is the primary entrypoint for volume plugins.

@@ -133,7 +133,7 @@ func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID,
 	}
 	// TODO: remove feature gate check after no longer needed
 	if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
-		volumeMode, err := volumehelper.GetVolumeMode(spec)
+		volumeMode, err := util.GetVolumeMode(spec)
 		if err != nil {
 			return nil, err
 		}

@@ -297,7 +297,7 @@ func (plugin *fcPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volu
 // globalMapPathUUID : plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid}
 func (plugin *fcPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) {
 	pluginDir := plugin.host.GetVolumeDevicePluginDir(fcPluginName)
-	blkutil := util.NewBlockVolumePathHandler()
+	blkutil := volumepathhandler.NewBlockVolumePathHandler()
 	globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID)
 	if err != nil {
 		return nil, err
@@ -30,6 +30,7 @@ import (
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/volume"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
+	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
 )

 type ioHandler interface {

@@ -354,14 +355,14 @@ func (util *FCUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath stri
 	// and remove loopback device then it will be remained on kubelet node. To avoid the problem,
 	// local attach plugins needs to remove loopback device during TearDownDevice().
 	var devices []string
-	blkUtil := volumeutil.NewBlockVolumePathHandler()
+	blkUtil := volumepathhandler.NewBlockVolumePathHandler()
 	dm := c.deviceUtil.FindMultipathDeviceForDevice(dstPath)
 	if len(dm) != 0 {
 		dstPath = dm
 	}
-	loop, err := volumeutil.BlockVolumePathHandler.GetLoopDevice(blkUtil, dstPath)
+	loop, err := volumepathhandler.BlockVolumePathHandler.GetLoopDevice(blkUtil, dstPath)
 	if err != nil {
-		if err.Error() != volumeutil.ErrDeviceNotFound {
+		if err.Error() != volumepathhandler.ErrDeviceNotFound {
 			return fmt.Errorf("fc: failed to get loopback for destination path: %v, err: %v", dstPath, err)
 		}
 		glog.Warning("fc: loopback for destination path: %s not found", dstPath)

@@ -389,7 +390,7 @@ func (util *FCUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath stri
 	}
 	if len(loop) != 0 {
 		// The volume was successfully detached from node. We can safely remove the loopback.
-		err = volumeutil.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop)
+		err = volumepathhandler.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop)
 		if err != nil {
 			return fmt.Errorf("fc: failed to remove loopback :%v, err: %v", loop, err)
 		}
@@ -30,6 +30,7 @@ import (
 	"k8s.io/kubernetes/pkg/util/mount"
 	utilstrings "k8s.io/kubernetes/pkg/util/strings"
 	"k8s.io/kubernetes/pkg/volume"
+	"k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/utils/exec"
 )

@@ -103,7 +104,7 @@ func (plugin *flexVolumePlugin) getExecutable() string {
 	execName := parts[len(parts)-1]
 	execPath := path.Join(plugin.execPath, execName)
 	if runtime.GOOS == "windows" {
-		execPath = volume.GetWindowsPath(execPath)
+		execPath = util.GetWindowsPath(execPath)
 	}
 	return execPath
 }
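`GetWindowsPath` converts the slash-separated plugin path the flexvolume code builds into a Windows path, defaulting rooted paths to the C: drive. A paraphrased sketch of the helper:

```go
package main

import (
	"fmt"
	"strings"
)

// getWindowsPath swaps slashes for backslashes and prefixes rooted
// paths with "c:". Paraphrased from the volume/util helper the
// flexvolume plugin now calls.
func getWindowsPath(path string) string {
	path = strings.Replace(path, "/", "\\", -1)
	if strings.HasPrefix(path, "\\") {
		path = "c:" + path
	}
	return path
}

func main() {
	fmt.Println(getWindowsPath("/usr/libexec/kubernetes/plugin"))
	// c:\usr\libexec\kubernetes\plugin
}
```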
@@ -22,7 +22,8 @@ import (

 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/rand"
-	"k8s.io/kubernetes/pkg/volume"
+	volutil "k8s.io/kubernetes/pkg/volume/util"

 	flockerapi "github.com/clusterhq/flocker-go"
 	"github.com/golang/glog"

@@ -73,7 +74,7 @@ func (util *FlockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID

 	capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
 	requestBytes := capacity.Value()
-	volumeSizeGB = int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
+	volumeSizeGB = int(volutil.RoundUpSize(requestBytes, 1024*1024*1024))

 	createOptions := &flockerapi.CreateDatasetOptions{
 		MaximumSize: requestBytes,
@@ -23,7 +23,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/volume"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	"k8s.io/kubernetes/pkg/volume/util"
 )

 type volumeManager interface {

@@ -55,7 +55,7 @@ type flockerVolumeProvisioner struct {
 var _ volume.Provisioner = &flockerVolumeProvisioner{}

 func (c *flockerVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
-	if !volume.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
+	if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
 		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
 	}

@@ -77,7 +77,7 @@ func (c *flockerVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
 			Name:   c.options.PVName,
 			Labels: map[string]string{},
 			Annotations: map[string]string{
-				volumehelper.VolumeDynamicallyCreatedByKey: "flocker-dynamic-provisioner",
+				util.VolumeDynamicallyCreatedByKey: "flocker-dynamic-provisioner",
 			},
 		},
 		Spec: v1.PersistentVolumeSpec{
@@ -32,7 +32,6 @@ import (
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 type gcePersistentDiskAttacher struct {

@@ -209,8 +208,8 @@ func (attacher *gcePersistentDiskAttacher) MountDevice(spec *volume.Spec, device
 		options = append(options, "ro")
 	}
 	if notMnt {
-		diskMounter := volumehelper.NewSafeFormatAndMountFromHost(gcePersistentDiskPluginName, attacher.host)
-		mountOptions := volume.MountOptionFromSpec(spec, options...)
+		diskMounter := volumeutil.NewSafeFormatAndMountFromHost(gcePersistentDiskPluginName, attacher.host)
+		mountOptions := volumeutil.MountOptionFromSpec(spec, options...)
 		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
 		if err != nil {
 			os.Remove(deviceMountPath)

@@ -31,7 +31,6 @@ import (
 	kstrings "k8s.io/kubernetes/pkg/util/strings"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/util"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 // This is the primary entrypoint for volume plugins.

@@ -398,7 +397,7 @@ type gcePersistentDiskProvisioner struct {
 var _ volume.Provisioner = &gcePersistentDiskProvisioner{}

 func (c *gcePersistentDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
-	if !volume.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
+	if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
 		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
 	}

@@ -416,7 +415,7 @@ func (c *gcePersistentDiskProvisioner) Provision() (*v1.PersistentVolume, error)
 			Name:   c.options.PVName,
 			Labels: map[string]string{},
 			Annotations: map[string]string{
-				volumehelper.VolumeDynamicallyCreatedByKey: "gce-pd-dynamic-provisioner",
+				util.VolumeDynamicallyCreatedByKey: "gce-pd-dynamic-provisioner",
 			},
 		},
 		Spec: v1.PersistentVolumeSpec{
@@ -28,7 +28,7 @@ import (
 	"k8s.io/kubernetes/pkg/util/mount"
 	kstrings "k8s.io/kubernetes/pkg/util/strings"
 	"k8s.io/kubernetes/pkg/volume"
-	"k8s.io/kubernetes/pkg/volume/util"
+	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
 )

 var _ volume.VolumePlugin = &gcePersistentDiskPlugin{}

@@ -40,7 +40,7 @@ var _ volume.ExpandableVolumePlugin = &gcePersistentDiskPlugin{}

 func (plugin *gcePersistentDiskPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) {
 	pluginDir := plugin.host.GetVolumeDevicePluginDir(gcePersistentDiskPluginName)
-	blkutil := util.NewBlockVolumePathHandler()
+	blkutil := volumepathhandler.NewBlockVolumePathHandler()
 	globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID)
 	if err != nil {
 		return nil, err
@@ -30,6 +30,7 @@ import (
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
 	volumetest "k8s.io/kubernetes/pkg/volume/testing"
+	"k8s.io/kubernetes/pkg/volume/util"
 )

 func TestCanSupport(t *testing.T) {

@@ -176,7 +177,7 @@ func TestPlugin(t *testing.T) {
 	}
 	cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
 	size := cap.Value()
-	if size != 100*volume.GB {
+	if size != 100*util.GB {
 		t.Errorf("Provision() returned unexpected volume size: %v", size)
 	}
@@ -82,10 +82,10 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
 		return "", 0, nil, "", err
 	}

-	name := volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 63) // GCE PD name can have up to 63 characters
+	name := volumeutil.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 63) // GCE PD name can have up to 63 characters
 	capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
 	// GCE PDs are allocated in chunks of GBs (not GiBs)
-	requestGB := volume.RoundUpToGB(capacity)
+	requestGB := volumeutil.RoundUpToGB(capacity)

 	// Apply Parameters.
 	// Values for parameter "replication-type" are canonicalized to lower case.

@@ -169,13 +169,13 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
 	} else if zonePresent && !zonesPresent {
 		// 10 - "zone" specified
 		// Use specified zone
-		if err := volume.ValidateZone(configuredZone); err != nil {
+		if err := volumeutil.ValidateZone(configuredZone); err != nil {
 			return "", 0, nil, "", err
 		}
 		zones = make(sets.String)
 		zones.Insert(configuredZone)
 	}
-	zone := volume.ChooseZoneForVolume(zones, c.options.PVC.Name)
+	zone := volumeutil.ChooseZoneForVolume(zones, c.options.PVC.Name)

 	if err := cloud.CreateDisk(
 		name,
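All of these call sites now share the rounding helpers from pkg/volume/util, and they all reduce to the same ceiling division. A minimal sketch (the upstream helper of this era has the same shape, but treat this as illustrative):

    // RoundUpSize rounds volumeSizeBytes up to a whole number of allocation
    // units, e.g. RoundUpSize(1500*1000*1000, 1024*1024*1024) == 2.
    func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 {
        return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
    }

Note the unit split visible in this patch: GCE rounds to GB (1000*1000*1000, via RoundUpToGB) while heketi, Portworx, Photon and Quobyte round to GiB (1024*1024*1024), so the same 5Gi claim rounds up to 6 on GCE but stays 5 elsewhere.
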
@@ -237,7 +237,7 @@ func createRegionalPD(
 		selectedReplicaZones = replicaZones
 	} else {
 		// Must randomly select zones
-		selectedReplicaZones = volume.ChooseZonesForVolume(
+		selectedReplicaZones = volumeutil.ChooseZonesForVolume(
 			replicaZones, pvcName, maxRegionalPDZones)
 	}
@@ -264,7 +264,7 @@ func (c *gitRepoVolumeUnmounter) TearDown() error {

 // TearDownAt simply deletes everything in the directory.
 func (c *gitRepoVolumeUnmounter) TearDownAt(dir string) error {
-	return volume.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
+	return volumeutil.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
 }

 func getVolumeSource(spec *volume.Spec) (*v1.GitRepoVolumeSource, bool) {
@@ -43,7 +43,6 @@ import (
 	"k8s.io/kubernetes/pkg/util/strings"
 	"k8s.io/kubernetes/pkg/volume"
 	volutil "k8s.io/kubernetes/pkg/volume/util"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 // ProbeVolumePlugins is the primary entrypoint for volume plugins.

@@ -178,7 +177,7 @@ func (plugin *glusterfsPlugin) newMounterInternal(spec *volume.Spec, ep *v1.Endp
 		hosts: ep,
 		path: source.Path,
 		readOnly: readOnly,
-		mountOptions: volume.MountOptionFromSpec(spec),
+		mountOptions: volutil.MountOptionFromSpec(spec),
 	}, nil
 }

@@ -328,7 +327,7 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {

 	}
 	options = append(options, "backup-volfile-servers="+dstrings.Join(addrlist[:], ":"))
-	mountOptions := volume.JoinMountOptions(b.mountOptions, options)
+	mountOptions := volutil.JoinMountOptions(b.mountOptions, options)

 	// with `backup-volfile-servers` mount option in place, it is not required to
 	// iterate over all the servers in the addrlist. A mount attempt with this option

@@ -502,7 +501,7 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll

 		pvName := pv.ObjectMeta.Name

-		gidStr, ok := pv.Annotations[volumehelper.VolumeGidAnnotationKey]
+		gidStr, ok := pv.Annotations[volutil.VolumeGidAnnotationKey]

 		if !ok {
 			glog.Warningf("no GID found in pv %v", pvName)

@@ -583,7 +582,7 @@ func (plugin *glusterfsPlugin) getGidTable(className string, min int, max int) (
 }

 func (d *glusterfsVolumeDeleter) getGid() (int, bool, error) {
-	gidStr, ok := d.spec.Annotations[volumehelper.VolumeGidAnnotationKey]
+	gidStr, ok := d.spec.Annotations[volutil.VolumeGidAnnotationKey]

 	if !ok {
 		return 0, false, nil

@@ -669,7 +668,7 @@ func (d *glusterfsVolumeDeleter) Delete() error {
 }

 func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
-	if !volume.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
+	if !volutil.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
 		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
 	}
@@ -723,12 +722,12 @@ func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
 	gidStr := strconv.FormatInt(int64(gid), 10)

 	pv.Annotations = map[string]string{
-		volumehelper.VolumeGidAnnotationKey: gidStr,
-		volumehelper.VolumeDynamicallyCreatedByKey: heketiAnn,
+		volutil.VolumeGidAnnotationKey: gidStr,
+		volutil.VolumeDynamicallyCreatedByKey: heketiAnn,
 		glusterTypeAnn: "file",
 		"Description": glusterDescAnn,
 		v1.MountOptionAnnotation: "auto_unmount",
 		heketiVolIDAnn: volID,
 	}

 	pv.Spec.Capacity = v1.ResourceList{

@@ -743,8 +742,9 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolum
 	capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]

 	// GlusterFS/heketi creates volumes in units of GiB.
-	sz := int(volume.RoundUpToGiB(capacity))
+	sz := int(volutil.RoundUpToGiB(capacity))
 	glog.V(2).Infof("create volume of size %dGiB", sz)

 	if p.url == "" {
 		glog.Errorf("REST server endpoint is empty")
 		return nil, 0, "", fmt.Errorf("failed to create glusterfs REST client, REST URL is empty")

@@ -1126,10 +1126,10 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res

 	// Find out delta size
 	expansionSize := (newSize.Value() - oldSize.Value())
-	expansionSizeGiB := int(volume.RoundUpSize(expansionSize, volume.GIB))
+	expansionSizeGiB := int(volutil.RoundUpSize(expansionSize, volutil.GIB))

 	// Find out requested Size
-	requestGiB := volume.RoundUpToGiB(newSize)
+	requestGiB := volutil.RoundUpToGiB(newSize)

 	//Check the existing volume size
 	currentVolumeInfo, err := cli.VolumeInfo(volumeID)
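To make the expansion arithmetic concrete with illustrative numbers (not taken from the patch): growing a 5Gi gluster volume to 12Gi gives expansionSize = 7*2^30 bytes, so expansionSizeGiB = RoundUpSize(expansionSize, GIB) = 7 while requestGiB = RoundUpToGiB(12Gi) = 12; the code derives both the 7GiB delta and the 12GiB requested size before checking the existing volume's size via cli.VolumeInfo.
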
@@ -27,7 +27,8 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	"k8s.io/kubernetes/pkg/volume/util"
+	"k8s.io/kubernetes/pkg/volume/util/recyclerclient"
 	"k8s.io/kubernetes/pkg/volume/validation"
 )

@@ -129,13 +130,13 @@ func (plugin *hostPathPlugin) NewUnmounter(volName string, podUID types.UID) (vo
 // Recycle recycles/scrubs clean a HostPath volume.
 // Recycle blocks until the pod has completed or any error occurs.
 // HostPath recycling only works in single node clusters and is meant for testing purposes only.
-func (plugin *hostPathPlugin) Recycle(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder) error {
+func (plugin *hostPathPlugin) Recycle(pvName string, spec *volume.Spec, eventRecorder recyclerclient.RecycleEventRecorder) error {
 	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.HostPath == nil {
 		return fmt.Errorf("spec.PersistentVolumeSource.HostPath is nil")
 	}

 	pod := plugin.config.RecyclerPodTemplate
-	timeout := volume.CalculateTimeoutForVolume(plugin.config.RecyclerMinimumTimeout, plugin.config.RecyclerTimeoutIncrement, spec.PersistentVolume)
+	timeout := util.CalculateTimeoutForVolume(plugin.config.RecyclerMinimumTimeout, plugin.config.RecyclerTimeoutIncrement, spec.PersistentVolume)
 	// overrides
 	pod.Spec.ActiveDeadlineSeconds = &timeout
 	pod.Spec.Volumes[0].VolumeSource = v1.VolumeSource{

@@ -143,7 +144,7 @@ func (plugin *hostPathPlugin) Recycle(pvName string, spec *volume.Spec, eventRec
 			Path: spec.PersistentVolume.Spec.HostPath.Path,
 		},
 	}
-	return volume.RecycleVolumeByWatchingPodUntilCompletion(pvName, pod, plugin.host.GetKubeClient(), eventRecorder)
+	return recyclerclient.RecycleVolumeByWatchingPodUntilCompletion(pvName, pod, plugin.host.GetKubeClient(), eventRecorder)
 }

 func (plugin *hostPathPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
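The recycler path moves wholesale into pkg/volume/util/recyclerclient, while the per-PV pod timeout helper lands in pkg/volume/util. A minimal sketch of the timeout rule, assuming the helper keeps its historical base-plus-increment-per-GiB behavior (the local function name is hypothetical):

    // calculateTimeout mirrors util.CalculateTimeoutForVolume as used above:
    // a minimum timeout plus an increment for every GiB of PV capacity, so
    // bigger volumes get proportionally longer to scrub.
    func calculateTimeout(minimumTimeout, timeoutIncrement int, pv *v1.PersistentVolume) int64 {
        q := pv.Spec.Capacity[v1.ResourceStorage]
        giSize := q.Value() / (1024 * 1024 * 1024)
        return int64(minimumTimeout) + giSize*int64(timeoutIncrement)
    }

The pod built from RecyclerPodTemplate runs with this value as its ActiveDeadlineSeconds, and RecycleVolumeByWatchingPodUntilCompletion blocks until that pod completes or fails.
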
@@ -272,7 +273,7 @@ func (r *hostPathProvisioner) Provision() (*v1.PersistentVolume, error) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: r.options.PVName,
 			Annotations: map[string]string{
-				volumehelper.VolumeDynamicallyCreatedByKey: "hostpath-dynamic-provisioner",
+				util.VolumeDynamicallyCreatedByKey: "hostpath-dynamic-provisioner",
 			},
 		},
 		Spec: v1.PersistentVolumeSpec{
@@ -29,7 +29,6 @@ import (
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 type iscsiAttacher struct {

@@ -113,7 +112,7 @@ func (attacher *iscsiAttacher) MountDevice(spec *volume.Spec, devicePath string,
 	}
 	if notMnt {
 		diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Exec: attacher.host.GetExec(iscsiPluginName)}
-		mountOptions := volume.MountOptionFromSpec(spec, options...)
+		mountOptions := volumeutil.MountOptionFromSpec(spec, options...)
 		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, fsType, mountOptions)
 		if err != nil {
 			os.Remove(deviceMountPath)

@@ -184,7 +183,7 @@ func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost, pod *v1.Pod)
 	exec := host.GetExec(iscsiPluginName)
 	// TODO: remove feature gate check after no longer needed
 	if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
-		volumeMode, err := volumehelper.GetVolumeMode(spec)
+		volumeMode, err := volumeutil.GetVolumeMode(spec)
 		if err != nil {
 			return nil, err
 		}
@@ -22,6 +22,7 @@ import (
 	"github.com/golang/glog"
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
+	"k8s.io/kubernetes/pkg/volume/util"
 )

 // Abstract interface to disk operations.

@@ -63,7 +64,7 @@ func diskSetUp(manager diskManager, b iscsiDiskMounter, volPath string, mounter
 		b.iscsiDisk.Iface = b.iscsiDisk.Portals[0] + ":" + b.iscsiDisk.VolName
 	}
 	globalPDPath := manager.MakeGlobalPDName(*b.iscsiDisk)
-	mountOptions := volume.JoinMountOptions(b.mountOptions, options)
+	mountOptions := util.JoinMountOptions(b.mountOptions, options)
 	err = mounter.Mount(globalPDPath, volPath, "", mountOptions)
 	if err != nil {
 		glog.Errorf("Failed to bind mount: source:%s, target:%s, err:%v", globalPDPath, volPath, err)
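MountOptionFromSpec and JoinMountOptions travel together in this patch because they are two halves of one pipeline: read the mount options off the PV spec, then merge them with the options the plugin adds itself (such as "ro"). A sketch of the merge half, under the assumption that options are plain strings and duplicates should collapse (function name is hypothetical; the upstream helper uses a string set to the same effect):

    // joinMountOptions combines user options from the PV with
    // plugin-supplied ones, dropping duplicates.
    func joinMountOptions(userOptions, systemOptions []string) []string {
        seen := make(map[string]bool)
        var out []string
        for _, o := range systemOptions {
            if !seen[o] {
                seen[o] = true
                out = append(out, o)
            }
        }
        for _, o := range userOptions {
            if !seen[o] {
                seen[o] = true
                out = append(out, o)
            }
        }
        return out
    }
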
@@ -31,6 +31,7 @@ import (
 	utilstrings "k8s.io/kubernetes/pkg/util/strings"
 	"k8s.io/kubernetes/pkg/volume"
 	ioutil "k8s.io/kubernetes/pkg/volume/util"
+	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
 )

 // This is the primary entrypoint for volume plugins.

@@ -118,7 +119,7 @@ func (plugin *iscsiPlugin) newMounterInternal(spec *volume.Spec, podUID types.UI
 		mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
 		exec: exec,
 		deviceUtil: ioutil.NewDeviceHandler(ioutil.NewIOHandler()),
-		mountOptions: volume.MountOptionFromSpec(spec),
+		mountOptions: ioutil.MountOptionFromSpec(spec),
 	}, nil
 }

@@ -235,7 +236,7 @@ func (plugin *iscsiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*v

 func (plugin *iscsiPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) {
 	pluginDir := plugin.host.GetVolumeDevicePluginDir(iscsiPluginName)
-	blkutil := ioutil.NewBlockVolumePathHandler()
+	blkutil := volumepathhandler.NewBlockVolumePathHandler()
 	globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID)
 	if err != nil {
 		return nil, err
@@ -33,6 +33,7 @@ import (
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
+	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
 )

 var (

@@ -518,10 +519,10 @@ func (util *ISCSIUtil) DetachBlockISCSIDisk(c iscsiDiskUnmapper, mapPath string)
 	// GenerateUnmapDeviceFunc() in operation_generator. As a result, these plugins fail to get
 	// and remove loopback device then it will be remained on kubelet node. To avoid the problem,
 	// local attach plugins needs to remove loopback device during TearDownDevice().
-	blkUtil := volumeutil.NewBlockVolumePathHandler()
-	loop, err := volumeutil.BlockVolumePathHandler.GetLoopDevice(blkUtil, devicePath)
+	blkUtil := volumepathhandler.NewBlockVolumePathHandler()
+	loop, err := volumepathhandler.BlockVolumePathHandler.GetLoopDevice(blkUtil, devicePath)
 	if err != nil {
-		if err.Error() != volumeutil.ErrDeviceNotFound {
+		if err.Error() != volumepathhandler.ErrDeviceNotFound {
 			return fmt.Errorf("failed to get loopback for device: %v, err: %v", devicePath, err)
 		}
 		glog.Warning("iscsi: loopback for device: %s not found", device)

@@ -533,7 +534,7 @@ func (util *ISCSIUtil) DetachBlockISCSIDisk(c iscsiDiskUnmapper, mapPath string)
 	}
 	if len(loop) != 0 {
 		// The volume was successfully detached from node. We can safely remove the loopback.
-		err = volumeutil.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop)
+		err = volumepathhandler.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop)
 		if err != nil {
 			return fmt.Errorf("failed to remove loopback :%v, err: %v", loop, err)
 		}
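The GetLoopDevice/RemoveLoopDevice pair above is the whole loopback lifecycle for block volumes on teardown. A condensed sketch of that flow, assuming the volumepathhandler package as imported in these hunks (the plain method-call spelling below is equivalent to the method-expression form the diff uses; the wrapper function itself is hypothetical):

    func cleanupLoopDevice(devicePath string) error {
        blkUtil := volumepathhandler.NewBlockVolumePathHandler()
        loop, err := blkUtil.GetLoopDevice(devicePath)
        if err != nil {
            if err.Error() != volumepathhandler.ErrDeviceNotFound {
                return fmt.Errorf("failed to get loopback for device %v: %v", devicePath, err)
            }
            return nil // no loop device was ever attached; nothing to do
        }
        if len(loop) != 0 {
            // Remove the loop device first: detach gets stuck while the
            // device is still held open through the loopback.
            return blkUtil.RemoveLoopDevice(loop)
        }
        return nil
    }
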
@@ -19,7 +19,7 @@ package volume
 import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/kubernetes/pkg/volume/util"
+	"k8s.io/kubernetes/pkg/volume/util/fs"
 )

 var _ MetricsProvider = &metricsDu{}

@@ -66,7 +66,7 @@ func (md *metricsDu) GetMetrics() (*Metrics, error) {

 // runDu executes the "du" command and writes the results to metrics.Used
 func (md *metricsDu) runDu(metrics *Metrics) error {
-	used, err := util.Du(md.path)
+	used, err := fs.Du(md.path)
 	if err != nil {
 		return err
 	}

@@ -76,7 +76,7 @@ func (md *metricsDu) runDu(metrics *Metrics) error {

 // runFind executes the "find" command and writes the results to metrics.InodesUsed
 func (md *metricsDu) runFind(metrics *Metrics) error {
-	inodesUsed, err := util.Find(md.path)
+	inodesUsed, err := fs.Find(md.path)
 	if err != nil {
 		return err
 	}

@@ -87,7 +87,7 @@ func (md *metricsDu) runFind(metrics *Metrics) error {
 // getFsInfo writes metrics.Capacity and metrics.Available from the filesystem
 // info
 func (md *metricsDu) getFsInfo(metrics *Metrics) error {
-	available, capacity, _, inodes, inodesFree, _, err := util.FsInfo(md.path)
+	available, capacity, _, inodes, inodesFree, _, err := fs.FsInfo(md.path)
 	if err != nil {
 		return NewFsInfoFailedError(err)
 	}
@@ -19,7 +19,7 @@ package volume
 import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/kubernetes/pkg/volume/util"
+	"k8s.io/kubernetes/pkg/volume/util/fs"
 )

 var _ MetricsProvider = &metricsStatFS{}

@@ -55,7 +55,7 @@ func (md *metricsStatFS) GetMetrics() (*Metrics, error) {

 // getFsInfo writes metrics.Capacity, metrics.Used and metrics.Available from the filesystem info
 func (md *metricsStatFS) getFsInfo(metrics *Metrics) error {
-	available, capacity, usage, inodes, inodesFree, inodesUsed, err := util.FsInfo(md.path)
+	available, capacity, usage, inodes, inodesFree, inodesUsed, err := fs.FsInfo(md.path)
 	if err != nil {
 		return NewFsInfoFailedError(err)
 	}
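Both metrics providers now read filesystem statistics from pkg/volume/util/fs: the du-based provider shells out to du and find for per-directory usage, while the statfs variant takes everything from one FsInfo call. On Linux an FsInfo of this shape is essentially a wrapper over statfs(2); a rough sketch, assuming golang.org/x/sys/unix and the usual block-count-times-block-size arithmetic (not the upstream body):

    // import "golang.org/x/sys/unix"
    func fsInfo(path string) (available, capacity, usage, inodes, inodesFree, inodesUsed int64, err error) {
        var st unix.Statfs_t
        if err = unix.Statfs(path, &st); err != nil {
            return
        }
        available = int64(st.Bavail) * st.Bsize
        capacity = int64(st.Blocks) * st.Bsize
        usage = (int64(st.Blocks) - int64(st.Bfree)) * st.Bsize
        inodes = int64(st.Files)
        inodesFree = int64(st.Ffree)
        inodesUsed = inodes - inodesFree
        return
    }
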
@@ -29,6 +29,7 @@ import (
 	"k8s.io/kubernetes/pkg/util/strings"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/util"
+	"k8s.io/kubernetes/pkg/volume/util/recyclerclient"
 )

 // This is the primary entrypoint for volume plugins.

@@ -123,7 +124,7 @@ func (plugin *nfsPlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, moun
 		server: source.Server,
 		exportPath: source.Path,
 		readOnly: readOnly,
-		mountOptions: volume.MountOptionFromSpec(spec),
+		mountOptions: util.MountOptionFromSpec(spec),
 	}, nil
 }

@@ -142,13 +143,13 @@ func (plugin *nfsPlugin) newUnmounterInternal(volName string, podUID types.UID,

 // Recycle recycles/scrubs clean an NFS volume.
 // Recycle blocks until the pod has completed or any error occurs.
-func (plugin *nfsPlugin) Recycle(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder) error {
+func (plugin *nfsPlugin) Recycle(pvName string, spec *volume.Spec, eventRecorder recyclerclient.RecycleEventRecorder) error {
 	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.NFS == nil {
 		return fmt.Errorf("spec.PersistentVolumeSource.NFS is nil")
 	}

 	pod := plugin.config.RecyclerPodTemplate
-	timeout := volume.CalculateTimeoutForVolume(plugin.config.RecyclerMinimumTimeout, plugin.config.RecyclerTimeoutIncrement, spec.PersistentVolume)
+	timeout := util.CalculateTimeoutForVolume(plugin.config.RecyclerMinimumTimeout, plugin.config.RecyclerTimeoutIncrement, spec.PersistentVolume)
 	// overrides
 	pod.Spec.ActiveDeadlineSeconds = &timeout
 	pod.GenerateName = "pv-recycler-nfs-"

@@ -158,7 +159,7 @@ func (plugin *nfsPlugin) Recycle(pvName string, spec *volume.Spec, eventRecorder
 			Path: spec.PersistentVolume.Spec.NFS.Path,
 		},
 	}
-	return volume.RecycleVolumeByWatchingPodUntilCompletion(pvName, pod, plugin.host.GetKubeClient(), eventRecorder)
+	return recyclerclient.RecycleVolumeByWatchingPodUntilCompletion(pvName, pod, plugin.host.GetKubeClient(), eventRecorder)
 }

 func (plugin *nfsPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {

@@ -249,7 +250,7 @@ func (b *nfsMounter) SetUpAt(dir string, fsGroup *int64) error {
 	if b.readOnly {
 		options = append(options, "ro")
 	}
-	mountOptions := volume.JoinMountOptions(b.mountOptions, options)
+	mountOptions := util.JoinMountOptions(b.mountOptions, options)
 	err = b.mounter.Mount(source, dir, "nfs", mountOptions)
 	if err != nil {
 		notMnt, mntErr := b.mounter.IsNotMountPoint(dir)
@@ -32,7 +32,6 @@ import (
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 type photonPersistentDiskAttacher struct {

@@ -211,8 +210,8 @@ func (attacher *photonPersistentDiskAttacher) MountDevice(spec *volume.Spec, dev
 	options := []string{}

 	if notMnt {
-		diskMounter := volumehelper.NewSafeFormatAndMountFromHost(photonPersistentDiskPluginName, attacher.host)
-		mountOptions := volume.MountOptionFromSpec(spec)
+		diskMounter := volumeutil.NewSafeFormatAndMountFromHost(photonPersistentDiskPluginName, attacher.host)
+		mountOptions := volumeutil.MountOptionFromSpec(spec)
 		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
 		if err != nil {
 			os.Remove(deviceMountPath)
@@ -30,7 +30,6 @@ import (
 	utilstrings "k8s.io/kubernetes/pkg/util/strings"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/util"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 // This is the primary entrypoint for volume plugins.

@@ -115,7 +114,7 @@ func (plugin *photonPersistentDiskPlugin) newMounterInternal(spec *volume.Spec,
 			plugin: plugin,
 		},
 		fsType: fsType,
-		diskMounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
+		diskMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
 }

 func (plugin *photonPersistentDiskPlugin) newUnmounterInternal(volName string, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Unmounter, error) {

@@ -342,7 +341,7 @@ func (plugin *photonPersistentDiskPlugin) newProvisionerInternal(options volume.
 }

 func (p *photonPersistentDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
-	if !volume.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
+	if !util.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
 		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
 	}

@@ -360,7 +359,7 @@ func (p *photonPersistentDiskProvisioner) Provision() (*v1.PersistentVolume, err
 			Name: p.options.PVName,
 			Labels: map[string]string{},
 			Annotations: map[string]string{
-				volumehelper.VolumeDynamicallyCreatedByKey: "photon-volume-dynamic-provisioner",
+				util.VolumeDynamicallyCreatedByKey: "photon-volume-dynamic-provisioner",
 			},
 		},
 		Spec: v1.PersistentVolumeSpec{
@@ -90,8 +90,8 @@ func (util *PhotonDiskUtil) CreateVolume(p *photonPersistentDiskProvisioner) (pd
 	capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
 	volSizeBytes := capacity.Value()
 	// PhotonController works with GB, convert to GB with rounding up
-	volSizeGB := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024))
-	name := volume.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 255)
+	volSizeGB := int(volumeutil.RoundUpSize(volSizeBytes, 1024*1024*1024))
+	name := volumeutil.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 255)
 	volumeOptions := &photon.VolumeOptions{
 		CapacityGB: volSizeGB,
 		Tags: *p.options.CloudTags,
@@ -33,6 +33,7 @@ import (
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	"k8s.io/kubernetes/pkg/util/io"
 	"k8s.io/kubernetes/pkg/util/mount"
+	"k8s.io/kubernetes/pkg/volume/util/recyclerclient"
 )

 const (

@@ -161,7 +162,7 @@ type RecyclableVolumePlugin interface {
 	// Recycle will use the provided recorder to write any events that might be
 	// interesting to user. It's expected that caller will pass these events to
 	// the PV being recycled.
-	Recycle(pvName string, spec *Spec, eventRecorder RecycleEventRecorder) error
+	Recycle(pvName string, spec *Spec, eventRecorder recyclerclient.RecycleEventRecorder) error
 }

 // DeletableVolumePlugin is an extended interface of VolumePlugin and is used
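Moving RecycleEventRecorder into recyclerclient is the change that ripples through the hostpath and nfs hunks above: RecyclableVolumePlugin is the contract every recycler implements, so each implementation's Recycle signature has to follow. An implementer can catch a mismatch at compile time with the same guard this codebase uses throughout:

    // Fails to compile if nfsPlugin stops satisfying the updated interface.
    var _ volume.RecyclableVolumePlugin = &nfsPlugin{}
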
@@ -28,7 +28,7 @@ import (
 	"k8s.io/kubernetes/pkg/util/mount"
 	kstrings "k8s.io/kubernetes/pkg/util/strings"
 	"k8s.io/kubernetes/pkg/volume"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	"k8s.io/kubernetes/pkg/volume/util"
 )

 const (

@@ -119,7 +119,7 @@ func (plugin *portworxVolumePlugin) newMounterInternal(spec *volume.Spec, podUID
 		},
 		fsType: fsType,
 		readOnly: readOnly,
-		diskMounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
+		diskMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
 }

 func (plugin *portworxVolumePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {

@@ -358,7 +358,7 @@ type portworxVolumeProvisioner struct {
 var _ volume.Provisioner = &portworxVolumeProvisioner{}

 func (c *portworxVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
-	if !volume.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
+	if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
 		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
 	}

@@ -372,7 +372,7 @@ func (c *portworxVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
 			Name: c.options.PVName,
 			Labels: map[string]string{},
 			Annotations: map[string]string{
-				volumehelper.VolumeDynamicallyCreatedByKey: "portworx-volume-dynamic-provisioner",
+				util.VolumeDynamicallyCreatedByKey: "portworx-volume-dynamic-provisioner",
 			},
 		},
 		Spec: v1.PersistentVolumeSpec{
@@ -27,6 +27,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/volume"
+	volutil "k8s.io/kubernetes/pkg/volume/util"
 )

 const (

@@ -55,7 +56,7 @@ func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (stri

 	capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
 	// Portworx Volumes are specified in GB
-	requestGB := int(volume.RoundUpSize(capacity.Value(), 1024*1024*1024))
+	requestGB := int(volutil.RoundUpSize(capacity.Value(), 1024*1024*1024))

 	// Perform a best-effort parsing of parameters. Portworx 1.2.9 and later parses volume parameters from
 	// spec.VolumeLabels. So even if below SpecFromOpts() fails to parse certain parameters or
@@ -32,7 +32,6 @@ import (
 	"k8s.io/kubernetes/pkg/util/strings"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/util"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 // ProbeVolumePlugins is the primary entrypoint for volume plugins.

@@ -176,7 +175,7 @@ func (plugin *quobytePlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod,
 		},
 		registry: source.Registry,
 		readOnly: readOnly,
-		mountOptions: volume.MountOptionFromSpec(spec),
+		mountOptions: util.MountOptionFromSpec(spec),
 	}, nil
 }

@@ -256,7 +255,7 @@ func (mounter *quobyteMounter) SetUpAt(dir string, fsGroup *int64) error {
 	}

 	//if a trailing slash is missing we add it here
-	mountOptions := volume.JoinMountOptions(mounter.mountOptions, options)
+	mountOptions := util.JoinMountOptions(mounter.mountOptions, options)
 	if err := mounter.mounter.Mount(mounter.correctTraillingSlash(mounter.registry), dir, "quobyte", mountOptions); err != nil {
 		return fmt.Errorf("quobyte: mount failed: %v", err)
 	}

@@ -356,7 +355,7 @@ type quobyteVolumeProvisioner struct {
 }

 func (provisioner *quobyteVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
-	if !volume.AccessModesContainedInAll(provisioner.plugin.GetAccessModes(), provisioner.options.PVC.Spec.AccessModes) {
+	if !util.AccessModesContainedInAll(provisioner.plugin.GetAccessModes(), provisioner.options.PVC.Spec.AccessModes) {
 		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", provisioner.options.PVC.Spec.AccessModes, provisioner.plugin.GetAccessModes())
 	}

@@ -410,7 +409,7 @@ func (provisioner *quobyteVolumeProvisioner) Provision() (*v1.PersistentVolume,
 		return nil, err
 	}
 	pv := new(v1.PersistentVolume)
-	metav1.SetMetaDataAnnotation(&pv.ObjectMeta, volumehelper.VolumeDynamicallyCreatedByKey, "quobyte-dynamic-provisioner")
+	metav1.SetMetaDataAnnotation(&pv.ObjectMeta, util.VolumeDynamicallyCreatedByKey, "quobyte-dynamic-provisioner")
 	pv.Spec.PersistentVolumeSource.Quobyte = vol
 	pv.Spec.PersistentVolumeReclaimPolicy = provisioner.options.PersistentVolumeReclaimPolicy
 	pv.Spec.AccessModes = provisioner.options.PVC.Spec.AccessModes
@@ -22,7 +22,7 @@ import (
 	"strings"

 	"k8s.io/api/core/v1"
-	"k8s.io/kubernetes/pkg/volume"
+	"k8s.io/kubernetes/pkg/volume/util"

 	"github.com/golang/glog"
 	quobyteapi "github.com/quobyte/api"

@@ -34,7 +34,7 @@ type quobyteVolumeManager struct {

 func (manager *quobyteVolumeManager) createVolume(provisioner *quobyteVolumeProvisioner, createQuota bool) (quobyte *v1.QuobyteVolumeSource, size int, err error) {
 	capacity := provisioner.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
-	volumeSize := int(volume.RoundUpSize(capacity.Value(), 1024*1024*1024))
+	volumeSize := int(util.RoundUpSize(capacity.Value(), 1024*1024*1024))
 	// Quobyte has the concept of Volumes which doen't have a specific size (they can grow unlimited)
 	// to simulate a size constraint we set here a Quota for logical space
 	volumeRequest := &quobyteapi.CreateVolumeRequest{
@@ -27,7 +27,6 @@ import (
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
 	volutil "k8s.io/kubernetes/pkg/volume/util"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 // NewAttacher implements AttachableVolumePlugin.NewAttacher.

@@ -39,7 +38,7 @@ func (plugin *rbdPlugin) newAttacherInternal(manager diskManager) (volume.Attach
 	return &rbdAttacher{
 		plugin: plugin,
 		manager: manager,
-		mounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
+		mounter: volutil.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
 	}, nil
 }

@@ -52,7 +51,7 @@ func (plugin *rbdPlugin) newDetacherInternal(manager diskManager) (volume.Detach
 	return &rbdDetacher{
 		plugin: plugin,
 		manager: manager,
-		mounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
+		mounter: volutil.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
 	}, nil
 }

@@ -154,7 +153,7 @@ func (attacher *rbdAttacher) MountDevice(spec *volume.Spec, devicePath string, d
 	if ro {
 		options = append(options, "ro")
 	}
-	mountOptions := volume.MountOptionFromSpec(spec, options...)
+	mountOptions := volutil.MountOptionFromSpec(spec, options...)
 	err = attacher.mounter.FormatAndMount(devicePath, deviceMountPath, fstype, mountOptions)
 	if err != nil {
 		os.Remove(deviceMountPath)
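NewSafeFormatAndMountFromHost shows up in nearly every attacher in this patch; its job is just to bundle the host's mounter and exec runner for a plugin. A plausible reconstruction of the helper (the real one lives in pkg/volume/util; treat this as a sketch):

    func NewSafeFormatAndMountFromHost(pluginName string, host volume.VolumeHost) *mount.SafeFormatAndMount {
        // Both the mount implementation and the exec runner come from the
        // kubelet's VolumeHost, keyed by plugin name.
        return &mount.SafeFormatAndMount{
            Interface: host.GetMounter(pluginName),
            Exec:      host.GetExec(pluginName),
        }
    }

The iscsi attacher above builds the same struct literal inline, which is why swapping volumehelper for volumeutil there touches only the MountOptionFromSpec line.
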
@@ -31,6 +31,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
+	"k8s.io/kubernetes/pkg/volume/util"
 )

 // Abstract interface to disk operations.

@@ -85,7 +86,7 @@ func diskSetUp(manager diskManager, b rbdMounter, volPath string, mounter mount.
 	if (&b).GetAttributes().ReadOnly {
 		options = append(options, "ro")
 	}
-	mountOptions := volume.JoinMountOptions(b.mountOptions, options)
+	mountOptions := util.JoinMountOptions(b.mountOptions, options)
 	err = mounter.Mount(globalPDPath, volPath, "", mountOptions)
 	if err != nil {
 		glog.Errorf("failed to bind mount:%s", globalPDPath)
@@ -35,7 +35,7 @@ import (
 	"k8s.io/kubernetes/pkg/util/strings"
 	"k8s.io/kubernetes/pkg/volume"
 	volutil "k8s.io/kubernetes/pkg/volume/util"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
 )

 var (

@@ -327,7 +327,7 @@ func (plugin *rbdPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID,
 		Keyring: keyring,
 		Secret: secret,
 		fsType: fstype,
-		mountOptions: volume.MountOptionFromSpec(spec),
+		mountOptions: volutil.MountOptionFromSpec(spec),
 	}, nil
 }

@@ -389,7 +389,7 @@ func (plugin *rbdPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*vol

 func (plugin *rbdPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) {
 	pluginDir := plugin.host.GetVolumeDevicePluginDir(rbdPluginName)
-	blkutil := volutil.NewBlockVolumePathHandler()
+	blkutil := volumepathhandler.NewBlockVolumePathHandler()

 	globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID)
 	if err != nil {

@@ -569,7 +569,7 @@ type rbdVolumeProvisioner struct {
 var _ volume.Provisioner = &rbdVolumeProvisioner{}

 func (r *rbdVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
-	if !volume.AccessModesContainedInAll(r.plugin.GetAccessModes(), r.options.PVC.Spec.AccessModes) {
+	if !volutil.AccessModesContainedInAll(r.plugin.GetAccessModes(), r.options.PVC.Spec.AccessModes) {
 		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", r.options.PVC.Spec.AccessModes, r.plugin.GetAccessModes())
 	}
@@ -665,7 +665,7 @@ func (r *rbdVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
 	}
 	glog.Infof("successfully created rbd image %q", image)
 	pv := new(v1.PersistentVolume)
-	metav1.SetMetaDataAnnotation(&pv.ObjectMeta, volumehelper.VolumeDynamicallyCreatedByKey, "rbd-dynamic-provisioner")
+	metav1.SetMetaDataAnnotation(&pv.ObjectMeta, volutil.VolumeDynamicallyCreatedByKey, "rbd-dynamic-provisioner")

 	if secretName != "" {
 		rbd.SecretRef = new(v1.SecretReference)

@@ -741,7 +741,7 @@ func newRBD(podUID types.UID, volName string, image string, pool string, readOnl
 		Pool: pool,
 		ReadOnly: readOnly,
 		plugin: plugin,
-		mounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
+		mounter: volutil.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
 		exec: plugin.host.GetExec(plugin.GetPluginName()),
 		manager: manager,
 		MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host)),

@@ -937,13 +937,13 @@ func (rbd *rbdDiskUnmapper) TearDownDevice(mapPath, _ string) error {
 	// GenerateUnmapDeviceFunc() in operation_generator. As a result, these plugins fail to get
 	// and remove loopback device then it will be remained on kubelet node. To avoid the problem,
 	// local attach plugins needs to remove loopback device during TearDownDevice().
-	blkUtil := volutil.NewBlockVolumePathHandler()
-	loop, err := volutil.BlockVolumePathHandler.GetLoopDevice(blkUtil, device)
+	blkUtil := volumepathhandler.NewBlockVolumePathHandler()
+	loop, err := volumepathhandler.BlockVolumePathHandler.GetLoopDevice(blkUtil, device)
 	if err != nil {
 		return fmt.Errorf("rbd: failed to get loopback for device: %v, err: %v", device, err)
 	}
 	// Remove loop device before detaching volume since volume detach operation gets busy if volume is opened by loopback.
-	err = volutil.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop)
+	err = volumepathhandler.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop)
 	if err != nil {
 		return fmt.Errorf("rbd: failed to remove loopback :%v, err: %v", loop, err)
 	}
@@ -563,7 +563,7 @@ func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *v1.RBDPersistentVo
 	capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
 	volSizeBytes := capacity.Value()
 	// Convert to MB that rbd defaults on.
-	sz := int(volume.RoundUpSize(volSizeBytes, 1024*1024))
+	sz := int(volutil.RoundUpSize(volSizeBytes, 1024*1024))
 	volSz := fmt.Sprintf("%d", sz)
 	mon := util.kernelRBDMonitorsOpt(p.Mon)
 	if p.rbdMounter.imageFormat == rbdImageFormat2 {

@@ -621,7 +621,7 @@ func (util *RBDUtil) ExpandImage(rbdExpander *rbdVolumeExpander, oldSize resourc
 	var err error
 	volSizeBytes := newSize.Value()
 	// Convert to MB that rbd defaults on.
-	sz := int(volume.RoundUpSize(volSizeBytes, 1024*1024))
+	sz := int(volutil.RoundUpSize(volSizeBytes, 1024*1024))
 	newVolSz := fmt.Sprintf("%d", sz)
 	newSizeQuant := resource.MustParse(fmt.Sprintf("%dMi", sz))
@@ -33,7 +33,6 @@ import (
 	kstrings "k8s.io/kubernetes/pkg/util/strings"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/util"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 type sioVolume struct {
@@ -142,7 +141,7 @@ func (v *sioVolume) SetUpAt(dir string, fsGroup *int64) error {
 }
 glog.V(4).Info(log("setup created mount point directory %s", dir))

-diskMounter := volumehelper.NewSafeFormatAndMountFromHost(v.plugin.GetPluginName(), v.plugin.host)
+diskMounter := util.NewSafeFormatAndMountFromHost(v.plugin.GetPluginName(), v.plugin.host)
 err = diskMounter.FormatAndMount(devicePath, dir, v.fsType, options)

 if err != nil {
@@ -256,7 +255,7 @@ var _ volume.Provisioner = &sioVolume{}
 func (v *sioVolume) Provision() (*api.PersistentVolume, error) {
 glog.V(4).Info(log("attempting to dynamically provision pvc %v", v.options.PVC.Name))

-if !volume.AccessModesContainedInAll(v.plugin.GetAccessModes(), v.options.PVC.Spec.AccessModes) {
+if !util.AccessModesContainedInAll(v.plugin.GetAccessModes(), v.options.PVC.Spec.AccessModes) {
 	return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", v.options.PVC.Spec.AccessModes, v.plugin.GetAccessModes())
 }
@@ -267,14 +266,14 @@ func (v *sioVolume) Provision() (*api.PersistentVolume, error) {

 capacity := v.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
 volSizeBytes := capacity.Value()
-volSizeGB := int64(volume.RoundUpSize(volSizeBytes, oneGig))
+volSizeGB := int64(util.RoundUpSize(volSizeBytes, oneGig))

 if volSizeBytes == 0 {
 	return nil, fmt.Errorf("invalid volume size of 0 specified")
 }

 if volSizeBytes < eightGig {
-	volSizeGB = int64(volume.RoundUpSize(eightGig, oneGig))
+	volSizeGB = int64(util.RoundUpSize(eightGig, oneGig))
 	glog.V(4).Info(log("capacity less than 8Gi found, adjusted to %dGi", volSizeGB))

 }
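The two RoundUpSize calls implement a floor as well as a rounding: requests are rounded up to whole GiB, and anything under 8 GiB is raised to ScaleIO's minimum. A compressed sketch of the logic (the constant values are assumptions; the real oneGig and eightGig live in the scaleio package):

    // Assumed definitions for illustration only.
    const oneGig = 1024 * 1024 * 1024
    const eightGig = 8 * oneGig

    volSizeGB := int64(util.RoundUpSize(volSizeBytes, oneGig)) // round up to whole GiB
    if volSizeBytes < eightGig {
    	volSizeGB = int64(util.RoundUpSize(eightGig, oneGig)) // enforce the 8 GiB minimum, == 8
    }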
@@ -314,7 +313,7 @@ func (v *sioVolume) Provision() (*api.PersistentVolume, error) {
 Namespace: v.options.PVC.Namespace,
 Labels:    map[string]string{},
 Annotations: map[string]string{
-	volumehelper.VolumeDynamicallyCreatedByKey: "scaleio-dynamic-provisioner",
+	util.VolumeDynamicallyCreatedByKey: "scaleio-dynamic-provisioner",
 },
 },
 Spec: api.PersistentVolumeSpec{
@@ -303,7 +303,7 @@ func (c *secretVolumeUnmounter) TearDown() error {
 }

 func (c *secretVolumeUnmounter) TearDownAt(dir string) error {
-	return volume.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
+	return volumeutil.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
 }

 func getVolumeSource(spec *volume.Spec) (*v1.SecretVolumeSource, bool) {
@@ -35,7 +35,6 @@ import (
 	kstrings "k8s.io/kubernetes/pkg/util/strings"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/util"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 // ProbeVolumePlugins is the primary entrypoint for volume plugins.
@@ -137,7 +136,7 @@ func (plugin *storageosPlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod
 MetricsProvider: volume.NewMetricsStatFS(getPath(pod.UID, volNamespace, volName, spec.Name(), plugin.host)),
 },
 diskMounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
-mountOptions: volume.MountOptionFromSpec(spec),
+mountOptions: util.MountOptionFromSpec(spec),
 }, nil
 }

@@ -389,7 +388,7 @@ func (b *storageosMounter) SetUpAt(dir string, fsGroup *int64) error {
 if b.readOnly {
 	options = append(options, "ro")
 }
-mountOptions := volume.JoinMountOptions(b.mountOptions, options)
+mountOptions := util.JoinMountOptions(b.mountOptions, options)

 globalPDPath := makeGlobalPDName(b.plugin.host, b.pvName, b.volNamespace, b.volName)
 glog.V(4).Infof("Attempting to bind mount to pod volume at %s", dir)
@@ -562,7 +561,7 @@ type storageosProvisioner struct {
 var _ volume.Provisioner = &storageosProvisioner{}

 func (c *storageosProvisioner) Provision() (*v1.PersistentVolume, error) {
-	if !volume.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
+	if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
 		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
 	}

@@ -600,7 +599,7 @@ func (c *storageosProvisioner) Provision() (*v1.PersistentVolume, error) {
 c.labels[k] = v
 }
 capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
-c.sizeGB = int(volume.RoundUpSize(capacity.Value(), 1024*1024*1024))
+c.sizeGB = int(util.RoundUpSize(capacity.Value(), 1024*1024*1024))

 apiCfg, err := parsePVSecret(adminSecretNamespace, adminSecretName, c.plugin.host.GetKubeClient())
 if err != nil {
@@ -622,7 +621,7 @@ func (c *storageosProvisioner) Provision() (*v1.PersistentVolume, error) {
 Name:   vol.Name,
 Labels: map[string]string{},
 Annotations: map[string]string{
-	volumehelper.VolumeDynamicallyCreatedByKey: "storageos-dynamic-provisioner",
+	util.VolumeDynamicallyCreatedByKey: "storageos-dynamic-provisioner",
 },
 },
 Spec: v1.PersistentVolumeSpec{
@@ -40,7 +40,8 @@ import (
 	utilstrings "k8s.io/kubernetes/pkg/util/strings"
 	. "k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/util"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	"k8s.io/kubernetes/pkg/volume/util/recyclerclient"
+	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
 )

 // fakeVolumeHost is useful for testing volume plugins.
@@ -379,7 +380,7 @@ func (plugin *FakeVolumePlugin) GetNewDetacherCallCount() int {
 return plugin.NewDetacherCallCount
 }

-func (plugin *FakeVolumePlugin) Recycle(pvName string, spec *Spec, eventRecorder RecycleEventRecorder) error {
+func (plugin *FakeVolumePlugin) Recycle(pvName string, spec *Spec, eventRecorder recyclerclient.RecycleEventRecorder) error {
 	return nil
 }

@@ -711,7 +712,7 @@ func (fc *FakeProvisioner) Provision() (*v1.PersistentVolume, error) {
 ObjectMeta: metav1.ObjectMeta{
 Name: fc.Options.PVName,
 Annotations: map[string]string{
-	volumehelper.VolumeDynamicallyCreatedByKey: "fakeplugin-provisioner",
+	util.VolumeDynamicallyCreatedByKey: "fakeplugin-provisioner",
 },
 },
 Spec: v1.PersistentVolumeSpec{
@@ -731,10 +732,10 @@ func (fc *FakeProvisioner) Provision() (*v1.PersistentVolume, error) {
 return pv, nil
 }

-var _ util.BlockVolumePathHandler = &FakeVolumePathHandler{}
+var _ volumepathhandler.BlockVolumePathHandler = &FakeVolumePathHandler{}

 //NewDeviceHandler Create a new IoHandler implementation
-func NewBlockVolumePathHandler() util.BlockVolumePathHandler {
+func NewBlockVolumePathHandler() volumepathhandler.BlockVolumePathHandler {
 	return &FakeVolumePathHandler{}
 }

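The `var _ ... = &FakeVolumePathHandler{}` line this hunk rewrites is Go's compile-time interface assertion: assigning to the blank identifier forces the compiler to verify that the fake still satisfies the interface at its new import path, so a missed method surfaces as a build error rather than a failing test. In miniature:

    // If FakeVolumePathHandler ever stops implementing
    // volumepathhandler.BlockVolumePathHandler, this line fails to compile.
    var _ volumepathhandler.BlockVolumePathHandler = &FakeVolumePathHandler{}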
@@ -34,7 +34,7 @@ import (
 	"k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations"
 	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
 )

 // OperationExecutor defines a set of operations for attaching, detaching,
@@ -708,7 +708,7 @@ func (oe *operationExecutor) MountVolume(
 volumeToMount VolumeToMount,
 actualStateOfWorld ActualStateOfWorldMounterUpdater,
 isRemount bool) error {
-	fsVolume, err := volumehelper.CheckVolumeModeFilesystem(volumeToMount.VolumeSpec)
+	fsVolume, err := util.CheckVolumeModeFilesystem(volumeToMount.VolumeSpec)
 	if err != nil {
 		return err
 	}
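CheckVolumeModeFilesystem reports whether the spec's volume mode is Filesystem as opposed to raw Block, and the executor uses that flag to choose which generated operation to run. A rough sketch of the branch with the operation bodies elided (an assumed shape, not the verbatim implementation):

    fsVolume, err := util.CheckVolumeModeFilesystem(volumeToMount.VolumeSpec)
    if err != nil {
    	return err
    }
    if fsVolume {
    	// Filesystem mode: generate a mount/remount operation.
    } else {
    	// Block mode: generate a map-device operation instead.
    }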
@@ -736,7 +736,7 @@ func (oe *operationExecutor) MountVolume(
 if !volumeToMount.PluginIsAttachable {
 	// Non-attachable volume plugins can execute mount for multiple pods
 	// referencing the same volume in parallel
-	podName = volumehelper.GetUniquePodName(volumeToMount.Pod)
+	podName = util.GetUniquePodName(volumeToMount.Pod)
 }

 // TODO mount_device
@@ -747,7 +747,7 @@
 func (oe *operationExecutor) UnmountVolume(
 volumeToUnmount MountedVolume,
 actualStateOfWorld ActualStateOfWorldMounterUpdater) error {
-	fsVolume, err := volumehelper.CheckVolumeModeFilesystem(volumeToUnmount.VolumeSpec)
+	fsVolume, err := util.CheckVolumeModeFilesystem(volumeToUnmount.VolumeSpec)
 	if err != nil {
 		return err
 	}
@@ -778,7 +778,7 @@ func (oe *operationExecutor) UnmountDevice(
 deviceToDetach AttachedVolume,
 actualStateOfWorld ActualStateOfWorldMounterUpdater,
 mounter mount.Interface) error {
-	fsVolume, err := volumehelper.CheckVolumeModeFilesystem(deviceToDetach.VolumeSpec)
+	fsVolume, err := util.CheckVolumeModeFilesystem(deviceToDetach.VolumeSpec)
 	if err != nil {
 		return err
 	}
@@ -881,7 +881,7 @@ func (oe *operationExecutor) CheckVolumeExistenceOperation(
 podName volumetypes.UniquePodName,
 podUID types.UID,
 attachable volume.AttachableVolumePlugin) (bool, error) {
-	fsVolume, err := volumehelper.CheckVolumeModeFilesystem(volumeSpec)
+	fsVolume, err := util.CheckVolumeModeFilesystem(volumeSpec)
 	if err != nil {
 		return false, err
 	}
@@ -913,7 +913,7 @@ func (oe *operationExecutor) CheckVolumeExistenceOperation(
 // is there. Either plugin is attachable or non-attachable, the plugin should
 // have symbolic link associated to raw block device under pod device map
 // if volume exists.
-blkutil := util.NewBlockVolumePathHandler()
+blkutil := volumepathhandler.NewBlockVolumePathHandler()
 var islinkExist bool
 var checkErr error
 if islinkExist, checkErr = blkutil.IsSymlinkExist(mountPath); checkErr != nil {
@@ -37,7 +37,7 @@ import (
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/util"
 	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
 )

 var _ OperationGenerator = &operationGenerator{}
@@ -60,7 +60,7 @@ type operationGenerator struct {
 checkNodeCapabilitiesBeforeMount bool

 // blkUtil provides volume path related operations for block volume
-blkUtil util.BlockVolumePathHandler
+blkUtil volumepathhandler.BlockVolumePathHandler
 }

 // NewOperationGenerator is returns instance of operationGenerator
@@ -68,7 +68,7 @@ func NewOperationGenerator(kubeClient clientset.Interface,
 volumePluginMgr *volume.VolumePluginMgr,
 recorder record.EventRecorder,
 checkNodeCapabilitiesBeforeMount bool,
-blkUtil util.BlockVolumePathHandler) OperationGenerator {
+blkUtil volumepathhandler.BlockVolumePathHandler) OperationGenerator {

 return &operationGenerator{
 kubeClient: kubeClient,
@@ -378,7 +378,7 @@ func (og *operationGenerator) GenerateDetachVolumeFunc(
 // Get attacher plugin and the volumeName by splitting the volume unique name in case
 // there's no VolumeSpec: this happens only on attach/detach controller crash recovery
 // when a pod has been deleted during the controller downtime
-pluginName, volumeName, err = volumehelper.SplitUniqueName(volumeToDetach.VolumeName)
+pluginName, volumeName, err = util.SplitUniqueName(volumeToDetach.VolumeName)
 if err != nil {
 	return volumetypes.GeneratedOperations{}, volumeToDetach.GenerateErrorDetailed("DetachVolume.SplitUniqueName failed", err)
 }
@@ -1290,7 +1290,7 @@ func (og *operationGenerator) GenerateExpandVolumeFunc(
 }

 func checkMountOptionSupport(og *operationGenerator, volumeToMount VolumeToMount, plugin volume.VolumePlugin) error {
-	mountOptions := volume.MountOptionFromSpec(volumeToMount.VolumeSpec)
+	mountOptions := util.MountOptionFromSpec(volumeToMount.VolumeSpec)

 	if len(mountOptions) > 0 && !plugin.SupportsMountOption() {
 		return fmt.Errorf("Mount options are not supported for this volume type")
@@ -101,7 +101,7 @@ func (v VolumePathHandler) MapDevice(devicePath string, mapPath string, linkName
 }
 // Remove old symbolic link(or file) then create new one.
 // This should be done because current symbolic link is
-// stale accross node reboot.
+// stale across node reboot.
 linkPath := path.Join(mapPath, string(linkName))
 if err = os.Remove(linkPath); err != nil && !os.IsNotExist(err) {
 	return err
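The change here is only a spelling fix, but the surrounding logic is worth spelling out: the symlink under the pod device map can survive a reboot while the device name it points at does not, so MapDevice always removes the old entry before relinking. A minimal sketch of that remove-then-create step (a hypothetical standalone form; the real method carries extra validation):

    // Replace any stale symlink at linkPath with one pointing at devicePath.
    linkPath := path.Join(mapPath, string(linkName))
    if err := os.Remove(linkPath); err != nil && !os.IsNotExist(err) {
    	return err // an existing file or link could not be removed
    }
    return os.Symlink(devicePath, linkPath)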
@@ -30,7 +30,6 @@ import (
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 type vsphereVMDKAttacher struct {
@@ -219,8 +218,8 @@ func (attacher *vsphereVMDKAttacher) MountDevice(spec *volume.Spec, devicePath s
 options := []string{}

 if notMnt {
-	diskMounter := volumehelper.NewSafeFormatAndMountFromHost(vsphereVolumePluginName, attacher.host)
-	mountOptions := volume.MountOptionFromSpec(spec, options...)
+	diskMounter := volumeutil.NewSafeFormatAndMountFromHost(vsphereVolumePluginName, attacher.host)
+	mountOptions := volumeutil.MountOptionFromSpec(spec, options...)
 	err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
 	if err != nil {
 		os.Remove(deviceMountPath)
@@ -31,7 +31,6 @@ import (
 	utilstrings "k8s.io/kubernetes/pkg/util/strings"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/util"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 // This is the primary entrypoint for volume plugins.
@@ -120,7 +119,7 @@ func (plugin *vsphereVolumePlugin) newMounterInternal(spec *volume.Spec, podUID
 MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
 },
 fsType: fsType,
-diskMounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
+diskMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
 }

 func (plugin *vsphereVolumePlugin) newUnmounterInternal(volName string, podUID types.UID, manager vdManager, mounter mount.Interface) (volume.Unmounter, error) {
@@ -350,7 +349,7 @@ func (plugin *vsphereVolumePlugin) newProvisionerInternal(options volume.VolumeO
 }

 func (v *vsphereVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
-	if !volume.AccessModesContainedInAll(v.plugin.GetAccessModes(), v.options.PVC.Spec.AccessModes) {
+	if !util.AccessModesContainedInAll(v.plugin.GetAccessModes(), v.options.PVC.Spec.AccessModes) {
 		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", v.options.PVC.Spec.AccessModes, v.plugin.GetAccessModes())
 	}

@@ -368,7 +367,7 @@ func (v *vsphereVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
 Name:   v.options.PVName,
 Labels: map[string]string{},
 Annotations: map[string]string{
-	volumehelper.VolumeDynamicallyCreatedByKey: "vsphere-volume-dynamic-provisioner",
+	util.VolumeDynamicallyCreatedByKey: "vsphere-volume-dynamic-provisioner",
 },
 },
 Spec: v1.PersistentVolumeSpec{
@@ -93,8 +93,8 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (volSpec
 capacity := v.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
 volSizeBytes := capacity.Value()
 // vSphere works with kilobytes, convert to KiB with rounding up
-volSizeKB := int(volume.RoundUpSize(volSizeBytes, 1024))
-name := volume.GenerateVolumeName(v.options.ClusterName, v.options.PVName, 255)
+volSizeKB := int(volumeutil.RoundUpSize(volSizeBytes, 1024))
+name := volumeutil.GenerateVolumeName(v.options.ClusterName, v.options.PVName, 255)
 volumeOptions := &vclib.VolumeOptions{
 	CapacityKB: volSizeKB,
 	Tags:       *v.options.CloudTags,
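Same rounding helper, different unit: the vSphere API takes capacity in KiB, so the byte count is divided by 1024 with round-up. A quick worked example (values chosen for illustration):

    // A 1 GiB PVC request expressed in the KiB units vclib.VolumeOptions wants.
    volSizeKB := int(volumeutil.RoundUpSize(1*1024*1024*1024, 1024)) // 1048576

    // A request that is not KiB-aligned rounds up rather than truncating:
    // volumeutil.RoundUpSize(1025, 1024) == 2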
@@ -38,7 +38,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/testapi"
 	awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
 	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	"k8s.io/kubernetes/pkg/volume/util"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )

@@ -588,7 +588,7 @@ func MakePersistentVolume(pvConfig PersistentVolumeConfig) *v1.PersistentVolume
 GenerateName: pvConfig.NamePrefix,
 Labels:       pvConfig.Labels,
 Annotations: map[string]string{
-	volumehelper.VolumeGidAnnotationKey: "777",
+	util.VolumeGidAnnotationKey: "777",
 },
 },
 Spec: v1.PersistentVolumeSpec{
@@ -37,7 +37,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	"k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/storage/utils"

@@ -175,7 +175,7 @@ func getVSpherePersistentVolumeSpec(volumePath string, persistentVolumeReclaimPo
 ObjectMeta: metav1.ObjectMeta{
 GenerateName: pvConfig.NamePrefix,
 Annotations: map[string]string{
-	volumehelper.VolumeGidAnnotationKey: "777",
+	util.VolumeGidAnnotationKey: "777",
 },
 },
 Spec: v1.PersistentVolumeSpec{
@@ -34,7 +34,7 @@ import (
 	volumecache "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
 	"k8s.io/kubernetes/pkg/volume"
 	volumetest "k8s.io/kubernetes/pkg/volume/testing"
-	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+	"k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/test/integration/framework"
 )

@@ -86,7 +86,7 @@ func TestPodDeletionWithDswp(t *testing.T) {
 ObjectMeta: metav1.ObjectMeta{
 Name: "node-sandbox",
 Annotations: map[string]string{
-	volumehelper.ControllerManagedAttachAnnotation: "true",
+	util.ControllerManagedAttachAnnotation: "true",
 },
 },
 }
@@ -152,7 +152,7 @@ func TestPodUpdateWithWithADC(t *testing.T) {
 ObjectMeta: metav1.ObjectMeta{
 Name: "node-sandbox",
 Annotations: map[string]string{
-	volumehelper.ControllerManagedAttachAnnotation: "true",
+	util.ControllerManagedAttachAnnotation: "true",
 },
 },
 }
@@ -219,8 +219,8 @@ func TestPodUpdateWithKeepTerminatedPodVolumes(t *testing.T) {
 ObjectMeta: metav1.ObjectMeta{
 Name: "node-sandbox",
 Annotations: map[string]string{
-	volumehelper.ControllerManagedAttachAnnotation: "true",
-	volumehelper.KeepTerminatedPodVolumesAnnotation: "true",
+	util.ControllerManagedAttachAnnotation: "true",
+	util.KeepTerminatedPodVolumesAnnotation: "true",
 },
 },
 }
@@ -383,7 +383,7 @@ func TestPodAddedByDswp(t *testing.T) {
 ObjectMeta: metav1.ObjectMeta{
 Name: "node-sandbox",
 Annotations: map[string]string{
-	volumehelper.ControllerManagedAttachAnnotation: "true",
+	util.ControllerManagedAttachAnnotation: "true",
 },
 },
 }
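The test hunks all touch the same two annotation keys, now exported from pkg/volume/util. For context, a minimal sketch of how a test builds a Node that opts into controller-managed attach/detach (field values as in the hunks above; v1 and metav1 imports assumed):

    node := &v1.Node{
    	ObjectMeta: metav1.ObjectMeta{
    		Name: "node-sandbox",
    		Annotations: map[string]string{
    			// Hand attach/detach to the controller instead of kubelet.
    			util.ControllerManagedAttachAnnotation: "true",
    			// Keep volumes after pod termination (used by one test above).
    			util.KeepTerminatedPodVolumesAnnotation: "true",
    		},
    	},
    }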